Compare commits

1 commit

163 changed files with 5395 additions and 16130 deletions

.dockerignore

@@ -1,19 +0,0 @@
.git
build
packages
mon/node_modules
*.o
*.so
osd
stub_osd
stub_uring_osd
stub_bench
osd_test
dump_journal
nbd_proxy
rm_inode
fio
qemu
rpm/*.Dockerfile
debian/*.Dockerfile
Dockerfile

.gitignore (vendored)

@@ -1,18 +0,0 @@
*.o
*.so
package-lock.json
fio
qemu
osd
stub_osd
stub_uring_osd
stub_bench
osd_test
osd_peering_pg_test
dump_journal
nbd_proxy
rm_inode
test_allocator
test_blockstore
test_shit
osd_rmw_test

.gitmodules (vendored)

@@ -1,6 +0,0 @@
[submodule "cpp-btree"]
path = cpp-btree
url = ../cpp-btree.git
[submodule "json11"]
path = json11
url = ../json11.git

CMakeLists.txt

@@ -1,5 +0,0 @@
cmake_minimum_required(VERSION 2.8)

project(vitastor)

add_subdirectory(src)

GPL-2.0.txt

@@ -1,339 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

LICENSE

@@ -1,27 +0,0 @@
Copyright (c) Vitaliy Filippov (vitalif [at] yourcmc.ru), 2019+

All server-side code (OSD, Monitor and so on) is licensed under the terms of
Vitastor Network Public License 1.1 (VNPL 1.1), a copyleft license based on
GNU GPLv3.0 with the additional "Network Interaction" clause which requires
opensourcing all programs directly or indirectly interacting with Vitastor
through a computer network and expressly designed to be used in conjunction
with it ("Proxy Programs"). Proxy Programs may be made public not only under
the terms of the same license, but also under the terms of any GPL-Compatible
Free Software License, as listed by the Free Software Foundation.
This is a stricter copyleft license than the Affero GPL.

Please note that VNPL doesn't require you to open the code of proprietary
software running inside a VM if it's not specially designed to be used with
Vitastor.

Basically, you can't use the software in a proprietary environment to provide
its functionality to users without opensourcing all intermediary components
standing between the user and Vitastor or purchasing a commercial license
from the author 😀.

Client libraries (cluster_client and so on) are dual-licensed under the same
VNPL 1.1 and also GNU GPL 2.0 or later to allow for compatibility with GPLed
software like QEMU and fio.

You can find the full text of VNPL-1.1 in the file [VNPL-1.1.txt](VNPL-1.1.txt).
GPL 2.0 is also included in this repository as [GPL-2.0.txt](GPL-2.0.txt).

Make-gen.pl (new executable file)

@@ -0,0 +1,46 @@
#!/usr/bin/perl
# Collect direct #include "..." dependencies of all .cpp and .h files
use strict;

my $deps = {};
for my $line (split /\n/, `grep '^#include "' *.cpp *.h`)
{
    if ($line =~ /^([^:]+):\#include "([^"]+)"/s)
    {
        $deps->{$1}->{$2} = 1;
    }
}

# Transitive closure: if A includes B and B includes C, then A depends on C.
# Keep iterating until a full pass adds no new edges.
my $added;
do
{
    $added = 0;
    for my $file (keys %$deps)
    {
        for my $dep (keys %{$deps->{$file}})
        {
            if ($deps->{$dep})
            {
                for my $subdep (keys %{$deps->{$dep}})
                {
                    if (!$deps->{$file}->{$subdep})
                    {
                        $added = 1;
                        $deps->{$file}->{$subdep} = 1;
                    }
                }
            }
        }
    }
} while ($added);

# Emit one make rule per .cpp file: the .o target depends on the source file
# plus every transitively included header.
for my $file (sort keys %$deps)
{
    if ($file =~ /\.cpp$/)
    {
        my $obj = $file;
        $obj =~ s/\.cpp$/.o/s;
        print "$obj: $file ".join(" ", sort keys %{$deps->{$file}})."\n";
        print "\tg++ \$(CXXFLAGS) -c -o \$\@ \$\<\n";
    }
}
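
Note (an inference from the diff itself, not something the script states): the rules this script prints have exactly the shape of the "# Autogenerated" section of the Makefile added below, so it presumably exists to regenerate that section - run it in the source directory and paste its stdout after the "# Autogenerated" marker. For each .cpp file it emits a pair of lines like (these exact rules appear in the Makefile):

```
allocator.o: allocator.cpp allocator.h
	g++ $(CXXFLAGS) -c -o $@ $<
```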

Makefile (new file)

@@ -0,0 +1,153 @@
BLOCKSTORE_OBJS := allocator.o blockstore.o blockstore_impl.o blockstore_init.o blockstore_open.o blockstore_journal.o blockstore_read.o \
	blockstore_write.o blockstore_sync.o blockstore_stable.o blockstore_rollback.o blockstore_flush.o crc32c.o ringloop.o
# -fsanitize=address
CXXFLAGS := -g -O3 -Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fPIC -fdiagnostics-color=always

all: libfio_blockstore.so osd libfio_sec_osd.so libfio_cluster.so stub_osd stub_uring_osd stub_bench osd_test dump_journal

clean:
	rm -f *.o

dump_journal: dump_journal.cpp crc32c.o blockstore_journal.h
	g++ $(CXXFLAGS) -o $@ $< crc32c.o
libblockstore.so: $(BLOCKSTORE_OBJS)
	g++ $(CXXFLAGS) -o $@ -shared $(BLOCKSTORE_OBJS) -ltcmalloc_minimal -luring
libfio_blockstore.so: ./libblockstore.so fio_engine.o json11.o
	g++ $(CXXFLAGS) -shared -o $@ fio_engine.o json11.o ./libblockstore.so -ltcmalloc_minimal -luring

OSD_OBJS := osd.o osd_secondary.o msgr_receive.o msgr_send.o osd_peering.o osd_flush.o osd_peering_pg.o \
	osd_primary.o osd_primary_subops.o etcd_state_client.o messenger.o osd_cluster.o http_client.o pg_states.o \
	osd_rmw.o json11.o base64.o timerfd_manager.o
osd: ./libblockstore.so osd_main.cpp osd.h osd_ops.h $(OSD_OBJS)
	g++ $(CXXFLAGS) -o $@ osd_main.cpp $(OSD_OBJS) ./libblockstore.so -ltcmalloc_minimal -luring

stub_osd: stub_osd.o rw_blocking.o
	g++ $(CXXFLAGS) -o $@ stub_osd.o rw_blocking.o -ltcmalloc_minimal

STUB_URING_OSD_OBJS := stub_uring_osd.o epoll_manager.o messenger.o msgr_send.o msgr_receive.o ringloop.o timerfd_manager.o json11.o
stub_uring_osd: $(STUB_URING_OSD_OBJS)
	g++ $(CXXFLAGS) -o $@ -ltcmalloc_minimal $(STUB_URING_OSD_OBJS) -luring
stub_bench: stub_bench.cpp osd_ops.h rw_blocking.o
	g++ $(CXXFLAGS) -o $@ stub_bench.cpp rw_blocking.o -ltcmalloc_minimal
osd_test: osd_test.cpp osd_ops.h rw_blocking.o
	g++ $(CXXFLAGS) -o $@ osd_test.cpp rw_blocking.o -ltcmalloc_minimal
osd_peering_pg_test: osd_peering_pg_test.cpp osd_peering_pg.o
	g++ $(CXXFLAGS) -o $@ $< osd_peering_pg.o -ltcmalloc_minimal

libfio_sec_osd.so: fio_sec_osd.o rw_blocking.o
	g++ $(CXXFLAGS) -ltcmalloc_minimal -shared -o $@ fio_sec_osd.o rw_blocking.o

FIO_CLUSTER_OBJS := fio_cluster.o cluster_client.o epoll_manager.o etcd_state_client.o \
	messenger.o msgr_send.o msgr_receive.o ringloop.o json11.o http_client.o pg_states.o timerfd_manager.o base64.o
libfio_cluster.so: $(FIO_CLUSTER_OBJS)
	g++ $(CXXFLAGS) -ltcmalloc_minimal -shared -o $@ $(FIO_CLUSTER_OBJS) -luring

test_blockstore: ./libblockstore.so test_blockstore.cpp timerfd_interval.o
	g++ $(CXXFLAGS) -o test_blockstore test_blockstore.cpp timerfd_interval.o ./libblockstore.so -ltcmalloc_minimal -luring
test: test.cpp osd_peering_pg.o
	g++ $(CXXFLAGS) -o test test.cpp osd_peering_pg.o -luring -lm
test_allocator: test_allocator.cpp allocator.o
	g++ $(CXXFLAGS) -o test_allocator test_allocator.cpp allocator.o

crc32c.o: crc32c.c crc32c.h
	g++ $(CXXFLAGS) -c -o $@ $<
json11.o: json11/json11.cpp
	g++ $(CXXFLAGS) -c -o json11.o json11/json11.cpp

# Autogenerated
allocator.o: allocator.cpp allocator.h
	g++ $(CXXFLAGS) -c -o $@ $<
base64.o: base64.cpp base64.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore.o: blockstore.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore_flush.o: blockstore_flush.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore_impl.o: blockstore_impl.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore_init.o: blockstore_init.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore_journal.o: blockstore_journal.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore_open.o: blockstore_open.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore_read.o: blockstore_read.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore_rollback.o: blockstore_rollback.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore_stable.o: blockstore_stable.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore_sync.o: blockstore_sync.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
blockstore_write.o: blockstore_write.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
cluster_client.o: cluster_client.cpp cluster_client.h etcd_state_client.h http_client.h json11/json11.hpp messenger.h object_id.h osd_id.h osd_ops.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
dump_journal.o: dump_journal.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
epoll_manager.o: epoll_manager.cpp epoll_manager.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
etcd_state_client.o: etcd_state_client.cpp base64.h etcd_state_client.h http_client.h json11/json11.hpp object_id.h osd_id.h osd_ops.h pg_states.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
fio_cluster.o: fio_cluster.cpp cluster_client.h epoll_manager.h etcd_state_client.h fio/fio.h fio/optgroup.h http_client.h json11/json11.hpp messenger.h object_id.h osd_id.h osd_ops.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
fio_engine.o: fio_engine.cpp blockstore.h fio/fio.h fio/optgroup.h json11/json11.hpp object_id.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
fio_sec_osd.o: fio_sec_osd.cpp fio/fio.h fio/optgroup.h object_id.h osd_id.h osd_ops.h rw_blocking.h
	g++ $(CXXFLAGS) -c -o $@ $<
http_client.o: http_client.cpp http_client.h json11/json11.hpp timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
messenger.o: messenger.cpp json11/json11.hpp messenger.h object_id.h osd_id.h osd_ops.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
msgr_receive.o: msgr_receive.cpp json11/json11.hpp messenger.h object_id.h osd_id.h osd_ops.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
msgr_send.o: msgr_send.cpp json11/json11.hpp messenger.h object_id.h osd_id.h osd_ops.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd.o: osd.cpp blockstore.h cpp-btree/btree_map.h etcd_state_client.h http_client.h json11/json11.hpp messenger.h object_id.h osd.h osd_id.h osd_ops.h osd_peering_pg.h pg_states.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_cluster.o: osd_cluster.cpp base64.h blockstore.h cpp-btree/btree_map.h etcd_state_client.h http_client.h json11/json11.hpp messenger.h object_id.h osd.h osd_id.h osd_ops.h osd_peering_pg.h pg_states.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_flush.o: osd_flush.cpp blockstore.h cpp-btree/btree_map.h etcd_state_client.h http_client.h json11/json11.hpp messenger.h object_id.h osd.h osd_id.h osd_ops.h osd_peering_pg.h pg_states.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_main.o: osd_main.cpp blockstore.h cpp-btree/btree_map.h etcd_state_client.h http_client.h json11/json11.hpp messenger.h object_id.h osd.h osd_id.h osd_ops.h osd_peering_pg.h pg_states.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_peering.o: osd_peering.cpp base64.h blockstore.h cpp-btree/btree_map.h etcd_state_client.h http_client.h json11/json11.hpp messenger.h object_id.h osd.h osd_id.h osd_ops.h osd_peering_pg.h pg_states.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_peering_pg.o: osd_peering_pg.cpp cpp-btree/btree_map.h object_id.h osd_id.h osd_ops.h osd_peering_pg.h pg_states.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_peering_pg_test.o: osd_peering_pg_test.cpp cpp-btree/btree_map.h object_id.h osd_id.h osd_ops.h osd_peering_pg.h pg_states.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_primary.o: osd_primary.cpp blockstore.h cpp-btree/btree_map.h etcd_state_client.h http_client.h json11/json11.hpp messenger.h object_id.h osd.h osd_id.h osd_ops.h osd_peering_pg.h osd_primary.h osd_rmw.h pg_states.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_primary_subops.o: osd_primary_subops.cpp blockstore.h cpp-btree/btree_map.h etcd_state_client.h http_client.h json11/json11.hpp messenger.h object_id.h osd.h osd_id.h osd_ops.h osd_peering_pg.h osd_primary.h osd_rmw.h pg_states.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_rmw.o: osd_rmw.cpp object_id.h osd_id.h osd_rmw.h xor.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_rmw_test.o: osd_rmw_test.cpp object_id.h osd_id.h osd_rmw.cpp osd_rmw.h test_pattern.h xor.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_secondary.o: osd_secondary.cpp blockstore.h cpp-btree/btree_map.h etcd_state_client.h http_client.h json11/json11.hpp messenger.h object_id.h osd.h osd_id.h osd_ops.h osd_peering_pg.h pg_states.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
osd_test.o: osd_test.cpp object_id.h osd_id.h osd_ops.h rw_blocking.h test_pattern.h
	g++ $(CXXFLAGS) -c -o $@ $<
pg_states.o: pg_states.cpp pg_states.h
	g++ $(CXXFLAGS) -c -o $@ $<
ringloop.o: ringloop.cpp ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
rw_blocking.o: rw_blocking.cpp rw_blocking.h
	g++ $(CXXFLAGS) -c -o $@ $<
stub_bench.o: stub_bench.cpp object_id.h osd_id.h osd_ops.h rw_blocking.h
	g++ $(CXXFLAGS) -c -o $@ $<
stub_osd.o: stub_osd.cpp object_id.h osd_id.h osd_ops.h rw_blocking.h
	g++ $(CXXFLAGS) -c -o $@ $<
stub_uring_osd.o: stub_uring_osd.cpp epoll_manager.h json11/json11.hpp messenger.h object_id.h osd_id.h osd_ops.h ringloop.h timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<
test.o: test.cpp allocator.h blockstore.h blockstore_flush.h blockstore_impl.h blockstore_init.h blockstore_journal.h cpp-btree/btree_map.h crc32c.h object_id.h osd_id.h osd_ops.h osd_peering_pg.h pg_states.h ringloop.h
	g++ $(CXXFLAGS) -c -o $@ $<
test_allocator.o: test_allocator.cpp allocator.h
	g++ $(CXXFLAGS) -c -o $@ $<
test_blockstore.o: test_blockstore.cpp blockstore.h object_id.h ringloop.h timerfd_interval.h
	g++ $(CXXFLAGS) -c -o $@ $<
timerfd_interval.o: timerfd_interval.cpp ringloop.h timerfd_interval.h
	g++ $(CXXFLAGS) -c -o $@ $<
timerfd_manager.o: timerfd_manager.cpp timerfd_manager.h
	g++ $(CXXFLAGS) -c -o $@ $<

README-ru.md

@@ -1,491 +0,0 @@
## Vitastor

[Read English version](README.md)

## The Idea

I just want to make a good-quality block SDS!

Vitastor is a distributed block SDS, a direct analogue of Ceph RBD and the internal storage
systems of popular cloud providers. However, unlike them, Vitastor is fast and simple at the
same time. It's just small for now :-).

Architectural similarity to Ceph means strict consistency built in at the level of the write
algorithms, replication through a primary OSD, symmetric clustering without a single point of
failure, and automatic distribution of data over any number of disks of any size, with
configurable redundancy schemes - replication or arbitrary erasure codes.
## Features

Vitastor is currently in pre-release status: advanced features are still missing, and
breaking changes are likely in future versions. However, the following is already implemented:

- The basics: reliable clustered block storage without a single point of failure
- Performance ;-D
- Several redundancy schemes: replication, XOR n+1 (1 parity disk), Reed-Solomon erasure
  codes based on the jerasure library, with any number of data and parity disks per group
- Configuration via simple human-readable JSON structures in etcd
- Automatic data distribution over OSDs, with support for:
  - Mathematical optimization for better distribution uniformity and minimal data movement
  - Several pools with different redundancy schemes
  - A placement tree, OSD selection by tags / device classes (SSD-only, HDD-only) and by subtree
  - Configurable failure domains (disk/server/rack and so on)
- Recovery of degraded blocks
- Rebalancing, i.e. moving data between OSDs (disks)
- "Lazy" fsync support (fsync not on every operation)
- I/O statistics reporting to etcd
- A userspace client library for I/O
- A QEMU disk driver (built outside the QEMU source tree)
- A disk driver for the fio benchmarking tool (also built outside the fio source tree)
- An NBD proxy for mounting images with the kernel (a "userspace block device")
- An image/inode removal tool (vitastor-rm)
- Packages for Debian and CentOS
- Per-inode I/O and space usage statistics
- Inode naming via metadata stored in etcd
- Snapshots and copy-on-write clones
## Roadmap

- Better disk provisioning scripts and OSD autostart
- Other administrative tools
- Plugins for OpenStack, Kubernetes, OpenNebula, Proxmox and other cloud systems
- An iSCSI proxy
- Operation timeouts and faster failure detection
- Background integrity checks without checksums (replica comparison)
- Checksums
- Optimizations for hybrid SSD+HDD storage
- RDMA and NVDIMM support
- A web interface
- Possibly, compression
- Possibly, data caching via the system page cache
## Architecture

Just as in Ceph, Vitastor has:

- Pools, PGs, OSDs, monitors, failure domains and a placement tree (an analogue of the CRUSH tree).
- Images split into fixed-size blocks (objects) that are distributed over OSDs.
- OSD journals and metadata, which may also be placed on separate fast drives.
- Transactional write operations. Vitastor, though, has a deferred/lazy fsync (commit) mode
  in which fsync is not issued on every write, which makes it more suitable for use with
  "bad" (desktop) SSDs. All writes are still atomic in any case.
- A client library that also tries to wait out any cluster failure: you can reboot even the
  whole cluster at once, and clients will only pause temporarily, but won't disconnect.

Some basic terms for people unfamiliar with Ceph:

- OSD (Object Storage Daemon) is a process that stores data on one disk and serves
  read/write requests from clients.
- Pool is a container for data with the same redundancy scheme and the same OSD placement rules.
- PG (Placement Group) is a group of objects stored on the same set of replicas (OSDs).
  Several PGs may be stored on the same set of replicas, but objects of one PG are
  normally not stored on different OSD sets.
- Monitor is a daemon that watches over the cluster state.
- Failure Domain is a group of OSDs that you allow to fail all together. In other words,
  it's a group of OSDs into which the storage system never puts different copies of the
  same data block. For example, if the failure domain is "server", then two disks of one
  server will never hold 2 or more copies of the same data block, so even if all disks
  in that server fail, it's equivalent to losing only 1 copy of any data block.
- Placement Tree (CRUSH Tree) is a hierarchical grouping of OSDs into nodes which can
  then be used as failure domains: a disk (OSD) belongs to a server, a server to a rack,
  a rack to a row, a row to a datacenter, and so on.
How Vitastor differs from Ceph:

- Vitastor's primary focus is on SSDs. Vitastor should probably also work well with a
  combination of SSDs and HDDs via bcache, and native SSD+HDD optimizations may be added
  in the future. However, HDD-only storage without any SSDs at all is not a priority,
  so optimizations for that case may never happen.
- The Vitastor OSD is single-threaded and will always remain so, because it's the most
  optimal way of operation. If 1 CPU core is not enough for 1 disk, just split the disk
  into partitions and run several OSDs on it. But most likely 1 core will be enough -
  Vitastor is not as CPU-hungry as Ceph.
- Journal and metadata are always kept in memory, so no extra time is ever wasted on
  reading metadata from disk. Metadata size depends linearly on the disk size and the
  data block size, which is configured at the cluster level and is 128 KB by default.
  With a 128 KB block, metadata takes about 512 MB of memory per 1 TB of disk space
  (and that's still less than Ceph needs); a quick arithmetic check follows right after
  this list. The journal doesn't need to be big at all; for example, the performance
  tests in this document were conducted with a journal of only 16 MB. A big journal is
  probably even harmful, since "dirty" writes (writes not yet flushed from the journal)
  also consume memory and may slow things down a little.
- Vitastor has no internal copy-on-write. I believe that a CoW storage is much harder
  to implement, so it's harder to achieve consistently good results with it. Maybe one
  fine day I'll come up with a beautiful algorithm for a CoW storage, but until then
  there will be no internal CoW in Vitastor. None of this applies to "external" CoW
  (snapshots and clones).
- Vitastor's base layer is a simple block storage with fixed-size blocks, not a complex
  object storage with rich features like Ceph's RADOS.
- Vitastor has a "lazy fsync" mode in which the OSD batches write requests before flushing
  them to disk, which allows for better performance with cheap desktop SSDs without
  capacitors ("Advanced Power Loss Protection" / "Capacitor-Based Power Loss Protection").
  Nevertheless, this mode is still slower than proper server SSDs with instant fsync,
  because it adds extra network round trips, so it's still recommended to use good
  server-grade disks - especially since they cost almost the same as desktop ones anyway.
- PGs are ephemeral. They are not stored on disks and exist only in the memory of running OSDs.
- Recovery processes operate on individual objects, not whole PGs.
- There are no PGLOGs.
- "Monitors" don't store data. The cluster configuration and state are stored in etcd as
  simple human-readable JSON structures. Vitastor monitors only watch the cluster state
  and manage data movement. In this sense, the Vitastor monitor is not a critical
  component of the system and is more akin to the Ceph manager (MGR). The Vitastor
  monitor is written in node.js.
- PG placement is not based on consistent hashing. Instead, all PG mappings are stored
  directly in etcd (keeping a few hundred thousand records in memory is no problem at
  all, compared to recomputing hashes every time). Redistribution of PGs over OSDs is
  done via mathematical optimization - specifically, by reducing the problem to LP
  (linear programming) and solving it with the lp_solve tool. This approach usually
  makes the space distribution almost perfect - uniformity is typically 96-99%, unlike
  Ceph, where bare CRUSH without a balancer usually yields 80-90%. It also minimizes
  the amount of data movement and the randomness of OSD connections, and lets you change
  the distribution by hand without fear of breaking the rebalancing logic. There is a
  potential downside - it's presumed that this approach may break in a very large
  cluster - but it definitely works fine with up to several hundred OSDs. And, of course,
  consistent hashing is easy to implement if the need ever arises.
- There is no separate layer like "CRUSH rules". You configure redundancy schemes,
  failure domains and OSD selection rules directly in the pool configuration.
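
A quick arithmetic check of the metadata-size claim above (as referenced in the list; this
only re-derives the quoted numbers, it adds no new measurement):

$$ \frac{1\,\mathrm{TB}}{128\,\mathrm{KB}} = \frac{2^{40}}{2^{17}} = 2^{23} \approx 8.4\cdot10^{6}\ \text{objects}, \qquad \frac{512\,\mathrm{MB}}{2^{23}\ \text{objects}} = 64\ \text{bytes per object entry}. $$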
## Understanding Storage Performance

In short: for a fast storage system, latency matters more than peak iops.

The best possible latency is achieved with a single-threaded test at queue depth 1, which
roughly corresponds to the least loaded state of the cluster. In this case IOPS = 1/latency.
Neither the number of servers, nor the number of disks, nor the number of server
processes/threads scales latency down... It depends only on how fast a single server
process (and the client) handles a single operation. For example, at 0.5 ms per operation,
a T1Q1 workload is capped at 2000 iops.

Why does latency matter? Because some applications *can't* use a queue depth greater than 1,
as their workload isn't parallelizable. An important example is all databases with
consistency support (ACID), because they all implement it via journaling, and journals
are written sequentially, with fsync() after each operation.

fsync, by the way, is another very important thing almost always forgotten in benchmarks.
The point is that all modern disks have write caches/buffers and do not guarantee that data
physically reaches the medium before you issue fsync(), which the operating system
translates into a cache flush command.

Cheap desktop and laptop SSDs are very fast without fsync - NVMe drives, for example, can
handle on the order of 80000 write operations per second at queue depth 1 without fsync.
However, with fsync, when they're actually forced to write each data block to flash memory,
they only deliver 1000-2000 write operations per second (a number that is almost constant
across SSD models).

Server-grade SSDs often have supercapacitors that act as a built-in UPS, giving the drive
enough time to flush its DRAM cache into persistent flash memory on power loss. Thanks to
this, such drives *ignore fsync* with a clear conscience, because they know for sure that
the cached data will reach persistent memory.

All the best-known software-defined storage systems, such as Ceph and the internal storage
systems used by cloud providers like Amazon, Google and Yandex, are slow in terms of latency.
At best they deliver latencies starting from 0.3 ms for reads and 0.6 ms for writes with
4 KB blocks, even on the best possible hardware.

And that's in the SSD era, when you can go to the market and buy an SSD with 0.1 ms read
latency and 0.04 ms write latency for $100 or even less.

When I need to quickly test disk subsystem performance, I use the following 6 commands,
with small variations:
- Linear write:
  `fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4M -iodepth=32 -rw=write -runtime=60 -filename=/dev/sdX`
- Linear read:
  `fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4M -iodepth=32 -rw=read -runtime=60 -filename=/dev/sdX`
- Single-threaded write (T1Q1):
  `fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4k -iodepth=1 -fsync=1 -rw=randwrite -runtime=60 -filename=/dev/sdX`
- Single-threaded read (T1Q1):
  `fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4k -iodepth=1 -rw=randread -runtime=60 -filename=/dev/sdX`
- Parallel write (use numjobs when 1 CPU core can't saturate the disk):
  `fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4k -iodepth=128 [-numjobs=4 -group_reporting] -rw=randwrite -runtime=60 -filename=/dev/sdX`
- Parallel read (numjobs - same as above):
  `fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4k -iodepth=128 [-numjobs=4 -group_reporting] -rw=randread -runtime=60 -filename=/dev/sdX`
## Theoretical Maximum Performance of Vitastor

With replication:

- Single-threaded (T1Q1) read latency: 1 network RTT + 1 disk read.
- Single-threaded write+fsync latency:
  - With immediate commit: 2 RTT + 1 disk write.
  - With deferred ("lazy") commit: 4 RTT + 1 disk write + 1 disk fsync.
- Parallel read: the sum of iops of all disks, or the network throughput if it saturates first.
- Parallel write: the sum of iops of all disks / the number of replicas / WA, or the network throughput if it saturates first.

With erasure codes (EC):

- Single-threaded (T1Q1) read latency: 1.5 RTT + 1 disk read.
- Single-threaded write+fsync latency:
  - With immediate commit: 3.5 RTT + 1 disk read + 2 disk writes.
  - With deferred ("lazy") commit: 5.5 RTT + 1 disk read + 2 disk writes + 2 disk fsyncs.
  - The 0.5 actually stands for (k-1)/k, where k is the number of data disks: no extra
    network hop is needed when the read is served locally.
- Parallel read: the sum of iops of all disks, or the network throughput if it saturates first.
- Parallel write: the sum of iops of all disks / the total number of data and parity disks / WA, or the network throughput if it saturates first.

Note: disk iops here should be measured in a mixed read/write mode, in proportions matching the formulas above.
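
An illustrative worked example of the replicated immediate-commit formula (the 0.1 ms RTT
here is a hypothetical round number; the 0.04 ms write latency is the off-the-shelf SSD
figure quoted earlier):

$$ T_{\text{write}} = 2\cdot\mathrm{RTT} + t_{\text{write}} = 2\cdot0.1\,\mathrm{ms} + 0.04\,\mathrm{ms} = 0.24\,\mathrm{ms} \;\Rightarrow\; \text{T1Q1 iops} \approx \frac{1}{0.24\,\mathrm{ms}} \approx 4200. $$

The measured T1Q1 write result in the benchmark section below (7087 iops, i.e. 0.14 ms) is
consistent with the same formula and a lower actual RTT.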
WA (write amplification) for 4 KB blocks in Vitastor is usually 3-5:

1. Metadata write to the journal
2. Data block write to the journal
3. Metadata write to the metadata DB
4. One more journal metadata write when EC is used
5. Data block write to the data disk

If you manage to find an SSD that handles 512-byte data blocks well (Optane?), then writes
1, 3 and 4 can be reduced to 512 bytes (1/8 of the data size), lowering WA to just 2.375.

In addition, deferred/lazy commit reduces WA under parallel load, because journal blocks
are written to disk only when they fill up or when an fsync is explicitly requested.
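
The 2.375 figure follows directly from the five writes listed above: steps 2 and 5 remain
full 4096-byte writes, while steps 1, 3 and 4 shrink to 512 bytes:

$$ \mathrm{WA} = \frac{2\cdot4096 + 3\cdot512}{4096} = 2 + \frac{3}{8} = 2.375. $$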
## Example Comparison with Ceph

Hardware - 4 servers, each with:

- 6x SATA SSD Intel D3-4510 3.84 TB
- 2x Xeon Gold 6242 (16 cores @ 2.8 GHz)
- 384 GB RAM
- 1x 25 GbE network adapter (Mellanox ConnectX-4 LX), connected to a Juniper QFX5200 switch

CPU power saving was disabled. In the tests, both Vitastor and Ceph were deployed with
2 OSDs per 1 SSD.

All of the results below are for random 4 KB block workloads (unless explicitly stated otherwise).

Raw disk performance:

- T1Q1 write ~27000 iops (~0.037 ms latency)
- T1Q1 read ~9800 iops (~0.101 ms latency)
- T1Q32 write ~60000 iops
- T1Q32 read ~81700 iops

Ceph 15.2.4 (Bluestore):

- T1Q1 write ~1000 iops (~1 ms latency)
- T1Q1 read ~1750 iops (~0.57 ms latency)
- T8Q64 write ~100000 iops, with about 40 CPU cores consumed by OSD processes on each server
- T8Q64 read ~480000 iops, with about 40 CPU cores consumed by OSD processes on each server

The 8-thread tests were run over 8 RBD images of 400 GB each, from all hosts (2 fio
processes were started on each host). This is needed because in Ceph several RBD clients
writing to a single image slow down dramatically. Ceph's RocksDB and Bluestore settings
were left at defaults; the only change was disabling cephx_sign_messages.

In fact, these results are not that bad for Ceph (they could be worse). These servers are
actually well-balanced for Ceph - 6 SATA SSDs are just enough to saturate the 25 Gbit
network, and without the 2 powerful processors Ceph wouldn't have had enough cores to
deliver a decent result, as the 40-core consumption during the parallel test shows.
Vitastor:

- T1Q1 write: 7087 iops (0.14 ms latency)
- T1Q1 read: 6838 iops (0.145 ms latency)
- T2Q64 write: 162000 iops, CPU consumption - 3 cores on each server
- T8Q64 read: 895000 iops, CPU consumption - 4 cores on each server
- Linear write (4M T1Q32): 2800 MB/s
- Linear read (4M T1Q32): 1500 MB/s

The 8-thread read test was conducted over 1 big image (3.2 TB) from all hosts (again,
2 fio processes per host). In Vitastor there is no difference between 1 image and 8 images.
Naturally, as in the Ceph tests above, about 1/4 of the read requests in this configuration
were served from the local machine. If the test is conducted so that all operations always
go to the primary OSDs over the network, the test hits the network harder and the result
is about 689000 iops.

Vitastor settings: `--disable_data_fsync true --immediate_commit all --flusher_count 8
--disk_alignment 4096 --journal_block_size 4096 --meta_block_size 4096
--journal_no_same_sector_overwrites true --journal_sector_buffer_count 1024
--journal_size 16777216`.
### EC/XOR 2+1

Vitastor:

- T1Q1 write: 2808 iops (~0.355 ms latency)
- T1Q1 read: 6190 iops (~0.16 ms latency)
- T2Q64 write: 85500 iops, CPU consumption - 3.4 cores on each server
- T8Q64 read: 812000 iops, CPU consumption - 4.7 cores on each server
- Linear write (4M T1Q32): 3200 MB/s
- Linear read (4M T1Q32): 1800 MB/s

Ceph:

- T1Q1 write: 730 iops (~1.37 ms latency)
- T1Q1 read: 1500 iops with a cold metadata cache (~0.66 ms latency), 2300 iops after 2 minutes of warmup (~0.435 ms latency)
- T4Q128 write (4 RBD images): 45300 iops, CPU consumption - 30 cores on each server
- T8Q64 read (4 RBD images): 278600 iops, CPU consumption - 40 cores on each server
- Linear write (4M T1Q32): 1950 MB/s to an empty image, 2500 MB/s to a filled one
- Linear read (4M T1Q32): 2400 MB/s
### NBD

NBD is currently the only way to mount Vitastor with the Linux kernel. It incurs extra
data copies, so it degrades performance somewhat - mostly for linear I/O, though; random
I/O is barely affected.

NBD stands for "Network Block Device", but in fact it also works simply as a FUSE
analogue for block devices, i.e. it provides a "userspace block device".

Vitastor with a single-threaded NBD proxy on the same hardware:

- T1Q1 write: 6000 iops (0.166 ms latency)
- T1Q1 read: 5518 iops (0.18 ms latency)
- T1Q128 write: 94400 iops
- T1Q128 read: 103000 iops
- Linear write (4M T1Q128): 1266 MB/s (compared to 2800 MB/s via fio)
- Linear read (4M T1Q128): 975 MB/s (compared to 1500 MB/s via fio)
## Installation

### Debian

- Add the Vitastor repository key:
  `wget -q -O - https://vitastor.io/debian/pubkey | sudo apt-key add -`
- Add the Vitastor repository to /etc/apt/sources.list:
  - Debian 11 (Bullseye/Sid): `deb https://vitastor.io/debian bullseye main`
  - Debian 10 (Buster): `deb https://vitastor.io/debian buster main`
- For Debian 10 (Buster), also enable the backports repository:
  `deb http://deb.debian.org/debian buster-backports main`
- Install the packages: `apt update; apt install vitastor lp-solve etcd linux-image-amd64 qemu`

### CentOS

- Add the Vitastor repository:
  - CentOS 7: `yum install https://vitastor.io/rpms/centos/7/vitastor-release-1.0-1.el7.noarch.rpm`
  - CentOS 8: `dnf install https://vitastor.io/rpms/centos/8/vitastor-release-1.0-1.el8.noarch.rpm`
- Enable EPEL: `yum/dnf install epel-release`
- Enable additional CentOS repositories:
  - CentOS 7: `yum install centos-release-scl`
  - CentOS 8: `dnf install centos-release-advanced-virtualization`
- Enable elrepo-kernel:
  - CentOS 7: `yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm`
  - CentOS 8: `dnf install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm`
- Install the packages: `yum/dnf install vitastor lpsolve etcd kernel-ml qemu-kvm`
### Building from Source

- Install at least Linux kernel 5.4 for io_uring support. 5.8 or newer is recommended,
  because 5.4 has at least one known bug that leads to a hang with io_uring and an
  HP SmartArray controller.
- Install liburing 0.4 or newer and its headers.
- Install lp_solve.
- Install etcd. Warning: you need a version with the fix from https://github.com/vitalif/etcd/,
  branch release-3.4, because etcd has a bug which [will](https://github.com/etcd-io/etcd/pull/12402)
  only be fixed in 3.4.15. The bug makes Vitastor unable to start PGs when there are
  at least 500 of them.
- Install node.js 10 or newer.
- Install gcc and g++ 8.x or newer.
- Clone this repository with submodules: `git clone --recurse-submodules https://yourcmc.ru/git/vitalif/vitastor/`.
- It is recommended to rebuild QEMU with a patch that makes running via LD_PRELOAD optional.
  See `qemu-*.*-vitastor.patch` - pick the version closest to your QEMU version.
- Install QEMU 3.0 or newer, get the sources of the installed package, start the build,
  stop it after a while and copy the following headers:
  - `<qemu>/include` → `<vitastor>/qemu/include`
  - Debian:
    * Take qemu from the main repository
    * `<qemu>/b/qemu/config-host.h` → `<vitastor>/qemu/b/qemu/config-host.h`
    * `<qemu>/b/qemu/qapi` → `<vitastor>/qemu/b/qemu/qapi`
  - CentOS 8:
    * Take qemu from the Advanced-Virtualization repository. To enable it, run
      `yum install centos-release-advanced-virtualization.noarch` and then `yum install qemu`
    * `<qemu>/config-host.h` → `<vitastor>/qemu/b/qemu/config-host.h`
    * For QEMU 3.0+: `<qemu>/qapi` → `<vitastor>/qemu/b/qemu/qapi`
    * For QEMU 2.0+: `<qemu>/qapi-types.h` → `<vitastor>/qemu/b/qemu/qapi-types.h`
  - `config-host.h` and `qapi` are required because they contain generated headers
- Install fio 3.7 or newer, get its source package and symlink it into `<vitastor>/fio`.
- Build and install Vitastor: `mkdir build && cd build && cmake .. && make -j8 && make install`.
  Pay attention to the cmake variable `QEMU_PLUGINDIR` - it must be set to `qemu-kvm` on RHEL.
## Запуск
Внимание: процедура пока что достаточно нетривиальная, задавать конфигурацию и смещения
на диске нужно почти вручную. Это будет исправлено в ближайшем будущем.
- Желательны SATA SSD или NVMe диски с конденсаторами (серверные SSD). Можно использовать и
десктопные SSD, включив режим отложенного fsync, но производительность однопоточной записи
в этом случае пострадает.
- Быстрая сеть, минимум 10 гбит/с
- Для наилучшей производительности нужно отключить энергосбережение CPU: `cpupower idle-set -D 0 && cpupower frequency-set -g performance`.
- Пропишите нужные вам значения вверху файлов `/usr/lib/vitastor/mon/make-units.sh` и `/usr/lib/vitastor/mon/make-osd.sh`.
- Создайте юниты systemd для etcd и мониторов: `/usr/lib/vitastor/mon/make-units.sh`
- Создайте юниты для OSD: `/usr/lib/vitastor/mon/make-osd.sh /dev/disk/by-partuuid/XXX [/dev/disk/by-partuuid/YYY ...]`
- Вы можете поменять параметры OSD в юнитах systemd. Смысл некоторых параметров:
- `disable_data_fsync 1` - отключает fsync, используется с SSD с конденсаторами.
- `immediate_commit all` - используется с SSD с конденсаторами.
- `disable_device_lock 1` - отключает блокировку файла устройства, нужно, только если вы запускаете
несколько OSD на одном блочном устройстве.
- `flusher_count 256` - "flusher" - микропоток, удаляющий старые данные из журнала.
Не волнуйтесь об этой настройке, 256 теперь достаточно практически всегда.
- `disk_alignment`, `journal_block_size`, `meta_block_size` следует установить равными размеру
внутреннего блока SSD. Это почти всегда 4096.
- `journal_no_same_sector_overwrites true` запрещает перезапись одного и того же сектора журнала подряд
много раз в процессе записи. Большинство (99%) SSD не нуждаются в данной опции. Однако выяснилось, что
диски, используемые на одном из тестовых стендов - Intel D3-S4510 - очень сильно не любят такую
перезапись, и для них была добавлена эта опция. Когда данный режим включён, также нужно поднимать
значение `journal_sector_buffer_count`, так как иначе Vitastor не хватит буферов для записи в журнал.
- Start all etcd instances: `systemctl start etcd`
- Create the global configuration in etcd: `etcdctl --endpoints=... put /vitastor/config/global '{"immediate_commit":"all"}'`
(if all your drives are server-grade, with capacitors).
- Create pools: `etcdctl --endpoints=... put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":256,"failure_domain":"host"}}'`.
For jerasure EC pools the configuration should look like this: `2:{"name":"ecpool","scheme":"jerasure","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}`.
- Start all OSDs: `systemctl start vitastor.target`
- Your cluster should now be ready - one of the monitors should have already configured the PGs, and the OSDs should have started them.
- You can check PG states directly in etcd: `etcdctl --endpoints=... get --prefix /vitastor/pg/state`. All PGs should be 'active'.
- Example command to run tests: `fio -thread -ioengine=libfio_vitastor.so -name=test -bs=4M -direct=1 -iodepth=16 -rw=write -etcd=10.115.0.10:2379/v3 -pool=1 -inode=1 -size=400G`.
- Example command to upload a VM image into vitastor via qemu-img:
```
qemu-img convert -f qcow2 debian10.qcow2 -p -O raw 'vitastor:etcd_host=10.115.0.10\:2379/v3:pool=1:inode=1:size=2147483648'
```
If you use unmodified QEMU, this command also requires the environment variable `LD_PRELOAD=/usr/lib/x86_64-linux-gnu/qemu/block-vitastor.so`, as shown below.
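For example, the same upload with the preload (an illustrative invocation, using the library path from the note above):
```
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/qemu/block-vitastor.so \
  qemu-img convert -f qcow2 debian10.qcow2 -p -O raw \
  'vitastor:etcd_host=10.115.0.10\:2379/v3:pool=1:inode=1:size=2147483648'
```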
- Example command to run QEMU:
```
qemu-system-x86_64 -enable-kvm -m 1024
-drive 'file=vitastor:etcd_host=10.115.0.10\:2379/v3:pool=1:inode=1:size=2147483648',format=raw,if=none,id=drive-virtio-disk0,cache=none
-device virtio-blk-pci,scsi=off,bus=pci.0,addr=0x5,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1,write-cache=off,physical_block_size=4096,logical_block_size=512
-vnc 0.0.0.0:0
```
- Example command to remove an image (inode) from Vitastor:
```
vitastor-rm --etcd_address 10.115.0.10:2379/v3 --pool 1 --inode 1 --parallel_osds 16 --iodepth 32
```
## Known Problems
- Object deletion requests may currently lead to "incomplete" objects in EC pools
if OSDs or servers fail during the deletion, because correct handling of deletion
requests in a cluster has to be "three-phase", and that is not implemented yet.
If you run into this situation, just repeat the deletion request.
## Implementation Principles
- I like architecturally simple solutions. Vitastor is designed exactly like that, and
I intend to keep following this principle.
- If you came here looking for perfect C++ code, you're probably in the wrong place.
I don't care much about "generally accepted" C++ coding practices because, again,
they often lead to unnecessary complexity and to code that is beautiful... but slow.
- For the same reason the code sometimes contains reinvented wheels, such as its own
simplified HTTP client for talking to etcd. On the upside, these wheels are small
and compact and don't require a dozen external libraries.
- node.js for the monitor is not an accidental choice. It is very fast, has a built-in
event loop, a pleasant neutral C-like language, and a mature ecosystem.
## Author and License
Author: Vitaliy Filippov (vitalif [at] yourcmc.ru), 2019+
Join the Vitastor Telegram chat: https://t.me/vitastor
License: VNPL 1.1 for the server-side code and dual VNPL 1.1 + GPL 2.0+ for the client code.
VNPL is a "network copyleft" license - Vitastor's own free copyleft license,
the Vitastor Network Public License 1.1, based on GNU GPL 3.0 with an additional
"Network Interaction" clause which requires that all programs specially designed
to be used together with Vitastor and interacting with it over the network be
distributed under VNPL or any other free software license.
The idea of VNPL is to extend copyleft not only to modules explicitly linked
with Vitastor code, but also to modules built as microservices that interact
with it over the network.
Thus, if you want to build a service on top of Vitastor that contains
closed-source components interacting with Vitastor, you need a commercial
license from the author 😀.
No restrictions are imposed on Windows or any other software not developed
*specifically* for use together with Vitastor.
Client libraries are dual-licensed under VNPL 1.1 and also under the
GNU GPL 2.0 or any later version. This is done for compatibility
with software like QEMU and fio.
You can find the full text of VNPL 1.1 in [VNPL-1.1.txt](VNPL-1.1.txt)
and GPL 2.0 in [GPL-2.0.txt](GPL-2.0.txt).

449
README.md
View File

@ -1,449 +0,0 @@
## Vitastor
[Читать на русском](README-ru.md)
## The Idea
Make Software-Defined Block Storage Great Again.
Vitastor is a small, simple and fast clustered block storage (storage for VM drives),
architecturally similar to Ceph which means strong consistency, primary-replication, symmetric
clustering and automatic data distribution over any number of drives of any size
with configurable redundancy (replication or erasure codes/XOR).
## Features
Vitastor is currently a pre-release, a lot of features are missing and you can still expect
breaking changes in the future. However, the following is implemented:
- Basic part: highly-available block storage with symmetric clustering and no SPOF
- Performance ;-D
- Multiple redundancy schemes: Replication, XOR n+1, Reed-Solomon erasure codes
based on jerasure library with any number of data and parity drives in a group
- Configuration via simple JSON data structures in etcd
- Automatic data distribution over OSDs, with support for:
- Mathematical optimization for better uniformity and less data movement
- Multiple pools
- Placement tree, OSD selection by tags (device classes) and placement root
- Configurable failure domains
- Recovery of degraded blocks
- Rebalancing (data movement between OSDs)
- Lazy fsync support
- I/O statistics reporting to etcd
- Generic user-space client library
- QEMU driver (built out-of-tree)
- Loadable fio engine for benchmarks (also built out-of-tree)
- NBD proxy for kernel mounts
- Inode removal tool (vitastor-rm)
- Packaging for Debian and CentOS
- Per-inode I/O and space usage statistics
- Inode metadata storage in etcd
- Snapshots and copy-on-write image clones
## Roadmap
- Better OSD creation and auto-start tools
- Other administrative tools
- Plugins for OpenStack, Kubernetes, OpenNebula, Proxmox and other cloud systems
- iSCSI proxy
- Operation timeouts and better failure detection
- Scrubbing without checksums (verification of replicas)
- Checksums
- SSD+HDD optimizations, possibly including tiered storage and soft journal flushes
- RDMA and NVDIMM support
- Web GUI
- Compression (possibly)
- Read caching using system page cache (possibly)
## Architecture
Similarities:
- Just like Ceph, Vitastor has Pools, PGs, OSDs, Monitors, Failure Domains, Placement Tree.
- Just like Ceph, Vitastor is transactional (even though there's a "lazy fsync mode" which
doesn't implicitly flush every operation to disks).
- OSDs also have journal and metadata and they can also be put on separate drives.
- Just like in Ceph, the client library attempts to recover from any cluster failure, so
you can basically reboot the whole cluster and your clients will only pause, but not crash
(I consider it a bug if a client crashes in that case).
Some basic terms for people not familiar with Ceph:
- OSD (Object Storage Daemon) is a process that stores data and serves read/write requests.
- PG (Placement Group) is a container for data that (normally) shares the same replicas.
- Pool is a container for data that has the same redundancy scheme and placement rules.
- Monitor is a separate daemon that watches cluster state and handles failures.
- Failure Domain is a group of OSDs that you allow to fail. It's "host" by default.
- Placement Tree groups OSDs in a hierarchy to later split them into Failure Domains.
Architectural differences from Ceph:
- Vitastor's primary focus is on SSDs. Proper SSD+HDD optimizations may be added in the future, though.
- Vitastor OSD is (and will always be) single-threaded. If you want to dedicate more than 1 core
per drive you should run multiple OSDs each on a different partition of the drive.
Vitastor isn't CPU-hungry though (as opposed to Ceph), so 1 core is sufficient in a lot of cases.
- Metadata and journal are always kept in memory. Metadata size depends linearly on drive capacity
and data store block size, which is 128 KB by default. With 128 KB blocks metadata should occupy
around 512 MB per 1 TB (which is still less than Ceph wants; see the worked estimate at the end
of this section). The journal doesn't have to be big, the example test below was conducted with
only a 16 MB journal. A big journal is probably even harmful, as dirty write metadata also takes
some memory.
- Vitastor storage layer doesn't have internal copy-on-write or redirect-write. I know that maybe
it's possible to create a good copy-on-write storage, but it's much harder and makes performance
less deterministic, so CoW isn't used in Vitastor.
- The basic layer of Vitastor is block storage with fixed-size blocks, not object storage with
rich semantics like in Ceph (RADOS).
- There's a "lazy fsync" mode which allows batching writes before flushing them to the disk.
This makes it possible to use Vitastor with desktop SSDs, but still lowers performance due to
additional network roundtrips, so use server SSDs with capacitor-based power loss protection
("Advanced Power Loss Protection") for best performance.
- PGs are ephemeral. This means that they aren't stored on data disks and only exist in memory
while OSDs are running.
- Recovery process is per-object (per-block), not per-PG. Also there are no PGLOGs.
- Monitors don't store data. Cluster configuration and state is stored in etcd in simple human-readable
JSON structures. Monitors only watch cluster state and handle data movement.
Thus Vitastor's Monitor isn't a critical component of the system and is more similar to Ceph's Manager.
Vitastor's Monitor is implemented in node.js.
- PG distribution isn't based on consistent hashes. All PG mappings are stored in etcd.
Rebalancing PGs between OSDs is done by mathematical optimization - data distribution problem
is reduced to a linear programming problem and solved by lp_solve. This allows for almost
perfect (96-99% uniformity compared to Ceph's 80-90%) data distribution in most cases, ability
to map PGs by hand without breaking rebalancing logic, reduced OSD peer-to-peer communication
(on average, OSDs have fewer peers) and less data movement. It also probably has a drawback -
this method may fail in very large clusters, but up to several hundreds of OSDs it's perfectly fine.
It's also easy to add consistent hashes in the future if something proves their necessity.
- There's no separate CRUSH layer. You select pool redundancy scheme, placement root, failure domain
and so on directly in pool configuration.
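As a rough sanity check of the "512 MB per 1 TB" metadata figure above (a back-of-the-envelope
estimate, not an exact formula from the code): a 1 TB drive with 128 KB blocks holds

1 TB / 128 KB = 2^40 / 2^17 = 2^23 ≈ 8.4 million objects,

so 512 MB / 2^23 objects comes out to 64 bytes of in-memory metadata per object.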
## Understanding Storage Performance
The most important thing for fast storage is latency, not parallel iops.
The best possible latency is achieved with one thread and queue depth of 1 which basically means
"client load as low as possible". In this case IOPS = 1/latency, and this number doesn't
scale with number of servers, drives, server processes or threads and so on.
Single-threaded IOPS and latency numbers only depend on *how fast a single daemon is*.
Why is it important? It's important because some of the applications *can't* use
queue depth greater than 1 because their task isn't parallelizable. A notable example
is any ACID DBMS because all of them write their WALs sequentially with fsync()s.
fsync, by the way, is another important thing often missing in benchmarks. The point is
that drives have cache buffers and don't guarantee that your data is actually persisted
until you call fsync() which is translated to a FLUSH CACHE command by the OS.
Desktop SSDs are very fast without fsync - NVMes, for example, can process ~80000 write
operations per second with queue depth of 1 without fsync - but they're really slow with
fsync because they have to actually write data to flash chips when you call fsync. Typical
number is around 1000-2000 iops with fsync.
Server SSDs often have supercapacitors that act as a built-in UPS and allow the drive
to flush its DRAM cache to the persistent flash storage when a power loss occurs.
This makes them perform equally well with and without fsync. This feature is called
"Advanced Power Loss Protection" by Intel; other vendors either call it similarly
or directly as "Full Capacitor-Based Power Loss Protection".
All software-defined storage systems that I currently know of are slow in terms of latency.
Notable examples are Ceph and the internal SDSes used by cloud providers like Amazon, Google,
Yandex and so on. They're all slow and can only reach ~0.3ms read and ~0.6ms 4 KB write latency
with best-in-slot hardware.
And that's in the SSD era, when you can buy an SSD with ~0.04ms latency for $100.
I use the following 6 commands with small variations to benchmark any storage:
- Linear write:
`fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4M -iodepth=32 -rw=write -runtime=60 -filename=/dev/sdX`
- Linear read:
`fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4M -iodepth=32 -rw=read -runtime=60 -filename=/dev/sdX`
- Random write latency (T1Q1, this hurts storages the most):
`fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4k -iodepth=1 -fsync=1 -rw=randwrite -runtime=60 -filename=/dev/sdX`
- Random read latency (T1Q1):
`fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4k -iodepth=1 -rw=randread -runtime=60 -filename=/dev/sdX`
- Parallel write iops (use numjobs if a single CPU core is insufficient to saturate the load):
`fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4k -iodepth=128 [-numjobs=4 -group_reporting] -rw=randwrite -runtime=60 -filename=/dev/sdX`
- Parallel read iops (use numjobs if a single CPU core is insufficient to saturate the load):
`fio -ioengine=libaio -direct=1 -invalidate=1 -name=test -bs=4k -iodepth=128 [-numjobs=4 -group_reporting] -rw=randread -runtime=60 -filename=/dev/sdX`
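For convenience, the same six tests wrapped into a single script (purely illustrative; `DEV`
is a placeholder, and fio destroys the contents of the device, so only point it at a disposable
drive):
```
#!/bin/sh
DEV=/dev/sdX   # placeholder - WARNING: all data on this device will be destroyed
fio -ioengine=libaio -direct=1 -invalidate=1 -name=lin_write -bs=4M -iodepth=32 -rw=write -runtime=60 -filename=$DEV
fio -ioengine=libaio -direct=1 -invalidate=1 -name=lin_read -bs=4M -iodepth=32 -rw=read -runtime=60 -filename=$DEV
fio -ioengine=libaio -direct=1 -invalidate=1 -name=lat_write -bs=4k -iodepth=1 -fsync=1 -rw=randwrite -runtime=60 -filename=$DEV
fio -ioengine=libaio -direct=1 -invalidate=1 -name=lat_read -bs=4k -iodepth=1 -rw=randread -runtime=60 -filename=$DEV
fio -ioengine=libaio -direct=1 -invalidate=1 -name=iops_write -bs=4k -iodepth=128 -rw=randwrite -runtime=60 -filename=$DEV
fio -ioengine=libaio -direct=1 -invalidate=1 -name=iops_read -bs=4k -iodepth=128 -rw=randread -runtime=60 -filename=$DEV
```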
## Vitastor's Theoretical Maximum Random Access Performance
Replicated setups:
- Single-threaded (T1Q1) read latency: 1 network roundtrip + 1 disk read.
- Single-threaded write+fsync latency:
- With immediate commit: 2 network roundtrips + 1 disk write.
- With lazy commit: 4 network roundtrips + 1 disk write + 1 disk flush.
- Saturated parallel read iops: min(network bandwidth, sum(disk read iops)).
- Saturated parallel write iops: min(network bandwidth, sum(disk write iops / number of replicas / write amplification)).
EC/XOR setups:
- Single-threaded (T1Q1) read latency: 1.5 network roundtrips + 1 disk read.
- Single-threaded write+fsync latency:
- With immediate commit: 3.5 network roundtrips + 1 disk read + 2 disk writes.
- With lazy commit: 5.5 network roundtrips + 1 disk read + 2 disk writes + 2 disk fsyncs.
- The 0.5 is actually (k-1)/k, which means that the additional roundtrip doesn't happen when
the read sub-operation can be served locally.
- Saturated parallel read iops: min(network bandwidth, sum(disk read iops)).
- Saturated parallel write iops: min(network bandwidth, sum(disk write iops * number of data drives / (number of data + parity drives) / write amplification)).
In fact, you should use disk write iops measured under a mixed ~10% read / ~90% write load in this formula.
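As a rough illustration of the write iops formula (an estimate only, combining numbers from the
comparison section below): 4 nodes with 6 SSDs each, every SSD sustaining ~60000 parallel write
iops, 2 replicas and a write amplification of ~4 give

min(network bandwidth, 24 × 60000 / (2 × 4)) ≈ 180000 write iops,

which is in the same ballpark as the 162000 T2Q64 iops actually measured below.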
Write amplification for 4 KB blocks is usually 3-5 in Vitastor:
1. Journal block write
2. Journal data write
3. Metadata block write
4. Another journal block write for EC/XOR setups
5. Data block write
If you manage to get an SSD which handles 512 byte blocks well (Optane?) you may
lower 1, 3 and 4 to 512 bytes (1/8 of data size) and get WA as low as 2.375.
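To spell that figure out (the five writes listed above, with writes 1, 3 and 4 shrunk to 512 bytes):

WA = (512 + 4096 + 512 + 512 + 4096) / 4096 = 9728 / 4096 = 2.375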
Lazy fsync also reduces WA for parallel workloads because journal blocks are only
written when they fill up or fsync is requested.
## Example Comparison with Ceph
Hardware configuration: 4 nodes, each with:
- 6x SATA SSD Intel D3-S4510 3.84 TB
- 2x Xeon Gold 6242 (16 cores @ 2.8 GHz)
- 384 GB RAM
- 1x 25 GbE network interface (Mellanox ConnectX-4 LX), connected to a Juniper QFX5200 switch
CPU powersaving was disabled. Both Vitastor and Ceph were configured with 2 OSDs per 1 SSD.
All of the results below apply to 4 KB blocks and random access (unless indicated otherwise).
Raw drive performance:
- T1Q1 write ~27000 iops (~0.037ms latency)
- T1Q1 read ~9800 iops (~0.101ms latency)
- T1Q32 write ~60000 iops
- T1Q32 read ~81700 iops
Ceph 15.2.4 (Bluestore):
- T1Q1 write ~1000 iops (~1ms latency)
- T1Q1 read ~1750 iops (~0.57ms latency)
- T8Q64 write ~100000 iops, total CPU usage by OSDs about 40 virtual cores on each node
- T8Q64 read ~480000 iops, total CPU usage by OSDs about 40 virtual cores on each node
T8Q64 tests were conducted over 8 400GB RBD images from all hosts (every host was running 2 instances of fio).
This is because Ceph has performance penalties related to running multiple clients over a single RBD image.
cephx_sign_messages was set to false during tests, RocksDB and Bluestore settings were left at defaults.
In fact, not that bad for Ceph. These servers are an example of well-balanced Ceph nodes.
However, CPU usage and I/O latency were through the roof, as usual.
Vitastor:
- T1Q1 write: 7087 iops (0.14ms latency)
- T1Q1 read: 6838 iops (0.145ms latency)
- T2Q64 write: 162000 iops, total CPU usage by OSDs about 3 virtual cores on each node
- T8Q64 read: 895000 iops, total CPU usage by OSDs about 4 virtual cores on each node
- Linear write (4M T1Q32): 2800 MB/s
- Linear read (4M T1Q32): 1500 MB/s
T8Q64 read test was conducted over 1 larger inode (3.2T) from all hosts (every host was running 2 instances of fio).
Vitastor has no performance penalties related to running multiple clients over a single inode.
If conducted from one node with all primary OSDs moved to other nodes the result was slightly lower (689000 iops),
this is because all operations resulted in network roundtrips between the client and the primary OSD.
When fio was colocated with OSDs (like in Ceph benchmarks above), 1/4 of the read workload actually
used the loopback network.
Vitastor was configured with: `--disable_data_fsync true --immediate_commit all --flusher_count 8
--disk_alignment 4096 --journal_block_size 4096 --meta_block_size 4096
--journal_no_same_sector_overwrites true --journal_sector_buffer_count 1024
--journal_size 16777216`.
### EC/XOR 2+1
Vitastor:
- T1Q1 write: 2808 iops (~0.355ms latency)
- T1Q1 read: 6190 iops (~0.16ms latency)
- T2Q64 write: 85500 iops, total CPU usage by OSDs about 3.4 virtual cores on each node
- T8Q64 read: 812000 iops, total CPU usage by OSDs about 4.7 virtual cores on each node
- Linear write (4M T1Q32): 3200 MB/s
- Linear read (4M T1Q32): 1800 MB/s
Ceph:
- T1Q1 write: 730 iops (~1.37ms latency)
- T1Q1 read: 1500 iops with cold cache (~0.66ms latency), 2300 iops after 2 minute metadata cache warmup (~0.435ms latency)
- T4Q128 write (4 RBD images): 45300 iops, total CPU usage by OSDs about 30 virtual cores on each node
- T8Q64 read (4 RBD images): 278600 iops, total CPU usage by OSDs about 40 virtual cores on each node
- Linear write (4M T1Q32): 1950 MB/s before preallocation, 2500 MB/s after preallocation
- Linear read (4M T1Q32): 2400 MB/s
### NBD
NBD (Network Block Device) is currently required to mount Vitastor via the kernel, but it
imposes additional overhead due to additional copying between the kernel and userspace.
This mostly hurts linear bandwidth, not iops.
Vitastor with single-thread NBD on the same hardware:
- T1Q1 write: 6000 iops (0.166ms latency)
- T1Q1 read: 5518 iops (0.18ms latency)
- T1Q128 write: 94400 iops
- T1Q128 read: 103000 iops
- Linear write (4M T1Q128): 1266 MB/s (compared to 2800 MB/s via fio)
- Linear read (4M T1Q128): 975 MB/s (compared to 1500 MB/s via fio)
## Installation
### Debian
- Trust Vitastor package signing key:
`wget -q -O - https://vitastor.io/debian/pubkey | sudo apt-key add -`
- Add Vitastor package repository to your /etc/apt/sources.list:
- Debian 11 (Bullseye/Sid): `deb https://vitastor.io/debian bullseye main`
- Debian 10 (Buster): `deb https://vitastor.io/debian buster main`
- For Debian 10 (Buster) also enable backports repository:
`deb http://deb.debian.org/debian buster-backports main`
- Install packages: `apt update; apt install vitastor lp-solve etcd linux-image-amd64 qemu`
### CentOS
- Add Vitastor package repository:
- CentOS 7: `yum install https://vitastor.io/rpms/centos/7/vitastor-release-1.0-1.el7.noarch.rpm`
- CentOS 8: `dnf install https://vitastor.io/rpms/centos/8/vitastor-release-1.0-1.el8.noarch.rpm`
- Enable EPEL: `yum/dnf install epel-release`
- Enable additional CentOS repositories:
- CentOS 7: `yum install centos-release-scl`
- CentOS 8: `dnf install centos-release-advanced-virtualization`
- Enable elrepo-kernel:
- CentOS 7: `yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm`
- CentOS 8: `dnf install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm`
- Install packages: `yum/dnf install vitastor lpsolve etcd kernel-ml qemu-kvm`
### Building from Source
- Install Linux kernel 5.4 or newer, for io_uring support. 5.8 or later is highly recommended because
there is at least one known io_uring hang with 5.4 and an HP SmartArray controller.
- Install liburing 0.4 or newer and its headers.
- Install lp_solve.
- Install etcd. Attention: you need a fixed version from here: https://github.com/vitalif/etcd/,
branch release-3.4, because there is a bug in upstream etcd which makes Vitastor OSDs fail to
move PGs out of "starting" state if you have at least around ~500 PGs or so. The custom build
will be unnecessary when etcd merges the fix: https://github.com/etcd-io/etcd/pull/12402.
- Install node.js 10 or newer.
- Install gcc and g++ 8.x or newer.
- Clone https://yourcmc.ru/git/vitalif/vitastor/ with submodules.
- Install QEMU 3.0+, get its source, begin to build it, stop the build and copy headers:
- `<qemu>/include` &rarr; `<vitastor>/qemu/include`
- Debian:
* Use qemu packages from the main repository
* `<qemu>/b/qemu/config-host.h` &rarr; `<vitastor>/qemu/b/qemu/config-host.h`
* `<qemu>/b/qemu/qapi` &rarr; `<vitastor>/qemu/b/qemu/qapi`
- CentOS 8:
* Use qemu packages from the Advanced-Virtualization repository. To enable it, run
`yum install centos-release-advanced-virtualization.noarch` and then `yum install qemu`
* `<qemu>/config-host.h` &rarr; `<vitastor>/qemu/b/qemu/config-host.h`
* For QEMU 3.0+: `<qemu>/qapi` &rarr; `<vitastor>/qemu/b/qemu/qapi`
* For QEMU 2.0+: `<qemu>/qapi-types.h` &rarr; `<vitastor>/qemu/b/qemu/qapi-types.h`
- `config-host.h` and `qapi` are required because they contain generated headers
- You can also rebuild QEMU with a patch that makes LD_PRELOAD unnecessary for loading the
vitastor driver. See `qemu-*.*-vitastor.patch`.
- Install fio 3.7 or later, get its source and symlink it into `<vitastor>/fio`.
- Build & install Vitastor with `mkdir build && cd build && cmake .. && make -j8 && make install`.
Pay attention to the `QEMU_PLUGINDIR` cmake option - it must be set to `qemu-kvm` on RHEL.
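Putting the source steps together, the build might look roughly like this (an illustrative
sketch, not a tested script - the fio source path is a placeholder, and the QEMU header
copying described above still has to be done for your distribution):
```
git clone --recurse-submodules https://yourcmc.ru/git/vitalif/vitastor/
cd vitastor
ln -s /path/to/fio-source fio        # fio 3.7+ sources (placeholder path)
# ... copy QEMU headers into qemu/ as described above ...
mkdir build && cd build
cmake -DQEMU_PLUGINDIR=qemu-kvm ..   # QEMU_PLUGINDIR=qemu-kvm is for RHEL only
make -j8
make install
```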
## Running
Please note that the startup procedure isn't currently simple - you specify the configuration
and calculate disk offsets almost by hand. This will be fixed in the near future.
- Get some SATA or NVMe SSDs with capacitors (server-grade drives). You can use desktop SSDs
with lazy fsync, but prepare for inferior single-thread latency.
- Get a fast network (at least 10 Gbit/s).
- Disable CPU powersaving: `cpupower idle-set -D 0 && cpupower frequency-set -g performance`.
- Check `/usr/lib/vitastor/mon/make-units.sh` and `/usr/lib/vitastor/mon/make-osd.sh` and
put desired values into the variables at the top of these files.
- Create systemd units for the monitor and etcd: `/usr/lib/vitastor/mon/make-units.sh`
- Create systemd units for your OSDs: `/usr/lib/vitastor/mon/make-osd.sh /dev/disk/by-partuuid/XXX [/dev/disk/by-partuuid/YYY ...]`
- You can edit the units and change OSD configuration. Notable configuration variables:
- `disable_data_fsync 1` - only safe with server-grade drives with capacitors.
- `immediate_commit all` - use this if all your drives are server-grade.
- `disable_device_lock 1` - only required if you run multiple OSDs on one block device.
- `flusher_count 256` - flusher is a micro-thread that removes old data from the journal.
You don't have to worry about this parameter anymore, 256 is enough.
- `disk_alignment`, `journal_block_size`, `meta_block_size` should be set to the internal
block size of your SSDs which is 4096 on most drives.
- `journal_no_same_sector_overwrites true` prevents multiple overwrites of the same journal sector.
Most (99%) SSDs don't need this option. But the Intel D3-S4510 does, because it doesn't like it
when you overwrite the same sector twice in a short period of time. The setting forces Vitastor
to never overwrite the same journal sector twice in a row, which makes the D3-S4510 almost happy.
Not totally happy, because overwrites of the same block can still happen in the metadata area...
When this setting is set, it is also required to raise the `journal_sector_buffer_count` setting,
which is the number of dirty journal sectors that may be written to at the same time.
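For reference, the complete set of OSD options used in the example benchmark above
(capacitor-equipped SSDs with `journal_no_same_sector_overwrites` enabled) - treat it
as an example, not as universally optimal values:
```
--disable_data_fsync true --immediate_commit all --flusher_count 8
--disk_alignment 4096 --journal_block_size 4096 --meta_block_size 4096
--journal_no_same_sector_overwrites true --journal_sector_buffer_count 1024
--journal_size 16777216
```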
- `systemctl start vitastor.target` everywhere.
- Create global configuration in etcd: `etcdctl --endpoints=... put /vitastor/config/global '{"immediate_commit":"all"}'`
(if all your drives have capacitors).
- Create pool configuration in etcd: `etcdctl --endpoints=... put /vitastor/config/pools '{"1":{"name":"testpool","scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":256,"failure_domain":"host"}}'`.
For jerasure pools the configuration should look like the following: `2:{"name":"ecpool","scheme":"jerasure","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}`.
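For example, both pools above can be created with a single write of the key (illustrative;
the endpoint address is the one used in the examples below):
```
etcdctl --endpoints=http://10.115.0.10:2379 put /vitastor/config/pools \
  '{"1":{"name":"testpool","scheme":"replicated","pg_size":2,"pg_minsize":1,"pg_count":256,"failure_domain":"host"},
    "2":{"name":"ecpool","scheme":"jerasure","pg_size":4,"parity_chunks":2,"pg_minsize":2,"pg_count":256,"failure_domain":"host"}}'
```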
- At this point, one of the monitors will configure PGs and OSDs will start them.
- You can check PG states with `etcdctl --endpoints=... get --prefix /vitastor/pg/state`. All PGs should become 'active'.
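A quick way to eyeball the states (illustrative; uses only standard etcdctl flags and simply
counts PGs per distinct state value):
```
etcdctl --endpoints=http://10.115.0.10:2379 get --prefix /vitastor/pg/state --print-value-only | sort | uniq -c
```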
- Run tests with (for example): `fio -thread -ioengine=libfio_vitastor.so -name=test -bs=4M -direct=1 -iodepth=16 -rw=write -etcd=10.115.0.10:2379/v3 -pool=1 -inode=1 -size=400G`.
- Upload VM disk image with qemu-img (for example):
```
qemu-img convert -f qcow2 debian10.qcow2 -p -O raw 'vitastor:etcd_host=10.115.0.10\:2379/v3:pool=1:inode=1:size=2147483648'
```
Note that the command must be run as `LD_PRELOAD=/usr/lib/x86_64-linux-gnu/qemu/block-vitastor.so qemu-img ...`
if you use unmodified QEMU.
- Run QEMU with (for example):
```
qemu-system-x86_64 -enable-kvm -m 1024
-drive 'file=vitastor:etcd_host=10.115.0.10\:2379/v3:pool=1:inode=1:size=2147483648',format=raw,if=none,id=drive-virtio-disk0,cache=none
-device virtio-blk-pci,scsi=off,bus=pci.0,addr=0x5,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1,write-cache=off,physical_block_size=4096,logical_block_size=512
-vnc 0.0.0.0:0
```
- Remove inode with (for example):
```
vitastor-rm --etcd_address 10.115.0.10:2379/v3 --pool 1 --inode 1 --parallel_osds 16 --iodepth 32
```
## Known Problems
- Object deletion requests may currently lead to 'incomplete' objects in EC pools
if your OSDs crash during deletion, because proper handling of object cleanup
in a cluster has to be "three-phase" and it's currently not implemented.
Just repeat the removal request in this case.
## Implementation Principles
- I like architecturally simple solutions. Vitastor is and will always be designed
exactly like that.
- I also like reinventing the wheel to some extent, like writing my own HTTP client
for etcd interaction instead of using prebuilt libraries, because in this case
I'm confident about what my code does and what it doesn't do.
- I don't care about C++ "best practices" like RAII or proper inheritance or usage of
smart pointers or whatever and I don't intend to change my mind, so if you're here
looking for ideal reference C++ code, this probably isn't the right place.
- I like node.js better than any other dynamically-typed language interpreter
because it's faster than any other interpreter in the world, has a neutral C-like
syntax and a built-in event loop. That's why the Monitor is implemented in node.js.
## Author and License
Copyright (c) Vitaliy Filippov (vitalif [at] yourcmc.ru), 2019+
Join Vitastor Telegram Chat: https://t.me/vitastor
All server-side code (OSD, Monitor and so on) is licensed under the terms of
Vitastor Network Public License 1.1 (VNPL 1.1), a copyleft license based on
GNU GPLv3.0 with the additional "Network Interaction" clause which requires
opensourcing all programs directly or indirectly interacting with Vitastor
through a computer network and expressly designed to be used in conjunction
with it ("Proxy Programs"). Proxy Programs may be made public not only under
the terms of the same license, but also under the terms of any GPL-Compatible
Free Software License, as listed by the Free Software Foundation.
This is a stricter copyleft license than the Affero GPL.
Please note that VNPL doesn't require you to open the code of proprietary
software running inside a VM if it's not specially designed to be used with
Vitastor.
Basically, you can't use the software in a proprietary environment to provide
its functionality to users without opensourcing all intermediary components
standing between the user and Vitastor or purchasing a commercial license
from the author 😀.
Client libraries (cluster_client and so on) are dual-licensed under the same
VNPL 1.1 and also GNU GPL 2.0 or later to allow for compatibility with GPLed
software like QEMU and fio.
You can find the full text of VNPL-1.1 in the file [VNPL-1.1.txt](VNPL-1.1.txt).
GPL 2.0 is also included in this repository as [GPL-2.0.txt](GPL-2.0.txt).

648
VNPL-1.1.txt
View File

@ -1,648 +0,0 @@
VITASTOR NETWORK PUBLIC LICENSE
Version 1.1, 6 February 2021
Copyright (C) 2021 Vitaliy Filippov <vitalif@yourcmc.ru>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The Vitastor Network Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
GNU General Public Licenses and Vitastor Network Public License are
intended to guarantee your freedom to share and change all versions
of a program--to make sure it remains free software for all its users.
When we speak of free software, we are referring to freedom, not
price. GNU General Public Licenses and Vitastor Network Public License
are designed to make sure that you have the freedom to distribute copies
of free software (and charge for them if you wish), that you receive
source code or can get it if you want it, that you can change the software
or use pieces of it in new free programs, and that you know you can do these
things.
Developers that use GNU General Public Licenses and Vitastor
Network Public License protect your rights with two steps:
(1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public. Even the GNU Affero General Public License
permits running a modified version in a closed environment where
public users only interact with it through a closed-source proxy, again,
without making the program and the proxy available to the public
for free.
The Vitastor Network Public License is designed specifically to
ensure that, in such cases, the modified program and the proxy stays
available to the community. It requires the operator of a network server to
provide the source code of the original program and all other programs
communicating with it running there to the users of that server.
Therefore, public use of a modified version, on a server accessible
directly or indirectly to the public, gives the public access to the source
code of the modified version.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 1 of the Vitastor Network Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction.
A "Proxy Program" means a separate program which is specially designed to
be used in conjunction with the covered work and interacts with it directly
or indirectly through any kind of API (application programming interfaces),
a computer network, an imitation of such network, or another Proxy Program
itself.
Notwithstanding any other provision of this License, if you provide any user
with an opportunity to interact with the covered work through a computer
network, an imitation of such network, or any number of "Proxy Programs",
you must prominently offer that user an opportunity to receive the
Corresponding Source of the covered work and all Proxy Programs from a
network server at no charge, through some standard or customary means of
facilitating copying of software. The Corresponding Source for the covered
work must be made available under the conditions of this License, and
the Corresponding Source for all Proxy Programs must be made available
under the conditions of either this License or any GPL-Compatible
Free Software License, as described by the Free Software Foundation
in their "GPL-Compatible License List".
14. Revised Versions of this License.
Vitastor Author may publish revised and/or new versions of
the Vitastor Network Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the Vitastor Network
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version. If the Program does not specify a version
number of the Vitastor Network Public License, you may choose any version
ever published.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the Vitastor Network Public License as published by
the Vitastor Author, either version 1 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Vitastor Network Public License for more details.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
View File
@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #include <stdexcept>
 
 #include "allocator.h"
@@ -13,19 +10,19 @@ allocator::allocator(uint64_t blocks)
     {
         throw std::invalid_argument("blocks");
     }
-    uint64_t p2 = 1;
-    total = 0;
+    uint64_t p2 = 1, total = 1;
     while (p2 * 64 < blocks)
     {
-        total += p2;
         p2 = p2 * 64;
+        total += p2;
     }
+    total -= p2;
     total += (blocks+63) / 64;
-    mask = new uint64_t[total];
+    mask = new uint64_t[2 + total];
     size = free = blocks;
     last_one_mask = (blocks % 64) == 0
         ? UINT64_MAX
-        : ((1l << (blocks % 64)) - 1);
+        : ~(UINT64_MAX << (64 - blocks % 64));
     for (uint64_t i = 0; i < total; i++)
     {
         mask[i] = 0;
@@ -99,10 +96,6 @@ uint64_t allocator::find_free()
     uint64_t p2 = 1, offset = 0, addr = 0, f, i;
     while (p2 < size)
     {
-        if (offset+addr >= total)
-        {
-            return UINT64_MAX;
-        }
         uint64_t m = mask[offset + addr];
         for (i = 0, f = 1; i < 64; i++, f <<= 1)
         {
@@ -117,6 +110,11 @@ uint64_t allocator::find_free()
             return UINT64_MAX;
         }
         addr = (addr * 64) | i;
+        if (addr >= size)
+        {
+            // No space
+            return UINT64_MAX;
+        }
         offset += p2;
         p2 = p2 * 64;
     }
@@ -127,35 +125,3 @@ uint64_t allocator::get_free_count()
 {
     return free;
 }
-
-void bitmap_set(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity)
-{
-    if (start == 0)
-    {
-        if (len == 32*bitmap_granularity)
-        {
-            *((uint32_t*)bitmap) = UINT32_MAX;
-            return;
-        }
-        else if (len == 64*bitmap_granularity)
-        {
-            *((uint64_t*)bitmap) = UINT64_MAX;
-            return;
-        }
-    }
-    unsigned bit_start = start / bitmap_granularity;
-    unsigned bit_end = ((start + len) + bitmap_granularity - 1) / bitmap_granularity;
-    while (bit_start < bit_end)
-    {
-        if (!(bit_start & 7) && bit_end >= bit_start+8)
-        {
-            ((uint8_t*)bitmap)[bit_start / 8] = UINT8_MAX;
-            bit_start += 8;
-        }
-        else
-        {
-            ((uint8_t*)bitmap)[bit_start / 8] |= 1 << (bit_start % 8);
-            bit_start++;
-        }
-    }
-}
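Note on the bitmap_set() removed above: it marks the bits covering the byte range [start, start+len) at a fixed granularity, filling whole bytes with UINT8_MAX where possible and handling the ragged edges bit by bit. A minimal standalone sketch of the same technique (the main() driver and the 4 KB example values are illustrative only, not taken from this repository):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Same approach as the removed bitmap_set(): one bit per <granularity> bytes.
    static void bitmap_set(uint8_t *bitmap, uint64_t start, uint64_t len, uint64_t granularity)
    {
        unsigned bit_start = start / granularity;
        unsigned bit_end = (start + len + granularity - 1) / granularity;
        while (bit_start < bit_end)
        {
            if (!(bit_start & 7) && bit_end >= bit_start+8)
            {
                bitmap[bit_start / 8] = UINT8_MAX; // fill 8 bits at once
                bit_start += 8;
            }
            else
            {
                bitmap[bit_start / 8] |= 1 << (bit_start % 8);
                bit_start++;
            }
        }
    }

    int main()
    {
        uint8_t bmp[16];
        memset(bmp, 0, sizeof(bmp));
        // Mark 12 KB written at offset 4 KB with 4 KB granularity => bits 1..3
        bitmap_set(bmp, 4096, 12*1024, 4096);
        printf("%02x\n", bmp[0]); // prints 0e (bits 1, 2 and 3 set)
        return 0;
    }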
View File
@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #pragma once
 
 #include <stdint.h>
@@ -8,7 +5,6 @@
 // Hierarchical bitmap allocator
 class allocator
 {
-    uint64_t total;
     uint64_t size;
     uint64_t free;
     uint64_t last_one_mask;
@@ -20,5 +16,3 @@ public:
     uint64_t find_free();
     uint64_t get_free_count();
 };
-
-void bitmap_set(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity);
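A hedged usage sketch of this interface: find_free() walks the 64-ary tree of mask words down to a free leaf bit and returns UINT64_MAX when the tree is full, while set() (used in the flusher diff further below) flips one block's bit. The allocate_block() helper and the -ENOSPC convention are hypothetical illustration, not code from this repository:

    #include <stdint.h>
    #include <errno.h>
    #include "allocator.h"

    // Sketch: allocate one data block through the interface declared above.
    int64_t allocate_block(allocator *data_alloc)
    {
        uint64_t loc = data_alloc->find_free();
        if (loc == UINT64_MAX)
            return -ENOSPC;          // no space: caller waits for the flusher (WAIT_FREE)
        data_alloc->set(loc, true);  // mark the block as used
        return (int64_t)loc;         // block number; byte offset would be loc << block_order
    }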
View File
@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #include "base64.h"
 
 std::string base64_encode(const std::string &in)
View File
@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #pragma once
 
 #include <string>
View File
@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #include "blockstore_impl.h"
 
 blockstore_t::blockstore_t(blockstore_config_t & config, ring_loop_t *ringloop)
@@ -35,7 +32,12 @@ bool blockstore_t::is_safe_to_stop()
 
 void blockstore_t::enqueue_op(blockstore_op_t *op)
 {
-    impl->enqueue_op(op);
+    impl->enqueue_op(op, false);
+}
+
+void blockstore_t::enqueue_op_first(blockstore_op_t *op)
+{
+    impl->enqueue_op(op, true);
 }
 
 std::unordered_map<object_id, uint64_t> & blockstore_t::get_unstable_writes()
@@ -43,11 +45,6 @@ std::unordered_map<object_id, uint64_t> & blockstore_t::get_unstable_writes()
     return impl->unstable_writes;
 }
 
-std::map<uint64_t, uint64_t> & blockstore_t::get_inode_space_stats()
-{
-    return impl->inode_space_stats;
-}
-
 uint32_t blockstore_t::get_block_size()
 {
     return impl->get_block_size();
@@ -63,7 +60,7 @@ uint64_t blockstore_t::get_free_block_count()
     return impl->get_free_block_count();
 }
 
-uint32_t blockstore_t::get_bitmap_granularity()
+uint32_t blockstore_t::get_disk_alignment()
 {
-    return impl->get_bitmap_granularity();
+    return impl->get_disk_alignment();
 }
View File
@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #pragma once
 
 #ifndef _LARGEFILE64_SOURCE
@@ -9,7 +6,6 @@
 #include <stdint.h>
-#include <string>
 #include <map>
 #include <unordered_map>
 #include <functional>
@@ -27,19 +23,17 @@
 #define DEFAULT_ORDER 17
 #define MIN_BLOCK_SIZE 4*1024
 #define MAX_BLOCK_SIZE 128*1024*1024
-#define DEFAULT_BITMAP_GRANULARITY 4096
 
 #define BS_OP_MIN 1
 #define BS_OP_READ 1
 #define BS_OP_WRITE 2
-#define BS_OP_WRITE_STABLE 3
-#define BS_OP_SYNC 4
-#define BS_OP_STABLE 5
-#define BS_OP_DELETE 6
-#define BS_OP_LIST 7
-#define BS_OP_ROLLBACK 8
-#define BS_OP_SYNC_STAB_ALL 9
-#define BS_OP_MAX 9
+#define BS_OP_SYNC 3
+#define BS_OP_STABLE 4
+#define BS_OP_DELETE 5
+#define BS_OP_LIST 6
+#define BS_OP_ROLLBACK 7
+#define BS_OP_SYNC_STAB_ALL 8
+#define BS_OP_MAX 8
 
 #define BS_OP_PRIVATE_DATA_SIZE 256
@@ -47,9 +41,9 @@
 Blockstore opcode documentation:
 
-## BS_OP_READ / BS_OP_WRITE / BS_OP_WRITE_STABLE
+## BS_OP_READ / BS_OP_WRITE
 
-Read or write object data. WRITE_STABLE writes a version that doesn't require marking as stable.
+Read or write object data.
 
 Input:
 - oid = requested object
@@ -65,8 +59,6 @@ Input:
 - offset, len = offset and length within object. length may be zero, in that case
   read operation only returns the version / write operation only bumps the version
 - buf = pre-allocated buffer for data (read) / with data (write). may be NULL if len == 0.
-- bitmap = pointer to the new 'external' object bitmap data. Its part which is respective to the
-  write request is copied into the metadata area bitwise and stored there.
 
 Output:
 - retval = number of bytes actually read/written or negative error number (-EINVAL or -ENOSPC)
@@ -121,8 +113,6 @@ Input:
 - oid.stripe = PG alignment
 - len = PG count or 0 to list all objects
 - offset = PG number
-- oid.inode = min inode number or 0 to list all inodes
-- version = max inode number or 0 to list all inodes
 
 Output:
 - retval = total obj_ver_id count
@@ -144,7 +134,6 @@ struct blockstore_op_t
     uint32_t offset;
     uint32_t len;
     void *buf;
-    void *bitmap;
     int retval;
 
     uint8_t private_data[BS_OP_PRIVATE_DATA_SIZE];
@@ -179,16 +168,17 @@ public:
     // Submission
     void enqueue_op(blockstore_op_t *op);
 
+    // Insert operation into the beginning of the queue
+    // Intended for the OSD syncer "thread" to be able to stabilize something when the journal is full
+    void enqueue_op_first(blockstore_op_t *op);
+
     // Unstable writes are added here (map of object_id -> version)
     std::unordered_map<object_id, uint64_t> & get_unstable_writes();
 
-    // Get per-inode space usage statistics
-    std::map<uint64_t, uint64_t> & get_inode_space_stats();
-
     // FIXME rename to object_size
     uint32_t get_block_size();
     uint64_t get_block_count();
     uint64_t get_free_block_count();
-    uint32_t get_bitmap_granularity();
+    uint32_t get_disk_alignment();
 };
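To make the opcode documentation above concrete, here is a hedged sketch of submitting one read through this public interface. The field names follow the header (oid, version, offset, len, buf, retval, callback); the bs pointer, the alignment value, and the version = UINT64_MAX "latest version" convention are assumptions for illustration, not confirmed by the visible hunks:

    #include <malloc.h>
    #include <stdio.h>

    // Sketch: read 4 KB at offset 0 of an object (assumes blockstore_t *bs exists).
    blockstore_op_t *op = new blockstore_op_t;
    op->opcode = BS_OP_READ;
    op->oid = { .inode = 1, .stripe = 0 };
    op->version = UINT64_MAX;      // assumed: "read the latest version"
    op->offset = 0;
    op->len = 4096;
    op->buf = memalign(512, 4096); // assumed alignment for O_DIRECT-style I/O
    op->callback = [](blockstore_op_t *op)
    {
        if (op->retval != (int)op->len)
            printf("read failed: %d\n", op->retval);
        // free op->buf and op here
    };
    bs->enqueue_op(op);            // completion is delivered via the callback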
View File
@@ -1,24 +1,16 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #include "blockstore_impl.h"
 
 journal_flusher_t::journal_flusher_t(int flusher_count, blockstore_impl_t *bs)
 {
     this->bs = bs;
     this->flusher_count = flusher_count;
-    this->cur_flusher_count = 1;
-    this->target_flusher_count = 1;
     dequeuing = false;
-    trimming = false;
     active_flushers = 0;
     syncing_flushers = 0;
-    // FIXME: allow to configure flusher_start_threshold and journal_trim_interval
     flusher_start_threshold = bs->journal_block_size / sizeof(journal_entry_stable);
-    journal_trim_interval = 512;
+    journal_trim_interval = flusher_start_threshold;
     journal_trim_counter = 0;
-    trim_wanted = 0;
-    journal_superblock = bs->journal.inmemory ? bs->journal.buffer : memalign_or_die(MEM_ALIGNMENT, bs->journal_block_size);
+    journal_superblock = bs->journal.inmemory ? bs->journal.buffer : memalign(MEM_ALIGNMENT, bs->journal_block_size);
     co = new journal_flusher_co[flusher_count];
     for (int i = 0; i < flusher_count; i++)
     {
@@ -70,31 +62,14 @@ bool journal_flusher_t::is_active()
 void journal_flusher_t::loop()
 {
-    target_flusher_count = bs->write_iodepth*2;
-    if (target_flusher_count <= 0)
-        target_flusher_count = 1;
-    else if (target_flusher_count > flusher_count)
-        target_flusher_count = flusher_count;
-    if (target_flusher_count > cur_flusher_count)
-        cur_flusher_count = target_flusher_count;
-    else if (target_flusher_count < cur_flusher_count)
-    {
-        while (target_flusher_count < cur_flusher_count)
-        {
-            if (co[cur_flusher_count-1].wait_state)
-                break;
-            cur_flusher_count--;
-        }
-    }
-    for (int i = 0; (active_flushers > 0 || dequeuing) && i < cur_flusher_count; i++)
-        co[i].loop();
+    for (int i = 0; (active_flushers > 0 || dequeuing) && i < flusher_count; i++)
+    {
+        co[i].loop();
+    }
 }
 
 void journal_flusher_t::enqueue_flush(obj_ver_id ov)
 {
-#ifdef BLOCKSTORE_DEBUG
-    printf("enqueue_flush %lx:%lx v%lu\n", ov.oid.inode, ov.oid.stripe, ov.version);
-#endif
     auto it = flush_versions.find(ov.oid);
     if (it != flush_versions.end())
     {
@@ -106,18 +81,15 @@ void journal_flusher_t::enqueue_flush(obj_ver_id ov)
         flush_versions[ov.oid] = ov.version;
         flush_queue.push_back(ov.oid);
     }
-    if (!dequeuing && (flush_queue.size() >= flusher_start_threshold || trim_wanted > 0))
+    if (!dequeuing && flush_queue.size() >= flusher_start_threshold)
     {
         dequeuing = true;
        bs->ringloop->wakeup();
     }
 }
 
-void journal_flusher_t::unshift_flush(obj_ver_id ov, bool force)
+void journal_flusher_t::unshift_flush(obj_ver_id ov)
 {
-#ifdef BLOCKSTORE_DEBUG
-    printf("unshift_flush %lx:%lx v%lu\n", ov.oid.inode, ov.oid.stripe, ov.version);
-#endif
     auto it = flush_versions.find(ov.oid);
     if (it != flush_versions.end())
     {
@@ -127,38 +99,15 @@ void journal_flusher_t::unshift_flush(obj_ver_id ov, bool force)
     else
     {
         flush_versions[ov.oid] = ov.version;
-        if (!force)
             flush_queue.push_front(ov.oid);
     }
-    if (force)
-        flush_queue.push_front(ov.oid);
-    if (force || !dequeuing && (flush_queue.size() >= flusher_start_threshold || trim_wanted > 0))
+    if (!dequeuing && flush_queue.size() >= flusher_start_threshold)
     {
         dequeuing = true;
         bs->ringloop->wakeup();
     }
 }
 
-void journal_flusher_t::remove_flush(object_id oid)
-{
-#ifdef BLOCKSTORE_DEBUG
-    printf("undo_flush %lx:%lx\n", oid.inode, oid.stripe);
-#endif
-    auto v_it = flush_versions.find(oid);
-    if (v_it != flush_versions.end())
-    {
-        flush_versions.erase(v_it);
-        for (auto q_it = flush_queue.begin(); q_it != flush_queue.end(); q_it++)
-        {
-            if (*q_it == oid)
-            {
-                flush_queue.erase(q_it);
-                break;
-            }
-        }
-    }
-}
-
 void journal_flusher_t::request_trim()
 {
     dequeuing = true;
@@ -166,16 +115,6 @@ void journal_flusher_t::request_trim()
     bs->ringloop->wakeup();
 }
 
-void journal_flusher_t::mark_trim_possible()
-{
-    if (trim_wanted > 0)
-    {
-        dequeuing = true;
-        journal_trim_counter++;
-        bs->ringloop->wakeup();
-    }
-}
-
 void journal_flusher_t::release_trim()
 {
     trim_wanted--;
@@ -230,22 +169,9 @@ bool journal_flusher_co::loop()
         goto resume_17;
     else if (wait_state == 18)
         goto resume_18;
-    else if (wait_state == 19)
-        goto resume_19;
-    else if (wait_state == 20)
-        goto resume_20;
-    else if (wait_state == 21)
-        goto resume_21;
 resume_0:
     if (!flusher->flush_queue.size() || !flusher->dequeuing)
     {
-stop_flusher:
-        if (flusher->trim_wanted > 0 && flusher->journal_trim_counter > 0)
-        {
-            // Attempt forced trim
-            flusher->active_flushers++;
-            goto trim_journal;
-        }
         flusher->dequeuing = false;
         wait_state = 0;
         return true;
@@ -257,23 +183,6 @@ stop_flusher:
     dirty_end = bs->dirty_db.find(cur);
     if (dirty_end != bs->dirty_db.end())
     {
-        repeat_it = flusher->sync_to_repeat.find(cur.oid);
-        if (repeat_it != flusher->sync_to_repeat.end())
-        {
-#ifdef BLOCKSTORE_DEBUG
-            printf("Postpone %lx:%lx v%lu\n", cur.oid.inode, cur.oid.stripe, cur.version);
-#endif
-            // We don't flush different parts of history of the same object in parallel
-            // So we check if someone is already flushing this object
-            // In that case we set sync_to_repeat and pick another object
-            // Another coroutine will see it and re-queue the object after it finishes
-            if (repeat_it->second < cur.version)
-                repeat_it->second = cur.version;
-            wait_state = 0;
-            goto resume_0;
-        }
-        else
-            flusher->sync_to_repeat[cur.oid] = 0;
         if (dirty_end->second.journal_sector >= bs->journal.dirty_start &&
             (bs->journal.dirty_start >= bs->journal.used_start ||
             dirty_end->second.journal_sector < bs->journal.used_start))
@@ -304,7 +213,6 @@ stop_flusher:
             if (!found)
             {
                 // Try other objects
-                flusher->sync_to_repeat.erase(cur.oid);
                 int search_left = flusher->flush_queue.size() - 1;
 #ifdef BLOCKSTORE_DEBUG
                 printf("Flusher overran writers (dirty_start=%08lx) - searching for older flushes (%d left)\n", bs->journal.dirty_start, search_left);
@@ -323,20 +231,15 @@ stop_flusher:
                         dirty_end->second.journal_sector < bs->journal.used_start))
                     {
 #ifdef BLOCKSTORE_DEBUG
-                        printf("Write %lx:%lx v%lu is too new: offset=%08lx\n", cur.oid.inode, cur.oid.stripe, cur.version, dirty_end->second.journal_sector);
+                        printf("Write %lu:%lu v%lu is too new: offset=%08lx\n", cur.oid.inode, cur.oid.stripe, cur.version, dirty_end->second.journal_sector);
 #endif
                         flusher->enqueue_flush(cur);
                     }
                     else
                    {
-                        repeat_it = flusher->sync_to_repeat.find(cur.oid);
-                        if (repeat_it == flusher->sync_to_repeat.end())
-                        {
-                            flusher->sync_to_repeat[cur.oid] = 0;
                         break;
                     }
                 }
-                    }
                    search_left--;
             }
             if (search_left <= 0)
@@ -344,12 +247,31 @@ stop_flusher:
 #ifdef BLOCKSTORE_DEBUG
                 printf("No older flushes, stopping\n");
 #endif
-                goto stop_flusher;
+                flusher->dequeuing = false;
+                wait_state = 0;
+                return true;
             }
         }
     }
+    repeat_it = flusher->sync_to_repeat.find(cur.oid);
+    if (repeat_it != flusher->sync_to_repeat.end())
+    {
 #ifdef BLOCKSTORE_DEBUG
-    printf("Flushing %lx:%lx v%lu\n", cur.oid.inode, cur.oid.stripe, cur.version);
+        printf("Postpone %lu:%lu v%lu\n", cur.oid.inode, cur.oid.stripe, cur.version);
+#endif
+        // We don't flush different parts of history of the same object in parallel
+        // So we check if someone is already flushing this object
+        // In that case we set sync_to_repeat and pick another object
+        // Another coroutine will see it and re-queue the object after it finishes
+        if (repeat_it->second < cur.version)
+            repeat_it->second = cur.version;
+        wait_state = 0;
+        goto resume_0;
+    }
+    else
+        flusher->sync_to_repeat[cur.oid] = 0;
+#ifdef BLOCKSTORE_DEBUG
+    printf("Flushing %lu:%lu v%lu\n", cur.oid.inode, cur.oid.stripe, cur.version);
 #endif
     flusher->active_flushers++;
 resume_1:
@@ -363,12 +285,12 @@ resume_1:
         return false;
     }
     // Writes and deletes shouldn't happen at the same time
-    assert(!has_writes || !has_delete);
-    if (!has_writes && !has_delete || has_delete && old_clean_loc == UINT64_MAX)
+    assert(!(copy_count > 0 || has_writes) || !has_delete);
+    if (copy_count == 0 && !has_writes && !has_delete || has_delete && old_clean_loc == UINT64_MAX)
     {
         // Nothing to flush
         bs->erase_dirty(dirty_start, std::next(dirty_end), clean_loc);
-        goto release_oid;
+        goto trim_journal;
     }
     if (clean_loc == UINT64_MAX)
     {
@@ -377,7 +299,7 @@ resume_1:
             // Object not allocated. This is a bug.
             char err[1024];
             snprintf(
-                err, 1024, "BUG: Object %lx:%lx v%lu that we are trying to flush is not allocated on the data device",
+                err, 1024, "BUG: Object %lu:%lu v%lu that we are trying to flush is not allocated on the data device",
                 cur.oid.inode, cur.oid.stripe, cur.version
             );
             throw std::runtime_error(err);
@@ -426,18 +348,18 @@ resume_1:
     {
         new_clean_bitmap = (bs->inmemory_meta
             ? meta_new.buf + meta_new.pos*bs->clean_entry_size + sizeof(clean_disk_entry)
-            : bs->clean_bitmap + (clean_loc >> bs->block_order)*(2*bs->clean_entry_bitmap_size));
+            : bs->clean_bitmap + (clean_loc >> bs->block_order)*bs->clean_entry_bitmap_size);
         if (clean_init_bitmap)
         {
             memset(new_clean_bitmap, 0, bs->clean_entry_bitmap_size);
-            bitmap_set(new_clean_bitmap, clean_bitmap_offset, clean_bitmap_len, bs->bitmap_granularity);
+            bitmap_set(new_clean_bitmap, clean_bitmap_offset, clean_bitmap_len);
         }
     }
     for (it = v.begin(); it != v.end(); it++)
     {
         if (new_clean_bitmap)
         {
-            bitmap_set(new_clean_bitmap, it->offset, it->len, bs->bitmap_granularity);
+            bitmap_set(new_clean_bitmap, it->offset, it->len);
         }
         await_sqe(4);
         data->iov = (struct iovec){ it->buf, (size_t)it->len };
@@ -471,7 +393,6 @@ resume_1:
             wait_state = 5;
             return false;
         }
-        // zero out old metadata entry
         memset(meta_old.buf + meta_old.pos*bs->clean_entry_size, 0, bs->clean_entry_size);
         await_sqe(15);
         data->iov = (struct iovec){ meta_old.buf, bs->meta_block_size };
@@ -483,30 +404,18 @@ resume_1:
     }
     if (has_delete)
     {
-        // zero out new metadata entry
         memset(meta_new.buf + meta_new.pos*bs->clean_entry_size, 0, bs->clean_entry_size);
     }
     else
     {
         clean_disk_entry *new_entry = (clean_disk_entry*)(meta_new.buf + meta_new.pos*bs->clean_entry_size);
-        if (new_entry->oid.inode != 0 && new_entry->oid != cur.oid)
-        {
-            printf("Fatal error (metadata corruption or bug): tried to overwrite non-zero metadata entry %lu (%lx:%lx) with %lx:%lx\n",
-                clean_loc >> bs->block_order, new_entry->oid.inode, new_entry->oid.stripe, cur.oid.inode, cur.oid.stripe);
-            exit(1);
-        }
+        assert(new_entry->oid.inode == 0 || new_entry->oid == cur.oid);
         new_entry->oid = cur.oid;
         new_entry->version = cur.version;
         if (!bs->inmemory_meta)
         {
             memcpy(&new_entry->bitmap, new_clean_bitmap, bs->clean_entry_bitmap_size);
         }
-        // copy latest external bitmap/attributes
-        if (bs->clean_entry_bitmap_size)
-        {
-            void *bmp_ptr = bs->clean_entry_bitmap_size > sizeof(void*) ? dirty_end->second.bitmap : &dirty_end->second.bitmap;
-            memcpy((void*)(new_entry+1) + bs->clean_entry_bitmap_size, bmp_ptr, bs->clean_entry_bitmap_size);
-        }
     }
     await_sqe(6);
     data->iov = (struct iovec){ meta_new.buf, bs->meta_block_size };
@@ -556,35 +465,14 @@ resume_1:
     }
     // Update clean_db and dirty_db, free old data locations
     update_clean_db();
-#ifdef BLOCKSTORE_DEBUG
-    printf("Flushed %lx:%lx v%lu (%d copies, wr:%d, del:%d), %ld left\n", cur.oid.inode, cur.oid.stripe, cur.version,
-        copy_count, has_writes, has_delete, flusher->flush_queue.size());
-#endif
-release_oid:
-    repeat_it = flusher->sync_to_repeat.find(cur.oid);
-    if (repeat_it != flusher->sync_to_repeat.end() && repeat_it->second > cur.version)
-    {
-        // Requeue version
-        flusher->unshift_flush({ .oid = cur.oid, .version = repeat_it->second }, false);
-    }
-    flusher->sync_to_repeat.erase(repeat_it);
 trim_journal:
     // Clear unused part of the journal every <journal_trim_interval> flushes
     if (!((++flusher->journal_trim_counter) % flusher->journal_trim_interval) || flusher->trim_wanted > 0)
     {
         flusher->journal_trim_counter = 0;
-        new_trim_pos = bs->journal.get_trim_pos();
-        if (new_trim_pos != bs->journal.used_start)
+        if (bs->journal.trim())
         {
-        resume_19:
-            // Wait for other coroutines trimming the journal, if any
-            if (flusher->trimming)
-            {
-                wait_state = 19;
-                return false;
-            }
-            flusher->trimming = true;
-            // First update journal "superblock" and only then update <used_start> in memory
+            // Update journal "superblock"
             await_sqe(12);
             *((journal_entry_start*)flusher->journal_superblock) = {
                 .crc32 = 0,
@@ -592,7 +480,7 @@ resume_1:
                 .type = JE_START,
                 .size = sizeof(journal_entry_start),
                 .reserved = 0,
-                .journal_start = new_trim_pos,
+                .journal_start = bs->journal.used_start,
             };
             ((journal_entry_start*)flusher->journal_superblock)->crc32 = je_crc32((journal_entry*)flusher->journal_superblock);
             data->iov = (struct iovec){ flusher->journal_superblock, bs->journal_block_size };
@@ -605,28 +493,20 @@ resume_1:
                 wait_state = 13;
                 return false;
             }
-            if (!bs->disable_journal_fsync)
-            {
-                await_sqe(20);
-                my_uring_prep_fsync(sqe, bs->journal.fd, IORING_FSYNC_DATASYNC);
-                data->iov = { 0 };
-                data->callback = simple_callback_w;
-            resume_21:
-                if (wait_count > 0)
-                {
-                    wait_state = 21;
-                    return false;
-                }
-            }
-            bs->journal.used_start = new_trim_pos;
-#ifdef BLOCKSTORE_DEBUG
-            printf("Journal trimmed to %08lx (next_free=%08lx)\n", bs->journal.used_start, bs->journal.next_free);
-#endif
-            flusher->trimming = false;
         }
     }
     // All done
+#ifdef BLOCKSTORE_DEBUG
+    printf("Flushed %lu:%lu v%lu (%ld left)\n", cur.oid.inode, cur.oid.stripe, cur.version, flusher->flush_queue.size());
+#endif
     flusher->active_flushers--;
+    repeat_it = flusher->sync_to_repeat.find(cur.oid);
+    if (repeat_it != flusher->sync_to_repeat.end() && repeat_it->second > cur.version)
+    {
+        // Requeue version
+        flusher->unshift_flush({ .oid = cur.oid, .version = repeat_it->second });
+    }
+    flusher->sync_to_repeat.erase(repeat_it);
     wait_state = 0;
     goto resume_0;
 }
@@ -650,16 +530,7 @@ bool journal_flusher_co::scan_dirty(int wait_base)
     clean_init_bitmap = false;
     while (1)
     {
-        if (!IS_STABLE(dirty_it->second.state))
-        {
-            char err[1024];
-            snprintf(
-                err, 1024, "BUG: Unexpected dirty_entry %lx:%lx v%lu unstable state during flush: %d",
-                dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version, dirty_it->second.state
-            );
-            throw std::runtime_error(err);
-        }
-        else if (IS_JOURNAL(dirty_it->second.state) && !skip_copy)
+        if (dirty_it->second.state == ST_J_STABLE && !skip_copy)
         {
             // First we submit all reads
             has_writes = true;
@@ -677,18 +548,18 @@ bool journal_flusher_co::scan_dirty(int wait_base)
                 {
                     submit_offset = dirty_it->second.location + offset - dirty_it->second.offset;
                     submit_len = it == v.end() || it->offset >= end_offset ? end_offset-offset : it->offset-offset;
-                    it = v.insert(it, (copy_buffer_t){ .offset = offset, .len = submit_len, .buf = memalign_or_die(MEM_ALIGNMENT, submit_len) });
+                    it = v.insert(it, (copy_buffer_t){ .offset = offset, .len = submit_len, .buf = memalign(MEM_ALIGNMENT, submit_len) });
                     copy_count++;
                     if (bs->journal.inmemory)
                     {
                         // Take it from memory
-                        memcpy(it->buf, bs->journal.buffer + submit_offset, submit_len);
+                        memcpy(v.back().buf, bs->journal.buffer + submit_offset, submit_len);
                     }
                     else
                     {
                         // Read it from disk
                         await_sqe(0);
-                        data->iov = (struct iovec){ it->buf, (size_t)submit_len };
+                        data->iov = (struct iovec){ v.back().buf, (size_t)submit_len };
                         data->callback = simple_callback_r;
                         my_uring_prep_readv(
                             sqe, bs->journal.fd, &data->iov, 1, bs->journal.offset + submit_offset
@@ -702,7 +573,7 @@ bool journal_flusher_co::scan_dirty(int wait_base)
                 }
             }
         }
-        else if (IS_BIG_WRITE(dirty_it->second.state) && !skip_copy)
+        else if (dirty_it->second.state == ST_D_STABLE && !skip_copy)
        {
             // There is an unflushed big write. Copy small writes in its position
             has_writes = true;
@@ -712,12 +583,21 @@ bool journal_flusher_co::scan_dirty(int wait_base)
             clean_bitmap_len = dirty_it->second.len;
             skip_copy = true;
         }
-        else if (IS_DELETE(dirty_it->second.state) && !skip_copy)
+        else if (dirty_it->second.state == ST_DEL_STABLE && !skip_copy)
         {
             // There is an unflushed delete
             has_delete = true;
             skip_copy = true;
         }
+        else if (!IS_STABLE(dirty_it->second.state))
+        {
+            char err[1024];
+            snprintf(
+                err, 1024, "BUG: Unexpected dirty_entry %lu:%lu v%lu state during flush: %d",
+                dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version, dirty_it->second.state
+            );
+            throw std::runtime_error(err);
+        }
         dirty_start = dirty_it;
         if (dirty_it == bs->dirty_db.begin())
         {
@@ -753,7 +633,7 @@ bool journal_flusher_co::modify_meta_read(uint64_t meta_loc, flusher_meta_write_
     if (wr.it == flusher->meta_sectors.end())
     {
         // Not in memory yet, read it
-        wr.buf = memalign_or_die(MEM_ALIGNMENT, bs->meta_block_size);
+        wr.buf = memalign(MEM_ALIGNMENT, bs->meta_block_size);
         wr.it = flusher->meta_sectors.emplace(wr.sector, (meta_sector_t){
             .offset = wr.sector,
             .len = bs->meta_block_size,
@@ -783,7 +663,7 @@ void journal_flusher_co::update_clean_db()
     if (old_clean_loc != UINT64_MAX && old_clean_loc != clean_loc)
     {
 #ifdef BLOCKSTORE_DEBUG
-        printf("Free block %lu (new location is %lu)\n", old_clean_loc >> bs->block_order, clean_loc >> bs->block_order);
+        printf("Free block %lu\n", old_clean_loc >> bs->block_order);
 #endif
         bs->data_alloc->set(old_clean_loc >> bs->block_order, false);
     }
@@ -831,10 +711,7 @@ bool journal_flusher_co::fsync_batch(bool fsync_meta, int wait_base)
 sync_found:
     cur_sync->ready_count++;
     flusher->syncing_flushers++;
-resume_1:
-    if (!cur_sync->state)
-    {
-        if (flusher->syncing_flushers >= flusher->cur_flusher_count || !flusher->flush_queue.size())
+    if (flusher->syncing_flushers >= flusher->flusher_count || !flusher->flush_queue.size())
     {
         // Sync batch is ready. Do it.
         await_sqe(0);
@@ -843,23 +720,23 @@ bool journal_flusher_co::fsync_batch(bool fsync_meta, int wait_base)
         my_uring_prep_fsync(sqe, fsync_meta ? bs->meta_fd : bs->data_fd, IORING_FSYNC_DATASYNC);
         cur_sync->state = 1;
         wait_count++;
-    resume_2:
+    resume_1:
         if (wait_count > 0)
         {
-            wait_state = 2;
+            wait_state = 1;
             return false;
        }
         // Sync completed. All previous coroutines waiting for it must be resumed
         cur_sync->state = 2;
         bs->ringloop->wakeup();
     }
-    else
-    {
         // Wait until someone else sends and completes a sync.
-        wait_state = 1;
+resume_2:
+    if (!cur_sync->state)
+    {
+        wait_state = 2;
         return false;
     }
-    }
     flusher->syncing_flushers--;
     cur_sync->ready_count--;
     if (cur_sync->ready_count == 0)
@@ -869,3 +746,35 @@ bool journal_flusher_co::fsync_batch(bool fsync_meta, int wait_base)
     }
     return true;
 }
+
+void journal_flusher_co::bitmap_set(void *bitmap, uint64_t start, uint64_t len)
+{
+    if (start == 0)
+    {
+        if (len == 32*bs->bitmap_granularity)
+        {
+            *((uint32_t*)bitmap) = UINT32_MAX;
+            return;
+        }
+        else if (len == 64*bs->bitmap_granularity)
+        {
+            *((uint64_t*)bitmap) = UINT64_MAX;
+            return;
+        }
+    }
+    unsigned bit_start = start / bs->bitmap_granularity;
+    unsigned bit_end = ((start + len) + bs->bitmap_granularity - 1) / bs->bitmap_granularity;
+    while (bit_start < bit_end)
+    {
+        if (!(bit_start & 7) && bit_end >= bit_start+8)
+        {
+            ((uint8_t*)bitmap)[bit_start / 8] = UINT8_MAX;
+            bit_start += 8;
+        }
+        else
+        {
+            ((uint8_t*)bitmap)[bit_start / 8] |= 1 << (bit_start % 8);
+            bit_start++;
+        }
+    }
+}
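The flusher code above relies on a stackless-coroutine convention: loop() is re-entered after every io_uring completion, wait_state remembers where execution stopped, and the resume_N labels (wrapped by the await_sqe(N) macro in the real code) jump back to the exact suspension point. A minimal self-contained sketch of that pattern, with a hypothetical mini_co type that is not part of this repository:

    // Sketch of the resumable-function pattern used by journal_flusher_co:
    // returning false means "suspended, call me again", true means "finished".
    struct mini_co
    {
        int wait_state = 0;
        int wait_count = 0; // pending async operations

        bool loop()
        {
            if (wait_state == 1)
                goto resume_1;
            // ... submit an async write here, then account for it:
            wait_count++;
        resume_1:
            if (wait_count > 0)
            {
                wait_state = 1; // remember the suspension point
                return false;   // yield; the completion callback decrements wait_count
            }
            wait_state = 0;
            return true;        // this step is finished
        }
    };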
View File
@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 struct copy_buffer_t
 {
     uint64_t offset, len;
@@ -59,8 +56,6 @@ class journal_flusher_co
     uint64_t clean_bitmap_offset, clean_bitmap_len;
     void *new_clean_bitmap;
 
-    uint64_t new_trim_pos;
-
     // local: scan_dirty()
     uint64_t offset, end_offset, submit_offset, submit_len;
@@ -69,6 +64,7 @@ class journal_flusher_co
     bool modify_meta_read(uint64_t meta_loc, flusher_meta_write_t &wr, int wait_base);
     void update_clean_db();
     bool fsync_batch(bool fsync_meta, int wait_base);
+    void bitmap_set(void *bitmap, uint64_t start, uint64_t len);
 public:
     journal_flusher_co();
     bool loop();
@@ -79,14 +75,13 @@ class journal_flusher_t
 {
     int trim_wanted = 0;
     bool dequeuing;
-    int flusher_count, cur_flusher_count, target_flusher_count;
+    int flusher_count;
     int flusher_start_threshold;
     journal_flusher_co *co;
     blockstore_impl_t *bs;
     friend class journal_flusher_co;
 
     int journal_trim_counter, journal_trim_interval;
-    bool trimming;
     void* journal_superblock;
 
     int active_flushers;
@@ -102,10 +97,8 @@ public:
     ~journal_flusher_t();
     void loop();
     bool is_active();
-    void mark_trim_possible();
     void request_trim();
     void release_trim();
     void enqueue_flush(obj_ver_id oid);
-    void unshift_flush(obj_ver_id oid, bool force);
-    void remove_flush(object_id oid);
+    void unshift_flush(obj_ver_id oid);
 };
View File
@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #include "blockstore_impl.h"
 
 blockstore_impl_t::blockstore_impl_t(blockstore_config_t & config, ring_loop_t *ringloop)
@@ -10,9 +7,9 @@ blockstore_impl_t::blockstore_impl_t(blockstore_config_t & config, ring_loop_t *
     ring_consumer.loop = [this]() { loop(); };
     ringloop->register_consumer(&ring_consumer);
     initialized = 0;
+    zero_object = (uint8_t*)memalign(MEM_ALIGNMENT, block_size);
     data_fd = meta_fd = journal.fd = -1;
     parse_config(config);
-    zero_object = (uint8_t*)memalign_or_die(MEM_ALIGNMENT, block_size);
     try
     {
         open_data();
@@ -101,14 +98,26 @@ void blockstore_impl_t::loop()
 {
     // try to submit ops
     unsigned initial_ring_space = ringloop->space_left();
-    // has_writes == 0 - no writes before the current queue item
-    // has_writes == 1 - some writes in progress
-    // has_writes == 2 - tried to submit some writes, but failed
-    int has_writes = 0, op_idx = 0, new_idx = 0;
-    for (; op_idx < submit_queue.size(); op_idx++, new_idx++)
-    {
-        auto op = submit_queue[op_idx];
-        submit_queue[new_idx] = op;
+    // FIXME: rework this "sync polling"
+    auto cur_sync = in_progress_syncs.begin();
+    while (cur_sync != in_progress_syncs.end())
+    {
+        if (continue_sync(*cur_sync) != 2)
+        {
+            // List is unmodified
+            cur_sync++;
+        }
+        else
+        {
+            cur_sync = in_progress_syncs.begin();
+        }
+    }
+    auto cur = submit_queue.begin();
+    int has_writes = 0;
+    while (cur != submit_queue.end())
+    {
+        auto op_ptr = cur;
+        auto op = *(cur++);
         // FIXME: This needs some simplification
         // Writes should not block reads if the ring is not full and reads don't depend on them
         // In all other cases we should stop submission
@@ -121,7 +130,7 @@ void blockstore_impl_t::loop()
         }
         else if (PRIV(op)->wait_for)
         {
-            if (op->opcode == BS_OP_WRITE || op->opcode == BS_OP_WRITE_STABLE || op->opcode == BS_OP_DELETE)
+            if (op->opcode == BS_OP_WRITE || op->opcode == BS_OP_DELETE)
            {
                 has_writes = 2;
             }
@@ -130,33 +139,30 @@ void blockstore_impl_t::loop()
         }
         unsigned ring_space = ringloop->space_left();
         unsigned prev_sqe_pos = ringloop->save();
-        // 0 = can't submit
-        // 1 = in progress
-        // 2 = can be removed from queue
-        int wr_st = 0;
+        bool dequeue_op = false;
         if (op->opcode == BS_OP_READ)
         {
-            wr_st = dequeue_read(op);
+            dequeue_op = dequeue_read(op);
         }
-        else if (op->opcode == BS_OP_WRITE || op->opcode == BS_OP_WRITE_STABLE)
+        else if (op->opcode == BS_OP_WRITE)
        {
             if (has_writes == 2)
             {
-                // Some writes already could not be submitted
-                continue;
+                // Some writes could not be submitted
+                break;
             }
-            wr_st = dequeue_write(op);
-            has_writes = wr_st > 0 ? 1 : 2;
+            dequeue_op = dequeue_write(op);
+            has_writes = dequeue_op ? 1 : 2;
         }
         else if (op->opcode == BS_OP_DELETE)
         {
             if (has_writes == 2)
             {
-                // Some writes already could not be submitted
-                continue;
+                // Some writes could not be submitted
+                break;
            }
-            wr_st = dequeue_del(op);
-            has_writes = wr_st > 0 ? 1 : 2;
+            dequeue_op = dequeue_del(op);
+            has_writes = dequeue_op ? 1 : 2;
         }
         else if (op->opcode == BS_OP_SYNC)
         {
@@ -169,31 +175,43 @@ void blockstore_impl_t::loop()
                 // Can't submit SYNC before previous writes
                 continue;
             }
-            wr_st = continue_sync(op, false);
-            if (wr_st != 2)
-            {
-                has_writes = wr_st > 0 ? 1 : 2;
-            }
+            dequeue_op = dequeue_sync(op);
         }
         else if (op->opcode == BS_OP_STABLE)
         {
-            wr_st = dequeue_stable(op);
+            if (has_writes == 2)
+            {
+                // Don't submit additional flushes before completing previous LISTs
+                break;
+            }
+            dequeue_op = dequeue_stable(op);
         }
         else if (op->opcode == BS_OP_ROLLBACK)
         {
-            wr_st = dequeue_rollback(op);
+            if (has_writes == 2)
+            {
+                // Don't submit additional flushes before completing previous LISTs
+                break;
+            }
+            dequeue_op = dequeue_rollback(op);
         }
         else if (op->opcode == BS_OP_LIST)
         {
-            // LIST doesn't need to be blocked by previous modifications
-            process_list(op);
-            wr_st = 2;
-        }
-        if (wr_st == 2)
-        {
-            new_idx--;
-        }
-        if (wr_st == 0)
+            // Block LIST operation by previous modifications,
+            // so it always returns a consistent state snapshot
+            if (has_writes == 2 || inflight_writes > 0)
+                has_writes = 2;
+            else
+            {
+                process_list(op);
+                dequeue_op = true;
+            }
+        }
+        if (dequeue_op)
+        {
+            submit_queue.erase(op_ptr);
+        }
+        else
         {
             ringloop->restore(prev_sqe_pos);
             if (PRIV(op)->wait_for == WAIT_SQE)
@@ -204,14 +222,6 @@ void blockstore_impl_t::loop()
             }
         }
     }
-    if (op_idx != new_idx)
-    {
-        while (op_idx < submit_queue.size())
-        {
-            submit_queue[new_idx++] = submit_queue[op_idx++];
-        }
-        submit_queue.resize(new_idx);
-    }
     if (!readonly)
     {
         flusher->loop();
@@ -234,7 +244,7 @@ bool blockstore_impl_t::is_safe_to_stop()
 {
     // It's safe to stop blockstore when there are no in-flight operations,
     // no in-progress syncs and flusher isn't doing anything
-    if (submit_queue.size() > 0 || !readonly && flusher->is_active())
+    if (submit_queue.size() > 0 || in_progress_syncs.size() > 0 || !readonly && flusher->is_active())
     {
         return false;
     }
@@ -288,7 +298,7 @@ void blockstore_impl_t::check_wait(blockstore_op_t *op)
     else if (PRIV(op)->wait_for == WAIT_JOURNAL_BUFFER)
     {
         int next = ((journal.cur_sector + 1) % journal.sector_count);
-        if (journal.sector_info[next].flush_count > 0 ||
+        if (journal.sector_info[next].usage_count > 0 ||
             journal.sector_info[next].dirty)
         {
             // do not submit
@@ -301,7 +311,7 @@ void blockstore_impl_t::check_wait(blockstore_op_t *op)
     }
     else if (PRIV(op)->wait_for == WAIT_FREE)
     {
-        if (!data_alloc->get_free_count() && flusher->is_active())
+        if (!data_alloc->get_free_count() && !flusher->is_active())
         {
 #ifdef BLOCKSTORE_DEBUG
             printf("Still waiting for free space on the data device\n");
@@ -316,15 +326,16 @@ void blockstore_impl_t::check_wait(blockstore_op_t *op)
     }
 }
 
-void blockstore_impl_t::enqueue_op(blockstore_op_t *op)
+void blockstore_impl_t::enqueue_op(blockstore_op_t *op, bool first)
 {
     if (op->opcode < BS_OP_MIN || op->opcode > BS_OP_MAX ||
-        ((op->opcode == BS_OP_READ || op->opcode == BS_OP_WRITE || op->opcode == BS_OP_WRITE_STABLE) && (
+        ((op->opcode == BS_OP_READ || op->opcode == BS_OP_WRITE) && (
            op->offset >= block_size ||
            op->len > block_size-op->offset ||
            (op->len % disk_alignment)
        )) ||
-        readonly && op->opcode != BS_OP_READ && op->opcode != BS_OP_LIST)
+        readonly && op->opcode != BS_OP_READ && op->opcode != BS_OP_LIST ||
+        first && op->opcode == BS_OP_WRITE)
     {
         // Basic verification not passed
         op->retval = -EINVAL;
@@ -369,17 +380,30 @@ void blockstore_impl_t::enqueue_op(blockstore_op_t *op)
             }
         };
     }
-    if ((op->opcode == BS_OP_WRITE || op->opcode == BS_OP_WRITE_STABLE || op->opcode == BS_OP_DELETE) && !enqueue_write(op))
+    if ((op->opcode == BS_OP_WRITE || op->opcode == BS_OP_DELETE) && !enqueue_write(op))
     {
         std::function<void (blockstore_op_t*)>(op->callback)(op);
         return;
     }
+    if (op->opcode == BS_OP_SYNC && immediate_commit == IMMEDIATE_ALL)
+    {
+        op->retval = 0;
+        std::function<void (blockstore_op_t*)>(op->callback)(op);
+        return;
+    }
     // Call constructor without allocating memory. We'll call destructor before returning op back
     new ((void*)op->private_data) blockstore_op_private_t;
     PRIV(op)->wait_for = 0;
     PRIV(op)->op_state = 0;
     PRIV(op)->pending_ops = 0;
+    if (!first)
+    {
         submit_queue.push_back(op);
+    }
+    else
+    {
+        submit_queue.push_front(op);
+    }
     ringloop->wakeup();
 }
@@ -407,12 +431,10 @@ static bool replace_stable(object_id oid, uint64_t version, int search_start, in
 
 void blockstore_impl_t::process_list(blockstore_op_t *op)
 {
+    // Check PG
     uint32_t list_pg = op->offset;
     uint32_t pg_count = op->len;
     uint64_t pg_stripe_size = op->oid.stripe;
-    uint64_t min_inode = op->oid.inode;
-    uint64_t max_inode = op->version;
-    // Check PG
     if (pg_count != 0 && (pg_stripe_size < MIN_BLOCK_SIZE || list_pg >= pg_count))
     {
         op->retval = -EINVAL;
@@ -428,22 +450,9 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
         FINISH_OP(op);
         return;
     }
+    for (auto it = clean_db.begin(); it != clean_db.end(); it++)
     {
-        auto clean_it = clean_db.begin(), clean_end = clean_db.end();
-        if ((min_inode != 0 || max_inode != 0) && min_inode <= max_inode)
-        {
-            clean_it = clean_db.lower_bound({
-                .inode = min_inode,
-                .stripe = 0,
-            });
-            clean_end = clean_db.upper_bound({
-                .inode = max_inode,
-                .stripe = UINT64_MAX,
-            });
-        }
-        for (; clean_it != clean_end; clean_it++)
-        {
-            if (!pg_count || ((clean_it->first.inode + clean_it->first.stripe / pg_stripe_size) % pg_count) == list_pg)
+        if (!pg_count || ((it->first.inode + it->first.stripe / pg_stripe_size) % pg_count) == list_pg)
        {
            if (stable_count >= stable_alloc)
            {
@@ -457,56 +466,36 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
                 }
             }
             stable[stable_count++] = {
-                .oid = clean_it->first,
-                .version = clean_it->second.version,
+                .oid = it->first,
+                .version = it->second.version,
             };
         }
     }
-    }
     int clean_stable_count = stable_count;
     // Copy dirty_db entries (sorted, too)
     int unstable_count = 0, unstable_alloc = 0;
     obj_ver_id *unstable = NULL;
+    for (auto it = dirty_db.begin(); it != dirty_db.end(); it++)
     {
-        auto dirty_it = dirty_db.begin(), dirty_end = dirty_db.end();
-        if ((min_inode != 0 || max_inode != 0) && min_inode <= max_inode)
-        {
-            dirty_it = dirty_db.lower_bound({
-                .oid = {
-                    .inode = min_inode,
-                    .stripe = 0,
-                },
-                .version = 0,
-            });
-            dirty_end = dirty_db.upper_bound({
-                .oid = {
-                    .inode = max_inode,
-                    .stripe = UINT64_MAX,
-                },
-                .version = UINT64_MAX,
-            });
-        }
-        for (; dirty_it != dirty_end; dirty_it++)
-        {
-            if (!pg_count || ((dirty_it->first.oid.inode + dirty_it->first.oid.stripe / pg_stripe_size) % pg_count) == list_pg)
+        if (!pg_count || ((it->first.oid.inode + it->first.oid.stripe / pg_stripe_size) % pg_count) == list_pg)
         {
-            if (IS_DELETE(dirty_it->second.state))
+            if (IS_DELETE(it->second.state))
             {
                 // Deletions are always stable, so try to zero out two possible entries
-                if (!replace_stable(dirty_it->first.oid, 0, 0, clean_stable_count, stable))
+                if (!replace_stable(it->first.oid, 0, 0, clean_stable_count, stable))
                 {
-                    replace_stable(dirty_it->first.oid, 0, clean_stable_count, stable_count, stable);
+                    replace_stable(it->first.oid, 0, clean_stable_count, stable_count, stable);
                 }
             }
-            else if (IS_STABLE(dirty_it->second.state))
+            else if (IS_STABLE(it->second.state))
             {
                 // First try to replace a clean stable version in the first part of the list
-                if (!replace_stable(dirty_it->first.oid, dirty_it->first.version, 0, clean_stable_count, stable))
+                if (!replace_stable(it->first.oid, it->first.version, 0, clean_stable_count, stable))
                 {
                     // Then try to replace the last dirty stable version in the second part of the list
-                    if (stable_count > 0 && stable[stable_count-1].oid == dirty_it->first.oid)
+                    if (stable[stable_count-1].oid == it->first.oid)
                     {
-                        stable[stable_count-1].version = dirty_it->first.version;
+                        stable[stable_count-1].version = it->first.version;
                     }
                     else
                     {
@@ -523,7 +512,7 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
                             return;
                         }
                     }
-                    stable[stable_count++] = dirty_it->first;
+                    stable[stable_count++] = it->first;
                 }
             }
         }
@@ -542,8 +531,7 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
                     return;
                 }
             }
-            unstable[unstable_count++] = dirty_it->first;
-        }
+            unstable[unstable_count++] = it->first;
         }
     }
 }
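process_list() above assigns objects to placement groups with the formula (inode + stripe / pg_stripe_size) % pg_count == list_pg, visible on both sides of the diff. A hedged restatement as a helper (the in_pg name and the example numbers are illustrative, not from this repository):

    #include <stdint.h>

    // Worked example: pg_count = 3, pg_stripe_size = 4 MB,
    // object { inode = 5, stripe = 12 MB }:
    //   (5 + 12 MB / 4 MB) % 3 = (5 + 3) % 3 = 2
    // so this object is returned only when listing PG number 2.
    static inline bool in_pg(uint64_t inode, uint64_t stripe,
        uint64_t pg_stripe_size, uint32_t pg_count, uint32_t list_pg)
    {
        return !pg_count || ((inode + stripe / pg_stripe_size) % pg_count) == list_pg;
    }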
View File
@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#pragma once #pragma once
#include "blockstore.h" #include "blockstore.h"
@ -10,6 +7,7 @@
#include <sys/stat.h> #include <sys/stat.h>
#include <fcntl.h> #include <fcntl.h>
#include <unistd.h> #include <unistd.h>
#include <malloc.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <vector> #include <vector>
@ -19,39 +17,45 @@
#include "cpp-btree/btree_map.h" #include "cpp-btree/btree_map.h"
#include "malloc_or_die.h"
#include "allocator.h" #include "allocator.h"
//#define BLOCKSTORE_DEBUG //#define BLOCKSTORE_DEBUG
// States are not stored on disk. Instead, they're deduced from the journal // States are not stored on disk. Instead, they're deduced from the journal
// FIXME: Rename to BS_ST_*
#define BS_ST_SMALL_WRITE 0x01 #define ST_J_WAIT_BIG 1
#define BS_ST_BIG_WRITE 0x02 #define ST_J_IN_FLIGHT 2
#define BS_ST_DELETE 0x03 #define ST_J_SUBMITTED 3
#define ST_J_WRITTEN 4
#define ST_J_SYNCED 5
#define ST_J_STABLE 6
#define BS_ST_WAIT_DEL 0x10 #define ST_D_IN_FLIGHT 15
#define BS_ST_WAIT_BIG 0x20 #define ST_D_SUBMITTED 16
#define BS_ST_IN_FLIGHT 0x30 #define ST_D_WRITTEN 17
#define BS_ST_SUBMITTED 0x40 #define ST_D_SYNCED 20
#define BS_ST_WRITTEN 0x50 #define ST_D_STABLE 21
#define BS_ST_SYNCED 0x60
#define BS_ST_STABLE 0x70
#define BS_ST_INSTANT 0x100 #define ST_DEL_IN_FLIGHT 31
#define ST_DEL_SUBMITTED 32
#define ST_DEL_WRITTEN 33
#define ST_DEL_SYNCED 34
#define ST_DEL_STABLE 35
#define ST_CURRENT 48
#define IMMEDIATE_NONE 0 #define IMMEDIATE_NONE 0
#define IMMEDIATE_SMALL 1 #define IMMEDIATE_SMALL 1
#define IMMEDIATE_ALL 2 #define IMMEDIATE_ALL 2
#define BS_ST_TYPE_MASK 0x0F #define IS_IN_FLIGHT(st) (st == ST_J_WAIT_BIG || st == ST_J_IN_FLIGHT || st == ST_D_IN_FLIGHT || st == ST_DEL_IN_FLIGHT || st == ST_J_SUBMITTED || st == ST_D_SUBMITTED || st == ST_DEL_SUBMITTED)
#define BS_ST_WORKFLOW_MASK 0xF0 #define IS_STABLE(st) (st == ST_J_STABLE || st == ST_D_STABLE || st == ST_DEL_STABLE || st == ST_CURRENT)
#define IS_IN_FLIGHT(st) (((st) & 0xF0) <= BS_ST_SUBMITTED) #define IS_SYNCED(st) (IS_STABLE(st) || st == ST_J_SYNCED || st == ST_D_SYNCED || st == ST_DEL_SYNCED)
#define IS_STABLE(st) (((st) & 0xF0) == BS_ST_STABLE) #define IS_JOURNAL(st) (st >= ST_J_WAIT_BIG && st <= ST_J_STABLE)
#define IS_SYNCED(st) (((st) & 0xF0) >= BS_ST_SYNCED) #define IS_BIG_WRITE(st) (st >= ST_D_IN_FLIGHT && st <= ST_D_STABLE)
#define IS_JOURNAL(st) (((st) & 0x0F) == BS_ST_SMALL_WRITE) #define IS_DELETE(st) (st >= ST_DEL_IN_FLIGHT && st <= ST_DEL_STABLE)
#define IS_BIG_WRITE(st) (((st) & 0x0F) == BS_ST_BIG_WRITE) #define IS_UNSYNCED(st) (st >= ST_J_WAIT_BIG && st <= ST_J_WRITTEN || st >= ST_D_IN_FLIGHT && st <= ST_D_WRITTEN || st >= ST_DEL_IN_FLIGHT && st <= ST_DEL_WRITTEN)
#define IS_DELETE(st) (((st) & 0x0F) == BS_ST_DELETE)
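
// A minimal self-contained sketch of the left-hand state encoding above:
// the low nibble holds the operation type, the high nibble the workflow
// stage, so every IS_* predicate becomes a single mask test. Names and the
// demo values are illustrative, not the real blockstore API.

#include <cassert>
#include <cstdint>

static const uint32_t TYPE_SMALL_WRITE = 0x01, TYPE_BIG_WRITE = 0x02, TYPE_DELETE = 0x03;
static const uint32_t WF_IN_FLIGHT = 0x30, WF_SUBMITTED = 0x40, WF_SYNCED = 0x60, WF_STABLE = 0x70;

static inline bool is_in_flight(uint32_t st) { return (st & 0xF0) <= WF_SUBMITTED; }
static inline bool is_synced(uint32_t st)    { return (st & 0xF0) >= WF_SYNCED; }
static inline bool is_stable(uint32_t st)    { return (st & 0xF0) == WF_STABLE; }
static inline bool is_big_write(uint32_t st) { return (st & 0x0F) == TYPE_BIG_WRITE; }

int main()
{
    uint32_t st = TYPE_BIG_WRITE | WF_SYNCED; // a synced big write
    assert(is_big_write(st) && is_synced(st) && !is_stable(st) && !is_in_flight(st));
    st = (st & ~0xF0u) | WF_STABLE;           // stabilizing only touches the workflow nibble
    assert(is_big_write(st) && is_stable(st));
    assert(!is_synced(TYPE_SMALL_WRITE | WF_IN_FLIGHT) && !is_big_write(TYPE_DELETE | WF_STABLE));
    return 0;
}
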
#define BS_SUBMIT_GET_SQE(sqe, data) \ #define BS_SUBMIT_GET_SQE(sqe, data) \
BS_SUBMIT_GET_ONLY_SQE(sqe); \ BS_SUBMIT_GET_ONLY_SQE(sqe); \
@ -77,8 +81,7 @@
#include "blockstore_journal.h" #include "blockstore_journal.h"
// 32 bytes = 24 bytes + block bitmap (4 bytes by default) + external attributes (also bitmap, 4 bytes by default) // 24 bytes + block bitmap per "clean" entry on disk with fixed metadata tables
// per "clean" entry on disk with fixed metadata tables
// FIXME: maybe add crc32's to metadata // FIXME: maybe add crc32's to metadata
struct __attribute__((__packed__)) clean_disk_entry struct __attribute__((__packed__)) clean_disk_entry
{ {
@ -94,7 +97,7 @@ struct __attribute__((__packed__)) clean_entry
uint64_t location; uint64_t location;
}; };
// 64 = 24 + 40 bytes per dirty entry in memory (obj_ver_id => dirty_entry) // 56 = 24 + 32 bytes per dirty entry in memory (obj_ver_id => dirty_entry)
struct __attribute__((__packed__)) dirty_entry struct __attribute__((__packed__)) dirty_entry
{ {
uint32_t state; uint32_t state;
@ -103,7 +106,6 @@ struct __attribute__((__packed__)) dirty_entry
uint32_t offset; // data offset within object (stripe) uint32_t offset; // data offset within object (stripe)
uint32_t len; // data length uint32_t len; // data length
uint64_t journal_sector; // journal sector used for this entry uint64_t journal_sector; // journal sector used for this entry
void* bitmap; // either external bitmap itself when it fits, or a pointer to it when it doesn't
}; };
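
// A quick check of the byte counts quoted in the comments above, assuming
// the packed layouts shown and 64-bit pointers (sketch only).

#include <cstdint>

struct __attribute__((__packed__)) oid_s { uint64_t inode, stripe; };
struct __attribute__((__packed__)) obj_ver_id_s { oid_s oid; uint64_t version; };

struct __attribute__((__packed__)) dirty_entry_s
{
    uint32_t state, flags;
    uint64_t location;
    uint32_t offset, len;
    uint64_t journal_sector;
    void *bitmap; // present only in the left-hand (bitmap-carrying) variant
};

static_assert(sizeof(obj_ver_id_s) == 24, "24-byte map key");
static_assert(sizeof(dirty_entry_s) == 40, "24 + 40 = 64 bytes per dirty version");
static_assert(sizeof(dirty_entry_s) - sizeof(void*) == 32, "24 + 32 = 56 without the bitmap");

int main() { return 0; }
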
// - Sync must be submitted after previous writes/deletes (not before!) // - Sync must be submitted after previous writes/deletes (not before!)
@ -156,12 +158,12 @@ struct blockstore_op_private_t
// Write // Write
struct iovec iov_zerofill[3]; struct iovec iov_zerofill[3];
// Warning: must not have a default value here because it's written to before calling constructor in blockstore_write.cpp O_o
uint64_t real_version;
// Sync // Sync
std::vector<obj_ver_id> sync_big_writes, sync_small_writes; std::vector<obj_ver_id> sync_big_writes, sync_small_writes;
int sync_small_checked, sync_big_checked; int sync_small_checked, sync_big_checked;
std::list<blockstore_op_t*>::iterator in_progress_ptr;
int prev_sync_count;
}; };
// https://github.com/algorithm-ninja/cpp-btree // https://github.com/algorithm-ninja/cpp-btree
@ -199,10 +201,7 @@ class blockstore_impl_t
// Suitable only for server SSDs with capacitors, requires disabled data and journal fsyncs // Suitable only for server SSDs with capacitors, requires disabled data and journal fsyncs
int immediate_commit = IMMEDIATE_NONE; int immediate_commit = IMMEDIATE_NONE;
bool inmemory_meta = false; bool inmemory_meta = false;
// Maximum flusher count int flusher_count;
unsigned flusher_count;
// Maximum queue depth
unsigned max_write_iodepth = 128;
/******* END OF OPTIONS *******/ /******* END OF OPTIONS *******/
struct ring_consumer_t ring_consumer; struct ring_consumer_t ring_consumer;
@ -210,8 +209,9 @@ class blockstore_impl_t
blockstore_clean_db_t clean_db; blockstore_clean_db_t clean_db;
uint8_t *clean_bitmap = NULL; uint8_t *clean_bitmap = NULL;
blockstore_dirty_db_t dirty_db; blockstore_dirty_db_t dirty_db;
std::vector<blockstore_op_t*> submit_queue; std::list<blockstore_op_t*> submit_queue; // FIXME: funny thing is that vector is better here
std::vector<obj_ver_id> unsynced_big_writes, unsynced_small_writes; std::vector<obj_ver_id> unsynced_big_writes, unsynced_small_writes;
std::list<blockstore_op_t*> in_progress_syncs; // ...and probably here, too
allocator *data_alloc = NULL; allocator *data_alloc = NULL;
uint8_t *zero_object; uint8_t *zero_object;
@ -228,10 +228,10 @@ class blockstore_impl_t
struct journal_t journal; struct journal_t journal;
journal_flusher_t *flusher; journal_flusher_t *flusher;
int write_iodepth = 0;
bool live = false, queue_stall = false; bool live = false, queue_stall = false;
ring_loop_t *ringloop; ring_loop_t *ringloop;
int inflight_writes = 0;
bool stop_sync_submitted; bool stop_sync_submitted;
@ -251,7 +251,6 @@ class blockstore_impl_t
void open_data(); void open_data();
void open_meta(); void open_meta();
void open_journal(); void open_journal();
uint8_t* get_clean_entry_bitmap(uint64_t block_loc, int offset);
// Asynchronous init // Asynchronous init
int initialized; int initialized;
@ -271,7 +270,6 @@ class blockstore_impl_t
// Write // Write
bool enqueue_write(blockstore_op_t *op); bool enqueue_write(blockstore_op_t *op);
void cancel_all_writes(blockstore_op_t *op, blockstore_dirty_db_t::iterator dirty_it, int retval);
int dequeue_write(blockstore_op_t *op); int dequeue_write(blockstore_op_t *op);
int dequeue_del(blockstore_op_t *op); int dequeue_del(blockstore_op_t *op);
int continue_write(blockstore_op_t *op); int continue_write(blockstore_op_t *op);
@ -279,9 +277,11 @@ class blockstore_impl_t
void handle_write_event(ring_data_t *data, blockstore_op_t *op); void handle_write_event(ring_data_t *data, blockstore_op_t *op);
// Sync // Sync
int continue_sync(blockstore_op_t *op, bool queue_has_in_progress_sync); int dequeue_sync(blockstore_op_t *op);
void handle_sync_event(ring_data_t *data, blockstore_op_t *op); void handle_sync_event(ring_data_t *data, blockstore_op_t *op);
void ack_sync(blockstore_op_t *op); int continue_sync(blockstore_op_t *op);
void ack_one_sync(blockstore_op_t *op);
int ack_sync(blockstore_op_t *op);
// Stabilize // Stabilize
int dequeue_stable(blockstore_op_t *op); int dequeue_stable(blockstore_op_t *op);
@ -321,16 +321,13 @@ public:
bool is_stalled(); bool is_stalled();
// Submission // Submission
void enqueue_op(blockstore_op_t *op); void enqueue_op(blockstore_op_t *op, bool first = false);
// Unstable writes are added here (map of object_id -> version) // Unstable writes are added here (map of object_id -> version)
std::unordered_map<object_id, uint64_t> unstable_writes; std::unordered_map<object_id, uint64_t> unstable_writes;
// Space usage statistics
std::map<uint64_t, uint64_t> inode_space_stats;
inline uint32_t get_block_size() { return block_size; } inline uint32_t get_block_size() { return block_size; }
inline uint64_t get_block_count() { return block_count; } inline uint64_t get_block_count() { return block_count; }
inline uint64_t get_free_block_count() { return data_alloc->get_free_count(); } inline uint64_t get_free_block_count() { return data_alloc->get_free_count(); }
inline uint32_t get_bitmap_granularity() { return disk_alignment; } inline uint32_t get_disk_alignment() { return disk_alignment; }
}; };

View File

@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include "blockstore_impl.h" #include "blockstore_impl.h"
blockstore_init_meta::blockstore_init_meta(blockstore_impl_t *bs) blockstore_init_meta::blockstore_init_meta(blockstore_impl_t *bs)
@ -100,7 +97,7 @@ void blockstore_init_meta::handle_entries(void* entries, unsigned count, int blo
clean_disk_entry *entry = (clean_disk_entry*)(entries + i*bs->clean_entry_size); clean_disk_entry *entry = (clean_disk_entry*)(entries + i*bs->clean_entry_size);
if (!bs->inmemory_meta && bs->clean_entry_bitmap_size) if (!bs->inmemory_meta && bs->clean_entry_bitmap_size)
{ {
memcpy(bs->clean_bitmap + (done_cnt+i)*2*bs->clean_entry_bitmap_size, &entry->bitmap, 2*bs->clean_entry_bitmap_size); memcpy(bs->clean_bitmap + (done_cnt+i)*bs->clean_entry_bitmap_size, &entry->bitmap, bs->clean_entry_bitmap_size);
} }
if (entry->oid.inode > 0) if (entry->oid.inode > 0)
{ {
@ -111,17 +108,13 @@ void blockstore_init_meta::handle_entries(void* entries, unsigned count, int blo
{ {
// free the previous block // free the previous block
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf("Free block %lu (new location is %lu)\n", clean_it->second.location >> block_order, done_cnt+i); printf("Free block %lu\n", clean_it->second.location >> bs->block_order);
#endif #endif
bs->data_alloc->set(clean_it->second.location >> block_order, false); bs->data_alloc->set(clean_it->second.location >> block_order, false);
} }
else
{
bs->inode_space_stats[entry->oid.inode] += bs->block_size;
}
entries_loaded++; entries_loaded++;
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf("Allocate block (clean entry) %lu: %lx:%lx v%lu\n", done_cnt+i, entry->oid.inode, entry->oid.stripe, entry->version); printf("Allocate block (clean entry) %lu: %lu:%lu v%lu\n", done_cnt+i, entry->oid.inode, entry->oid.stripe, entry->version);
#endif #endif
bs->data_alloc->set(done_cnt+i, true); bs->data_alloc->set(done_cnt+i, true);
bs->clean_db[entry->oid] = (struct clean_entry){ bs->clean_db[entry->oid] = (struct clean_entry){
@ -132,7 +125,7 @@ void blockstore_init_meta::handle_entries(void* entries, unsigned count, int blo
else else
{ {
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf("Old clean entry %lu: %lx:%lx v%lu\n", done_cnt+i, entry->oid.inode, entry->oid.stripe, entry->version); printf("Old clean entry %lu: %lu:%lu v%lu\n", done_cnt+i, entry->oid.inode, entry->oid.stripe, entry->version);
#endif #endif
} }
} }
@ -209,7 +202,11 @@ int blockstore_init_journal::loop()
goto resume_7; goto resume_7;
printf("Reading blockstore journal\n"); printf("Reading blockstore journal\n");
if (!bs->journal.inmemory) if (!bs->journal.inmemory)
submitted_buf = memalign_or_die(MEM_ALIGNMENT, 2*bs->journal.block_size); {
submitted_buf = memalign(MEM_ALIGNMENT, 2*bs->journal.block_size);
if (!submitted_buf)
throw std::bad_alloc();
}
else else
submitted_buf = bs->journal.buffer; submitted_buf = bs->journal.buffer;
// Read first block of the journal // Read first block of the journal
@ -320,7 +317,7 @@ resume_1:
if (journal_pos < bs->journal.used_start) if (journal_pos < bs->journal.used_start)
end = bs->journal.used_start; end = bs->journal.used_start;
if (!bs->journal.inmemory) if (!bs->journal.inmemory)
submitted_buf = memalign_or_die(MEM_ALIGNMENT, JOURNAL_BUFFER_SIZE); submitted_buf = memalign(MEM_ALIGNMENT, JOURNAL_BUFFER_SIZE);
else else
submitted_buf = bs->journal.buffer + journal_pos; submitted_buf = bs->journal.buffer + journal_pos;
data->iov = { data->iov = {
@ -403,7 +400,8 @@ resume_1:
} }
} }
} }
bs->flusher->mark_trim_possible(); // Trim journal on start so we don't stall when all entries are older
bs->journal.trim();
bs->journal.dirty_start = bs->journal.next_free; bs->journal.dirty_start = bs->journal.next_free;
printf( printf(
"Journal entries loaded: %lu, free journal space: %lu bytes (%08lx..%08lx is used), free blocks: %lu / %lu\n", "Journal entries loaded: %lu, free journal space: %lu bytes (%08lx..%08lx is used), free blocks: %lu / %lu\n",
@ -456,15 +454,10 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
break; break;
} }
} }
if (je->type == JE_SMALL_WRITE || je->type == JE_SMALL_WRITE_INSTANT) if (je->type == JE_SMALL_WRITE)
{ {
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf( printf("je_small_write oid=%lu:%lu ver=%lu offset=%u len=%u\n", je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version, je->small_write.offset, je->small_write.len);
"je_small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u\n",
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
je->small_write.offset, je->small_write.len
);
#endif #endif
// oid, version, offset, len // oid, version, offset, len
uint64_t prev_free = next_free; uint64_t prev_free = next_free;
@ -534,99 +527,30 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
.oid = je->small_write.oid, .oid = je->small_write.oid,
.version = je->small_write.version, .version = je->small_write.version,
}; };
void *bmp = (void*)je + sizeof(journal_entry_small_write);
if (bs->clean_entry_bitmap_size <= sizeof(void*))
{
memcpy(&bmp, bmp, bs->clean_entry_bitmap_size);
}
else if (!bs->journal.inmemory)
{
// FIXME Using large blockstore objects and not keeping journal in memory
// will result in a lot of small allocations for entry bitmaps. This can
// only be fixed by using a patched map with dynamic entry size, but not
// the btree_map, because it doesn't keep iterators valid all the time.
void *bmp_cp = malloc_or_die(bs->clean_entry_bitmap_size);
memcpy(bmp_cp, bmp, bs->clean_entry_bitmap_size);
bmp = bmp_cp;
}
bs->dirty_db.emplace(ov, (dirty_entry){ bs->dirty_db.emplace(ov, (dirty_entry){
.state = (BS_ST_SMALL_WRITE | BS_ST_SYNCED), .state = ST_J_SYNCED,
.flags = 0, .flags = 0,
.location = location, .location = location,
.offset = je->small_write.offset, .offset = je->small_write.offset,
.len = je->small_write.len, .len = je->small_write.len,
.journal_sector = proc_pos, .journal_sector = proc_pos,
.bitmap = bmp,
}); });
bs->journal.used_sectors[proc_pos]++; bs->journal.used_sectors[proc_pos]++;
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf( printf(
"journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n", "journal offset %08lx is used by %lu:%lu v%lu (%lu refs)\n",
proc_pos, ov.oid.inode, ov.oid.stripe, ov.version, bs->journal.used_sectors[proc_pos] proc_pos, ov.oid.inode, ov.oid.stripe, ov.version, bs->journal.used_sectors[proc_pos]
); );
#endif #endif
auto & unstab = bs->unstable_writes[ov.oid]; auto & unstab = bs->unstable_writes[ov.oid];
unstab = unstab < ov.version ? ov.version : unstab; unstab = unstab < ov.version ? ov.version : unstab;
if (je->type == JE_SMALL_WRITE_INSTANT)
{
bs->mark_stable(ov);
} }
} }
} else if (je->type == JE_BIG_WRITE)
else if (je->type == JE_BIG_WRITE || je->type == JE_BIG_WRITE_INSTANT)
{ {
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf( printf("je_big_write oid=%lu:%lu ver=%lu loc=%lu\n", je->big_write.oid.inode, je->big_write.oid.stripe, je->big_write.version, je->big_write.location);
"je_big_write%s oid=%lx:%lx ver=%lu loc=%lu\n",
je->type == JE_BIG_WRITE_INSTANT ? "_instant" : "",
je->big_write.oid.inode, je->big_write.oid.stripe, je->big_write.version, je->big_write.location >> bs->block_order
);
#endif #endif
auto dirty_it = bs->dirty_db.upper_bound((obj_ver_id){
.oid = je->big_write.oid,
.version = UINT64_MAX,
});
if (dirty_it != bs->dirty_db.begin() && bs->dirty_db.size() > 0)
{
dirty_it--;
if (dirty_it->first.oid == je->big_write.oid &&
dirty_it->first.version >= je->big_write.version &&
(dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_DELETE)
{
// It is allowed to overwrite a deleted object with a
// version number smaller than deletion version number,
// because the presence of a BIG_WRITE entry means that
// its data and metadata are already flushed.
// We don't know if newer versions are flushed, but
// the previous delete definitely is.
// So we flush previous dirty entries, but retain the clean one.
// This feature is required for writes happening shortly
// after deletes.
auto dirty_end = dirty_it;
dirty_end++;
while (1)
{
if (dirty_it == bs->dirty_db.begin())
{
break;
}
dirty_it--;
if (dirty_it->first.oid != je->big_write.oid)
{
dirty_it++;
break;
}
}
auto clean_it = bs->clean_db.find(je->big_write.oid);
bs->erase_dirty(
dirty_it, dirty_end,
clean_it != bs->clean_db.end() ? clean_it->second.location : UINT64_MAX
);
// Remove it from the flusher's queue, too
// Otherwise it may end up referring to a small unstable write after reading the rest of the journal
bs->flusher->remove_flush(je->big_write.oid);
}
}
auto clean_it = bs->clean_db.find(je->big_write.oid); auto clean_it = bs->clean_db.find(je->big_write.oid);
if (clean_it == bs->clean_db.end() || if (clean_it == bs->clean_db.end() ||
clean_it->second.version < je->big_write.version) clean_it->second.version < je->big_write.version)
@ -636,53 +560,27 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
.oid = je->big_write.oid, .oid = je->big_write.oid,
.version = je->big_write.version, .version = je->big_write.version,
}; };
void *bmp = (void*)je + sizeof(journal_entry_big_write);
if (bs->clean_entry_bitmap_size <= sizeof(void*))
{
memcpy(&bmp, bmp, bs->clean_entry_bitmap_size);
}
else if (!bs->journal.inmemory)
{
// FIXME Using large blockstore objects and not keeping journal in memory
// will result in a lot of small allocations for entry bitmaps. This can
// only be fixed by using a patched map with dynamic entry size, but not
// the btree_map, because it doesn't keep iterators valid all the time.
void *bmp_cp = malloc_or_die(bs->clean_entry_bitmap_size);
memcpy(bmp_cp, bmp, bs->clean_entry_bitmap_size);
bmp = bmp_cp;
}
bs->dirty_db.emplace(ov, (dirty_entry){ bs->dirty_db.emplace(ov, (dirty_entry){
.state = (BS_ST_BIG_WRITE | BS_ST_SYNCED), .state = ST_D_SYNCED,
.flags = 0, .flags = 0,
.location = je->big_write.location, .location = je->big_write.location,
.offset = je->big_write.offset, .offset = je->big_write.offset,
.len = je->big_write.len, .len = je->big_write.len,
.journal_sector = proc_pos, .journal_sector = proc_pos,
.bitmap = bmp,
}); });
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf("Allocate block %lu\n", je->big_write.location >> bs->block_order); printf("Allocate block %lu\n", je->big_write.location >> bs->block_order);
#endif #endif
bs->data_alloc->set(je->big_write.location >> bs->block_order, true); bs->data_alloc->set(je->big_write.location >> bs->block_order, true);
bs->journal.used_sectors[proc_pos]++; bs->journal.used_sectors[proc_pos]++;
#ifdef BLOCKSTORE_DEBUG
printf(
"journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
proc_pos, ov.oid.inode, ov.oid.stripe, ov.version, bs->journal.used_sectors[proc_pos]
);
#endif
auto & unstab = bs->unstable_writes[ov.oid]; auto & unstab = bs->unstable_writes[ov.oid];
unstab = unstab < ov.version ? ov.version : unstab; unstab = unstab < ov.version ? ov.version : unstab;
if (je->type == JE_BIG_WRITE_INSTANT)
{
bs->mark_stable(ov);
}
} }
} }
else if (je->type == JE_STABLE) else if (je->type == JE_STABLE)
{ {
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf("je_stable oid=%lx:%lx ver=%lu\n", je->stable.oid.inode, je->stable.oid.stripe, je->stable.version); printf("je_stable oid=%lu:%lu ver=%lu\n", je->stable.oid.inode, je->stable.oid.stripe, je->stable.version);
#endif #endif
// oid, version // oid, version
obj_ver_id ov = { obj_ver_id ov = {
@ -694,7 +592,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
else if (je->type == JE_ROLLBACK) else if (je->type == JE_ROLLBACK)
{ {
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf("je_rollback oid=%lx:%lx ver=%lu\n", je->rollback.oid.inode, je->rollback.oid.stripe, je->rollback.version); printf("je_rollback oid=%lu:%lu ver=%lu\n", je->rollback.oid.inode, je->rollback.oid.stripe, je->rollback.version);
#endif #endif
// rollback dirty writes of <oid> up to <version> // rollback dirty writes of <oid> up to <version>
obj_ver_id ov = { obj_ver_id ov = {
@ -706,10 +604,10 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
else if (je->type == JE_DELETE) else if (je->type == JE_DELETE)
{ {
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf("je_delete oid=%lx:%lx ver=%lu\n", je->del.oid.inode, je->del.oid.stripe, je->del.version); printf("je_delete oid=%lu:%lu ver=%lu\n", je->del.oid.inode, je->del.oid.stripe, je->del.version);
#endif #endif
auto clean_it = bs->clean_db.find(je->del.oid); auto clean_it = bs->clean_db.find(je->del.oid);
if (clean_it != bs->clean_db.end() && if (clean_it == bs->clean_db.end() ||
clean_it->second.version < je->del.version) clean_it->second.version < je->del.version)
{ {
// oid, version // oid, version
@ -718,7 +616,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
.version = je->del.version, .version = je->del.version,
}; };
bs->dirty_db.emplace(ov, (dirty_entry){ bs->dirty_db.emplace(ov, (dirty_entry){
.state = (BS_ST_DELETE | BS_ST_SYNCED), .state = ST_DEL_SYNCED,
.flags = 0, .flags = 0,
.location = 0, .location = 0,
.offset = 0, .offset = 0,

View File

@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#pragma once #pragma once
class blockstore_init_meta class blockstore_init_meta

View File

@ -1,12 +1,9 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include "blockstore_impl.h" #include "blockstore_impl.h"
blockstore_journal_check_t::blockstore_journal_check_t(blockstore_impl_t *bs) blockstore_journal_check_t::blockstore_journal_check_t(blockstore_impl_t *bs)
{ {
this->bs = bs; this->bs = bs;
sectors_to_write = 0; sectors_required = 0;
next_pos = bs->journal.next_free; next_pos = bs->journal.next_free;
next_sector = bs->journal.cur_sector; next_sector = bs->journal.cur_sector;
first_sector = -1; first_sector = -1;
@ -20,26 +17,21 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
int required = entries_required; int required = entries_required;
while (1) while (1)
{ {
int fits = bs->journal.no_same_sector_overwrites && next_pos == bs->journal.next_free && bs->journal.sector_info[next_sector].written int fits = (bs->journal.block_size - next_in_pos) / size;
? 0
: (bs->journal.block_size - next_in_pos) / size;
if (fits > 0) if (fits > 0)
{ {
if (fits > required)
{
fits = required;
}
if (first_sector == -1) if (first_sector == -1)
{ {
first_sector = next_sector; first_sector = next_sector;
} }
required -= fits; required -= fits;
next_in_pos += fits * size; next_in_pos += fits * size;
sectors_to_write++; sectors_required++;
} }
else if (bs->journal.sector_info[next_sector].dirty) else if (bs->journal.sector_info[next_sector].dirty)
{ {
sectors_to_write++; // sectors_required is more like "sectors to write"
sectors_required++;
} }
if (required <= 0) if (required <= 0)
{ {
@ -62,7 +54,7 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
" is too small for a batch of "+std::to_string(entries_required)+" entries of "+std::to_string(size)+" bytes" " is too small for a batch of "+std::to_string(entries_required)+" entries of "+std::to_string(size)+" bytes"
); );
} }
if (bs->journal.sector_info[next_sector].flush_count > 0 || if (bs->journal.sector_info[next_sector].usage_count > 0 ||
bs->journal.sector_info[next_sector].dirty) bs->journal.sector_info[next_sector].dirty)
{ {
// No memory buffer available. Wait for it. // No memory buffer available. Wait for it.
@ -74,18 +66,17 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
dirty++; dirty++;
used++; used++;
} }
if (bs->journal.sector_info[i].flush_count > 0) if (bs->journal.sector_info[i].usage_count > 0)
{ {
used++; used++;
} }
} }
// In fact, it's even rarer than "ran out of journal space", so print a warning // In fact, it's even rarer than "ran out of journal space", so print a warning
printf( printf(
"Ran out of journal sector buffers: %d/%lu buffers used (%d dirty), next buffer (%ld)" "Ran out of journal sector buffers: %d/%lu buffers used (%d dirty), next buffer (%ld) is %s and flushed %lu times\n",
" is %s and flushed %lu times. Consider increasing \'journal_sector_buffer_count\'\n",
used, bs->journal.sector_count, dirty, next_sector, used, bs->journal.sector_count, dirty, next_sector,
bs->journal.sector_info[next_sector].dirty ? "dirty" : "not dirty", bs->journal.sector_info[next_sector].dirty ? "dirty" : "not dirty",
bs->journal.sector_info[next_sector].flush_count bs->journal.sector_info[next_sector].usage_count
); );
PRIV(op)->wait_for = WAIT_JOURNAL_BUFFER; PRIV(op)->wait_for = WAIT_JOURNAL_BUFFER;
return 0; return 0;
@ -104,8 +95,10 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
{ {
// No space in the journal. Wait until used_start changes. // No space in the journal. Wait until used_start changes.
printf( printf(
"Ran out of journal space (used_start=%08lx, next_free=%08lx, dirty_start=%08lx)\n", "Ran out of journal space (free space: %lu bytes)\n",
bs->journal.used_start, bs->journal.next_free, bs->journal.dirty_start (bs->journal.next_free >= bs->journal.used_start
? bs->journal.len-bs->journal.block_size - (bs->journal.next_free-bs->journal.used_start)
: bs->journal.used_start - bs->journal.next_free)
); );
PRIV(op)->wait_for = WAIT_JOURNAL; PRIV(op)->wait_for = WAIT_JOURNAL;
bs->flusher->request_trim(); bs->flusher->request_trim();
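
// The right-hand warning above prints the journal's free byte count; here is
// a standalone sketch of that ring-buffer arithmetic (the first block holds
// the journal superblock, hence the len - block_size term).

#include <cassert>
#include <cstdint>

static uint64_t journal_free_bytes(uint64_t len, uint64_t block_size,
    uint64_t next_free, uint64_t used_start)
{
    return next_free >= used_start
        ? (len - block_size) - (next_free - used_start)
        : used_start - next_free;
}

int main()
{
    // 16 MiB journal, 4 KiB blocks, writer 1 MiB ahead of the reader:
    assert(journal_free_bytes(16<<20, 4096, (1<<20)+4096, 4096)
        == (16<<20) - 4096 - (1<<20));
    // Wrapped case: the reader is ahead of the writer in the ring.
    assert(journal_free_bytes(16<<20, 4096, 4096, (1<<20)) == (1<<20) - 4096);
    return 0;
}
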
@ -117,21 +110,20 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
journal_entry* prefill_single_journal_entry(journal_t & journal, uint16_t type, uint32_t size) journal_entry* prefill_single_journal_entry(journal_t & journal, uint16_t type, uint32_t size)
{ {
if (!journal.entry_fits(size)) if (journal.block_size - journal.in_sector_pos < size)
{ {
assert(!journal.sector_info[journal.cur_sector].dirty); assert(!journal.sector_info[journal.cur_sector].dirty);
// Move to the next journal sector // Move to the next journal sector
if (journal.sector_info[journal.cur_sector].flush_count > 0) if (journal.sector_info[journal.cur_sector].usage_count > 0)
{ {
// Also select next sector buffer in memory // Also select next sector buffer in memory
journal.cur_sector = ((journal.cur_sector + 1) % journal.sector_count); journal.cur_sector = ((journal.cur_sector + 1) % journal.sector_count);
assert(!journal.sector_info[journal.cur_sector].flush_count); assert(!journal.sector_info[journal.cur_sector].usage_count);
} }
else else
{ {
journal.dirty_start = journal.next_free; journal.dirty_start = journal.next_free;
} }
journal.sector_info[journal.cur_sector].written = false;
journal.sector_info[journal.cur_sector].offset = journal.next_free; journal.sector_info[journal.cur_sector].offset = journal.next_free;
journal.in_sector_pos = 0; journal.in_sector_pos = 0;
journal.next_free = (journal.next_free+journal.block_size) < journal.len ? journal.next_free + journal.block_size : journal.block_size; journal.next_free = (journal.next_free+journal.block_size) < journal.len ? journal.next_free + journal.block_size : journal.block_size;
@ -156,8 +148,7 @@ journal_entry* prefill_single_journal_entry(journal_t & journal, uint16_t type,
void prepare_journal_sector_write(journal_t & journal, int cur_sector, io_uring_sqe *sqe, std::function<void(ring_data_t*)> cb) void prepare_journal_sector_write(journal_t & journal, int cur_sector, io_uring_sqe *sqe, std::function<void(ring_data_t*)> cb)
{ {
journal.sector_info[cur_sector].dirty = false; journal.sector_info[cur_sector].dirty = false;
journal.sector_info[cur_sector].written = true; journal.sector_info[cur_sector].usage_count++;
journal.sector_info[cur_sector].flush_count++;
ring_data_t *data = ((ring_data_t*)sqe->user_data); ring_data_t *data = ((ring_data_t*)sqe->user_data);
data->iov = (struct iovec){ data->iov = (struct iovec){
(journal.inmemory (journal.inmemory
@ -184,7 +175,7 @@ journal_t::~journal_t()
buffer = NULL; buffer = NULL;
} }
uint64_t journal_t::get_trim_pos() bool journal_t::trim()
{ {
auto journal_used_it = used_sectors.lower_bound(used_start); auto journal_used_it = used_sectors.lower_bound(used_start);
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
@ -202,19 +193,26 @@ uint64_t journal_t::get_trim_pos()
if (journal_used_it == used_sectors.end()) if (journal_used_it == used_sectors.end())
{ {
// Journal is empty // Journal is empty
return next_free; used_start = next_free;
} }
else else
{ {
// next_free does not need updating during trim used_start = journal_used_it->first;
return journal_used_it->first; // next_free does not need updating here
} }
} }
else if (journal_used_it->first > used_start) else if (journal_used_it->first > used_start)
{ {
// Journal is cleared up to <journal_used_it> // Journal is cleared up to <journal_used_it>
return journal_used_it->first; used_start = journal_used_it->first;
} }
else
{
// Can't trim journal // Can't trim journal
return used_start; return false;
}
#ifdef BLOCKSTORE_DEBUG
printf("Journal trimmed to %08lx (next_free=%08lx)\n", used_start, next_free);
#endif
return true;
} }
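
// A condensed model of the trim logic above: used_start advances to the
// first journal sector that still holds referenced entries. This sketch
// ignores the dirty_start/next_free interplay of the real code.

#include <cstdint>
#include <map>

static bool trim_sketch(std::map<uint64_t, uint64_t> & used_sectors,
    uint64_t & used_start, uint64_t next_free)
{
    auto it = used_sectors.lower_bound(used_start);
    if (it == used_sectors.end())
    {
        if (used_sectors.empty())
        {
            used_start = next_free; // journal is empty
            return true;
        }
        it = used_sectors.begin(); // wrap around the ring
    }
    if (it->first == used_start)
        return false; // the first used sector is still referenced
    used_start = it->first;
    return true;
}

int main()
{
    std::map<uint64_t, uint64_t> used = { { 0x3000, 2 } };
    uint64_t used_start = 0x1000;
    return trim_sketch(used, used_start, 0x5000) && used_start == 0x3000 ? 0 : 1;
}
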

View File

@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#pragma once #pragma once
#include "crc32c.h" #include "crc32c.h"
@ -10,8 +7,6 @@
#define JOURNAL_BUFFER_SIZE 4*1024*1024 #define JOURNAL_BUFFER_SIZE 4*1024*1024
// We reserve some extra space for future stabilize requests during writes // We reserve some extra space for future stabilize requests during writes
// FIXME: This value should be dynamic i.e. Blockstore ideally shouldn't allow
// writing more than can be stabilized afterwards
#define JOURNAL_STABILIZE_RESERVATION 65536 #define JOURNAL_STABILIZE_RESERVATION 65536
// Journal entries // Journal entries
@ -24,9 +19,7 @@
#define JE_STABLE 0x04 #define JE_STABLE 0x04
#define JE_DELETE 0x05 #define JE_DELETE 0x05
#define JE_ROLLBACK 0x06 #define JE_ROLLBACK 0x06
#define JE_SMALL_WRITE_INSTANT 0x07 #define JE_MAX 0x06
#define JE_BIG_WRITE_INSTANT 0x08
#define JE_MAX 0x08
// crc32c comes first to ease calculation and is equal to crc32() // crc32c comes first to ease calculation and is equal to crc32()
struct __attribute__((__packed__)) journal_entry_start struct __attribute__((__packed__)) journal_entry_start
@ -54,9 +47,6 @@ struct __attribute__((__packed__)) journal_entry_small_write
// data_offset is its offset within journal // data_offset is its offset within journal
uint64_t data_offset; uint64_t data_offset;
uint32_t crc32_data; uint32_t crc32_data;
// small_write and big_write entries are followed by the "external" bitmap
// its size is dynamic and included in journal entry's <size> field
uint8_t bitmap[];
}; };
struct __attribute__((__packed__)) journal_entry_big_write struct __attribute__((__packed__)) journal_entry_big_write
@ -71,9 +61,6 @@ struct __attribute__((__packed__)) journal_entry_big_write
uint32_t offset; uint32_t offset;
uint32_t len; uint32_t len;
uint64_t location; uint64_t location;
// small_write and big_write entries are followed by the "external" bitmap
// its size is dynamic and included in journal entry's <size> field
uint8_t bitmap[];
}; };
struct __attribute__((__packed__)) journal_entry_stable struct __attribute__((__packed__)) journal_entry_stable
@ -139,8 +126,7 @@ inline uint32_t je_crc32(journal_entry *je)
struct journal_sector_info_t struct journal_sector_info_t
{ {
uint64_t offset; uint64_t offset;
uint64_t flush_count; uint64_t usage_count;
bool written;
bool dirty; bool dirty;
}; };
@ -165,7 +151,6 @@ struct journal_t
void *sector_buf = NULL; void *sector_buf = NULL;
journal_sector_info_t *sector_info = NULL; journal_sector_info_t *sector_info = NULL;
uint64_t sector_count; uint64_t sector_count;
bool no_same_sector_overwrites = false;
int cur_sector = 0; int cur_sector = 0;
int in_sector_pos = 0; int in_sector_pos = 0;
@ -175,19 +160,13 @@ struct journal_t
~journal_t(); ~journal_t();
bool trim(); bool trim();
uint64_t get_trim_pos();
inline bool entry_fits(int size)
{
return !(block_size - in_sector_pos < size ||
no_same_sector_overwrites && sector_info[cur_sector].written);
}
}; };
struct blockstore_journal_check_t struct blockstore_journal_check_t
{ {
blockstore_impl_t *bs; blockstore_impl_t *bs;
uint64_t next_pos, next_sector, next_in_pos; uint64_t next_pos, next_sector, next_in_pos;
int sectors_to_write, first_sector; int sectors_required, first_sector;
bool right_dir; // writing to the end or the beginning of the ring buffer bool right_dir; // writing to the end or the beginning of the ring buffer
blockstore_journal_check_t(blockstore_impl_t *bs); blockstore_journal_check_t(blockstore_impl_t *bs);

View File

@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include <sys/file.h> #include <sys/file.h>
#include "blockstore_impl.h" #include "blockstore_impl.h"
@ -62,15 +59,12 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config)
journal_device = config["journal_device"]; journal_device = config["journal_device"];
journal.offset = strtoull(config["journal_offset"].c_str(), NULL, 10); journal.offset = strtoull(config["journal_offset"].c_str(), NULL, 10);
journal.sector_count = strtoull(config["journal_sector_buffer_count"].c_str(), NULL, 10); journal.sector_count = strtoull(config["journal_sector_buffer_count"].c_str(), NULL, 10);
journal.no_same_sector_overwrites = config["journal_no_same_sector_overwrites"] == "true" ||
config["journal_no_same_sector_overwrites"] == "1" || config["journal_no_same_sector_overwrites"] == "yes";
journal.inmemory = config["inmemory_journal"] != "false"; journal.inmemory = config["inmemory_journal"] != "false";
disk_alignment = strtoull(config["disk_alignment"].c_str(), NULL, 10); disk_alignment = strtoull(config["disk_alignment"].c_str(), NULL, 10);
journal_block_size = strtoull(config["journal_block_size"].c_str(), NULL, 10); journal_block_size = strtoull(config["journal_block_size"].c_str(), NULL, 10);
meta_block_size = strtoull(config["meta_block_size"].c_str(), NULL, 10); meta_block_size = strtoull(config["meta_block_size"].c_str(), NULL, 10);
bitmap_granularity = strtoull(config["bitmap_granularity"].c_str(), NULL, 10); bitmap_granularity = strtoull(config["bitmap_granularity"].c_str(), NULL, 10);
flusher_count = strtoull(config["flusher_count"].c_str(), NULL, 10); flusher_count = strtoull(config["flusher_count"].c_str(), NULL, 10);
max_write_iodepth = strtoull(config["max_write_iodepth"].c_str(), NULL, 10);
// Validate // Validate
if (!block_size) if (!block_size)
{ {
@ -84,17 +78,13 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config)
{ {
flusher_count = 32; flusher_count = 32;
} }
if (!max_write_iodepth)
{
max_write_iodepth = 128;
}
if (!disk_alignment) if (!disk_alignment)
{ {
disk_alignment = 4096; disk_alignment = 4096;
} }
else if (disk_alignment % MEM_ALIGNMENT) else if (disk_alignment % MEM_ALIGNMENT)
{ {
throw std::runtime_error("disk_alignment must be a multiple of "+std::to_string(MEM_ALIGNMENT)); throw std::runtime_error("disk_alingment must be a multiple of "+std::to_string(MEM_ALIGNMENT));
} }
if (!journal_block_size) if (!journal_block_size)
{ {
@ -118,7 +108,7 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config)
} }
if (!bitmap_granularity) if (!bitmap_granularity)
{ {
bitmap_granularity = DEFAULT_BITMAP_GRANULARITY; bitmap_granularity = 4096;
} }
else if (bitmap_granularity % disk_alignment) else if (bitmap_granularity % disk_alignment)
{ {
@ -170,7 +160,7 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config)
} }
// init some fields // init some fields
clean_entry_bitmap_size = block_size / bitmap_granularity / 8; clean_entry_bitmap_size = block_size / bitmap_granularity / 8;
clean_entry_size = sizeof(clean_disk_entry) + 2*clean_entry_bitmap_size; clean_entry_size = sizeof(clean_disk_entry) + clean_entry_bitmap_size;
journal.block_size = journal_block_size; journal.block_size = journal_block_size;
journal.next_free = journal_block_size; journal.next_free = journal_block_size;
journal.used_start = journal_block_size; journal.used_start = journal_block_size;
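
// Worked example of the sizing math above, with illustrative values
// (128 KiB objects, 4 KiB bitmap granularity, 24-byte clean_disk_entry
// header: 16-byte oid + 8-byte version).

#include <cassert>
#include <cstdint>

int main()
{
    uint64_t block_size = 128*1024, bitmap_granularity = 4096;
    uint64_t clean_entry_bitmap_size = block_size / bitmap_granularity / 8; // 4 bytes
    uint64_t header = 24;
    // Left-hand variant stores two bitmaps (allocation + external attributes):
    assert(header + 2*clean_entry_bitmap_size == 32);
    // Right-hand variant stores one:
    assert(header + clean_entry_bitmap_size == 28);
    return 0;
}
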
@ -237,7 +227,7 @@ void blockstore_impl_t::calc_lengths()
} }
else if (clean_entry_bitmap_size) else if (clean_entry_bitmap_size)
{ {
clean_bitmap = (uint8_t*)malloc(block_count * 2*clean_entry_bitmap_size); clean_bitmap = (uint8_t*)malloc(block_count * clean_entry_bitmap_size);
if (!clean_bitmap) if (!clean_bitmap)
throw std::runtime_error("Failed to allocate memory for the metadata sparse write bitmap"); throw std::runtime_error("Failed to allocate memory for the metadata sparse write bitmap");
} }

View File

@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include "blockstore_impl.h" #include "blockstore_impl.h"
int blockstore_impl_t::fulfill_read_push(blockstore_op_t *op, void *buf, uint64_t offset, uint64_t len, int blockstore_impl_t::fulfill_read_push(blockstore_op_t *op, void *buf, uint64_t offset, uint64_t len,
@ -40,7 +37,6 @@ int blockstore_impl_t::fulfill_read_push(blockstore_op_t *op, void *buf, uint64_
return 1; return 1;
} }
// FIXME I've seen a bug here so I want some tests
int blockstore_impl_t::fulfill_read(blockstore_op_t *read_op, uint64_t &fulfilled, uint32_t item_start, uint32_t item_end, int blockstore_impl_t::fulfill_read(blockstore_op_t *read_op, uint64_t &fulfilled, uint32_t item_start, uint32_t item_end,
uint32_t item_state, uint64_t item_version, uint64_t item_location) uint32_t item_state, uint64_t item_version, uint64_t item_location)
{ {
@ -53,20 +49,8 @@ int blockstore_impl_t::fulfill_read(blockstore_op_t *read_op, uint64_t &fulfille
while (1) while (1)
{ {
for (; it != PRIV(read_op)->read_vec.end(); it++) for (; it != PRIV(read_op)->read_vec.end(); it++)
{
if (it->offset >= cur_start) if (it->offset >= cur_start)
{
break; break;
}
else if (it->offset + it->len > cur_start)
{
cur_start = it->offset + it->len;
if (cur_start >= item_end)
{
goto endwhile;
}
}
}
if (it == PRIV(read_op)->read_vec.end() || it->offset > cur_start) if (it == PRIV(read_op)->read_vec.end() || it->offset > cur_start)
{ {
fulfill_read_t el = { fulfill_read_t el = {
@ -85,30 +69,12 @@ int blockstore_impl_t::fulfill_read(blockstore_op_t *read_op, uint64_t &fulfille
} }
cur_start = it->offset + it->len; cur_start = it->offset + it->len;
if (it == PRIV(read_op)->read_vec.end() || cur_start >= item_end) if (it == PRIV(read_op)->read_vec.end() || cur_start >= item_end)
{
break; break;
} }
} }
}
endwhile:
return 1; return 1;
} }
uint8_t* blockstore_impl_t::get_clean_entry_bitmap(uint64_t block_loc, int offset)
{
uint8_t *clean_entry_bitmap;
uint64_t meta_loc = block_loc >> block_order;
if (inmemory_meta)
{
uint64_t sector = (meta_loc / (meta_block_size / clean_entry_size)) * meta_block_size;
uint64_t pos = (meta_loc % (meta_block_size / clean_entry_size));
clean_entry_bitmap = (uint8_t*)(metadata_buffer + sector + pos*clean_entry_size + sizeof(clean_disk_entry) + offset);
}
else
clean_entry_bitmap = (uint8_t*)(clean_bitmap + meta_loc*2*clean_entry_bitmap_size + offset);
return clean_entry_bitmap;
}
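
// Worked example for the in-memory metadata addressing in
// get_clean_entry_bitmap() above, with illustrative sizes
// (4 KiB metadata blocks, 32-byte clean entries):

#include <cassert>
#include <cstdint>

int main()
{
    uint64_t meta_block_size = 4096, clean_entry_size = 32;
    uint64_t entries_per_block = meta_block_size / clean_entry_size; // 128
    uint64_t meta_loc = 300; // index of the object's data block
    uint64_t sector = (meta_loc / entries_per_block) * meta_block_size;
    uint64_t pos = meta_loc % entries_per_block;
    assert(sector == 2*4096 && pos == 44);
    return 0;
}
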
int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op) int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
{ {
auto clean_it = clean_db.find(read_op->oid); auto clean_it = clean_db.find(read_op->oid);
@ -127,7 +93,7 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
read_op->version = 0; read_op->version = 0;
read_op->retval = read_op->len; read_op->retval = read_op->len;
FINISH_OP(read_op); FINISH_OP(read_op);
return 2; return 1;
} }
uint64_t fulfilled = 0; uint64_t fulfilled = 0;
PRIV(read_op)->pending_ops = 0; PRIV(read_op)->pending_ops = 0;
@ -149,11 +115,6 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
if (!result_version) if (!result_version)
{ {
result_version = dirty_it->first.version; result_version = dirty_it->first.version;
if (read_op->bitmap)
{
void *bmp_ptr = (clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap);
memcpy(read_op->bitmap, bmp_ptr, clean_entry_bitmap_size);
}
} }
if (!fulfill_read(read_op, fulfilled, dirty.offset, dirty.offset + dirty.len, if (!fulfill_read(read_op, fulfilled, dirty.offset, dirty.offset + dirty.len,
dirty.state, dirty_it->first.version, dirty.location + (IS_JOURNAL(dirty.state) ? 0 : dirty.offset))) dirty.state, dirty_it->first.version, dirty.location + (IS_JOURNAL(dirty.state) ? 0 : dirty.offset)))
@ -175,17 +136,12 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
if (!result_version) if (!result_version)
{ {
result_version = clean_it->second.version; result_version = clean_it->second.version;
if (read_op->bitmap)
{
void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, clean_entry_bitmap_size);
memcpy(read_op->bitmap, bmp_ptr, clean_entry_bitmap_size);
}
} }
if (fulfilled < read_op->len) if (fulfilled < read_op->len)
{ {
if (!clean_entry_bitmap_size) if (!clean_entry_bitmap_size)
{ {
if (!fulfill_read(read_op, fulfilled, 0, block_size, (BS_ST_BIG_WRITE | BS_ST_STABLE), 0, clean_it->second.location)) if (!fulfill_read(read_op, fulfilled, 0, block_size, ST_CURRENT, 0, clean_it->second.location))
{ {
// need to wait. undo added requests, don't dequeue op // need to wait. undo added requests, don't dequeue op
PRIV(read_op)->read_vec.clear(); PRIV(read_op)->read_vec.clear();
@ -194,7 +150,18 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
} }
else else
{ {
uint8_t *clean_entry_bitmap = get_clean_entry_bitmap(clean_it->second.location, 0); uint64_t meta_loc = clean_it->second.location >> block_order;
uint8_t *clean_entry_bitmap;
if (inmemory_meta)
{
uint64_t sector = (meta_loc / (meta_block_size / clean_entry_size)) * meta_block_size;
uint64_t pos = (meta_loc % (meta_block_size / clean_entry_size));
clean_entry_bitmap = (uint8_t*)(metadata_buffer + sector + pos*clean_entry_size + sizeof(clean_disk_entry));
}
else
{
clean_entry_bitmap = (uint8_t*)(clean_bitmap + meta_loc*clean_entry_bitmap_size);
}
uint64_t bmp_start = 0, bmp_end = 0, bmp_size = block_size/bitmap_granularity; uint64_t bmp_start = 0, bmp_end = 0, bmp_size = block_size/bitmap_granularity;
while (bmp_start < bmp_size) while (bmp_start < bmp_size)
{ {
@ -205,8 +172,8 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
if (bmp_end > bmp_start) if (bmp_end > bmp_start)
{ {
// fill with zeroes // fill with zeroes
assert(fulfill_read(read_op, fulfilled, bmp_start * bitmap_granularity, fulfill_read(read_op, fulfilled, bmp_start * bitmap_granularity,
bmp_end * bitmap_granularity, (BS_ST_DELETE | BS_ST_STABLE), 0, 0)); bmp_end * bitmap_granularity, ST_DEL_STABLE, 0, 0);
} }
bmp_start = bmp_end; bmp_start = bmp_end;
while (clean_entry_bitmap[bmp_end >> 3] & (1 << (bmp_end & 0x7)) && bmp_end < bmp_size) while (clean_entry_bitmap[bmp_end >> 3] & (1 << (bmp_end & 0x7)) && bmp_end < bmp_size)
@ -216,8 +183,7 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
if (bmp_end > bmp_start) if (bmp_end > bmp_start)
{ {
if (!fulfill_read(read_op, fulfilled, bmp_start * bitmap_granularity, if (!fulfill_read(read_op, fulfilled, bmp_start * bitmap_granularity,
bmp_end * bitmap_granularity, (BS_ST_BIG_WRITE | BS_ST_STABLE), 0, bmp_end * bitmap_granularity, ST_CURRENT, 0, clean_it->second.location + bmp_start * bitmap_granularity))
clean_it->second.location + bmp_start * bitmap_granularity))
{ {
// need to wait. undo added requests, don't dequeue op // need to wait. undo added requests, don't dequeue op
PRIV(read_op)->read_vec.clear(); PRIV(read_op)->read_vec.clear();
@ -232,7 +198,7 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
else if (fulfilled < read_op->len) else if (fulfilled < read_op->len)
{ {
// fill remaining parts with zeroes // fill remaining parts with zeroes
assert(fulfill_read(read_op, fulfilled, 0, block_size, (BS_ST_DELETE | BS_ST_STABLE), 0, 0)); fulfill_read(read_op, fulfilled, 0, block_size, ST_DEL_STABLE, 0, 0);
} }
assert(fulfilled == read_op->len); assert(fulfilled == read_op->len);
read_op->version = result_version; read_op->version = result_version;
@ -246,10 +212,10 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
} }
read_op->retval = read_op->len; read_op->retval = read_op->len;
FINISH_OP(read_op); FINISH_OP(read_op);
return 2; return 1;
} }
read_op->retval = 0; read_op->retval = 0;
return 2; return 1;
} }
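
// A condensed model of the bitmap walk in dequeue_read() above: runs of
// clear bits are served as zeroes, runs of set bits are read from the data
// block. Granule numbers and the demo bitmap are illustrative.

#include <cstdint>
#include <cstdio>

static void scan_bitmap(const uint8_t *bmp, uint64_t bmp_size)
{
    uint64_t bmp_start = 0, bmp_end = 0;
    while (bmp_start < bmp_size)
    {
        while (bmp_end < bmp_size && !(bmp[bmp_end >> 3] & (1 << (bmp_end & 7))))
            bmp_end++;
        if (bmp_end > bmp_start)
            printf("zero-fill granules [%lu, %lu)\n", bmp_start, bmp_end);
        bmp_start = bmp_end;
        while (bmp_end < bmp_size && (bmp[bmp_end >> 3] & (1 << (bmp_end & 7))))
            bmp_end++;
        if (bmp_end > bmp_start)
            printf("read granules [%lu, %lu) from the data block\n", bmp_start, bmp_end);
        bmp_start = bmp_end;
    }
}

int main()
{
    uint8_t bmp[2] = { 0x0F, 0xF0 }; // granules 0-3 and 12-15 are written
    scan_bitmap(bmp, 16);
    return 0;
}
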
void blockstore_impl_t::handle_read_event(ring_data_t *data, blockstore_op_t *op) void blockstore_impl_t::handle_read_event(ring_data_t *data, blockstore_op_t *op)

View File

@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include "blockstore_impl.h" #include "blockstore_impl.h"
int blockstore_impl_t::dequeue_rollback(blockstore_op_t *op) int blockstore_impl_t::dequeue_rollback(blockstore_op_t *op)
@ -9,14 +6,10 @@ int blockstore_impl_t::dequeue_rollback(blockstore_op_t *op)
{ {
return continue_rollback(op); return continue_rollback(op);
} }
obj_ver_id *v, *nv; obj_ver_id* v;
int i, todo = op->len; int i, todo = op->len;
for (i = 0, v = (obj_ver_id*)op->buf, nv = (obj_ver_id*)op->buf; i < op->len; i++, v++, nv++) for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
{ {
if (nv != v)
{
*nv = *v;
}
// Check that there are some versions greater than v->version (which may be zero), // Check that there are some versions greater than v->version (which may be zero),
// check that they're unstable, synced, and not currently written to // check that they're unstable, synced, and not currently written to
auto dirty_it = dirty_db.lower_bound((obj_ver_id){ auto dirty_it = dirty_db.lower_bound((obj_ver_id){
@ -25,32 +18,31 @@ int blockstore_impl_t::dequeue_rollback(blockstore_op_t *op)
}); });
if (dirty_it == dirty_db.begin()) if (dirty_it == dirty_db.begin())
{ {
skip_ov: if (v->version == 0)
// Already rolled back, skip this object version {
todo--; // Already rolled back
nv--; // FIXME Skip this object version
continue; }
bad_op:
op->retval = -ENOENT;
FINISH_OP(op);
return 1;
} }
else else
{ {
dirty_it--; dirty_it--;
if (dirty_it->first.oid != v->oid || dirty_it->first.version < v->version) if (dirty_it->first.oid != v->oid || dirty_it->first.version < v->version)
{ {
goto skip_ov; goto bad_op;
} }
while (dirty_it->first.oid == v->oid && dirty_it->first.version > v->version) while (dirty_it->first.oid == v->oid && dirty_it->first.version > v->version)
{ {
if (IS_IN_FLIGHT(dirty_it->second.state)) if (!IS_SYNCED(dirty_it->second.state) ||
{
// Object write is still in progress. Wait until the write request completes
return 0;
}
else if (!IS_SYNCED(dirty_it->second.state) ||
IS_STABLE(dirty_it->second.state)) IS_STABLE(dirty_it->second.state))
{ {
op->retval = -EBUSY; op->retval = -EBUSY;
FINISH_OP(op); FINISH_OP(op);
return 2; return 1;
} }
if (dirty_it == dirty_db.begin()) if (dirty_it == dirty_db.begin())
{ {
@ -60,14 +52,6 @@ skip_ov:
} }
} }
} }
op->len = todo;
if (!todo)
{
// Already rolled back
op->retval = 0;
FINISH_OP(op);
return 2;
}
// Check journal space // Check journal space
blockstore_journal_check_t space_check(this); blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, todo, sizeof(journal_entry_rollback), 0)) if (!space_check.check_available(op, todo, sizeof(journal_entry_rollback), 0))
@ -75,38 +59,43 @@ skip_ov:
return 0; return 0;
} }
// There is sufficient space. Get SQEs // There is sufficient space. Get SQEs
struct io_uring_sqe *sqe[space_check.sectors_to_write]; struct io_uring_sqe *sqe[space_check.sectors_required];
for (i = 0; i < space_check.sectors_to_write; i++) for (i = 0; i < space_check.sectors_required; i++)
{ {
BS_SUBMIT_GET_SQE_DECL(sqe[i]); BS_SUBMIT_GET_SQE_DECL(sqe[i]);
} }
// Prepare and submit journal entries // Prepare and submit journal entries
auto cb = [this, op](ring_data_t *data) { handle_rollback_event(data, op); }; auto cb = [this, op](ring_data_t *data) { handle_rollback_event(data, op); };
int s = 0, cur_sector = -1; int s = 0, cur_sector = -1;
for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++) if ((journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_rollback) &&
{
if (!journal.entry_fits(sizeof(journal_entry_rollback)) &&
journal.sector_info[journal.cur_sector].dirty) journal.sector_info[journal.cur_sector].dirty)
{ {
if (cur_sector == -1) if (cur_sector == -1)
PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector; PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], cb);
cur_sector = journal.cur_sector; cur_sector = journal.cur_sector;
prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
} }
for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
{
journal_entry_rollback *je = (journal_entry_rollback*) journal_entry_rollback *je = (journal_entry_rollback*)
prefill_single_journal_entry(journal, JE_ROLLBACK, sizeof(journal_entry_rollback)); prefill_single_journal_entry(journal, JE_ROLLBACK, sizeof(journal_entry_rollback));
journal.sector_info[journal.cur_sector].dirty = false;
je->oid = v->oid; je->oid = v->oid;
je->version = v->version; je->version = v->version;
je->crc32 = je_crc32((journal_entry*)je); je->crc32 = je_crc32((journal_entry*)je);
journal.crc32_last = je->crc32; journal.crc32_last = je->crc32;
} if (cur_sector != journal.cur_sector)
prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], cb); {
assert(s == space_check.sectors_to_write);
if (cur_sector == -1) if (cur_sector == -1)
PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector; PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
cur_sector = journal.cur_sector;
prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
}
}
PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector; PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
PRIV(op)->pending_ops = s; PRIV(op)->pending_ops = s;
PRIV(op)->op_state = 1; PRIV(op)->op_state = 1;
inflight_writes++;
return 1; return 1;
} }
@ -126,8 +115,11 @@ resume_2:
resume_3: resume_3:
if (!disable_journal_fsync) if (!disable_journal_fsync)
{ {
io_uring_sqe *sqe; io_uring_sqe *sqe = get_sqe();
BS_SUBMIT_GET_SQE_DECL(sqe); if (!sqe)
{
return 0;
}
ring_data_t *data = ((ring_data_t*)sqe->user_data); ring_data_t *data = ((ring_data_t*)sqe->user_data);
my_uring_prep_fsync(sqe, journal.fd, IORING_FSYNC_DATASYNC); my_uring_prep_fsync(sqe, journal.fd, IORING_FSYNC_DATASYNC);
data->iov = { 0 }; data->iov = { 0 };
@ -144,11 +136,12 @@ resume_5:
{ {
mark_rolled_back(*v); mark_rolled_back(*v);
} }
flusher->mark_trim_possible(); journal.trim();
inflight_writes--;
// Acknowledge op // Acknowledge op
op->retval = 0; op->retval = 0;
FINISH_OP(op); FINISH_OP(op);
return 2; return 1;
} }
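
// continue_rollback() and its siblings are resumable state machines:
// PRIV(op)->op_state records the current step and execution re-enters at the
// matching resume_ label after each io_uring completion. A minimal
// standalone model of the pattern (step numbers are illustrative):

#include <cstdio>

struct op_t { int op_state = 1; };

// Returns 0 while the op must wait for completions, 1 when it finished.
static int continue_op(op_t *op)
{
    if (op->op_state == 2) return 0;      // journal writes still in flight
    if (op->op_state == 3) goto resume_3;
    if (op->op_state == 4) return 0;      // fsync still in flight
    if (op->op_state == 5) goto resume_5;
    // state 1: submit journal sector writes, then wait
    op->op_state = 2;
    return 0;
resume_3:
    // journal writes completed: submit an fsync and wait for it
    op->op_state = 4;
    return 0;
resume_5:
    printf("op done\n");
    return 1;
}

int main()
{
    op_t op;
    while (!continue_op(&op))
        if (op.op_state == 2 || op.op_state == 4)
            op.op_state++; // handle_*_event() does this on each completion
    return 0;
}
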
void blockstore_impl_t::mark_rolled_back(const obj_ver_id & ov) void blockstore_impl_t::mark_rolled_back(const obj_ver_id & ov)
@ -163,7 +156,10 @@ void blockstore_impl_t::mark_rolled_back(const obj_ver_id & ov)
auto rm_start = it; auto rm_start = it;
auto rm_end = it; auto rm_end = it;
it--; it--;
while (1) while (it->first.oid == ov.oid &&
it->first.version > ov.version &&
!IS_IN_FLIGHT(it->second.state) &&
!IS_STABLE(it->second.state))
{ {
if (it->first.oid != ov.oid) if (it->first.oid != ov.oid)
break; break;
@ -173,7 +169,7 @@ void blockstore_impl_t::mark_rolled_back(const obj_ver_id & ov)
max_unstable = it->first.version; max_unstable = it->first.version;
break; break;
} }
else if (IS_IN_FLIGHT(it->second.state) || IS_STABLE(it->second.state)) else if (IS_STABLE(it->second.state))
break; break;
// Remove entry // Remove entry
rm_start = it; rm_start = it;
@ -184,6 +180,7 @@ void blockstore_impl_t::mark_rolled_back(const obj_ver_id & ov)
if (rm_start != rm_end) if (rm_start != rm_end)
{ {
erase_dirty(rm_start, rm_end, UINT64_MAX); erase_dirty(rm_start, rm_end, UINT64_MAX);
}
auto unstab_it = unstable_writes.find(ov.oid); auto unstab_it = unstable_writes.find(ov.oid);
if (unstab_it != unstable_writes.end()) if (unstab_it != unstable_writes.end())
{ {
@ -193,7 +190,6 @@ void blockstore_impl_t::mark_rolled_back(const obj_ver_id & ov)
unstab_it->second = max_unstable; unstab_it->second = max_unstable;
} }
} }
}
} }
void blockstore_impl_t::handle_rollback_event(ring_data_t *data, blockstore_op_t *op) void blockstore_impl_t::handle_rollback_event(ring_data_t *data, blockstore_op_t *op)
@ -201,6 +197,7 @@ void blockstore_impl_t::handle_rollback_event(ring_data_t *data, blockstore_op_t
live = true; live = true;
if (data->res != data->iov.iov_len) if (data->res != data->iov.iov_len)
{ {
inflight_writes--;
throw std::runtime_error( throw std::runtime_error(
"write operation failed ("+std::to_string(data->res)+" != "+std::to_string(data->iov.iov_len)+ "write operation failed ("+std::to_string(data->res)+" != "+std::to_string(data->iov.iov_len)+
"). in-memory state is corrupted. AAAAAAAaaaaaaaaa!!!111" "). in-memory state is corrupted. AAAAAAAaaaaaaaaa!!!111"
@ -210,44 +207,19 @@ void blockstore_impl_t::handle_rollback_event(ring_data_t *data, blockstore_op_t
if (PRIV(op)->pending_ops == 0) if (PRIV(op)->pending_ops == 0)
{ {
PRIV(op)->op_state++; PRIV(op)->op_state++;
ringloop->wakeup(); if (!continue_rollback(op))
{
submit_queue.push_front(op);
}
} }
} }
void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start, blockstore_dirty_db_t::iterator dirty_end, uint64_t clean_loc) void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start, blockstore_dirty_db_t::iterator dirty_end, uint64_t clean_loc)
{ {
if (dirty_end == dirty_start)
{
return;
}
auto dirty_it = dirty_end; auto dirty_it = dirty_end;
while (dirty_it != dirty_start)
{
dirty_it--; dirty_it--;
if (IS_DELETE(dirty_it->second.state))
{
object_id oid = dirty_it->first.oid;
#ifdef BLOCKSTORE_DEBUG
printf("Unblock writes-after-delete %lx:%lx v%lx\n", oid.inode, oid.stripe, dirty_it->first.version);
#endif
dirty_it = dirty_end;
// Unblock operations blocked by delete flushing
uint32_t next_state = BS_ST_IN_FLIGHT;
while (dirty_it != dirty_db.end() && dirty_it->first.oid == oid)
{
if ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) == BS_ST_WAIT_DEL)
{
dirty_it->second.state = (dirty_it->second.state & ~BS_ST_WORKFLOW_MASK) | next_state;
if (IS_BIG_WRITE(dirty_it->second.state))
{
next_state = BS_ST_WAIT_BIG;
}
}
dirty_it++;
}
dirty_it = dirty_end;
dirty_it--;
}
while (1)
{
if (IS_BIG_WRITE(dirty_it->second.state) && dirty_it->second.location != clean_loc) if (IS_BIG_WRITE(dirty_it->second.state) && dirty_it->second.location != clean_loc)
{ {
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
@ -258,7 +230,7 @@ void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start,
int used = --journal.used_sectors[dirty_it->second.journal_sector]; int used = --journal.used_sectors[dirty_it->second.journal_sector];
#ifdef BLOCKSTORE_DEBUG #ifdef BLOCKSTORE_DEBUG
printf( printf(
"remove usage of journal offset %08lx by %lx:%lx v%lu (%d refs)\n", dirty_it->second.journal_sector, "remove usage of journal offset %08lx by %lu:%lu v%lu (%d refs)\n", dirty_it->second.journal_sector,
dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version, used dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version, used
); );
#endif #endif
@ -266,16 +238,6 @@ void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start,
{ {
journal.used_sectors.erase(dirty_it->second.journal_sector); journal.used_sectors.erase(dirty_it->second.journal_sector);
} }
if (clean_entry_bitmap_size > sizeof(void*))
{
free(dirty_it->second.bitmap);
dirty_it->second.bitmap = NULL;
}
if (dirty_it == dirty_start)
{
break;
}
dirty_it--;
} }
dirty_db.erase(dirty_start, dirty_end); dirty_db.erase(dirty_start, dirty_end);
} }
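
// erase_dirty() drops one journal.used_sectors reference per erased entry;
// a sector becomes trimmable once its count reaches zero. A tiny model of
// that bookkeeping (map key = journal sector offset, values illustrative):

#include <cassert>
#include <cstdint>
#include <map>

int main()
{
    std::map<uint64_t, uint64_t> used_sectors;
    used_sectors[0x2000] = 2; // two live journal entries in sector 0x2000
    if (--used_sectors[0x2000] == 0) // first entry erased
        used_sectors.erase(0x2000);
    assert(used_sectors.at(0x2000) == 1);
    if (--used_sectors[0x2000] == 0) // second entry erased: sector is free
        used_sectors.erase(0x2000);
    assert(used_sectors.find(0x2000) == used_sectors.end());
    return 0;
}
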

View File

@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #include "blockstore_impl.h"
 
 // Stabilize small write:
@@ -60,24 +57,19 @@ int blockstore_impl_t::dequeue_stable(blockstore_op_t *op)
             // No such object version
             op->retval = -ENOENT;
             FINISH_OP(op);
-            return 2;
+            return 1;
         }
         else
         {
             // Already stable
         }
     }
-    else if (IS_IN_FLIGHT(dirty_it->second.state))
-    {
-        // Object write is still in progress. Wait until the write request completes
-        return 0;
-    }
-    else if (!IS_SYNCED(dirty_it->second.state))
+    else if (IS_UNSYNCED(dirty_it->second.state))
     {
         // Object not synced yet. Caller must sync it first
         op->retval = -EBUSY;
         FINISH_OP(op);
-        return 2;
+        return 1;
     }
     else if (!IS_STABLE(dirty_it->second.state))
     {
@@ -89,7 +81,7 @@ int blockstore_impl_t::dequeue_stable(blockstore_op_t *op)
         // Already stable
         op->retval = 0;
         FINISH_OP(op);
-        return 2;
+        return 1;
     }
     // Check journal space
     blockstore_journal_check_t space_check(this);
@@ -98,39 +90,44 @@ int blockstore_impl_t::dequeue_stable(blockstore_op_t *op)
         return 0;
     }
     // There is sufficient space. Get SQEs
-    struct io_uring_sqe *sqe[space_check.sectors_to_write];
-    for (i = 0; i < space_check.sectors_to_write; i++)
+    struct io_uring_sqe *sqe[space_check.sectors_required];
+    for (i = 0; i < space_check.sectors_required; i++)
     {
         BS_SUBMIT_GET_SQE_DECL(sqe[i]);
     }
     // Prepare and submit journal entries
     auto cb = [this, op](ring_data_t *data) { handle_stable_event(data, op); };
     int s = 0, cur_sector = -1;
-    for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
-    {
-        // FIXME: Only stabilize versions that aren't stable yet
-        if (!journal.entry_fits(sizeof(journal_entry_stable)) &&
-            journal.sector_info[journal.cur_sector].dirty)
-        {
-            if (cur_sector == -1)
-                PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
-            prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], cb);
-            cur_sector = journal.cur_sector;
-        }
+    if ((journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_stable) &&
+        journal.sector_info[journal.cur_sector].dirty)
+    {
+        if (cur_sector == -1)
+            PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
+        cur_sector = journal.cur_sector;
+        prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
+    }
+    for (i = 0, v = (obj_ver_id*)op->buf; i < op->len; i++, v++)
+    {
+        // FIXME: Only stabilize versions that aren't stable yet
         journal_entry_stable *je = (journal_entry_stable*)
             prefill_single_journal_entry(journal, JE_STABLE, sizeof(journal_entry_stable));
+        journal.sector_info[journal.cur_sector].dirty = false;
         je->oid = v->oid;
         je->version = v->version;
         je->crc32 = je_crc32((journal_entry*)je);
         journal.crc32_last = je->crc32;
-    }
-    prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], cb);
-    assert(s == space_check.sectors_to_write);
-    if (cur_sector == -1)
-        PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
+        if (cur_sector != journal.cur_sector)
+        {
+            if (cur_sector == -1)
+                PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
+            cur_sector = journal.cur_sector;
+            prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
+        }
+    }
     PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
     PRIV(op)->pending_ops = s;
     PRIV(op)->op_state = 1;
+    inflight_writes++;
     return 1;
 }
@@ -150,8 +147,11 @@ resume_2:
 resume_3:
     if (!disable_journal_fsync)
     {
-        io_uring_sqe *sqe;
-        BS_SUBMIT_GET_SQE_DECL(sqe);
+        io_uring_sqe *sqe = get_sqe();
+        if (!sqe)
+        {
+            return 0;
+        }
         ring_data_t *data = ((ring_data_t*)sqe->user_data);
         my_uring_prep_fsync(sqe, journal.fd, IORING_FSYNC_DATASYNC);
         data->iov = { 0 };
@@ -170,10 +170,11 @@ resume_5:
         // Mark all dirty_db entries up to op->version as stable
         mark_stable(*v);
     }
+    inflight_writes--;
     // Acknowledge op
     op->retval = 0;
     FINISH_OP(op);
-    return 2;
+    return 1;
 }
 
 void blockstore_impl_t::mark_stable(const obj_ver_id & v)
@@ -183,18 +184,17 @@ void blockstore_impl_t::mark_stable(const obj_ver_id & v)
     {
         while (1)
         {
-            if ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) == BS_ST_SYNCED)
+            if (dirty_it->second.state == ST_J_SYNCED)
             {
-                dirty_it->second.state = (dirty_it->second.state & ~BS_ST_WORKFLOW_MASK) | BS_ST_STABLE;
-                // Allocations and deletions are counted when they're stabilized
-                if (IS_BIG_WRITE(dirty_it->second.state))
-                {
-                    inode_space_stats[dirty_it->first.oid.inode] += block_size;
-                }
-                else if (IS_DELETE(dirty_it->second.state))
-                {
-                    inode_space_stats[dirty_it->first.oid.inode] -= block_size;
-                }
+                dirty_it->second.state = ST_J_STABLE;
+            }
+            else if (dirty_it->second.state == ST_D_SYNCED)
+            {
+                dirty_it->second.state = ST_D_STABLE;
+            }
+            else if (dirty_it->second.state == ST_DEL_SYNCED)
+            {
+                dirty_it->second.state = ST_DEL_STABLE;
             }
             else if (IS_STABLE(dirty_it->second.state))
             {
@@ -210,6 +210,9 @@ void blockstore_impl_t::mark_stable(const obj_ver_id & v)
             break;
         }
     }
+#ifdef BLOCKSTORE_DEBUG
+    printf("enqueue_flush %lu:%lu v%lu\n", v.oid.inode, v.oid.stripe, v.version);
+#endif
     flusher->enqueue_flush(v);
     }
     auto unstab_it = unstable_writes.find(v.oid);
@@ -225,6 +228,7 @@ void blockstore_impl_t::handle_stable_event(ring_data_t *data, blockstore_op_t *op)
     live = true;
     if (data->res != data->iov.iov_len)
     {
+        inflight_writes--;
         throw std::runtime_error(
             "write operation failed ("+std::to_string(data->res)+" != "+std::to_string(data->iov.iov_len)+
             "). in-memory state is corrupted. AAAAAAAaaaaaaaaa!!!111"
@@ -234,6 +238,9 @@ void blockstore_impl_t::handle_stable_event(ring_data_t *data, blockstore_op_t *op)
     if (PRIV(op)->pending_ops == 0)
     {
         PRIV(op)->op_state++;
-        ringloop->wakeup();
+        if (!continue_stable(op))
+        {
+            submit_queue.push_front(op);
+        }
     }
 }
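
The stabilize path above also shows the queue contract of the dequeue_*/continue_* functions: 0 means "not enough SQEs or journal space, keep the operation queued and retry later", while a non-zero result means the operation was dequeued (the left-hand version additionally distinguishes 2 for "finished synchronously"). A sketch of the submit loop such a contract implies (illustrative, not the actual blockstore event loop):

#include <deque>

// dequeue() returns 0 to keep an op queued, non-zero once it is taken.
template<typename Op, typename DequeueFn>
void run_submit_queue(std::deque<Op*> & submit_queue, DequeueFn dequeue)
{
    for (auto it = submit_queue.begin(); it != submit_queue.end(); )
    {
        if (dequeue(*it) == 0)
            it++;                        // resources exhausted, retry later
        else
            it = submit_queue.erase(it); // submitted or completed
    }
}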

View File

@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #include "blockstore_impl.h"
 
 #define SYNC_HAS_SMALL 1
@@ -12,15 +9,8 @@
 #define SYNC_JOURNAL_SYNC_SENT 7
 #define SYNC_DONE 8
 
-int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_progress_sync)
+int blockstore_impl_t::dequeue_sync(blockstore_op_t *op)
 {
-    if (immediate_commit == IMMEDIATE_ALL)
-    {
-        // We can return immediately because sync is only dequeued after all previous writes
-        op->retval = 0;
-        FINISH_OP(op);
-        return 2;
-    }
     if (PRIV(op)->op_state == 0)
     {
         stop_sync_submitted = false;
@@ -36,15 +26,34 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op, bool queue_has_in_progress_sync)
             PRIV(op)->op_state = SYNC_HAS_SMALL;
         else
             PRIV(op)->op_state = SYNC_DONE;
+        // Always add sync to in_progress_syncs because we clear unsynced_big_writes and unsynced_small_writes
+        PRIV(op)->prev_sync_count = in_progress_syncs.size();
+        PRIV(op)->in_progress_ptr = in_progress_syncs.insert(in_progress_syncs.end(), op);
     }
+    continue_sync(op);
+    // Always dequeue because we always add syncs to in_progress_syncs
+    return 1;
+}
+
+int blockstore_impl_t::continue_sync(blockstore_op_t *op)
+{
+    auto cb = [this, op](ring_data_t *data) { handle_sync_event(data, op); };
     if (PRIV(op)->op_state == SYNC_HAS_SMALL)
     {
         // No big writes, just fsync the journal
+        for (; PRIV(op)->sync_small_checked < PRIV(op)->sync_small_writes.size(); PRIV(op)->sync_small_checked++)
+        {
+            if (IS_IN_FLIGHT(dirty_db[PRIV(op)->sync_small_writes[PRIV(op)->sync_small_checked]].state))
+            {
+                // Wait for small inflight writes to complete
+                return 0;
+            }
+        }
         if (journal.sector_info[journal.cur_sector].dirty)
         {
             // Write out the last journal sector if it happens to be dirty
             BS_SUBMIT_GET_ONLY_SQE(sqe);
-            prepare_journal_sector_write(journal, journal.cur_sector, sqe, [this, op](ring_data_t *data) { handle_sync_event(data, op); });
+            prepare_journal_sector_write(journal, journal.cur_sector, sqe, cb);
             PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
             PRIV(op)->pending_ops = 1;
             PRIV(op)->op_state = SYNC_JOURNAL_WRITE_SENT;
@@ -57,13 +66,21 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
     }
     if (PRIV(op)->op_state == SYNC_HAS_BIG)
     {
+        for (; PRIV(op)->sync_big_checked < PRIV(op)->sync_big_writes.size(); PRIV(op)->sync_big_checked++)
+        {
+            if (IS_IN_FLIGHT(dirty_db[PRIV(op)->sync_big_writes[PRIV(op)->sync_big_checked]].state))
+            {
+                // Wait for big inflight writes to complete
+                return 0;
+            }
+        }
         // 1st step: fsync data
         if (!disable_data_fsync)
         {
             BS_SUBMIT_GET_SQE(sqe, data);
             my_uring_prep_fsync(sqe, data_fd, IORING_FSYNC_DATASYNC);
             data->iov = { 0 };
-            data->callback = [this, op](ring_data_t *data) { handle_sync_event(data, op); };
+            data->callback = cb;
             PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
             PRIV(op)->pending_ops = 1;
             PRIV(op)->op_state = SYNC_DATA_SYNC_SENT;
@@ -76,41 +93,48 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
     }
     if (PRIV(op)->op_state == SYNC_DATA_SYNC_DONE)
    {
+        for (; PRIV(op)->sync_small_checked < PRIV(op)->sync_small_writes.size(); PRIV(op)->sync_small_checked++)
+        {
+            if (IS_IN_FLIGHT(dirty_db[PRIV(op)->sync_small_writes[PRIV(op)->sync_small_checked]].state))
+            {
+                // Wait for small inflight writes to complete
+                return 0;
+            }
+        }
         // 2nd step: Data device is synced, prepare & write journal entries
         // Check space in the journal and journal memory buffers
         blockstore_journal_check_t space_check(this);
-        if (!space_check.check_available(op, PRIV(op)->sync_big_writes.size(), sizeof(journal_entry_big_write), JOURNAL_STABILIZE_RESERVATION))
+        if (!space_check.check_available(op, PRIV(op)->sync_big_writes.size(), sizeof(journal_entry_big_write), 0))
        {
            return 0;
        }
         // Get SQEs. Don't bother about merging, submit each journal sector as a separate request
-        struct io_uring_sqe *sqe[space_check.sectors_to_write];
-        for (int i = 0; i < space_check.sectors_to_write; i++)
+        struct io_uring_sqe *sqe[space_check.sectors_required];
+        for (int i = 0; i < space_check.sectors_required; i++)
        {
            BS_SUBMIT_GET_SQE_DECL(sqe[i]);
        }
         // Prepare and submit journal entries
         auto it = PRIV(op)->sync_big_writes.begin();
         int s = 0, cur_sector = -1;
-        while (it != PRIV(op)->sync_big_writes.end())
-        {
-            if (!journal.entry_fits(sizeof(journal_entry_big_write)) &&
-                journal.sector_info[journal.cur_sector].dirty)
-            {
-                if (cur_sector == -1)
-                    PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
-                prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], [this, op](ring_data_t *data) { handle_sync_event(data, op); });
-                cur_sector = journal.cur_sector;
-            }
-            journal_entry_big_write *je = (journal_entry_big_write*)prefill_single_journal_entry(
-                journal, (dirty_db[*it].state & BS_ST_INSTANT) ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
-                sizeof(journal_entry_big_write)
-            );
+        if ((journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_big_write) &&
+            journal.sector_info[journal.cur_sector].dirty)
+        {
+            if (cur_sector == -1)
+                PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
+            cur_sector = journal.cur_sector;
+            prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
+        }
+        while (it != PRIV(op)->sync_big_writes.end())
+        {
+            journal_entry_big_write *je = (journal_entry_big_write*)
+                prefill_single_journal_entry(journal, JE_BIG_WRITE, sizeof(journal_entry_big_write));
             dirty_db[*it].journal_sector = journal.sector_info[journal.cur_sector].offset;
+            journal.sector_info[journal.cur_sector].dirty = false;
             journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
 #ifdef BLOCKSTORE_DEBUG
             printf(
-                "journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
+                "journal offset %08lx is used by %lu:%lu v%lu (%lu refs)\n",
                 dirty_db[*it].journal_sector, it->oid.inode, it->oid.stripe, it->version,
                 journal.used_sectors[journal.sector_info[journal.cur_sector].offset]
             );
@@ -123,11 +147,14 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
             je->crc32 = je_crc32((journal_entry*)je);
             journal.crc32_last = je->crc32;
             it++;
-        }
-        prepare_journal_sector_write(journal, journal.cur_sector, sqe[s++], [this, op](ring_data_t *data) { handle_sync_event(data, op); });
-        assert(s == space_check.sectors_to_write);
-        if (cur_sector == -1)
-            PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
+            if (cur_sector != journal.cur_sector)
+            {
+                if (cur_sector == -1)
+                    PRIV(op)->min_flushed_journal_sector = 1 + journal.cur_sector;
+                cur_sector = journal.cur_sector;
+                prepare_journal_sector_write(journal, cur_sector, sqe[s++], cb);
+            }
+        }
         PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
         PRIV(op)->pending_ops = s;
         PRIV(op)->op_state = SYNC_JOURNAL_WRITE_SENT;
@@ -140,7 +167,7 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
         BS_SUBMIT_GET_SQE(sqe, data);
         my_uring_prep_fsync(sqe, journal.fd, IORING_FSYNC_DATASYNC);
         data->iov = { 0 };
-        data->callback = [this, op](ring_data_t *data) { handle_sync_event(data, op); };
+        data->callback = cb;
         PRIV(op)->pending_ops = 1;
         PRIV(op)->op_state = SYNC_JOURNAL_SYNC_SENT;
         return 1;
@@ -150,10 +177,9 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
         PRIV(op)->op_state = SYNC_DONE;
     }
-    if (PRIV(op)->op_state == SYNC_DONE && !queue_has_in_progress_sync)
+    if (PRIV(op)->op_state == SYNC_DONE)
     {
-        ack_sync(op);
-        return 2;
+        return ack_sync(op);
     }
     return 1;
 }
@@ -185,37 +211,59 @@ void blockstore_impl_t::handle_sync_event(ring_data_t *data, blockstore_op_t *op)
     else if (PRIV(op)->op_state == SYNC_JOURNAL_SYNC_SENT)
     {
         PRIV(op)->op_state = SYNC_DONE;
+        ack_sync(op);
     }
     else
     {
         throw std::runtime_error("BUG: unexpected sync op state");
     }
-    ringloop->wakeup();
     }
 }
 
-void blockstore_impl_t::ack_sync(blockstore_op_t *op)
+int blockstore_impl_t::ack_sync(blockstore_op_t *op)
+{
+    if (PRIV(op)->op_state == SYNC_DONE && PRIV(op)->prev_sync_count == 0)
+    {
+        // Remove dependency of subsequent syncs
+        auto it = PRIV(op)->in_progress_ptr;
+        int done_syncs = 1;
+        ++it;
+        // Acknowledge sync
+        ack_one_sync(op);
+        while (it != in_progress_syncs.end())
+        {
+            auto & next_sync = *it++;
+            PRIV(next_sync)->prev_sync_count -= done_syncs;
+            if (PRIV(next_sync)->prev_sync_count == 0 && PRIV(next_sync)->op_state == SYNC_DONE)
+            {
+                done_syncs++;
+                // Acknowledge next_sync
+                ack_one_sync(next_sync);
+            }
+        }
+        return 2;
+    }
+    return 0;
+}
+
+void blockstore_impl_t::ack_one_sync(blockstore_op_t *op)
 {
     // Handle states
     for (auto it = PRIV(op)->sync_big_writes.begin(); it != PRIV(op)->sync_big_writes.end(); it++)
     {
 #ifdef BLOCKSTORE_DEBUG
-        printf("Ack sync big %lx:%lx v%lu\n", it->oid.inode, it->oid.stripe, it->version);
+        printf("Ack sync big %lu:%lu v%lu\n", it->oid.inode, it->oid.stripe, it->version);
 #endif
         auto & unstab = unstable_writes[it->oid];
         unstab = unstab < it->version ? it->version : unstab;
         auto dirty_it = dirty_db.find(*it);
-        dirty_it->second.state = ((dirty_it->second.state & ~BS_ST_WORKFLOW_MASK) | BS_ST_SYNCED);
-        if (dirty_it->second.state & BS_ST_INSTANT)
-        {
-            mark_stable(dirty_it->first);
-        }
+        dirty_it->second.state = ST_D_SYNCED;
         dirty_it++;
         while (dirty_it != dirty_db.end() && dirty_it->first.oid == it->oid)
         {
-            if ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) == BS_ST_WAIT_BIG)
+            if (dirty_it->second.state == ST_J_WAIT_BIG)
             {
-                dirty_it->second.state = (dirty_it->second.state & ~BS_ST_WORKFLOW_MASK) | BS_ST_IN_FLIGHT;
+                dirty_it->second.state = ST_J_IN_FLIGHT;
             }
             dirty_it++;
         }
@@ -223,25 +271,22 @@ void blockstore_impl_t::ack_sync(blockstore_op_t *op)
     for (auto it = PRIV(op)->sync_small_writes.begin(); it != PRIV(op)->sync_small_writes.end(); it++)
     {
 #ifdef BLOCKSTORE_DEBUG
-        printf("Ack sync small %lx:%lx v%lu\n", it->oid.inode, it->oid.stripe, it->version);
+        printf("Ack sync small %lu:%lu v%lu\n", it->oid.inode, it->oid.stripe, it->version);
 #endif
         auto & unstab = unstable_writes[it->oid];
         unstab = unstab < it->version ? it->version : unstab;
-        if (dirty_db[*it].state == (BS_ST_DELETE | BS_ST_WRITTEN))
+        if (dirty_db[*it].state == ST_DEL_WRITTEN)
        {
-            dirty_db[*it].state = (BS_ST_DELETE | BS_ST_SYNCED);
+            dirty_db[*it].state = ST_DEL_SYNCED;
             // Deletions are treated as immediately stable
             mark_stable(*it);
        }
-        else /* (BS_ST_INSTANT?) | BS_ST_SMALL_WRITE | BS_ST_WRITTEN */
+        else /* == ST_J_WRITTEN */
        {
-            dirty_db[*it].state = (dirty_db[*it].state & ~BS_ST_WORKFLOW_MASK) | BS_ST_SYNCED;
-            if (dirty_db[*it].state & BS_ST_INSTANT)
-            {
-                mark_stable(*it);
-            }
+            dirty_db[*it].state = ST_J_SYNCED;
        }
    }
+    in_progress_syncs.erase(PRIV(op)->in_progress_ptr);
     op->retval = 0;
     FINISH_OP(op);
 }
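
The right-hand version keeps every sync in in_progress_syncs and acknowledges one only after all syncs submitted before it have been acknowledged; prev_sync_count records how many predecessors are still pending, and ack_sync cascades down the list as those counts reach zero. A simplified FIFO formulation of the same invariant (names and types are illustrative):

#include <list>

struct sync_op_t
{
    bool done = false, acked = false;
};

// Acknowledge completed syncs strictly in submission order: the first
// unfinished sync blocks everything submitted after it.
void ack_finished_syncs(std::list<sync_op_t*> & in_progress_syncs)
{
    while (!in_progress_syncs.empty())
    {
        sync_op_t *op = in_progress_syncs.front();
        if (!op->done)
            break;
        op->acked = true;
        in_progress_syncs.pop_front();
    }
}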

View File

@@ -1,19 +1,11 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
-
 #include "blockstore_impl.h"
 
 bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
 {
     // Check or assign version number
     bool found = false, deleted = false, is_del = (op->opcode == BS_OP_DELETE);
-    bool wait_big = false, wait_del = false;
-    void *bmp = NULL;
+    bool is_inflight_big = false;
     uint64_t version = 1;
-    if (!is_del && clean_entry_bitmap_size > sizeof(void*))
-    {
-        bmp = calloc_or_die(1, clean_entry_bitmap_size);
-    }
     if (dirty_db.size() > 0)
     {
         auto dirty_it = dirty_db.upper_bound((obj_ver_id){
@@ -26,14 +18,9 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
             found = true;
             version = dirty_it->first.version + 1;
             deleted = IS_DELETE(dirty_it->second.state);
-            wait_del = ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) == BS_ST_WAIT_DEL);
-            wait_big = (dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_BIG_WRITE
-                ? !IS_SYNCED(dirty_it->second.state)
-                : ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) == BS_ST_WAIT_BIG);
-            if (clean_entry_bitmap_size > sizeof(void*))
-                memcpy(bmp, dirty_it->second.bitmap, clean_entry_bitmap_size);
-            else
-                bmp = dirty_it->second.bitmap;
+            is_inflight_big = dirty_it->second.state >= ST_D_IN_FLIGHT &&
+                dirty_it->second.state < ST_D_SYNCED ||
+                dirty_it->second.state == ST_J_WAIT_BIG;
         }
     }
     if (!found)
@@ -42,55 +29,29 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
         if (clean_it != clean_db.end())
         {
             version = clean_it->second.version + 1;
-            void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, clean_entry_bitmap_size);
-            memcpy((clean_entry_bitmap_size > sizeof(void*) ? bmp : &bmp), bmp_ptr, clean_entry_bitmap_size);
         }
         else
         {
             deleted = true;
         }
     }
+    if (op->version == 0)
+    {
+        op->version = version;
+    }
+    else if (op->version < version)
+    {
+        // Invalid version requested
+        op->retval = -EEXIST;
+        return false;
+    }
     if (deleted && is_del)
     {
         // Already deleted
         op->retval = 0;
         return false;
     }
-    PRIV(op)->real_version = 0;
-    if (op->version == 0)
-    {
-        op->version = version;
-    }
-    else if (op->version < version)
-    {
-        // Implicit operations must be added like that: DEL [FLUSH] BIG [SYNC] SMALL SMALL
-        if (deleted || wait_del)
-        {
-            // It's allowed to write versions with low numbers over deletes
-            // However, we have to flush those deletes first as we use version number for ordering
-#ifdef BLOCKSTORE_DEBUG
-            printf("Write %lx:%lx v%lu over delete (real v%lu) offset=%u len=%u\n", op->oid.inode, op->oid.stripe, version, op->version, op->offset, op->len);
-#endif
-            wait_del = true;
-            PRIV(op)->real_version = op->version;
-            op->version = version;
-            flusher->unshift_flush((obj_ver_id){
-                .oid = op->oid,
-                .version = version-1,
-            }, true);
-        }
-        else
-        {
-            // Invalid version requested
-            op->retval = -EEXIST;
-            if (!is_del && clean_entry_bitmap_size > sizeof(void*))
-            {
-                free(bmp);
-            }
-            return false;
-        }
-    }
-    if (wait_big && !is_del && !deleted && op->len < block_size &&
+    if (is_inflight_big && !is_del && !deleted && op->len < block_size &&
         immediate_commit != IMMEDIATE_ALL)
     {
         // Issue an additional sync so that the previous big write can reach the journal
@@ -104,89 +65,30 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
     }
 #ifdef BLOCKSTORE_DEBUG
     if (is_del)
-        printf("Delete %lx:%lx v%lu\n", op->oid.inode, op->oid.stripe, op->version);
-    else if (!wait_del)
-        printf("Write %lx:%lx v%lu offset=%u len=%u\n", op->oid.inode, op->oid.stripe, op->version, op->offset, op->len);
+        printf("Delete %lu:%lu v%lu\n", op->oid.inode, op->oid.stripe, op->version);
+    else
+        printf("Write %lu:%lu v%lu offset=%u len=%u\n", op->oid.inode, op->oid.stripe, op->version, op->offset, op->len);
 #endif
-    // FIXME No strict need to add it into dirty_db here, it's just left
+    // No strict need to add it into dirty_db here, it's just left
     // from the previous implementation where reads waited for writes
-    uint32_t state;
-    if (is_del)
-        state = BS_ST_DELETE | BS_ST_IN_FLIGHT;
-    else
-    {
-        state = (op->len == block_size || deleted ? BS_ST_BIG_WRITE : BS_ST_SMALL_WRITE);
-        if (wait_del)
-            state |= BS_ST_WAIT_DEL;
-        else if (state == BS_ST_SMALL_WRITE && wait_big)
-            state |= BS_ST_WAIT_BIG;
-        else
-            state |= BS_ST_IN_FLIGHT;
-        if (op->opcode == BS_OP_WRITE_STABLE)
-            state |= BS_ST_INSTANT;
-        if (op->bitmap)
-        {
-            // Only allow to overwrite part of the object bitmap respective to the write's offset/len
-            uint8_t *bmp_ptr = (uint8_t*)(clean_entry_bitmap_size > sizeof(void*) ? bmp : &bmp);
-            uint32_t bit = op->offset/bitmap_granularity;
-            uint32_t bits_left = op->len/bitmap_granularity;
-            while (!(bit % 8) && bits_left > 8)
-            {
-                // Copy bytes
-                bmp_ptr[bit/8] = ((uint8_t*)op->bitmap)[bit/8];
-                bit += 8;
-                bits_left -= 8;
-            }
-            while (bits_left > 0)
-            {
-                // Copy bits
-                bmp_ptr[bit/8] = (bmp_ptr[bit/8] & ~(1 << (bit%8)))
-                    | (((uint8_t*)op->bitmap)[bit/8] & (1 << bit%8));
-                bit++;
-                bits_left--;
-            }
-        }
-    }
     dirty_db.emplace((obj_ver_id){
         .oid = op->oid,
         .version = op->version,
     }, (dirty_entry){
-        .state = state,
+        .state = (uint32_t)(
+            is_del
+                ? ST_DEL_IN_FLIGHT
+                : (op->len == block_size || deleted ? ST_D_IN_FLIGHT : (is_inflight_big ? ST_J_WAIT_BIG : ST_J_IN_FLIGHT))
+        ),
         .flags = 0,
         .location = 0,
         .offset = is_del ? 0 : op->offset,
         .len = is_del ? 0 : op->len,
         .journal_sector = 0,
-        .bitmap = bmp,
     });
     return true;
 }
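
Both sides of enqueue_write implement the same basic version rule: op->version == 0 asks the store to assign the next version after the latest clean or dirty one, and an explicit version lower than that is rejected with -EEXIST (the left-hand version additionally allows low versions over unflushed deletes). That rule in isolation, as a hedged sketch:

#include <cstdint>
#include <stdexcept>

// requested == 0 means "auto-assign"; anything else must be at least
// latest_version + 1, otherwise the write is a stale duplicate.
uint64_t check_or_assign_version(uint64_t requested, uint64_t latest_version)
{
    uint64_t next = latest_version + 1;
    if (requested == 0)
        return next;
    if (requested < next)
        throw std::invalid_argument("version already exists (-EEXIST)");
    return requested;
}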
-void blockstore_impl_t::cancel_all_writes(blockstore_op_t *op, blockstore_dirty_db_t::iterator dirty_it, int retval)
-{
-    while (dirty_it != dirty_db.end() && dirty_it->first.oid == op->oid)
-    {
-        if (clean_entry_bitmap_size > sizeof(void*))
-            free(dirty_it->second.bitmap);
-        dirty_db.erase(dirty_it++);
-    }
-    bool found = false;
-    for (auto other_op: submit_queue)
-    {
-        if (!found && other_op == op)
-            found = true;
-        else if (found && other_op->oid == op->oid &&
-            (other_op->opcode == BS_OP_WRITE || other_op->opcode == BS_OP_WRITE_STABLE))
-        {
-            // Mark operations to cancel them
-            PRIV(other_op)->real_version = UINT64_MAX;
-            other_op->retval = retval;
-        }
-    }
-    op->retval = retval;
-    FINISH_OP(op);
-}
 // First step of the write algorithm: dequeue operation and submit initial write(s)
 int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
 {
@@ -199,46 +101,11 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
         .version = op->version,
     });
     assert(dirty_it != dirty_db.end());
-    if ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) < BS_ST_IN_FLIGHT)
-    {
-        // Don't dequeue
-        return 0;
-    }
-    if (PRIV(op)->real_version != 0)
-    {
-        if (PRIV(op)->real_version == UINT64_MAX)
-        {
-            // This is the flag value used to cancel operations
-            FINISH_OP(op);
-            return 2;
-        }
-        // Restore original low version number for unblocked operations
-#ifdef BLOCKSTORE_DEBUG
-        printf("Restoring %lx:%lx version: v%lu -> v%lu\n", op->oid.inode, op->oid.stripe, op->version, PRIV(op)->real_version);
-#endif
-        auto prev_it = dirty_it;
-        prev_it--;
-        if (prev_it->first.oid == op->oid && prev_it->first.version >= PRIV(op)->real_version)
-        {
-            // Original version is still invalid
-            // All subsequent writes to the same object must be canceled too
-            cancel_all_writes(op, dirty_it, -EEXIST);
-            return 2;
-        }
-        op->version = PRIV(op)->real_version;
-        PRIV(op)->real_version = 0;
-        dirty_entry e = dirty_it->second;
-        dirty_db.erase(dirty_it);
-        dirty_it = dirty_db.emplace((obj_ver_id){
-            .oid = op->oid,
-            .version = op->version,
-        }, e).first;
-    }
-    if (write_iodepth >= max_write_iodepth)
+    if (dirty_it->second.state == ST_J_WAIT_BIG)
     {
         return 0;
     }
-    if ((dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_BIG_WRITE)
+    else if (dirty_it->second.state == ST_D_IN_FLIGHT)
     {
         blockstore_journal_check_t space_check(this);
         if (!space_check.check_available(op, unsynced_big_writes.size() + 1, sizeof(journal_entry_big_write), JOURNAL_STABILIZE_RESERVATION))
@@ -256,13 +123,13 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
             PRIV(op)->wait_for = WAIT_FREE;
             return 0;
         }
-        cancel_all_writes(op, dirty_it, -ENOSPC);
-        return 2;
+        op->retval = -ENOSPC;
+        FINISH_OP(op);
+        return 1;
     }
-    write_iodepth++;
     BS_SUBMIT_GET_SQE(sqe, data);
     dirty_it->second.location = loc << block_order;
-    dirty_it->second.state = (dirty_it->second.state & ~BS_ST_WORKFLOW_MASK) | BS_ST_SUBMITTED;
+    dirty_it->second.state = ST_D_SUBMITTED;
 #ifdef BLOCKSTORE_DEBUG
     printf("Allocate block %lu\n", loc);
 #endif
@@ -302,7 +169,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
             PRIV(op)->op_state = 1;
         }
     }
-    else /* if ((dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_SMALL_WRITE) */
+    else
     {
         // Small (journaled) write
         // First check if the journal has sufficient space
@@ -312,7 +179,6 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
         {
             return 0;
         }
-        write_iodepth++;
         // There is sufficient space. Get SQE(s)
         struct io_uring_sqe *sqe1 = NULL;
         if (immediate_commit != IMMEDIATE_NONE ||
@@ -343,15 +209,13 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
             }
         }
         // Then pre-fill journal entry
-        journal_entry_small_write *je = (journal_entry_small_write*)prefill_single_journal_entry(
-            journal, op->opcode == BS_OP_WRITE_STABLE ? JE_SMALL_WRITE_INSTANT : JE_SMALL_WRITE,
-            sizeof(journal_entry_small_write) + clean_entry_bitmap_size
-        );
+        journal_entry_small_write *je = (journal_entry_small_write*)
+            prefill_single_journal_entry(journal, JE_SMALL_WRITE, sizeof(journal_entry_small_write));
         dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
         journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
 #ifdef BLOCKSTORE_DEBUG
         printf(
-            "journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
+            "journal offset %08lx is used by %lu:%lu v%lu (%lu refs)\n",
             dirty_it->second.journal_sector, dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version,
             journal.used_sectors[journal.sector_info[journal.cur_sector].offset]
         );
@@ -364,7 +228,6 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
         je->len = op->len;
         je->data_offset = journal.next_free;
         je->crc32_data = crc32c(0, op->buf, op->len);
-        memcpy((void*)(je+1), (clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap), clean_entry_bitmap_size);
         je->crc32 = je_crc32((journal_entry*)je);
         journal.crc32_last = je->crc32;
         if (immediate_commit != IMMEDIATE_NONE)
@@ -394,7 +257,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
             // Zero-length overwrite. Allowed to bump object version in EC placement groups without actually writing data
         }
         dirty_it->second.location = journal.next_free;
-        dirty_it->second.state = (dirty_it->second.state & ~BS_ST_WORKFLOW_MASK) | BS_ST_SUBMITTED;
+        dirty_it->second.state = ST_J_SUBMITTED;
         journal.next_free += op->len;
         if (journal.next_free >= journal.len)
         {
@@ -411,13 +274,14 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
         if (!PRIV(op)->pending_ops)
         {
             PRIV(op)->op_state = 4;
-            return continue_write(op);
+            continue_write(op);
         }
         else
         {
             PRIV(op)->op_state = 3;
         }
     }
+    inflight_writes++;
     return 1;
 }
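
dequeue_write dispatches on the kind of write fixed at enqueue time: full-block writes (and overwrites of deleted objects) take the big redirect-write path to the data device, while partial writes and deletions go through the journal. A sketch of that classification (the enum names are illustrative):

#include <cstdint>

enum write_kind_t { WRITE_BIG_REDIRECT, WRITE_SMALL_JOURNALED, WRITE_DELETE };

// Mirrors the state choice in enqueue_write: len == block_size (or a
// rewrite of a deleted object) is a big write, everything else is small.
write_kind_t classify_write(bool is_del, bool deleted, uint32_t len, uint32_t block_size)
{
    if (is_del)
        return WRITE_DELETE;
    return (len == block_size || deleted) ? WRITE_BIG_REDIRECT : WRITE_SMALL_JOURNALED;
}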
@@ -425,33 +289,31 @@ int blockstore_impl_t::continue_write(blockstore_op_t *op)
 {
     io_uring_sqe *sqe = NULL;
     journal_entry_big_write *je;
-    int op_state = PRIV(op)->op_state;
-    if (op_state != 2 && op_state != 4)
-    {
-        // In progress
-        return 1;
-    }
     auto dirty_it = dirty_db.find((obj_ver_id){
         .oid = op->oid,
         .version = op->version,
     });
     assert(dirty_it != dirty_db.end());
-    if (op_state == 2)
+    if (PRIV(op)->op_state == 2)
         goto resume_2;
-    else if (op_state == 4)
+    else if (PRIV(op)->op_state == 4)
         goto resume_4;
+    else
+        return 1;
 resume_2:
     // Only for the immediate_commit mode: prepare and submit big_write journal entry
-    BS_SUBMIT_GET_SQE_DECL(sqe);
-    je = (journal_entry_big_write*)prefill_single_journal_entry(
-        journal, op->opcode == BS_OP_WRITE_STABLE ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
-        sizeof(journal_entry_big_write) + clean_entry_bitmap_size
-    );
+    sqe = get_sqe();
+    if (!sqe)
+    {
+        return 0;
+    }
+    je = (journal_entry_big_write*)prefill_single_journal_entry(journal, JE_BIG_WRITE, sizeof(journal_entry_big_write));
     dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
+    journal.sector_info[journal.cur_sector].dirty = false;
     journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
 #ifdef BLOCKSTORE_DEBUG
     printf(
-        "journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
+        "journal offset %08lx is used by %lu:%lu v%lu (%lu refs)\n",
         journal.sector_info[journal.cur_sector].offset, op->oid.inode, op->oid.stripe, op->version,
         journal.used_sectors[journal.sector_info[journal.cur_sector].offset]
     );
@@ -461,7 +323,6 @@ resume_2:
     je->offset = op->offset;
     je->len = op->len;
     je->location = dirty_it->second.location;
-    memcpy((void*)(je+1), (clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap), clean_entry_bitmap_size);
     je->crc32 = je_crc32((journal_entry*)je);
     journal.crc32_last = je->crc32;
     prepare_journal_sector_write(journal, journal.cur_sector, sqe,
@@ -473,9 +334,9 @@ resume_2:
 resume_4:
     // Switch object state
 #ifdef BLOCKSTORE_DEBUG
-    printf("Ack write %lx:%lx v%lu = state %x\n", op->oid.inode, op->oid.stripe, op->version, dirty_it->second.state);
+    printf("Ack write %lu:%lu v%lu = %d\n", op->oid.inode, op->oid.stripe, op->version, dirty_it->second.state);
 #endif
-    bool imm = (dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_BIG_WRITE
+    bool imm = dirty_it->second.state == ST_D_SUBMITTED
         ? (immediate_commit == IMMEDIATE_ALL)
         : (immediate_commit != IMMEDIATE_NONE);
     if (imm)
@@ -483,30 +344,40 @@ resume_4:
         auto & unstab = unstable_writes[op->oid];
         unstab = unstab < op->version ? op->version : unstab;
     }
-    dirty_it->second.state = (dirty_it->second.state & ~BS_ST_WORKFLOW_MASK)
-        | (imm ? BS_ST_SYNCED : BS_ST_WRITTEN);
-    if (imm && ((dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_DELETE || (dirty_it->second.state & BS_ST_INSTANT)))
+    if (dirty_it->second.state == ST_J_SUBMITTED)
+    {
+        dirty_it->second.state = imm ? ST_J_SYNCED : ST_J_WRITTEN;
+    }
+    else if (dirty_it->second.state == ST_D_SUBMITTED)
+    {
+        dirty_it->second.state = imm ? ST_D_SYNCED : ST_D_WRITTEN;
+    }
+    else if (dirty_it->second.state == ST_DEL_SUBMITTED)
+    {
+        dirty_it->second.state = imm ? ST_DEL_SYNCED : ST_DEL_WRITTEN;
+        if (imm)
         {
             // Deletions are treated as immediately stable
             mark_stable(dirty_it->first);
         }
+    }
     if (immediate_commit == IMMEDIATE_ALL)
     {
         dirty_it++;
         while (dirty_it != dirty_db.end() && dirty_it->first.oid == op->oid)
         {
-            if ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) == BS_ST_WAIT_BIG)
+            if (dirty_it->second.state == ST_J_WAIT_BIG)
             {
-                dirty_it->second.state = (dirty_it->second.state & ~BS_ST_WORKFLOW_MASK) | BS_ST_IN_FLIGHT;
+                dirty_it->second.state = ST_J_IN_FLIGHT;
             }
             dirty_it++;
         }
     }
+    inflight_writes--;
     // Acknowledge write
     op->retval = op->len;
-    write_iodepth--;
     FINISH_OP(op);
-    return 2;
+    return 1;
 }
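
The "imm" flag computed in resume_4 encodes the immediate_commit policy: a big (redirect) write counts as committed right away only in the "all" mode, whereas a small journaled write is committed in both the "small" and "all" modes. As a standalone sketch:

enum immediate_commit_t { IMMEDIATE_NONE, IMMEDIATE_SMALL, IMMEDIATE_ALL };

// Big writes require IMMEDIATE_ALL to skip the explicit sync; journaled
// small writes are committed by the journal write itself unless the mode
// is IMMEDIATE_NONE.
inline bool is_immediately_committed(bool is_big_write, immediate_commit_t mode)
{
    return is_big_write ? (mode == IMMEDIATE_ALL) : (mode != IMMEDIATE_NONE);
}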
 void blockstore_impl_t::handle_write_event(ring_data_t *data, blockstore_op_t *op)
@@ -514,6 +385,7 @@ void blockstore_impl_t::handle_write_event(ring_data_t *data, blockstore_op_t *op)
     live = true;
     if (data->res != data->iov.iov_len)
     {
+        inflight_writes--;
         // FIXME: our state becomes corrupted after a write error. maybe do something better than just die
         throw std::runtime_error(
             "write operation failed ("+std::to_string(data->res)+" != "+std::to_string(data->iov.iov_len)+
@@ -525,7 +397,10 @@ void blockstore_impl_t::handle_write_event(ring_data_t *data, blockstore_op_t *op)
     {
         release_journal_sectors(op);
         PRIV(op)->op_state++;
-        ringloop->wakeup();
+        if (!continue_write(op))
+        {
+            submit_queue.push_front(op);
+        }
     }
 }
@@ -538,8 +413,8 @@ void blockstore_impl_t::release_journal_sectors(blockstore_op_t *op)
     uint64_t s = PRIV(op)->min_flushed_journal_sector;
     while (1)
     {
-        journal.sector_info[s-1].flush_count--;
-        if (s != (1+journal.cur_sector) && journal.sector_info[s-1].flush_count == 0)
+        journal.sector_info[s-1].usage_count--;
+        if (s != (1+journal.cur_sector) && journal.sector_info[s-1].usage_count == 0)
         {
             // We know for sure that we won't write into this sector anymore
             uint64_t new_ds = journal.sector_info[s-1].offset + journal.block_size;
@@ -563,21 +438,16 @@ void blockstore_impl_t::release_journal_sectors(blockstore_op_t *op)
 
 int blockstore_impl_t::dequeue_del(blockstore_op_t *op)
 {
-    if (PRIV(op)->op_state)
-    {
-        return continue_write(op);
-    }
     auto dirty_it = dirty_db.find((obj_ver_id){
         .oid = op->oid,
         .version = op->version,
     });
     assert(dirty_it != dirty_db.end());
     blockstore_journal_check_t space_check(this);
-    if (!space_check.check_available(op, 1, sizeof(journal_entry_del), JOURNAL_STABILIZE_RESERVATION))
+    if (!space_check.check_available(op, 1, sizeof(journal_entry_del), 0))
     {
         return 0;
     }
-    write_iodepth++;
     io_uring_sqe *sqe = NULL;
     if (immediate_commit != IMMEDIATE_NONE ||
         (journal_block_size - journal.in_sector_pos) < sizeof(journal_entry_del) &&
@@ -602,14 +472,13 @@ int blockstore_impl_t::dequeue_del(blockstore_op_t *op)
         }
     }
     // Pre-fill journal entry
-    journal_entry_del *je = (journal_entry_del*)prefill_single_journal_entry(
-        journal, JE_DELETE, sizeof(struct journal_entry_del)
-    );
+    journal_entry_del *je = (journal_entry_del*)
+        prefill_single_journal_entry(journal, JE_DELETE, sizeof(struct journal_entry_del));
     dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
     journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
 #ifdef BLOCKSTORE_DEBUG
     printf(
-        "journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
+        "journal offset %08lx is used by %lu:%lu v%lu (%lu refs)\n",
         dirty_it->second.journal_sector, dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version,
         journal.used_sectors[journal.sector_info[journal.cur_sector].offset]
     );
@@ -618,16 +487,13 @@ int blockstore_impl_t::dequeue_del(blockstore_op_t *op)
     je->version = op->version;
     je->crc32 = je_crc32((journal_entry*)je);
     journal.crc32_last = je->crc32;
-    dirty_it->second.state = BS_ST_DELETE | BS_ST_SUBMITTED;
+    dirty_it->second.state = ST_DEL_SUBMITTED;
     if (immediate_commit != IMMEDIATE_NONE)
     {
         prepare_journal_sector_write(journal, journal.cur_sector, sqe, cb);
         PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 1 + journal.cur_sector;
         PRIV(op)->pending_ops++;
     }
-    else
-    {
-        // Remember delete as unsynced
+    // Remember small write as unsynced
     unsynced_small_writes.push_back((obj_ver_id){
         .oid = op->oid,
         .version = op->version,
@@ -636,7 +502,7 @@ int blockstore_impl_t::dequeue_del(blockstore_op_t *op)
     if (!PRIV(op)->pending_ops)
     {
         PRIV(op)->op_state = 4;
-        return continue_write(op);
+        continue_write(op);
     }
     else
     {
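
All of the scans above ("dirty_it++ while the oid still matches") rely on dirty_db being ordered by object first and version second, so every version of one object is adjacent. An illustrative definition of such a key ordering (the real object_id/obj_ver_id types are defined elsewhere in the tree and may differ in detail):

#include <cstdint>

struct object_id { uint64_t inode, stripe; };
struct obj_ver_id { object_id oid; uint64_t version; };

// Lexicographic order: (inode, stripe) first, then version, so a std::map
// keyed by obj_ver_id stores versions of one object contiguously.
inline bool operator<(const obj_ver_id & a, const obj_ver_id & b)
{
    if (a.oid.inode != b.oid.inode)
        return a.oid.inode < b.oid.inode;
    if (a.oid.stripe != b.oid.stripe)
        return a.oid.stripe < b.oid.stripe;
    return a.version < b.version;
}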

349
cluster_client.cpp Normal file
View File

@@ -0,0 +1,349 @@
#include "cluster_client.h"
cluster_client_t::cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json & config)
{
this->ringloop = ringloop;
this->tfd = tfd;
msgr.tfd = tfd;
msgr.ringloop = ringloop;
msgr.repeer_pgs = [this](osd_num_t peer_osd)
{
// peer_osd just connected or dropped connection
if (msgr.osd_peer_fds.find(peer_osd) != msgr.osd_peer_fds.end())
{
// really connected :)
continue_ops();
}
};
st_cli.tfd = tfd;
st_cli.on_load_config_hook = [this](json11::Json::object & cfg) { on_load_config_hook(cfg); };
st_cli.on_change_osd_state_hook = [this](uint64_t peer_osd) { on_change_osd_state_hook(peer_osd); };
st_cli.on_change_hook = [this](json11::Json::object & changes) { on_change_hook(changes); };
st_cli.on_load_pgs_hook = [this](bool success) { on_load_pgs_hook(success); };
log_level = config["log_level"].int64_value();
st_cli.parse_config(config);
st_cli.load_global_config();
}
void cluster_client_t::continue_ops()
{
for (auto op_it = unsent_ops.begin(); op_it != unsent_ops.end(); )
{
cluster_op_t *op = *op_it;
if (op->needs_reslice && !op->sent_count)
{
op->parts.clear();
op->done_count = 0;
op->needs_reslice = false;
}
if (!op->parts.size())
{
unsent_ops.erase(op_it++);
execute(op);
continue;
}
if (!op->needs_reslice)
{
for (auto & op_part: op->parts)
{
if (!op_part.sent && !op_part.done)
{
try_send(op, &op_part);
}
}
if (op->sent_count == op->parts.size() - op->done_count)
{
unsent_ops.erase(op_it++);
sent_ops.insert(op);
}
else
op_it++;
}
else
op_it++;
}
}
static uint32_t is_power_of_two(uint64_t value)
{
uint32_t l = 0;
while (value > 1)
{
if (value & 1)
{
return 64;
}
value = value >> 1;
l++;
}
return l;
}
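
is_power_of_two() doubles as an integer log2: it returns the exponent for exact powers of two and 64 (an impossible shift amount) for anything else, which is why the caller below treats a result >= 64 as a bad block size. A quick illustration of that contract:

#include <cassert>

void is_power_of_two_examples()
{
    assert(is_power_of_two(131072) == 17); // 128 KB, the default block size
    assert(is_power_of_two(4096) == 12);   // 4 KB disk alignment
    assert(is_power_of_two(12345) == 64);  // not a power of two -> rejected
}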
void cluster_client_t::on_load_config_hook(json11::Json::object & config)
{
bs_block_size = config["block_size"].uint64_value();
bs_disk_alignment = config["disk_alignment"].uint64_value();
bs_bitmap_granularity = config["bitmap_granularity"].uint64_value();
if (!bs_block_size)
bs_block_size = DEFAULT_BLOCK_SIZE;
if (!bs_disk_alignment)
bs_disk_alignment = DEFAULT_DISK_ALIGNMENT;
if (!bs_bitmap_granularity)
bs_bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
{
uint32_t block_order;
if ((block_order = is_power_of_two(bs_block_size)) >= 64 || bs_block_size < MIN_BLOCK_SIZE || bs_block_size >= MAX_BLOCK_SIZE)
throw std::runtime_error("Bad block size");
}
if (config.find("pg_stripe_size") != config.end())
{
pg_stripe_size = config["pg_stripe_size"].uint64_value();
if (!pg_stripe_size)
pg_stripe_size = DEFAULT_PG_STRIPE_SIZE;
}
if (config["immediate_commit"] == "all")
{
// Cluster-wide immediate_commit mode
immediate_commit = true;
}
msgr.peer_connect_interval = config["peer_connect_interval"].uint64_value();
if (!msgr.peer_connect_interval)
msgr.peer_connect_interval = DEFAULT_PEER_CONNECT_INTERVAL;
msgr.peer_connect_timeout = config["peer_connect_timeout"].uint64_value();
if (!msgr.peer_connect_timeout)
msgr.peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT;
}
void cluster_client_t::on_load_pgs_hook(bool success)
{
if (success)
{
pg_count = st_cli.pg_config.size();
continue_ops();
}
}
void cluster_client_t::on_change_hook(json11::Json::object & changes)
{
if (pg_count != st_cli.pg_config.size())
{
// At this point, all operations should be suspended
// And they need to be resliced!
for (auto op: unsent_ops)
{
op->needs_reslice = true;
}
for (auto op: sent_ops)
{
op->needs_reslice = true;
}
pg_count = st_cli.pg_config.size();
}
continue_ops();
}
void cluster_client_t::on_change_osd_state_hook(uint64_t peer_osd)
{
if (msgr.wanted_peers.find(peer_osd) != msgr.wanted_peers.end())
{
msgr.connect_peer(peer_osd, st_cli.peer_states[peer_osd]);
}
}
// FIXME: Implement OSD_OP_SYNC for immediate_commit == false
void cluster_client_t::execute(cluster_op_t *op)
{
if (op->opcode == OSD_OP_SYNC && immediate_commit)
{
// Syncs are not required in the immediate_commit mode
op->retval = 0;
std::function<void(cluster_op_t*)>(op->callback)(op);
return;
}
if (op->opcode != OSD_OP_READ && op->opcode != OSD_OP_WRITE || !op->inode || !op->len ||
op->offset % bs_disk_alignment || op->len % bs_disk_alignment)
{
op->retval = -EINVAL;
std::function<void(cluster_op_t*)>(op->callback)(op);
return;
}
if (!pg_stripe_size)
{
// Config is not loaded yet
unsent_ops.insert(op);
return;
}
if (op->opcode == OSD_OP_WRITE && !immediate_commit)
{
// Copy operation
cluster_op_t *op_copy = new cluster_op_t();
op_copy->opcode = op->opcode;
op_copy->inode = op->inode;
op_copy->offset = op->offset;
op_copy->len = op->len;
op_copy->buf = malloc(op->len);
memcpy(op_copy->buf, op->buf, op->len);
unsynced_ops.push_back(op_copy);
unsynced_bytes += op->len;
if (inmemory_commit)
{
// Immediately acknowledge write and continue with the copy
op->retval = op->len;
std::function<void(cluster_op_t*)>(op->callback)(op);
op = op_copy;
}
if (unsynced_bytes >= inmemory_dirty_limit)
{
// Push an extra SYNC operation
}
}
// Slice the request into individual object stripe requests
// Primary OSDs still operate individual stripes, but their size is multiplied by PG minsize in case of EC
uint64_t pg_block_size = bs_block_size * pg_part_count;
uint64_t first_stripe = (op->offset / pg_block_size) * pg_block_size;
uint64_t last_stripe = ((op->offset + op->len + pg_block_size - 1) / pg_block_size - 1) * pg_block_size;
int part_count = 0;
for (uint64_t stripe = first_stripe; stripe <= last_stripe; stripe += pg_block_size)
{
if (op->offset < (stripe+pg_block_size) && (op->offset+op->len) > stripe)
{
part_count++;
}
}
op->parts.resize(part_count);
bool resend = false;
int i = 0;
for (uint64_t stripe = first_stripe; stripe <= last_stripe; stripe += pg_block_size)
{
uint64_t stripe_end = stripe + pg_block_size;
if (op->offset < stripe_end && (op->offset+op->len) > stripe)
{
pg_num_t pg_num = (op->inode + stripe/pg_stripe_size) % pg_count + 1;
op->parts[i] = {
.parent = op,
.offset = op->offset < stripe ? stripe : op->offset,
.len = (uint32_t)((op->offset+op->len) > stripe_end ? pg_block_size : op->offset+op->len-stripe),
.pg_num = pg_num,
.buf = op->buf + (op->offset < stripe ? stripe-op->offset : 0),
.sent = false,
.done = false,
};
if (!try_send(op, &op->parts[i]))
{
// Part needs to be sent later
resend = true;
}
i++;
}
}
if (resend)
{
unsent_ops.insert(op);
}
else
{
sent_ops.insert(op);
}
}
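
The PG for every slice is a pure function of the inode and the stripe offset: pg_num = (inode + stripe / pg_stripe_size) % pg_count + 1, so consecutive stripes of one inode rotate across placement groups. A worked check of that mapping with illustrative values:

#include <cstdint>
#include <cstdio>

int main()
{
    uint64_t pg_stripe_size = 4*1024*1024; // DEFAULT_PG_STRIPE_SIZE
    uint64_t pg_count = 16;
    uint64_t inode = 5;
    for (uint64_t stripe = 0; stripe < 4*pg_stripe_size; stripe += pg_stripe_size)
    {
        uint64_t pg_num = (inode + stripe/pg_stripe_size) % pg_count + 1;
        printf("stripe %8lu -> pg %lu\n", stripe, pg_num); // PGs 6, 7, 8, 9
    }
    return 0;
}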
bool cluster_client_t::try_send(cluster_op_t *op, cluster_op_part_t *part)
{
auto pg_it = st_cli.pg_config.find(part->pg_num);
if (pg_it != st_cli.pg_config.end() &&
!pg_it->second.pause && pg_it->second.cur_primary)
{
osd_num_t primary_osd = pg_it->second.cur_primary;
auto peer_it = msgr.osd_peer_fds.find(primary_osd);
if (peer_it != msgr.osd_peer_fds.end())
{
int peer_fd = peer_it->second;
part->osd_num = primary_osd;
part->sent = true;
op->sent_count++;
part->op = {
.op_type = OSD_OP_OUT,
.peer_fd = peer_fd,
.req = { .rw = {
.header = {
.magic = SECONDARY_OSD_OP_MAGIC,
.id = op_id++,
.opcode = op->opcode,
},
.inode = op->inode,
.offset = part->offset,
.len = part->len,
} },
.callback = [this, part](osd_op_t *op_part)
{
handle_op_part(part);
},
};
part->op.send_list.push_back(part->op.req.buf, OSD_PACKET_SIZE);
if (op->opcode == OSD_OP_WRITE)
{
part->op.send_list.push_back(part->buf, part->len);
}
else
{
part->op.buf = part->buf;
}
msgr.outbox_push(&part->op);
return true;
}
else if (msgr.wanted_peers.find(primary_osd) == msgr.wanted_peers.end())
{
msgr.connect_peer(primary_osd, st_cli.peer_states[primary_osd]);
}
}
return false;
}
void cluster_client_t::handle_op_part(cluster_op_part_t *part)
{
cluster_op_t *op = part->parent;
part->sent = false;
op->sent_count--;
part->op.buf = NULL;
if (part->op.reply.hdr.retval != part->op.req.rw.len)
{
// Operation failed, retry
printf(
"Operation part failed on OSD %lu: retval=%ld (expected %u), reconnecting\n",
part->osd_num, part->op.reply.hdr.retval, part->op.req.rw.len
);
msgr.stop_client(part->op.peer_fd);
if (op->sent_count == op->parts.size() - op->done_count - 1)
{
// Resend later when OSDs come up
// FIXME: Check for different types of errors
// FIXME: Repeat operations after a small timeout, for the case when OSD is coming up
sent_ops.erase(op);
unsent_ops.insert(op);
}
if (op->sent_count == 0 && op->needs_reslice)
{
// PG count has changed, reslice the operation
unsent_ops.erase(op);
op->parts.clear();
op->done_count = 0;
op->needs_reslice = false;
execute(op);
}
}
else
{
// OK
part->done = true;
op->done_count++;
if (op->done_count >= op->parts.size())
{
// Finished!
sent_ops.erase(op);
op->retval = op->len;
std::function<void(cluster_op_t*)>(op->callback)(op);
}
}
}
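
handle_op_part maintains a simple invariant per operation: every part is unsent, in flight (sent), or done; the operation completes when done_count reaches parts.size(), and re-slicing after a PG count change is only safe once nothing remains in flight. A sketch of that accounting (illustrative, the real counters live in cluster_op_t):

struct op_progress_t
{
    int sent_count = 0, done_count = 0, total = 0;

    bool finished() const { return done_count >= total; }
    // Re-slicing discards all parts, so it must wait for in-flight ones
    bool safe_to_reslice() const { return sent_count == 0; }
};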

80
cluster_client.h Normal file
View File

@@ -0,0 +1,80 @@
#pragma once
#include "messenger.h"
#include "etcd_state_client.h"
#define MIN_BLOCK_SIZE 4*1024
#define MAX_BLOCK_SIZE 128*1024*1024
#define DEFAULT_BLOCK_SIZE 128*1024
#define DEFAULT_PG_STRIPE_SIZE 4*1024*1024
#define DEFAULT_DISK_ALIGNMENT 4096
#define DEFAULT_BITMAP_GRANULARITY 4096
struct cluster_op_t;
struct cluster_op_part_t
{
cluster_op_t *parent;
uint64_t offset;
uint32_t len;
pg_num_t pg_num;
osd_num_t osd_num;
void *buf;
bool sent;
bool done;
osd_op_t op;
};
struct cluster_op_t
{
uint64_t opcode; // OSD_OP_READ, OSD_OP_WRITE, OSD_OP_SYNC
uint64_t inode;
uint64_t offset;
uint64_t len;
int retval;
void *buf;
std::function<void(cluster_op_t*)> callback;
protected:
bool needs_reslice = false;
int sent_count = 0, done_count = 0;
std::vector<cluster_op_part_t> parts;
friend class cluster_client_t;
};
class cluster_client_t
{
timerfd_manager_t *tfd;
ring_loop_t *ringloop;
uint64_t pg_part_count = 2;
uint64_t pg_stripe_size = 0;
uint64_t bs_block_size = 0;
uint64_t bs_disk_alignment = 0;
uint64_t bs_bitmap_granularity = 0;
uint64_t pg_count = 0;
bool immediate_commit = false;
bool inmemory_commit = false;
uint64_t inmemory_dirty_limit = 32*1024*1024;
int log_level;
uint64_t op_id = 1;
etcd_state_client_t st_cli;
osd_messenger_t msgr;
std::set<cluster_op_t*> sent_ops, unsent_ops;
// unsynced operations are copied in memory to allow replay when cluster isn't in the immediate_commit mode
std::vector<cluster_op_t*> unsynced_ops;
uint64_t unsynced_bytes = 0;
public:
cluster_client_t(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json & config);
void execute(cluster_op_t *op);
protected:
void continue_ops();
void on_load_config_hook(json11::Json::object & cfg);
void on_load_pgs_hook(bool success);
void on_change_hook(json11::Json::object & changes);
void on_change_osd_state_hook(uint64_t peer_osd);
bool try_send(cluster_op_t *op, cluster_op_part_t *part);
void handle_op_part(cluster_op_part_t *part);
};
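
A hedged sketch of how this public interface is meant to be driven (the event-loop setup and buffer handling here are assumed, not taken from this commit):

#include "cluster_client.h"
#include <cstdlib>

void read_example(ring_loop_t *ringloop, timerfd_manager_t *tfd, json11::Json & cfg)
{
    cluster_client_t *cli = new cluster_client_t(ringloop, tfd, cfg);
    cluster_op_t *op = new cluster_op_t();
    op->opcode = OSD_OP_READ;
    op->inode = 1;
    op->offset = 0;
    op->len = 4096;
    op->buf = malloc(op->len);
    op->callback = [](cluster_op_t *op)
    {
        // op->retval == op->len on success
        free(op->buf);
        delete op;
    };
    cli->execute(op); // completion arrives via the callback
}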

View File

@@ -1,13 +0,0 @@
#!/bin/bash
gcc -I. -E -o fio_headers.i src/fio_headers.h
rm -rf fio-copy
for i in `grep -Po 'fio/[^"]+' fio_headers.i | sort | uniq`; do
j=${i##fio/}
p=$(dirname $j)
mkdir -p fio-copy/$p
cp $i fio-copy/$j
done
rm fio_headers.i

View File

@@ -1,18 +0,0 @@
#!/bin/bash
#cd qemu
#debian/rules b/configure-stamp
#cd b/qemu; make qapi
gcc -I qemu/b/qemu `pkg-config glib-2.0 --cflags` \
-I qemu/include -E -o qemu_driver.i src/qemu_driver.c
rm -rf qemu-copy
for i in `grep -Po 'qemu/[^"]+' qemu_driver.i | sort | uniq`; do
j=${i##qemu/}
p=$(dirname $j)
mkdir -p qemu-copy/$p
cp $i qemu-copy/$j
done
rm qemu_driver.i

@@ -1 +0,0 @@
Subproject commit 5dc108754ad40d3b1d024f9bd7cca0595ef1a1db

View File

@@ -8,10 +8,4 @@
 // unsigned __int64 _mm_crc32_u64 (unsigned __int64 crc, unsigned __int64 v)
 // unsigned int _mm_crc32_u8 (unsigned int crc, unsigned char v)
 
-#ifdef __cplusplus
-extern "C" {
-#endif
 uint32_t crc32c(uint32_t crc, const void *buf, size_t len);
-#ifdef __cplusplus
-};
-#endif
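
The guard removed above is the standard pattern for exposing a C implementation to C++ callers: without extern "C" the C++ compiler would name-mangle the declaration and linking against the C object file would fail. The generic form, for reference:

// Compiles as plain C when included from C sources, and disables C++
// name mangling when included from C++ sources.
#ifdef __cplusplus
extern "C" {
#endif
void some_c_function(int arg);
#ifdef __cplusplus
}
#endif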

View File

@@ -1,7 +0,0 @@
#!/bin/bash
sed 's/$REL/bullseye/g' < vitastor.Dockerfile > ../Dockerfile
cd ..
mkdir -p packages
sudo podman build -v `pwd`/packages:/root/packages -f Dockerfile .
rm Dockerfile

View File

@@ -1,7 +0,0 @@
#!/bin/bash
sed 's/$REL/buster/g' < vitastor.Dockerfile > ../Dockerfile
cd ..
mkdir -p packages
sudo podman build -v `pwd`/packages:/root/packages -f Dockerfile .
rm Dockerfile

17
debian/changelog vendored
View File

@@ -1,17 +0,0 @@
vitastor (0.5.10-1) unstable; urgency=medium
* Bugfixes
-- Vitaliy Filippov <vitalif@yourcmc.ru> Tue, 02 Feb 2021 23:01:24 +0300
vitastor (0.5.1-1) unstable; urgency=medium
* Add jerasure support
-- Vitaliy Filippov <vitalif@yourcmc.ru> Sat, 05 Dec 2020 17:02:26 +0300
vitastor (0.5-1) unstable; urgency=medium
* First packaging for Debian
-- Vitaliy Filippov <vitalif@yourcmc.ru> Thu, 05 Nov 2020 02:20:59 +0300

1
debian/compat vendored
View File

@@ -1 +0,0 @@
13

17
debian/control vendored
View File

@@ -1,17 +0,0 @@
Source: vitastor
Section: admin
Priority: optional
Maintainer: Vitaliy Filippov <vitalif@yourcmc.ru>
Build-Depends: debhelper, liburing-dev (>= 0.6), g++ (>= 8), libstdc++6 (>= 8), linux-libc-dev, libgoogle-perftools-dev, libjerasure-dev, libgf-complete-dev
Standards-Version: 4.5.0
Homepage: https://vitastor.io/
Rules-Requires-Root: no
Package: vitastor
Architecture: amd64
Depends: ${shlibs:Depends}, ${misc:Depends}, fio (= ${dep:fio}), qemu (= ${dep:qemu}), nodejs (>= 10), node-sprintf-js, node-ws (>= 7), libjerasure2, lp-solve
Description: Vitastor, a fast software-defined clustered block storage
Vitastor is a small, simple and fast clustered block storage system (storage for VM drives),
architecturally similar to Ceph, which means strong consistency, primary replication,
symmetric clustering and automatic data distribution over any number of drives of any
size with configurable redundancy (replication or erasure codes/XOR).

21 debian/copyright vendored

@ -1,21 +0,0 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: vitastor
Upstream-Contact: Vitaliy Filippov <vitalif@yourcmc.ru>
Source: https://vitastor.io
Files: *
Copyright: 2019+ Vitaliy Filippov <vitalif@yourcmc.ru>
License: Multiple licenses VNPL-1.1 and/or GPL-2.0+
All server-side code (OSD, Monitor and so on) is licensed under the terms of
Vitastor Network Public License 1.1 (VNPL 1.1), a copyleft license based on
GNU GPLv3.0 with the additional "Network Interaction" clause which requires
opensourcing all programs directly or indirectly interacting with Vitastor
through a computer network and expressly designed to be used in conjunction
with it ("Proxy Programs"). Proxy Programs may be made public not only under
the terms of the same license, but also under the terms of any GPL-Compatible
Free Software License, as listed by the Free Software Foundation.
This is a stricter copyleft license than the Affero GPL.
.
Client libraries (cluster_client and so on) are dual-licensed under the same
VNPL 1.1 and also GNU GPL 2.0 or later to allow for compatibility with GPLed
software like QEMU and fio.

3 debian/install vendored

@ -1,3 +0,0 @@
VNPL-1.1.txt usr/share/doc/vitastor
GPL-2.0.txt usr/share/doc/vitastor
mon usr/lib/vitastor

44 debian/patched-qemu.Dockerfile

@ -1,44 +0,0 @@
# Build patched QEMU for Debian Buster or Bullseye/Sid inside a container
# cd ..; podman build --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f debian/patched-qemu.Dockerfile .
FROM debian:$REL
WORKDIR /root
RUN if [ "$REL" = "buster" ]; then \
echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list; \
echo >> /etc/apt/preferences; \
echo 'Package: *' >> /etc/apt/preferences; \
echo 'Pin: release a=buster-backports' >> /etc/apt/preferences; \
echo 'Pin-Priority: 500' >> /etc/apt/preferences; \
fi; \
grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf; \
echo 'APT::Install-Suggests false;' >> /etc/apt/apt.conf
RUN apt-get update
RUN apt-get -y install qemu fio liburing1 liburing-dev libgoogle-perftools-dev devscripts
RUN apt-get -y build-dep qemu
RUN apt-get -y build-dep fio
RUN apt-get --download-only source qemu
RUN apt-get --download-only source fio
ADD qemu-5.0-vitastor.patch qemu-5.1-vitastor.patch /root/vitastor/
RUN set -e; \
mkdir -p /root/packages/qemu-$REL; \
rm -rf /root/packages/qemu-$REL/*; \
cd /root/packages/qemu-$REL; \
dpkg-source -x /root/qemu*.dsc; \
if [ -d /root/packages/qemu-$REL/qemu-5.0 ]; then \
cp /root/vitastor/qemu-5.0-vitastor.patch /root/packages/qemu-$REL/qemu-5.0/debian/patches; \
echo qemu-5.0-vitastor.patch >> /root/packages/qemu-$REL/qemu-5.0/debian/patches/series; \
else \
cp /root/vitastor/qemu-5.1-vitastor.patch /root/packages/qemu-$REL/qemu-*/debian/patches; \
P=`ls -d /root/packages/qemu-$REL/qemu-*/debian/patches`; \
echo qemu-5.1-vitastor.patch >> $P/series; \
fi; \
cd /root/packages/qemu-$REL/qemu-*/; \
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)(~bpo[\d\+]*)?\).*$/$1/')+vitastor1; \
DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v $V 'Plug Vitastor block driver'; \
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
rm -rf /root/packages/qemu-$REL/qemu-*/

9 debian/rules vendored

@ -1,9 +0,0 @@
#!/usr/bin/make -f
export DH_VERBOSE = 1
%:
dh $@
override_dh_installdeb:
cat debian/substvars >> debian/vitastor.substvars
dh_installdeb

1 debian/source/format

@ -1 +0,0 @@
3.0 (quilt)

2 debian/substvars vendored

@ -1,2 +0,0 @@
dep:fio=3.16-1
dep:qemu=1:5.1+dfsg-4+vitastor1

67 debian/vitastor.Dockerfile

@ -1,67 +0,0 @@
# Build Vitastor packages for Debian Buster or Bullseye/Sid inside a container
# cd ..; podman build --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f debian/vitastor.Dockerfile .
FROM debian:$REL
WORKDIR /root
RUN if [ "$REL" = "buster" ]; then \
echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list; \
echo >> /etc/apt/preferences; \
echo 'Package: *' >> /etc/apt/preferences; \
echo 'Pin: release a=buster-backports' >> /etc/apt/preferences; \
echo 'Pin-Priority: 500' >> /etc/apt/preferences; \
fi; \
grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf; \
echo 'APT::Install-Suggests false;' >> /etc/apt/apt.conf
RUN apt-get update
RUN apt-get -y install qemu fio liburing1 liburing-dev libgoogle-perftools-dev devscripts
RUN apt-get -y build-dep qemu
RUN apt-get -y build-dep fio
RUN apt-get --download-only source qemu
RUN apt-get --download-only source fio
RUN apt-get -y install libjerasure-dev cmake
ADD . /root/vitastor
RUN set -e -x; \
mkdir -p /root/fio-build/; \
cd /root/fio-build/; \
rm -rf /root/fio-build/*; \
dpkg-source -x /root/fio*.dsc; \
cd /root/packages/qemu-$REL/; \
rm -rf qemu*/; \
dpkg-source -x qemu*.dsc; \
cd /root/packages/qemu-$REL/qemu*/; \
debian/rules b/configure-stamp; \
cd b/qemu; \
make -j8 qapi/qapi-builtin-types.h; \
mkdir -p /root/packages/vitastor-$REL; \
rm -rf /root/packages/vitastor-$REL/*; \
cd /root/packages/vitastor-$REL; \
cp -r /root/vitastor vitastor-0.5.10; \
ln -s /root/packages/qemu-$REL/qemu-*/ vitastor-0.5.10/qemu; \
ln -s /root/fio-build/fio-*/ vitastor-0.5.10/fio; \
cd vitastor-0.5.10; \
FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
QEMU=$(head -n1 qemu/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
sh copy-qemu-includes.sh; \
sh copy-fio-includes.sh; \
rm qemu fio; \
mkdir -p a b debian/patches; \
mv qemu-copy b/qemu; \
mv fio-copy b/fio; \
diff -NaurpbB a b > debian/patches/qemu-fio-headers.patch || true; \
echo qemu-fio-headers.patch >> debian/patches/series; \
rm -rf a b; \
rm -rf /root/packages/qemu-$REL/qemu*/; \
echo "dep:fio=$FIO" > debian/substvars; \
echo "dep:qemu=$QEMU" >> debian/substvars; \
cd /root/packages/vitastor-$REL; \
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.5.10.orig.tar.xz vitastor-0.5.10; \
cd vitastor-0.5.10; \
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v "$V""$REL" "Rebuild for $REL"; \
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
rm -rf /root/packages/vitastor-$REL/vitastor-*/
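Note on the build trick above: copy-qemu-includes.sh and copy-fio-includes.sh gather exactly the qemu/fio headers that the Vitastor sources pull in, and the diff -NaurpbB call packs those copies into debian/patches/qemu-fio-headers.patch, so the resulting source package can be rebuilt against matching qemu/fio internals without depending on their full source trees.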

165 dump_journal.cpp Normal file

@ -0,0 +1,165 @@
#define _LARGEFILE64_SOURCE
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <malloc.h>
#include <linux/fs.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include "blockstore_impl.h"
#include "crc32c.h"
struct journal_dump_t
{
char *journal_device;
uint32_t journal_block;
uint64_t journal_offset;
uint64_t journal_len;
uint64_t journal_pos;
int fd;
void dump_block(void *buf);
};
int main(int argc, char *argv[])
{
if (argc < 5)
{
printf("USAGE: %s <journal_file> <journal_block_size> <offset> <size>\n", argv[0]);
return 1;
}
journal_dump_t self;
self.journal_device = argv[1];
self.journal_block = strtoul(argv[2], NULL, 10);
self.journal_offset = strtoull(argv[3], NULL, 10);
self.journal_len = strtoull(argv[4], NULL, 10);
if (self.journal_block < MEM_ALIGNMENT || (self.journal_block % MEM_ALIGNMENT) ||
self.journal_block > 128*1024)
{
printf("Invalid journal block size\n");
return 1;
}
self.fd = open(self.journal_device, O_DIRECT|O_RDONLY);
if (self.fd == -1)
{
printf("Failed to open journal\n");
return 1;
}
void *data = memalign(MEM_ALIGNMENT, self.journal_block);
self.journal_pos = 0;
while (self.journal_pos < self.journal_len)
{
int r = pread(self.fd, data, self.journal_block, self.journal_offset+self.journal_pos);
assert(r == self.journal_block);
uint64_t s;
for (s = 0; s < self.journal_block; s += 8)
{
if (*((uint64_t*)(data+s)) != 0)
break;
}
if (s == self.journal_block)
{
printf("offset %08lx: zeroes\n", self.journal_pos);
self.journal_pos += self.journal_block;
}
else if (((journal_entry*)data)->magic == JOURNAL_MAGIC)
{
printf("offset %08lx:\n", self.journal_pos);
self.dump_block(data);
}
else
{
printf("offset %08lx: no magic in the beginning, looks like random data (pattern=%lx)\n", self.journal_pos, *((uint64_t*)data));
self.journal_pos += self.journal_block;
}
}
free(data);
close(self.fd);
return 0;
}
void journal_dump_t::dump_block(void *buf)
{
uint32_t pos = 0;
journal_pos += journal_block;
int entry = 0;
bool wrapped = false;
while (pos < journal_block)
{
journal_entry *je = (journal_entry*)(buf + pos);
if (je->magic != JOURNAL_MAGIC || je->type < JE_START || je->type > JE_DELETE)
{
break;
}
const char *crc32_valid = je_crc32(je) == je->crc32 ? "(valid)" : "(invalid)";
printf("entry % 3d: crc32=%08x %s prev=%08x ", entry, je->crc32, crc32_valid, je->crc32_prev);
if (je->type == JE_START)
{
printf("je_start start=%08lx\n", je->start.journal_start);
}
else if (je->type == JE_SMALL_WRITE)
{
printf(
"je_small_write oid=%lu:%lu ver=%lu offset=%u len=%u loc=%08lx",
je->small_write.oid.inode, je->small_write.oid.stripe,
je->small_write.version, je->small_write.offset, je->small_write.len,
je->small_write.data_offset
);
if (journal_pos + je->small_write.len > journal_len)
{
// data continues from the beginning of the journal
journal_pos = journal_block;
wrapped = true;
}
if (journal_pos != je->small_write.data_offset)
{
printf(" (mismatched, calculated = %lu)", journal_pos);
}
journal_pos += je->small_write.len;
if (journal_pos >= journal_len)
{
journal_pos = journal_block;
wrapped = true;
}
uint32_t data_crc32 = 0;
void *data = memalign(MEM_ALIGNMENT, je->small_write.len);
int r = pread(fd, data, je->small_write.len, journal_offset+je->small_write.data_offset);
assert(r == je->small_write.len); // keep the read outside assert() so NDEBUG builds still perform it
data_crc32 = crc32c(0, data, je->small_write.len);
free(data);
printf(
" data_crc32=%08x%s", je->small_write.crc32_data,
(data_crc32 != je->small_write.crc32_data) ? " (invalid)" : " (valid)"
);
printf("\n");
}
else if (je->type == JE_BIG_WRITE)
{
printf("je_big_write oid=%lu:%lu ver=%lu loc=%08lx\n", je->big_write.oid.inode, je->big_write.oid.stripe, je->big_write.version, je->big_write.location);
}
else if (je->type == JE_STABLE)
{
printf("je_stable oid=%lu:%lu ver=%lu\n", je->stable.oid.inode, je->stable.oid.stripe, je->stable.version);
}
else if (je->type == JE_ROLLBACK)
{
printf("je_rollback oid=%lu:%lu ver=%lu\n", je->rollback.oid.inode, je->rollback.oid.stripe, je->rollback.version);
}
else if (je->type == JE_DELETE)
{
printf("je_delete oid=%lu:%lu ver=%lu\n", je->del.oid.inode, je->del.oid.stripe, je->del.version);
}
pos += je->size;
entry++;
}
if (wrapped)
{
journal_pos = journal_len;
}
}
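A quick usage sketch (the numbers are hypothetical, take real values from the OSD's journal settings): ./dump_journal ./journal.bin 4096 0 16777216 scans the journal in 4096-byte blocks and prints, for each block, whether it is all zeroes, a chain of valid entries (checking both the per-entry CRC32c and, for small writes, the CRC32c of the journaled data), or unrecognized data lacking the JOURNAL_MAGIC signature.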


@ -1,10 +1,6 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
#include <sys/epoll.h> #include <sys/epoll.h>
#include <sys/poll.h> #include <sys/poll.h>
#include <unistd.h> #include <unistd.h>
#include <stdexcept>
#include "epoll_manager.h" #include "epoll_manager.h"
@ -20,7 +16,7 @@ epoll_manager_t::epoll_manager_t(ring_loop_t *ringloop)
throw std::runtime_error(std::string("epoll_create: ") + strerror(errno)); throw std::runtime_error(std::string("epoll_create: ") + strerror(errno));
} }
tfd = new timerfd_manager_t([this](int fd, bool wr, std::function<void(int, int)> handler) { set_fd_handler(fd, wr, handler); }); tfd = new timerfd_manager_t([this](int fd, std::function<void(int, int)> handler) { set_fd_handler(fd, handler); });
handle_epoll_events(); handle_epoll_events();
} }
@ -35,14 +31,14 @@ epoll_manager_t::~epoll_manager_t()
close(epoll_fd); close(epoll_fd);
} }
void epoll_manager_t::set_fd_handler(int fd, bool wr, std::function<void(int, int)> handler) void epoll_manager_t::set_fd_handler(int fd, std::function<void(int, int)> handler)
{ {
if (handler != NULL) if (handler != NULL)
{ {
bool exists = epoll_handlers.find(fd) != epoll_handlers.end(); bool exists = epoll_handlers.find(fd) != epoll_handlers.end();
epoll_event ev; epoll_event ev;
ev.data.fd = fd; ev.data.fd = fd;
ev.events = (wr ? EPOLLOUT : 0) | EPOLLIN | EPOLLRDHUP | EPOLLET; ev.events = EPOLLOUT | EPOLLIN | EPOLLRDHUP | EPOLLET;
if (epoll_ctl(epoll_fd, exists ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev) < 0) if (epoll_ctl(epoll_fd, exists ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev) < 0)
{ {
throw std::runtime_error(std::string("epoll_ctl: ") + strerror(errno)); throw std::runtime_error(std::string("epoll_ctl: ") + strerror(errno));
@ -84,12 +80,8 @@ void epoll_manager_t::handle_epoll_events()
nfds = epoll_wait(epoll_fd, events, MAX_EPOLL_EVENTS, 0); nfds = epoll_wait(epoll_fd, events, MAX_EPOLL_EVENTS, 0);
for (int i = 0; i < nfds; i++) for (int i = 0; i < nfds; i++)
{ {
auto cb_it = epoll_handlers.find(events[i].data.fd); auto & cb = epoll_handlers[events[i].data.fd];
if (cb_it != epoll_handlers.end())
{
auto & cb = cb_it->second;
cb(events[i].data.fd, events[i].events); cb(events[i].data.fd, events[i].events);
} }
}
} while (nfds == MAX_EPOLL_EVENTS); } while (nfds == MAX_EPOLL_EVENTS);
} }
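The `wr` flag in the left-hand variant deserves a note: since registration is edge-triggered (EPOLLET), subscribing a descriptor to EPOLLOUT is only useful while the caller actually waits for writability — for example, http_client below passes wr=true while a non-blocking connect() is in flight, because connect completion is reported as a writability edge. The right-hand variant registers EPOLLOUT for every descriptor unconditionally, which costs at least one spurious wakeup per registration.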


@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
#pragma once #pragma once
#include <map> #include <map>
@ -16,7 +13,7 @@ class epoll_manager_t
public: public:
epoll_manager_t(ring_loop_t *ringloop); epoll_manager_t(ring_loop_t *ringloop);
~epoll_manager_t(); ~epoll_manager_t();
void set_fd_handler(int fd, bool wr, std::function<void(int, int)> handler); void set_fd_handler(int fd, std::function<void(int, int)> handler);
void handle_epoll_events(); void handle_epoll_events();
timerfd_manager_t *tfd; timerfd_manager_t *tfd;

424 etcd_state_client.cpp Normal file

@ -0,0 +1,424 @@
#include "osd_ops.h"
#include "pg_states.h"
#include "etcd_state_client.h"
#include "http_client.h"
#include "base64.h"
json_kv_t etcd_state_client_t::parse_etcd_kv(const json11::Json & kv_json)
{
json_kv_t kv;
kv.key = base64_decode(kv_json["key"].string_value());
std::string json_err, json_text = base64_decode(kv_json["value"].string_value());
kv.value = json_text == "" ? json11::Json() : json11::Json::parse(json_text, json_err);
if (json_err != "")
{
printf("Bad JSON in etcd key %s: %s (value: %s)\n", kv.key.c_str(), json_err.c_str(), json_text.c_str());
kv.key = "";
}
return kv;
}
void etcd_state_client_t::etcd_txn(json11::Json txn, int timeout, std::function<void(std::string, json11::Json)> callback)
{
etcd_call("/kv/txn", txn, timeout, callback);
}
void etcd_state_client_t::etcd_call(std::string api, json11::Json payload, int timeout, std::function<void(std::string, json11::Json)> callback)
{
std::string etcd_address = etcd_addresses[rand() % etcd_addresses.size()];
std::string etcd_api_path;
int pos = etcd_address.find('/');
if (pos >= 0)
{
etcd_api_path = etcd_address.substr(pos);
etcd_address = etcd_address.substr(0, pos);
}
std::string req = payload.dump();
req = "POST "+etcd_api_path+api+" HTTP/1.1\r\n"
"Host: "+etcd_address+"\r\n"
"Content-Type: application/json\r\n"
"Content-Length: "+std::to_string(req.size())+"\r\n"
"Connection: close\r\n"
"\r\n"+req;
http_request_json(tfd, etcd_address, req, timeout, callback);
}
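For illustration, with a hypothetical etcd at 127.0.0.1:2379 and the default /v3 API path, an etcd_call("/kv/range", ...) would put a raw request like the following on the wire (key and length are placeholders):

POST /v3/kv/range HTTP/1.1
Host: 127.0.0.1:2379
Content-Type: application/json
Content-Length: <body size>
Connection: close

{"key": "<base64-encoded key>"}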
void etcd_state_client_t::parse_config(json11::Json & config)
{
this->etcd_addresses.clear();
if (config["etcd_address"].is_string())
{
std::string ea = config["etcd_address"].string_value();
while (1)
{
int pos = ea.find(',');
std::string addr = pos >= 0 ? ea.substr(0, pos) : ea;
if (addr.length() > 0)
{
if (addr.find('/') < 0)
addr += "/v3";
this->etcd_addresses.push_back(addr);
}
if (pos >= 0)
ea = ea.substr(pos+1);
else
break;
}
}
else if (config["etcd_address"].array_items().size())
{
for (auto & ea: config["etcd_address"].array_items())
{
std::string addr = ea.string_value();
if (addr != "")
{
if (addr.find('/') < 0)
addr += "/v3";
this->etcd_addresses.push_back(addr);
}
}
}
this->etcd_prefix = config["etcd_prefix"].string_value();
if (this->etcd_prefix == "")
{
this->etcd_prefix = "/microceph";
}
else if (this->etcd_prefix[0] != '/')
{
this->etcd_prefix = "/"+this->etcd_prefix;
}
this->log_level = config["log_level"].int64_value();
}
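As a minimal standalone sketch of the address normalization above (the function name is mine, not part of the code), mirroring the npos comparison:

#include <iostream>
#include <string>
#include <vector>

// Split a comma-separated etcd_address value and default the API path to /v3,
// as parse_config() does for string-valued configs.
static std::vector<std::string> normalize_etcd_addresses(std::string ea)
{
    std::vector<std::string> out;
    while (1)
    {
        size_t pos = ea.find(',');
        std::string addr = pos != std::string::npos ? ea.substr(0, pos) : ea;
        if (addr.length() > 0)
        {
            if (addr.find('/') == std::string::npos)
                addr += "/v3";
            out.push_back(addr);
        }
        if (pos == std::string::npos)
            break;
        ea = ea.substr(pos+1);
    }
    return out;
}

int main()
{
    // Prints "10.0.0.1:2379/v3" and "10.0.0.2:2379/v3beta"
    for (auto & addr: normalize_etcd_addresses("10.0.0.1:2379,10.0.0.2:2379/v3beta"))
        std::cout << addr << "\n";
}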
void etcd_state_client_t::start_etcd_watcher()
{
std::string etcd_address = etcd_addresses[rand() % etcd_addresses.size()];
std::string etcd_api_path;
int pos = etcd_address.find('/');
if (pos >= 0)
{
etcd_api_path = etcd_address.substr(pos);
etcd_address = etcd_address.substr(0, pos);
}
etcd_watches_initialised = 0;
etcd_watch_ws = open_websocket(tfd, etcd_address, etcd_api_path+"/watch", ETCD_SLOW_TIMEOUT, [this](const http_response_t *msg)
{
if (msg->body.length())
{
std::string json_err;
json11::Json data = json11::Json::parse(msg->body, json_err);
if (json_err != "")
{
printf("Bad JSON in etcd event: %s, ignoring event\n", json_err.c_str());
}
else
{
if (data["result"]["created"].bool_value())
{
etcd_watches_initialised++;
}
if (etcd_watches_initialised == 4)
{
etcd_watch_revision = data["result"]["header"]["revision"].uint64_value();
}
// First gather all changes into a hash to remove multiple overwrites
json11::Json::object changes;
for (auto & ev: data["result"]["events"].array_items())
{
auto kv = parse_etcd_kv(ev["kv"]);
if (kv.key != "")
{
changes[kv.key] = kv.value;
}
}
for (auto & kv: changes)
{
if (this->log_level > 0)
{
printf("Incoming event: %s -> %s\n", kv.first.c_str(), kv.second.dump().c_str());
}
parse_state(kv.first, kv.second);
}
// React to changes
if (on_change_hook != NULL)
{
on_change_hook(changes);
}
}
}
if (msg->eof)
{
etcd_watch_ws = NULL;
if (etcd_watches_initialised == 0)
{
// Connection not established, retry in <ETCD_SLOW_TIMEOUT>
tfd->set_timer(ETCD_SLOW_TIMEOUT, false, [this](int)
{
start_etcd_watcher();
});
}
else
{
// Connection was live, retry immediately
start_etcd_watcher();
}
}
});
etcd_watch_ws->post_message(WS_TEXT, json11::Json(json11::Json::object {
{ "create_request", json11::Json::object {
{ "key", base64_encode(etcd_prefix+"/config/") },
{ "range_end", base64_encode(etcd_prefix+"/config0") },
{ "start_revision", etcd_watch_revision+1 },
{ "watch_id", ETCD_CONFIG_WATCH_ID },
} }
}).dump());
etcd_watch_ws->post_message(WS_TEXT, json11::Json(json11::Json::object {
{ "create_request", json11::Json::object {
{ "key", base64_encode(etcd_prefix+"/osd/state/") },
{ "range_end", base64_encode(etcd_prefix+"/osd/state0") },
{ "start_revision", etcd_watch_revision+1 },
{ "watch_id", ETCD_OSD_STATE_WATCH_ID },
} }
}).dump());
etcd_watch_ws->post_message(WS_TEXT, json11::Json(json11::Json::object {
{ "create_request", json11::Json::object {
{ "key", base64_encode(etcd_prefix+"/pg/state/") },
{ "range_end", base64_encode(etcd_prefix+"/pg/state0") },
{ "start_revision", etcd_watch_revision+1 },
{ "watch_id", ETCD_PG_STATE_WATCH_ID },
} }
}).dump());
etcd_watch_ws->post_message(WS_TEXT, json11::Json(json11::Json::object {
{ "create_request", json11::Json::object {
{ "key", base64_encode(etcd_prefix+"/pg/history/") },
{ "range_end", base64_encode(etcd_prefix+"/pg/history0") },
{ "start_revision", etcd_watch_revision+1 },
{ "watch_id", ETCD_PG_HISTORY_WATCH_ID },
} }
}).dump());
}
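Note the ordering above: etcd_watch_revision is captured only after all four watches (config, OSD state, PG state, PG history) confirm creation, and every create_request starts at etcd_watch_revision+1, so a reconnect resumes from the last processed revision instead of dropping events. If the websocket dies before any watch was established, the retry is delayed by ETCD_SLOW_TIMEOUT; if the connection had been live, it reconnects immediately.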
void etcd_state_client_t::load_global_config()
{
etcd_call("/kv/range", json11::Json::object {
{ "key", base64_encode(etcd_prefix+"/config/global") }
}, ETCD_SLOW_TIMEOUT, [this](std::string err, json11::Json data)
{
if (err != "")
{
printf("Error reading OSD configuration from etcd: %s\n", err.c_str());
tfd->set_timer(ETCD_SLOW_TIMEOUT, false, [this](int timer_id)
{
load_global_config();
});
return;
}
if (!etcd_watch_revision)
{
etcd_watch_revision = data["header"]["revision"].uint64_value();
}
json11::Json::object global_config;
if (data["kvs"].array_items().size() > 0)
{
auto kv = parse_etcd_kv(data["kvs"][0]);
if (kv.value.is_object())
{
global_config = kv.value.object_items();
}
}
on_load_config_hook(global_config);
});
}
void etcd_state_client_t::load_pgs()
{
json11::Json::array txn = {
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(etcd_prefix+"/config/pgs") },
} }
},
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(etcd_prefix+"/pg/history/") },
{ "range_end", base64_encode(etcd_prefix+"/pg/history0") },
} }
},
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(etcd_prefix+"/pg/state/") },
{ "range_end", base64_encode(etcd_prefix+"/pg/state0") },
} }
},
json11::Json::object {
{ "request_range", json11::Json::object {
{ "key", base64_encode(etcd_prefix+"/osd/state/") },
{ "range_end", base64_encode(etcd_prefix+"/osd/state0") },
} }
},
};
json11::Json::object req = { { "success", txn } };
json11::Json checks = load_pgs_checks_hook != NULL ? load_pgs_checks_hook() : json11::Json();
if (checks.array_items().size() > 0)
{
req["compare"] = checks;
}
etcd_txn(req, ETCD_SLOW_TIMEOUT, [this](std::string err, json11::Json data)
{
if (err != "")
{
printf("Error loading PGs from etcd: %s\n", err.c_str());
tfd->set_timer(ETCD_SLOW_TIMEOUT, false, [this](int timer_id)
{
load_pgs();
});
return;
}
if (!data["succeeded"].bool_value())
{
on_load_pgs_hook(false);
return;
}
for (auto & res: data["responses"].array_items())
{
for (auto & kv_json: res["response_range"]["kvs"].array_items())
{
auto kv = parse_etcd_kv(kv_json);
parse_state(kv.key, kv.value);
}
}
on_load_pgs_hook(true);
});
}
void etcd_state_client_t::parse_state(const std::string & key, const json11::Json & value)
{
if (key == etcd_prefix+"/config/pgs")
{
for (auto & pg_item: this->pg_config)
{
pg_item.second.exists = false;
}
for (auto & pg_item: value["items"].object_items())
{
pg_num_t pg_num = stoull_full(pg_item.first);
if (!pg_num)
{
printf("Bad key in PG configuration: %s (must be a number), skipped\n", pg_item.first.c_str());
continue;
}
this->pg_config[pg_num].exists = true;
this->pg_config[pg_num].pause = pg_item.second["pause"].bool_value();
this->pg_config[pg_num].primary = pg_item.second["primary"].uint64_value();
this->pg_config[pg_num].target_set.clear();
for (auto pg_osd: pg_item.second["osd_set"].array_items())
{
this->pg_config[pg_num].target_set.push_back(pg_osd.uint64_value());
}
if (this->pg_config[pg_num].target_set.size() != 3)
{
printf("Bad PG %u config format: incorrect osd_set = %s\n", pg_num, pg_item.second["osd_set"].dump().c_str());
this->pg_config[pg_num].target_set.resize(3);
this->pg_config[pg_num].pause = true;
}
}
}
else if (key.substr(0, etcd_prefix.length()+12) == etcd_prefix+"/pg/history/")
{
// <etcd_prefix>/pg/history/%d
pg_num_t pg_num = stoull_full(key.substr(etcd_prefix.length()+12));
if (!pg_num)
{
printf("Bad etcd key %s, ignoring\n", key.c_str());
}
else
{
auto & pg_cfg = this->pg_config[pg_num];
pg_cfg.target_history.clear();
pg_cfg.all_peers.clear();
// Refuse to start PG if any set of the <osd_sets> has no live OSDs
for (auto hist_item: value["osd_sets"].array_items())
{
std::vector<osd_num_t> history_set;
for (auto pg_osd: hist_item.array_items())
{
history_set.push_back(pg_osd.uint64_value());
}
pg_cfg.target_history.push_back(history_set);
}
// Include these additional OSDs when peering the PG
for (auto pg_osd: value["all_peers"].array_items())
{
pg_cfg.all_peers.push_back(pg_osd.uint64_value());
}
}
}
else if (key.substr(0, etcd_prefix.length()+10) == etcd_prefix+"/pg/state/")
{
// <etcd_prefix>/pg/state/%d
pg_num_t pg_num = stoull_full(key.substr(etcd_prefix.length()+10));
if (!pg_num)
{
printf("Bad etcd key %s, ignoring\n", key.c_str());
}
else if (value.is_null())
{
this->pg_config[pg_num].cur_primary = 0;
this->pg_config[pg_num].cur_state = 0;
}
else
{
osd_num_t cur_primary = value["primary"].uint64_value();
int state = 0;
for (auto & e: value["state"].array_items())
{
int i;
for (i = 0; i < pg_state_bit_count; i++)
{
if (e.string_value() == pg_state_names[i])
{
state = state | pg_state_bits[i];
break;
}
}
if (i >= pg_state_bit_count)
{
printf("Unexpected PG %u state keyword in etcd: %s\n", pg_num, e.dump().c_str());
return;
}
}
if (!cur_primary || !value["state"].is_array() || !state ||
(state & PG_OFFLINE) && state != PG_OFFLINE ||
(state & PG_PEERING) && state != PG_PEERING ||
(state & PG_INCOMPLETE) && state != PG_INCOMPLETE)
{
printf("Unexpected PG %u state in etcd: primary=%lu, state=%s\n", pg_num, cur_primary, value["state"].dump().c_str());
return;
}
this->pg_config[pg_num].cur_primary = cur_primary;
this->pg_config[pg_num].cur_state = state;
}
}
else if (key.substr(0, etcd_prefix.length()+11) == etcd_prefix+"/osd/state/")
{
// <etcd_prefix>/osd/state/%d
osd_num_t peer_osd = std::stoull(key.substr(etcd_prefix.length()+11));
if (peer_osd > 0)
{
if (value.is_object() && value["state"] == "up" &&
value["addresses"].is_array() &&
value["port"].int64_value() > 0 && value["port"].int64_value() < 65536)
{
this->peer_states[peer_osd] = value;
}
else
{
this->peer_states.erase(peer_osd);
}
if (on_change_osd_state_hook != NULL)
{
on_change_osd_state_hook(peer_osd);
}
}
}
}
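In summary, parse_state() recognizes four key families: <etcd_prefix>/config/pgs (the whole PG table), /pg/history/<n> (previous OSD sets that must be consulted while peering), /pg/state/<n> (current primary plus state bits, with a sanity check that OFFLINE, PEERING and INCOMPLETE never combine with other bits), and /osd/state/<n> (liveness records, kept in peer_states only while the state is "up" with a plausible address and port).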


@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
#pragma once #pragma once
#include "osd_id.h" #include "osd_id.h"
@ -16,14 +13,6 @@
#define ETCD_SLOW_TIMEOUT 5000 #define ETCD_SLOW_TIMEOUT 5000
#define ETCD_QUICK_TIMEOUT 1000 #define ETCD_QUICK_TIMEOUT 1000
#define DEFAULT_BLOCK_SIZE 128*1024
struct json_kv_t
{
std::string key;
json11::Json value;
};
struct pg_config_t struct pg_config_t
{ {
bool exists; bool exists;
@ -34,47 +23,16 @@ struct pg_config_t
bool pause; bool pause;
osd_num_t cur_primary; osd_num_t cur_primary;
int cur_state; int cur_state;
uint64_t epoch;
}; };
struct pool_config_t struct json_kv_t
{ {
bool exists; std::string key;
pool_id_t id; json11::Json value;
std::string name;
uint64_t scheme;
uint64_t pg_size, pg_minsize, parity_chunks;
uint64_t pg_count;
uint64_t real_pg_count;
std::string failure_domain;
uint64_t max_osd_combinations;
uint64_t pg_stripe_size;
std::map<pg_num_t, pg_config_t> pg_config;
};
struct inode_config_t
{
uint64_t num;
std::string name;
uint64_t size;
inode_t parent_id;
bool readonly;
};
struct inode_watch_t
{
std::string name;
inode_config_t cfg;
}; };
struct etcd_state_client_t struct etcd_state_client_t
{ {
protected:
std::vector<inode_watch_t*> watches;
websocket_t *etcd_watch_ws = NULL;
uint64_t bs_block_size = 0;
void add_etcd_url(std::string);
public:
std::vector<std::string> etcd_addresses; std::vector<std::string> etcd_addresses;
std::string etcd_prefix; std::string etcd_prefix;
int log_level = 0; int log_level = 0;
@ -82,17 +40,15 @@ public:
int etcd_watches_initialised = 0; int etcd_watches_initialised = 0;
uint64_t etcd_watch_revision = 0; uint64_t etcd_watch_revision = 0;
std::map<pool_id_t, pool_config_t> pool_config; websocket_t *etcd_watch_ws = NULL;
std::map<pg_num_t, pg_config_t> pg_config;
std::map<osd_num_t, json11::Json> peer_states; std::map<osd_num_t, json11::Json> peer_states;
std::map<inode_t, inode_config_t> inode_config;
std::map<std::string, inode_t> inode_by_name;
std::function<void(json11::Json::object &)> on_change_hook; std::function<void(json11::Json::object &)> on_change_hook;
std::function<void(json11::Json::object &)> on_load_config_hook; std::function<void(json11::Json::object &)> on_load_config_hook;
std::function<json11::Json()> load_pgs_checks_hook; std::function<json11::Json()> load_pgs_checks_hook;
std::function<void(bool)> on_load_pgs_hook; std::function<void(bool)> on_load_pgs_hook;
std::function<void(pool_id_t, pg_num_t)> on_change_pg_history_hook; std::function<void(uint64_t)> on_change_osd_state_hook;
std::function<void(osd_num_t)> on_change_osd_state_hook;
json_kv_t parse_etcd_kv(const json11::Json & kv_json); json_kv_t parse_etcd_kv(const json11::Json & kv_json);
void etcd_call(std::string api, json11::Json payload, int timeout, std::function<void(std::string, json11::Json)> callback); void etcd_call(std::string api, json11::Json payload, int timeout, std::function<void(std::string, json11::Json)> callback);
@ -102,7 +58,4 @@ public:
void load_pgs(); void load_pgs();
void parse_state(const std::string & key, const json11::Json & value); void parse_state(const std::string & key, const json11::Json & value);
void parse_config(json11::Json & config); void parse_config(json11::Json & config);
inode_watch_t* watch_inode(std::string name);
void close_watch(inode_watch_t* watch);
~etcd_state_client_t();
}; };


@ -1,22 +1,19 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
// FIO engine to test cluster I/O // FIO engine to test cluster I/O
// //
// Random write: // Random write:
// //
// fio -thread -ioengine=./libfio_cluster.so -name=test -bs=4k -direct=1 -fsync=16 -iodepth=16 -rw=randwrite \ // fio -thread -ioengine=./libfio_cluster.so -name=test -bs=4k -direct=1 -fsync=16 -iodepth=16 -rw=randwrite \
// -etcd=127.0.0.1:2379 [-etcd_prefix=/vitastor] (-image=testimg | -pool=1 -inode=1 -size=1000M) // -etcd=127.0.0.1:2379 [-etcd_prefix=/microceph] -size=1000M
// //
// Linear write: // Linear write:
// //
// fio -thread -ioengine=./libfio_cluster.so -name=test -bs=128k -direct=1 -fsync=32 -iodepth=32 -rw=write \ // fio -thread -ioengine=./libfio_cluster.so -name=test -bs=128k -direct=1 -fsync=32 -iodepth=32 -rw=write \
// -etcd=127.0.0.1:2379 [-etcd_prefix=/vitastor] -image=testimg // -etcd=127.0.0.1:2379 [-etcd_prefix=/microceph] -size=1000M
// //
// Random read (run with -iodepth=32 or -iodepth=1): // Random read (run with -iodepth=32 or -iodepth=1):
// //
// fio -thread -ioengine=./libfio_cluster.so -name=test -bs=4k -direct=1 -iodepth=32 -rw=randread \ // fio -thread -ioengine=./libfio_cluster.so -name=test -bs=4k -direct=1 -iodepth=32 -rw=randread \
// -etcd=127.0.0.1:2379 [-etcd_prefix=/vitastor] -image=testimg // -etcd=127.0.0.1:2379 [-etcd_prefix=/microceph] -size=1000M
#include <sys/types.h> #include <sys/types.h>
#include <sys/socket.h> #include <sys/socket.h>
@ -28,14 +25,18 @@
#include "epoll_manager.h" #include "epoll_manager.h"
#include "cluster_client.h" #include "cluster_client.h"
#include "fio_headers.h" extern "C" {
#define CONFIG_HAVE_GETTID
#define CONFIG_PWRITEV2
#include "fio/fio.h"
#include "fio/optgroup.h"
}
struct sec_data struct sec_data
{ {
ring_loop_t *ringloop = NULL; ring_loop_t *ringloop = NULL;
epoll_manager_t *epmgr = NULL; epoll_manager_t *epmgr = NULL;
cluster_client_t *cli = NULL; cluster_client_t *cli = NULL;
inode_watch_t *watch = NULL;
bool last_sync = false; bool last_sync = false;
/* The list of completed io_u structs. */ /* The list of completed io_u structs. */
std::vector<io_u*> completed; std::vector<io_u*> completed;
@ -48,10 +49,7 @@ struct sec_options
int __pad; int __pad;
char *etcd_host = NULL; char *etcd_host = NULL;
char *etcd_prefix = NULL; char *etcd_prefix = NULL;
char *image = NULL; int inode = 0;
uint64_t pool = 0;
uint64_t inode = 0;
int cluster_log = 0;
int trace = 0; int trace = 0;
}; };
@ -66,29 +64,11 @@ static struct fio_option options[] = {
.group = FIO_OPT_G_FILENAME, .group = FIO_OPT_G_FILENAME,
}, },
{ {
.name = "etcd_prefix", .name = "etcd",
.lname = "etcd key prefix", .lname = "etcd key prefix",
.type = FIO_OPT_STR_STORE, .type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct sec_options, etcd_prefix), .off1 = offsetof(struct sec_options, etcd_prefix),
.help = "etcd key prefix, by default /vitastor", .help = "etcd key prefix, by default /microceph",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = "image",
.lname = "Vitastor image name",
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct sec_options, image),
.help = "Vitastor image name to run tests on",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = "pool",
.lname = "pool number for the inode",
.type = FIO_OPT_INT,
.off1 = offsetof(struct sec_options, pool),
.help = "pool number for the inode to run tests on",
.category = FIO_OPT_C_ENGINE, .category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_FILENAME, .group = FIO_OPT_G_FILENAME,
}, },
@ -97,17 +77,7 @@ static struct fio_option options[] = {
.lname = "inode to run tests on", .lname = "inode to run tests on",
.type = FIO_OPT_INT, .type = FIO_OPT_INT,
.off1 = offsetof(struct sec_options, inode), .off1 = offsetof(struct sec_options, inode),
.help = "inode number to run tests on", .help = "inode to run tests on (1 by default)",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = "cluster_log_level",
.lname = "cluster log level",
.type = FIO_OPT_INT,
.off1 = offsetof(struct sec_options, cluster_log),
.help = "Set log level for the Vitastor client",
.def = "0",
.category = FIO_OPT_C_ENGINE, .category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_FILENAME, .group = FIO_OPT_G_FILENAME,
}, },
@ -128,15 +98,8 @@ static struct fio_option options[] = {
static int sec_setup(struct thread_data *td) static int sec_setup(struct thread_data *td)
{ {
sec_options *o = (sec_options*)td->eo;
sec_data *bsd; sec_data *bsd;
if (!o->etcd_host)
{
td_verror(td, EINVAL, "etcd address is missing");
return 1;
}
bsd = new sec_data; bsd = new sec_data;
if (!bsd) if (!bsd)
{ {
@ -152,51 +115,6 @@ static int sec_setup(struct thread_data *td)
td->o.open_files++; td->o.open_files++;
} }
json11::Json cfg = json11::Json::object {
{ "etcd_address", std::string(o->etcd_host) },
{ "etcd_prefix", std::string(o->etcd_prefix ? o->etcd_prefix : "/vitastor") },
{ "log_level", o->cluster_log },
};
if (!o->image)
{
if (!(o->inode & ((1l << (64-POOL_ID_BITS)) - 1)))
{
td_verror(td, EINVAL, "inode number is missing");
return 1;
}
if (o->pool)
{
o->inode = (o->inode & ((1l << (64-POOL_ID_BITS)) - 1)) | (o->pool << (64-POOL_ID_BITS));
}
if (!(o->inode >> (64-POOL_ID_BITS)))
{
td_verror(td, EINVAL, "pool is missing");
return 1;
}
}
else
{
o->inode = 0;
}
bsd->ringloop = new ring_loop_t(512);
bsd->epmgr = new epoll_manager_t(bsd->ringloop);
bsd->cli = new cluster_client_t(bsd->ringloop, bsd->epmgr->tfd, cfg);
if (o->image)
{
while (!bsd->cli->is_ready())
{
bsd->ringloop->loop();
if (bsd->cli->is_ready())
break;
bsd->ringloop->wait();
}
bsd->watch = bsd->cli->st_cli.watch_inode(std::string(o->image));
td->files[0]->real_file_size = bsd->watch->cfg.size;
}
bsd->trace = o->trace ? true : false;
return 0; return 0;
} }
@ -205,20 +123,32 @@ static void sec_cleanup(struct thread_data *td)
sec_data *bsd = (sec_data*)td->io_ops_data; sec_data *bsd = (sec_data*)td->io_ops_data;
if (bsd) if (bsd)
{ {
if (bsd->watch)
{
bsd->cli->st_cli.close_watch(bsd->watch);
}
delete bsd->cli; delete bsd->cli;
delete bsd->epmgr; delete bsd->epmgr;
delete bsd->ringloop; delete bsd->ringloop;
delete bsd; bsd->cli = NULL;
bsd->epmgr = NULL;
bsd->ringloop = NULL;
} }
} }
/* Connect to the server from each thread. */ /* Connect to the server from each thread. */
static int sec_init(struct thread_data *td) static int sec_init(struct thread_data *td)
{ {
sec_options *o = (sec_options*)td->eo;
sec_data *bsd = (sec_data*)td->io_ops_data;
json11::Json cfg = json11::Json::object {
{ "etcd_address", std::string(o->etcd_host) },
{ "etcd_prefix", std::string(o->etcd_prefix ? o->etcd_prefix : "/microceph") },
};
bsd->ringloop = new ring_loop_t(512);
bsd->epmgr = new epoll_manager_t(bsd->ringloop);
bsd->cli = new cluster_client_t(bsd->ringloop, bsd->epmgr->tfd, cfg);
bsd->trace = o->trace ? true : false;
return 0; return 0;
} }
@ -238,26 +168,22 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
io->engine_data = bsd; io->engine_data = bsd;
cluster_op_t *op = new cluster_op_t; cluster_op_t *op = new cluster_op_t;
op->inode = opt->image ? bsd->watch->cfg.num : opt->inode;
switch (io->ddir) switch (io->ddir)
{ {
case DDIR_READ: case DDIR_READ:
op->opcode = OSD_OP_READ; op->opcode = OSD_OP_READ;
op->inode = opt->inode;
op->offset = io->offset; op->offset = io->offset;
op->len = io->xfer_buflen; op->len = io->xfer_buflen;
op->iov.push_back(io->xfer_buf, io->xfer_buflen); op->buf = io->xfer_buf;
bsd->last_sync = false; bsd->last_sync = false;
break; break;
case DDIR_WRITE: case DDIR_WRITE:
if (opt->image && bsd->watch->cfg.readonly)
{
io->error = EROFS;
return FIO_Q_COMPLETED;
}
op->opcode = OSD_OP_WRITE; op->opcode = OSD_OP_WRITE;
op->inode = opt->inode;
op->offset = io->offset; op->offset = io->offset;
op->len = io->xfer_buflen; op->len = io->xfer_buflen;
op->iov.push_back(io->xfer_buf, io->xfer_buflen); op->buf = io->xfer_buf;
bsd->last_sync = false; bsd->last_sync = false;
break; break;
case DDIR_SYNC: case DDIR_SYNC:
@ -285,16 +211,8 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
if (opt->trace) if (opt->trace)
{ {
if (io->ddir == DDIR_SYNC) printf("+++ %s # %d\n", io->ddir == DDIR_READ ? "READ" :
{ (io->ddir == DDIR_WRITE ? "WRITE" : "SYNC"), n);
printf("+++ SYNC # %d\n", n);
}
else
{
printf("+++ %s # %d 0x%llx+%llx\n",
io->ddir == DDIR_READ ? "READ" : "WRITE",
n, io->offset, io->xfer_buflen);
}
} }
io->error = 0; io->error = 0;
@ -352,7 +270,7 @@ static int sec_invalidate(struct thread_data *td, struct fio_file *f)
} }
struct ioengine_ops ioengine = { struct ioengine_ops ioengine = {
.name = "vitastor_cluster", .name = "microceph_cluster",
.version = FIO_IOOPS_VERSION, .version = FIO_IOOPS_VERSION,
.flags = FIO_MEMALIGN | FIO_DISKLESSIO | FIO_NOEXTEND, .flags = FIO_MEMALIGN | FIO_DISKLESSIO | FIO_NOEXTEND,
.setup = sec_setup, .setup = sec_setup,


@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
// FIO engine to test Blockstore // FIO engine to test Blockstore
// //
// Initialize storage for tests: // Initialize storage for tests:
@ -25,7 +22,12 @@
// -bs_config='{"data_device":"./test_data.bin"}' -size=1000M // -bs_config='{"data_device":"./test_data.bin"}' -size=1000M
#include "blockstore.h" #include "blockstore.h"
#include "fio_headers.h" extern "C" {
#define CONFIG_HAVE_GETTID
#define CONFIG_PWRITEV2
#include "fio/fio.h"
#include "fio/optgroup.h"
}
#include "json11/json11.hpp" #include "json11/json11.hpp"
@ -288,7 +290,7 @@ static int bs_invalidate(struct thread_data *td, struct fio_file *f)
} }
struct ioengine_ops ioengine = { struct ioengine_ops ioengine = {
.name = "vitastor_blockstore", .name = "microceph_blockstore",
.version = FIO_IOOPS_VERSION, .version = FIO_IOOPS_VERSION,
.flags = FIO_MEMALIGN | FIO_DISKLESSIO | FIO_NOEXTEND, .flags = FIO_MEMALIGN | FIO_DISKLESSIO | FIO_NOEXTEND,
.setup = bs_setup, .setup = bs_setup,


@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
// FIO engine to test Blockstore through Secondary OSD interface // FIO engine to test Blockstore through Secondary OSD interface
// //
// Prepare storage like in fio_engine.cpp, then start OSD with ./osd, then test it // Prepare storage like in fio_engine.cpp, then start OSD with ./osd, then test it
@ -30,7 +27,12 @@
#include "rw_blocking.h" #include "rw_blocking.h"
#include "osd_ops.h" #include "osd_ops.h"
#include "fio_headers.h" extern "C" {
#define CONFIG_HAVE_GETTID
#define CONFIG_PWRITEV2
#include "fio/fio.h"
#include "fio/optgroup.h"
}
struct sec_data struct sec_data
{ {
@ -140,7 +142,6 @@ static void sec_cleanup(struct thread_data *td)
if (bsd) if (bsd)
{ {
close(bsd->connect_fd); close(bsd->connect_fd);
delete bsd;
} }
} }
@ -204,7 +205,7 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
case DDIR_READ: case DDIR_READ:
if (!opt->single_primary) if (!opt->single_primary)
{ {
op.hdr.opcode = OSD_OP_SEC_READ; op.hdr.opcode = OSD_OP_SECONDARY_READ;
op.sec_rw.oid = { op.sec_rw.oid = {
.inode = 1, .inode = 1,
.stripe = io->offset >> bsd->block_order, .stripe = io->offset >> bsd->block_order,
@ -225,7 +226,7 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
case DDIR_WRITE: case DDIR_WRITE:
if (!opt->single_primary) if (!opt->single_primary)
{ {
op.hdr.opcode = OSD_OP_SEC_WRITE; op.hdr.opcode = OSD_OP_SECONDARY_WRITE;
op.sec_rw.oid = { op.sec_rw.oid = {
.inode = 1, .inode = 1,
.stripe = io->offset >> bsd->block_order, .stripe = io->offset >> bsd->block_order,
@ -313,7 +314,6 @@ static int sec_getevents(struct thread_data *td, unsigned int min, unsigned int
exit(1); exit(1);
} }
io_u* io = it->second; io_u* io = it->second;
bsd->queue.erase(it);
if (io->ddir == DDIR_READ) if (io->ddir == DDIR_READ)
{ {
if (reply.hdr.retval != io->xfer_buflen) if (reply.hdr.retval != io->xfer_buflen)
@ -381,7 +381,7 @@ static int sec_invalidate(struct thread_data *td, struct fio_file *f)
} }
struct ioengine_ops ioengine = { struct ioengine_ops ioengine = {
.name = "vitastor_secondary_osd", .name = "microceph_secondary_osd",
.version = FIO_IOOPS_VERSION, .version = FIO_IOOPS_VERSION,
.flags = FIO_MEMALIGN | FIO_DISKLESSIO | FIO_NOEXTEND, .flags = FIO_MEMALIGN | FIO_DISKLESSIO | FIO_NOEXTEND,
.setup = sec_setup, .setup = sec_setup,


@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
#include <netinet/tcp.h> #include <netinet/tcp.h>
#include <sys/epoll.h> #include <sys/epoll.h>
@ -13,8 +10,6 @@
#include <fcntl.h> #include <fcntl.h>
#include <string.h> #include <string.h>
#include <stdexcept>
#include "json11/json11.hpp" #include "json11/json11.hpp"
#include "http_client.h" #include "http_client.h"
#include "timerfd_manager.h" #include "timerfd_manager.h"
@ -22,6 +17,7 @@
#define READ_BUFFER_SIZE 9000 #define READ_BUFFER_SIZE 9000
static int extract_port(std::string & host); static int extract_port(std::string & host);
static std::string strtolower(const std::string & in);
static std::string trim(const std::string & in); static std::string trim(const std::string & in);
static std::string ws_format_frame(int type, uint64_t size); static std::string ws_format_frame(int type, uint64_t size);
static bool ws_parse_frame(std::string & buf, int & type, std::string & res); static bool ws_parse_frame(std::string & buf, int & type, std::string & res);
@ -160,7 +156,7 @@ http_co_t::~http_co_t()
} }
if (peer_fd >= 0) if (peer_fd >= 0)
{ {
tfd->set_fd_handler(peer_fd, false, NULL); tfd->set_fd_handler(peer_fd, NULL);
close(peer_fd); close(peer_fd);
peer_fd = -1; peer_fd = -1;
} }
@ -227,7 +223,7 @@ void http_co_t::start_connection()
end(); end();
return; return;
} }
tfd->set_fd_handler(peer_fd, true, [this](int peer_fd, int epoll_events) tfd->set_fd_handler(peer_fd, [this](int peer_fd, int epoll_events)
{ {
this->epoll_events |= epoll_events; this->epoll_events |= epoll_events;
handle_events(); handle_events();
@ -280,11 +276,6 @@ void http_co_t::handle_connect_result()
} }
int one = 1; int one = 1;
setsockopt(peer_fd, SOL_TCP, TCP_NODELAY, &one, sizeof(one)); setsockopt(peer_fd, SOL_TCP, TCP_NODELAY, &one, sizeof(one));
tfd->set_fd_handler(peer_fd, false, [this](int peer_fd, int epoll_events)
{
this->epoll_events |= epoll_events;
handle_events();
});
state = HTTP_CO_SENDING_REQUEST; state = HTTP_CO_SENDING_REQUEST;
submit_send(); submit_send();
stackout(); stackout();
@ -306,18 +297,15 @@ void http_co_t::submit_read()
{ {
res = -errno; res = -errno;
} }
if (res == -EAGAIN) if (res == -EAGAIN || res == 0)
{ {
epoll_events = epoll_events & ~EPOLLIN; epoll_events = epoll_events & ~EPOLLIN;
} }
else if (res <= 0) else if (res < 0)
{ {
// < 0 means error, 0 means EOF
if (!res)
epoll_events = epoll_events & ~EPOLLIN;
end(); end();
} }
else else if (res > 0)
{ {
response += std::string(rbuf.data(), res); response += std::string(rbuf.data(), res);
handle_read(); handle_read();
@ -672,7 +660,7 @@ static int extract_port(std::string & host)
return port; return port;
} }
std::string strtolower(const std::string & in) static std::string strtolower(const std::string & in)
{ {
std::string s = in; std::string s = in;
for (int i = 0; i < s.length(); i++) for (int i = 0; i < s.length(); i++)


@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
#pragma once #pragma once
#include <string> #include <string>
#include <vector> #include <vector>
@ -49,8 +46,6 @@ std::vector<std::string> getifaddr_list(bool include_v6 = false);
uint64_t stoull_full(const std::string & str, int base = 10); uint64_t stoull_full(const std::string & str, int base = 10);
std::string strtolower(const std::string & in);
void http_request(timerfd_manager_t *tfd, const std::string & host, const std::string & request, void http_request(timerfd_manager_t *tfd, const std::string & host, const std::string & request,
const http_options_t & options, std::function<void(const http_response_t *response)> callback); const http_options_t & options, std::function<void(const http_response_t *response)> callback);

1 json11

@ -1 +0,0 @@
Subproject commit 97f06cb20c1e136fd37d58fb40f57dd8f8a3a4a7

48 lambda_size.cpp Normal file

@ -0,0 +1,48 @@
#include <iostream>
#include <functional>
#include <array>
#include <cstdlib> // for malloc() and free()
using namespace std;
// replace operator new and delete to log allocations
void* operator new(std::size_t n)
{
cout << "Allocating " << n << " bytes" << endl;
return malloc(n);
}
void operator delete(void* p) throw()
{
free(p);
}
class test
{
public:
std::string s;
void a(std::function<void()> & f, const char *str)
{
auto l = [this, str]() { cout << str << " ? " << s << " from this\n"; };
cout << "Assigning lambda3 of size " << sizeof(l) << endl;
f = l;
}
};
int main()
{
std::array<char, 16> arr1;
auto lambda1 = [arr1](){};
cout << "Assigning lambda1 of size " << sizeof(lambda1) << endl;
std::function<void()> f1 = lambda1;
std::array<char, 17> arr2;
auto lambda2 = [arr2](){};
cout << "Assigning lambda2 of size " << sizeof(lambda2) << endl;
std::function<void()> f2 = lambda2;
test t;
std::function<void()> f3;
t.s = "str";
t.a(f3, "huyambda");
f3();
}
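What this test demonstrates is std::function's small-object optimization: with libstdc++ on x86-64 the inline buffer happens to fit 16 bytes of captures (an implementation detail, not a guarantee), so assigning lambda1 and the 16-byte [this, str] lambda produces no output from the replaced operator new, while the 17-byte lambda2 prints roughly "Allocating 17 bytes". Other standard libraries may draw the line elsewhere; the practical point is that capturing more than the buffer size silently turns every std::function assignment into a heap allocation, which matters on I/O hot paths.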


@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
// Data distribution optimizer using linear programming (lp_solve) // Data distribution optimizer using linear programming (lp_solve)
const child_process = require('child_process'); const child_process = require('child_process');
@ -28,7 +25,7 @@ async function lp_solve(text)
let vars = {}; let vars = {};
for (const line of stdout.split(/\n/)) for (const line of stdout.split(/\n/))
{ {
let m = /^(^Value of objective function: (-?[\d\.]+)|Actual values of the variables:)\s*$/.exec(line); let m = /^(^Value of objective function: ([\d\.]+)|Actual values of the variables:)\s*$/.exec(line);
if (m) if (m)
{ {
if (m[2]) if (m[2])
@ -50,34 +47,34 @@ async function lp_solve(text)
return { score, vars }; return { score, vars };
} }
async function optimize_initial({ osd_tree, pg_count, pg_size = 3, pg_minsize = 2, max_combinations = 10000, parity_space = 1 }) async function optimize_initial(osd_tree, pg_count, max_combinations)
{ {
if (!pg_count || !osd_tree) max_combinations = max_combinations || 10000;
{
return null;
}
const all_weights = Object.assign({}, ...Object.values(osd_tree)); const all_weights = Object.assign({}, ...Object.values(osd_tree));
const total_weight = Object.values(all_weights).reduce((a, c) => Number(a) + Number(c), 0); const total_weight = Object.values(all_weights).reduce((a, c) => Number(a) + Number(c), 0);
const all_pgs = Object.values(random_combinations(osd_tree, pg_size, max_combinations, parity_space > 1)); let all_pgs = all_combinations(osd_tree, null, true);
if (all_pgs.length > max_combinations)
{
const prob = max_combinations/all_pgs.length;
all_pgs = all_pgs.filter(pg => Math.random() < prob);
}
const pg_per_osd = {}; const pg_per_osd = {};
for (const pg of all_pgs) for (const pg of all_pgs)
{ {
for (let i = 0; i < pg.length; i++) for (const osd of pg)
{ {
const osd = pg[i];
pg_per_osd[osd] = pg_per_osd[osd] || []; pg_per_osd[osd] = pg_per_osd[osd] || [];
pg_per_osd[osd].push((i >= pg_minsize ? parity_space+'*' : '')+"pg_"+pg.join("_")); pg_per_osd[osd].push("pg_"+pg.join("_"));
} }
} }
const pg_effsize = Math.min(pg_minsize, Object.keys(osd_tree).length) const pg_size = Math.min(Object.keys(osd_tree).length, 3);
+ Math.max(0, Math.min(pg_size, Object.keys(osd_tree).length) - pg_minsize) * parity_space;
let lp = ''; let lp = '';
lp += "max: "+all_pgs.map(pg => 'pg_'+pg.join('_')).join(' + ')+";\n"; lp += "max: "+all_pgs.map(pg => 'pg_'+pg.join('_')).join(' + ')+";\n";
for (const osd in pg_per_osd) for (const osd in pg_per_osd)
{ {
if (osd !== NO_OSD) if (osd !== NO_OSD)
{ {
let osd_pg_count = all_weights[osd]/total_weight*pg_effsize*pg_count; let osd_pg_count = all_weights[osd]/total_weight*pg_size*pg_count;
lp += pg_per_osd[osd].join(' + ')+' <= '+osd_pg_count+';\n'; lp += pg_per_osd[osd].join(' + ')+' <= '+osd_pg_count+';\n';
} }
} }
@ -89,19 +86,11 @@ async function optimize_initial({ osd_tree, pg_count, pg_size = 3, pg_minsize =
const lp_result = await lp_solve(lp); const lp_result = await lp_solve(lp);
if (!lp_result) if (!lp_result)
{ {
console.log(lp);
throw new Error('Problem is infeasible or unbounded - is it a bug?'); throw new Error('Problem is infeasible or unbounded - is it a bug?');
} }
const int_pgs = make_int_pgs(lp_result.vars, pg_count); const int_pgs = make_int_pgs(lp_result.vars, pg_count);
const eff = pg_list_space_efficiency(int_pgs, all_weights, pg_minsize, parity_space); const eff = pg_list_space_efficiency(int_pgs, all_weights);
const res = { return { score: lp_result.score, weights: lp_result.vars, int_pgs, space: eff*pg_size, total_space: total_weight };
score: lp_result.score,
weights: lp_result.vars,
int_pgs,
space: eff * pg_effsize,
total_space: total_weight,
};
return res;
} }
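To make the generated problem concrete, here is a hand-written sketch of the lp_solve input the left-hand variant would emit for a hypothetical cluster of 4 hosts with one OSD of weight 1 each, pg_count=16, pg_size=3, pg_minsize=2 and parity_space=1 (the literal 1* coefficients on parity members are dropped for readability). Each OSD may hold at most weight/total_weight*pg_effsize*pg_count = 1/4*3*16 = 12 PG references:

max: pg_1_2_3 + pg_1_2_4 + pg_1_3_4 + pg_2_3_4;
pg_1_2_3 + pg_1_2_4 + pg_1_3_4 <= 12;
pg_1_2_3 + pg_1_2_4 + pg_2_3_4 <= 12;
pg_1_2_3 + pg_1_3_4 + pg_2_3_4 <= 12;
pg_1_2_4 + pg_1_3_4 + pg_2_3_4 <= 12;

The optimum assigns weight 4 to each combination (16 PGs total), and make_int_pgs() then rounds the fractional weights into an integer list of pg_count PGs.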
function make_int_pgs(weights, pg_count) function make_int_pgs(weights, pg_count)
@ -123,117 +112,11 @@ function make_int_pgs(weights, pg_count)
return int_pgs; return int_pgs;
} }
function calc_intersect_weights(pg_size, pg_count, prev_weights, all_pgs)
{
const move_weights = {};
if ((1 << pg_size) < pg_count)
{
const intersect = {};
for (const pg_name in prev_weights)
{
const pg = pg_name.substr(3).split(/_/);
for (let omit = 1; omit < (1 << pg_size); omit++)
{
let pg_omit = [ ...pg ];
let intersect_count = pg_size;
for (let i = 0; i < pg_size; i++)
{
if (omit & (1 << i))
{
pg_omit[i] = '';
intersect_count--;
}
}
pg_omit = pg_omit.join(':');
intersect[pg_omit] = Math.max(intersect[pg_omit] || 0, intersect_count);
}
}
for (const pg of all_pgs)
{
let max_int = 0;
for (let omit = 1; omit < (1 << pg_size); omit++)
{
let pg_omit = [ ...pg ];
for (let i = 0; i < pg_size; i++)
{
if (omit & (1 << i))
{
pg_omit[i] = '';
}
}
pg_omit = pg_omit.join(':');
max_int = Math.max(max_int, intersect[pg_omit] || 0);
}
move_weights['pg_'+pg.join('_')] = pg_size-max_int;
}
}
else
{
const prev_pg_hashed = Object.keys(prev_weights).map(pg_name => pg_name.substr(3).split(/_/).reduce((a, c) => { a[c] = 1; return a; }, {}));
for (const pg of all_pgs)
{
if (!prev_weights['pg_'+pg.join('_')])
{
let max_int = 0;
for (const prev_hash in prev_pg_hashed)
{
const intersect_count = pg.reduce((a, osd) => a + (prev_hash[osd] ? 1 : 0), 0);
if (max_int < intersect_count)
{
max_int = intersect_count;
if (max_int >= pg_size)
{
break;
}
}
}
move_weights['pg_'+pg.join('_')] = pg_size-max_int;
}
}
}
return move_weights;
}
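A worked example of the penalty: with pg_size=3 and a previous PG [1,2,3], a candidate [1,2,4] shares the two-OSD subset (1,2,_), so max_int=2 and its move weight is 3-2=1, while a fully disjoint candidate keeps the maximum weight 3. In the objective of optimize_change() below, a candidate's coefficient is pg_size+1-move_weight, so overlapping combinations score higher — exactly the bias that minimizes data movement.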
function add_valid_previous(osd_tree, prev_weights, all_pgs)
{
// Add previous combinations that are still valid
const hosts = Object.keys(osd_tree).sort();
const host_per_osd = {};
for (const host in osd_tree)
{
for (const osd in osd_tree[host])
{
host_per_osd[osd] = host;
}
}
skip_pg: for (const pg_name in prev_weights)
{
const seen_hosts = {};
const pg = pg_name.substr(3).split(/_/);
for (const osd of pg)
{
if (!host_per_osd[osd] || seen_hosts[host_per_osd[osd]])
{
continue skip_pg;
}
seen_hosts[host_per_osd[osd]] = true;
}
if (!all_pgs[pg_name])
{
all_pgs[pg_name] = pg;
}
}
}
// Try to minimize data movement // Try to minimize data movement
async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3, pg_minsize = 2, max_combinations = 10000, parity_space = 1 }) async function optimize_change(prev_int_pgs, osd_tree, max_combinations)
{ {
if (!osd_tree) max_combinations = max_combinations || 10000;
{ const pg_size = Math.min(Object.keys(osd_tree).length, 3);
return null;
}
const pg_effsize = Math.min(pg_minsize, Object.keys(osd_tree).length)
+ Math.max(0, Math.min(pg_size, Object.keys(osd_tree).length) - pg_minsize) * parity_space;
const pg_count = prev_int_pgs.length; const pg_count = prev_int_pgs.length;
const prev_weights = {}; const prev_weights = {};
const prev_pg_per_osd = {}; const prev_pg_per_osd = {};
@ -241,55 +124,70 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3,
{ {
const pg_name = 'pg_'+pg.join('_'); const pg_name = 'pg_'+pg.join('_');
prev_weights[pg_name] = (prev_weights[pg_name]||0) + 1; prev_weights[pg_name] = (prev_weights[pg_name]||0) + 1;
for (let i = 0; i < pg.length; i++) for (const osd of pg)
{ {
const osd = pg[i];
prev_pg_per_osd[osd] = prev_pg_per_osd[osd] || []; prev_pg_per_osd[osd] = prev_pg_per_osd[osd] || [];
prev_pg_per_osd[osd].push([ pg_name, (i >= pg_minsize ? parity_space : 1) ]); prev_pg_per_osd[osd].push(pg_name);
} }
} }
 // Get all combinations
-let all_pgs = random_combinations(osd_tree, pg_size, max_combinations, parity_space > 1);
-add_valid_previous(osd_tree, prev_weights, all_pgs);
-all_pgs = Object.values(all_pgs);
+let all_pgs = all_combinations(osd_tree, null, true);
+if (all_pgs.length > max_combinations)
+{
+    const intersecting = all_pgs.filter(pg => prev_weights['pg_'+pg.join('_')]);
+    if (intersecting.length > max_combinations)
+    {
+        const prob = max_combinations/intersecting.length;
+        all_pgs = intersecting.filter(pg => Math.random() < prob);
+    }
+    else
+    {
+        const prob = (max_combinations-intersecting.length)/all_pgs.length;
+        all_pgs = all_pgs.filter(pg => Math.random() < prob || prev_weights['pg_'+pg.join('_')]);
+    }
+}
 const pg_per_osd = {};
 for (const pg of all_pgs)
 {
     const pg_name = 'pg_'+pg.join('_');
-    for (let i = 0; i < pg.length; i++)
+    for (const osd of pg)
     {
-        const osd = pg[i];
         pg_per_osd[osd] = pg_per_osd[osd] || [];
-        pg_per_osd[osd].push([ pg_name, (i >= pg_minsize ? parity_space : 1) ]);
+        pg_per_osd[osd].push(pg_name);
     }
 }
 // Penalize PGs based on their similarity to old PGs
-const move_weights = calc_intersect_weights(pg_size, pg_count, prev_weights, all_pgs);
+const intersect = {};
+for (const pg_name in prev_weights)
+{
+    const pg = pg_name.substr(3).split(/_/);
+    intersect[pg[0]+'::'] = intersect[':'+pg[1]+':'] = intersect['::'+pg[2]] = 2;
+    intersect[pg[0]+'::'+pg[2]] = intersect[':'+pg[1]+':'+pg[2]] = intersect[pg[0]+':'+pg[1]+':'] = 1;
+}
+const move_weights = {};
+for (const pg of all_pgs)
+{
+    move_weights['pg_'+pg.join('_')] =
+        intersect[pg[0]+'::'+pg[2]] || intersect[':'+pg[1]+':'+pg[2]] || intersect[pg[0]+':'+pg[1]+':'] ||
+        intersect[pg[0]+'::'] || intersect[':'+pg[1]+':'] || intersect['::'+pg[2]] ||
+        3;
+}
 // Calculate total weight - old PG weights
 const all_pg_names = all_pgs.map(pg => 'pg_'+pg.join('_'));
-const all_pgs_hash = all_pg_names.reduce((a, c) => { a[c] = true; return a; }, {});
 const all_weights = Object.assign({}, ...Object.values(osd_tree));
 const total_weight = Object.values(all_weights).reduce((a, c) => Number(a) + Number(c), 0);
 // Generate the LP problem
 let lp = '';
 lp += 'max: '+all_pg_names.map(pg_name => (
-    prev_weights[pg_name] ? `${pg_size+1}*add_${pg_name} - ${pg_size+1}*del_${pg_name}` : `${pg_size+1-move_weights[pg_name]}*${pg_name}`
+    prev_weights[pg_name] ? `${4-move_weights[pg_name]}*add_${pg_name} - 4*del_${pg_name}` : `${4-move_weights[pg_name]}*${pg_name}`
 )).join(' + ')+';\n';
-lp += all_pg_names
-    .map(pg_name => (prev_weights[pg_name] ? `add_${pg_name} - del_${pg_name}` : `${pg_name}`))
-    .join(' + ')+' = '+(pg_count
-    - Object.keys(prev_weights).reduce((a, old_pg_name) => (a + (all_pgs_hash[old_pg_name] ? prev_weights[old_pg_name] : 0)), 0)
-    )+';\n';
 for (const osd in pg_per_osd)
 {
     if (osd !== NO_OSD)
     {
-        const osd_sum = (pg_per_osd[osd]||[]).map(([ pg_name, space ]) => (
-            prev_weights[pg_name] ? `${space} * add_${pg_name} - ${space} * del_${pg_name}` : `${space} * ${pg_name}`
-        )).join(' + ');
-        const rm_osd_pg_count = (prev_pg_per_osd[osd]||[])
-            .reduce((a, [ old_pg_name, space ]) => (a + (all_pgs_hash[old_pg_name] ? space : 0)), 0);
-        const osd_pg_count = all_weights[osd]*pg_effsize/total_weight*pg_count - rm_osd_pg_count;
+        const osd_sum = (pg_per_osd[osd]||[]).map(pg_name => prev_weights[pg_name] ? `add_${pg_name} - del_${pg_name}` : pg_name).join(' + ');
+        const rm_osd_pg_count = (prev_pg_per_osd[osd]||[]).filter(old_pg_name => move_weights[old_pg_name]).length;
+        let osd_pg_count = all_weights[osd]*3/total_weight*pg_count - rm_osd_pg_count;
         lp += osd_sum + ' <= ' + osd_pg_count + ';\n';
     }
 }
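For reference, a hand-rolled sketch (not part of the patch) of the lp_solve text this block emits, for an assumed toy input: two candidate PGs over three OSDs of weight 1, no previous PGs and pg_count=16, so every move weight is 3 and every objective coefficient is 4-3=1.

    // Sketch: reproduce the generated lp_solve input for a toy cluster.
    const toy_pgs = [ [ 1, 2, 3 ], [ 3, 2, 1 ] ];
    const toy_pg_count = 16, toy_total_weight = 3; // three OSDs of weight 1
    let toy_lp = 'max: ' + toy_pgs.map(pg => '1*pg_' + pg.join('_')).join(' + ') + ';\n';
    for (const osd of [ 1, 2, 3 ])
    {
        // each OSD appears in both candidate PGs, so both variables enter its constraint
        toy_lp += toy_pgs.map(pg => 'pg_' + pg.join('_')).join(' + ')
            + ' <= ' + (1*3/toy_total_weight*toy_pg_count) + ';\n';
    }
    console.log(toy_lp);
    // max: 1*pg_1_2_3 + 1*pg_3_2_1;
    // pg_1_2_3 + pg_3_2_1 <= 16;   (three such lines, one per OSD)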
@@ -323,7 +221,7 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3,
     const weights = { ...prev_weights };
     for (const k in prev_weights)
     {
-        if (!all_pgs_hash[k])
+        if (!move_weights[k])
         {
             delete weights[k];
         }
@@ -338,7 +236,7 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3,
         {
             weights[k.substr(4)] = (weights[k.substr(4)] || 0) - Number(lp_result.vars[k]);
         }
-        else if (k.substr(0, 3) === 'pg_')
+        else
         {
             weights[k] = Number(lp_result.vars[k]);
         }
@@ -360,7 +258,7 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3,
         {
             differs++;
         }
-        for (let j = 0; j < pg_size; j++)
+        for (let j = 0; j < 3; j++)
         {
             if (new_pgs[i][j] != prev_int_pgs[i][j])
             {
@@ -375,7 +273,7 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3,
         int_pgs: new_pgs,
         differs,
         osd_differs,
-        space: pg_effsize * pg_list_space_efficiency(new_pgs, all_weights, pg_minsize, parity_space),
+        space: pg_size * pg_list_space_efficiency(new_pgs, all_weights),
         total_space: total_weight,
     };
 }
@@ -493,112 +391,27 @@ function extract_osds(osd_tree, levels, osd_level, osds = {})
     return osds;
 }
-// ordered = don't treat (x,y) and (y,x) as equal
-function random_combinations(osd_tree, pg_size, count, ordered)
-{
-    let seed = 0x5f020e43;
-    let rng = () =>
-    {
-        seed ^= seed << 13;
-        seed ^= seed >> 17;
-        seed ^= seed << 5;
-        return seed + 2147483648;
-    };
-    const hosts = Object.keys(osd_tree).sort();
-    const osds = Object.keys(osd_tree).reduce((a, c) => { a[c] = Object.keys(osd_tree[c]).sort(); return a; }, {});
-    const r = {};
-    // Generate random combinations including each OSD at least once
-    for (let h = 0; h < hosts.length; h++)
-    {
-        for (let o = 0; o < osds[hosts[h]].length; o++)
-        {
-            const pg = [ osds[hosts[h]][o] ];
-            const cur_hosts = [ ...hosts ];
-            cur_hosts.splice(h, 1);
-            for (let i = 1; i < pg_size && i < hosts.length; i++)
-            {
-                const next_host = rng() % cur_hosts.length;
-                const next_osd = rng() % osds[cur_hosts[next_host]].length;
-                pg.push(osds[cur_hosts[next_host]][next_osd]);
-                cur_hosts.splice(next_host, 1);
-            }
-            const cyclic_pgs = [ pg ];
-            if (ordered)
-            {
-                for (let i = 1; i < pg.length; i++)
-                {
-                    cyclic_pgs.push([ ...pg.slice(i), ...pg.slice(0, i) ]);
-                }
-            }
-            for (const pg of cyclic_pgs)
-            {
-                while (pg.length < pg_size)
-                {
-                    pg.push(NO_OSD);
-                }
-                r['pg_'+pg.join('_')] = pg;
-            }
-        }
-    }
-    // Generate purely random combinations
-    while (count > 0)
-    {
-        let host_idx = [];
-        const cur_hosts = [ ...hosts.map((h, i) => i) ];
-        const max_hosts = pg_size < hosts.length ? pg_size : hosts.length;
-        if (ordered)
-        {
-            for (let i = 0; i < max_hosts; i++)
-            {
-                const r = rng() % cur_hosts.length;
-                host_idx[i] = cur_hosts[r];
-                cur_hosts.splice(r, 1);
-            }
-        }
-        else
-        {
-            for (let i = 0; i < max_hosts; i++)
-            {
-                const r = rng() % (cur_hosts.length - (max_hosts - i - 1));
-                host_idx[i] = cur_hosts[r];
-                cur_hosts.splice(0, r+1);
-            }
-        }
-        let pg = host_idx.map(h => osds[hosts[h]][rng() % osds[hosts[h]].length]);
-        while (pg.length < pg_size)
-        {
-            pg.push(NO_OSD);
-        }
-        r['pg_'+pg.join('_')] = pg;
-        count--;
-    }
-    return r;
-}
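A usage sketch for the removed random_combinations() (the tree here is hypothetical; note the hard-coded xorshift seed makes the sample deterministic across monitor restarts):

    const demo_tree = {
        host1: { 1: 1.0, 2: 1.0 },
        host2: { 3: 1.0, 4: 1.0 },
        host3: { 5: 1.0 },
    };
    // pg_size=3, at most 100 extra random PGs, unordered:
    const sample = random_combinations(demo_tree, 3, 100, false);
    // sample maps names to OSD arrays, e.g. { 'pg_1_3_5': [ '1', '3', '5' ], ... };
    // hosts inside one PG never repeat, and every OSD occurs in at least one PG.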
-// Super-stupid algorithm. Given the current OSD tree, generate all possible OSD combinations
+// FIXME: support different pg_sizes, not just 3
 // osd_tree = { failure_domain1: { osd1: size1, ... }, ... }
-// ordered = return combinations without duplicates having different order
-function all_combinations(osd_tree, pg_size, ordered, count)
+function all_combinations(osd_tree, count, ordered)
 {
     const hosts = Object.keys(osd_tree).sort();
     const osds = Object.keys(osd_tree).reduce((a, c) => { a[c] = Object.keys(osd_tree[c]).sort(); return a; }, {});
-    while (hosts.length < pg_size)
+    while (hosts.length < 3)
     {
         osds[NO_OSD] = [ NO_OSD ];
         hosts.push(NO_OSD);
     }
-    let host_idx = [];
-    let osd_idx = [];
-    for (let i = 0; i < pg_size; i++)
-    {
-        host_idx.push(i);
-        osd_idx.push(0);
-    }
+    let host_idx = [ 0, 1, 2 ];
+    let osd_idx = [ 0, 0, 0 ];
     const r = [];
     while (!count || count < 0 || r.length < count)
     {
+        let inc;
+        if (host_idx[2] != host_idx[1] && host_idx[2] != host_idx[0] && host_idx[1] != host_idx[0])
+        {
-        r.push(host_idx.map((hi, i) => osds[hosts[hi]][osd_idx[i]]));
-        let inc = pg_size-1;
+            r.push(host_idx.map((hi, i) => osds[hosts[hi]][osd_idx[i]]));
+            inc = 2;
         while (inc >= 0)
         {
             osd_idx[inc]++;
@@ -612,39 +425,33 @@ function all_combinations(osd_tree, pg_size, ordered, count)
                 break;
             }
         }
-        if (inc < 0)
-        {
-            // no osds left in the current host combination, select the next one
-            inc = pg_size-1;
-            same_again: while (inc >= 0)
-            {
-                host_idx[inc]++;
-                for (let prev_host = 0; prev_host < inc; prev_host++)
-                {
-                    if (host_idx[prev_host] == host_idx[inc])
-                    {
-                        continue same_again;
-                    }
-                }
-                if (host_idx[inc] < (ordered ? hosts.length-(pg_size-1-inc) : hosts.length))
-                {
-                    while ((++inc) < pg_size)
-                    {
-                        host_idx[inc] = (ordered ? host_idx[inc-1]+1 : 0);
-                    }
-                    break;
-                }
-                else
-                {
-                    inc--;
-                }
-            }
-            if (inc < 0)
-            {
-                break;
-            }
-        }
+        }
+        else
+        {
+            inc = -1;
+        }
+        if (inc < 0)
+        {
+            // no osds left in current host combination, select the next one
+            osd_idx = [ 0, 0, 0 ];
+            host_idx[2]++;
+            if (host_idx[2] >= hosts.length)
+            {
+                host_idx[1]++;
+                host_idx[2] = ordered ? host_idx[1]+1 : 0;
+                if ((ordered ? host_idx[2] : host_idx[1]) >= hosts.length)
+                {
+                    host_idx[0]++;
+                    host_idx[1] = ordered ? host_idx[0]+1 : 0;
+                    host_idx[2] = ordered ? host_idx[1]+1 : 0;
+                    if ((ordered ? host_idx[2] : host_idx[0]) >= hosts.length)
+                    {
+                        break;
+                    }
+                }
+            }
+        }
     }
     return r;
 }
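And a matching sketch for all_combinations() in its new form (a hypothetical one-OSD-per-host tree):

    const tiny_tree = { a: { 1: 1 }, b: { 2: 1 }, c: { 3: 1 } };
    // ordered=true keeps host indexes strictly increasing, so each set appears once:
    console.log(all_combinations(tiny_tree, null, true)); // [ [ '1', '2', '3' ] ]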
@@ -661,15 +468,14 @@ function pg_weights_space_efficiency(weights, pg_count, osd_sizes)
     return pg_per_osd_space_efficiency(per_osd, pg_count, osd_sizes);
 }
-function pg_list_space_efficiency(pgs, osd_sizes, pg_minsize, parity_space)
+function pg_list_space_efficiency(pgs, osd_sizes)
 {
     const per_osd = {};
     for (const pg of pgs)
     {
-        for (let i = 0; i < pg.length; i++)
+        for (const osd of pg)
         {
-            const osd = pg[i];
-            per_osd[osd] = (per_osd[osd]||0) + (i >= pg_minsize ? (parity_space||1) : 1);
+            per_osd[osd] = (per_osd[osd]||0) + 1;
         }
     }
     return pg_per_osd_space_efficiency(per_osd, pgs.length, osd_sizes);
@@ -711,6 +517,5 @@ module.exports = {
     lp_solve,
     make_int_pgs,
     align_pgs,
-    random_combinations,
     all_combinations,
 };
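A quick sanity check for pg_list_space_efficiency() (a sketch; it relies on pg_per_osd_space_efficiency(), which is defined elsewhere in this file and not shown in the hunk):

    // Four equal 1 TB OSDs, each used by exactly one of two PGs: perfectly
    // balanced, so the efficiency should come out as 1; lower values mean the
    // busiest OSD caps the usable capacity.
    const eff = pg_list_space_efficiency([ [ 1, 2 ], [ 3, 4 ] ], { 1: 1, 2: 1, 3: 1, 4: 1 });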

mon/mon-main.js → lp/mon-main.js Executable file → Normal file

@@ -1,8 +1,5 @@
 #!/usr/bin/node
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
 const Mon = require('./mon.js');
 const options = {};
@@ -18,8 +15,8 @@ for (let i = 2; i < process.argv.length; i++)
 if (!options.etcd_url)
 {
-    console.error('USAGE: '+process.argv[0]+' '+process.argv[1]+' --etcd_url "http://127.0.0.1:2379,..." --etcd_prefix "/vitastor" --etcd_start_timeout 5 [--verbose 1]');
+    console.error('USAGE: '+process.argv[0]+' '+process.argv[1]+' --etcd_url "http://127.0.0.1:2379,..." --etcd_prefix "/rage" --etcd_start_timeout 5');
     process.exit();
 }
-new Mon(options).start().catch(e => { console.error(e); process.exit(); });
+new Mon(options).start();

lp/mon.js Normal file
@@ -0,0 +1,858 @@
const http = require('http');
const crypto = require('crypto');
const os = require('os');
const WebSocket = require('ws');
const LPOptimizer = require('./lp-optimizer.js');
const stableStringify = require('./stable-stringify.js');
class Mon
{
static etcd_tree = {
config: {
global: null,
/* placement_tree = {
levels: { datacenter: 1, rack: 2, host: 3, osd: 4, ... },
nodes: { host1: { level: 'host', parent: 'rack1' }, ... },
failure_domain: 'host',
} */
placement_tree: null,
osd: {},
pgs: {},
},
osd: {
state: {},
stats: {},
},
mon: {
master: null,
},
pg: {
change_stamp: null,
state: {},
stats: {},
history: {},
},
}
constructor(config)
{
// FIXME: Maybe prefer local etcd
this.etcd_urls = [];
for (let url of config.etcd_url.split(/,/))
{
let scheme = 'http';
url = url.trim().replace(/^(https?):\/\//, (m, m1) => { scheme = m1; return ''; });
if (!/\/[^\/]/.exec(url))
url += '/v3';
this.etcd_urls.push(scheme+'://'+url);
}
this.etcd_prefix = config.etcd_prefix || '/rage';
this.etcd_prefix = this.etcd_prefix.replace(/\/\/+/g, '/').replace(/^\/?(.*[^\/])\/?$/, '/$1');
this.etcd_start_timeout = (config.etcd_start_timeout || 5) * 1000;
this.state = JSON.parse(JSON.stringify(Mon.etcd_tree));
}
async start()
{
await this.load_config();
await this.get_lease();
await this.become_master();
await this.load_cluster_state();
await this.start_watcher();
await this.recheck_pgs();
}
async load_config()
{
const res = await this.etcd_call('/txn', { success: [
{ requestRange: { key: b64(this.etcd_prefix+'/config/global') } }
] }, this.etcd_start_timeout, -1);
this.parse_kv(res.responses[0].response_range.kvs[0]);
if (!this.config)
{
    // No config/global in etcd yet - fall back to an empty config
    this.config = this.state.config.global = {};
}
this.check_config();
}
check_config()
{
this.config.etcd_mon_ttl = Number(this.config.etcd_mon_ttl) || 30;
this.config.etcd_mon_timeout = Number(this.config.etcd_mon_timeout) || 0;
if (this.config.etcd_mon_timeout <= 0)
{
this.config.etcd_mon_timeout = 1000;
}
this.config.etcd_mon_retries = Number(this.config.etcd_mon_retries) || 5;
if (this.config.etcd_mon_retries < 0)
{
this.config.etcd_mon_retries = 0;
}
this.config.mon_change_timeout = Number(this.config.mon_change_timeout) || 1000;
if (this.config.mon_change_timeout < 100)
{
this.config.mon_change_timeout = 100;
}
this.config.mon_stats_timeout = Number(this.config.mon_stats_timeout) || 1000;
if (this.config.mon_stats_timeout < 100)
{
this.config.mon_stats_timeout = 100;
}
// After this number of seconds, a dead OSD will be removed from PG distribution
this.config.osd_out_time = Number(this.config.osd_out_time) || 0;
if (!this.config.osd_out_time)
{
this.config.osd_out_time = 30*60; // 30 minutes by default
}
this.config.max_osd_combinations = Number(this.config.max_osd_combinations) || 10000;
if (this.config.max_osd_combinations < 100)
{
this.config.max_osd_combinations = 100;
}
}
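For reference, a hypothetical /rage/config/global document that passes the checks above; every key is optional and falls back to the defaults check_config() enforces:

    const example_global_config = {
        etcd_mon_ttl: 30,           // seconds (used for the etcd lease below)
        etcd_mon_timeout: 1000,     // ms
        etcd_mon_retries: 5,
        mon_change_timeout: 1000,   // ms
        mon_stats_timeout: 1000,    // ms
        osd_out_time: 30*60,        // seconds until a dead OSD leaves the PG distribution
        max_osd_combinations: 10000,
    };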
async start_watcher(retries)
{
    let retry = 0;
    if (retries == null)
    {
        retries = this.config.etcd_mon_retries;
    }
    if (retries >= 0 && retries < 1)
    {
        retries = 1;
    }
    while (retries < 0 || retry < retries)
    {
        const base = 'ws'+this.etcd_urls[Math.floor(Math.random()*this.etcd_urls.length)].substr(4);
        const ok = await new Promise((ok, no) =>
        {
            const timer_id = setTimeout(() =>
            {
                this.ws.close();
                ok(false);
            }, this.config.etcd_mon_timeout);
            this.ws = new WebSocket(base+'/watch');
            this.ws.on('open', () =>
            {
                if (timer_id)
                    clearTimeout(timer_id);
                ok(true);
            });
            this.ws.on('error', () =>
            {
                // Without this handler a connection failure would crash the process
                if (timer_id)
                    clearTimeout(timer_id);
                ok(false);
            });
        });
        if (ok)
        {
            break;
        }
        this.ws = null;
        retry++;
    }
if (!this.ws)
{
this.die('Failed to open etcd watch websocket');
}
this.ws.send(JSON.stringify({
create_request: {
key: b64(this.etcd_prefix+'/'),
range_end: b64(this.etcd_prefix+'0'),
start_revision: ''+this.etcd_watch_revision,
watch_id: 1,
},
}));
this.ws.on('message', (msg) =>
{
let data;
try
{
data = JSON.parse(msg);
}
catch (e)
{
}
if (!data || !data.result || !data.result.events)
{
console.error('Garbage received from watch websocket: '+msg);
}
else
{
let stats_changed = false, changed = false;
console.log('Revision '+data.result.header.revision+' events: ');
for (const e of data.result.events)
{
this.parse_kv(e.kv);
const key = e.kv.key.substr(this.etcd_prefix.length);
if (key.substr(0, 11) == '/osd/stats/' || key.substr(0, 10) == '/pg/stats/')
{
stats_changed = true;
}
else if (key != '/stats')
{
changed = true;
}
console.log(e);
}
if (stats_changed)
{
this.schedule_update_stats();
}
if (changed)
{
this.schedule_recheck();
}
}
});
}
async get_lease()
{
const max_ttl = this.config.etcd_mon_ttl + this.config.etcd_mon_timeout/1000*this.config.etcd_mon_retries;
const res = await this.etcd_call('/lease/grant', { TTL: max_ttl }, this.config.etcd_mon_timeout, this.config.etcd_mon_retries);
this.etcd_lease_id = res.ID;
setInterval(async () =>
{
const res = await this.etcd_call('/lease/keepalive', { ID: this.etcd_lease_id }, this.config.etcd_mon_timeout, this.config.etcd_mon_retries);
if (!res.result.TTL)
{
this.die('Lease expired');
}
}, this.config.etcd_mon_timeout);
}
async become_master()
{
const state = { ip: this.local_ips() };
while (1)
{
const res = await this.etcd_call('/txn', {
compare: [ { target: 'CREATE', create_revision: 0, key: b64(this.etcd_prefix+'/mon/master') } ],
success: [ { key: b64(this.etcd_prefix+'/mon/master'), value: b64(JSON.stringify(state)), lease: ''+this.etcd_lease_id } ],
}, this.etcd_start_timeout, 0);
if (res.succeeded)
{
    break;
}
await new Promise(ok => setTimeout(ok, this.etcd_start_timeout));
}
}
async load_cluster_state()
{
const res = await this.etcd_call('/txn', { success: [
{ requestRange: { key: b64(this.etcd_prefix+'/'), range_end: b64(this.etcd_prefix+'0') } },
] }, this.etcd_start_timeout, -1);
this.etcd_watch_revision = BigInt(res.header.revision)+BigInt(1);
this.state = JSON.parse(JSON.stringify(Mon.etcd_tree));
for (const response of res.responses)
{
    for (const kv of response.response_range.kvs)
    {
        this.parse_kv(kv);
    }
}
}
all_osds()
{
return Object.keys(this.state.osd.stats);
}
get_osd_tree()
{
this.state.config.placement_tree = this.state.config.placement_tree||{};
const levels = this.state.config.placement_tree.levels||{};
levels.host = levels.host || 100;
levels.osd = levels.osd || 101;
const tree = { '': { children: [] } };
for (const node_id in this.state.config.placement_tree.nodes||{})
{
const node_cfg = this.state.config.placement_tree.nodes[node_id];
if (!node_id || /^\d/.exec(node_id) ||
!node_cfg.level || !levels[node_cfg.level])
{
// All nodes must have non-empty non-numeric IDs and valid levels
continue;
}
tree[node_id] = { id: node_id, level: node_cfg.level, parent: node_cfg.parent, children: [] };
}
// This requires monitor system time to be in sync with OSD system times (at least to some extent)
const down_time = Date.now()/1000 - this.config.osd_out_time;
for (const osd_num of this.all_osds().sort((a, b) => a - b))
{
const stat = this.state.osd.stats[osd_num];
if (stat.size && (this.state.osd.state[osd_num] || Number(stat.time) >= down_time))
{
// Numeric IDs are reserved for OSDs
const reweight = this.state.config.osd[osd_num] && Number(this.state.config.osd[osd_num].reweight) || 1;
tree[osd_num] = tree[osd_num] || { id: osd_num, parent: stat.host };
tree[osd_num].level = 'osd';
tree[osd_num].size = reweight * stat.size / 1024 / 1024 / 1024 / 1024; // terabytes
delete tree[osd_num].children;
}
}
for (const node_id in tree)
{
if (node_id === '')
{
continue;
}
const node_cfg = tree[node_id];
const node_level = levels[node_cfg.level] || node_cfg.level;
let parent_level = node_cfg.parent && tree[node_cfg.parent] && tree[node_cfg.parent].children
&& tree[node_cfg.parent].level;
parent_level = parent_level ? (levels[parent_level] || parent_level) : null;
// Parent's level must be less than child's; OSDs must be leaves
const parent = parent_level && parent_level < node_level ? node_cfg.parent : '';
tree[parent].children.push(tree[node_id]);
delete node_cfg.parent;
}
return LPOptimizer.flatten_tree(tree[''].children, levels, this.state.config.failure_domain, 'osd');
}
async stop_all_pgs()
{
let has_online = false, paused = true;
for (const pg in this.state.config.pgs.items||{})
{
const cur_state = ((this.state.pg.state[pg]||{}).state||[]).join(',');
if (cur_state != '' && cur_state != 'offline')
{
has_online = true;
}
if (!this.state.config.pgs.items[pg].pause)
{
paused = false;
}
}
if (!paused)
{
console.log('Stopping all PGs before changing PG count');
const new_cfg = JSON.parse(JSON.stringify(this.state.config.pgs));
for (const pg in new_cfg.items)
{
new_cfg.items[pg].pause = true;
}
// Check that no OSDs change their state before we pause PGs
// Doing this we make sure that OSDs don't wake up in the middle of our "transaction"
// and can't see the old PG configuration
const checks = [];
for (const osd_num of this.all_osds())
{
const key = b64(this.etcd_prefix+'/osd/state/'+osd_num);
checks.push({ key, target: 'MOD', result: 'LESS', mod_revision: ''+this.etcd_watch_revision });
}
const res = await this.etcd_call('/txn', {
compare: [
{ key: b64(this.etcd_prefix+'/mon/master'), target: 'LEASE', lease: ''+this.etcd_lease_id },
{ key: b64(this.etcd_prefix+'/config/pgs'), target: 'MOD', mod_revision: ''+this.etcd_watch_revision, result: 'LESS' },
...checks,
],
success: [
{ requestPut: { key: b64(this.etcd_prefix+'/config/pgs'), value: b64(JSON.stringify(new_cfg)) } },
],
}, this.config.etcd_mon_timeout, 0);
if (!res.succeeded)
{
return false;
}
this.state.config.pgs = new_cfg;
}
return !has_online;
}
scale_pg_count(prev_pgs, pg_history, new_pg_count)
{
const old_pg_count = prev_pgs.length;
// Add all possibly intersecting PGs into the history of new PGs
if (!(new_pg_count % old_pg_count))
{
// New PG count is a multiple of the old PG count
const mul = (new_pg_count / old_pg_count);
for (let i = 0; i < new_pg_count; i++)
{
const old_i = Math.floor(i / mul);
pg_history[i] = JSON.parse(JSON.stringify(this.state.pg.history[1+old_i] || {}));
}
}
else if (!(old_pg_count % new_pg_count))
{
// Old PG count is a multiple of the new PG count
const mul = (old_pg_count / new_pg_count);
for (let i = 0; i < new_pg_count; i++)
{
pg_history[i] = {
osd_sets: [],
all_peers: [],
};
for (let j = 0; j < mul; j++)
{
pg_history[i].osd_sets.push(prev_pgs[i*mul+j]);
const hist = this.state.pg.history[1+i*mul+j];
if (hist && hist.osd_sets && hist.osd_sets.length)
{
Array.prototype.push.apply(pg_history[i].osd_sets, hist.osd_sets);
}
if (hist && hist.all_peers && hist.all_peers.length)
{
Array.prototype.push.apply(pg_history[i].all_peers, hist.all_peers);
}
}
}
}
else
{
// Any PG may intersect with any PG after non-multiple PG count change
// So, merge ALL PGs history
let all_sets = {};
let all_peers = {};
for (const pg of prev_pgs)
{
all_sets[pg.join(' ')] = pg;
}
for (const pg in this.state.pg.history)
{
const hist = this.state.pg.history[pg];
if (hist && hist.osd_sets)
{
for (const pg of hist.osd_sets)
{
all_sets[pg.join(' ')] = pg;
}
}
if (hist && hist.all_peers)
{
for (const osd_num of hist.all_peers)
{
all_peers[osd_num] = Number(osd_num);
}
}
}
all_sets = Object.values(all_sets);
all_peers = Object.values(all_peers);
for (let i = 0; i < new_pg_count; i++)
{
pg_history[i] = { osd_sets: all_sets, all_peers };
}
}
// Mark history keys for removed PGs as removed
for (let i = new_pg_count; i < old_pg_count; i++)
{
pg_history[i] = null;
}
if (old_pg_count < new_pg_count)
{
for (let i = new_pg_count-1; i >= 0; i--)
{
prev_pgs[i] = prev_pgs[Math.floor(i/new_pg_count*old_pg_count)];
}
}
else if (old_pg_count > new_pg_count)
{
for (let i = 0; i < new_pg_count; i++)
{
prev_pgs[i] = prev_pgs[Math.round(i/new_pg_count*old_pg_count)];
}
prev_pgs.splice(new_pg_count, old_pg_count-new_pg_count);
}
}
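A hedged sketch of the doubling branch above (inputs are hypothetical; mon stands for a Mon instance):

    const prev_pgs = [ [ 1, 2, 3 ], [ 4, 5, 6 ] ]; // 2 old PGs
    const pg_history = [];
    mon.scale_pg_count(prev_pgs, pg_history, 4);
    // prev_pgs now has 4 entries (old sets repeated), and pg_history[i] carries
    // the stored history of old PG floor(i/2), so peering can still find objects
    // written under the old layout.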
async save_new_pgs(prev_pgs, new_pgs, pg_history, tree_hash)
{
const txn = [], checks = [];
const pg_items = {};
new_pgs.map((osd_set, i) =>
{
osd_set = osd_set.map(osd_num => osd_num === LPOptimizer.NO_OSD ? 0 : osd_num);
const alive_set = osd_set.filter(osd_num => osd_num);
pg_items[i+1] = {
osd_set,
primary: alive_set.length ? alive_set[Math.floor(Math.random()*alive_set.length)] : 0,
};
if (prev_pgs[i] && prev_pgs[i].join(' ') != osd_set.join(' '))
{
pg_history[i] = pg_history[i] || {};
pg_history[i].osd_sets = pg_history[i].osd_sets || [];
pg_history[i].osd_sets.push(prev_pgs[i]);
}
});
for (let i = 0; i < new_pgs.length || i < prev_pgs.length; i++)
{
checks.push({
key: b64(this.etcd_prefix+'/pg/history/'+(i+1)),
target: 'MOD',
mod_revision: ''+this.etcd_watch_revision,
result: 'LESS',
});
if (pg_history[i])
{
txn.push({
requestPut: {
key: b64(this.etcd_prefix+'/pg/history/'+(i+1)),
value: b64(JSON.stringify(pg_history[i])),
},
});
}
else
{
txn.push({
requestDeleteRange: {
key: b64(this.etcd_prefix+'/pg/history/'+(i+1)),
},
});
}
}
this.state.config.pgs = {
hash: tree_hash,
items: pg_items,
};
const res = await this.etcd_call('/txn', {
compare: [
{ key: b64(this.etcd_prefix+'/mon/master'), target: 'LEASE', lease: ''+this.etcd_lease_id },
{ key: b64(this.etcd_prefix+'/config/pgs'), target: 'MOD', mod_revision: ''+this.etcd_watch_revision, result: 'LESS' },
...checks,
],
success: [
{ requestPut: { key: b64(this.etcd_prefix+'/config/pgs'), value: b64(JSON.stringify(this.state.config.pgs)) } },
...txn,
],
}, this.config.etcd_mon_timeout, 0);
return res.succeeded;
}
async recheck_pgs()
{
// Take configuration and state, check it against the stored configuration hash
// Recalculate PGs and save them to etcd if the configuration is changed
const tree_cfg = {
osd_tree: this.get_osd_tree(),
pg_count: this.config.pg_count || Object.keys(this.state.config.pgs.items||{}).length || 128,
max_osd_combinations: this.config.max_osd_combinations,
};
const tree_hash = sha1hex(stableStringify(tree_cfg));
if (this.state.config.pgs.hash != tree_hash)
{
// Something has changed
const prev_pgs = [];
for (const pg in this.state.config.pgs.items||{})
{
prev_pgs[pg-1] = this.state.config.pgs.items[pg].osd_set;
}
const pg_history = [];
const old_pg_count = prev_pgs.length;
let optimize_result;
if (old_pg_count > 0)
{
if (old_pg_count != tree_cfg.pg_count)
{
// PG count changed. Need to bring all PGs down.
if (!await this.stop_all_pgs())
{
this.schedule_recheck();
return;
}
this.scale_pg_count(prev_pgs, pg_history, tree_cfg.pg_count);
}
optimize_result = await LPOptimizer.optimize_change(prev_pgs, tree_cfg.osd_tree, tree_cfg.max_osd_combinations);
}
else
{
optimize_result = await LPOptimizer.optimize_initial(tree_cfg.osd_tree, tree_cfg.pg_count, tree_cfg.max_osd_combinations);
}
if (!await this.save_new_pgs(prev_pgs, optimize_result.int_pgs, pg_history, tree_hash))
{
console.log('Someone changed PG configuration while we also tried to change it. Retrying in '+this.config.mon_change_timeout+' ms');
this.schedule_recheck();
return;
}
console.log('PG configuration successfully changed');
if (old_pg_count != optimize_result.int_pgs.length)
{
console.log(`PG count changed from: ${old_pg_count} to ${optimize_result.int_pgs.length}`);
}
LPOptimizer.print_change_stats(optimize_result);
}
}
schedule_recheck()
{
if (this.recheck_timer)
{
clearTimeout(this.recheck_timer);
this.recheck_timer = null;
}
this.recheck_timer = setTimeout(() =>
{
this.recheck_timer = null;
this.recheck_pgs().catch(console.error);
}, this.config.mon_change_timeout || 1000);
}
sum_stats()
{
let overflow = false;
this.prev_stats = this.prev_stats || { op_stats: {}, subop_stats: {}, recovery_stats: {} };
const op_stats = {}, subop_stats = {}, recovery_stats = {};
for (const osd in this.state.osd.stats)
{
const st = this.state.osd.stats[osd];
for (const op in st.op_stats||{})
{
op_stats[op] = op_stats[op] || { count: 0n, usec: 0n, bytes: 0n };
op_stats[op].count += BigInt(st.op_stats[op].count||0);
op_stats[op].usec += BigInt(st.op_stats[op].usec||0);
op_stats[op].bytes += BigInt(st.op_stats[op].bytes||0);
}
for (const op in st.subop_stats||{})
{
subop_stats[op] = subop_stats[op] || { count: 0n, usec: 0n };
subop_stats[op].count += BigInt(st.subop_stats[op].count||0);
subop_stats[op].usec += BigInt(st.subop_stats[op].usec||0);
}
for (const op in st.recovery_stats||{})
{
recovery_stats[op] = recovery_stats[op] || { count: 0n, bytes: 0n };
recovery_stats[op].count += BigInt(st.recovery_stats[op].count||0);
recovery_stats[op].bytes += BigInt(st.recovery_stats[op].bytes||0);
}
}
for (const op in op_stats)
{
if (op_stats[op].count >= 0x10000000000000000n)
{
if (!this.prev_stats.op_stats[op])
{
overflow = true;
}
else
{
op_stats[op].count -= this.prev_stats.op_stats[op].count;
op_stats[op].usec -= this.prev_stats.op_stats[op].usec;
op_stats[op].bytes -= this.prev_stats.op_stats[op].bytes;
}
}
}
for (const op in subop_stats)
{
if (subop_stats[op].count >= 0x10000000000000000n)
{
if (!this.prev_stats.subop_stats[op])
{
overflow = true;
}
else
{
subop_stats[op].count -= this.prev_stats.subop_stats[op].count;
subop_stats[op].usec -= this.prev_stats.subop_stats[op].usec;
}
}
}
for (const op in recovery_stats)
{
if (recovery_stats[op].count >= 0x10000000000000000n)
{
if (!this.prev_stats.recovery_stats[op])
{
overflow = true;
}
else
{
recovery_stats[op].count -= this.prev_stats.recovery_stats[op].count;
recovery_stats[op].bytes -= this.prev_stats.recovery_stats[op].bytes;
}
}
}
const object_counts = { object: 0n, clean: 0n, misplaced: 0n, degraded: 0n, incomplete: 0n };
for (const pg_num in this.state.pg.stats)
{
const st = this.state.pg.stats[pg_num];
for (const k in object_counts)
{
if (st[k+'_count'])
{
object_counts[k] += BigInt(st[k+'_count']);
}
}
}
return (this.prev_stats = { overflow, op_stats, subop_stats, recovery_stats, object_counts });
}
async update_total_stats()
{
const stats = this.sum_stats();
if (!stats.overflow)
{
// Convert to strings, serialize and save
const ser = {};
for (const st of [ 'op_stats', 'subop_stats', 'recovery_stats' ])
{
ser[st] = {};
for (const op in stats[st])
{
ser[st][op] = {};
for (const k in stats[st][op])
{
ser[st][op][k] = ''+stats[st][op][k];
}
}
}
ser.object_counts = {};
for (const k in stats.object_counts)
{
ser.object_counts[k] = ''+stats.object_counts[k];
}
await this.etcd_call('/txn', {
success: [ { requestPut: { key: b64(this.etcd_prefix+'/stats'), value: b64(JSON.stringify(ser)) } } ],
}, this.config.etcd_mon_timeout, 0);
}
}
schedule_update_stats()
{
if (this.stats_timer)
{
clearTimeout(this.stats_timer);
this.stats_timer = null;
}
this.stats_timer = setTimeout(() =>
{
this.stats_timer = null;
this.update_total_stats().catch(console.error);
}, this.config.mon_stats_timeout || 1000);
}
parse_kv(kv)
{
if (!kv || !kv.key)
{
return;
}
kv.key = de64(kv.key);
kv.value = kv.value ? JSON.parse(de64(kv.value)) : null;
const key = kv.key.substr(this.etcd_prefix.length).replace(/^\/+/, '').split('/');
let cur = this.state, orig = Mon.etcd_tree;
for (let i = 0; i < key.length-1; i++)
{
if (!orig[key[i]])
{
console.log('Bad key in etcd: '+kv.key+' = '+kv.value);
return;
}
orig = orig[key[i]];
cur = (cur[key[i]] = cur[key[i]] || {});
}
if (orig[key[key.length-1]])
{
console.log('Bad key in etcd: '+kv.key+' = '+kv.value);
return;
}
cur[key[key.length-1]] = kv.value;
if (key.join('/') === 'config/global')
{
this.state.config.global = this.state.config.global || {};
this.config = this.state.config.global;
this.check_config();
}
}
async etcd_call(path, body, timeout, retries)
{
let retry = 0;
if (retries >= 0 && retries < 1)
{
retries = 1;
}
while (retries < 0 || retry < retries)
{
const base = this.etcd_urls[Math.floor(Math.random()*this.etcd_urls.length)];
const res = await POST(base+path, body, timeout);
if (res.json)
{
if (res.json.error)
{
console.log('etcd returned error: '+res.json.error);
break;
}
return res.json;
}
retry++;
}
this.die();
}
die(err)
{
// In fact we can just try to rejoin
console.error(err || 'Cluster connection failed');
process.exit(1);
}
local_ips()
{
const ips = [];
const ifaces = os.networkInterfaces();
for (const ifname in ifaces)
{
for (const iface of ifaces[ifname])
{
if (iface.family == 'IPv4' && !iface.internal)
{
ips.push(iface.address);
}
}
}
return ips;
}
}
function POST(url, body, timeout)
{
return new Promise((ok, no) =>
{
const body_text = Buffer.from(JSON.stringify(body));
let timer_id = timeout > 0 ? setTimeout(() =>
{
if (req)
req.abort();
req = null;
ok({ error: 'timeout' });
}, timeout) : null;
let req = http.request(url, { method: 'POST', headers: {
'Content-Type': 'application/json',
'Content-Length': body_text.length,
} }, (res) =>
{
if (!req)
{
return;
}
clearTimeout(timer_id);
if (res.statusCode != 200)
{
ok({ error: res.statusCode, response: res });
return;
}
let res_body = '';
res.setEncoding('utf8');
res.on('data', chunk => { res_body += chunk });
res.on('end', () =>
{
try
{
res_body = JSON.parse(res_body);
ok({ response: res, json: res_body });
}
catch (e)
{
ok({ error: e, response: res, body: res_body });
}
});
});
req.write(body_text);
req.end();
});
}
function b64(str)
{
return Buffer.from(str).toString('base64');
}
function de64(str)
{
return Buffer.from(str, 'base64').toString();
}
function sha1hex(str)
{
const hash = crypto.createHash('sha1');
hash.update(str);
return hash.digest('hex');
}
module.exports = Mon;
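A usage sketch for this file: the monitor is normally started from lp/mon-main.js, but a minimal manual start (assuming a local etcd) looks like this:

    const Mon = require('./mon.js');
    const mon = new Mon({ etcd_url: 'http://127.0.0.1:2379', etcd_prefix: '/rage', etcd_start_timeout: 5 });
    mon.start().catch(e => { console.error(e); process.exit(1); });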

@@ -1,15 +1,14 @@
 {
-    "name": "vitastor-mon",
+    "name": "rage-mon",
     "version": "1.0.0",
-    "description": "Vitastor SDS monitor service",
+    "description": "RAGE storage monitor service",
-    "main": "mon-main.js",
+    "main": "mon.js",
     "scripts": {
         "test": "echo \"Error: no test specified\" && exit 1"
     },
     "author": "Vitaliy Filippov",
     "license": "UNLICENSED",
     "dependencies": {
-        "sprintf-js": "^1.1.2",
         "ws": "^7.2.5"
     }
 }

@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
 const LPOptimizer = require('./lp-optimizer.js');
 const crush_tree = [
@@ -43,31 +40,31 @@ async function run()
 {
     const cur_tree = {};
     console.log('Empty tree:');
-    let res = await LPOptimizer.optimize_initial({ osd_tree: cur_tree, pg_size: 3, pg_count: 256 });
+    let res = await LPOptimizer.optimize_initial(cur_tree, 256);
     LPOptimizer.print_change_stats(res, false);
     console.log('\nAdding 1st failure domain:');
     cur_tree['dom1'] = osd_tree['dom1'];
-    res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
+    res = await LPOptimizer.optimize_change(res.int_pgs, cur_tree);
     LPOptimizer.print_change_stats(res, false);
     console.log('\nAdding 2nd failure domain:');
     cur_tree['dom2'] = osd_tree['dom2'];
-    res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
+    res = await LPOptimizer.optimize_change(res.int_pgs, cur_tree);
     LPOptimizer.print_change_stats(res, false);
     console.log('\nAdding 3rd failure domain:');
     cur_tree['dom3'] = osd_tree['dom3'];
-    res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
+    res = await LPOptimizer.optimize_change(res.int_pgs, cur_tree);
     LPOptimizer.print_change_stats(res, false);
     console.log('\nRemoving 3rd failure domain:');
     delete cur_tree['dom3'];
-    res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
+    res = await LPOptimizer.optimize_change(res.int_pgs, cur_tree);
     LPOptimizer.print_change_stats(res, false);
     console.log('\nRemoving 2nd failure domain:');
     delete cur_tree['dom2'];
-    res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
+    res = await LPOptimizer.optimize_change(res.int_pgs, cur_tree);
     LPOptimizer.print_change_stats(res, false);
     console.log('\nRemoving 1st failure domain:');
     delete cur_tree['dom1'];
-    res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
+    res = await LPOptimizer.optimize_change(res.int_pgs, cur_tree);
     LPOptimizer.print_change_stats(res, false);
 }

@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
 const LPOptimizer = require('./lp-optimizer.js');
 const osd_tree = {
@@ -78,37 +75,19 @@ const crush_tree = [
 async function run()
 {
-    let res;
     // Test: add 1 OSD of almost the same size. Ideal data movement could be 1/12 = 8.33%. Actual is ~13%
-    // Space efficiency is ~99% in all cases.
-    console.log('256 PGs, size=2');
-    res = await LPOptimizer.optimize_initial({ osd_tree, pg_size: 2, pg_count: 256 });
+    // Space efficiency is ~99.5% in both cases.
+    let res = await LPOptimizer.optimize_initial(osd_tree, 256);
     LPOptimizer.print_change_stats(res, false);
-    console.log('\nAdding osd.8');
+    console.log('adding osd.8');
     osd_tree[500][8] = 3.58589;
-    res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2 });
+    res = await LPOptimizer.optimize_change(res.int_pgs, osd_tree);
     LPOptimizer.print_change_stats(res, false);
-    console.log('\nRemoving osd.8');
+    console.log('removing osd.8');
     delete osd_tree[500][8];
-    res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2 });
+    res = await LPOptimizer.optimize_change(res.int_pgs, osd_tree);
     LPOptimizer.print_change_stats(res, false);
-    console.log('\n256 PGs, size=3');
-    res = await LPOptimizer.optimize_initial({ osd_tree, pg_size: 3, pg_count: 256 });
-    LPOptimizer.print_change_stats(res, false);
-    console.log('\nAdding osd.8');
-    osd_tree[500][8] = 3.58589;
-    res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 3 });
-    LPOptimizer.print_change_stats(res, false);
-    console.log('\nRemoving osd.8');
-    delete osd_tree[500][8];
-    res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 3 });
-    LPOptimizer.print_change_stats(res, false);
-    console.log('\n256 PGs, size=3, failure domain=rack');
-    res = await LPOptimizer.optimize_initial({ osd_tree: LPOptimizer.flatten_tree(crush_tree, {}, 1, 3), pg_size: 3, pg_count: 256 });
+    res = await LPOptimizer.optimize_initial(LPOptimizer.flatten_tree(crush_tree, {}, 1, 3), 256);
     LPOptimizer.print_change_stats(res, false);
 }

@@ -1,12 +1,8 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
 #include <unistd.h>
 #include <fcntl.h>
 #include <sys/socket.h>
 #include <sys/epoll.h>
 #include <netinet/tcp.h>
-#include <stdexcept>
 #include "messenger.h"
@@ -26,106 +22,6 @@ osd_op_t::~osd_op_t()
     }
 }
-void osd_messenger_t::init()
-{
-    keepalive_timer_id = tfd->set_timer(1000, true, [this](int)
-    {
-        for (auto cl_it = clients.begin(); cl_it != clients.end();)
-        {
-            auto cl = (cl_it++)->second;
-            if (!cl->osd_num)
-            {
-                // Do not run keepalive on regular clients
-                continue;
-            }
-            if (cl->ping_time_remaining > 0)
-            {
-                cl->ping_time_remaining--;
-                if (!cl->ping_time_remaining)
-                {
-                    // Ping timed out, stop the client
-                    stop_client(cl->peer_fd, true);
-                }
-            }
-            else if (cl->idle_time_remaining > 0)
-            {
-                cl->idle_time_remaining--;
-                if (!cl->idle_time_remaining)
-                {
-                    // Connection is idle for <osd_idle_time>, send ping
-                    osd_op_t *op = new osd_op_t();
-                    op->op_type = OSD_OP_OUT;
-                    op->peer_fd = cl->peer_fd;
-                    op->req = (osd_any_op_t){
-                        .hdr = {
-                            .magic = SECONDARY_OSD_OP_MAGIC,
-                            .id = this->next_subop_id++,
-                            .opcode = OSD_OP_PING,
-                        },
-                    };
-                    op->callback = [this, cl](osd_op_t *op)
-                    {
-                        int fail_fd = (op->reply.hdr.retval != 0 ? op->peer_fd : -1);
-                        cl->ping_time_remaining = 0;
-                        delete op;
-                        if (fail_fd >= 0)
-                        {
-                            stop_client(fail_fd, true);
-                        }
-                    };
-                    outbox_push(op);
-                    cl->ping_time_remaining = osd_ping_timeout;
-                    cl->idle_time_remaining = osd_idle_timeout;
-                }
-            }
-            else
-            {
-                cl->idle_time_remaining = osd_idle_timeout;
-            }
-        }
-    });
-}
-osd_messenger_t::~osd_messenger_t()
-{
-    if (keepalive_timer_id >= 0)
-    {
-        tfd->clear_timer(keepalive_timer_id);
-        keepalive_timer_id = -1;
-    }
-    while (clients.size() > 0)
-    {
-        stop_client(clients.begin()->first, true);
-    }
-}
-void osd_messenger_t::parse_config(const json11::Json & config)
-{
-    this->use_sync_send_recv = config["use_sync_send_recv"].bool_value() ||
-        config["use_sync_send_recv"].uint64_value();
-    this->peer_connect_interval = config["peer_connect_interval"].uint64_value();
-    if (!this->peer_connect_interval)
-    {
-        this->peer_connect_interval = DEFAULT_PEER_CONNECT_INTERVAL;
-    }
-    this->peer_connect_timeout = config["peer_connect_timeout"].uint64_value();
-    if (!this->peer_connect_timeout)
-    {
-        this->peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT;
-    }
-    this->osd_idle_timeout = config["osd_idle_timeout"].uint64_value();
-    if (!this->osd_idle_timeout)
-    {
-        this->osd_idle_timeout = DEFAULT_OSD_PING_TIMEOUT;
-    }
-    this->osd_ping_timeout = config["osd_ping_timeout"].uint64_value();
-    if (!this->osd_ping_timeout)
-    {
-        this->osd_ping_timeout = DEFAULT_OSD_PING_TIMEOUT;
-    }
-    this->log_level = config["log_level"].uint64_value();
-}
 void osd_messenger_t::connect_peer(uint64_t peer_osd, json11::Json peer_state)
 {
     if (wanted_peers.find(peer_osd) == wanted_peers.end())
@@ -167,13 +63,11 @@ void osd_messenger_t::try_connect_peer(uint64_t peer_osd)
     }
     wp.cur_addr = wp.address_list[wp.address_index].string_value();
     wp.cur_port = wp.port;
-    wp.connecting = true;
     try_connect_peer_addr(peer_osd, wp.cur_addr.c_str(), wp.cur_port);
 }
 void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port)
 {
-    assert(peer_osd != this->osd_num);
     struct sockaddr_in addr;
     int r;
     if ((r = inet_pton(AF_INET, peer_host, &addr.sin_addr)) != 1)
@@ -190,6 +84,17 @@ void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer
         return;
     }
     fcntl(peer_fd, F_SETFL, fcntl(peer_fd, F_GETFL, 0) | O_NONBLOCK);
+    int timeout_id = -1;
+    if (peer_connect_timeout > 0)
+    {
+        timeout_id = tfd->set_timer(1000*peer_connect_timeout, false, [this, peer_fd](int timer_id)
+        {
+            osd_num_t peer_osd = clients[peer_fd].osd_num;
+            stop_client(peer_fd);
+            on_connect_peer(peer_osd, -EIO);
+            return;
+        });
+    }
     r = connect(peer_fd, (sockaddr*)&addr, sizeof(addr));
     if (r < 0 && errno != EINPROGRESS)
     {
@@ -197,27 +102,17 @@ void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer
         on_connect_peer(peer_osd, -errno);
         return;
     }
-    int timeout_id = -1;
-    if (peer_connect_timeout > 0)
-    {
-        timeout_id = tfd->set_timer(1000*peer_connect_timeout, false, [this, peer_fd](int timer_id)
-        {
-            osd_num_t peer_osd = clients.at(peer_fd)->osd_num;
-            stop_client(peer_fd, true);
-            on_connect_peer(peer_osd, -EIO);
-            return;
-        });
-    }
-    clients[peer_fd] = new osd_client_t((osd_client_t){
+    assert(peer_osd != this->osd_num);
+    clients[peer_fd] = (osd_client_t){
         .peer_addr = addr,
         .peer_port = peer_port,
         .peer_fd = peer_fd,
         .peer_state = PEER_CONNECTING,
         .connect_timeout_id = timeout_id,
         .osd_num = peer_osd,
-        .in_buf = malloc_or_die(receive_buffer_size),
-    });
+        .in_buf = malloc(receive_buffer_size),
+    };
-    tfd->set_fd_handler(peer_fd, true, [this](int peer_fd, int epoll_events)
+    tfd->set_fd_handler(peer_fd, [this](int peer_fd, int epoll_events)
     {
         // Either OUT (connected) or HUP
         handle_connect_epoll(peer_fd);
@@ -226,13 +121,13 @@ void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer
 void osd_messenger_t::handle_connect_epoll(int peer_fd)
 {
-    auto cl = clients[peer_fd];
-    if (cl->connect_timeout_id >= 0)
+    auto & cl = clients[peer_fd];
+    if (cl.connect_timeout_id >= 0)
     {
-        tfd->clear_timer(cl->connect_timeout_id);
-        cl->connect_timeout_id = -1;
+        tfd->clear_timer(cl.connect_timeout_id);
+        cl.connect_timeout_id = -1;
     }
-    osd_num_t peer_osd = cl->osd_num;
+    osd_num_t peer_osd = cl.osd_num;
     int result = 0;
     socklen_t result_len = sizeof(result);
     if (getsockopt(peer_fd, SOL_SOCKET, SO_ERROR, &result, &result_len) < 0)
@@ -241,14 +136,15 @@ void osd_messenger_t::handle_connect_epoll(int peer_fd)
     }
     if (result != 0)
     {
-        stop_client(peer_fd, true);
+        stop_client(peer_fd);
         on_connect_peer(peer_osd, -result);
         return;
     }
     int one = 1;
     setsockopt(peer_fd, SOL_TCP, TCP_NODELAY, &one, sizeof(one));
-    cl->peer_state = PEER_CONNECTED;
-    tfd->set_fd_handler(peer_fd, false, [this](int peer_fd, int epoll_events)
+    cl.peer_state = PEER_CONNECTED;
+    // FIXME Disable EPOLLOUT on this fd
+    tfd->set_fd_handler(peer_fd, [this](int peer_fd, int epoll_events)
     {
         handle_peer_epoll(peer_fd, epoll_events);
     });
@@ -263,20 +159,17 @@ void osd_messenger_t::handle_peer_epoll(int peer_fd, int epoll_events)
     {
         // Stop client
        printf("[OSD %lu] client %d disconnected\n", this->osd_num, peer_fd);
-        stop_client(peer_fd, true);
+        stop_client(peer_fd);
     }
     else if (epoll_events & EPOLLIN)
     {
         // Mark client as ready (i.e. some data is available)
-        auto cl = clients[peer_fd];
-        cl->read_ready++;
-        if (cl->read_ready == 1)
+        auto & cl = clients[peer_fd];
+        cl.read_ready++;
+        if (cl.read_ready == 1)
         {
-            read_ready_clients.push_back(cl->peer_fd);
-            if (ringloop)
-                ringloop->wakeup();
-            else
-                read_requests();
+            read_ready_clients.push_back(cl.peer_fd);
+            ringloop->wakeup();
         }
     }
 }
@@ -312,20 +205,18 @@ void osd_messenger_t::on_connect_peer(osd_num_t peer_osd, int peer_fd)
     }
         return;
     }
-    if (log_level > 0)
-    {
-        printf("[OSD %lu] Connected with peer OSD %lu (client %d)\n", osd_num, peer_osd, peer_fd);
-    }
+    printf("Connected with peer OSD %lu (fd %d)\n", peer_osd, peer_fd);
     wanted_peers.erase(peer_osd);
     repeer_pgs(peer_osd);
 }
-void osd_messenger_t::check_peer_config(osd_client_t *cl)
+void osd_messenger_t::check_peer_config(osd_client_t & cl)
 {
     osd_op_t *op = new osd_op_t();
     op->op_type = OSD_OP_OUT;
-    op->peer_fd = cl->peer_fd;
-    op->req = (osd_any_op_t){
+    op->send_list.push_back(op->req.buf, OSD_PACKET_SIZE);
+    op->peer_fd = cl.peer_fd;
+    op->req = {
         .show_conf = {
             .header = {
                 .magic = SECONDARY_OSD_OP_MAGIC,
@@ -334,15 +225,16 @@ void osd_messenger_t::check_peer_config(osd_client_t & cl)
             },
         },
     };
-    op->callback = [this, cl](osd_op_t *op)
+    op->callback = [this](osd_op_t *op)
     {
+        osd_client_t & cl = clients[op->peer_fd];
         std::string json_err;
         json11::Json config;
         bool err = false;
         if (op->reply.hdr.retval < 0)
        {
             err = true;
-            printf("Failed to get config from OSD %lu (retval=%ld), disconnecting peer\n", cl->osd_num, op->reply.hdr.retval);
+            printf("Failed to get config from OSD %lu (retval=%ld), disconnecting peer\n", cl.osd_num, op->reply.hdr.retval);
         }
         else
         {
@@ -350,37 +242,45 @@ void osd_messenger_t::check_peer_config(osd_client_t & cl)
             if (json_err != "")
             {
                 err = true;
-                printf("Failed to get config from OSD %lu: bad JSON: %s, disconnecting peer\n", cl->osd_num, json_err.c_str());
+                printf("Failed to get config from OSD %lu: bad JSON: %s, disconnecting peer\n", cl.osd_num, json_err.c_str());
             }
-            else if (config["osd_num"].uint64_value() != cl->osd_num)
+            else if (config["osd_num"].uint64_value() != cl.osd_num)
             {
                 err = true;
-                printf("Connected to OSD %lu instead of OSD %lu, peer state is outdated, disconnecting peer\n", config["osd_num"].uint64_value(), cl->osd_num);
+                printf("Connected to OSD %lu instead of OSD %lu, peer state is outdated, disconnecting peer\n", config["osd_num"].uint64_value(), cl.osd_num);
+                on_connect_peer(cl.osd_num, -1);
             }
         }
         if (err)
         {
-            osd_num_t osd_num = cl->osd_num;
             stop_client(op->peer_fd);
-            on_connect_peer(osd_num, -1);
             delete op;
             return;
        }
-        osd_peer_fds[cl->osd_num] = cl->peer_fd;
-        on_connect_peer(cl->osd_num, cl->peer_fd);
+        osd_peer_fds[cl.osd_num] = cl.peer_fd;
+        on_connect_peer(cl.osd_num, cl.peer_fd);
         delete op;
     };
     outbox_push(op);
 }
-void osd_messenger_t::cancel_osd_ops(osd_client_t *cl)
+void osd_messenger_t::cancel_osd_ops(osd_client_t & cl)
 {
-    for (auto p: cl->sent_ops)
+    for (auto p: cl.sent_ops)
     {
         cancel_op(p.second);
     }
-    cl->sent_ops.clear();
-    cl->outbox.clear();
+    cl.sent_ops.clear();
+    for (auto op: cl.outbox)
+    {
+        cancel_op(op);
+    }
+    cl.outbox.clear();
+    if (cl.write_op)
+    {
+        cancel_op(cl.write_op);
+        cl.write_op = NULL;
+    }
 }
 void osd_messenger_t::cancel_op(osd_op_t *op)
@@ -401,7 +301,7 @@ void osd_messenger_t::cancel_op(osd_op_t *op)
     }
 }
-void osd_messenger_t::stop_client(int peer_fd, bool force)
+void osd_messenger_t::stop_client(int peer_fd)
 {
     assert(peer_fd != 0);
     auto it = clients.find(peer_fd);
@@ -410,49 +310,32 @@ void osd_messenger_t::stop_client(int peer_fd)
         return;
     }
     uint64_t repeer_osd = 0;
-    osd_client_t *cl = it->second;
-    if (cl->peer_state == PEER_CONNECTED)
+    osd_client_t cl = it->second;
+    if (cl.peer_state == PEER_CONNECTED)
     {
-        if (cl->osd_num)
+        if (cl.osd_num)
         {
             // Reload configuration from etcd when the connection is dropped
-            if (log_level > 0)
-                printf("[OSD %lu] Stopping client %d (OSD peer %lu)\n", osd_num, peer_fd, cl->osd_num);
-            repeer_osd = cl->osd_num;
+            printf("[OSD %lu] Stopping client %d (OSD peer %lu)\n", osd_num, peer_fd, cl.osd_num);
+            repeer_osd = cl.osd_num;
         }
         else
         {
-            if (log_level > 0)
-                printf("[OSD %lu] Stopping client %d (regular client)\n", osd_num, peer_fd);
+            printf("[OSD %lu] Stopping client %d (regular client)\n", osd_num, peer_fd);
         }
     }
-    else if (!force)
-    {
-        return;
-    }
-    cl->peer_state = PEER_STOPPED;
     clients.erase(it);
-    tfd->set_fd_handler(peer_fd, false, NULL);
+    tfd->set_fd_handler(peer_fd, NULL);
-    if (cl->connect_timeout_id >= 0)
-    {
-        tfd->clear_timer(cl->connect_timeout_id);
-        cl->connect_timeout_id = -1;
-    }
-    if (cl->osd_num)
+    if (cl.osd_num)
     {
-        osd_peer_fds.erase(cl->osd_num);
+        osd_peer_fds.erase(cl.osd_num);
+        // Cancel outbound operations
+        cancel_osd_ops(cl);
     }
-    if (cl->read_op)
+    if (cl.read_op)
     {
-        if (cl->read_op->callback)
-        {
-            cancel_op(cl->read_op);
-        }
-        else
-        {
-            delete cl->read_op;
-        }
-        cl->read_op = NULL;
+        delete cl.read_op;
+        cl.read_op = NULL;
     }
     for (auto rit = read_ready_clients.begin(); rit != read_ready_clients.end(); rit++)
     {
@@ -470,24 +353,12 @@ void osd_messenger_t::stop_client(int peer_fd)
             break;
         }
     }
-    free(cl->in_buf);
-    cl->in_buf = NULL;
+    free(cl.in_buf);
     close(peer_fd);
     if (repeer_osd)
     {
-        // First repeer PGs as canceling OSD ops may push new operations
-        // and we need correct PG states when we do that
         repeer_pgs(repeer_osd);
     }
-    if (cl->osd_num)
-    {
-        // Cancel outbound operations
-        cancel_osd_ops(cl);
-    }
-    if (cl->refs <= 0)
-    {
-        delete cl;
-    }
 }
 void osd_messenger_t::accept_connections(int listen_fd)
@@ -505,15 +376,15 @@ void osd_messenger_t::accept_connections(int listen_fd)
     fcntl(peer_fd, F_SETFL, fcntl(peer_fd, F_GETFL, 0) | O_NONBLOCK);
     int one = 1;
     setsockopt(peer_fd, SOL_TCP, TCP_NODELAY, &one, sizeof(one));
-    clients[peer_fd] = new osd_client_t((osd_client_t){
+    clients[peer_fd] = {
         .peer_addr = addr,
         .peer_port = ntohs(addr.sin_port),
         .peer_fd = peer_fd,
         .peer_state = PEER_CONNECTED,
-        .in_buf = malloc_or_die(receive_buffer_size),
-    });
+        .in_buf = malloc(receive_buffer_size),
+    };
     // Add FD to epoll
-    tfd->set_fd_handler(peer_fd, false, [this](int peer_fd, int epoll_events)
+    tfd->set_fd_handler(peer_fd, [this](int peer_fd, int epoll_events)
     {
         handle_peer_epoll(peer_fd, epoll_events);
     });

messenger.h Normal file
@@ -0,0 +1,213 @@
#pragma once
#include <sys/types.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <malloc.h>
#include <string.h>
#include <set>
#include <map>
#include <deque>
#include <vector>
#include "json11/json11.hpp"
#include "osd_ops.h"
#include "timerfd_manager.h"
#include "ringloop.h"
#define OSD_OP_IN 0
#define OSD_OP_OUT 1
#define CL_READ_HDR 1
#define CL_READ_DATA 2
#define CL_READ_REPLY_DATA 3
#define CL_WRITE_READY 1
#define CL_WRITE_REPLY 2
#define OSD_OP_INLINE_BUF_COUNT 16
#define PEER_CONNECTING 1
#define PEER_CONNECTED 2
#define DEFAULT_PEER_CONNECT_INTERVAL 5
#define DEFAULT_PEER_CONNECT_TIMEOUT 5
struct osd_op_buf_list_t
{
int count = 0, alloc = 0, sent = 0;
iovec *buf = NULL;
iovec inline_buf[OSD_OP_INLINE_BUF_COUNT];
~osd_op_buf_list_t()
{
if (buf && buf != inline_buf)
{
free(buf);
}
}
inline iovec* get_iovec()
{
return (buf ? buf : inline_buf) + sent;
}
inline int get_size()
{
return count - sent;
}
inline void push_back(void *nbuf, size_t len)
{
if (count >= alloc)
{
if (!alloc)
{
alloc = OSD_OP_INLINE_BUF_COUNT;
buf = inline_buf;
}
else if (buf == inline_buf)
{
int old = alloc;
alloc = ((alloc/16)*16 + 16);
buf = (iovec*)malloc(sizeof(iovec) * alloc);
memcpy(buf, inline_buf, sizeof(iovec)*old);
}
else
{
alloc = ((alloc/16)*16 + 16);
buf = (iovec*)realloc(buf, sizeof(iovec) * alloc);
}
}
buf[count++] = { .iov_base = nbuf, .iov_len = len };
}
};
struct blockstore_op_t;
struct osd_primary_op_data_t;
struct osd_op_t
{
timespec tv_begin;
uint64_t op_type = OSD_OP_IN;
int peer_fd;
osd_any_op_t req;
osd_any_reply_t reply;
blockstore_op_t *bs_op = NULL;
void *buf = NULL;
void *rmw_buf = NULL;
osd_primary_op_data_t* op_data = NULL;
std::function<void(osd_op_t*)> callback;
osd_op_buf_list_t send_list;
~osd_op_t();
};
struct osd_client_t
{
sockaddr_in peer_addr;
int peer_port;
int peer_fd;
int peer_state;
int connect_timeout_id = -1;
osd_num_t osd_num = 0;
void *in_buf = NULL;
// Read state
int read_ready = 0;
osd_op_t *read_op = NULL;
int read_reply_id = 0;
iovec read_iov;
msghdr read_msg;
void *read_buf = NULL;
int read_remaining = 0;
int read_state = 0;
// Incoming operations
std::vector<osd_op_t*> received_ops;
// Outbound operations
std::deque<osd_op_t*> outbox;
std::map<int, osd_op_t*> sent_ops;
// PGs dirtied by this client's primary-writes (FIXME to drop the connection)
std::set<pg_num_t> dirty_pgs;
// Write state
osd_op_t *write_op = NULL;
msghdr write_msg;
int write_state = 0;
};
struct osd_wanted_peer_t
{
json11::Json address_list;
int port;
time_t last_connect_attempt;
bool connecting, address_changed;
int address_index;
std::string cur_addr;
int cur_port;
};
struct osd_op_stats_t
{
uint64_t op_stat_sum[OSD_OP_MAX+1] = { 0 };
uint64_t op_stat_count[OSD_OP_MAX+1] = { 0 };
uint64_t op_stat_bytes[OSD_OP_MAX+1] = { 0 };
uint64_t subop_stat_sum[OSD_OP_MAX+1] = { 0 };
uint64_t subop_stat_count[OSD_OP_MAX+1] = { 0 };
};
struct osd_messenger_t
{
timerfd_manager_t *tfd;
ring_loop_t *ringloop;
// osd_num_t is only for logging and asserts
osd_num_t osd_num;
int receive_buffer_size = 9000;
int peer_connect_interval = DEFAULT_PEER_CONNECT_INTERVAL;
int peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT;
int log_level = 0;
std::map<osd_num_t, osd_wanted_peer_t> wanted_peers;
std::map<uint64_t, int> osd_peer_fds;
uint64_t next_subop_id = 1;
std::map<int, osd_client_t> clients;
std::vector<int> read_ready_clients;
std::vector<int> write_ready_clients;
// op statistics
osd_op_stats_t stats;
public:
void connect_peer(uint64_t osd_num, json11::Json peer_state);
void stop_client(int peer_fd);
void outbox_push(osd_op_t *cur_op);
std::function<void(osd_op_t*)> exec_op;
std::function<void(osd_num_t)> repeer_pgs;
void handle_peer_epoll(int peer_fd, int epoll_events);
void read_requests();
void send_replies();
void accept_connections(int listen_fd);
protected:
void try_connect_peer(uint64_t osd_num);
void try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port);
void handle_connect_epoll(int peer_fd);
void on_connect_peer(osd_num_t peer_osd, int peer_fd);
void check_peer_config(osd_client_t & cl);
void cancel_osd_ops(osd_client_t & cl);
void cancel_op(osd_op_t *op);
bool try_send(osd_client_t & cl);
void handle_send(int result, int peer_fd);
bool handle_read(int result, int peer_fd);
void handle_finished_read(osd_client_t & cl);
void handle_op_hdr(osd_client_t *cl);
void handle_reply_hdr(osd_client_t *cl);
};

@@ -1,104 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
module.exports = {
scale_pg_count,
};
function add_pg_history(new_pg_history, new_pg, prev_pgs, prev_pg_history, old_pg)
{
if (!new_pg_history[new_pg])
{
new_pg_history[new_pg] = {
osd_sets: {},
all_peers: {},
epoch: 0,
};
}
const nh = new_pg_history[new_pg], oh = prev_pg_history[old_pg];
nh.osd_sets[prev_pgs[old_pg].join(' ')] = prev_pgs[old_pg];
if (oh && oh.osd_sets && oh.osd_sets.length)
{
for (const pg of oh.osd_sets)
{
nh.osd_sets[pg.join(' ')] = pg;
}
}
if (oh && oh.all_peers && oh.all_peers.length)
{
for (const osd_num of oh.all_peers)
{
nh.all_peers[osd_num] = Number(osd_num);
}
}
if (oh && oh.epoch)
{
nh.epoch = nh.epoch < oh.epoch ? oh.epoch : nh.epoch;
}
}
function finish_pg_history(merged_history)
{
merged_history.osd_sets = Object.values(merged_history.osd_sets);
merged_history.all_peers = Object.values(merged_history.all_peers);
}
function scale_pg_count(prev_pgs, prev_pg_history, new_pg_history, new_pg_count)
{
const old_pg_count = prev_pgs.length;
// Add all possibly intersecting PGs to the history of new PGs
if (!(new_pg_count % old_pg_count))
{
// New PG count is a multiple of old PG count
for (let i = 0; i < new_pg_count; i++)
{
add_pg_history(new_pg_history, i, prev_pgs, prev_pg_history, i % old_pg_count);
finish_pg_history(new_pg_history[i]);
}
}
else if (!(old_pg_count % new_pg_count))
{
// Old PG count is a multiple of the new PG count
const mul = (old_pg_count / new_pg_count);
for (let i = 0; i < new_pg_count; i++)
{
for (let j = 0; j < mul; j++)
{
add_pg_history(new_pg_history, i, prev_pgs, prev_pg_history, i+j*new_pg_count);
}
finish_pg_history(new_pg_history[i]);
}
}
else
{
// Any PG may intersect with any PG after non-multiple PG count change
// So, merge ALL PGs history
let merged_history = {};
for (let i = 0; i < old_pg_count; i++)
{
add_pg_history(merged_history, 1, prev_pgs, prev_pg_history, i);
}
finish_pg_history(merged_history[1]);
for (let i = 0; i < new_pg_count; i++)
{
new_pg_history[i] = { ...merged_history[1] };
}
}
// Mark history keys for removed PGs as removed
for (let i = new_pg_count; i < old_pg_count; i++)
{
new_pg_history[i] = null;
}
// Just for the lp_solve optimizer - pick a "previous" PG for each "new" one
if (old_pg_count < new_pg_count)
{
for (let i = old_pg_count; i < new_pg_count; i++)
{
prev_pgs[i] = prev_pgs[i % old_pg_count];
}
}
else if (old_pg_count > new_pg_count)
{
prev_pgs.splice(new_pg_count, old_pg_count-new_pg_count);
}
}
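// Example (illustrative): scaling from 2 to 4 PGs maps new PG i to old PG (i % 2),
// so the history of old PG 0 is inherited by new PGs 0 and 2, and that of old PG 1 by new PGs 1 and 3;
// prev_pgs is also extended in place so the optimizer sees a "previous" PG for every new one:
// const prev_pgs = [ [ 1, 2 ], [ 3, 4 ] ], new_pg_history = [];
// scale_pg_count(prev_pgs, [ null, null ], new_pg_history, 4);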

View File

@ -1,89 +0,0 @@
// Functions to calculate Annualized Failure Rate of your cluster
// if you know AFR of your drives, number of drives, expected rebalance time
// and replication factor
// License: VNPL-1.1 (see https://yourcmc.ru/git/vitalif/vitastor/src/branch/master/README.md for details) or AGPL-3.0
// Author: Vitaliy Filippov, 2020+
module.exports = {
cluster_afr_fullmesh,
failure_rate_fullmesh,
cluster_afr,
c_n_k,
};
/******** "FULL MESH": ASSUME EACH OSD COMMUNICATES WITH ALL OTHER OSDS ********/
// Estimate AFR of the cluster
// n - number of drives
// afr - annualized failure rate of a single drive
// l - expected rebalance time in days after a single drive failure
// k - replication factor / number of drives that must fail at the same time for the cluster to fail
function cluster_afr_fullmesh(n, afr, l, k)
{
return 1 - (1 - afr * failure_rate_fullmesh(n-(k-1), afr*l/365, k-1)) ** (n-(k-1));
}
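// Example (hypothetical numbers): 100 drives with 3% AFR each, a 1-day rebalance and 2 replicas:
// cluster_afr_fullmesh(100, 0.03, 1, 2)
// = 1 - (1 - 0.03 * failure_rate_fullmesh(99, 0.03*1/365, 1)) ** 99 ~= 0.024,
// i.e. roughly a 2.4% chance of losing some data within a year.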
// Probability of at least <f> failures in a cluster with <n> drives with AFR=<a>
function failure_rate_fullmesh(n, a, f)
{
if (f <= 0)
{
return (1-a)**n;
}
let p = 1;
for (let i = 0; i < f; i++)
{
p -= c_n_k(n, i) * (1-a)**(n-i) * a**i;
}
return p;
}
/******** PGS: EACH OSD ONLY COMMUNICATES WITH <pgs> OTHER OSDs ********/
// <n> hosts of <m> drives of <capacity> GB, each able to backfill at <speed> GB/s,
// <k> replicas, <pgs> unique peer PGs per OSD (~50 for 100 PG-per-OSD in a big cluster)
//
// For each of n*m drives: P(drive fails in a year) * P(any of its peers fail in <l*365> next days).
// More peers per OSD increase rebalance speed (more drives work together to resilver) if you
// let them finish rebalance BEFORE replacing the failed drive (degraded_replacement=false).
// At the same time, more peers per OSD increase probability of any of them to fail!
// osd_rm=true means that a failed OSD's data is rebalanced over all other hosts,
// not over the remaining drives of the same host, as happens in Ceph by default (dead OSDs are marked 'out').
//
// The probability of all but one drive in a replica group failing is ~ ((AFR*L/365)^(k-1)).
// So with <x> PGs it becomes ~ (x * (AFR*L/365)^(k-1)). An interesting but reasonable consequence
// is that, with k=2, the total failure rate doesn't depend on the number of peers per OSD,
// because it is increased linearly by the larger number of peers that can fail
// and decreased linearly by the reduced rebalance time.
function cluster_afr({ n_hosts, n_drives, afr_drive, afr_host, capacity, speed, ec, ec_data, ec_parity, replicas, pgs = 1, osd_rm, degraded_replacement, down_out_interval = 600 })
{
const pg_size = (ec ? ec_data+ec_parity : replicas);
pgs = Math.min(pgs, (n_hosts-1)*n_drives/(pg_size-1));
const host_pgs = Math.min(pgs*n_drives, (n_hosts-1)*n_drives/(pg_size-1));
const resilver_disk = n_drives == 1 || osd_rm ? pgs : (n_drives-1);
const disk_heal_time = (down_out_interval + capacity/(degraded_replacement ? 1 : resilver_disk)/speed)/86400/365;
const host_heal_time = (down_out_interval + n_drives*capacity/pgs/speed)/86400/365;
const disk_heal_fail = ((afr_drive+afr_host/n_drives)*disk_heal_time);
const host_heal_fail = ((afr_drive+afr_host/n_drives)*host_heal_time);
const disk_pg_fail = ec
? failure_rate_fullmesh(ec_data+ec_parity-1, disk_heal_fail, ec_parity)
: disk_heal_fail**(replicas-1);
const host_pg_fail = ec
? failure_rate_fullmesh(ec_data+ec_parity-1, host_heal_fail, ec_parity)
: host_heal_fail**(replicas-1);
return 1 - ((1 - afr_drive * (1-(1-disk_pg_fail)**pgs)) ** (n_hosts*n_drives))
* ((1 - afr_host * (1-(1-host_pg_fail)**host_pgs)) ** n_hosts);
}
/******** UTILITY ********/
// Combination count
function c_n_k(n, k)
{
let r = 1;
for (let i = 0; i < k; i++)
{
r *= (n-i) / (i+1);
}
return r;
}
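// e.g. c_n_k(5, 2) == 10, the number of 2-element subsets of a 5-element set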

View File

@ -1,28 +0,0 @@
const { sprintf } = require('sprintf-js');
const { cluster_afr } = require('./afr.js');
print_cluster_afr({ n_hosts: 4, n_drives: 6, afr_drive: 0.03, afr_host: 0.05, capacity: 4000, speed: 0.1, replicas: 2 });
print_cluster_afr({ n_hosts: 4, n_drives: 3, afr_drive: 0.03, afr_host: 0, capacity: 4000, speed: 0.1, replicas: 2 });
print_cluster_afr({ n_hosts: 4, n_drives: 3, afr_drive: 0.03, afr_host: 0.05, capacity: 4000, speed: 0.1, replicas: 2 });
print_cluster_afr({ n_hosts: 4, n_drives: 3, afr_drive: 0.03, afr_host: 0, capacity: 4000, speed: 0.1, ec: true, ec_data: 2, ec_parity: 1 });
print_cluster_afr({ n_hosts: 4, n_drives: 3, afr_drive: 0.03, afr_host: 0.05, capacity: 4000, speed: 0.1, ec: true, ec_data: 2, ec_parity: 1 });
print_cluster_afr({ n_hosts: 10, n_drives: 10, afr_drive: 0.1, afr_host: 0, capacity: 8000, speed: 0.02, replicas: 2 });
print_cluster_afr({ n_hosts: 10, n_drives: 10, afr_drive: 0.1, afr_host: 0.05, capacity: 8000, speed: 0.02, replicas: 2 });
print_cluster_afr({ n_hosts: 10, n_drives: 10, afr_drive: 0.1, afr_host: 0, capacity: 8000, speed: 0.02, replicas: 3 });
print_cluster_afr({ n_hosts: 10, n_drives: 10, afr_drive: 0.1, afr_host: 0.05, capacity: 8000, speed: 0.02, replicas: 3 });
print_cluster_afr({ n_hosts: 10, n_drives: 10, afr_drive: 0.1, afr_host: 0, capacity: 8000, speed: 0.02, replicas: 3, pgs: 100 });
print_cluster_afr({ n_hosts: 10, n_drives: 10, afr_drive: 0.1, afr_host: 0.05, capacity: 8000, speed: 0.02, replicas: 3, pgs: 100 });
print_cluster_afr({ n_hosts: 10, n_drives: 10, afr_drive: 0.1, afr_host: 0.05, capacity: 8000, speed: 0.02, replicas: 3, pgs: 100, degraded_replacement: 1 });
function print_cluster_afr(config)
{
console.log(
`${config.n_hosts} nodes with ${config.n_drives} ${sprintf("%.1f", config.capacity/1000)}TB drives`+
`, capable to backfill at ${sprintf("%.1f", config.speed*1000)} MB/s, drive AFR ${sprintf("%.1f", config.afr_drive*100)}%`+
(config.afr_host ? `, host AFR ${sprintf("%.1f", config.afr_host*100)}%` : '')+
(config.ec ? `, EC ${config.ec_data}+${config.ec_parity}` : `, ${config.replicas} replicas`)+
`, ${config.pgs||1} PG per OSD`+
(config.degraded_replacement ? `\n...and you don't let the rebalance finish before replacing drives` : '')
);
console.log('-> '+sprintf("%.7f%%", 100*cluster_afr(config))+'\n');
}

View File

@ -1,76 +0,0 @@
#!/bin/bash
# Very simple systemd unit generator for vitastor-osd services
# Not the final solution yet, mostly for tests
# Copyright (c) Vitaliy Filippov, 2019+
# License: MIT
# USAGE: ./make-osd.sh /dev/disk/by-partuuid/xxx [ /dev/disk/by-partuuid/yyy]...
IP_SUBSTR="10.200.1."
ETCD_HOSTS="etcd0=http://10.200.1.10:2380,etcd1=http://10.200.1.11:2380,etcd2=http://10.200.1.12:2380"
set -e -x
IP=`ip -json a s | jq -r '.[].addr_info[] | select(.local | startswith("'$IP_SUBSTR'")) | .local'`
[ "$IP" != "" ] || exit 1
ETCD_MON=$(echo $ETCD_HOSTS | perl -pe 's/:2380/:2379/g; s/etcd\d*=//g;')
D=`dirname $0`
# Create OSDs on all passed devices
OSD_NUM=1
for DEV in "$@"; do
# Ugly :) -> node.js rework pending
while true; do
ST=$(etcdctl --endpoints="$ETCD_MON" get --print-value-only /vitastor/osd/stats/$OSD_NUM)
if [ "$ST" = "" ]; then
break
fi
OSD_NUM=$((OSD_NUM+1))
done
etcdctl --endpoints="$ETCD_MON" put /vitastor/osd/stats/$OSD_NUM '{}'
echo Creating OSD $OSD_NUM on $DEV
OPT=`node $D/simple-offsets.js --device $DEV --format options | tr '\n' ' '`
DATA_START=`echo $OPT | grep -Po '(?<=data_offset )\d+'`
# Zero out everything before the data start (the journal + metadata area)
dd if=/dev/zero of=$DEV bs=1048576 count=$(((DATA_START+1048575)/1048576)) oflag=direct
cat >/etc/systemd/system/vitastor-osd$OSD_NUM.service <<EOF
[Unit]
Description=Vitastor object storage daemon osd.$OSD_NUM
After=network-online.target local-fs.target time-sync.target
Wants=network-online.target local-fs.target time-sync.target
PartOf=vitastor.target
[Service]
LimitNOFILE=1048576
LimitNPROC=1048576
LimitMEMLOCK=infinity
ExecStart=/usr/bin/vitastor-osd \\
--etcd_address $IP:2379/v3 \\
--bind_address $IP \\
--osd_num $OSD_NUM \\
--disable_data_fsync 1 \\
--immediate_commit all \\
--flusher_count 256 \\
--disk_alignment 4096 --journal_block_size 4096 --meta_block_size 4096 \\
--journal_no_same_sector_overwrites true \\
--journal_sector_buffer_count 1024 \\
$OPT
WorkingDirectory=/
ExecStartPre=+chown vitastor:vitastor $DEV
User=vitastor
PrivateTmp=false
TasksMax=infinity
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=vitastor.target
EOF
systemctl enable vitastor-osd$OSD_NUM
done

View File

@ -1,85 +0,0 @@
#!/bin/bash
# Very simple systemd unit generator for etcd & vitastor-mon services
# Not the final solution yet, mostly for tests
# Copyright (c) Vitaliy Filippov, 2019+
# License: MIT
# USAGE: ./make-units.sh
IP_SUBSTR="10.200.1."
ETCD_HOSTS="etcd0=http://10.200.1.10:2380,etcd1=http://10.200.1.11:2380,etcd2=http://10.200.1.12:2380"
# determine IP
IP=`ip -json a s | jq -r '.[].addr_info[] | select(.local | startswith("'$IP_SUBSTR'")) | .local'`
[ "$IP" != "" ] || exit 1
ETCD_NUM=${ETCD_HOSTS/$IP*/}
[ "$ETCD_NUM" != "$ETCD_HOSTS" ] || exit 1
ETCD_NUM=$(echo $ETCD_NUM | tr -d -c , | wc -c)
# etcd
useradd etcd
mkdir -p /var/lib/etcd$ETCD_NUM.etcd
cat >/etc/systemd/system/etcd.service <<EOF
[Unit]
Description=etcd for vitastor
After=network-online.target local-fs.target time-sync.target
Wants=network-online.target local-fs.target time-sync.target
[Service]
ExecStart=/usr/local/bin/etcd -name etcd$ETCD_NUM --data-dir /var/lib/etcd$ETCD_NUM.etcd \\
--advertise-client-urls http://$IP:2379 --listen-client-urls http://$IP:2379 \\
--initial-advertise-peer-urls http://$IP:2380 --listen-peer-urls http://$IP:2380 \\
--initial-cluster-token vitastor-etcd-1 --initial-cluster $ETCD_HOSTS \\
--initial-cluster-state new --max-txn-ops=100000 --auto-compaction-retention=10 --auto-compaction-mode=revision
WorkingDirectory=/var/lib/etcd$ETCD_NUM.etcd
ExecStartPre=+chown -R etcd /var/lib/etcd$ETCD_NUM.etcd
User=etcd
PrivateTmp=false
TasksMax=infinity
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
useradd vitastor
chmod 755 /root
# Vitastor target
cat >/etc/systemd/system/vitastor.target <<EOF
[Unit]
Description=vitastor target
[Install]
WantedBy=multi-user.target
EOF
# Monitor unit
ETCD_MON=$(echo $ETCD_HOSTS | perl -pe 's/:2380/:2379/g; s/etcd\d*=//g;')
cat >/etc/systemd/system/vitastor-mon.service <<EOF
[Unit]
Description=Vitastor monitor
After=network-online.target local-fs.target time-sync.target
Wants=network-online.target local-fs.target time-sync.target
[Service]
Restart=always
ExecStart=node /usr/lib/vitastor/mon/mon-main.js --etcd_url '$ETCD_MON' --etcd_prefix '/vitastor' --etcd_start_timeout 5
WorkingDirectory=/
User=vitastor
PrivateTmp=false
TasksMax=infinity
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=vitastor.target
EOF

View File

@ -1,23 +0,0 @@
const fsp = require('fs').promises;
async function merge(file1, file2, out)
{
if (!out)
{
console.error('USAGE: nodejs merge.js layer1 layer2 output');
process.exit();
}
const layer1 = await fsp.readFile(file1);
const layer2 = await fsp.readFile(file2);
const zero = Buffer.alloc(4096);
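// Copy every 4 KiB block of layer2 that is not entirely zero over layer1;
// all-zero blocks are treated as "not written" in the upper layer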
for (let i = 0; i < layer2.length; i += 4096)
{
if (zero.compare(layer2, i, i+4096) != 0)
{
layer2.copy(layer1, i, i, i+4096);
}
}
await fsp.writeFile(out, layer1);
}
merge(process.argv[2], process.argv[3], process.argv[4]);

1471
mon/mon.js

File diff suppressed because it is too large

View File

@ -1,93 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: MIT
// Simple tool to calculate journal and metadata offsets for a single device
// Will be replaced by smarter tools in the future
const fs = require('fs').promises;
const child_process = require('child_process');
async function run()
{
const options = {
object_size: 128*1024,
bitmap_granularity: 4096,
journal_size: 16*1024*1024,
device_block_size: 4096,
journal_offset: 0,
device_size: 0,
format: 'text',
};
for (let i = 2; i < process.argv.length; i++)
{
if (process.argv[i].substr(0, 2) == '--')
{
options[process.argv[i].substr(2)] = process.argv[i+1];
i++;
}
}
if (!options.device)
{
process.stderr.write('USAGE: nodejs '+process.argv[1]+' --device /dev/sdXXX\n');
process.exit(1);
}
options.device_size = Number(options.device_size);
let device_size = options.device_size;
if (!device_size)
{
const st = await fs.stat(options.device);
options.device_block_size = st.blksize;
if (st.isBlockDevice())
device_size = Number(await system("/sbin/blockdev --getsize64 "+options.device));
else
device_size = st.size;
}
if (!device_size)
{
process.stderr.write('Failed to get device size\n');
process.exit(1);
}
options.journal_offset = Math.ceil(options.journal_offset/options.device_block_size)*options.device_block_size;
const meta_offset = options.journal_offset + Math.ceil(options.journal_size/options.device_block_size)*options.device_block_size;
const entries_per_block = Math.floor(options.device_block_size / (24 + 2*options.object_size/options.bitmap_granularity/8));
const object_count = Math.floor((device_size-meta_offset)/options.object_size);
const meta_size = Math.ceil(object_count / entries_per_block) * options.device_block_size;
const data_offset = meta_offset + meta_size;
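// Worked example, assuming a 1 TiB device and the defaults above:
// entries_per_block = floor(4096 / (24 + 2*131072/4096/8)) = floor(4096/32) = 128
// object_count = floor((2^40 - 16 MiB) / 128 KiB) = 8388480
// meta_size = ceil(8388480/128) * 4096 = 268431360 bytes (~256 MB)
// data_offset = 16777216 + 268431360 = 285208576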
const meta_size_fmt = (meta_size > 1024*1024*1024 ? Math.round(meta_size/1024/1024/1024*100)/100+" GB"
: Math.round(meta_size/1024/1024*100)/100+" MB");
if (options.format == 'text' || options.format == 'options')
{
if (options.format == 'text')
{
process.stderr.write(
`Metadata size: ${meta_size_fmt}\n`+
`Options for the OSD:\n`
);
}
process.stdout.write(
` --data_device ${options.device}\n`+
` --journal_offset ${options.journal_offset}\n`+
` --meta_offset ${meta_offset}\n`+
` --data_offset ${data_offset}\n`+
(options.device_size ? ` --data_size ${device_size-data_offset}\n` : '')
);
}
else if (options.format == 'env')
{
process.stdout.write(
`journal_offset=${options.journal_offset}\n`+
`meta_offset=${meta_offset}\n`+
`data_offset=${data_offset}\n`+
`data_size=${device_size-data_offset}\n`
);
}
else
process.stderr.write('Unknown format: '+options.format+'\n');
}
function system(cmd)
{
return new Promise((ok, no) => child_process.exec(cmd, { maxBuffer: 64*1024*1024 }, (err, stdout, stderr) => (err ? no(err.message) : ok(stdout))));
}
run().catch(err => { console.error(err); process.exit(1); });

View File

@ -1,78 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: MIT
function stableStringify(obj, opts)
{
if (!opts)
opts = {};
if (typeof opts === 'function')
opts = { cmp: opts };
let space = opts.space || '';
if (typeof space === 'number')
space = Array(space+1).join(' ');
const cycles = (typeof opts.cycles === 'boolean') ? opts.cycles : false;
const cmp = opts.cmp && (function (f)
{
return function (node)
{
return function (a, b)
{
let aobj = { key: a, value: node[a] };
let bobj = { key: b, value: node[b] };
return f(aobj, bobj);
};
};
})(opts.cmp);
const seen = new Map();
return (function stringify (parent, key, node, level)
{
const indent = space ? ('\n' + new Array(level + 1).join(space)) : '';
const colonSeparator = space ? ': ' : ':';
if (node === undefined)
{
return;
}
if (typeof node !== 'object' || node === null)
{
return JSON.stringify(node);
}
if (node instanceof Array)
{
const out = [];
for (let i = 0; i < node.length; i++)
{
const item = stringify(node, i, node[i], level+1) || JSON.stringify(null);
out.push(indent + space + item);
}
return '[' + out.join(',') + indent + ']';
}
else
{
if (seen.has(node))
{
if (cycles)
return JSON.stringify('__cycle__');
throw new TypeError('Converting circular structure to JSON');
}
else
seen.set(node, true);
const keys = Object.keys(node).sort(cmp && cmp(node));
const out = [];
for (let i = 0; i < keys.length; i++)
{
const key = keys[i];
const value = stringify(node, key, node[key], level+1);
if (!value)
continue;
const keyValue = JSON.stringify(key)
+ colonSeparator
+ value;
out.push(indent + space + keyValue);
}
seen.delete(node);
return '{' + out.join(',') + indent + '}';
}
})({ '': obj }, '', obj, 0);
}
module.exports = stableStringify;
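// Example: keys are emitted in sorted order regardless of insertion order,
// so stableStringify({ b: 1, a: 2 }) === '{"a":2,"b":1}'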

View File

@ -1,130 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
// Interesting real-world example coming from Ceph with EC and compression enabled.
// EC parity chunks can't be compressed as efficiently as data chunks,
// thus they occupy more space (2.26x more space) in OSD object stores.
// This leads to really uneven OSD fill ratio in Ceph even when PGs are perfectly balanced.
// But we support this case with the "parity_space" parameter in optimize_initial()/optimize_change().
const LPOptimizer = require('./lp-optimizer.js');
const osd_tree = {
ripper5: {
osd0: 3.493144989013672,
osd1: 3.493144989013672,
osd2: 3.454082489013672,
osd12: 3.461894989013672,
},
ripper7: {
osd4: 3.638690948486328,
osd5: 3.638690948486328,
osd6: 3.638690948486328,
},
ripper4: {
osd9: 3.4609375,
osd10: 3.4609375,
osd11: 3.4609375,
},
ripper6: {
osd3: 3.5849609375,
osd7: 3.5859336853027344,
osd8: 3.638690948486328,
osd13: 3.461894989013672
},
};
const prev_pgs = [[12,7,5],[6,11,12],[3,6,9],[10,0,5],[2,5,13],[9,8,6],[3,4,12],[7,4,12],[12,11,13],[13,6,0],[4,13,10],[9,7,6],[7,10,0],[10,8,0],[3,10,2],[3,0,4],[6,13,0],[13,10,0],[13,10,5],[8,11,6],[3,9,2],[2,8,5],[8,9,5],[3,12,11],[0,7,4],[13,11,1],[11,3,12],[12,8,10],[7,5,12],[2,13,5],[7,11,0],[13,2,6],[0,6,8],[13,1,6],[0,13,4],[0,8,10],[4,10,0],[8,12,4],[8,12,9],[12,7,4],[13,9,5],[3,2,11],[1,9,7],[1,8,5],[5,12,9],[3,5,12],[2,8,10],[0,8,4],[1,4,11],[7,10,2],[12,13,5],[3,1,11],[7,1,4],[4,12,8],[7,0,9],[11,1,8],[3,0,5],[11,13,0],[1,13,5],[12,7,10],[12,8,4],[11,13,5],[0,11,6],[2,11,3],[13,1,11],[2,7,10],[7,10,12],[7,12,10],[12,11,5],[13,12,10],[2,3,9],[4,3,9],[13,2,5],[7,12,6],[12,10,13],[9,8,1],[13,1,5],[9,5,12],[5,11,7],[6,2,9],[8,11,6],[12,5,8],[6,13,1],[7,6,11],[2,3,6],[8,5,9],[1,13,6],[9,3,2],[7,11,1],[3,10,1],[0,11,7],[3,0,5],[1,3,6],[6,0,9],[3,11,4],[8,10,2],[13,1,9],[12,6,9],[3,12,9],[12,8,9],[7,5,0],[8,12,5],[0,11,3],[12,11,13],[0,7,11],[0,3,10],[1,3,11],[2,7,11],[13,2,6],[9,12,13],[8,2,4],[0,7,4],[5,13,0],[13,12,9],[1,9,8],[0,10,3],[3,5,10],[7,12,9],[2,13,4],[12,7,5],[9,2,7],[3,2,9],[6,2,7],[3,1,9],[4,3,2],[5,3,11],[0,7,6],[1,6,13],[7,10,2],[12,4,8],[13,12,6],[7,5,11],[6,2,3],[2,7,6],[2,3,10],[2,7,10],[11,12,6],[0,13,5],[10,2,4],[13,0,11],[7,0,6],[8,9,4],[8,4,11],[7,11,2],[3,4,2],[6,1,3],[7,2,11],[8,9,4],[11,4,8],[10,3,1],[2,10,13],[1,7,11],[13,11,12],[2,6,9],[10,0,13],[7,10,4],[0,11,13],[13,10,1],[7,5,0],[7,12,10],[3,1,4],[7,1,5],[3,11,5],[7,5,0],[1,3,5],[10,5,12],[0,3,9],[7,1,11],[11,8,12],[3,6,2],[7,12,9],[7,11,12],[4,11,3],[0,11,13],[13,2,5],[1,5,8],[0,11,8],[3,5,1],[11,0,6],[3,11,2],[11,8,12],[4,1,3],[10,13,4],[13,9,6],[2,3,10],[12,7,9],[10,0,4],[10,13,2],[3,11,1],[7,2,9],[1,7,4],[13,1,4],[7,0,6],[5,3,9],[10,0,7],[0,7,10],[3,6,10],[13,0,5],[8,4,1],[3,1,10],[2,10,13],[13,0,5],[13,10,2],[12,7,9],[6,8,10],[6,1,8],[10,8,1],[13,5,0],[5,11,3],[7,6,1],[8,5,9],[2,13,11],[10,12,4],[13,4,1],[2,13,4],[11,7,0],[2,9,7],[1,7,6],[8,0,4],[8,1,9],[7,10,12],[13,9,6],[7,6,11],[13,0,4],[1,8,4],[3,12,5],[10,3,1],[10,2,13],[2,4,8],[6,2,3],[3,0,10],[6,7,12],[8,12,5],[3,0,6],[13,12,10],[11,3,6],[9,0,13],[10,0,6],[7,5,2],[1,3,11],[7,10,2],[2,9,8],[11,13,12],[0,8,4],[8,12,11],[6,0,3],[1,13,4],[11,8,2],[12,3,6],[4,7,1],[7,6,12],[3,10,6],[0,10,7],[8,9,1],[0,10,6],[8,10,1]]
.map(pg => pg.map(n => 'osd'+n));
const by_osd = {};
for (let i = 0; i < prev_pgs.length; i++)
{
for (let j = 0; j < prev_pgs[i].length; j++)
{
by_osd[prev_pgs[i][j]] = by_osd[prev_pgs[i][j]] || [];
by_osd[prev_pgs[i][j]][j] = (by_osd[prev_pgs[i][j]][j] || 0) + 1;
}
}
/*
This set of PGs was balanced by hand, by heavily tuning OSD weights in Ceph:
{
osd0: 4.2,
osd1: 3.5,
osd2: 3.45409,
osd3: 4.5,
osd4: 1.4,
osd5: 1.4,
osd6: 1.75,
osd7: 4.5,
osd8: 4.4,
osd9: 2.2,
osd10: 2.7,
osd11: 2,
osd12: 3.4,
osd13: 3.4,
}
EC+compression is a nightmare in Ceph, yeah :))
To calculate the average ratio between data chunks and parity chunks we
calculate the number of PG chunks for each chunk role for each OSD:
{
osd12: [ 18, 22, 17 ],
osd7: [ 35, 22, 8 ],
osd5: [ 6, 17, 27 ],
osd6: [ 13, 12, 28 ],
osd11: [ 13, 26, 20 ],
osd3: [ 30, 20, 10 ],
osd9: [ 8, 12, 26 ],
osd10: [ 15, 23, 20 ],
osd0: [ 22, 22, 14 ],
osd2: [ 22, 16, 16 ],
osd13: [ 29, 19, 13 ],
osd8: [ 20, 18, 12 ],
osd4: [ 8, 10, 28 ],
osd1: [ 17, 17, 17 ]
}
And now we can pick a pair of OSDs and determine the ratio by solving the following:
osd5 = 23*X + 27*Y = 3249728140
osd13 = 48*X + 13*Y = 2991675992
=>
osd5 - 27/13*osd13 = 23*X - 27/13*48*X = -76.6923076923077*X = -2963752766.46154
=>
X = 38644720.1243731
Y = (osd5-23*X)/27 = 87440725.0792377
Y/X = 2.26268232239284 ~= 2.26
Which means that parity chunks are compressed ~2.26 times worse than data chunks.
Fine, let's try to optimize for it.
*/
async function run()
{
const all_weights = Object.assign({}, ...Object.values(osd_tree));
const total_weight = Object.values(all_weights).reduce((a, c) => Number(a) + Number(c), 0);
const eff = LPOptimizer.pg_list_space_efficiency(prev_pgs, all_weights, 2, 2.26);
const orig = eff*4.26 / total_weight;
console.log('Original efficiency was: '+Math.round(orig*10000)/100+' %');
let prev = await LPOptimizer.optimize_initial({ osd_tree, pg_size: 3, pg_count: 256, parity_space: 2.26 });
LPOptimizer.print_change_stats(prev);
let next = await LPOptimizer.optimize_change({ prev_pgs, osd_tree, pg_size: 3, max_combinations: 10000, parity_space: 2.26 });
LPOptimizer.print_change_stats(next);
}
run().catch(console.error);

View File

@ -1,25 +0,0 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
const LPOptimizer = require('./lp-optimizer.js');
async function run()
{
const osd_tree = { a: { 1: 1 }, b: { 2: 1 }, c: { 3: 1 } };
let res;
console.log('16 PGs, size=3');
res = await LPOptimizer.optimize_initial({ osd_tree, pg_size: 3, pg_count: 16 });
LPOptimizer.print_change_stats(res, false);
console.log('\nReduce PG size to 2');
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs.map(pg => pg.slice(0, 2)), osd_tree, pg_size: 2 });
LPOptimizer.print_change_stats(res, false);
console.log('\nRemove OSD 3');
delete osd_tree['c'];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2 });
LPOptimizer.print_change_stats(res, false);
}
run().catch(console.error);

270
msgr_receive.cpp Normal file
View File

@ -0,0 +1,270 @@
#include "messenger.h"
void osd_messenger_t::read_requests()
{
while (read_ready_clients.size() > 0)
{
int peer_fd = read_ready_clients[0];
auto & cl = clients[peer_fd];
if (!cl.read_op || cl.read_remaining < receive_buffer_size)
{
cl.read_iov.iov_base = cl.in_buf;
cl.read_iov.iov_len = receive_buffer_size;
}
else
{
cl.read_iov.iov_base = cl.read_buf;
cl.read_iov.iov_len = cl.read_remaining;
}
cl.read_msg.msg_iov = &cl.read_iov;
cl.read_msg.msg_iovlen = 1;
read_ready_clients.erase(read_ready_clients.begin(), read_ready_clients.begin() + 1);
int result = recvmsg(peer_fd, &cl.read_msg, 0);
if (result < 0)
{
result = -errno;
}
handle_read(result, peer_fd);
}
}
bool osd_messenger_t::handle_read(int result, int peer_fd)
{
auto cl_it = clients.find(peer_fd);
if (cl_it != clients.end())
{
auto & cl = cl_it->second;
if (result < 0 && result != -EAGAIN)
{
// this is a client socket, so don't panic. just disconnect it
printf("Client %d socket read error: %d (%s). Disconnecting client\n", peer_fd, -result, strerror(-result));
stop_client(peer_fd);
return false;
}
if (result == -EAGAIN || result < cl.read_iov.iov_len)
{
cl.read_ready--;
if (cl.read_ready > 0)
read_ready_clients.push_back(peer_fd);
}
else
{
read_ready_clients.push_back(peer_fd);
}
if (result > 0)
{
if (cl.read_iov.iov_base == cl.in_buf)
{
// Compose operation(s) from the buffer
int remain = result;
void *curbuf = cl.in_buf;
while (remain > 0)
{
if (!cl.read_op)
{
cl.read_op = new osd_op_t;
cl.read_op->peer_fd = peer_fd;
cl.read_op->op_type = OSD_OP_IN;
cl.read_buf = cl.read_op->req.buf;
cl.read_remaining = OSD_PACKET_SIZE;
cl.read_state = CL_READ_HDR;
}
if (cl.read_remaining > remain)
{
memcpy(cl.read_buf, curbuf, remain);
cl.read_remaining -= remain;
cl.read_buf += remain;
remain = 0;
if (cl.read_remaining <= 0)
handle_finished_read(cl);
}
else
{
memcpy(cl.read_buf, curbuf, cl.read_remaining);
curbuf += cl.read_remaining;
remain -= cl.read_remaining;
cl.read_remaining = 0;
cl.read_buf = NULL;
handle_finished_read(cl);
}
}
}
else
{
// Long data
cl.read_remaining -= result;
cl.read_buf += result;
if (cl.read_remaining <= 0)
{
handle_finished_read(cl);
}
}
if (result >= cl.read_iov.iov_len)
{
return true;
}
}
}
return false;
}
void osd_messenger_t::handle_finished_read(osd_client_t & cl)
{
if (cl.read_state == CL_READ_HDR)
{
if (cl.read_op->req.hdr.magic == SECONDARY_OSD_REPLY_MAGIC)
handle_reply_hdr(&cl);
else
handle_op_hdr(&cl);
}
else if (cl.read_state == CL_READ_DATA)
{
// Operation is ready
cl.received_ops.push_back(cl.read_op);
exec_op(cl.read_op);
cl.read_op = NULL;
cl.read_state = 0;
}
else if (cl.read_state == CL_READ_REPLY_DATA)
{
// Reply is ready
auto req_it = cl.sent_ops.find(cl.read_reply_id);
osd_op_t *request = req_it->second;
cl.sent_ops.erase(req_it);
cl.read_reply_id = 0;
delete cl.read_op;
cl.read_op = NULL;
cl.read_state = 0;
// Measure subop latency
timespec tv_end;
clock_gettime(CLOCK_REALTIME, &tv_end);
stats.subop_stat_count[request->req.hdr.opcode]++;
if (!stats.subop_stat_count[request->req.hdr.opcode])
{
stats.subop_stat_count[request->req.hdr.opcode]++;
stats.subop_stat_sum[request->req.hdr.opcode] = 0;
}
stats.subop_stat_sum[request->req.hdr.opcode] += (
(tv_end.tv_sec - request->tv_begin.tv_sec)*1000000 +
(tv_end.tv_nsec - request->tv_begin.tv_nsec)/1000
);
request->callback(request);
}
else
{
assert(0);
}
}
void osd_messenger_t::handle_op_hdr(osd_client_t *cl)
{
osd_op_t *cur_op = cl->read_op;
if (cur_op->req.hdr.opcode == OSD_OP_SECONDARY_READ)
{
if (cur_op->req.sec_rw.len > 0)
cur_op->buf = memalign(MEM_ALIGNMENT, cur_op->req.sec_rw.len);
cl->read_remaining = 0;
}
else if (cur_op->req.hdr.opcode == OSD_OP_SECONDARY_WRITE)
{
if (cur_op->req.sec_rw.len > 0)
cur_op->buf = memalign(MEM_ALIGNMENT, cur_op->req.sec_rw.len);
cl->read_remaining = cur_op->req.sec_rw.len;
}
else if (cur_op->req.hdr.opcode == OSD_OP_SECONDARY_STABILIZE ||
cur_op->req.hdr.opcode == OSD_OP_SECONDARY_ROLLBACK)
{
if (cur_op->req.sec_stab.len > 0)
cur_op->buf = memalign(MEM_ALIGNMENT, cur_op->req.sec_stab.len);
cl->read_remaining = cur_op->req.sec_stab.len;
}
else if (cur_op->req.hdr.opcode == OSD_OP_READ)
{
if (cur_op->req.rw.len > 0)
cur_op->buf = memalign(MEM_ALIGNMENT, cur_op->req.rw.len);
cl->read_remaining = 0;
}
else if (cur_op->req.hdr.opcode == OSD_OP_WRITE)
{
if (cur_op->req.rw.len > 0)
cur_op->buf = memalign(MEM_ALIGNMENT, cur_op->req.rw.len);
cl->read_remaining = cur_op->req.rw.len;
}
if (cl->read_remaining > 0)
{
// Read data
cl->read_buf = cur_op->buf;
cl->read_state = CL_READ_DATA;
}
else
{
// Operation is ready
cl->read_op = NULL;
cl->read_state = 0;
cl->received_ops.push_back(cur_op);
exec_op(cur_op);
}
}
void osd_messenger_t::handle_reply_hdr(osd_client_t *cl)
{
osd_op_t *cur_op = cl->read_op;
auto req_it = cl->sent_ops.find(cur_op->req.hdr.id);
if (req_it == cl->sent_ops.end())
{
// Command out of sync. Drop connection
printf("Client %d command out of sync: id %lu\n", cl->peer_fd, cur_op->req.hdr.id);
stop_client(cl->peer_fd);
return;
}
osd_op_t *op = req_it->second;
memcpy(op->reply.buf, cur_op->req.buf, OSD_PACKET_SIZE);
if ((op->reply.hdr.opcode == OSD_OP_SECONDARY_READ || op->reply.hdr.opcode == OSD_OP_READ) &&
op->reply.hdr.retval > 0)
{
// Read data. In this case we assume that the buffer is preallocated by the caller (!)
assert(op->buf);
cl->read_state = CL_READ_REPLY_DATA;
cl->read_reply_id = op->req.hdr.id;
cl->read_buf = op->buf;
cl->read_remaining = op->reply.hdr.retval;
}
else if (op->reply.hdr.opcode == OSD_OP_SECONDARY_LIST && op->reply.hdr.retval > 0)
{
op->buf = memalign(MEM_ALIGNMENT, sizeof(obj_ver_id) * op->reply.hdr.retval);
cl->read_state = CL_READ_REPLY_DATA;
cl->read_reply_id = op->req.hdr.id;
cl->read_buf = op->buf;
cl->read_remaining = sizeof(obj_ver_id) * op->reply.hdr.retval;
}
else if (op->reply.hdr.opcode == OSD_OP_SHOW_CONFIG && op->reply.hdr.retval > 0)
{
op->buf = malloc(op->reply.hdr.retval);
cl->read_state = CL_READ_REPLY_DATA;
cl->read_reply_id = op->req.hdr.id;
cl->read_buf = op->buf;
cl->read_remaining = op->reply.hdr.retval;
}
else
{
delete cl->read_op;
cl->read_state = 0;
cl->read_op = NULL;
cl->sent_ops.erase(req_it);
// Measure subop latency
timespec tv_end;
clock_gettime(CLOCK_REALTIME, &tv_end);
stats.subop_stat_count[op->req.hdr.opcode]++;
if (!stats.subop_stat_count[op->req.hdr.opcode])
{
stats.subop_stat_count[op->req.hdr.opcode]++;
stats.subop_stat_sum[op->req.hdr.opcode] = 0;
}
stats.subop_stat_sum[op->req.hdr.opcode] += (
(tv_end.tv_sec - op->tv_begin.tv_sec)*1000000 +
(tv_end.tv_nsec - op->tv_begin.tv_nsec)/1000
);
// Copy lambda to be unaffected by `delete op`
std::function<void(osd_op_t*)>(op->callback)(op);
}
}

149
msgr_send.cpp Normal file
View File

@ -0,0 +1,149 @@
#include "messenger.h"
void osd_messenger_t::outbox_push(osd_op_t *cur_op)
{
assert(cur_op->peer_fd);
auto & cl = clients.at(cur_op->peer_fd);
if (cur_op->op_type == OSD_OP_OUT)
{
clock_gettime(CLOCK_REALTIME, &cur_op->tv_begin);
}
else
{
// Check that operation actually belongs to this client
bool found = false;
for (auto it = cl.received_ops.begin(); it != cl.received_ops.end(); it++)
{
if (*it == cur_op)
{
found = true;
cl.received_ops.erase(it, it+1);
break;
}
}
if (!found)
{
delete cur_op;
return;
}
}
cl.outbox.push_back(cur_op);
if (cl.write_op || cl.outbox.size() > 1 || !try_send(cl))
{
if (cl.write_state == 0)
{
cl.write_state = CL_WRITE_READY;
write_ready_clients.push_back(cur_op->peer_fd);
}
ringloop->wakeup();
}
}
bool osd_messenger_t::try_send(osd_client_t & cl)
{
int peer_fd = cl.peer_fd;
if (!cl.write_op)
{
// pick next command
cl.write_op = cl.outbox.front();
cl.outbox.pop_front();
cl.write_state = CL_WRITE_REPLY;
if (cl.write_op->op_type == OSD_OP_IN)
{
// Measure execution latency
timespec tv_end;
clock_gettime(CLOCK_REALTIME, &tv_end);
stats.op_stat_count[cl.write_op->req.hdr.opcode]++;
if (!stats.op_stat_count[cl.write_op->req.hdr.opcode])
{
stats.op_stat_count[cl.write_op->req.hdr.opcode]++;
stats.op_stat_sum[cl.write_op->req.hdr.opcode] = 0;
stats.op_stat_bytes[cl.write_op->req.hdr.opcode] = 0;
}
stats.op_stat_sum[cl.write_op->req.hdr.opcode] += (
(tv_end.tv_sec - cl.write_op->tv_begin.tv_sec)*1000000 +
(tv_end.tv_nsec - cl.write_op->tv_begin.tv_nsec)/1000
);
if (cl.write_op->req.hdr.opcode == OSD_OP_READ ||
cl.write_op->req.hdr.opcode == OSD_OP_WRITE)
{
stats.op_stat_bytes[cl.write_op->req.hdr.opcode] += cl.write_op->req.rw.len;
}
else if (cl.write_op->req.hdr.opcode == OSD_OP_SECONDARY_READ ||
cl.write_op->req.hdr.opcode == OSD_OP_SECONDARY_WRITE)
{
stats.op_stat_bytes[cl.write_op->req.hdr.opcode] += cl.write_op->req.sec_rw.len;
}
}
}
cl.write_msg.msg_iov = cl.write_op->send_list.get_iovec();
cl.write_msg.msg_iovlen = cl.write_op->send_list.get_size();
int result = sendmsg(peer_fd, &cl.write_msg, MSG_NOSIGNAL);
if (result < 0)
result = -errno;
handle_send(result, peer_fd);
return true;
}
void osd_messenger_t::send_replies()
{
while (write_ready_clients.size() > 0)
{
auto & cl = clients[write_ready_clients[0]];
write_ready_clients.erase(write_ready_clients.begin(), write_ready_clients.begin() + 1);
try_send(cl);
}
}
void osd_messenger_t::handle_send(int result, int peer_fd)
{
auto cl_it = clients.find(peer_fd);
if (cl_it != clients.end())
{
auto & cl = cl_it->second;
if (result < 0 && result != -EAGAIN)
{
// this is a client socket, so don't panic. just disconnect it
printf("Client %d socket write error: %d (%s). Disconnecting client\n", peer_fd, -result, strerror(-result));
stop_client(peer_fd);
return;
}
if (result >= 0)
{
osd_op_t *cur_op = cl.write_op;
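// Advance the send list past fully sent iovecs; a partially sent iovec is trimmed
// so that the next sendmsg() resumes exactly where this one stopped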
while (result > 0 && cur_op->send_list.sent < cur_op->send_list.count)
{
iovec & iov = cur_op->send_list.buf[cur_op->send_list.sent];
if (iov.iov_len <= result)
{
result -= iov.iov_len;
cur_op->send_list.sent++;
}
else
{
iov.iov_len -= result;
iov.iov_base += result;
break;
}
}
if (cur_op->send_list.sent >= cur_op->send_list.count)
{
// Done
if (cur_op->op_type == OSD_OP_IN)
{
delete cur_op;
}
else
{
cl.sent_ops[cl.write_op->req.hdr.id] = cl.write_op;
}
cl.write_op = NULL;
cl.write_state = cl.outbox.size() > 0 ? CL_WRITE_READY : 0;
}
}
if (cl.write_state != 0)
{
write_ready_clients.push_back(peer_fd);
}
}
}

View File

@ -1,19 +1,14 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
#pragma once
#include <stdint.h>
#include <functional>
-typedef uint64_t inode_t;
// 16 bytes per object/stripe id
// stripe = (start of the parity stripe + peer role)
// i.e. for example (256KB + one of 0,1,2)
struct __attribute__((__packed__)) object_id
{
-inode_t inode;
+uint64_t inode;
uint64_t stripe;
};

View File

@ -1,7 +1,5 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
#include <sys/socket.h>
+#include <sys/epoll.h>
#include <sys/poll.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
@ -9,41 +7,53 @@
#include "osd.h" #include "osd.h"
osd_t::osd_t(blockstore_config_t & config, ring_loop_t *ringloop) #define MAX_EPOLL_EVENTS 64
{
bs_block_size = strtoull(config["block_size"].c_str(), NULL, 10);
bs_bitmap_granularity = strtoull(config["bitmap_granularity"].c_str(), NULL, 10);
if (!bs_block_size)
bs_block_size = DEFAULT_BLOCK_SIZE;
if (!bs_bitmap_granularity)
bs_bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
clean_entry_bitmap_size = bs_block_size / bs_bitmap_granularity / 8;
const char* osd_op_names[] = {
"",
"read",
"write",
"sync",
"stabilize",
"rollback",
"delete",
"sync_stab_all",
"list",
"show_config",
"primary_read",
"primary_write",
"primary_sync",
"primary_delete",
};
osd_t::osd_t(blockstore_config_t & config, blockstore_t *bs, ring_loop_t *ringloop)
{
this->config = config; this->config = config;
this->bs = bs;
this->ringloop = ringloop; this->ringloop = ringloop;
// FIXME: Create Blockstore from on-disk superblock config and check it against the OSD cluster config this->bs_block_size = bs->get_block_size();
this->bs = new blockstore_t(config, ringloop); // FIXME: use bitmap granularity instead
this->bs_disk_alignment = bs->get_disk_alignment();
parse_config(config); parse_config(config);
epmgr = new epoll_manager_t(ringloop); epoll_fd = epoll_create(1);
this->tfd = epmgr->tfd; if (epoll_fd < 0)
{
throw std::runtime_error(std::string("epoll_create: ") + strerror(errno));
}
this->tfd = new timerfd_manager_t([this](int fd, std::function<void(int, int)> handler) { set_fd_handler(fd, handler); });
this->tfd->set_timer(print_stats_interval*1000, true, [this](int timer_id) this->tfd->set_timer(print_stats_interval*1000, true, [this](int timer_id)
{ {
print_stats(); print_stats();
}); });
this->tfd->set_timer(slow_log_interval*1000, true, [this](int timer_id)
{
print_slow();
});
c_cli.tfd = this->tfd; c_cli.tfd = this->tfd;
c_cli.ringloop = this->ringloop; c_cli.ringloop = this->ringloop;
c_cli.exec_op = [this](osd_op_t *op) { exec_op(op); }; c_cli.exec_op = [this](osd_op_t *op) { exec_op(op); };
c_cli.repeer_pgs = [this](osd_num_t peer_osd) { repeer_pgs(peer_osd); }; c_cli.repeer_pgs = [this](osd_num_t peer_osd) { repeer_pgs(peer_osd); };
c_cli.init();
init_cluster(); init_cluster();
@ -53,17 +63,18 @@ osd_t::osd_t(blockstore_config_t & config, ring_loop_t *ringloop)
osd_t::~osd_t()
{
+if (tfd)
+{
+delete tfd;
+tfd = NULL;
+}
ringloop->unregister_consumer(&consumer);
-delete epmgr;
-delete bs;
+close(epoll_fd);
close(listen_fd);
}
void osd_t::parse_config(blockstore_config_t & config) void osd_t::parse_config(blockstore_config_t & config)
{ {
if (config.find("log_level") == config.end())
config["log_level"] = "1";
log_level = strtoull(config["log_level"].c_str(), NULL, 10);
// Initial startup configuration // Initial startup configuration
json11::Json json_config = json11::Json(config); json11::Json json_config = json11::Json(config);
st_cli.parse_config(json_config); st_cli.parse_config(json_config);
@ -75,8 +86,6 @@ void osd_t::parse_config(blockstore_config_t & config)
throw std::runtime_error("osd_num is required in the configuration"); throw std::runtime_error("osd_num is required in the configuration");
c_cli.osd_num = osd_num; c_cli.osd_num = osd_num;
run_primary = config["run_primary"] != "false" && config["run_primary"] != "0" && config["run_primary"] != "no"; run_primary = config["run_primary"] != "false" && config["run_primary"] != "0" && config["run_primary"] != "no";
no_rebalance = config["no_rebalance"] == "true" || config["no_rebalance"] == "1" || config["no_rebalance"] == "yes";
no_recovery = config["no_recovery"] == "true" || config["no_recovery"] == "1" || config["no_recovery"] == "yes";
// Cluster configuration // Cluster configuration
bind_address = config["bind_address"]; bind_address = config["bind_address"];
if (bind_address == "") if (bind_address == "")
@ -100,21 +109,28 @@ void osd_t::parse_config(blockstore_config_t & config)
if (client_queue_depth < 128) if (client_queue_depth < 128)
client_queue_depth = 128; client_queue_depth = 128;
} }
if (config.find("pg_stripe_size") != config.end())
{
pg_stripe_size = strtoull(config["pg_stripe_size"].c_str(), NULL, 10);
if (!pg_stripe_size || !bs_block_size || pg_stripe_size < bs_block_size || (pg_stripe_size % bs_block_size) != 0)
pg_stripe_size = DEFAULT_PG_STRIPE_SIZE;
}
recovery_queue_depth = strtoull(config["recovery_queue_depth"].c_str(), NULL, 10); recovery_queue_depth = strtoull(config["recovery_queue_depth"].c_str(), NULL, 10);
if (recovery_queue_depth < 1 || recovery_queue_depth > MAX_RECOVERY_QUEUE) if (recovery_queue_depth < 1 || recovery_queue_depth > MAX_RECOVERY_QUEUE)
recovery_queue_depth = DEFAULT_RECOVERY_QUEUE; recovery_queue_depth = DEFAULT_RECOVERY_QUEUE;
recovery_sync_batch = strtoull(config["recovery_sync_batch"].c_str(), NULL, 10);
if (recovery_sync_batch < 1 || recovery_sync_batch > MAX_RECOVERY_QUEUE)
recovery_sync_batch = DEFAULT_RECOVERY_BATCH;
if (config["readonly"] == "true" || config["readonly"] == "1" || config["readonly"] == "yes") if (config["readonly"] == "true" || config["readonly"] == "1" || config["readonly"] == "yes")
readonly = true; readonly = true;
print_stats_interval = strtoull(config["print_stats_interval"].c_str(), NULL, 10); print_stats_interval = strtoull(config["print_stats_interval"].c_str(), NULL, 10);
if (!print_stats_interval) if (!print_stats_interval)
print_stats_interval = 3; print_stats_interval = 3;
slow_log_interval = strtoull(config["slow_log_interval"].c_str(), NULL, 10); c_cli.peer_connect_interval = strtoull(config["peer_connect_interval"].c_str(), NULL, 10);
if (!slow_log_interval) if (!c_cli.peer_connect_interval)
slow_log_interval = 10; c_cli.peer_connect_interval = DEFAULT_PEER_CONNECT_INTERVAL;
c_cli.parse_config(json_config); c_cli.peer_connect_timeout = strtoull(config["peer_connect_timeout"].c_str(), NULL, 10);
if (!c_cli.peer_connect_timeout)
c_cli.peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT;
log_level = strtoull(config["log_level"].c_str(), NULL, 10);
c_cli.log_level = log_level;
} }
void osd_t::bind_socket() void osd_t::bind_socket()
@ -165,10 +181,15 @@ void osd_t::bind_socket()
fcntl(listen_fd, F_SETFL, fcntl(listen_fd, F_GETFL, 0) | O_NONBLOCK);
-epmgr->set_fd_handler(listen_fd, false, [this](int fd, int events)
-{
-c_cli.accept_connections(listen_fd);
-});
+epoll_event ev;
+ev.data.fd = listen_fd;
+ev.events = EPOLLIN | EPOLLET;
+if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, listen_fd, &ev) < 0)
+{
+close(listen_fd);
+close(epoll_fd);
+throw std::runtime_error(std::string("epoll_ctl: ") + strerror(errno));
+}
}
bool osd_t::shutdown() bool osd_t::shutdown()
@ -178,17 +199,86 @@ bool osd_t::shutdown()
{
return false;
}
-return !bs || bs->is_safe_to_stop();
+return bs->is_safe_to_stop();
}
void osd_t::loop()
{
+if (!wait_state)
+{
+handle_epoll_events();
+wait_state = 1;
+}
handle_peers();
c_cli.read_requests();
c_cli.send_replies();
ringloop->submit();
}
void osd_t::set_fd_handler(int fd, std::function<void(int, int)> handler)
{
if (handler != NULL)
{
bool exists = epoll_handlers.find(fd) != epoll_handlers.end();
epoll_event ev;
ev.data.fd = fd;
ev.events = EPOLLOUT | EPOLLIN | EPOLLRDHUP | EPOLLET;
if (epoll_ctl(epoll_fd, exists ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev) < 0)
{
throw std::runtime_error(std::string("epoll_ctl: ") + strerror(errno));
}
epoll_handlers[fd] = handler;
}
else
{
if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, NULL) < 0 && errno != ENOENT)
{
throw std::runtime_error(std::string("epoll_ctl: ") + strerror(errno));
}
epoll_handlers.erase(fd);
}
}
void osd_t::handle_epoll_events()
{
io_uring_sqe *sqe = ringloop->get_sqe();
if (!sqe)
{
throw std::runtime_error("can't get SQE, will fall out of sync with EPOLLET");
}
ring_data_t *data = ((ring_data_t*)sqe->user_data);
my_uring_prep_poll_add(sqe, epoll_fd, POLLIN);
data->callback = [this](ring_data_t *data)
{
if (data->res < 0)
{
throw std::runtime_error(std::string("epoll failed: ") + strerror(-data->res));
}
handle_epoll_events();
};
ringloop->submit();
int nfds;
epoll_event events[MAX_EPOLL_EVENTS];
restart:
nfds = epoll_wait(epoll_fd, events, MAX_EPOLL_EVENTS, 0);
for (int i = 0; i < nfds; i++)
{
if (events[i].data.fd == listen_fd)
{
c_cli.accept_connections(listen_fd);
}
else
{
auto & cb = epoll_handlers[events[i].data.fd];
cb(events[i].data.fd, events[i].events);
}
}
if (nfds == MAX_EPOLL_EVENTS)
{
goto restart;
}
}
void osd_t::exec_op(osd_op_t *cur_op)
{
clock_gettime(CLOCK_REALTIME, &cur_op->tv_begin);
@ -199,34 +289,21 @@ void osd_t::exec_op(osd_op_t *cur_op)
return;
}
inflight_ops++;
-cur_op->send_list.push_back(cur_op->reply.buf, OSD_PACKET_SIZE);
if (cur_op->req.hdr.magic != SECONDARY_OSD_OP_MAGIC ||
cur_op->req.hdr.opcode < OSD_OP_MIN || cur_op->req.hdr.opcode > OSD_OP_MAX ||
-((cur_op->req.hdr.opcode == OSD_OP_SEC_READ ||
-cur_op->req.hdr.opcode == OSD_OP_SEC_WRITE ||
-cur_op->req.hdr.opcode == OSD_OP_SEC_WRITE_STABLE) &&
-(cur_op->req.sec_rw.len > OSD_RW_MAX ||
-cur_op->req.sec_rw.len % bs_bitmap_granularity ||
-cur_op->req.sec_rw.offset % bs_bitmap_granularity)) ||
-((cur_op->req.hdr.opcode == OSD_OP_READ ||
-cur_op->req.hdr.opcode == OSD_OP_WRITE ||
-cur_op->req.hdr.opcode == OSD_OP_DELETE) &&
-(cur_op->req.rw.len > OSD_RW_MAX ||
-cur_op->req.rw.len % bs_bitmap_granularity ||
-cur_op->req.rw.offset % bs_bitmap_granularity)))
+(cur_op->req.hdr.opcode == OSD_OP_SECONDARY_READ || cur_op->req.hdr.opcode == OSD_OP_SECONDARY_WRITE) &&
+(cur_op->req.sec_rw.len > OSD_RW_MAX || cur_op->req.sec_rw.len % bs_disk_alignment || cur_op->req.sec_rw.offset % bs_disk_alignment) ||
+(cur_op->req.hdr.opcode == OSD_OP_READ || cur_op->req.hdr.opcode == OSD_OP_WRITE || cur_op->req.hdr.opcode == OSD_OP_DELETE) &&
+(cur_op->req.rw.len > OSD_RW_MAX || cur_op->req.rw.len % bs_disk_alignment || cur_op->req.rw.offset % bs_disk_alignment))
{
// Bad command
finish_op(cur_op, -EINVAL);
return;
}
-if (cur_op->req.hdr.opcode == OSD_OP_PING)
-{
-// Pong
-finish_op(cur_op, 0);
-return;
-}
if (readonly &&
-cur_op->req.hdr.opcode != OSD_OP_SEC_READ &&
-cur_op->req.hdr.opcode != OSD_OP_SEC_LIST &&
+cur_op->req.hdr.opcode != OSD_OP_SECONDARY_READ &&
+cur_op->req.hdr.opcode != OSD_OP_SECONDARY_LIST &&
cur_op->req.hdr.opcode != OSD_OP_READ &&
cur_op->req.hdr.opcode != OSD_OP_SHOW_CONFIG)
{
@ -274,9 +351,9 @@ void osd_t::reset_stats()
void osd_t::print_stats()
{
-for (int i = OSD_OP_MIN; i <= OSD_OP_MAX; i++)
+for (int i = 0; i <= OSD_OP_MAX; i++)
{
-if (c_cli.stats.op_stat_count[i] != prev_stats.op_stat_count[i] && i != OSD_OP_PING)
+if (c_cli.stats.op_stat_count[i] != prev_stats.op_stat_count[i])
{
uint64_t avg = (c_cli.stats.op_stat_sum[i] - prev_stats.op_stat_sum[i])/(c_cli.stats.op_stat_count[i] - prev_stats.op_stat_count[i]);
uint64_t bw = (c_cli.stats.op_stat_bytes[i] - prev_stats.op_stat_bytes[i]) / print_stats_interval;
@ -297,7 +374,7 @@ void osd_t::print_stats()
prev_stats.op_stat_bytes[i] = c_cli.stats.op_stat_bytes[i];
}
}
-for (int i = OSD_OP_MIN; i <= OSD_OP_MAX; i++)
+for (int i = 0; i <= OSD_OP_MAX; i++)
{
if (c_cli.stats.subop_stat_count[i] != prev_stats.subop_stat_count[i])
{
@ -335,73 +412,3 @@ void osd_t::print_stats()
printf("[OSD %lu] %lu object(s) misplaced\n", osd_num, misplaced_objects); printf("[OSD %lu] %lu object(s) misplaced\n", osd_num, misplaced_objects);
} }
} }
void osd_t::print_slow()
{
char alloc[1024];
timespec now;
clock_gettime(CLOCK_REALTIME, &now);
for (auto & kv: c_cli.clients)
{
for (auto op: kv.second->received_ops)
{
if ((now.tv_sec - op->tv_begin.tv_sec) >= slow_log_interval)
{
int l = sizeof(alloc), n;
char *buf = alloc;
#define bufprintf(s, ...) { n = snprintf(buf, l, s, __VA_ARGS__); n = n < 0 ? 0 : n; buf += n; l -= n; }
bufprintf("[OSD %lu] Slow op", osd_num);
if (kv.second->osd_num)
{
bufprintf(" from peer OSD %lu (client %d)", kv.second->osd_num, kv.second->peer_fd);
}
else
{
bufprintf(" from client %d", kv.second->peer_fd);
}
bufprintf(": %s id=%lu", osd_op_names[op->req.hdr.opcode], op->req.hdr.id);
if (op->req.hdr.opcode == OSD_OP_SEC_READ || op->req.hdr.opcode == OSD_OP_SEC_WRITE ||
op->req.hdr.opcode == OSD_OP_SEC_WRITE_STABLE || op->req.hdr.opcode == OSD_OP_SEC_DELETE)
{
bufprintf(" %lx:%lx v", op->req.sec_rw.oid.inode, op->req.sec_rw.oid.stripe);
if (op->req.sec_rw.version == UINT64_MAX)
{
bufprintf("%s", "max");
}
else
{
bufprintf("%lu", op->req.sec_rw.version);
}
if (op->req.hdr.opcode != OSD_OP_SEC_DELETE)
{
bufprintf(" offset=%x len=%x", op->req.sec_rw.offset, op->req.sec_rw.len);
}
}
else if (op->req.hdr.opcode == OSD_OP_SEC_STABILIZE || op->req.hdr.opcode == OSD_OP_SEC_ROLLBACK)
{
for (uint64_t i = 0; i < op->req.sec_stab.len; i += sizeof(obj_ver_id))
{
obj_ver_id *ov = (obj_ver_id*)(op->buf + i);
bufprintf(i == 0 ? " %lx:%lx v%lu" : ", %lx:%lx v%lu", ov->oid.inode, ov->oid.stripe, ov->version);
}
}
else if (op->req.hdr.opcode == OSD_OP_SEC_LIST)
{
bufprintf(
" inode=%lx-%lx pg=%u/%u, stripe=%lu",
op->req.sec_list.min_inode, op->req.sec_list.max_inode,
op->req.sec_list.list_pg, op->req.sec_list.pg_count,
op->req.sec_list.pg_stripe_size
);
}
else if (op->req.hdr.opcode == OSD_OP_READ || op->req.hdr.opcode == OSD_OP_WRITE ||
op->req.hdr.opcode == OSD_OP_DELETE)
{
bufprintf(" inode=%lx offset=%lx len=%x", op->req.rw.inode, op->req.rw.offset, op->req.rw.len);
}
#undef bufprintf
printf("%s\n", alloc);
}
}
}
}

View File

@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
#pragma once
#include <sys/types.h>
@ -19,7 +16,6 @@
#include "blockstore.h" #include "blockstore.h"
#include "ringloop.h" #include "ringloop.h"
#include "timerfd_manager.h" #include "timerfd_manager.h"
#include "epoll_manager.h"
#include "osd_peering_pg.h" #include "osd_peering_pg.h"
#include "messenger.h" #include "messenger.h"
#include "etcd_state_client.h" #include "etcd_state_client.h"
@ -37,10 +33,12 @@
#define DEFAULT_AUTOSYNC_INTERVAL 5
#define MAX_RECOVERY_QUEUE 2048
#define DEFAULT_RECOVERY_QUEUE 4
-#define DEFAULT_RECOVERY_BATCH 16
+#define DEFAULT_PG_STRIPE_SIZE 4*1024*1024 // 4 MB by default
//#define OSD_STUB
+extern const char* osd_op_names[];
struct osd_object_id_t
{
osd_num_t osd_num;
@ -51,21 +49,11 @@ struct osd_recovery_op_t
{
int st = 0;
bool degraded = false;
-pg_num_t pg_num = 0;
object_id oid = { 0 };
osd_op_t *osd_op = NULL;
};
-// Posted as /osd/inodestats/$osd, then accumulated by the monitor
-#define INODE_STATS_READ 0
-#define INODE_STATS_WRITE 1
-#define INODE_STATS_DELETE 2
-struct inode_stats_t
-{
-uint64_t op_sum[3] = { 0 };
-uint64_t op_count[3] = { 0 };
-uint64_t op_bytes[3] = { 0 };
-};
class osd_t
{
// config
@ -76,19 +64,15 @@ class osd_t
bool readonly = false;
osd_num_t osd_num = 1; // OSD numbers start with 1
bool run_primary = false;
-bool no_rebalance = false;
-bool no_recovery = false;
std::string bind_address;
int bind_port, listen_backlog;
// FIXME: Implement client queue depth limit
int client_queue_depth = 128;
bool allow_test_ops = true;
int print_stats_interval = 3;
-int slow_log_interval = 10;
int immediate_commit = IMMEDIATE_NONE;
int autosync_interval = DEFAULT_AUTOSYNC_INTERVAL; // sync every 5 seconds
int recovery_queue_depth = DEFAULT_RECOVERY_QUEUE;
-int recovery_sync_batch = DEFAULT_RECOVERY_BATCH;
int log_level = 0;
// cluster state
@ -99,22 +83,19 @@ class osd_t
std::string etcd_lease_id;
json11::Json self_state;
bool loading_peer_config = false;
-std::set<pool_pg_num_t> pg_state_dirty;
+std::set<pg_num_t> pg_state_dirty;
bool pg_config_applied = false;
bool etcd_reporting_pg_state = false;
bool etcd_reporting_stats = false;
// peers and PGs
-std::map<pool_id_t, pg_num_t> pg_counts;
-std::map<pool_pg_num_t, pg_t> pgs;
-std::set<pool_pg_num_t> dirty_pgs;
-std::set<osd_num_t> dirty_osds;
-int copies_to_delete_after_sync_count = 0;
+std::map<pg_num_t, pg_t> pgs;
+std::set<pg_num_t> dirty_pgs;
uint64_t misplaced_objects = 0, degraded_objects = 0, incomplete_objects = 0;
int peering_state = 0;
+unsigned pg_count = 0;
std::map<object_id, osd_recovery_op_t> recovery_ops;
-int recovery_done = 0;
osd_op_t *autosync_op = NULL;
// Unstable writes
@ -126,18 +107,20 @@ class osd_t
bool stopping = false;
int inflight_ops = 0;
blockstore_t *bs;
-uint32_t bs_block_size, bs_bitmap_granularity, clean_entry_bitmap_size;
+uint32_t bs_block_size, bs_disk_alignment;
+uint64_t pg_stripe_size = DEFAULT_PG_STRIPE_SIZE;
ring_loop_t *ringloop;
timerfd_manager_t *tfd = NULL;
-epoll_manager_t *epmgr = NULL;
+int wait_state = 0;
+int epoll_fd = 0;
int listening_port = 0;
int listen_fd = 0;
ring_consumer_t consumer;
+std::map<int, std::function<void(int, int)>> epoll_handlers;
// op statistics
osd_op_stats_t prev_stats;
-std::map<uint64_t, inode_stats_t> inode_stats;
const char* recovery_stat_names[2] = { "degraded", "misplaced" };
uint64_t recovery_stat_count[2][2] = { 0 };
uint64_t recovery_stat_bytes[2][2] = { 0 };
@ -145,8 +128,7 @@ class osd_t
// cluster connection
void parse_config(blockstore_config_t & config);
void init_cluster();
-void on_change_osd_state_hook(osd_num_t peer_osd);
-void on_change_pg_history_hook(pool_id_t pool_id, pg_num_t pg_num);
+void on_change_osd_state_hook(uint64_t osd_num);
void on_change_etcd_state_hook(json11::Json::object & changes);
void on_load_config_hook(json11::Json::object & changes);
json11::Json on_load_pgs_checks_hook();
@ -157,7 +139,6 @@ class osd_t
void create_osd_state();
void renew_lease();
void print_stats();
-void print_slow();
void reset_stats();
json11::Json get_statistics();
void report_statistics();
@ -168,23 +149,24 @@ class osd_t
// event loop, socket read/write
void loop();
+void set_fd_handler(int fd, std::function<void(int, int)> handler);
+void handle_epoll_events();
// peer handling (primary OSD logic)
void parse_test_peer(std::string peer);
void handle_peers();
void repeer_pgs(osd_num_t osd_num);
-void start_pg_peering(pg_t & pg);
+void start_pg_peering(pg_num_t pg_num);
void submit_sync_and_list_subop(osd_num_t role_osd, pg_peering_state_t *ps);
void submit_list_subop(osd_num_t role_osd, pg_peering_state_t *ps);
void discard_list_subop(osd_op_t *list_op);
-bool stop_pg(pg_t & pg);
-void reset_pg(pg_t & pg);
+bool stop_pg(pg_num_t pg_num);
void finish_stop_pg(pg_t & pg);
// flushing, recovery and backfill
-void submit_pg_flush_ops(pg_t & pg);
-void handle_flush_op(bool rollback, pool_id_t pool_id, pg_num_t pg_num, pg_flush_batch_t *fb, osd_num_t peer_osd, int retval);
-void submit_flush_op(pool_id_t pool_id, pg_num_t pg_num, pg_flush_batch_t *fb, bool rollback, osd_num_t peer_osd, int count, obj_ver_id *data);
+void submit_pg_flush_ops(pg_num_t pg_num);
+void handle_flush_op(bool rollback, pg_num_t pg_num, pg_flush_batch_t *fb, osd_num_t peer_osd, int retval);
+void submit_flush_op(pg_num_t pg_num, pg_flush_batch_t *fb, bool rollback, osd_num_t peer_osd, int count, obj_ver_id *data);
bool pick_next_recovery(osd_recovery_op_t &op);
void submit_recovery_op(osd_recovery_op_t *op);
bool continue_recovery();
@ -215,22 +197,18 @@ class osd_t
void handle_primary_bs_subop(osd_op_t *subop); void handle_primary_bs_subop(osd_op_t *subop);
void add_bs_subop_stats(osd_op_t *subop); void add_bs_subop_stats(osd_op_t *subop);
void pg_cancel_write_queue(pg_t & pg, osd_op_t *first_op, object_id oid, int retval); void pg_cancel_write_queue(pg_t & pg, osd_op_t *first_op, object_id oid, int retval);
void submit_primary_subops(int submit_type, uint64_t op_version, int pg_size, const uint64_t* osd_set, osd_op_t *cur_op); void submit_primary_subops(int submit_type, int read_pg_size, const uint64_t* osd_set, osd_op_t *cur_op);
void submit_primary_del_subops(osd_op_t *cur_op, uint64_t *cur_set, uint64_t set_size, pg_osd_set_t & loc_set); void submit_primary_del_subops(osd_op_t *cur_op, uint64_t *cur_set, pg_osd_set_t & loc_set);
void submit_primary_del_batch(osd_op_t *cur_op, obj_ver_osd_t *chunks_to_delete, int chunks_to_delete_count);
void submit_primary_sync_subops(osd_op_t *cur_op); void submit_primary_sync_subops(osd_op_t *cur_op);
void submit_primary_stab_subops(osd_op_t *cur_op); void submit_primary_stab_subops(osd_op_t *cur_op);
inline pg_num_t map_to_pg(object_id oid, uint64_t pg_stripe_size) inline pg_num_t map_to_pg(object_id oid)
{ {
uint64_t pg_count = pg_counts[INODE_POOL(oid.inode)];
if (!pg_count)
pg_count = 1;
return (oid.inode + oid.stripe / pg_stripe_size) % pg_count + 1; return (oid.inode + oid.stripe / pg_stripe_size) % pg_count + 1;
} }
public: public:
osd_t(blockstore_config_t & config, ring_loop_t *ringloop); osd_t(blockstore_config_t & config, blockstore_t *bs, ring_loop_t *ringloop);
~osd_t(); ~osd_t();
void force_stop(int exitcode); void force_stop(int exitcode);
bool shutdown(); bool shutdown();
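
Note: the two map_to_pg() variants above differ only in where the PG count comes from. A hedged, standalone sketch of both rules (POOL_ID_BITS, INODE_POOL and the container types here are illustrative assumptions, not code from this compare):

#include <cstdint>
#include <map>

// Both rules hash an object to a PG by mixing its inode number with its
// stripe offset, so consecutive stripes of one inode spread across PGs.
typedef uint64_t osd_num_t;
typedef uint32_t pg_num_t;
typedef uint32_t pool_id_t;
struct object_id { uint64_t inode, stripe; };

// Single-pool rule (the "+" side): one global pg_count and pg_stripe_size.
pg_num_t map_to_pg_single(object_id oid, uint64_t pg_stripe_size, uint64_t pg_count)
{
    return (oid.inode + oid.stripe / pg_stripe_size) % pg_count + 1;
}

// Pool-aware rule (the "-" side): the pool id lives in the high bits of the
// inode number and selects a per-pool PG count (16 bits is an assumption).
#define POOL_ID_BITS 16
#define INODE_POOL(inode) ((pool_id_t)((inode) >> (64 - POOL_ID_BITS)))
pg_num_t map_to_pg_pooled(object_id oid, uint64_t pg_stripe_size,
    const std::map<pool_id_t, uint64_t> & pg_counts)
{
    auto it = pg_counts.find(INODE_POOL(oid.inode));
    uint64_t pg_count = (it == pg_counts.end() || !it->second) ? 1 : it->second;
    return (oid.inode + oid.stripe / pg_stripe_size) % pg_count + 1;
}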


@@ -1,10 +1,6 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
 #include "osd.h"
 #include "base64.h"
 #include "etcd_state_client.h"
-#include "osd_rmw.h"
 // Startup sequence:
 // Start etcd watcher -> Load global OSD configuration -> Bind socket -> Acquire lease -> Report&lock OSD state
@@ -18,7 +14,7 @@ void osd_t::init_cluster()
 {
 if (run_primary)
 {
-// Test version of clustering code with 1 pool, 1 PG and 2 peers
+// Test version of clustering code with 1 PG and 2 peers
 // Example: peers = 2:127.0.0.1:11204,3:127.0.0.1:11205
 std::string peerstr = config["peers"];
 while (peerstr.size())
@@ -31,30 +27,15 @@ void osd_t::init_cluster()
 {
 throw std::runtime_error("run_primary requires at least 2 peers");
 }
-pgs[{ 1, 1 }] = (pg_t){
+pgs[1] = (pg_t){
 .state = PG_PEERING,
-.scheme = POOL_SCHEME_XOR,
 .pg_cursize = 0,
-.pg_size = 3,
-.pg_minsize = 2,
-.pg_data_size = 2,
-.pool_id = 1,
 .pg_num = 1,
 .target_set = { 1, 2, 3 },
 .cur_set = { 0, 0, 0 },
 };
-st_cli.pool_config[1] = (pool_config_t){
-.exists = true,
-.id = 1,
-.name = "testpool",
-.scheme = POOL_SCHEME_XOR,
-.pg_size = 3,
-.pg_minsize = 2,
-.pg_count = 1,
-.real_pg_count = 1,
-};
-report_pg_state(pgs[{ 1, 1 }]);
-pg_counts[1] = 1;
+report_pg_state(pgs[1]);
+pg_count = 1;
 }
 bind_socket();
 }
@@ -62,8 +43,7 @@ void osd_t::init_cluster()
 {
 st_cli.tfd = tfd;
 st_cli.log_level = log_level;
-st_cli.on_change_osd_state_hook = [this](osd_num_t peer_osd) { on_change_osd_state_hook(peer_osd); };
-st_cli.on_change_pg_history_hook = [this](pool_id_t pool_id, pg_num_t pg_num) { on_change_pg_history_hook(pool_id, pg_num); };
+st_cli.on_change_osd_state_hook = [this](uint64_t peer_osd) { on_change_osd_state_hook(peer_osd); };
 st_cli.on_change_hook = [this](json11::Json::object & changes) { on_change_etcd_state_hook(changes); };
 st_cli.on_load_config_hook = [this](json11::Json::object & cfg) { on_load_config_hook(cfg); };
 st_cli.load_pgs_checks_hook = [this]() { return on_load_pgs_checks_hook(); };
@@ -142,7 +122,7 @@ json11::Json osd_t::get_statistics()
 }
 st["host"] = self_state["host"];
 json11::Json::object op_stats, subop_stats;
-for (int i = OSD_OP_MIN; i <= OSD_OP_MAX; i++)
+for (int i = 0; i <= OSD_OP_MAX; i++)
 {
 op_stats[osd_op_names[i]] = json11::Json::object {
 { "count", c_cli.stats.op_stat_count[i] },
@@ -150,7 +130,7 @@ json11::Json osd_t::get_statistics()
 { "bytes", c_cli.stats.op_stat_bytes[i] },
 };
 }
-for (int i = OSD_OP_MIN; i <= OSD_OP_MAX; i++)
+for (int i = 0; i <= OSD_OP_MAX; i++)
 {
 subop_stats[osd_op_names[i]] = json11::Json::object {
 { "count", c_cli.stats.subop_stat_count[i] },
@@ -179,47 +159,11 @@ void osd_t::report_statistics()
 return;
 }
 etcd_reporting_stats = true;
-// Report space usage statistics as a whole
-// Maybe we'll report it using deltas if we tune for a lot of inodes at some point
-json11::Json::object inode_space;
-for (auto kv: bs->get_inode_space_stats())
-{
-inode_space[std::to_string(kv.first)] = kv.second;
-}
-json11::Json::object inode_ops;
-for (auto kv: inode_stats)
-{
-inode_ops[std::to_string(kv.first)] = json11::Json::object {
-{ "read", json11::Json::object {
-{ "count", kv.second.op_count[INODE_STATS_READ] },
-{ "usec", kv.second.op_sum[INODE_STATS_READ] },
-{ "bytes", kv.second.op_bytes[INODE_STATS_READ] },
-} },
-{ "write", json11::Json::object {
-{ "count", kv.second.op_count[INODE_STATS_WRITE] },
-{ "usec", kv.second.op_sum[INODE_STATS_WRITE] },
-{ "bytes", kv.second.op_bytes[INODE_STATS_WRITE] },
-} },
-{ "delete", json11::Json::object {
-{ "count", kv.second.op_count[INODE_STATS_DELETE] },
-{ "usec", kv.second.op_sum[INODE_STATS_DELETE] },
-{ "bytes", kv.second.op_bytes[INODE_STATS_DELETE] },
-} },
-};
-}
 json11::Json::array txn = { json11::Json::object {
 { "request_put", json11::Json::object {
 { "key", base64_encode(st_cli.etcd_prefix+"/osd/stats/"+std::to_string(osd_num)) },
 { "value", base64_encode(get_statistics().dump()) },
-} },
-{ "request_put", json11::Json::object {
-{ "key", base64_encode(st_cli.etcd_prefix+"/osd/space/"+std::to_string(osd_num)) },
-{ "value", base64_encode(json11::Json(inode_space).dump()) },
-} },
-{ "request_put", json11::Json::object {
-{ "key", base64_encode(st_cli.etcd_prefix+"/osd/inodestats/"+std::to_string(osd_num)) },
-{ "value", base64_encode(json11::Json(inode_ops).dump()) },
-} },
+} }
 } };
 for (auto & p: pgs)
 {
@@ -238,7 +182,7 @@ void osd_t::report_statistics()
 pg_stats["write_osd_set"] = pg.cur_set;
 txn.push_back(json11::Json::object {
 { "request_put", json11::Json::object {
-{ "key", base64_encode(st_cli.etcd_prefix+"/pg/stats/"+std::to_string(pg.pool_id)+"/"+std::to_string(pg.pg_num)) },
+{ "key", base64_encode(st_cli.etcd_prefix+"/pg/stats/"+std::to_string(pg.pg_num)) },
 { "value", base64_encode(json11::Json(pg_stats).dump()) },
 } }
 });
@@ -263,7 +207,7 @@ void osd_t::report_statistics()
 });
 }
-void osd_t::on_change_osd_state_hook(osd_num_t peer_osd)
+void osd_t::on_change_osd_state_hook(uint64_t peer_osd)
 {
 if (c_cli.wanted_peers.find(peer_osd) != c_cli.wanted_peers.end())
 {
@@ -274,35 +218,8 @@ void osd_t::on_change_osd_state_hook(osd_num_t peer_osd)
 void osd_t::on_change_etcd_state_hook(json11::Json::object & changes)
 {
 // FIXME apply config changes in runtime (maybe, some)
-if (run_primary)
-{
 apply_pg_count();
 apply_pg_config();
-}
-}
-void osd_t::on_change_pg_history_hook(pool_id_t pool_id, pg_num_t pg_num)
-{
-auto pg_it = pgs.find({
-.pool_id = pool_id,
-.pg_num = pg_num,
-});
-if (pg_it != pgs.end() && pg_it->second.epoch > pg_it->second.reported_epoch &&
-st_cli.pool_config[pool_id].pg_config[pg_num].epoch >= pg_it->second.epoch)
-{
-pg_it->second.reported_epoch = st_cli.pool_config[pool_id].pg_config[pg_num].epoch;
-object_id oid = { 0 };
-bool first = true;
-for (auto op: pg_it->second.write_queue)
-{
-if (first || oid != op.first)
-{
-oid = op.first;
-first = false;
-continue_primary_write(op.second);
-}
-}
-}
 }
-void osd_t::on_load_config_hook(json11::Json::object & global_config)
+void osd_t::on_load_config_hook(json11::Json::object & changes)
@@ -324,6 +241,7 @@ void osd_t::on_load_config_hook(json11::Json::object & global_config)
 }
 parse_config(osd_config);
 bind_socket();
+st_cli.start_etcd_watcher();
 acquire_lease();
 }
@@ -420,7 +338,6 @@ void osd_t::create_osd_state()
 {
 st_cli.load_pgs();
 }
-report_statistics();
 });
 }
@@ -512,56 +429,52 @@ void osd_t::on_load_pgs_hook(bool success)
 void osd_t::apply_pg_count()
 {
-for (auto & pool_item: st_cli.pool_config)
+pg_num_t pg_count = st_cli.pg_config.size();
+if (pg_count > 0 && (st_cli.pg_config.begin()->first != 1 || std::prev(st_cli.pg_config.end())->first != pg_count))
 {
-if (pool_item.second.real_pg_count != 0 &&
-pool_item.second.real_pg_count != pg_counts[pool_item.first])
+printf("Invalid PG configuration: PG numbers don't cover the whole 1..%d range\n", pg_count);
+force_stop(1);
+return;
+}
+if (this->pg_count != 0 && this->pg_count != pg_count)
 {
-// Check that all pool PGs are offline. It is not allowed to change PG count when any PGs are online
+// Check that all PGs are offline. It is not allowed to change PG count when any PGs are online
 // The external tool must wait for all PGs to come down before changing PG count
 // If it doesn't wait, a restarted OSD may apply the new count immediately which will lead to bugs
 // So an OSD just dies if it detects PG count change while there are active PGs
 int still_active = 0;
 for (auto & kv: pgs)
 {
-if (kv.first.pool_id == pool_item.first && (kv.second.state & PG_ACTIVE))
+if (kv.second.state & PG_ACTIVE)
 {
 still_active++;
 }
 }
 if (still_active > 0)
 {
-printf(
-"[OSD %lu] PG count change detected for pool %u (new is %lu, old is %u),"
-" but %u PG(s) are still active. This is not allowed. Exiting\n",
-this->osd_num, pool_item.first, pool_item.second.real_pg_count, pg_counts[pool_item.first], still_active
-);
+printf("[OSD %lu] PG count change detected, but %d PG(s) are still active. This is not allowed. Exiting\n", this->osd_num, still_active);
 force_stop(1);
 return;
 }
 }
-this->pg_counts[pool_item.first] = pool_item.second.real_pg_count;
-}
+this->pg_count = pg_count;
 }
 void osd_t::apply_pg_config()
 {
 bool all_applied = true;
-for (auto & pool_item: st_cli.pool_config)
-{
-auto pool_id = pool_item.first;
-for (auto & kv: pool_item.second.pg_config)
+for (auto & kv: st_cli.pg_config)
 {
 pg_num_t pg_num = kv.first;
 auto & pg_cfg = kv.second;
 bool take = pg_cfg.exists && pg_cfg.primary == this->osd_num &&
 !pg_cfg.pause && (!pg_cfg.cur_primary || pg_cfg.cur_primary == this->osd_num);
-auto pg_it = this->pgs.find({ .pool_id = pool_id, .pg_num = pg_num });
-bool currently_taken = pg_it != this->pgs.end() && pg_it->second.state != PG_OFFLINE;
+bool currently_taken = this->pgs.find(pg_num) != this->pgs.end() &&
+this->pgs[pg_num].state != PG_OFFLINE;
 if (currently_taken && !take)
 {
 // Stop this PG
-stop_pg(pg_it->second);
+stop_pg(pg_num);
 }
 else if (take)
 {
@@ -593,9 +506,9 @@ void osd_t::apply_pg_config()
 }
 if (currently_taken)
 {
-if (pg_it->second.state & (PG_ACTIVE | PG_INCOMPLETE | PG_PEERING))
+if (this->pgs[pg_num].state & (PG_ACTIVE | PG_INCOMPLETE | PG_PEERING))
 {
-if (pg_it->second.target_set == pg_cfg.target_set)
+if (this->pgs[pg_num].target_set == pg_cfg.target_set)
 {
 // No change in osd_set; history changes are ignored
 continue;
@@ -603,18 +516,18 @@ void osd_t::apply_pg_config()
 else
 {
 // Stop PG, reapply change after stopping
-stop_pg(pg_it->second);
+stop_pg(pg_num);
 all_applied = false;
 continue;
 }
 }
-else if (pg_it->second.state & PG_STOPPING)
+else if (this->pgs[pg_num].state & PG_STOPPING)
 {
 // Reapply change after stopping
 all_applied = false;
 continue;
 }
-else if (pg_it->second.state & PG_STARTING)
+else if (this->pgs[pg_num].state & PG_STARTING)
 {
 if (pg_cfg.cur_primary == this->osd_num)
 {
@@ -629,34 +542,19 @@ void osd_t::apply_pg_config()
 }
 else
 {
-throw std::runtime_error(
-"Unexpected PG "+std::to_string(pool_id)+"/"+std::to_string(pg_num)+
-" state: "+std::to_string(pg_it->second.state)
-);
+throw std::runtime_error("Unexpected PG "+std::to_string(pg_num)+" state: "+std::to_string(this->pgs[pg_num].state));
 }
 }
-auto & pg = this->pgs[{ .pool_id = pool_id, .pg_num = pg_num }];
-pg = (pg_t){
+this->pgs[pg_num] = (pg_t){
 .state = pg_cfg.cur_primary == this->osd_num ? PG_PEERING : PG_STARTING,
-.scheme = pool_item.second.scheme,
 .pg_cursize = 0,
-.pg_size = pool_item.second.pg_size,
-.pg_minsize = pool_item.second.pg_minsize,
-.pg_data_size = pg.scheme == POOL_SCHEME_REPLICATED
-? 1 : pool_item.second.pg_size - pool_item.second.parity_chunks,
-.pool_id = pool_id,
 .pg_num = pg_num,
-.reported_epoch = pg_cfg.epoch,
 .target_history = pg_cfg.target_history,
 .all_peers = std::vector<osd_num_t>(all_peers.begin(), all_peers.end()),
 .target_set = pg_cfg.target_set,
 };
-if (pg.scheme == POOL_SCHEME_JERASURE)
-{
-use_jerasure(pg.pg_size, pg.pg_data_size, true);
-}
-this->pg_state_dirty.insert({ .pool_id = pool_id, .pg_num = pg_num });
-pg.print_state();
+this->pg_state_dirty.insert(pg_num);
+this->pgs[pg_num].print_state();
 if (pg_cfg.cur_primary == this->osd_num)
 {
 // Add peers
@@ -667,7 +565,7 @@ void osd_t::apply_pg_config()
 c_cli.connect_peer(pg_osd, st_cli.peer_states[pg_osd]);
 }
 }
-start_pg_peering(pg);
+start_pg_peering(pg_num);
 }
 else
 {
@@ -676,7 +574,6 @@ void osd_t::apply_pg_config()
 }
 }
 }
-}
 report_pg_states();
 this->pg_config_applied = all_applied;
 }
@@ -687,7 +584,8 @@ void osd_t::report_pg_states()
 {
 return;
 }
-std::vector<std::pair<pool_pg_num_t,bool>> reporting_pgs;
+etcd_reporting_pg_state = true;
+std::vector<std::pair<pg_num_t,bool>> reporting_pgs;
 json11::Json::array checks;
 json11::Json::array success;
 json11::Json::array failure;
@@ -699,23 +597,9 @@ void osd_t::report_pg_states()
 continue;
 }
 auto & pg = pg_it->second;
-reporting_pgs.push_back({ *it, pg.history_changed });
-std::string state_key_base64 = base64_encode(st_cli.etcd_prefix+"/pg/state/"+std::to_string(pg.pool_id)+"/"+std::to_string(pg.pg_num));
-bool pg_state_exists = false;
-if (pg.state != PG_STARTING)
-{
-auto pool_it = st_cli.pool_config.find(pg.pool_id);
-if (pool_it != st_cli.pool_config.end())
-{
-auto pg_it = pool_it->second.pg_config.find(pg.pg_num);
-if (pg_it != pool_it->second.pg_config.end() &&
-pg_it->second.cur_state != 0)
-{
-pg_state_exists = true;
-}
-}
-}
-if (!pg_state_exists)
+reporting_pgs.push_back({ pg.pg_num, pg.history_changed });
+std::string state_key_base64 = base64_encode(st_cli.etcd_prefix+"/pg/state/"+std::to_string(pg.pg_num));
+if (pg.state == PG_STARTING)
 {
 // Check that the PG key does not exist
 // Failed check indicates an unsuccessful PG lock attempt in this case
@@ -727,7 +611,9 @@ void osd_t::report_pg_states()
 }
 else
 {
-// Check that the key is ours if it already exists
+// Check that the key is ours
+// Failed check indicates success for OFFLINE pgs (PG lock is already deleted)
+// and an unexpected race condition for started pgs (PG lock is held by someone else)
 checks.push_back(json11::Json::object {
 { "target", "LEASE" },
 { "lease", etcd_lease_id },
@@ -754,7 +640,7 @@ void osd_t::report_pg_states()
 }
 success.push_back(json11::Json::object {
 { "request_put", json11::Json::object {
-{ "key", state_key_base64 },
+{ "key", base64_encode(st_cli.etcd_prefix+"/pg/state/"+std::to_string(pg.pg_num)) },
 { "value", base64_encode(json11::Json(json11::Json::object {
 { "primary", this->osd_num },
 { "state", pg_state_keywords },
@@ -765,27 +651,27 @@ void osd_t::report_pg_states()
 });
 if (pg.history_changed)
 {
-// Prevent race conditions (for the case when the monitor is updating this key at the same time)
 pg.history_changed = false;
-std::string history_key = base64_encode(st_cli.etcd_prefix+"/pg/history/"+std::to_string(pg.pool_id)+"/"+std::to_string(pg.pg_num));
-json11::Json::object history_value = {
-{ "epoch", pg.epoch },
-{ "all_peers", pg.all_peers },
-{ "osd_sets", pg.target_history },
-};
-checks.push_back(json11::Json::object {
-{ "target", "MOD" },
-{ "key", history_key },
-{ "result", "LESS" },
-{ "mod_revision", st_cli.etcd_watch_revision+1 },
-});
+if (pg.state == PG_ACTIVE)
+{
 success.push_back(json11::Json::object {
-{ "request_put", json11::Json::object {
-{ "key", history_key },
-{ "value", base64_encode(json11::Json(history_value).dump()) },
+{ "request_delete_range", json11::Json::object {
+{ "key", base64_encode(st_cli.etcd_prefix+"/pg/history/"+std::to_string(pg.pg_num)) },
 } }
 });
 }
+else if (pg.state == (PG_ACTIVE|PG_LEFT_ON_DEAD))
+{
+success.push_back(json11::Json::object {
+{ "request_put", json11::Json::object {
+{ "key", base64_encode(st_cli.etcd_prefix+"/pg/history/"+std::to_string(pg.pg_num)) },
+{ "value", base64_encode(json11::Json(json11::Json::object {
+{ "all_peers", pg.all_peers },
+}).dump()) },
+} }
+});
+}
+}
 }
 failure.push_back(json11::Json::object {
 { "request_range", json11::Json::object {
@@ -794,7 +680,6 @@ void osd_t::report_pg_states()
 });
 }
 pg_state_dirty.clear();
-etcd_reporting_pg_state = true;
 st_cli.etcd_txn(json11::Json::object {
 { "compare", checks }, { "success", success }, { "failure", failure }
 }, ETCD_QUICK_TIMEOUT, [this, reporting_pgs](std::string err, json11::Json data)
@@ -820,26 +705,17 @@ void osd_t::report_pg_states()
 if (res["kvs"].array_items().size())
 {
 auto kv = st_cli.parse_etcd_kv(res["kvs"][0]);
-if (kv.key.substr(st_cli.etcd_prefix.length()+10) == st_cli.etcd_prefix+"/pg/state/")
-{
-pool_id_t pool_id = 0;
-pg_num_t pg_num = 0;
-char null_byte = 0;
-sscanf(kv.key.c_str() + st_cli.etcd_prefix.length()+10, "%u/%u%c", &pool_id, &pg_num, &null_byte);
-if (null_byte == 0)
-{
-auto pg_it = pgs.find({ .pool_id = pool_id, .pg_num = pg_num });
+pg_num_t pg_num = stoull_full(kv.key.substr(st_cli.etcd_prefix.length()+10));
+auto pg_it = pgs.find(pg_num);
 if (pg_it != pgs.end() && pg_it->second.state != PG_OFFLINE && pg_it->second.state != PG_STARTING)
 {
 // Live PG state update failed
-printf("Failed to report state of pool %u PG %u which is live. Race condition detected, exiting\n", pool_id, pg_num);
+printf("Failed to report state of PG %u which is live. Race condition detected, exiting\n", pg_num);
 force_stop(1);
 return;
 }
 }
 }
-}
-}
 // Retry after a short pause (hope we'll get some updates and update PG states accordingly)
 tfd->set_timer(500, false, [this](int) { report_pg_states(); });
 }
@@ -849,18 +725,15 @@ void osd_t::report_pg_states()
 for (auto pp: reporting_pgs)
 {
 auto pg_it = this->pgs.find(pp.first);
-if (pg_it != this->pgs.end() &&
-pg_it->second.state == PG_OFFLINE &&
-pg_state_dirty.find(pp.first) == pg_state_dirty.end())
+if (pg_it != this->pgs.end())
 {
-// Forget offline PGs after reporting their state
-if (pg_it->second.scheme == POOL_SCHEME_JERASURE)
+if (pg_it->second.state == PG_OFFLINE)
 {
-use_jerasure(pg_it->second.pg_size, pg_it->second.pg_data_size, false);
-}
+// Remove offline PGs after reporting their state
 this->pgs.erase(pg_it);
 }
 }
+}
 // Push other PG state updates, if any
 report_pg_states();
 if (!this->pg_state_dirty.size())
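
Note: report_pg_states() above is essentially an etcd-based PG lock built from a compare/success/failure transaction. A hedged sketch of the transaction shape (the include path, helper name and exact compare fields are assumptions for illustration; the real structure is in the code above):

#include "json11/json11.hpp"
#include <string>

json11::Json pg_lock_txn(const std::string & state_key_b64, const std::string & state_value_b64,
    const std::string & lease_id, bool starting)
{
    // Starting PGs require the key to be absent (CREATE revision == 0; a
    // failed check means another OSD holds the lock); running PGs require
    // the key to still belong to our etcd lease.
    json11::Json::object check = starting
        ? json11::Json::object { { "target", "CREATE" }, { "create_revision", 0 }, { "key", state_key_b64 } }
        : json11::Json::object { { "target", "LEASE" }, { "lease", lease_id }, { "key", state_key_b64 } };
    return json11::Json::object {
        { "compare", json11::Json::array { check } },
        // On success: (re)write our PG state under the lease
        { "success", json11::Json::array { json11::Json::object {
            { "request_put", json11::Json::object {
                { "key", state_key_b64 }, { "value", state_value_b64 }, { "lease", lease_id },
            } },
        } } },
        // On failure: read the conflicting key back, so the caller can tell a
        // harmless already-deleted OFFLINE report from a live-PG race
        { "failure", json11::Json::array { json11::Json::object {
            { "request_range", json11::Json::object { { "key", state_key_b64 } } },
        } } },
    };
}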


@@ -1,12 +1,10 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
 #include "osd.h"
 #define FLUSH_BATCH 512
-void osd_t::submit_pg_flush_ops(pg_t & pg)
+void osd_t::submit_pg_flush_ops(pg_num_t pg_num)
 {
+pg_t & pg = pgs[pg_num];
 pg_flush_batch_t *fb = new pg_flush_batch_t();
 pg.flush_batch = fb;
 auto it = pg.flush_actions.begin(), prev_it = pg.flush_actions.begin();
@@ -47,7 +45,7 @@ void osd_t::submit_pg_flush_ops(pg_t & pg)
 if (l.second.size() > 0)
 {
 fb->flush_ops++;
-submit_flush_op(pg.pool_id, pg.pg_num, fb, true, l.first, l.second.size(), l.second.data());
+submit_flush_op(pg.pg_num, fb, true, l.first, l.second.size(), l.second.data());
 }
 }
 for (auto & l: fb->stable_lists)
@@ -55,15 +53,14 @@ void osd_t::submit_pg_flush_ops(pg_t & pg)
 if (l.second.size() > 0)
 {
 fb->flush_ops++;
-submit_flush_op(pg.pool_id, pg.pg_num, fb, false, l.first, l.second.size(), l.second.data());
+submit_flush_op(pg.pg_num, fb, false, l.first, l.second.size(), l.second.data());
 }
 }
 }
-void osd_t::handle_flush_op(bool rollback, pool_id_t pool_id, pg_num_t pg_num, pg_flush_batch_t *fb, osd_num_t peer_osd, int retval)
+void osd_t::handle_flush_op(bool rollback, pg_num_t pg_num, pg_flush_batch_t *fb, osd_num_t peer_osd, int retval)
 {
-pool_pg_num_t pg_id = { .pool_id = pool_id, .pg_num = pg_num };
-if (pgs.find(pg_id) == pgs.end() || pgs[pg_id].flush_batch != fb)
+if (pgs.find(pg_num) == pgs.end() || pgs[pg_num].flush_batch != fb)
 {
 // Throw the result away
 return;
@@ -95,7 +92,7 @@ void osd_t::handle_flush_op(bool rollback, pool_id_t pool_id, pg_num_t pg_num, p
 {
 // This flush batch is done
 std::vector<osd_op_t*> continue_ops;
-auto & pg = pgs.at(pg_id);
+auto & pg = pgs[pg_num];
 auto it = pg.flush_actions.begin(), prev_it = it;
 auto erase_start = it;
 while (1)
@@ -156,22 +153,22 @@ void osd_t::handle_flush_op(bool rollback, pool_id_t pool_id, pg_num_t pg_num, p
 }
 }
-void osd_t::submit_flush_op(pool_id_t pool_id, pg_num_t pg_num, pg_flush_batch_t *fb, bool rollback, osd_num_t peer_osd, int count, obj_ver_id *data)
+void osd_t::submit_flush_op(pg_num_t pg_num, pg_flush_batch_t *fb, bool rollback, osd_num_t peer_osd, int count, obj_ver_id *data)
 {
 osd_op_t *op = new osd_op_t();
 // Copy buffer so it gets freed along with the operation
-op->buf = malloc_or_die(sizeof(obj_ver_id) * count);
+op->buf = malloc(sizeof(obj_ver_id) * count);
 memcpy(op->buf, data, sizeof(obj_ver_id) * count);
 if (peer_osd == this->osd_num)
 {
 // local
 clock_gettime(CLOCK_REALTIME, &op->tv_begin);
-op->bs_op = new blockstore_op_t((blockstore_op_t){
+op->bs_op = new blockstore_op_t({
 .opcode = (uint64_t)(rollback ? BS_OP_ROLLBACK : BS_OP_STABLE),
-.callback = [this, op, pool_id, pg_num, fb](blockstore_op_t *bs_op)
+.callback = [this, op, pg_num, fb](blockstore_op_t *bs_op)
 {
 add_bs_subop_stats(op);
-handle_flush_op(bs_op->opcode == BS_OP_ROLLBACK, pool_id, pg_num, fb, this->osd_num, bs_op->retval);
+handle_flush_op(bs_op->opcode == BS_OP_ROLLBACK, pg_num, fb, this->osd_num, bs_op->retval);
 delete op->bs_op;
 op->bs_op = NULL;
 delete op;
@@ -186,21 +183,22 @@ void osd_t::submit_flush_op(pool_id_t pool_id, pg_num_t pg_num, pg_flush_batch_t
 // Peer
 int peer_fd = c_cli.osd_peer_fds[peer_osd];
 op->op_type = OSD_OP_OUT;
-op->iov.push_back(op->buf, count * sizeof(obj_ver_id));
+op->send_list.push_back(op->req.buf, OSD_PACKET_SIZE);
+op->send_list.push_back(op->buf, count * sizeof(obj_ver_id));
 op->peer_fd = peer_fd;
-op->req = (osd_any_op_t){
+op->req = {
 .sec_stab = {
 .header = {
 .magic = SECONDARY_OSD_OP_MAGIC,
 .id = c_cli.next_subop_id++,
-.opcode = (uint64_t)(rollback ? OSD_OP_SEC_ROLLBACK : OSD_OP_SEC_STABILIZE),
+.opcode = (uint64_t)(rollback ? OSD_OP_SECONDARY_ROLLBACK : OSD_OP_SECONDARY_STABILIZE),
 },
 .len = count * sizeof(obj_ver_id),
 },
 };
-op->callback = [this, pool_id, pg_num, fb, peer_osd](osd_op_t *op)
+op->callback = [this, pg_num, fb, peer_osd](osd_op_t *op)
 {
-handle_flush_op(op->req.hdr.opcode == OSD_OP_SEC_ROLLBACK, pool_id, pg_num, fb, peer_osd, op->reply.hdr.retval);
+handle_flush_op(op->req.hdr.opcode == OSD_OP_SECONDARY_ROLLBACK, pg_num, fb, peer_osd, op->reply.hdr.retval);
 delete op;
 };
 c_cli.outbox_push(op);
@@ -209,8 +207,6 @@ void osd_t::submit_flush_op(pool_id_t pool_id, pg_num_t pg_num, pg_flush_batch_t
 bool osd_t::pick_next_recovery(osd_recovery_op_t &op)
 {
-if (!no_recovery)
-{
 for (auto pg_it = pgs.begin(); pg_it != pgs.end(); pg_it++)
 {
 if ((pg_it->second.state & (PG_ACTIVE | PG_HAS_DEGRADED)) == (PG_ACTIVE | PG_HAS_DEGRADED))
@@ -220,15 +216,13 @@ bool osd_t::pick_next_recovery(osd_recovery_op_t &op)
 if (recovery_ops.find(obj_it->first) == recovery_ops.end())
 {
 op.degraded = true;
+op.pg_num = pg_it->first;
 op.oid = obj_it->first;
 return true;
 }
 }
 }
 }
-}
-if (!no_rebalance)
-{
 for (auto pg_it = pgs.begin(); pg_it != pgs.end(); pg_it++)
 {
 if ((pg_it->second.state & (PG_ACTIVE | PG_HAS_MISPLACED)) == (PG_ACTIVE | PG_HAS_MISPLACED))
@@ -238,13 +232,13 @@ bool osd_t::pick_next_recovery(osd_recovery_op_t &op)
 if (recovery_ops.find(obj_it->first) == recovery_ops.end())
 {
 op.degraded = false;
+op.pg_num = pg_it->first;
 op.oid = obj_it->first;
 return true;
 }
 }
 }
 }
-}
 return false;
 }
@@ -252,7 +246,7 @@ void osd_t::submit_recovery_op(osd_recovery_op_t *op)
 {
 op->osd_op = new osd_op_t();
 op->osd_op->op_type = OSD_OP_OUT;
-op->osd_op->req = (osd_any_op_t){
+op->osd_op->req = {
 .rw = {
 .header = {
 .magic = SECONDARY_OSD_OP_MAGIC,
@@ -264,23 +258,15 @@ void osd_t::submit_recovery_op(osd_recovery_op_t *op)
 .len = 0,
 },
 };
-if (log_level > 2)
-{
-printf("Submitting recovery operation for %lx:%lx\n", op->oid.inode, op->oid.stripe);
-}
 op->osd_op->callback = [this, op](osd_op_t *osd_op)
 {
-// Don't sync the write, it will be synced by our regular sync coroutine
 if (osd_op->reply.hdr.retval < 0)
 {
 // Error recovering object
 if (osd_op->reply.hdr.retval == -EPIPE)
 {
 // PG is stopped or one of the OSDs is gone, error is harmless
-printf(
-"Recovery operation failed with object %lx:%lx (PG %u/%u)\n",
-op->oid.inode, op->oid.stripe, INODE_POOL(op->oid.inode),
-map_to_pg(op->oid, st_cli.pool_config.at(INODE_POOL(op->oid.inode)).pg_stripe_size)
-);
 }
 else
 {
@@ -291,17 +277,6 @@ void osd_t::submit_recovery_op(osd_recovery_op_t *op)
 op->osd_op = NULL;
 recovery_ops.erase(op->oid);
 delete osd_op;
-if (immediate_commit != IMMEDIATE_ALL)
-{
-recovery_done++;
-if (recovery_done >= recovery_sync_batch)
-{
-// Force sync every <recovery_sync_batch> operations
-// This is required not to pile up an excessive amount of delete operations
-autosync();
-recovery_done = 0;
-}
-}
 continue_recovery();
 };
 exec_op(op->osd_op);
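
Note: a hedged sketch of the bookkeeping behind pg_flush_batch_t, with field names assumed from the usage above: unstable object versions are grouped per peer OSD into rollback and stabilize lists, one subop is sent per non-empty list, and the batch completes when every subop has reported back.

#include <cstdint>
#include <map>
#include <vector>

typedef uint64_t osd_num_t;
struct obj_ver_id_sk { uint64_t inode, stripe, version; };

struct pg_flush_batch_sk
{
    // Per-OSD lists of versions to roll back or stabilize
    std::map<osd_num_t, std::vector<obj_ver_id_sk>> rollback_lists, stable_lists;
    int flush_ops = 0, flush_done = 0;
};

// Each subop callback calls this; only when the last one returns may the PG
// erase the flushed actions and continue queued writes, as handle_flush_op does.
// Stale callbacks are filtered out earlier by comparing the batch pointer
// stored in the PG against the one captured in the callback.
bool flush_subop_done(pg_flush_batch_sk *fb)
{
    fb->flush_done++;
    return fb->flush_done >= fb->flush_ops;
}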

osd_id.h (new file)

@@ -0,0 +1,4 @@
+#pragma once
+typedef uint64_t osd_num_t;
+typedef uint32_t pg_num_t;
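
Note: the pool-aware ("-") side of this compare keys PGs by a (pool, PG) pair instead of a bare pg_num_t. A hedged sketch of such a composite key, given the typedefs above (the real type is called pool_pg_num_t; its exact definition is not part of this compare):

typedef uint32_t pool_id_t;
struct pool_pg_num_sk
{
    pool_id_t pool_id;
    pg_num_t pg_num;
    bool operator<(const pool_pg_num_sk & other) const
    {
        // Order by pool first, then by PG number, so std::map iteration
        // groups all PGs of one pool together
        return pool_id < other.pool_id || (pool_id == other.pool_id && pg_num < other.pg_num);
    }
};
// Usage (assumed): std::map<pool_pg_num_sk, pg_t> pgs; pgs.find({ .pool_id = 1, .pg_num = 1 });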


@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
 #include "osd.h"
 #include <signal.h>
@@ -21,8 +18,6 @@ static void handle_sigint(int sig)
 int main(int narg, char *args[])
 {
-setvbuf(stdout, NULL, _IONBF, 0);
-setvbuf(stderr, NULL, _IONBF, 0);
 if (sizeof(osd_any_op_t) > OSD_PACKET_SIZE ||
 sizeof(osd_any_reply_t) > OSD_PACKET_SIZE)
 {
@@ -41,13 +36,16 @@ int main(int narg, char *args[])
 signal(SIGINT, handle_sigint);
 signal(SIGTERM, handle_sigint);
 ring_loop_t *ringloop = new ring_loop_t(512);
-osd = new osd_t(config, ringloop);
+// FIXME: Create Blockstore from on-disk superblock config and check it against the OSD cluster config
+blockstore_t *bs = new blockstore_t(config, ringloop);
+osd = new osd_t(config, bs, ringloop);
 while (1)
 {
 ringloop->loop();
 ringloop->wait();
 }
 delete osd;
+delete bs;
 delete ringloop;
 return 0;
 }


@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
 #pragma once
 #include "object_id.h"
@@ -13,22 +10,20 @@
 #define OSD_PACKET_SIZE 0x80
 // Opcodes
 #define OSD_OP_MIN 1
-#define OSD_OP_SEC_READ 1
-#define OSD_OP_SEC_WRITE 2
-#define OSD_OP_SEC_WRITE_STABLE 3
-#define OSD_OP_SEC_SYNC 4
-#define OSD_OP_SEC_STABILIZE 5
-#define OSD_OP_SEC_ROLLBACK 6
-#define OSD_OP_SEC_DELETE 7
-#define OSD_OP_TEST_SYNC_STAB_ALL 8
-#define OSD_OP_SEC_LIST 9
-#define OSD_OP_SHOW_CONFIG 10
-#define OSD_OP_READ 11
-#define OSD_OP_WRITE 12
-#define OSD_OP_SYNC 13
-#define OSD_OP_DELETE 14
-#define OSD_OP_PING 15
-#define OSD_OP_MAX 15
+#define OSD_OP_SECONDARY_READ 1
+#define OSD_OP_SECONDARY_WRITE 2
+#define OSD_OP_SECONDARY_SYNC 3
+#define OSD_OP_SECONDARY_STABILIZE 4
+#define OSD_OP_SECONDARY_ROLLBACK 5
+#define OSD_OP_SECONDARY_DELETE 6
+#define OSD_OP_TEST_SYNC_STAB_ALL 7
+#define OSD_OP_SECONDARY_LIST 8
+#define OSD_OP_SHOW_CONFIG 9
+#define OSD_OP_READ 10
+#define OSD_OP_WRITE 11
+#define OSD_OP_SYNC 12
+#define OSD_OP_DELETE 13
+#define OSD_OP_MAX 13
 // Alignment & limit for read/write operations
 #ifndef MEM_ALIGNMENT
 #define MEM_ALIGNMENT 512
@@ -71,9 +66,6 @@ struct __attribute__((__packed__)) osd_op_secondary_rw_t
 uint32_t offset;
 // length
 uint32_t len;
-// bitmap/attribute length - bitmap comes after header, but before data
-uint32_t attr_len;
-uint32_t pad0;
 };
 struct __attribute__((__packed__)) osd_reply_secondary_rw_t
@@ -81,9 +73,6 @@ struct __attribute__((__packed__)) osd_reply_secondary_rw_t
 osd_reply_header_t header;
 // for reads and writes: assigned or read version number
 uint64_t version;
-// for reads: bitmap/attribute length (just to double-check)
-uint32_t attr_len;
-uint32_t pad0;
 };
 // delete object on the secondary OSD
@@ -145,10 +134,7 @@ struct __attribute__((__packed__)) osd_op_secondary_list_t
 osd_op_header_t header;
 // placement group total number and total count
 pg_num_t list_pg, pg_count;
-// size of an area that maps to one PG continuously
 uint64_t pg_stripe_size;
-// inode range (used to select pools)
-uint64_t min_inode, max_inode;
 };
 struct __attribute__((__packed__)) osd_reply_secondary_list_t
@@ -160,6 +146,7 @@ struct __attribute__((__packed__)) osd_reply_secondary_list_t
 };
 // read or write to the primary OSD (must be within individual stripe)
+// FIXME: allow to return used block bitmap (required for snapshots)
 struct __attribute__((__packed__)) osd_op_rw_t
 {
 osd_op_header_t header;
@@ -174,9 +161,6 @@ struct __attribute__((__packed__)) osd_op_rw_t
 struct __attribute__((__packed__)) osd_reply_rw_t
 {
 osd_reply_header_t header;
-// for reads: bitmap length
-uint32_t bitmap_len;
-uint32_t pad0;
 };
 // sync to the primary OSD
@@ -218,5 +202,3 @@
 osd_reply_sync_t sync;
 uint8_t buf[OSD_PACKET_SIZE];
 };
-extern const char* osd_op_names[];
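
Note: every request and reply union must fit into the fixed 0x80-byte packet; main() checks this at runtime. A hedged companion sketch expressing the same invariant at compile time (not part of this commit; it would sit at the end of this header, where osd_any_op_t/osd_any_reply_t are already defined):

// Same size invariant as the runtime check in main(), caught at build time.
static_assert(sizeof(osd_any_op_t) <= OSD_PACKET_SIZE, "op header exceeds packet size");
static_assert(sizeof(osd_any_reply_t) <= OSD_PACKET_SIZE, "reply header exceeds packet size");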


@@ -1,6 +1,3 @@
-// Copyright (c) Vitaliy Filippov, 2019+
-// License: VNPL-1.1 (see README.md for details)
 #include <netinet/tcp.h>
 #include <sys/epoll.h>
@@ -29,7 +26,7 @@ void osd_t::handle_peers()
 degraded_objects += p.second.degraded_objects.size();
 if ((p.second.state & (PG_ACTIVE | PG_HAS_UNCLEAN)) == (PG_ACTIVE | PG_HAS_UNCLEAN))
 peering_state = peering_state | OSD_FLUSHING_PGS;
-else if (p.second.state & PG_ACTIVE)
+else
 peering_state = peering_state | OSD_RECOVERING;
 }
 else
@@ -53,7 +50,7 @@ void osd_t::handle_peers()
 {
 if (!p.second.flush_batch)
 {
-submit_pg_flush_ops(p.second);
+submit_pg_flush_ops(p.first);
 }
 still = true;
 }
@@ -91,20 +88,23 @@ void osd_t::repeer_pgs(osd_num_t peer_osd)
 if (repeer)
 {
 // Repeer this pg
-printf("[PG %u/%u] Repeer because of OSD %lu\n", p.second.pool_id, p.second.pg_num, peer_osd);
-start_pg_peering(p.second);
+printf("[PG %u] Repeer because of OSD %lu\n", p.second.pg_num, peer_osd);
+start_pg_peering(p.second.pg_num);
 }
 }
 }
 }
-// Reset PG state (when peering or stopping)
-void osd_t::reset_pg(pg_t & pg)
+// Repeer on each connect/disconnect peer event
+void osd_t::start_pg_peering(pg_num_t pg_num)
 {
+auto & pg = pgs[pg_num];
+pg.state = PG_PEERING;
+this->peering_state |= OSD_PEERING_PGS;
+report_pg_state(pg);
+// Reset PG state
 pg.cur_peers.clear();
 pg.state_dict.clear();
-copies_to_delete_after_sync_count -= pg.copies_to_delete_after_sync.size();
-pg.copies_to_delete_after_sync.clear();
 incomplete_objects -= pg.incomplete_objects.size();
 misplaced_objects -= pg.misplaced_objects.size();
 degraded_objects -= pg.degraded_objects.size();
@@ -123,41 +123,16 @@ void osd_t::reset_pg(pg_t & pg)
 cancel_primary_write(p.second);
 }
 pg.write_queue.clear();
-uint64_t pg_stripe_size = st_cli.pool_config[pg.pool_id].pg_stripe_size;
 for (auto it = unstable_writes.begin(); it != unstable_writes.end(); )
 {
 // Forget this PG's unstable writes
-if (INODE_POOL(it->first.oid.inode) == pg.pool_id && map_to_pg(it->first.oid, pg_stripe_size) == pg.pg_num)
+pg_num_t n = (it->first.oid.inode + it->first.oid.stripe / pg_stripe_size) % pg_count + 1;
+if (n == pg.pg_num)
 unstable_writes.erase(it++);
 else
 it++;
 }
-dirty_pgs.erase({ .pool_id = pg.pool_id, .pg_num = pg.pg_num });
-}
-// Repeer on each connect/disconnect peer event
-void osd_t::start_pg_peering(pg_t & pg)
-{
-pg.state = PG_PEERING;
-this->peering_state |= OSD_PEERING_PGS;
-reset_pg(pg);
-report_pg_state(pg);
-// Drop connections of clients who have this PG in dirty_pgs
-if (immediate_commit != IMMEDIATE_ALL)
-{
-std::vector<int> to_stop;
-for (auto & cp: c_cli.clients)
-{
-if (cp.second->dirty_pgs.find({ .pool_id = pg.pool_id, .pg_num = pg.pg_num }) != cp.second->dirty_pgs.end())
-{
-to_stop.push_back(cp.first);
-}
-}
-for (auto peer_fd: to_stop)
-{
-c_cli.stop_client(peer_fd);
-}
-}
+dirty_pgs.erase(pg.pg_num);
 // Calculate current write OSD set
 pg.pg_cursize = 0;
 pg.cur_set.resize(pg.target_set.size());
@@ -182,25 +157,19 @@ void osd_t::start_pg_peering(pg_t & pg)
 // (PG history is kept up to the latest active+clean state)
 for (auto & history_set: pg.target_history)
 {
-bool found = true;
+bool found = false;
 for (auto history_osd: history_set)
 {
-if (history_osd != 0)
-{
-found = false;
-if (history_osd == this->osd_num ||
-c_cli.osd_peer_fds.find(history_osd) != c_cli.osd_peer_fds.end())
+if (history_osd != 0 && c_cli.osd_peer_fds.find(history_osd) != c_cli.osd_peer_fds.end())
 {
 found = true;
 break;
 }
 }
-}
 if (!found)
 {
 pg.state = PG_INCOMPLETE;
 report_pg_state(pg);
-return;
 }
 }
 }
@@ -208,7 +177,6 @@ void osd_t::start_pg_peering(pg_t & pg)
 {
 pg.state = PG_INCOMPLETE;
 report_pg_state(pg);
-return;
 }
 std::set<osd_num_t> cur_peers;
 for (auto pg_osd: pg.all_peers)
@@ -232,7 +200,8 @@ void osd_t::start_pg_peering(pg_t & pg)
 {
 // Discard the result after completion, which, chances are, will be unsuccessful
 discard_list_subop(it->second);
-pg.peering_state->list_ops.erase(it++);
+pg.peering_state->list_ops.erase(it);
+it = pg.peering_state->list_ops.begin();
 }
 else
 it++;
@@ -245,7 +214,8 @@ void osd_t::start_pg_peering(pg_t & pg)
 {
 free(it->second.buf);
 }
-pg.peering_state->list_results.erase(it++);
+pg.peering_state->list_results.erase(it);
+it = pg.peering_state->list_results.begin();
 }
 else
 it++;
@@ -263,7 +233,6 @@ void osd_t::start_pg_peering(pg_t & pg)
 if (!pg.peering_state)
 {
 pg.peering_state = new pg_peering_state_t();
-pg.peering_state->pool_id = pg.pool_id;
 pg.peering_state->pg_num = pg.pg_num;
 }
 for (osd_num_t peer_osd: cur_peers)
@@ -318,13 +287,14 @@ void osd_t::submit_sync_and_list_subop(osd_num_t role_osd, pg_peering_state_t *p
 auto & cl = c_cli.clients.at(c_cli.osd_peer_fds[role_osd]);
 osd_op_t *op = new osd_op_t();
 op->op_type = OSD_OP_OUT;
-op->peer_fd = cl->peer_fd;
-op->req = (osd_any_op_t){
+op->send_list.push_back(op->req.buf, OSD_PACKET_SIZE);
+op->peer_fd = cl.peer_fd;
+op->req = {
 .sec_sync = {
 .header = {
 .magic = SECONDARY_OSD_OP_MAGIC,
 .id = c_cli.next_subop_id++,
-.opcode = OSD_OP_SEC_SYNC,
+.opcode = OSD_OP_SECONDARY_SYNC,
 },
 },
 };
@@ -359,10 +329,8 @@ void osd_t::submit_list_subop(osd_num_t role_osd, pg_peering_state_t *ps)
 clock_gettime(CLOCK_REALTIME, &op->tv_begin);
 op->bs_op = new blockstore_op_t();
 op->bs_op->opcode = BS_OP_LIST;
-op->bs_op->oid.stripe = st_cli.pool_config[ps->pool_id].pg_stripe_size;
-op->bs_op->oid.inode = ((uint64_t)ps->pool_id << (64 - POOL_ID_BITS));
-op->bs_op->version = ((uint64_t)(ps->pool_id+1) << (64 - POOL_ID_BITS)) - 1;
-op->bs_op->len = pg_counts[ps->pool_id];
+op->bs_op->oid.stripe = pg_stripe_size;
+op->bs_op->len = pg_count;
 op->bs_op->offset = ps->pg_num-1;
 op->bs_op->callback = [this, ps, op, role_osd](blockstore_op_t *bs_op)
 {
@@ -372,8 +340,8 @@ void osd_t::submit_list_subop(osd_num_t role_osd, pg_peering_state_t *ps)
 }
 add_bs_subop_stats(op);
 printf(
-"[PG %u/%u] Got object list from OSD %lu (local): %d object versions (%lu of them stable)\n",
-ps->pool_id, ps->pg_num, role_osd, bs_op->retval, bs_op->version
+"[PG %u] Got object list from OSD %lu (local): %d object versions (%lu of them stable)\n",
+ps->pg_num, role_osd, bs_op->retval, bs_op->version
 );
 ps->list_results[role_osd] = {
 .buf = (obj_ver_id*)op->bs_op->buf,
@@ -393,19 +361,18 @@ void osd_t::submit_list_subop(osd_num_t role_osd, pg_peering_state_t *ps)
 // Peer
 osd_op_t *op = new osd_op_t();
 op->op_type = OSD_OP_OUT;
+op->send_list.push_back(op->req.buf, OSD_PACKET_SIZE);
 op->peer_fd = c_cli.osd_peer_fds[role_osd];
-op->req = (osd_any_op_t){
+op->req = {
 .sec_list = {
 .header = {
 .magic = SECONDARY_OSD_OP_MAGIC,
 .id = c_cli.next_subop_id++,
-.opcode = OSD_OP_SEC_LIST,
+.opcode = OSD_OP_SECONDARY_LIST,
 },
 .list_pg = ps->pg_num,
-.pg_count = pg_counts[ps->pool_id],
-.pg_stripe_size = st_cli.pool_config[ps->pool_id].pg_stripe_size,
-.min_inode = ((uint64_t)(ps->pool_id) << (64 - POOL_ID_BITS)),
-.max_inode = ((uint64_t)(ps->pool_id+1) << (64 - POOL_ID_BITS)) - 1,
+.pg_count = pg_count,
+.pg_stripe_size = pg_stripe_size,
 },
 };
 op->callback = [this, ps, role_osd](osd_op_t *op)
@@ -419,8 +386,8 @@ void osd_t::submit_list_subop(osd_num_t role_osd, pg_peering_state_t *ps)
 return;
 }
 printf(
-"[PG %u/%u] Got object list from OSD %lu: %ld object versions (%lu of them stable)\n",
-ps->pool_id, ps->pg_num, role_osd, op->reply.hdr.retval, op->reply.sec_list.stable_count
+"[PG %u] Got object list from OSD %lu: %ld object versions (%lu of them stable)\n",
+ps->pg_num, role_osd, op->reply.hdr.retval, op->reply.sec_list.stable_count
 );
 ps->list_results[role_osd] = {
 .buf = (obj_ver_id*)op->buf,
@@ -461,16 +428,22 @@ void osd_t::discard_list_subop(osd_op_t *list_op)
 }
 }
-bool osd_t::stop_pg(pg_t & pg)
+bool osd_t::stop_pg(pg_num_t pg_num)
 {
+auto pg_it = pgs.find(pg_num);
+if (pg_it == pgs.end())
+{
+return false;
+}
+auto & pg = pg_it->second;
 if (pg.peering_state)
 {
 // Stop peering
-for (auto it = pg.peering_state->list_ops.begin(); it != pg.peering_state->list_ops.end(); it++)
+for (auto it = pg.peering_state->list_ops.begin(); it != pg.peering_state->list_ops.end();)
 {
 discard_list_subop(it->second);
 }
-for (auto it = pg.peering_state->list_results.begin(); it != pg.peering_state->list_results.end(); it++)
+for (auto it = pg.peering_state->list_results.begin(); it != pg.peering_state->list_results.end();)
 {
 if (it->second.buf)
 {
@@ -480,19 +453,12 @@ bool osd_t::stop_pg(pg_t & pg)
 delete pg.peering_state;
 pg.peering_state = NULL;
 }
-if (pg.state & (PG_STOPPING | PG_OFFLINE))
+if (!(pg.state & PG_ACTIVE))
 {
 return false;
 }
-if (!(pg.state & PG_ACTIVE))
-{
-finish_stop_pg(pg);
-return true;
-}
 pg.state = pg.state & ~PG_ACTIVE | PG_STOPPING;
-if (pg.inflight == 0 && !pg.flush_batch &&
-// We must either forget all PG's unstable writes or wait for it to become clean
-dirty_pgs.find({ .pool_id = pg.pool_id, .pg_num = pg.pg_num }) == dirty_pgs.end())
+if (pg.inflight == 0 && !pg.flush_batch)
 {
 finish_stop_pg(pg);
 }
@@ -506,14 +472,13 @@ bool osd_t::stop_pg(pg_t & pg)
 void osd_t::finish_stop_pg(pg_t & pg)
 {
 pg.state = PG_OFFLINE;
-reset_pg(pg);
 report_pg_state(pg);
 }
 void osd_t::report_pg_state(pg_t & pg)
 {
 pg.print_state();
-this->pg_state_dirty.insert({ .pool_id = pg.pool_id, .pg_num = pg.pg_num });
+this->pg_state_dirty.insert(pg.pg_num);
 if (pg.state == PG_ACTIVE && (pg.target_history.size() > 0 || pg.all_peers.size() > pg.target_set.size()))
 {
 // Clear history of active+clean PGs
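
Note: both sides of the peering-state cleanup above erase from a std::map while iterating, in two different safe ways. A hedged reference sketch of the two idioms (container and predicate are placeholders):

#include <map>

void erase_matching(std::map<int, int> & m)
{
    // Idiom used by the "-" side: post-increment inside erase() is safe
    // because the iterator copy passed to erase() is advanced first.
    for (auto it = m.begin(); it != m.end(); )
    {
        if (it->second == 0)
            m.erase(it++);
        else
            it++;
    }
    // Idiom used by the "+" side: erase, then restart from begin(). Simpler,
    // but quadratic when many entries match, which is fine for small maps.
    for (auto it = m.begin(); it != m.end(); )
    {
        if (it->second == 0)
        {
            m.erase(it);
            it = m.begin();
        }
        else
            it++;
    }
}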


@ -1,7 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include <unordered_map>
#include "osd_peering_pg.h" #include "osd_peering_pg.h"
struct obj_ver_role struct obj_ver_role
@ -37,7 +33,6 @@ struct obj_piece_ver_t
struct pg_obj_state_check_t struct pg_obj_state_check_t
{ {
pg_t *pg; pg_t *pg;
bool replicated = false;
std::vector<obj_ver_role> list; std::vector<obj_ver_role> list;
int list_pos; int list_pos;
int obj_start = 0, obj_end = 0, ver_start = 0, ver_end = 0; int obj_start = 0, obj_end = 0, ver_start = 0, ver_end = 0;
@ -46,7 +41,7 @@ struct pg_obj_state_check_t
uint64_t last_ver = 0; uint64_t last_ver = 0;
uint64_t target_ver = 0; uint64_t target_ver = 0;
uint64_t n_copies = 0, has_roles = 0, n_roles = 0, n_stable = 0, n_mismatched = 0; uint64_t n_copies = 0, has_roles = 0, n_roles = 0, n_stable = 0, n_mismatched = 0;
uint64_t n_unstable = 0, n_invalid = 0; uint64_t n_unstable = 0, n_buggy = 0;
pg_osd_set_t osd_set; pg_osd_set_t osd_set;
int log_level; int log_level;
@ -78,12 +73,6 @@ void pg_obj_state_check_t::walk()
{ {
finish_object(); finish_object();
} }
if (pg->state & PG_HAS_INVALID)
{
// Stop PGs with "invalid" objects
pg->state = PG_INCOMPLETE | PG_HAS_INVALID;
return;
}
if (pg->pg_cursize < pg->pg_size) if (pg->pg_cursize < pg->pg_size)
{ {
pg->state |= PG_DEGRADED; pg->state |= PG_DEGRADED;
@ -103,12 +92,12 @@ void pg_obj_state_check_t::start_object()
target_ver = 0; target_ver = 0;
ver_start = list_pos; ver_start = list_pos;
has_roles = n_copies = n_roles = n_stable = n_mismatched = 0; has_roles = n_copies = n_roles = n_stable = n_mismatched = 0;
n_unstable = n_invalid = 0; n_unstable = n_buggy = 0;
} }
void pg_obj_state_check_t::handle_version() void pg_obj_state_check_t::handle_version()
{ {
if (!target_ver && last_ver != list[list_pos].version && (n_stable > 0 || n_roles >= pg->pg_data_size)) if (!target_ver && last_ver != list[list_pos].version && (n_stable > 0 || n_roles >= pg->pg_minsize))
{ {
// Version is either stable or recoverable // Version is either stable or recoverable
target_ver = last_ver; target_ver = last_ver;
@ -122,11 +111,11 @@ void pg_obj_state_check_t::handle_version()
has_roles = n_copies = n_roles = n_stable = n_mismatched = 0; has_roles = n_copies = n_roles = n_stable = n_mismatched = 0;
last_ver = list[list_pos].version; last_ver = list[list_pos].version;
} }
unsigned replica = (list[list_pos].oid.stripe & STRIPE_MASK); int replica = (list[list_pos].oid.stripe & STRIPE_MASK);
n_copies++; n_copies++;
if (replicated && replica > 0 || replica >= pg->pg_size) if (replica >= pg->pg_size)
{ {
n_invalid++; n_buggy++;
} }
else else
{ {
@ -134,23 +123,6 @@ void pg_obj_state_check_t::handle_version()
{ {
n_stable++; n_stable++;
} }
if (replicated)
{
int i;
for (i = 0; i < pg->cur_set.size(); i++)
{
if (pg->cur_set[i] == list[list_pos].osd_num)
{
break;
}
}
if (i == pg->cur_set.size())
{
n_mismatched++;
}
}
else
{
if (pg->cur_set[replica] != list[list_pos].osd_num) if (pg->cur_set[replica] != list[list_pos].osd_num)
{ {
n_mismatched++; n_mismatched++;
@ -162,7 +134,6 @@ void pg_obj_state_check_t::handle_version()
} }
} }
} }
}
if (!list[list_pos].is_stable) if (!list[list_pos].is_stable)
{ {
n_unstable++; n_unstable++;
@ -171,7 +142,7 @@ void pg_obj_state_check_t::handle_version()
void pg_obj_state_check_t::finish_object() void pg_obj_state_check_t::finish_object()
{ {
if (!target_ver && (n_stable > 0 || n_roles >= pg->pg_data_size)) if (!target_ver && (n_stable > 0 || n_roles >= pg->pg_minsize))
{ {
// Version is either stable or recoverable // Version is either stable or recoverable
target_ver = last_ver; target_ver = last_ver;
@ -180,14 +151,11 @@ void pg_obj_state_check_t::finish_object()
obj_end = list_pos; obj_end = list_pos;
// Remember the decision // Remember the decision
uint64_t state = 0; uint64_t state = 0;
if (n_invalid > 0) if (n_buggy > 0)
{ {
// It's not allowed to change the replication scheme for a pool other than by recreating it state = OBJ_BUGGY;
// So we must bring the PG offline // FIXME: bring pg offline
state = OBJ_INCOMPLETE; throw std::runtime_error("buggy object state");
pg->state |= PG_HAS_INVALID;
pg->total_count++;
return;
} }
if (n_unstable > 0) if (n_unstable > 0)
{ {
@ -233,40 +201,48 @@ void pg_obj_state_check_t::finish_object()
{ {
return; return;
} }
if (!replicated && n_roles < pg->pg_data_size) if (n_roles < pg->pg_minsize)
{ {
if (log_level > 1) if (log_level > 1)
{ {
printf("Object is incomplete: %lx:%lx version=%lu/%lu\n", oid.inode, oid.stripe, target_ver, max_ver); printf("Object is incomplete: inode=%lu stripe=%lu version=%lu/%lu\n", oid.inode, oid.stripe, target_ver, max_ver);
} }
state = OBJ_INCOMPLETE; state = OBJ_INCOMPLETE;
pg->state = pg->state | PG_HAS_INCOMPLETE; pg->state = pg->state | PG_HAS_INCOMPLETE;
} }
else if ((replicated ? n_copies : n_roles) < pg->pg_cursize) else if (n_roles < pg->pg_cursize)
{ {
if (log_level > 1) if (log_level > 1)
{ {
printf("Object is degraded: %lx:%lx version=%lu/%lu\n", oid.inode, oid.stripe, target_ver, max_ver); printf("Object is degraded: inode=%lu stripe=%lu version=%lu/%lu\n", oid.inode, oid.stripe, target_ver, max_ver);
} }
state = OBJ_DEGRADED; state = OBJ_DEGRADED;
pg->state = pg->state | PG_HAS_DEGRADED; pg->state = pg->state | PG_HAS_DEGRADED;
} }
else if (n_mismatched > 0) if (n_mismatched > 0)
{ {
if (log_level > 2 && (replicated || n_roles >= pg->pg_cursize)) if (n_roles >= pg->pg_cursize && log_level > 1)
{ {
printf("Object is misplaced: %lx:%lx version=%lu/%lu\n", oid.inode, oid.stripe, target_ver, max_ver); printf("Object is misplaced: inode=%lu stripe=%lu version=%lu/%lu\n", oid.inode, oid.stripe, target_ver, max_ver);
} }
state |= OBJ_MISPLACED; state |= OBJ_MISPLACED;
pg->state = pg->state | PG_HAS_MISPLACED; pg->state = pg->state | PG_HAS_MISPLACED;
} }
if (log_level > 1 && (state & (OBJ_INCOMPLETE | OBJ_DEGRADED)) || if (log_level > 1 && (n_roles < pg->pg_cursize || n_mismatched > 0))
log_level > 2 && (state & OBJ_MISPLACED)) {
if (log_level > 2)
{ {
for (int i = obj_start; i < obj_end; i++) for (int i = obj_start; i < obj_end; i++)
{ {
printf("v%lu present on: osd %lu, role %ld%s\n", list[i].version, list[i].osd_num, printf("v%lu present on: osd %lu, role %ld%s\n", list[i].version, list[i].osd_num, (list[i].oid.stripe & STRIPE_MASK), list[i].is_stable ? " (stable)" : "");
(list[i].oid.stripe & STRIPE_MASK), list[i].is_stable ? " (stable)" : ""); }
}
else
{
for (int i = ver_start; i < ver_end; i++)
{
printf("Target version present on: osd %lu, role %ld%s\n", list[i].osd_num, (list[i].oid.stripe & STRIPE_MASK), list[i].is_stable ? " (stable)" : "");
}
} }
} }
pg->total_count++; pg->total_count++;
@ -302,14 +278,11 @@ void pg_obj_state_check_t::finish_object()
.osd_num = list[i].osd_num, .osd_num = list[i].osd_num,
.outdated = true, .outdated = true,
}); });
if (!(state & (OBJ_INCOMPLETE | OBJ_DEGRADED)))
{
state |= OBJ_MISPLACED; state |= OBJ_MISPLACED;
pg->state = pg->state | PG_HAS_MISPLACED; pg->state = pg->state | PG_HAS_MISPLACED;
} }
} }
} }
}
if (target_ver < max_ver) if (target_ver < max_ver)
{ {
pg->ver_override[oid] = target_ver; pg->ver_override[oid] = target_ver;
@ -324,23 +297,6 @@ void pg_obj_state_check_t::finish_object()
if (it == pg->state_dict.end()) if (it == pg->state_dict.end())
{ {
std::vector<uint64_t> read_target; std::vector<uint64_t> read_target;
if (replicated)
{
for (auto & o: osd_set)
{
if (!o.outdated)
{
read_target.push_back(o.osd_num);
}
}
while (read_target.size() < pg->pg_size)
{
// FIXME: This is because we then use .data() and assume it's at least <pg_size> long
read_target.push_back(0);
}
}
else
{
read_target.resize(pg->pg_size); read_target.resize(pg->pg_size);
for (int i = 0; i < pg->pg_size; i++) for (int i = 0; i < pg->pg_size; i++)
{ {
@ -353,7 +309,6 @@ void pg_obj_state_check_t::finish_object()
read_target[o.role] = o.osd_num; read_target[o.role] = o.osd_num;
} }
} }
}
pg->state_dict[osd_set] = { pg->state_dict[osd_set] = {
.read_target = read_target, .read_target = read_target,
.osd_set = osd_set, .osd_set = osd_set,
@ -388,9 +343,7 @@ void pg_t::calc_object_states(int log_level)
pg_obj_state_check_t st; pg_obj_state_check_t st;
st.log_level = log_level; st.log_level = log_level;
st.pg = this; st.pg = this;
st.replicated = (this->scheme == POOL_SCHEME_REPLICATED);
auto ps = peering_state; auto ps = peering_state;
epoch = 0;
for (auto it: ps->list_results) for (auto it: ps->list_results)
{ {
auto nstab = it.second.stable_count; auto nstab = it.second.stable_count;
@ -401,10 +354,6 @@ void pg_t::calc_object_states(int log_level)
obj_ver_id *ov = it.second.buf; obj_ver_id *ov = it.second.buf;
for (uint64_t i = 0; i < n; i++, ov++) for (uint64_t i = 0; i < n; i++, ov++)
{ {
if ((ov->version >> (64-PG_EPOCH_BITS)) > epoch)
{
epoch = (ov->version >> (64-PG_EPOCH_BITS));
}
st.list[start+i] = { st.list[start+i] = {
.oid = ov->oid, .oid = ov->oid,
.version = ov->version, .version = ov->version,
@ -420,17 +369,12 @@ void pg_t::calc_object_states(int log_level)
std::sort(st.list.begin(), st.list.end()); std::sort(st.list.begin(), st.list.end());
// Walk over it and check object states // Walk over it and check object states
st.walk(); st.walk();
if (this->state & (PG_DEGRADED|PG_LEFT_ON_DEAD))
{
assert(epoch != ((1ul << PG_EPOCH_BITS)-1));
epoch++;
}
} }
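
The epoch bookkeeping deleted in this hunk packs a PG activation epoch into the top PG_EPOCH_BITS of every object version: calc_object_states() recovers the highest epoch seen in the listings and bumps it whenever the PG activates in a non-clean state. A self-contained sketch of the packing, under the PG_EPOCH_BITS = 48 definition that appears in the header below:

    #include <cstdint>
    #define PG_EPOCH_BITS 48 // leaves a 16-bit per-epoch write counter

    static inline uint64_t ver_epoch(uint64_t version)
    {
        return version >> (64 - PG_EPOCH_BITS);
    }

    static inline uint64_t ver_counter(uint64_t version)
    {
        return version & ((1ul << (64 - PG_EPOCH_BITS)) - 1);
    }

    static inline uint64_t make_version(uint64_t epoch, uint64_t counter)
    {
        return (epoch << (64 - PG_EPOCH_BITS)) | counter;
    }

Because the epoch only grows, any write issued after a non-clean re-activation compares greater than whatever versions were left behind on replicas that missed it.
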
void pg_t::print_state() void pg_t::print_state()
{ {
printf( printf(
"[PG %u/%u] is %s%s%s%s%s%s%s%s%s%s%s%s%s (%lu objects)\n", pool_id, pg_num, "[PG %u] is %s%s%s%s%s%s%s%s%s%s%s (%lu objects)\n", pg_num,
(state & PG_STARTING) ? "starting" : "", (state & PG_STARTING) ? "starting" : "",
(state & PG_OFFLINE) ? "offline" : "", (state & PG_OFFLINE) ? "offline" : "",
(state & PG_PEERING) ? "peering" : "", (state & PG_PEERING) ? "peering" : "",
@ -442,8 +386,6 @@ void pg_t::print_state()
(state & PG_HAS_DEGRADED) ? " + has_degraded" : "", (state & PG_HAS_DEGRADED) ? " + has_degraded" : "",
(state & PG_HAS_MISPLACED) ? " + has_misplaced" : "", (state & PG_HAS_MISPLACED) ? " + has_misplaced" : "",
(state & PG_HAS_UNCLEAN) ? " + has_unclean" : "", (state & PG_HAS_UNCLEAN) ? " + has_unclean" : "",
(state & PG_HAS_INVALID) ? " + has_invalid" : "",
(state & PG_LEFT_ON_DEAD) ? " + left_on_dead" : "",
total_count total_count
); );
} }

View File

@ -1,7 +1,5 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include <map> #include <map>
#include <unordered_map>
#include <vector> #include <vector>
#include <algorithm> #include <algorithm>
@ -11,8 +9,6 @@
#include "osd_ops.h" #include "osd_ops.h"
#include "pg_states.h" #include "pg_states.h"
#define PG_EPOCH_BITS 48
struct pg_obj_loc_t struct pg_obj_loc_t
{ {
uint64_t role; uint64_t role;
@ -44,9 +40,8 @@ struct osd_op_t;
struct pg_peering_state_t struct pg_peering_state_t
{ {
// osd_num -> list result // osd_num -> list result
std::map<osd_num_t, osd_op_t*> list_ops; std::unordered_map<osd_num_t, osd_op_t*> list_ops;
std::map<osd_num_t, pg_list_result_t> list_results; std::unordered_map<osd_num_t, pg_list_result_t> list_results;
pool_id_t pool_id = 0;
pg_num_t pg_num = 0; pg_num_t pg_num = 0;
}; };
@ -56,13 +51,6 @@ struct obj_piece_id_t
uint64_t osd_num; uint64_t osd_num;
}; };
struct obj_ver_osd_t
{
uint64_t osd_num;
object_id oid;
uint64_t version;
};
struct flush_action_t struct flush_action_t
{ {
bool rollback = false, make_stable = false; bool rollback = false, make_stable = false;
@ -81,13 +69,9 @@ struct pg_flush_batch_t
struct pg_t struct pg_t
{ {
int state = 0; int state = 0;
uint64_t scheme = 0; uint64_t pg_cursize = 3, pg_size = 3, pg_minsize = 2;
uint64_t pg_cursize = 0, pg_size = 0, pg_minsize = 0, pg_data_size = 0; pg_num_t pg_num;
pool_id_t pool_id = 0;
pg_num_t pg_num = 0;
uint64_t clean_count = 0, total_count = 0; uint64_t clean_count = 0, total_count = 0;
// epoch number - should increase with each non-clean activation of the PG
uint64_t epoch = 0, reported_epoch = 0;
// target history and all potential peers // target history and all potential peers
std::vector<std::vector<osd_num_t>> target_history; std::vector<std::vector<osd_num_t>> target_history;
std::vector<osd_num_t> all_peers; std::vector<osd_num_t> all_peers;
@ -101,14 +85,13 @@ struct pg_t
std::vector<osd_num_t> cur_set; std::vector<osd_num_t> cur_set;
// same thing in state_dict-like format // same thing in state_dict-like format
pg_osd_set_t cur_loc_set; pg_osd_set_t cur_loc_set;
// moved object map. by default, each object is considered to reside on cur_set. // moved object map. by default, each object is considered to reside on the cur_set.
// this map stores all objects that differ. // this map stores all objects that differ.
// it may consume up to ~ (raw storage / object size) * 24 bytes in the worst case scenario // it may consume up to ~ (raw storage / object size) * 24 bytes in the worst case scenario
// which is up to ~192 MB per 1 TB in the worst case scenario // which is up to ~192 MB per 1 TB in the worst case scenario
std::map<pg_osd_set_t, pg_osd_set_state_t> state_dict; std::map<pg_osd_set_t, pg_osd_set_state_t> state_dict;
btree::btree_map<object_id, pg_osd_set_state_t*> incomplete_objects, misplaced_objects, degraded_objects; btree::btree_map<object_id, pg_osd_set_state_t*> incomplete_objects, misplaced_objects, degraded_objects;
std::map<obj_piece_id_t, flush_action_t> flush_actions; std::map<obj_piece_id_t, flush_action_t> flush_actions;
std::vector<obj_ver_osd_t> copies_to_delete_after_sync;
btree::btree_map<object_id, uint64_t> ver_override; btree::btree_map<object_id, uint64_t> ver_override;
pg_peering_state_t *peering_state = NULL; pg_peering_state_t *peering_state = NULL;
pg_flush_batch_t *flush_batch = NULL; pg_flush_batch_t *flush_batch = NULL;
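
(The ~192 MB bound in the state_dict comment above works out directly, assuming the default 128 KiB object size: 1 TiB / 128 KiB = 2^23 ≈ 8.4 million objects, and 2^23 × 24 bytes = 192 MiB.)
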

View File

@ -1,9 +1,5 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#define _LARGEFILE64_SOURCE #define _LARGEFILE64_SOURCE
#include "malloc_or_die.h"
#include "osd_peering_pg.h" #include "osd_peering_pg.h"
#define STRIPE_SHIFT 12 #define STRIPE_SHIFT 12
@ -32,7 +28,7 @@ int main(int argc, char *argv[])
for (uint64_t osd_num = 1; osd_num <= 3; osd_num++) for (uint64_t osd_num = 1; osd_num <= 3; osd_num++)
{ {
pg_list_result_t r = { pg_list_result_t r = {
.buf = (obj_ver_id*)malloc_or_die(sizeof(obj_ver_id) * 1024*1024*8), .buf = (obj_ver_id*)malloc(sizeof(obj_ver_id) * 1024*1024*8),
.total_count = 1024*1024*8, .total_count = 1024*1024*8,
.stable_count = (uint64_t)(1024*1024*8 - (osd_num == 1 ? 10 : 0)), .stable_count = (uint64_t)(1024*1024*8 - (osd_num == 1 ? 10 : 0)),
}; };

View File

@ -1,8 +1,4 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include "osd_primary.h" #include "osd_primary.h"
#include "allocator.h"
// read: read directly or read paired stripe(s), reconstruct, return // read: read directly or read paired stripe(s), reconstruct, return
// write: read paired stripe(s), reconstruct, modify, calculate parity, write // write: read paired stripe(s), reconstruct, modify, calculate parity, write
@ -17,56 +13,36 @@ bool osd_t::prepare_primary_rw(osd_op_t *cur_op)
{ {
// PG number is calculated from the offset // PG number is calculated from the offset
// Our EC scheme stores data in fixed chunks equal to (K*block size) // Our EC scheme stores data in fixed chunks equal to (K*block size)
// K = (pg_size-parity_chunks) in case of EC/XOR, or 1 for replicated pools // K = pg_minsize and will be a property of the inode. Now it's hardcoded (FIXME)
pool_id_t pool_id = INODE_POOL(cur_op->req.rw.inode); uint64_t pg_block_size = bs_block_size * 2;
// FIXME: We have to access pool config here, so make sure that it doesn't change while its PGs are active...
auto pool_cfg_it = st_cli.pool_config.find(pool_id);
if (pool_cfg_it == st_cli.pool_config.end())
{
// Pool config is not loaded yet
finish_op(cur_op, -EPIPE);
return false;
}
auto & pool_cfg = pool_cfg_it->second;
uint64_t pg_data_size = (pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pool_cfg.pg_size-pool_cfg.parity_chunks);
uint64_t pg_block_size = bs_block_size * pg_data_size;
object_id oid = { object_id oid = {
.inode = cur_op->req.rw.inode, .inode = cur_op->req.rw.inode,
// oid.stripe = starting offset of the parity stripe // oid.stripe = starting offset of the parity stripe
.stripe = (cur_op->req.rw.offset/pg_block_size)*pg_block_size, .stripe = (cur_op->req.rw.offset/pg_block_size)*pg_block_size,
}; };
pg_num_t pg_num = (cur_op->req.rw.inode + oid.stripe/pool_cfg.pg_stripe_size) % pg_counts[pool_id] + 1; pg_num_t pg_num = (cur_op->req.rw.inode + oid.stripe/pg_stripe_size) % pg_count + 1;
auto pg_it = pgs.find({ .pool_id = pool_id, .pg_num = pg_num }); auto pg_it = pgs.find(pg_num);
if (pg_it == pgs.end() || !(pg_it->second.state & PG_ACTIVE)) if (pg_it == pgs.end() || !(pg_it->second.state & PG_ACTIVE))
{ {
// This OSD is not primary for this PG or the PG is inactive // This OSD is not primary for this PG or the PG is inactive
// FIXME: Allow reads from PGs degraded under pg_minsize, but don't allow writes
finish_op(cur_op, -EPIPE); finish_op(cur_op, -EPIPE);
return false; return false;
} }
if ((cur_op->req.rw.offset + cur_op->req.rw.len) > (oid.stripe + pg_block_size) || if ((cur_op->req.rw.offset + cur_op->req.rw.len) > (oid.stripe + pg_block_size) ||
(cur_op->req.rw.offset % bs_bitmap_granularity) != 0 || (cur_op->req.rw.offset % bs_disk_alignment) != 0 ||
(cur_op->req.rw.len % bs_bitmap_granularity) != 0) (cur_op->req.rw.len % bs_disk_alignment) != 0)
{ {
finish_op(cur_op, -EINVAL); finish_op(cur_op, -EINVAL);
return false; return false;
} }
int stripe_count = (pool_cfg.scheme == POOL_SCHEME_REPLICATED ? 1 : pg_it->second.pg_size); osd_primary_op_data_t *op_data = (osd_primary_op_data_t*)calloc(
osd_primary_op_data_t *op_data = (osd_primary_op_data_t*)calloc_or_die( sizeof(osd_primary_op_data_t) + sizeof(osd_rmw_stripe_t) * pg_it->second.pg_size, 1
1, sizeof(osd_primary_op_data_t) + (clean_entry_bitmap_size + sizeof(osd_rmw_stripe_t)) * stripe_count
); );
op_data->pg_num = pg_num; op_data->pg_num = pg_num;
op_data->oid = oid; op_data->oid = oid;
op_data->stripes = ((osd_rmw_stripe_t*)(op_data+1)); op_data->stripes = ((osd_rmw_stripe_t*)(op_data+1));
op_data->scheme = pool_cfg.scheme;
op_data->pg_data_size = pg_data_size;
cur_op->op_data = op_data; cur_op->op_data = op_data;
split_stripes(pg_data_size, bs_block_size, (uint32_t)(cur_op->req.rw.offset - oid.stripe), cur_op->req.rw.len, op_data->stripes); split_stripes(pg_it->second.pg_minsize, bs_block_size, (uint32_t)(cur_op->req.rw.offset - oid.stripe), cur_op->req.rw.len, op_data->stripes);
// Allocate bitmaps along with stripes to avoid extra allocations and fragmentation
for (int i = 0; i < stripe_count; i++)
{
op_data->stripes[i].bmp_buf = (void*)(op_data->stripes+stripe_count) + clean_entry_bitmap_size*i;
}
pg_it->second.inflight++; pg_it->second.inflight++;
return true; return true;
} }
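
Both columns of this hunk derive the object id and the PG number purely from arithmetic on the request offset; isolated, the left-hand column's mapping is (pool_cfg, pg_counts, INODE_POOL() and the request fields as used above):

    // Sketch: offset -> (object, PG) mapping for one primary request.
    uint64_t pg_data_size = pool_cfg.scheme == POOL_SCHEME_REPLICATED
        ? 1 : pool_cfg.pg_size - pool_cfg.parity_chunks;    // K data chunks
    uint64_t pg_block_size = bs_block_size * pg_data_size;  // K * block size
    object_id oid = {
        .inode = cur_op->req.rw.inode,
        // stripe = starting offset of the whole K*block_size stripe
        .stripe = (cur_op->req.rw.offset / pg_block_size) * pg_block_size,
    };
    // PGs are numbered from 1; pg_stripe_size spreads an inode across PGs
    pg_num_t pg_num = (oid.inode + oid.stripe / pool_cfg.pg_stripe_size)
        % pg_counts[pool_id] + 1;
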
@ -106,13 +82,12 @@ void osd_t::continue_primary_read(osd_op_t *cur_op)
{ {
return; return;
} }
cur_op->reply.rw.bitmap_len = 0;
osd_primary_op_data_t *op_data = cur_op->op_data; osd_primary_op_data_t *op_data = cur_op->op_data;
if (op_data->st == 1) goto resume_1; if (op_data->st == 1) goto resume_1;
else if (op_data->st == 2) goto resume_2; else if (op_data->st == 2) goto resume_2;
{ {
auto & pg = pgs.at({ .pool_id = INODE_POOL(op_data->oid.inode), .pg_num = op_data->pg_num }); auto & pg = pgs[op_data->pg_num];
for (int role = 0; role < op_data->pg_data_size; role++) for (int role = 0; role < pg.pg_minsize; role++)
{ {
op_data->stripes[role].read_start = op_data->stripes[role].req_start; op_data->stripes[role].read_start = op_data->stripes[role].req_start;
op_data->stripes[role].read_end = op_data->stripes[role].req_end; op_data->stripes[role].read_end = op_data->stripes[role].req_end;
@ -120,29 +95,29 @@ void osd_t::continue_primary_read(osd_op_t *cur_op)
// Determine version // Determine version
auto vo_it = pg.ver_override.find(op_data->oid); auto vo_it = pg.ver_override.find(op_data->oid);
op_data->target_ver = vo_it != pg.ver_override.end() ? vo_it->second : UINT64_MAX; op_data->target_ver = vo_it != pg.ver_override.end() ? vo_it->second : UINT64_MAX;
if (pg.state == PG_ACTIVE || op_data->scheme == POOL_SCHEME_REPLICATED) if (pg.state == PG_ACTIVE)
{ {
// Fast happy-path // Fast happy-path
cur_op->buf = alloc_read_buffer(op_data->stripes, op_data->pg_data_size, 0); cur_op->buf = alloc_read_buffer(op_data->stripes, pg.pg_minsize, 0);
submit_primary_subops(SUBMIT_READ, op_data->target_ver, submit_primary_subops(SUBMIT_READ, pg.pg_minsize, pg.cur_set.data(), cur_op);
(op_data->scheme == POOL_SCHEME_REPLICATED ? pg.pg_size : op_data->pg_data_size), pg.cur_set.data(), cur_op); cur_op->send_list.push_back(cur_op->buf, cur_op->req.rw.len);
op_data->st = 1; op_data->st = 1;
} }
else else
{ {
// PG may be degraded or have misplaced objects // PG may be degraded or have misplaced objects
uint64_t* cur_set = get_object_osd_set(pg, op_data->oid, pg.cur_set.data(), &op_data->object_state); uint64_t* cur_set = get_object_osd_set(pg, op_data->oid, pg.cur_set.data(), &op_data->object_state);
if (extend_missing_stripes(op_data->stripes, cur_set, op_data->pg_data_size, pg.pg_size) < 0) if (extend_missing_stripes(op_data->stripes, cur_set, pg.pg_minsize, pg.pg_size) < 0)
{ {
finish_op(cur_op, -EIO); finish_op(cur_op, -EIO);
return; return;
} }
// Submit reads // Submit reads
op_data->pg_minsize = pg.pg_minsize;
op_data->pg_size = pg.pg_size; op_data->pg_size = pg.pg_size;
op_data->scheme = pg.scheme;
op_data->degraded = 1; op_data->degraded = 1;
cur_op->buf = alloc_read_buffer(op_data->stripes, pg.pg_size, 0); cur_op->buf = alloc_read_buffer(op_data->stripes, pg.pg_size, 0);
submit_primary_subops(SUBMIT_READ, op_data->target_ver, pg.pg_size, cur_set, cur_op); submit_primary_subops(SUBMIT_READ, pg.pg_size, cur_set, cur_op);
op_data->st = 1; op_data->st = 1;
} }
} }
@ -154,37 +129,27 @@ resume_2:
finish_op(cur_op, op_data->epipe > 0 ? -EPIPE : -EIO); finish_op(cur_op, op_data->epipe > 0 ? -EPIPE : -EIO);
return; return;
} }
cur_op->reply.rw.bitmap_len = op_data->pg_data_size * clean_entry_bitmap_size;
if (op_data->degraded) if (op_data->degraded)
{ {
// Reconstruct missing stripes // Reconstruct missing stripes
// FIXME: Always EC(k+1) by now. Add different coding schemes
osd_rmw_stripe_t *stripes = op_data->stripes; osd_rmw_stripe_t *stripes = op_data->stripes;
if (op_data->scheme == POOL_SCHEME_XOR) for (int role = 0; role < op_data->pg_minsize; role++)
{ {
reconstruct_stripes_xor(stripes, op_data->pg_size, clean_entry_bitmap_size); if (stripes[role].read_end != 0 && stripes[role].missing)
{
reconstruct_stripe(stripes, op_data->pg_size, role);
} }
else if (op_data->scheme == POOL_SCHEME_JERASURE)
{
reconstruct_stripes_jerasure(stripes, op_data->pg_size, op_data->pg_data_size, clean_entry_bitmap_size);
}
cur_op->iov.push_back(op_data->stripes[0].bmp_buf, cur_op->reply.rw.bitmap_len);
for (int role = 0; role < op_data->pg_size; role++)
{
if (stripes[role].req_end != 0) if (stripes[role].req_end != 0)
{ {
// Send buffer in parts to avoid copying // Send buffer in parts to avoid copying
cur_op->iov.push_back( cur_op->send_list.push_back(
stripes[role].read_buf + (stripes[role].req_start - stripes[role].read_start), stripes[role].read_buf + (stripes[role].req_start - stripes[role].read_start),
stripes[role].req_end - stripes[role].req_start stripes[role].req_end - stripes[role].req_start
); );
} }
} }
} }
else
{
cur_op->iov.push_back(op_data->stripes[0].bmp_buf, cur_op->reply.rw.bitmap_len);
cur_op->iov.push_back(cur_op->buf, cur_op->req.rw.len);
}
finish_op(cur_op, cur_op->req.rw.len); finish_op(cur_op, cur_op->req.rw.len);
} }
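
In the degraded branch above, reconstruction in the XOR case boils down to recomputing a missing chunk as the XOR of all surviving chunks of the stripe. A self-contained sketch (the flat buffer layout is an assumption; the real reconstruct_stripes_xor()/reconstruct_stripe() operate on osd_rmw_stripe_t ranges):

    #include <cstdint>
    #include <cstring>

    // Sketch: rebuild chunk 'missing' of an EC(k+1)/XOR stripe from the others.
    void reconstruct_xor(uint8_t **chunks, int pg_size, int missing, size_t len)
    {
        memset(chunks[missing], 0, len);
        for (int role = 0; role < pg_size; role++)
        {
            if (role == missing)
                continue;
            for (size_t i = 0; i < len; i++)
                chunks[missing][i] ^= chunks[role][i]; // parity = XOR of the rest
        }
    }
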
@ -222,7 +187,7 @@ void osd_t::continue_primary_write(osd_op_t *cur_op)
return; return;
} }
osd_primary_op_data_t *op_data = cur_op->op_data; osd_primary_op_data_t *op_data = cur_op->op_data;
auto & pg = pgs.at({ .pool_id = INODE_POOL(op_data->oid.inode), .pg_num = op_data->pg_num }); auto & pg = pgs[op_data->pg_num];
if (op_data->st == 1) goto resume_1; if (op_data->st == 1) goto resume_1;
else if (op_data->st == 2) goto resume_2; else if (op_data->st == 2) goto resume_2;
else if (op_data->st == 3) goto resume_3; else if (op_data->st == 3) goto resume_3;
@ -232,7 +197,6 @@ void osd_t::continue_primary_write(osd_op_t *cur_op)
else if (op_data->st == 7) goto resume_7; else if (op_data->st == 7) goto resume_7;
else if (op_data->st == 8) goto resume_8; else if (op_data->st == 8) goto resume_8;
else if (op_data->st == 9) goto resume_9; else if (op_data->st == 9) goto resume_9;
else if (op_data->st == 10) goto resume_10;
assert(op_data->st == 0); assert(op_data->st == 0);
if (!check_write_queue(cur_op, pg)) if (!check_write_queue(cur_op, pg))
{ {
@ -241,37 +205,12 @@ void osd_t::continue_primary_write(osd_op_t *cur_op)
resume_1: resume_1:
// Determine blocks to read and write // Determine blocks to read and write
// Missing chunks are allowed to be overwritten even in incomplete objects // Missing chunks are allowed to be overwritten even in incomplete objects
// FIXME: Allow small writes to the old (degraded/misplaced) OSD set for lower performance impact // FIXME: Allow small writes to the old (degraded/misplaced) OSD set for the lower performance impact
op_data->prev_set = get_object_osd_set(pg, op_data->oid, pg.cur_set.data(), &op_data->object_state); op_data->prev_set = get_object_osd_set(pg, op_data->oid, pg.cur_set.data(), &op_data->object_state);
if (op_data->scheme == POOL_SCHEME_REPLICATED)
{
// Simplified algorithm
op_data->stripes[0].write_start = op_data->stripes[0].req_start;
op_data->stripes[0].write_end = op_data->stripes[0].req_end;
op_data->stripes[0].write_buf = cur_op->buf;
op_data->stripes[0].bmp_buf = (void*)(op_data->stripes+1);
if (pg.cur_set.data() != op_data->prev_set && (op_data->stripes[0].write_start != 0 ||
op_data->stripes[0].write_end != bs_block_size))
{
// Object is degraded/misplaced and will be moved to <write_osd_set>
op_data->stripes[0].read_start = 0;
op_data->stripes[0].read_end = bs_block_size;
cur_op->rmw_buf = op_data->stripes[0].read_buf = memalign_or_die(MEM_ALIGNMENT, bs_block_size);
}
}
else
{
cur_op->rmw_buf = calc_rmw(cur_op->buf, op_data->stripes, op_data->prev_set, cur_op->rmw_buf = calc_rmw(cur_op->buf, op_data->stripes, op_data->prev_set,
pg.pg_size, op_data->pg_data_size, pg.pg_cursize, pg.cur_set.data(), bs_block_size, clean_entry_bitmap_size); pg.pg_size, pg.pg_minsize, pg.pg_cursize, pg.cur_set.data(), bs_block_size);
if (!cur_op->rmw_buf)
{
// Refuse partial overwrite of an incomplete object
cur_op->reply.hdr.retval = -EINVAL;
goto continue_others;
}
}
// Read required blocks // Read required blocks
submit_primary_subops(SUBMIT_RMW_READ, UINT64_MAX, pg.pg_size, op_data->prev_set, cur_op); submit_primary_subops(SUBMIT_RMW_READ, pg.pg_size, op_data->prev_set, cur_op);
resume_2: resume_2:
op_data->st = 2; op_data->st = 2;
return; return;
@ -283,65 +222,10 @@ resume_3:
} }
// Save version override for parallel reads // Save version override for parallel reads
pg.ver_override[op_data->oid] = op_data->fact_ver; pg.ver_override[op_data->oid] = op_data->fact_ver;
if (op_data->scheme == POOL_SCHEME_REPLICATED)
{
// Set bitmap bits
bitmap_set(op_data->stripes[0].bmp_buf, op_data->stripes[0].write_start, op_data->stripes[0].write_end, bs_bitmap_granularity);
// Possibly copy new data from the request into the recovery buffer
if (pg.cur_set.data() != op_data->prev_set && (op_data->stripes[0].write_start != 0 ||
op_data->stripes[0].write_end != bs_block_size))
{
memcpy(
op_data->stripes[0].read_buf + op_data->stripes[0].req_start,
op_data->stripes[0].write_buf,
op_data->stripes[0].req_end - op_data->stripes[0].req_start
);
op_data->stripes[0].write_buf = op_data->stripes[0].read_buf;
op_data->stripes[0].write_start = 0;
op_data->stripes[0].write_end = bs_block_size;
}
}
else
{
// Recover missing stripes, calculate parity // Recover missing stripes, calculate parity
if (pg.scheme == POOL_SCHEME_XOR) calc_rmw_parity(op_data->stripes, pg.pg_size, op_data->prev_set, pg.cur_set.data(), bs_block_size);
{
calc_rmw_parity_xor(op_data->stripes, pg.pg_size, op_data->prev_set, pg.cur_set.data(), bs_block_size, clean_entry_bitmap_size);
}
else if (pg.scheme == POOL_SCHEME_JERASURE)
{
calc_rmw_parity_jerasure(op_data->stripes, pg.pg_size, op_data->pg_data_size, op_data->prev_set, pg.cur_set.data(), bs_block_size, clean_entry_bitmap_size);
}
}
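
In the replicated branch of the left-hand column the only per-write bookkeeping is marking the written range in the object bitmap. Since offsets and lengths are already checked to be bs_bitmap_granularity-aligned, what bitmap_set() amounts to can be sketched as (a simplified stand-in, not the actual implementation):

    #include <cstdint>

    // Sketch: set one bit per 'granularity' bytes of the aligned [start, end) range.
    void bitmap_set(void *bitmap, uint32_t start, uint32_t end, uint32_t granularity)
    {
        for (uint32_t pos = start / granularity; pos < end / granularity; pos++)
            ((uint8_t*)bitmap)[pos / 8] |= (uint8_t)(1 << (pos % 8));
    }
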
// Send writes // Send writes
if ((op_data->fact_ver >> (64-PG_EPOCH_BITS)) < pg.epoch) submit_primary_subops(SUBMIT_WRITE, pg.pg_size, pg.cur_set.data(), cur_op);
{
op_data->target_ver = ((uint64_t)pg.epoch << (64-PG_EPOCH_BITS)) | 1;
}
else
{
if ((op_data->fact_ver & (1ul<<(64-PG_EPOCH_BITS) - 1)) == (1ul<<(64-PG_EPOCH_BITS) - 1))
{
assert(pg.epoch != ((1ul << PG_EPOCH_BITS)-1));
pg.epoch++;
}
op_data->target_ver = op_data->fact_ver + 1;
}
if (pg.epoch > pg.reported_epoch)
{
// Report newer epoch before writing
// FIXME: We may report only one PG state here...
this->pg_state_dirty.insert({ .pool_id = pg.pool_id, .pg_num = pg.pg_num });
pg.history_changed = true;
report_pg_states();
resume_10:
if (pg.epoch > pg.reported_epoch)
{
op_data->st = 10;
return;
}
}
submit_primary_subops(SUBMIT_WRITE, op_data->target_ver, pg.pg_size, pg.cur_set.data(), cur_op);
resume_4: resume_4:
op_data->st = 4; op_data->st = 4;
return; return;
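
The version chosen for the write is where the epoch packing pays off: the left-hand column never lets a new version's epoch prefix fall behind pg.epoch, and a freshly bumped epoch is reported via report_pg_states() (the resume_10 wait) before the write is submitted. Isolated, and with explicit parentheses around the counter mask (PG_EPOCH_BITS as in osd_peering_pg.h):

    // Sketch: version selection for a new write.
    uint64_t pick_target_ver(uint64_t & pg_epoch, uint64_t fact_ver)
    {
        if ((fact_ver >> (64 - PG_EPOCH_BITS)) < pg_epoch)
            // PG re-activated since this object was last written:
            // restart the per-epoch counter at 1 under the current epoch
            return (pg_epoch << (64 - PG_EPOCH_BITS)) | 1;
        if ((fact_ver & ((1ul << (64 - PG_EPOCH_BITS)) - 1)) == ((1ul << (64 - PG_EPOCH_BITS)) - 1))
            pg_epoch++; // counter saturated: fact_ver + 1 rolls into the next epoch
        return fact_ver + 1;
    }
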
@ -351,13 +235,6 @@ resume_5:
pg_cancel_write_queue(pg, cur_op, op_data->oid, op_data->epipe > 0 ? -EPIPE : -EIO); pg_cancel_write_queue(pg, cur_op, op_data->oid, op_data->epipe > 0 ? -EPIPE : -EIO);
return; return;
} }
resume_6:
resume_7:
if (!remember_unstable_write(cur_op, pg, pg.cur_loc_set, 6))
{
// FIXME: Check for immediate_commit == IMMEDIATE_SMALL
return;
}
if (op_data->fact_ver == 1) if (op_data->fact_ver == 1)
{ {
// Object is created // Object is created
@ -374,40 +251,15 @@ resume_7:
recovery_stat_count[0][recovery_type]++; recovery_stat_count[0][recovery_type]++;
recovery_stat_bytes[0][recovery_type] = 0; recovery_stat_bytes[0][recovery_type] = 0;
} }
for (int role = 0; role < (op_data->scheme == POOL_SCHEME_REPLICATED ? 1 : pg.pg_size); role++) for (int role = 0; role < pg.pg_size; role++)
{ {
recovery_stat_bytes[0][recovery_type] += op_data->stripes[role].write_end - op_data->stripes[role].write_start; recovery_stat_bytes[0][recovery_type] += op_data->stripes[role].write_end - op_data->stripes[role].write_start;
} }
} }
// Any kind of non-clean object can have extra chunks, because we don't record objects if (op_data->object_state->state & OBJ_MISPLACED)
// as degraded & misplaced or incomplete & misplaced at the same time. So try to remove extra chunks
if (immediate_commit != IMMEDIATE_ALL)
{ {
// We can't remove extra chunks yet if fsyncs are explicit, because // Remove extra chunks
// new copies may not be committed to stable storage yet submit_primary_del_subops(cur_op, pg.cur_set.data(), op_data->object_state->osd_set);
// We can only remove extra chunks after a successful SYNC for this PG
for (auto & chunk: op_data->object_state->osd_set)
{
// Check is the same as in submit_primary_del_subops()
if (op_data->scheme == POOL_SCHEME_REPLICATED
? !contains_osd(pg.cur_set.data(), pg.pg_size, chunk.osd_num)
: (chunk.osd_num != pg.cur_set[chunk.role]))
{
pg.copies_to_delete_after_sync.push_back((obj_ver_osd_t){
.osd_num = chunk.osd_num,
.oid = {
.inode = op_data->oid.inode,
.stripe = op_data->oid.stripe | (op_data->scheme == POOL_SCHEME_REPLICATED ? 0 : chunk.role),
},
.version = op_data->fact_ver,
});
copies_to_delete_after_sync_count++;
}
}
}
else
{
submit_primary_del_subops(cur_op, pg.cur_set.data(), pg.pg_size, op_data->object_state->osd_set);
if (op_data->n_subops > 0) if (op_data->n_subops > 0)
{ {
resume_8: resume_8:
@ -425,25 +277,30 @@ resume_9:
remove_object_from_state(op_data->oid, op_data->object_state, pg); remove_object_from_state(op_data->oid, op_data->object_state, pg);
pg.clean_count++; pg.clean_count++;
} }
cur_op->reply.hdr.retval = cur_op->req.rw.len;
continue_others:
// Remove version override // Remove version override
pg.ver_override.erase(op_data->oid); pg.ver_override.erase(op_data->oid);
object_id oid = op_data->oid; // FIXME: Check for immediate_commit == IMMEDIATE_SMALL
// Remove the operation from queue before calling finish_op so it doesn't see the completed operation in queue resume_6:
auto next_it = pg.write_queue.find(oid); resume_7:
if (next_it != pg.write_queue.end() && next_it->second == cur_op) if (!remember_unstable_write(cur_op, pg, pg.cur_loc_set, 6))
{ {
pg.write_queue.erase(next_it++); return;
} }
// finish_op would invalidate next_it if it cleared pg.write_queue, but it doesn't do that :) object_id oid = op_data->oid;
finish_op(cur_op, cur_op->reply.hdr.retval); finish_op(cur_op, cur_op->req.rw.len);
// Continue other write operations to the same object // Continue other write operations to the same object
auto next_it = pg.write_queue.find(oid);
auto this_it = next_it;
if (this_it != pg.write_queue.end() && this_it->second == cur_op)
{
next_it++;
pg.write_queue.erase(this_it);
if (next_it != pg.write_queue.end() && next_it->first == oid) if (next_it != pg.write_queue.end() && next_it->first == oid)
{ {
osd_op_t *next_op = next_it->second; osd_op_t *next_op = next_it->second;
continue_primary_write(next_op); continue_primary_write(next_op);
} }
}
} }
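
Both revisions serialize writes to one object through pg.write_queue; the hunk above only changes the point at which the finished op removes its own entry. The hand-off, sketched under the assumption that write_queue is a multimap keyed by object id:

    // Sketch: drop our own queue entry, then wake the next write to the same object.
    auto next_it = pg.write_queue.find(oid);
    if (next_it != pg.write_queue.end() && next_it->second == cur_op)
    {
        pg.write_queue.erase(next_it++);
        if (next_it != pg.write_queue.end() && next_it->first == oid)
            continue_primary_write(next_it->second); // next queued writer of this object
    }
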
bool osd_t::remember_unstable_write(osd_op_t *cur_op, pg_t & pg, pg_osd_set_t & loc_set, int base_state) bool osd_t::remember_unstable_write(osd_op_t *cur_op, pg_t & pg, pg_osd_set_t & loc_set, int base_state)
@ -457,12 +314,8 @@ bool osd_t::remember_unstable_write(osd_op_t *cur_op, pg_t & pg, pg_osd_set_t &
{ {
goto resume_7; goto resume_7;
} }
// FIXME: Check for immediate_commit == IMMEDIATE_SMALL
if (immediate_commit == IMMEDIATE_ALL) if (immediate_commit == IMMEDIATE_ALL)
{ {
if (op_data->scheme != POOL_SCHEME_REPLICATED)
{
// Send STABILIZE ops immediately
op_data->unstable_write_osds = new std::vector<unstable_osd_num_t>(); op_data->unstable_write_osds = new std::vector<unstable_osd_num_t>();
op_data->unstable_writes = new obj_ver_id[loc_set.size()]; op_data->unstable_writes = new obj_ver_id[loc_set.size()];
{ {
@ -500,15 +353,11 @@ resume_7:
return false; return false;
} }
} }
}
else else
{ {
if (op_data->scheme != POOL_SCHEME_REPLICATED) // Remember version as unstable
{
// Remember version as unstable for EC/XOR
for (auto & chunk: loc_set) for (auto & chunk: loc_set)
{ {
this->dirty_osds.insert(chunk.osd_num);
this->unstable_writes[(osd_object_id_t){ this->unstable_writes[(osd_object_id_t){
.osd_num = chunk.osd_num, .osd_num = chunk.osd_num,
.oid = { .oid = {
@ -517,23 +366,10 @@ resume_7:
}, },
}] = op_data->fact_ver; }] = op_data->fact_ver;
} }
}
else
{
// Only remember to sync OSDs for replicated pools
for (auto & chunk: loc_set)
{
this->dirty_osds.insert(chunk.osd_num);
}
}
// Remember PG as dirty to drop the connection when PG goes offline // Remember PG as dirty to drop the connection when PG goes offline
// (this is required because of the "lazy sync") // (this is required because of the "lazy sync")
auto cl_it = c_cli.clients.find(cur_op->peer_fd); c_cli.clients[cur_op->peer_fd].dirty_pgs.insert(op_data->pg_num);
if (cl_it != c_cli.clients.end()) dirty_pgs.insert(op_data->pg_num);
{
cl_it->second->dirty_pgs.insert({ .pool_id = pg.pool_id, .pg_num = pg.pg_num });
}
dirty_pgs.insert({ .pool_id = pg.pool_id, .pg_num = pg.pg_num });
} }
return true; return true;
} }
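
The lazy-sync bookkeeping kept by the left-hand column can be summarized in two containers: every EC/XOR write records (osd, object) -> version so that a later SYNC can stabilize exactly those versions, while replicated pools only track which OSDs are dirty, because their writes already go through the *_WRITE_STABLE path. A sketch (osd_object_id_t and object_id as in the code above):

    std::map<osd_object_id_t, uint64_t> unstable_writes; // (osd, oid) -> last version
    std::set<osd_num_t> dirty_osds;                      // OSDs that need an fsync

    void remember_write(osd_num_t osd, object_id oid, uint64_t ver, bool replicated)
    {
        dirty_osds.insert(osd);
        if (!replicated)
            unstable_writes[(osd_object_id_t){ .osd_num = osd, .oid = oid }] = ver;
    }
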
@ -543,7 +379,7 @@ void osd_t::continue_primary_sync(osd_op_t *cur_op)
{ {
if (!cur_op->op_data) if (!cur_op->op_data)
{ {
cur_op->op_data = (osd_primary_op_data_t*)calloc_or_die(1, sizeof(osd_primary_op_data_t)); cur_op->op_data = (osd_primary_op_data_t*)calloc(sizeof(osd_primary_op_data_t), 1);
} }
osd_primary_op_data_t *op_data = cur_op->op_data; osd_primary_op_data_t *op_data = cur_op->op_data;
if (op_data->st == 1) goto resume_1; if (op_data->st == 1) goto resume_1;
@ -552,8 +388,6 @@ void osd_t::continue_primary_sync(osd_op_t *cur_op)
else if (op_data->st == 4) goto resume_4; else if (op_data->st == 4) goto resume_4;
else if (op_data->st == 5) goto resume_5; else if (op_data->st == 5) goto resume_5;
else if (op_data->st == 6) goto resume_6; else if (op_data->st == 6) goto resume_6;
else if (op_data->st == 7) goto resume_7;
else if (op_data->st == 8) goto resume_8;
assert(op_data->st == 0); assert(op_data->st == 0);
if (syncs_in_progress.size() > 0) if (syncs_in_progress.size() > 0)
{ {
@ -569,7 +403,7 @@ resume_1:
syncs_in_progress.push_back(cur_op); syncs_in_progress.push_back(cur_op);
} }
resume_2: resume_2:
if (dirty_osds.size() == 0) if (unstable_writes.size() == 0)
{ {
// Nothing to sync // Nothing to sync
goto finish; goto finish;
@ -577,10 +411,11 @@ resume_2:
// Save and clear unstable_writes // Save and clear unstable_writes
// In theory it is possible to do it on a per-client basis, but this seems to be an unnecessary complication // In theory it is possible to do it on a per-client basis, but this seems to be an unnecessary complication
// It would be cool not to copy these here at all, but someone has to deduplicate them by object IDs anyway // It would be cool not to copy these here at all, but someone has to deduplicate them by object IDs anyway
if (unstable_writes.size() > 0)
{ {
op_data->unstable_write_osds = new std::vector<unstable_osd_num_t>(); op_data->unstable_write_osds = new std::vector<unstable_osd_num_t>();
op_data->unstable_writes = new obj_ver_id[this->unstable_writes.size()]; op_data->unstable_writes = new obj_ver_id[this->unstable_writes.size()];
op_data->dirty_pgs = new pg_num_t[dirty_pgs.size()];
op_data->dirty_pg_count = dirty_pgs.size();
osd_num_t last_osd = 0; osd_num_t last_osd = 0;
int last_start = 0, last_end = 0; int last_start = 0, last_end = 0;
for (auto it = this->unstable_writes.begin(); it != this->unstable_writes.end(); it++) for (auto it = this->unstable_writes.begin(); it != this->unstable_writes.end(); it++)
@ -612,50 +447,14 @@ resume_2:
.len = last_end - last_start, .len = last_end - last_start,
}); });
} }
this->unstable_writes.clear();
}
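
The loop above turns the ordered unstable_writes map into one flat obj_ver_id array cut into per-OSD slices. Reassembled from the fragments shown (field order of unstable_osd_num_t is an assumption; the map is keyed by osd_num first, so one OSD's entries are adjacent):

    // Sketch: group consecutive map entries of one OSD into a {osd, start, len} slice.
    osd_num_t last_osd = 0;
    int last_start = 0, last_end = 0;
    for (auto it = unstable_writes.begin(); it != unstable_writes.end(); it++)
    {
        if (it->first.osd_num != last_osd)
        {
            if (last_osd != 0)
                unstable_write_osds->push_back({ last_osd, last_start, last_end - last_start });
            last_osd = it->first.osd_num;
            last_start = last_end;
        }
        unstable_writes_buf[last_end++] = { .oid = it->first.oid, .version = it->second };
    }
    if (last_osd != 0)
        unstable_write_osds->push_back({ last_osd, last_start, last_end - last_start });
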
{
void *dirty_buf = malloc_or_die(
sizeof(pool_pg_num_t)*dirty_pgs.size() +
sizeof(osd_num_t)*dirty_osds.size() +
sizeof(obj_ver_osd_t)*this->copies_to_delete_after_sync_count
);
op_data->dirty_pgs = (pool_pg_num_t*)dirty_buf;
op_data->dirty_osds = (osd_num_t*)(dirty_buf + sizeof(pool_pg_num_t)*dirty_pgs.size());
op_data->dirty_pg_count = dirty_pgs.size();
op_data->dirty_osd_count = dirty_osds.size();
if (this->copies_to_delete_after_sync_count)
{
op_data->copies_to_delete_count = 0;
op_data->copies_to_delete = (obj_ver_osd_t*)(op_data->dirty_osds + op_data->dirty_osd_count);
for (auto dirty_pg_num: dirty_pgs)
{
auto & pg = pgs.at(dirty_pg_num);
assert(pg.copies_to_delete_after_sync.size() <= this->copies_to_delete_after_sync_count);
memcpy(
op_data->copies_to_delete + op_data->copies_to_delete_count,
pg.copies_to_delete_after_sync.data(),
sizeof(obj_ver_osd_t)*pg.copies_to_delete_after_sync.size()
);
op_data->copies_to_delete_count += pg.copies_to_delete_after_sync.size();
this->copies_to_delete_after_sync_count -= pg.copies_to_delete_after_sync.size();
pg.copies_to_delete_after_sync.clear();
}
assert(this->copies_to_delete_after_sync_count == 0);
}
int dpg = 0; int dpg = 0;
for (auto dirty_pg_num: dirty_pgs) for (auto dirty_pg_num: dirty_pgs)
{ {
pgs.at(dirty_pg_num).inflight++; pgs[dirty_pg_num].inflight++;
op_data->dirty_pgs[dpg++] = dirty_pg_num; op_data->dirty_pgs[dpg++] = dirty_pg_num;
} }
dirty_pgs.clear(); dirty_pgs.clear();
dpg = 0; this->unstable_writes.clear();
for (auto osd_num: dirty_osds)
{
op_data->dirty_osds[dpg++] = osd_num;
}
dirty_osds.clear();
} }
if (immediate_commit != IMMEDIATE_ALL) if (immediate_commit != IMMEDIATE_ALL)
{ {
@ -670,27 +469,13 @@ resume_4:
goto resume_6; goto resume_6;
} }
} }
if (op_data->unstable_writes) // Stabilize version sets
{
// Stabilize version sets, if any
submit_primary_stab_subops(cur_op); submit_primary_stab_subops(cur_op);
resume_5: resume_5:
op_data->st = 5; op_data->st = 5;
return; return;
}
resume_6: resume_6:
if (op_data->errors > 0) if (op_data->errors > 0)
{
// Return PGs and OSDs back into their dirty sets
for (int i = 0; i < op_data->dirty_pg_count; i++)
{
dirty_pgs.insert(op_data->dirty_pgs[i]);
}
for (int i = 0; i < op_data->dirty_osd_count; i++)
{
dirty_osds.insert(op_data->dirty_osds[i]);
}
if (op_data->unstable_writes)
{ {
// Return objects back into the unstable write set // Return objects back into the unstable write set
for (auto unstable_osd: *(op_data->unstable_write_osds)) for (auto unstable_osd: *(op_data->unstable_write_osds))
@ -699,11 +484,8 @@ resume_6:
{ {
// Except those from peered PGs // Except those from peered PGs
auto & w = op_data->unstable_writes[i]; auto & w = op_data->unstable_writes[i];
pool_pg_num_t wpg = { pg_num_t wpg = map_to_pg(w.oid);
.pool_id = INODE_POOL(w.oid.inode), if (pgs[wpg].state & PG_ACTIVE)
.pg_num = map_to_pg(w.oid, st_cli.pool_config.at(INODE_POOL(w.oid.inode)).pg_stripe_size),
};
if (pgs.at(wpg).state & PG_ACTIVE)
{ {
uint64_t & dest = this->unstable_writes[(osd_object_id_t){ uint64_t & dest = this->unstable_writes[(osd_object_id_t){
.osd_num = unstable_osd.osd_num, .osd_num = unstable_osd.osd_num,
@ -715,59 +497,21 @@ resume_6:
} }
} }
} }
if (op_data->copies_to_delete)
{
// Return 'copies to delete' back into respective PGs
for (int i = 0; i < op_data->copies_to_delete_count; i++)
{
auto & w = op_data->copies_to_delete[i];
auto & pg = pgs.at((pool_pg_num_t){
.pool_id = INODE_POOL(w.oid.inode),
.pg_num = map_to_pg(w.oid, st_cli.pool_config.at(INODE_POOL(w.oid.inode)).pg_stripe_size),
});
if (pg.state & PG_ACTIVE)
{
pg.copies_to_delete_after_sync.push_back(w);
copies_to_delete_after_sync_count++;
}
}
}
}
else if (op_data->copies_to_delete)
{
// Actually delete copies which we wanted to delete
submit_primary_del_batch(cur_op, op_data->copies_to_delete, op_data->copies_to_delete_count);
resume_7:
op_data->st = 7;
return;
resume_8:
if (op_data->errors > 0)
{
goto resume_6;
}
}
for (int i = 0; i < op_data->dirty_pg_count; i++) for (int i = 0; i < op_data->dirty_pg_count; i++)
{ {
auto & pg = pgs.at(op_data->dirty_pgs[i]); auto & pg = pgs.at(op_data->dirty_pgs[i]);
pg.inflight--; pg.inflight--;
if ((pg.state & PG_STOPPING) && pg.inflight == 0 && !pg.flush_batch && if ((pg.state & PG_STOPPING) && pg.inflight == 0 && !pg.flush_batch)
// We must either forget all PG's unstable writes or wait for it to become clean
dirty_pgs.find({ .pool_id = pg.pool_id, .pg_num = pg.pg_num }) == dirty_pgs.end())
{ {
finish_stop_pg(pg); finish_stop_pg(pg);
} }
} }
// FIXME: Free those in the destructor? // FIXME: Free those in the destructor?
free(op_data->dirty_pgs); delete op_data->dirty_pgs;
op_data->dirty_pgs = NULL;
op_data->dirty_osds = NULL;
if (op_data->unstable_writes)
{
delete op_data->unstable_write_osds; delete op_data->unstable_write_osds;
delete[] op_data->unstable_writes; delete[] op_data->unstable_writes;
op_data->unstable_writes = NULL; op_data->unstable_writes = NULL;
op_data->unstable_write_osds = NULL; op_data->unstable_write_osds = NULL;
}
if (op_data->errors > 0) if (op_data->errors > 0)
{ {
finish_op(cur_op, op_data->epipe > 0 ? -EPIPE : -EIO); finish_op(cur_op, op_data->epipe > 0 ? -EPIPE : -EIO);
@ -779,7 +523,7 @@ finish:
{ {
auto it = c_cli.clients.find(cur_op->peer_fd); auto it = c_cli.clients.find(cur_op->peer_fd);
if (it != c_cli.clients.end()) if (it != c_cli.clients.end())
it->second->dirty_pgs.clear(); it->second.dirty_pgs.clear();
} }
finish_op(cur_op, 0); finish_op(cur_op, 0);
} }
@ -846,7 +590,7 @@ void osd_t::continue_primary_del(osd_op_t *cur_op)
return; return;
} }
osd_primary_op_data_t *op_data = cur_op->op_data; osd_primary_op_data_t *op_data = cur_op->op_data;
auto & pg = pgs.at({ .pool_id = INODE_POOL(op_data->oid.inode), .pg_num = op_data->pg_num }); auto & pg = pgs[op_data->pg_num];
if (op_data->st == 1) goto resume_1; if (op_data->st == 1) goto resume_1;
else if (op_data->st == 2) goto resume_2; else if (op_data->st == 2) goto resume_2;
else if (op_data->st == 3) goto resume_3; else if (op_data->st == 3) goto resume_3;
@ -867,7 +611,7 @@ resume_1:
// Determine which OSDs contain this object and delete it // Determine which OSDs contain this object and delete it
op_data->prev_set = get_object_osd_set(pg, op_data->oid, pg.cur_set.data(), &op_data->object_state); op_data->prev_set = get_object_osd_set(pg, op_data->oid, pg.cur_set.data(), &op_data->object_state);
// Submit 1 read to determine the actual version number // Submit 1 read to determine the actual version number
submit_primary_subops(SUBMIT_RMW_READ, UINT64_MAX, pg.pg_size, op_data->prev_set, cur_op); submit_primary_subops(SUBMIT_RMW_READ, pg.pg_size, op_data->prev_set, cur_op);
resume_2: resume_2:
op_data->st = 2; op_data->st = 2;
return; return;
@ -881,7 +625,7 @@ resume_3:
pg.ver_override[op_data->oid] = op_data->fact_ver; pg.ver_override[op_data->oid] = op_data->fact_ver;
// Submit deletes // Submit deletes
op_data->fact_ver++; op_data->fact_ver++;
submit_primary_del_subops(cur_op, NULL, 0, op_data->object_state ? op_data->object_state->osd_set : pg.cur_loc_set); submit_primary_del_subops(cur_op, NULL, op_data->object_state ? op_data->object_state->osd_set : pg.cur_loc_set);
resume_4: resume_4:
op_data->st = 4; op_data->st = 4;
return; return;

View File

@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#pragma once #pragma once
#include "osd.h" #include "osd.h"
@ -23,9 +20,8 @@ struct osd_primary_op_data_t
object_id oid; object_id oid;
uint64_t target_ver; uint64_t target_ver;
uint64_t fact_ver = 0; uint64_t fact_ver = 0;
uint64_t scheme = 0;
int n_subops = 0, done = 0, errors = 0, epipe = 0; int n_subops = 0, done = 0, errors = 0, epipe = 0;
int degraded = 0, pg_size, pg_data_size; int degraded = 0, pg_size, pg_minsize;
osd_rmw_stripe_t *stripes; osd_rmw_stripe_t *stripes;
osd_op_t *subops = NULL; osd_op_t *subops = NULL;
uint64_t *prev_set = NULL; uint64_t *prev_set = NULL;
@ -33,13 +29,7 @@ struct osd_primary_op_data_t
// for sync. oops, requires freeing // for sync. oops, requires freeing
std::vector<unstable_osd_num_t> *unstable_write_osds = NULL; std::vector<unstable_osd_num_t> *unstable_write_osds = NULL;
pool_pg_num_t *dirty_pgs = NULL; pg_num_t *dirty_pgs = NULL;
int dirty_pg_count = 0; int dirty_pg_count = 0;
osd_num_t *dirty_osds = NULL;
int dirty_osd_count = 0;
obj_ver_id *unstable_writes = NULL; obj_ver_id *unstable_writes = NULL;
obj_ver_osd_t *copies_to_delete = NULL;
int copies_to_delete_count = 0;
}; };
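
The stripes pointer above is not a separate allocation: in the left-hand column, prepare_primary_rw() lays out the op data, the stripe array and the per-stripe bitmaps in a single calloc, which is what the "(op_data+1)" arithmetic earlier in this diff implements. Schematically (n = stripe count, clean_entry_bitmap_size as used there):

    // Sketch: one allocation, three regions:
    //   [osd_primary_op_data_t][osd_rmw_stripe_t x n][bitmap x n]
    osd_primary_op_data_t *op_data = (osd_primary_op_data_t*)calloc_or_die(
        1, sizeof(osd_primary_op_data_t) + (clean_entry_bitmap_size + sizeof(osd_rmw_stripe_t)) * n);
    op_data->stripes = (osd_rmw_stripe_t*)(op_data + 1);
    for (int i = 0; i < n; i++)
        op_data->stripes[i].bmp_buf = (void*)(op_data->stripes + n) + clean_entry_bitmap_size * i;

One allocation instead of 2n+1 small ones avoids extra fragmentation on the hot path, which is the stated reason the bitmaps ride along with the stripes.
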
bool contains_osd(osd_num_t *osd_set, uint64_t size, osd_num_t osd_num);

View File

@ -1,6 +1,3 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
#include "osd_primary.h" #include "osd_primary.h"
void osd_t::autosync() void osd_t::autosync()
@ -11,7 +8,7 @@ void osd_t::autosync()
{ {
autosync_op = new osd_op_t(); autosync_op = new osd_op_t();
autosync_op->op_type = OSD_OP_IN; autosync_op->op_type = OSD_OP_IN;
autosync_op->req = (osd_any_op_t){ autosync_op->req = {
.sync = { .sync = {
.header = { .header = {
.magic = SECONDARY_OSD_OP_MAGIC, .magic = SECONDARY_OSD_OP_MAGIC,
@ -36,39 +33,14 @@ void osd_t::autosync()
void osd_t::finish_op(osd_op_t *cur_op, int retval) void osd_t::finish_op(osd_op_t *cur_op, int retval)
{ {
inflight_ops--; inflight_ops--;
if (cur_op->req.hdr.opcode == OSD_OP_READ ||
cur_op->req.hdr.opcode == OSD_OP_WRITE ||
cur_op->req.hdr.opcode == OSD_OP_DELETE)
{
// Track inode statistics
if (!cur_op->tv_end.tv_sec)
{
clock_gettime(CLOCK_REALTIME, &cur_op->tv_end);
}
uint64_t usec = (
(cur_op->tv_end.tv_sec - cur_op->tv_begin.tv_sec)*1000000 +
(cur_op->tv_end.tv_nsec - cur_op->tv_begin.tv_nsec)/1000
);
int inode_st_op = cur_op->req.hdr.opcode == OSD_OP_DELETE
? INODE_STATS_DELETE
: (cur_op->req.hdr.opcode == OSD_OP_READ ? INODE_STATS_READ : INODE_STATS_WRITE);
inode_stats[cur_op->req.rw.inode].op_count[inode_st_op]++;
inode_stats[cur_op->req.rw.inode].op_sum[inode_st_op] += usec;
if (cur_op->req.hdr.opcode == OSD_OP_DELETE)
inode_stats[cur_op->req.rw.inode].op_bytes[inode_st_op] += cur_op->op_data->pg_data_size * bs_block_size;
else
inode_stats[cur_op->req.rw.inode].op_bytes[inode_st_op] += cur_op->req.rw.len;
}
if (cur_op->op_data) if (cur_op->op_data)
{ {
if (cur_op->op_data->pg_num > 0) if (cur_op->op_data->pg_num > 0)
{ {
auto & pg = pgs.at({ .pool_id = INODE_POOL(cur_op->op_data->oid.inode), .pg_num = cur_op->op_data->pg_num }); auto & pg = pgs[cur_op->op_data->pg_num];
pg.inflight--; pg.inflight--;
assert(pg.inflight >= 0); assert(pg.inflight >= 0);
if ((pg.state & PG_STOPPING) && pg.inflight == 0 && !pg.flush_batch && if ((pg.state & PG_STOPPING) && pg.inflight == 0 && !pg.flush_batch)
// We must either forget all PG's unstable writes or wait for it to become clean
dirty_pgs.find({ .pool_id = pg.pool_id, .pg_num = pg.pg_num }) == dirty_pgs.end())
{ {
finish_stop_pg(pg); finish_stop_pg(pg);
} }
@ -87,7 +59,7 @@ void osd_t::finish_op(osd_op_t *cur_op, int retval)
} }
else else
{ {
// FIXME add separate magic number for primary ops // FIXME add separate magic number
auto cl_it = c_cli.clients.find(cur_op->peer_fd); auto cl_it = c_cli.clients.find(cur_op->peer_fd);
if (cl_it != c_cli.clients.end()) if (cl_it != c_cli.clients.end())
{ {
@ -104,12 +76,11 @@ void osd_t::finish_op(osd_op_t *cur_op, int retval)
} }
} }
void osd_t::submit_primary_subops(int submit_type, uint64_t op_version, int pg_size, const uint64_t* osd_set, osd_op_t *cur_op) void osd_t::submit_primary_subops(int submit_type, int pg_size, const uint64_t* osd_set, osd_op_t *cur_op)
{ {
bool wr = submit_type == SUBMIT_WRITE; bool w = submit_type == SUBMIT_WRITE;
osd_primary_op_data_t *op_data = cur_op->op_data; osd_primary_op_data_t *op_data = cur_op->op_data;
osd_rmw_stripe_t *stripes = op_data->stripes; osd_rmw_stripe_t *stripes = op_data->stripes;
bool rep = op_data->scheme == POOL_SCHEME_REPLICATED;
// Allocate subops // Allocate subops
int n_subops = 0, zero_read = -1; int n_subops = 0, zero_read = -1;
for (int role = 0; role < pg_size; role++) for (int role = 0; role < pg_size; role++)
@ -118,12 +89,12 @@ void osd_t::submit_primary_subops(int submit_type, uint64_t op_version, int pg_s
{ {
zero_read = role; zero_read = role;
} }
if (osd_set[role] != 0 && (wr || !rep && stripes[role].read_end != 0)) if (osd_set[role] != 0 && (w || stripes[role].read_end != 0))
{ {
n_subops++; n_subops++;
} }
} }
if (!n_subops && (submit_type == SUBMIT_RMW_READ || rep)) if (!n_subops && submit_type == SUBMIT_RMW_READ)
{ {
n_subops = 1; n_subops = 1;
} }
@ -131,6 +102,7 @@ void osd_t::submit_primary_subops(int submit_type, uint64_t op_version, int pg_s
{ {
zero_read = -1; zero_read = -1;
} }
uint64_t op_version = w ? op_data->fact_ver+1 : (submit_type == SUBMIT_RMW_READ ? UINT64_MAX : op_data->target_ver);
osd_op_t *subops = new osd_op_t[n_subops]; osd_op_t *subops = new osd_op_t[n_subops];
op_data->fact_ver = 0; op_data->fact_ver = 0;
op_data->done = op_data->errors = 0; op_data->done = op_data->errors = 0;
@ -140,40 +112,36 @@ void osd_t::submit_primary_subops(int submit_type, uint64_t op_version, int pg_s
for (int role = 0; role < pg_size; role++) for (int role = 0; role < pg_size; role++)
{ {
// We always submit zero-length writes to all replicas, even if the stripe is not modified // We always submit zero-length writes to all replicas, even if the stripe is not modified
if (!(wr || !rep && stripes[role].read_end != 0 || zero_read == role)) if (!(w || stripes[role].read_end != 0 || zero_read == role))
{ {
continue; continue;
} }
osd_num_t role_osd_num = osd_set[role]; osd_num_t role_osd_num = osd_set[role];
if (role_osd_num != 0) if (role_osd_num != 0)
{ {
int stripe_num = rep ? 0 : role;
if (role_osd_num == this->osd_num) if (role_osd_num == this->osd_num)
{ {
clock_gettime(CLOCK_REALTIME, &subops[i].tv_begin); clock_gettime(CLOCK_REALTIME, &subops[i].tv_begin);
subops[i].op_type = (uint64_t)cur_op; subops[i].op_type = (uint64_t)cur_op;
subops[i].bitmap = stripes[stripe_num].bmp_buf;
subops[i].bitmap_len = clean_entry_bitmap_size;
subops[i].bs_op = new blockstore_op_t({ subops[i].bs_op = new blockstore_op_t({
.opcode = (uint64_t)(wr ? (rep ? BS_OP_WRITE_STABLE : BS_OP_WRITE) : BS_OP_READ), .opcode = (uint64_t)(w ? BS_OP_WRITE : BS_OP_READ),
.callback = [subop = &subops[i], this](blockstore_op_t *bs_subop) .callback = [subop = &subops[i], this](blockstore_op_t *bs_subop)
{ {
handle_primary_bs_subop(subop); handle_primary_bs_subop(subop);
}, },
.oid = { .oid = {
.inode = op_data->oid.inode, .inode = op_data->oid.inode,
.stripe = op_data->oid.stripe | stripe_num, .stripe = op_data->oid.stripe | role,
}, },
.version = op_version, .version = op_version,
.offset = wr ? stripes[stripe_num].write_start : stripes[stripe_num].read_start, .offset = w ? stripes[role].write_start : stripes[role].read_start,
.len = wr ? stripes[stripe_num].write_end - stripes[stripe_num].write_start : stripes[stripe_num].read_end - stripes[stripe_num].read_start, .len = w ? stripes[role].write_end - stripes[role].write_start : stripes[role].read_end - stripes[role].read_start,
.buf = wr ? stripes[stripe_num].write_buf : stripes[stripe_num].read_buf, .buf = w ? stripes[role].write_buf : stripes[role].read_buf,
.bitmap = stripes[stripe_num].bmp_buf,
}); });
#ifdef OSD_DEBUG #ifdef OSD_DEBUG
printf( printf(
"Submit %s to local: %lx:%lx v%lu %u-%u\n", wr ? "write" : "read", "Submit %s to local: %lu:%lu v%lu %u-%u\n", w ? "write" : "read",
op_data->oid.inode, op_data->oid.stripe | stripe_num, op_version, op_data->oid.inode, op_data->oid.stripe | role, op_version,
subops[i].bs_op->offset, subops[i].bs_op->len subops[i].bs_op->offset, subops[i].bs_op->len
); );
#endif #endif
@ -182,49 +150,40 @@ void osd_t::submit_primary_subops(int submit_type, uint64_t op_version, int pg_s
else else
{ {
subops[i].op_type = OSD_OP_OUT; subops[i].op_type = OSD_OP_OUT;
subops[i].send_list.push_back(subops[i].req.buf, OSD_PACKET_SIZE);
subops[i].peer_fd = c_cli.osd_peer_fds.at(role_osd_num); subops[i].peer_fd = c_cli.osd_peer_fds.at(role_osd_num);
subops[i].bitmap = stripes[stripe_num].bmp_buf;
subops[i].bitmap_len = clean_entry_bitmap_size;
subops[i].req.sec_rw = { subops[i].req.sec_rw = {
.header = { .header = {
.magic = SECONDARY_OSD_OP_MAGIC, .magic = SECONDARY_OSD_OP_MAGIC,
.id = c_cli.next_subop_id++, .id = c_cli.next_subop_id++,
.opcode = (uint64_t)(wr ? (rep ? OSD_OP_SEC_WRITE_STABLE : OSD_OP_SEC_WRITE) : OSD_OP_SEC_READ), .opcode = (uint64_t)(w ? OSD_OP_SECONDARY_WRITE : OSD_OP_SECONDARY_READ),
}, },
.oid = { .oid = {
.inode = op_data->oid.inode, .inode = op_data->oid.inode,
.stripe = op_data->oid.stripe | stripe_num, .stripe = op_data->oid.stripe | role,
}, },
.version = op_version, .version = op_version,
.offset = wr ? stripes[stripe_num].write_start : stripes[stripe_num].read_start, .offset = w ? stripes[role].write_start : stripes[role].read_start,
.len = wr ? stripes[stripe_num].write_end - stripes[stripe_num].write_start : stripes[stripe_num].read_end - stripes[stripe_num].read_start, .len = w ? stripes[role].write_end - stripes[role].write_start : stripes[role].read_end - stripes[role].read_start,
.attr_len = wr ? clean_entry_bitmap_size : 0,
}; };
#ifdef OSD_DEBUG #ifdef OSD_DEBUG
printf( printf(
"Submit %s to osd %lu: %lx:%lx v%lu %u-%u\n", wr ? "write" : "read", role_osd_num, "Submit %s to osd %lu: %lu:%lu v%lu %u-%u\n", w ? "write" : "read", role_osd_num,
op_data->oid.inode, op_data->oid.stripe | stripe_num, op_version, op_data->oid.inode, op_data->oid.stripe | role, op_version,
subops[i].req.sec_rw.offset, subops[i].req.sec_rw.len subops[i].req.sec_rw.offset, subops[i].req.sec_rw.len
); );
#endif #endif
if (wr) subops[i].buf = w ? stripes[role].write_buf : stripes[role].read_buf;
if (w && stripes[role].write_end > 0)
{ {
if (stripes[stripe_num].write_end > stripes[stripe_num].write_start) subops[i].send_list.push_back(stripes[role].write_buf, stripes[role].write_end - stripes[role].write_start);
{
subops[i].iov.push_back(stripes[stripe_num].write_buf, stripes[stripe_num].write_end - stripes[stripe_num].write_start);
}
}
else
{
if (stripes[stripe_num].read_end > stripes[stripe_num].read_start)
{
subops[i].iov.push_back(stripes[stripe_num].read_buf, stripes[stripe_num].read_end - stripes[stripe_num].read_start);
}
} }
subops[i].callback = [cur_op, this](osd_op_t *subop) subops[i].callback = [cur_op, this](osd_op_t *subop)
{ {
int fail_fd = subop->req.hdr.opcode == OSD_OP_SEC_WRITE && int fail_fd = subop->req.hdr.opcode == OSD_OP_SECONDARY_WRITE &&
subop->reply.hdr.retval != subop->req.sec_rw.len ? subop->peer_fd : -1; subop->reply.hdr.retval != subop->req.sec_rw.len ? subop->peer_fd : -1;
// so it doesn't get freed
subop->buf = NULL;
handle_primary_subop(subop, cur_op); handle_primary_subop(subop, cur_op);
if (fail_fd >= 0) if (fail_fd >= 0)
{ {
@ -241,23 +200,21 @@ void osd_t::submit_primary_subops(int submit_type, uint64_t op_version, int pg_s
static uint64_t bs_op_to_osd_op[] = { static uint64_t bs_op_to_osd_op[] = {
0, 0,
OSD_OP_SEC_READ, // BS_OP_READ = 1 OSD_OP_SECONDARY_READ, // BS_OP_READ
OSD_OP_SEC_WRITE, // BS_OP_WRITE = 2 OSD_OP_SECONDARY_WRITE, // BS_OP_WRITE
OSD_OP_SEC_WRITE_STABLE, // BS_OP_WRITE_STABLE = 3 OSD_OP_SECONDARY_SYNC, // BS_OP_SYNC
OSD_OP_SEC_SYNC, // BS_OP_SYNC = 4 OSD_OP_SECONDARY_STABILIZE, // BS_OP_STABLE
OSD_OP_SEC_STABILIZE, // BS_OP_STABLE = 5 OSD_OP_SECONDARY_DELETE, // BS_OP_DELETE
OSD_OP_SEC_DELETE, // BS_OP_DELETE = 6 OSD_OP_SECONDARY_LIST, // BS_OP_LIST
OSD_OP_SEC_LIST, // BS_OP_LIST = 7 OSD_OP_SECONDARY_ROLLBACK, // BS_OP_ROLLBACK
OSD_OP_SEC_ROLLBACK, // BS_OP_ROLLBACK = 8 OSD_OP_TEST_SYNC_STAB_ALL, // BS_OP_SYNC_STAB_ALL
OSD_OP_TEST_SYNC_STAB_ALL, // BS_OP_SYNC_STAB_ALL = 9
}; };
void osd_t::handle_primary_bs_subop(osd_op_t *subop) void osd_t::handle_primary_bs_subop(osd_op_t *subop)
{ {
osd_op_t *cur_op = (osd_op_t*)subop->op_type; osd_op_t *cur_op = (osd_op_t*)subop->op_type;
blockstore_op_t *bs_op = subop->bs_op; blockstore_op_t *bs_op = subop->bs_op;
int expected = bs_op->opcode == BS_OP_READ || bs_op->opcode == BS_OP_WRITE int expected = bs_op->opcode == BS_OP_READ || bs_op->opcode == BS_OP_WRITE ? bs_op->len : 0;
|| bs_op->opcode == BS_OP_WRITE_STABLE ? bs_op->len : 0;
if (bs_op->retval != expected && bs_op->opcode != BS_OP_READ) if (bs_op->retval != expected && bs_op->opcode != BS_OP_READ)
{ {
// die // die
@ -269,7 +226,7 @@ void osd_t::handle_primary_bs_subop(osd_op_t *subop)
add_bs_subop_stats(subop); add_bs_subop_stats(subop);
subop->req.hdr.opcode = bs_op_to_osd_op[bs_op->opcode]; subop->req.hdr.opcode = bs_op_to_osd_op[bs_op->opcode];
subop->reply.hdr.retval = bs_op->retval; subop->reply.hdr.retval = bs_op->retval;
if (bs_op->opcode == BS_OP_READ || bs_op->opcode == BS_OP_WRITE || bs_op->opcode == BS_OP_WRITE_STABLE) if (bs_op->opcode == BS_OP_READ || bs_op->opcode == BS_OP_WRITE)
{ {
subop->req.sec_rw.len = bs_op->len; subop->req.sec_rw.len = bs_op->len;
subop->reply.sec_rw.version = bs_op->version; subop->reply.sec_rw.version = bs_op->version;
@ -296,7 +253,7 @@ void osd_t::add_bs_subop_stats(osd_op_t *subop)
(tv_end.tv_sec - subop->tv_begin.tv_sec)*1000000 + (tv_end.tv_sec - subop->tv_begin.tv_sec)*1000000 +
(tv_end.tv_nsec - subop->tv_begin.tv_nsec)/1000 (tv_end.tv_nsec - subop->tv_begin.tv_nsec)/1000
); );
if (opcode == OSD_OP_SEC_READ || opcode == OSD_OP_SEC_WRITE) if (opcode == OSD_OP_SECONDARY_READ || opcode == OSD_OP_SECONDARY_WRITE)
{ {
c_cli.stats.op_stat_bytes[opcode] += subop->bs_op->len; c_cli.stats.op_stat_bytes[opcode] += subop->bs_op->len;
} }
@ -306,8 +263,8 @@ void osd_t::handle_primary_subop(osd_op_t *subop, osd_op_t *cur_op)
{ {
uint64_t opcode = subop->req.hdr.opcode; uint64_t opcode = subop->req.hdr.opcode;
int retval = subop->reply.hdr.retval; int retval = subop->reply.hdr.retval;
int expected = opcode == OSD_OP_SEC_READ || opcode == OSD_OP_SEC_WRITE int expected = opcode == OSD_OP_SECONDARY_READ || opcode == OSD_OP_SECONDARY_WRITE
|| opcode == OSD_OP_SEC_WRITE_STABLE ? subop->req.sec_rw.len : 0; ? subop->req.sec_rw.len : 0;
osd_primary_op_data_t *op_data = cur_op->op_data; osd_primary_op_data_t *op_data = cur_op->op_data;
if (retval != expected) if (retval != expected)
{ {
@ -321,12 +278,12 @@ void osd_t::handle_primary_subop(osd_op_t *subop, osd_op_t *cur_op)
else else
{ {
op_data->done++; op_data->done++;
if (opcode == OSD_OP_SEC_READ || opcode == OSD_OP_SEC_WRITE || opcode == OSD_OP_SEC_WRITE_STABLE) if (opcode == OSD_OP_SECONDARY_READ || opcode == OSD_OP_SECONDARY_WRITE)
{ {
uint64_t version = subop->reply.sec_rw.version; uint64_t version = subop->reply.sec_rw.version;
#ifdef OSD_DEBUG #ifdef OSD_DEBUG
     uint64_t peer_osd = c_cli.clients.find(subop->peer_fd) != c_cli.clients.end()
-        ? c_cli.clients[subop->peer_fd]->osd_num : osd_num;
+        ? c_cli.clients[subop->peer_fd].osd_num : osd_num;
     printf("subop %lu from osd %lu: version = %lu\n", opcode, peer_osd, version);
 #endif
     if (op_data->fact_ver != 0 && op_data->fact_ver != version)
@@ -384,59 +341,30 @@ void osd_t::cancel_primary_write(osd_op_t *cur_op)
     }
 }
 
-bool contains_osd(osd_num_t *osd_set, uint64_t size, osd_num_t osd_num)
-{
-    for (uint64_t i = 0; i < size; i++)
-    {
-        if (osd_set[i] == osd_num)
-        {
-            return true;
-        }
-    }
-    return false;
-}
-
-void osd_t::submit_primary_del_subops(osd_op_t *cur_op, osd_num_t *cur_set, uint64_t set_size, pg_osd_set_t & loc_set)
+void osd_t::submit_primary_del_subops(osd_op_t *cur_op, uint64_t *cur_set, pg_osd_set_t & loc_set)
 {
     osd_primary_op_data_t *op_data = cur_op->op_data;
-    bool rep = op_data->scheme == POOL_SCHEME_REPLICATED;
-    obj_ver_osd_t extra_chunks[loc_set.size()];
-    int chunks_to_del = 0;
+    int extra_chunks = 0;
     for (auto & chunk: loc_set)
     {
-        // ordered comparison for EC/XOR, unordered for replicated pools
-        if (!cur_set || (rep
-            ? !contains_osd(cur_set, set_size, chunk.osd_num)
-            : (chunk.osd_num != cur_set[chunk.role])))
+        if (!cur_set || chunk.osd_num != cur_set[chunk.role])
         {
-            extra_chunks[chunks_to_del++] = (obj_ver_osd_t){
-                .osd_num = chunk.osd_num,
-                .oid = {
-                    .inode = op_data->oid.inode,
-                    .stripe = op_data->oid.stripe | (rep ? 0 : chunk.role),
-                },
-                // Same version as write
-                .version = op_data->fact_ver,
-            };
+            extra_chunks++;
         }
     }
-    submit_primary_del_batch(cur_op, extra_chunks, chunks_to_del);
-}
-
-void osd_t::submit_primary_del_batch(osd_op_t *cur_op, obj_ver_osd_t *chunks_to_delete, int chunks_to_delete_count)
-{
-    osd_primary_op_data_t *op_data = cur_op->op_data;
-    op_data->n_subops = chunks_to_delete_count;
+    op_data->n_subops = extra_chunks;
     op_data->done = op_data->errors = 0;
-    if (!op_data->n_subops)
+    if (!extra_chunks)
     {
         return;
     }
-    osd_op_t *subops = new osd_op_t[chunks_to_delete_count];
+    osd_op_t *subops = new osd_op_t[extra_chunks];
     op_data->subops = subops;
-    for (int i = 0; i < chunks_to_delete_count; i++)
+    int i = 0;
+    for (auto & chunk: loc_set)
+    {
+        if (!cur_set || chunk.osd_num != cur_set[chunk.role])
     {
-        auto & chunk = chunks_to_delete[i];
         if (chunk.osd_num == this->osd_num)
         {
             clock_gettime(CLOCK_REALTIME, &subops[i].tv_begin);
@@ -447,23 +375,32 @@ void osd_t::submit_primary_del_batch(osd_op_t *cur_op, obj_ver_osd_t *chunks_to_delete, int chunks_to_delete_count)
             {
                 handle_primary_bs_subop(subop);
             },
-            .oid = chunk.oid,
-            .version = chunk.version,
+            .oid = {
+                .inode = op_data->oid.inode,
+                .stripe = op_data->oid.stripe | chunk.role,
+            },
+            // Same version as write
+            .version = op_data->fact_ver,
         });
         bs->enqueue_op(subops[i].bs_op);
     }
     else
     {
         subops[i].op_type = OSD_OP_OUT;
-        subops[i].send_list.push_back(subops[i].req.buf, OSD_PACKET_SIZE);
         subops[i].peer_fd = c_cli.osd_peer_fds.at(chunk.osd_num);
         subops[i].req.sec_del = {
             .header = {
                 .magic = SECONDARY_OSD_OP_MAGIC,
                 .id = c_cli.next_subop_id++,
-                .opcode = OSD_OP_SEC_DELETE,
+                .opcode = OSD_OP_SECONDARY_DELETE,
             },
-            .oid = chunk.oid,
-            .version = chunk.version,
+            .oid = {
+                .inode = op_data->oid.inode,
+                .stripe = op_data->oid.stripe | chunk.role,
+            },
+            // Same version as write
+            .version = op_data->fact_ver,
         };
         subops[i].callback = [cur_op, this](osd_op_t *subop)
         {
@@ -477,20 +414,22 @@ void osd_t::submit_primary_del_batch(osd_op_t *cur_op, obj_ver_osd_t *chunks_to_delete, int chunks_to_delete_count)
         };
         c_cli.outbox_push(&subops[i]);
     }
+    i++;
+    }
     }
 }
 
 void osd_t::submit_primary_sync_subops(osd_op_t *cur_op)
 {
     osd_primary_op_data_t *op_data = cur_op->op_data;
-    int n_osds = op_data->dirty_osd_count;
+    int n_osds = op_data->unstable_write_osds->size();
     osd_op_t *subops = new osd_op_t[n_osds];
     op_data->done = op_data->errors = 0;
     op_data->n_subops = n_osds;
     op_data->subops = subops;
     for (int i = 0; i < n_osds; i++)
     {
-        osd_num_t sync_osd = op_data->dirty_osds[i];
+        osd_num_t sync_osd = (*(op_data->unstable_write_osds))[i].osd_num;
         if (sync_osd == this->osd_num)
         {
             clock_gettime(CLOCK_REALTIME, &subops[i].tv_begin);
@@ -507,12 +446,13 @@ void osd_t::submit_primary_sync_subops(osd_op_t *cur_op)
         else
         {
             subops[i].op_type = OSD_OP_OUT;
-            subops[i].send_list.push_back(subops[i].req.buf, OSD_PACKET_SIZE);
             subops[i].peer_fd = c_cli.osd_peer_fds.at(sync_osd);
             subops[i].req.sec_sync = {
                 .header = {
                     .magic = SECONDARY_OSD_OP_MAGIC,
                     .id = c_cli.next_subop_id++,
-                    .opcode = OSD_OP_SEC_SYNC,
+                    .opcode = OSD_OP_SECONDARY_SYNC,
                 },
             };
             subops[i].callback = [cur_op, this](osd_op_t *subop)
@@ -545,7 +485,7 @@ void osd_t::submit_primary_stab_subops(osd_op_t *cur_op)
         {
             clock_gettime(CLOCK_REALTIME, &subops[i].tv_begin);
             subops[i].op_type = (uint64_t)cur_op;
-            subops[i].bs_op = new blockstore_op_t((blockstore_op_t){
+            subops[i].bs_op = new blockstore_op_t({
                 .opcode = BS_OP_STABLE,
                 .callback = [subop = &subops[i], this](blockstore_op_t *bs_subop)
                 {
@@ -559,16 +499,17 @@ void osd_t::submit_primary_stab_subops(osd_op_t *cur_op)
         else
         {
             subops[i].op_type = OSD_OP_OUT;
-            subops[i].send_list.push_back(subops[i].req.buf, OSD_PACKET_SIZE);
             subops[i].peer_fd = c_cli.osd_peer_fds.at(stab_osd.osd_num);
             subops[i].req.sec_stab = {
                 .header = {
                     .magic = SECONDARY_OSD_OP_MAGIC,
                     .id = c_cli.next_subop_id++,
-                    .opcode = OSD_OP_SEC_STABILIZE,
+                    .opcode = OSD_OP_SECONDARY_STABILIZE,
                 },
                 .len = (uint64_t)(stab_osd.len * sizeof(obj_ver_id)),
             };
-            subops[i].iov.push_back(op_data->unstable_writes + stab_osd.start, stab_osd.len * sizeof(obj_ver_id));
+            subops[i].send_list.push_back(op_data->unstable_writes + stab_osd.start, stab_osd.len * sizeof(obj_ver_id));
             subops[i].callback = [cur_op, this](osd_op_t *subop)
             {
                 int fail_fd = subop->reply.hdr.retval != 0 ? subop->peer_fd : -1;
@@ -587,28 +528,24 @@ void osd_t::submit_primary_stab_subops(osd_op_t *cur_op)
 void osd_t::pg_cancel_write_queue(pg_t & pg, osd_op_t *first_op, object_id oid, int retval)
 {
     auto st_it = pg.write_queue.find(oid), it = st_it;
-    if (it == pg.write_queue.end() || it->second != first_op)
+    finish_op(first_op, retval);
+    if (it != pg.write_queue.end() && it->second == first_op)
+    {
+        it++;
+    }
+    else
     {
         // Write queue doesn't match the first operation.
         // first_op is a leftover operation from the previous peering of the same PG.
-        finish_op(first_op, retval);
         return;
     }
-    std::vector<osd_op_t*> cancel_ops;
-    while (it != pg.write_queue.end())
+    while (it != pg.write_queue.end() && it->first == oid)
     {
-        cancel_ops.push_back(it->second);
+        finish_op(it->second, retval);
        it++;
     }
     if (st_it != it)
     {
-        // First erase them and then run finish_op() for the sake of reenterability
-        // Calling finish_op() on a live iterator previously triggered a bug where some
-        // of the OSDs were looping infinitely if you stopped all of them with kill -INT during recovery
         pg.write_queue.erase(st_it, it);
-        for (auto op: cancel_ops)
-        {
-            finish_op(op, retval);
-        }
     }
 }
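
One side of the final hunk above deliberately copies the queued operations out, erases the whole queue range, and only then runs finish_op() on them: as its comments explain, finish_op() may re-enter the write queue, and invoking it over live iterators once made OSDs loop forever when stopped during recovery. A standalone sketch of that erase-before-callback pattern (hypothetical types, not code from this diff):

#include <map>
#include <vector>
#include <functional>

// Hypothetical stand-ins for pg_t::write_queue and osd_op_t.
struct op_t { std::function<void(int)> on_finish; };
using write_queue_t = std::multimap<int, op_t*>;

// Copy the queued ops for <oid> out and erase them BEFORE running any
// callback: a callback may re-enter the queue, so no live iterator may
// be held across it.
void cancel_queued(write_queue_t & q, int oid, int retval)
{
    auto range = q.equal_range(oid);
    std::vector<op_t*> cancel_ops;
    for (auto it = range.first; it != range.second; it++)
        cancel_ops.push_back(it->second);
    q.erase(range.first, range.second);
    for (auto op: cancel_ops)
        op->on_finish(retval);
}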

osd_rmw.cpp (new file, 450 lines)

@@ -0,0 +1,450 @@
#include <malloc.h>
#include <string.h>
#include <assert.h>
#include <algorithm> // for std::min / std::max used below
#include "xor.h"
#include "osd_rmw.h"
static inline void extend_read(uint32_t start, uint32_t end, osd_rmw_stripe_t & stripe)
{
if (stripe.read_end == 0)
{
stripe.read_start = start;
stripe.read_end = end;
}
else
{
if (stripe.read_end < end)
stripe.read_end = end;
if (stripe.read_start > start)
stripe.read_start = start;
}
}
static inline void cover_read(uint32_t start, uint32_t end, osd_rmw_stripe_t & stripe)
{
// Subtract the pending write request (req_start..req_end) from the range (start..end),
// then extend the read range to cover whatever remains
if (start >= stripe.req_start &&
end <= stripe.req_end)
{
return;
}
if (start <= stripe.req_start &&
end >= stripe.req_start &&
end <= stripe.req_end)
{
end = stripe.req_start;
}
else if (start >= stripe.req_start &&
start <= stripe.req_end &&
end >= stripe.req_end)
{
start = stripe.req_end;
}
if (stripe.read_end == 0)
{
stripe.read_start = start;
stripe.read_end = end;
}
else
{
if (stripe.read_end < end)
stripe.read_end = end;
if (stripe.read_start > start)
stripe.read_start = start;
}
}
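
To make cover_read()'s clipping concrete, a small worked call (hypothetical values; assumes the definitions above are in scope):

// The stripe already has a pending write for [4096, 8192); asking it to
// cover [0, 8192) therefore only needs to read the prefix [0, 4096).
osd_rmw_stripe_t s = {};
s.req_start = 4096;
s.req_end = 8192;
cover_read(0, 8192, s);
// Result: s.read_start == 0, s.read_end == 4096.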
void split_stripes(uint64_t pg_minsize, uint32_t bs_block_size, uint32_t start, uint32_t end, osd_rmw_stripe_t *stripes)
{
if (end == 0)
{
// Zero length request - offset doesn't matter
return;
}
end = start+end;
for (int role = 0; role < pg_minsize; role++)
{
if (start < (1+role)*bs_block_size && end > role*bs_block_size)
{
stripes[role].req_start = start < role*bs_block_size ? 0 : start-role*bs_block_size;
stripes[role].req_end = end > (role+1)*bs_block_size ? bs_block_size : end-role*bs_block_size;
}
}
}
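
A worked example of the stripe arithmetic (hypothetical 128 KiB chunks; assumes the declarations above are in scope):

// A write of 8 KiB at offset 124 KiB, with two 128 KiB data stripes,
// lands partly at the end of stripe 0 and partly at the start of stripe 1.
osd_rmw_stripe_t stripes[2] = {};
split_stripes(2, 131072, 126976, 8192, stripes);
// stripes[0]: req_start = 126976, req_end = 131072 (last 4 KiB of stripe 0)
// stripes[1]: req_start = 0,      req_end = 4096   (first 4 KiB of stripe 1)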
void reconstruct_stripe(osd_rmw_stripe_t *stripes, int pg_size, int role)
{
int prev = -2;
for (int other = 0; other < pg_size; other++)
{
if (other != role)
{
if (prev == -2)
{
prev = other;
}
else if (prev >= 0)
{
assert(stripes[role].read_start >= stripes[prev].read_start &&
stripes[role].read_start >= stripes[other].read_start);
memxor(
stripes[prev].read_buf + (stripes[role].read_start - stripes[prev].read_start),
stripes[other].read_buf + (stripes[role].read_start - stripes[other].read_start),
stripes[role].read_buf, stripes[role].read_end - stripes[role].read_start
);
prev = -1;
}
else
{
assert(stripes[role].read_start >= stripes[other].read_start);
memxor(
stripes[role].read_buf,
stripes[other].read_buf + (stripes[role].read_start - stripes[other].read_start),
stripes[role].read_buf, stripes[role].read_end - stripes[role].read_start
);
}
}
}
}
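
reconstruct_stripe() relies on the single-parity XOR identity: with p = d0 ^ d1 ^ ..., any one lost chunk equals the XOR of all the survivors. A standalone toy check of that identity (plain bytes rather than the memxor()-based buffer path above):

#include <assert.h>
#include <stdint.h>

int main()
{
    uint8_t d0 = 0x5a, d1 = 0xc3;
    uint8_t p = d0 ^ d1;             // parity over the two data bytes
    assert((uint8_t)(p ^ d1) == d0); // a lost d0 is recovered from p and d1
    assert((uint8_t)(p ^ d0) == d1); // and symmetrically for d1
    return 0;
}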
int extend_missing_stripes(osd_rmw_stripe_t *stripes, osd_num_t *osd_set, int minsize, int size)
{
for (int role = 0; role < minsize; role++)
{
if (stripes[role].read_end != 0 && osd_set[role] == 0)
{
stripes[role].missing = true;
// Stripe is missing. Extend read to other stripes.
// We need at least pg_minsize stripes to recover the lost part.
// FIXME: LRC EC and similar codes don't require reading all the other stripes.
int exist = 0;
for (int j = 0; j < size; j++)
{
if (osd_set[j] != 0)
{
extend_read(stripes[role].read_start, stripes[role].read_end, stripes[j]);
exist++;
if (exist >= minsize)
{
break;
}
}
}
if (exist < minsize)
{
// Less than minsize stripes are available for this object
return -1;
}
}
}
return 0;
}
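
For example, in a 2+1 set where the OSD holding stripe 0 is gone, a read of stripe 0 is widened onto the two surviving stripes (hypothetical values):

osd_rmw_stripe_t stripes[3] = {};
stripes[0].read_start = 0;
stripes[0].read_end = 4096;
osd_num_t osd_set[3] = { 0, 2, 3 }; // stripe 0 has no live OSD
int r = extend_missing_stripes(stripes, osd_set, 2, 3);
// r == 0, stripes[0].missing == true, and stripes[1] / stripes[2]
// both now read [0, 4096) so the lost range can be XOR-reconstructed.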
void* alloc_read_buffer(osd_rmw_stripe_t *stripes, int read_pg_size, uint64_t add_size)
{
// Calculate buffer size
uint64_t buf_size = add_size;
for (int role = 0; role < read_pg_size; role++)
{
if (stripes[role].read_end != 0)
{
buf_size += stripes[role].read_end - stripes[role].read_start;
}
}
// Allocate buffer
void *buf = memalign(MEM_ALIGNMENT, buf_size);
uint64_t buf_pos = add_size;
for (int role = 0; role < read_pg_size; role++)
{
if (stripes[role].read_end != 0)
{
stripes[role].read_buf = buf + buf_pos;
buf_pos += stripes[role].read_end - stripes[role].read_start;
}
}
return buf;
}
void* calc_rmw(void *request_buf, osd_rmw_stripe_t *stripes, uint64_t *read_osd_set,
uint64_t pg_size, uint64_t pg_minsize, uint64_t pg_cursize, uint64_t *write_osd_set, uint64_t chunk_size)
{
// Generic parity modification (read-modify-write) algorithm
// Read -> Reconstruct missing chunks -> Calc parity chunks -> Write
// Now we always read continuous ranges. This means that an update of the beginning
// of one data stripe and the end of another will lead to a read of full paired stripes.
// FIXME: (Maybe) read small individual ranges in that case instead.
uint32_t start = 0, end = 0;
for (int role = 0; role < pg_minsize; role++)
{
if (stripes[role].req_end != 0)
{
start = !end || stripes[role].req_start < start ? stripes[role].req_start : start;
end = std::max(stripes[role].req_end, end);
stripes[role].write_start = stripes[role].req_start;
stripes[role].write_end = stripes[role].req_end;
}
}
int write_parity = 0;
for (int role = pg_minsize; role < pg_size; role++)
{
if (write_osd_set[role] != 0)
{
write_parity = 1;
stripes[role].write_start = start;
stripes[role].write_end = end;
}
}
if (write_parity)
{
for (int role = 0; role < pg_minsize; role++)
{
cover_read(start, end, stripes[role]);
}
}
if (write_osd_set != read_osd_set)
{
pg_cursize = 0;
// Object is degraded/misplaced and will be moved to <write_osd_set>
for (int role = 0; role < pg_size; role++)
{
if (write_osd_set[role] != read_osd_set[role])
{
// FIXME: For EC with more than 2+1 chunks: handle the case when write_osd_set == 0 and read_osd_set != 0
// We need to get data for any moved / recovered chunk,
// and we need a continuous write buffer, so we only optimize
// for the case when the whole chunk is overwritten in the request
if (stripes[role].req_start != 0 ||
stripes[role].req_end != chunk_size)
{
stripes[role].read_start = 0;
stripes[role].read_end = chunk_size;
// Warning: We don't modify write_start/write_end here, we do it in calc_rmw_parity()
}
}
if (read_osd_set[role] != 0)
{
pg_cursize++;
}
}
}
if (pg_cursize < pg_size)
{
// Some stripe(s) are missing, so we need to read parity
for (int role = 0; role < pg_size; role++)
{
if (read_osd_set[role] == 0)
{
stripes[role].missing = true;
if (stripes[role].read_end != 0)
{
int found = 0;
for (int r2 = 0; r2 < pg_size && found < pg_minsize; r2++)
{
// Read the non-covered range of <role> from at least <minsize> other stripes to reconstruct it
if (read_osd_set[r2] != 0)
{
extend_read(stripes[role].read_start, stripes[role].read_end, stripes[r2]);
found++;
}
}
if (found < pg_minsize)
{
// FIXME Object is incomplete - refuse partial overwrite
assert(0);
}
}
}
}
}
// Allocate read buffers
void *rmw_buf = alloc_read_buffer(stripes, pg_size, (write_parity ? pg_size-pg_minsize : 0) * (end - start));
// Position write buffers
uint64_t buf_pos = 0, in_pos = 0;
for (int role = 0; role < pg_size; role++)
{
if (stripes[role].req_end != 0)
{
stripes[role].write_buf = request_buf + in_pos;
in_pos += stripes[role].req_end - stripes[role].req_start;
}
else if (role >= pg_minsize && write_osd_set[role] != 0 && end != 0)
{
stripes[role].write_buf = rmw_buf + buf_pos;
buf_pos += end - start;
}
}
return rmw_buf;
}
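
As a concrete sketch of this planning step (hypothetical 2+1 layout, all OSDs up; request_buf is assumed to already hold the 4 KiB being written):

osd_rmw_stripe_t stripes[3] = {};
uint64_t osd_set[3] = { 1, 2, 3 };
void *request_buf = memalign(MEM_ALIGNMENT, 4096);
split_stripes(2, 131072, 0, 4096, stripes);
void *rmw_buf = calc_rmw(request_buf, stripes, osd_set, 3, 2, 3, osd_set, 131072);
// Result: stripe 0 writes [0, 4096) straight from request_buf, the parity
// stripe (role 2) gets write_start = 0 / write_end = 4096 inside rmw_buf,
// and stripe 1 is scheduled to read its old [0, 4096) so that the new
// parity can be computed from new data XOR old data.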
static void get_old_new_buffers(osd_rmw_stripe_t & stripe, uint32_t wr_start, uint32_t wr_end, buf_len_t *bufs, int & nbufs)
{
uint32_t ns = 0, ne = 0, os = 0, oe = 0;
if (stripe.req_end > wr_start &&
stripe.req_start < wr_end)
{
ns = std::max(stripe.req_start, wr_start);
ne = std::min(stripe.req_end, wr_end);
}
if (stripe.read_end > wr_start &&
stripe.read_start < wr_end)
{
os = std::max(stripe.read_start, wr_start);
oe = std::min(stripe.read_end, wr_end);
}
if (ne && (!oe || ns <= os))
{
// NEW or NEW->OLD
bufs[nbufs++] = { .buf = stripe.write_buf + ns - stripe.req_start, .len = ne-ns };
if (os < ne)
os = ne;
if (oe > os)
{
// NEW->OLD
bufs[nbufs++] = { .buf = stripe.read_buf + os - stripe.read_start, .len = oe-os };
}
}
else if (oe)
{
// OLD or OLD->NEW or OLD->NEW->OLD
if (ne)
{
// OLD->NEW or OLD->NEW->OLD
bufs[nbufs++] = { .buf = stripe.read_buf + os - stripe.read_start, .len = ns-os };
bufs[nbufs++] = { .buf = stripe.write_buf + ns - stripe.req_start, .len = ne-ns };
if (oe > ne)
{
// OLD->NEW->OLD
bufs[nbufs++] = { .buf = stripe.read_buf + ne - stripe.read_start, .len = oe-ne };
}
}
else
{
// OLD
bufs[nbufs++] = { .buf = stripe.read_buf + os - stripe.read_start, .len = oe-os };
}
}
}
static void xor_multiple_buffers(buf_len_t *xor1, int n1, buf_len_t *xor2, int n2, void *dest, uint32_t len)
{
assert(n1 > 0 && n2 > 0);
int i1 = 0, i2 = 0;
uint32_t start1 = 0, start2 = 0, end1 = xor1[0].len, end2 = xor2[0].len;
uint32_t pos = 0;
while (pos < len)
{
// We know for sure that ranges overlap
uint32_t end = std::min(end1, end2);
memxor(xor1[i1].buf + pos-start1, xor2[i2].buf + pos-start2, dest+pos, end-pos);
pos = end;
if (pos >= end1)
{
i1++;
if (i1 >= n1)
{
assert(pos >= end2);
return;
}
start1 = end1;
end1 += xor1[i1].len;
}
if (pos >= end2)
{
i2++;
start2 = end2;
end2 += xor2[i2].len;
}
}
}
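
xor_multiple_buffers() above walks the two scatter lists in lockstep. A minimal check of that merge (hypothetical data; the two input lists split the same four bytes differently):

uint8_t a[4] = { 1, 2, 3, 4 }, b[4] = { 5, 6, 7, 8 }, out[4];
buf_len_t xor1[2] = { { .buf = a, .len = 1 }, { .buf = a+1, .len = 3 } };
buf_len_t xor2[1] = { { .buf = b, .len = 4 } };
xor_multiple_buffers(xor1, 2, xor2, 1, out, 4);
// out[i] == (a[i] ^ b[i]) for all four bytes, regardless of the split.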
void calc_rmw_parity(osd_rmw_stripe_t *stripes, int pg_size, uint64_t *read_osd_set, uint64_t *write_osd_set, uint32_t chunk_size)
{
int pg_minsize = pg_size-1;
for (int role = 0; role < pg_size; role++)
{
if (stripes[role].read_end != 0 && stripes[role].missing)
{
// Reconstruct missing stripe (EC k+1)
reconstruct_stripe(stripes, pg_size, role);
break;
}
}
uint32_t start = 0, end = 0;
if (!stripes[pg_minsize].missing || write_osd_set != read_osd_set)
{
for (int role = 0; role < pg_minsize; role++)
{
if (stripes[role].req_end != 0)
{
start = !end || stripes[role].req_start < start ? stripes[role].req_start : start;
end = std::max(stripes[role].req_end, end);
}
}
}
if (write_osd_set != read_osd_set)
{
for (int role = 0; role < pg_minsize; role++)
{
if (write_osd_set[role] != read_osd_set[role] &&
(stripes[role].req_start != 0 || stripes[role].req_end != chunk_size))
{
// FIXME again: handle the case when write_osd_set[role] is 0
// Copy modified chunk into the read buffer to write it back
memcpy(
stripes[role].read_buf + stripes[role].req_start,
stripes[role].write_buf,
stripes[role].req_end - stripes[role].req_start
);
stripes[role].write_buf = stripes[role].read_buf;
stripes[role].write_start = 0;
stripes[role].write_end = chunk_size;
}
}
}
if (!stripes[pg_minsize].missing && end != 0)
{
// Calculate new parity (EC k+1)
int parity = pg_minsize, prev = -2;
for (int other = 0; other < pg_minsize; other++)
{
if (prev == -2)
{
prev = other;
}
else
{
int n1 = 0, n2 = 0;
buf_len_t xor1[3], xor2[3];
if (prev == -1)
{
xor1[n1++] = { .buf = stripes[parity].write_buf, .len = end-start };
}
else
{
get_old_new_buffers(stripes[prev], start, end, xor1, n1);
prev = -1;
}
get_old_new_buffers(stripes[other], start, end, xor2, n2);
xor_multiple_buffers(xor1, n1, xor2, n2, stripes[parity].write_buf, end-start);
}
}
}
if (write_osd_set != read_osd_set)
{
for (int role = pg_minsize; role < pg_size; role++)
{
if (write_osd_set[role] != read_osd_set[role] && (start != 0 || end != chunk_size))
{
// Copy new parity into the read buffer to write it back
memcpy(
stripes[role].read_buf + start,
stripes[role].write_buf,
end - start
);
stripes[role].write_buf = stripes[role].read_buf;
stripes[role].write_start = 0;
stripes[role].write_end = chunk_size;
}
}
}
}
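
Putting the pieces together, a caller is expected to drive these helpers in the sequence sketched below (hypothetical glue code; the real OSD performs step 3 through blockstore and secondary-OSD subops, and the 2+1 values are illustrative):

osd_rmw_stripe_t stripes[3] = {};
uint64_t osd_set[3] = { 1, 2, 3 };
void *request_buf = memalign(MEM_ALIGNMENT, 4096);
// 1. Map the request offset/length onto the data stripes.
split_stripes(2, 131072, 0, 4096, stripes);
// 2. Plan reads/writes and allocate the shared read+parity buffer.
void *rmw_buf = calc_rmw(request_buf, stripes, osd_set, 3, 2, 3, osd_set, 131072);
// 3. ...read stripes[role].read_start..read_end into read_buf here...
// 4. Reconstruct missing chunks and compute the new parity into write_buf.
calc_rmw_parity(stripes, 3, osd_set, osd_set, 131072);
// 5. ...write stripes[role].write_start..write_end from write_buf...
free(request_buf);
free(rmw_buf);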

osd_rmw.h (new file, 37 lines)

@@ -0,0 +1,37 @@
#pragma once
#include <stdint.h>
#include "object_id.h"
#include "osd_id.h"
#ifndef MEM_ALIGNMENT
#define MEM_ALIGNMENT 512
#endif
struct buf_len_t
{
void *buf;
uint64_t len;
};
struct osd_rmw_stripe_t
{
void *read_buf, *write_buf;
uint32_t req_start, req_end;
uint32_t read_start, read_end;
uint32_t write_start, write_end;
bool missing;
};
void split_stripes(uint64_t pg_minsize, uint32_t bs_block_size, uint32_t start, uint32_t len, osd_rmw_stripe_t *stripes);
void reconstruct_stripe(osd_rmw_stripe_t *stripes, int pg_size, int role);
int extend_missing_stripes(osd_rmw_stripe_t *stripes, osd_num_t *osd_set, int minsize, int size);
void* alloc_read_buffer(osd_rmw_stripe_t *stripes, int read_pg_size, uint64_t add_size);
void* calc_rmw(void *request_buf, osd_rmw_stripe_t *stripes, uint64_t *read_osd_set,
uint64_t pg_size, uint64_t pg_minsize, uint64_t pg_cursize, uint64_t *write_osd_set, uint64_t chunk_size);
void calc_rmw_parity(osd_rmw_stripe_t *stripes, int pg_size, uint64_t *read_osd_set, uint64_t *write_osd_set, uint32_t chunk_size);
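
The three offset pairs in osd_rmw_stripe_t are easiest to read as one picture. For a partially overwritten data stripe after calc_rmw(), the typical relationship is (an illustration, not a normative layout):

0 ........ req_start ========== req_end ........ chunk_size
write_start..write_end = req_start..req_end  (new bytes; write_buf points into the request buffer)
read_start..read_end   = the covering range minus the overwritten part (old bytes; read_buf is carved out of the buffer from alloc_read_buffer())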

Some files were not shown because too many files have changed in this diff.