Compare commits
97 Commits
test-doubl
...
cached-rea
Author | SHA1 | Date | |
---|---|---|---|
37d14e35f3 | |||
8c4a11b51c | |||
98d5849190 | |||
e4ea8a9514 | |||
3c565e7b94 | |||
708918a4c7 | |||
8e099c1d11 | |||
debb00a535 | |||
c8891ab1d6 | |||
7062b73d87 | |||
02e24f5144 | |||
55f506f6e0 | |||
e12dd9b82c | |||
82d8848b8f | |||
ef800408dc | |||
3b1150c478 | |||
ccdf87dc81 | |||
bede73d158 | |||
8e35319a34 | |||
dc6e88e2ca | |||
4c3370220b | |||
f02344c0a4 | |||
b369032665 | |||
319b0833eb | |||
0641b06fb1 | |||
3b9873b9a9 | |||
eef97a0dc4 | |||
5b16e5ab5b | |||
bb430fccd5 | |||
bafadd5559 | |||
6ebca5fedc | |||
e3e2325ef5 | |||
9a908f3e66 | |||
89f6fef920 | |||
63b9382067 | |||
8a7dea9fa2 | |||
1adf77f8fb | |||
7a530346a6 | |||
cc1f03971d | |||
a0aac7eb2a | |||
ac7b834af3 | |||
ee0c78fd74 | |||
e6646a5b2f | |||
ae69662b17 | |||
57ad4c3636 | |||
b7e4d0c9bf | |||
161a23c966 | |||
2f999d8607 | |||
d007a374f2 | |||
45c0694853 | |||
57bcba2406 | |||
30ac899074 | |||
2348d39cf4 | |||
3de7929fe5 | |||
07b2196bc2 | |||
b8e30608d6 | |||
a612cdca47 | |||
c8d61568b5 | |||
84ed3c6395 | |||
a7b57386c0 | |||
9d4ea5f764 | |||
000e4944ec | |||
8426616d89 | |||
1a841344ec | |||
8603b5cb1d | |||
f12b8e45a9 | |||
878ccbb6ea | |||
b14220b4d0 | |||
181d6ba407 | |||
63c2b9832c | |||
10e2e6a7c8 | |||
a598428992 | |||
08a677b684 | |||
7c8fbdad16 | |||
2f9353df60 | |||
57c744f288 | |||
a11ca56fb1 | |||
b84927b340 | |||
83cacba226 | |||
2c8f0bc6d5 | |||
7ae5b0e368 | |||
926be372fd | |||
6222779b52 | |||
a4186e20aa | |||
c74a424930 | |||
32f2c4dd27 | |||
3ad16b9a1a | |||
1c2df841c2 | |||
aa5dacc7a9 | |||
affe8fc270 | |||
4fdc49bdc7 | |||
86b4682975 | |||
bdd48e4cf1 | |||
af8c3411cd | |||
9c405009f3 | |||
f9fbea25a4 | |||
2c9a10d081 |
@@ -10,6 +10,9 @@ RUN set -e -x; \
|
||||
ln -s /root/fio-build/fio-*/ ./fio; \
|
||||
ln -s /root/qemu-build/qemu-*/ ./qemu; \
|
||||
ls /usr/include/linux/raw.h || cp ./debian/raw.h /usr/include/linux/raw.h; \
|
||||
cd mon; \
|
||||
npm install; \
|
||||
cd ..; \
|
||||
mkdir build; \
|
||||
cd build; \
|
||||
cmake .. -DWITH_ASAN=yes -DWITH_QEMU=yes; \
|
||||
|
@@ -71,7 +71,7 @@ jobs:
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
timeout-minutes: 10
|
||||
run: /root/vitastor/tests/test_add_osd.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
@@ -190,24 +190,6 @@ jobs:
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_failure_domain:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: /root/vitastor/tests/test_failure_domain.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_interrupted_rebalance:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
@@ -280,7 +262,7 @@ jobs:
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_minsize_1:
|
||||
test_failure_domain:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
@@ -288,115 +270,7 @@ jobs:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: /root/vitastor/tests/test_minsize_1.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_move_reappear:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: /root/vitastor/tests/test_move_reappear.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_rebalance_verify:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: /root/vitastor/tests/test_rebalance_verify.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_rebalance_verify_imm:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: IMMEDIATE_COMMIT=1 /root/vitastor/tests/test_rebalance_verify.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_rebalance_verify_ec:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: SCHEME=ec /root/vitastor/tests/test_rebalance_verify.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_rebalance_verify_ec_imm:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: SCHEME=ec IMMEDIATE_COMMIT=1 /root/vitastor/tests/test_rebalance_verify.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_rm:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: /root/vitastor/tests/test_rm.sh
|
||||
run: /root/vitastor/tests/test_failure_domain.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
@@ -442,6 +316,132 @@ jobs:
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_minsize_1:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: /root/vitastor/tests/test_minsize_1.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_move_reappear:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: /root/vitastor/tests/test_move_reappear.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_rm:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: /root/vitastor/tests/test_rm.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_snapshot_chain:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: /root/vitastor/tests/test_snapshot_chain.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_snapshot_chain_ec:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: SCHEME=ec /root/vitastor/tests/test_snapshot_chain.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_snapshot_down:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: /root/vitastor/tests/test_snapshot_down.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_snapshot_down_ec:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 3
|
||||
run: SCHEME=ec /root/vitastor/tests/test_snapshot_down.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_splitbrain:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
@@ -460,6 +460,78 @@ jobs:
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_rebalance_verify:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 10
|
||||
run: /root/vitastor/tests/test_rebalance_verify.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_rebalance_verify_imm:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 10
|
||||
run: IMMEDIATE_COMMIT=1 /root/vitastor/tests/test_rebalance_verify.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_rebalance_verify_ec:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 10
|
||||
run: SCHEME=ec /root/vitastor/tests/test_rebalance_verify.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_rebalance_verify_ec_imm:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 10
|
||||
run: SCHEME=ec IMMEDIATE_COMMIT=1 /root/vitastor/tests/test_rebalance_verify.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_write:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
@@ -550,6 +622,114 @@ jobs:
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_heal_csum_32k_dmj:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 10
|
||||
run: TEST_NAME=csum_32k_dmj OSD_ARGS="--data_csum_type crc32c --csum_block_size 32k --inmemory_metadata false --inmemory_journal false" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_heal_csum_32k_dj:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 10
|
||||
run: TEST_NAME=csum_32k_dj OSD_ARGS="--data_csum_type crc32c --csum_block_size 32k --inmemory_journal false" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_heal_csum_32k:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 10
|
||||
run: TEST_NAME=csum_32k OSD_ARGS="--data_csum_type crc32c --csum_block_size 32k" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_heal_csum_4k_dmj:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 10
|
||||
run: TEST_NAME=csum_4k_dmj OSD_ARGS="--data_csum_type crc32c --inmemory_metadata false --inmemory_journal false" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_heal_csum_4k_dj:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 10
|
||||
run: TEST_NAME=csum_4k_dj OSD_ARGS="--data_csum_type crc32c --inmemory_journal false" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_heal_csum_4k:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
container: ${{env.TEST_IMAGE}}:${{github.sha}}
|
||||
steps:
|
||||
- name: Run test
|
||||
id: test
|
||||
timeout-minutes: 10
|
||||
run: TEST_NAME=csum_4k OSD_ARGS="--data_csum_type crc32c" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
|
||||
- name: Print logs
|
||||
if: always() && steps.test.outcome == 'failure'
|
||||
run: |
|
||||
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
|
||||
echo "-------- $i --------"
|
||||
cat $i
|
||||
echo ""
|
||||
done
|
||||
|
||||
test_scrub:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
|
@@ -7,15 +7,22 @@ for my $line (<>)
|
||||
if ($line =~ /\.\/(test_[^\.]+)/s)
|
||||
{
|
||||
chomp $line;
|
||||
my $test_name = $1;
|
||||
my $base_name = $1;
|
||||
my $test_name = $base_name;
|
||||
my $timeout = 3;
|
||||
if ($test_name eq 'test_etcd_fail' || $test_name eq 'test_heal' || $test_name eq 'test_interrupted_rebalance')
|
||||
if ($test_name eq 'test_etcd_fail' || $test_name eq 'test_heal' || $test_name eq 'test_add_osd' ||
|
||||
$test_name eq 'test_interrupted_rebalance' || $test_name eq 'test_rebalance_verify')
|
||||
{
|
||||
$timeout = 10;
|
||||
}
|
||||
while ($line =~ /([^\s=]+)=(\S+)/gs)
|
||||
{
|
||||
if ($1 eq 'SCHEME' && $2 eq 'ec')
|
||||
if ($1 eq 'TEST_NAME')
|
||||
{
|
||||
$test_name = $base_name.'_'.$2;
|
||||
last;
|
||||
}
|
||||
elsif ($1 eq 'SCHEME' && $2 eq 'ec')
|
||||
{
|
||||
$test_name .= '_ec';
|
||||
}
|
||||
|
@@ -2,6 +2,6 @@ cmake_minimum_required(VERSION 2.8.12)
|
||||
|
||||
project(vitastor)
|
||||
|
||||
set(VERSION "0.9.0")
|
||||
set(VERSION "0.9.3")
|
||||
|
||||
add_subdirectory(src)
|
||||
|
@@ -15,7 +15,7 @@ Vitastor архитектурно похож на Ceph, что означает
|
||||
и автоматическое распределение данных по любому числу дисков любого размера с настраиваемыми схемами
|
||||
избыточности - репликацией или с произвольными кодами коррекции ошибок.
|
||||
|
||||
Vitastor нацелен на SSD и SSD+HDD кластеры с как минимум 10 Гбит/с сетью, поддерживает
|
||||
Vitastor нацелен в первую очередь на SSD и SSD+HDD кластеры с как минимум 10 Гбит/с сетью, поддерживает
|
||||
TCP и RDMA и на хорошем железе может достигать задержки 4 КБ чтения и записи на уровне ~0.1 мс,
|
||||
что примерно в 10 раз быстрее, чем Ceph и другие популярные программные СХД.
|
||||
|
||||
|
@@ -14,8 +14,8 @@ Vitastor is architecturally similar to Ceph which means strong consistency,
|
||||
primary-replication, symmetric clustering and automatic data distribution over any
|
||||
number of drives of any size with configurable redundancy (replication or erasure codes/XOR).
|
||||
|
||||
Vitastor targets SSD and SSD+HDD clusters with at least 10 Gbit/s network, supports
|
||||
TCP and RDMA and may achieve 4 KB read and write latency as low as ~0.1 ms
|
||||
Vitastor targets primarily SSD and SSD+HDD clusters with at least 10 Gbit/s network,
|
||||
supports TCP and RDMA and may achieve 4 KB read and write latency as low as ~0.1 ms
|
||||
with proper hardware which is ~10 times faster than other popular SDS's like Ceph
|
||||
or internal systems of public clouds.
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
VERSION ?= v0.9.0
|
||||
VERSION ?= v0.9.3
|
||||
|
||||
all: build push
|
||||
|
||||
|
@@ -49,7 +49,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: vitalif/vitastor-csi:v0.9.0
|
||||
image: vitalif/vitastor-csi:v0.9.3
|
||||
args:
|
||||
- "--node=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
|
@@ -116,7 +116,7 @@ spec:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
image: vitalif/vitastor-csi:v0.9.0
|
||||
image: vitalif/vitastor-csi:v0.9.3
|
||||
args:
|
||||
- "--node=$(NODE_ID)"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
|
@@ -5,7 +5,7 @@ package vitastor
|
||||
|
||||
const (
|
||||
vitastorCSIDriverName = "csi.vitastor.io"
|
||||
vitastorCSIDriverVersion = "0.9.0"
|
||||
vitastorCSIDriverVersion = "0.9.3"
|
||||
)
|
||||
|
||||
// Config struct fills the parameters of request or user input
|
||||
|
58
debian/build-pve-qemu.sh
vendored
Normal file
58
debian/build-pve-qemu.sh
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
exit
|
||||
|
||||
git clone https://git.yourcmc.ru/vitalif/pve-qemu .
|
||||
|
||||
# bookworm
|
||||
|
||||
docker run -it -v `pwd`/pve-qemu:/root/pve-qemu --name pve-qemu-bullseye debian:bullseye bash
|
||||
|
||||
perl -i -pe 's/Types: deb$/Types: deb deb-src/' /etc/apt/sources.list.d/debian.sources
|
||||
echo 'deb [arch=amd64] http://download.proxmox.com/debian/pve bookworm pve-no-subscription' >> /etc/apt/sources.list
|
||||
echo 'deb https://vitastor.io/debian bookworm main' >> /etc/apt/sources.list
|
||||
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf
|
||||
echo 'ru_RU UTF-8' >> /etc/locale.gen
|
||||
echo 'en_US UTF-8' >> /etc/locale.gen
|
||||
apt-get update
|
||||
apt-get install wget ca-certificates
|
||||
wget https://enterprise.proxmox.com/debian/proxmox-release-bookworm.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
|
||||
wget https://vitastor.io/debian/pubkey.gpg -O /etc/apt/trusted.gpg.d/vitastor.gpg
|
||||
apt-get update
|
||||
apt-get install git devscripts equivs wget mc libjemalloc-dev vitastor-client-dev lintian locales
|
||||
mk-build-deps --install ./control
|
||||
|
||||
# bullseye
|
||||
|
||||
docker run -it -v `pwd`/pve-qemu:/root/pve-qemu --name pve-qemu-bullseye debian:bullseye bash
|
||||
|
||||
grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb /deb-src /' >> /etc/apt/sources.list
|
||||
echo 'deb [arch=amd64] http://download.proxmox.com/debian/pve bullseye pve-no-subscription' >> /etc/apt/sources.list
|
||||
echo 'deb https://vitastor.io/debian bullseye main' >> /etc/apt/sources.list
|
||||
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf
|
||||
echo 'ru_RU UTF-8' >> /etc/locale.gen
|
||||
echo 'en_US UTF-8' >> /etc/locale.gen
|
||||
apt-get update
|
||||
apt-get install wget
|
||||
wget https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
|
||||
wget https://vitastor.io/debian/pubkey.gpg -O /etc/apt/trusted.gpg.d/vitastor.gpg
|
||||
apt-get update
|
||||
apt-get install git devscripts equivs wget mc libjemalloc-dev vitastor-client-dev lintian locales
|
||||
mk-build-deps --install ./control
|
||||
|
||||
# buster
|
||||
|
||||
docker run -it -v `pwd`/pve-qemu:/root/pve-qemu --name pve-qemu-buster debian:buster bash
|
||||
|
||||
grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb /deb-src /' >> /etc/apt/sources.list
|
||||
echo 'deb [arch=amd64] http://download.proxmox.com/debian/pve buster pve-no-subscription' >> /etc/apt/sources.list
|
||||
echo 'deb https://vitastor.io/debian buster main' >> /etc/apt/sources.list
|
||||
echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
|
||||
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf
|
||||
echo 'ru_RU UTF-8' >> /etc/locale.gen
|
||||
echo 'en_US UTF-8' >> /etc/locale.gen
|
||||
apt-get update
|
||||
apt-get install wget ca-certificates
|
||||
wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
|
||||
wget https://vitastor.io/debian/pubkey.gpg -O /etc/apt/trusted.gpg.d/vitastor.gpg
|
||||
apt-get update
|
||||
apt-get install git devscripts equivs wget mc libjemalloc-dev vitastor-client-dev lintian locales
|
||||
mk-build-deps --install ./control
|
7
debian/build-vitastor-bookworm.sh
vendored
Executable file
7
debian/build-vitastor-bookworm.sh
vendored
Executable file
@@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
cat < vitastor.Dockerfile > ../Dockerfile
|
||||
cd ..
|
||||
mkdir -p packages
|
||||
sudo podman build --build-arg REL=bookworm -v `pwd`/packages:/root/packages -f Dockerfile .
|
||||
rm Dockerfile
|
4
debian/changelog
vendored
4
debian/changelog
vendored
@@ -1,10 +1,10 @@
|
||||
vitastor (0.9.0-1) unstable; urgency=medium
|
||||
vitastor (0.9.3-1) unstable; urgency=medium
|
||||
|
||||
* Bugfixes
|
||||
|
||||
-- Vitaliy Filippov <vitalif@yourcmc.ru> Fri, 03 Jun 2022 02:09:44 +0300
|
||||
|
||||
vitastor (0.9.0-1) unstable; urgency=medium
|
||||
vitastor (0.9.3-1) unstable; urgency=medium
|
||||
|
||||
* Implement NFS proxy
|
||||
* Add documentation
|
||||
|
29
debian/patched-qemu.Dockerfile
vendored
29
debian/patched-qemu.Dockerfile
vendored
@@ -1,4 +1,4 @@
|
||||
# Build patched QEMU for Debian Buster or Bullseye/Sid inside a container
|
||||
# Build patched QEMU for Debian inside a container
|
||||
# cd ..; podman build --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f debian/patched-qemu.Dockerfile .
|
||||
|
||||
ARG REL=
|
||||
@@ -15,17 +15,19 @@ RUN if [ "$REL" = "buster" -o "$REL" = "bullseye" ]; then \
|
||||
echo 'Pin-Priority: 500' >> /etc/apt/preferences; \
|
||||
fi; \
|
||||
grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
|
||||
perl -i -pe 's/Types: deb$/Types: deb deb-src/' /etc/apt/sources.list.d/debian.sources || true; \
|
||||
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf; \
|
||||
echo 'APT::Install-Suggests false;' >> /etc/apt/apt.conf
|
||||
|
||||
RUN apt-get update
|
||||
RUN apt-get -y install qemu fio liburing1 liburing-dev libgoogle-perftools-dev devscripts
|
||||
RUN apt-get -y install fio liburing-dev libgoogle-perftools-dev devscripts
|
||||
RUN apt-get -y build-dep qemu
|
||||
# To build a custom version
|
||||
#RUN cp /root/packages/qemu-orig/* /root
|
||||
RUN apt-get --download-only source qemu
|
||||
|
||||
ADD patches/qemu-5.0-vitastor.patch patches/qemu-5.1-vitastor.patch patches/qemu-6.1-vitastor.patch src/qemu_driver.c /root/vitastor/patches/
|
||||
ADD patches /root/vitastor/patches
|
||||
ADD src/qemu_driver.c /root/vitastor/src/qemu_driver.c
|
||||
RUN set -e; \
|
||||
apt-get install -y wget; \
|
||||
wget -q -O /etc/apt/trusted.gpg.d/vitastor.gpg https://vitastor.io/debian/pubkey.gpg; \
|
||||
@@ -37,25 +39,16 @@ RUN set -e; \
|
||||
rm -rf /root/packages/qemu-$REL/*; \
|
||||
cd /root/packages/qemu-$REL; \
|
||||
dpkg-source -x /root/qemu*.dsc; \
|
||||
if ls -d /root/packages/qemu-$REL/qemu-5.0*; then \
|
||||
D=$(ls -d /root/packages/qemu-$REL/qemu-5.0*); \
|
||||
cp /root/vitastor/patches/qemu-5.0-vitastor.patch $D/debian/patches; \
|
||||
echo qemu-5.0-vitastor.patch >> $D/debian/patches/series; \
|
||||
elif ls /root/packages/qemu-$REL/qemu-6.1*; then \
|
||||
D=$(ls -d /root/packages/qemu-$REL/qemu-6.1*); \
|
||||
cp /root/vitastor/patches/qemu-6.1-vitastor.patch $D/debian/patches; \
|
||||
echo qemu-6.1-vitastor.patch >> $D/debian/patches/series; \
|
||||
else \
|
||||
cp /root/vitastor/patches/qemu-5.1-vitastor.patch /root/packages/qemu-$REL/qemu-*/debian/patches; \
|
||||
P=`ls -d /root/packages/qemu-$REL/qemu-*/debian/patches`; \
|
||||
echo qemu-5.1-vitastor.patch >> $P/series; \
|
||||
fi; \
|
||||
QEMU_VER=$(ls -d qemu*/ | perl -pe 's!^.*(\d+\.\d+).*!$1!'); \
|
||||
D=$(ls -d qemu*/); \
|
||||
cp /root/vitastor/patches/qemu-$QEMU_VER-vitastor.patch ./qemu-*/debian/patches; \
|
||||
echo qemu-$QEMU_VER-vitastor.patch >> $D/debian/patches/series; \
|
||||
cd /root/packages/qemu-$REL/qemu-*/; \
|
||||
quilt push -a; \
|
||||
quilt add block/vitastor.c; \
|
||||
cp /root/vitastor/patches/qemu_driver.c block/vitastor.c; \
|
||||
cp /root/vitastor/src/qemu_driver.c block/vitastor.c; \
|
||||
quilt refresh; \
|
||||
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)(~bpo[\d\+]*)?\).*$/$1/')+vitastor1; \
|
||||
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)(~bpo[\d\+]*)?\).*$/$1/')+vitastor3; \
|
||||
DEBEMAIL="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v $V 'Plug Vitastor block driver'; \
|
||||
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
|
||||
rm -rf /root/packages/qemu-$REL/qemu-*/
|
||||
|
13
debian/vitastor.Dockerfile
vendored
13
debian/vitastor.Dockerfile
vendored
@@ -1,4 +1,4 @@
|
||||
# Build Vitastor packages for Debian Buster or Bullseye/Sid inside a container
|
||||
# Build Vitastor packages for Debian inside a container
|
||||
# cd ..; podman build --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f debian/vitastor.Dockerfile .
|
||||
|
||||
ARG REL=
|
||||
@@ -15,11 +15,12 @@ RUN if [ "$REL" = "buster" -o "$REL" = "bullseye" ]; then \
|
||||
echo 'Pin-Priority: 500' >> /etc/apt/preferences; \
|
||||
fi; \
|
||||
grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
|
||||
perl -i -pe 's/Types: deb$/Types: deb deb-src/' /etc/apt/sources.list.d/debian.sources || true; \
|
||||
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf; \
|
||||
echo 'APT::Install-Suggests false;' >> /etc/apt/apt.conf
|
||||
|
||||
RUN apt-get update
|
||||
RUN apt-get -y install fio liburing1 liburing-dev libgoogle-perftools-dev devscripts
|
||||
RUN apt-get -y install fio liburing-dev libgoogle-perftools-dev devscripts
|
||||
RUN apt-get -y build-dep fio
|
||||
RUN apt-get --download-only source fio
|
||||
RUN apt-get update && apt-get -y install libjerasure-dev cmake libibverbs-dev libisal-dev
|
||||
@@ -34,8 +35,8 @@ RUN set -e -x; \
|
||||
mkdir -p /root/packages/vitastor-$REL; \
|
||||
rm -rf /root/packages/vitastor-$REL/*; \
|
||||
cd /root/packages/vitastor-$REL; \
|
||||
cp -r /root/vitastor vitastor-0.9.0; \
|
||||
cd vitastor-0.9.0; \
|
||||
cp -r /root/vitastor vitastor-0.9.3; \
|
||||
cd vitastor-0.9.3; \
|
||||
ln -s /root/fio-build/fio-*/ ./fio; \
|
||||
FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
|
||||
ls /usr/include/linux/raw.h || cp ./debian/raw.h /usr/include/linux/raw.h; \
|
||||
@@ -48,8 +49,8 @@ RUN set -e -x; \
|
||||
rm -rf a b; \
|
||||
echo "dep:fio=$FIO" > debian/fio_version; \
|
||||
cd /root/packages/vitastor-$REL; \
|
||||
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.9.0.orig.tar.xz vitastor-0.9.0; \
|
||||
cd vitastor-0.9.0; \
|
||||
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.9.3.orig.tar.xz vitastor-0.9.3; \
|
||||
cd vitastor-0.9.3; \
|
||||
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
|
||||
DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v "$V""$REL" "Rebuild for $REL"; \
|
||||
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
|
||||
|
@@ -21,7 +21,7 @@ Configuration parameters can be set in 3 places:
|
||||
mon, fio and QEMU options, OpenStack/Proxmox/etc configuration. The latter
|
||||
doesn't allow to set all variables directly, but it allows to override the
|
||||
configuration file and set everything you need inside it.
|
||||
- OSD superblocks created by [vitastor-disk](../usage/disk.en.md) contain
|
||||
- OSD superblocks created by [vitastor-disk](usage/disk.en.md) contain
|
||||
primarily disk layout parameters of specific OSDs. In fact, these parameters
|
||||
are automatically passed into the command line of vitastor-osd process, so
|
||||
they have the same "status" as command-line parameters.
|
||||
|
@@ -23,7 +23,7 @@
|
||||
монитора, опциях fio и QEMU, настроек OpenStack, Proxmox и т.п. Последние,
|
||||
как правило, не включают полный набор параметров напрямую, но позволяют
|
||||
определить путь к файлу конфигурации и задать любые параметры в нём.
|
||||
- В суперблоке OSD, записываемом [vitastor-disk](../usage/disk.ru.md) - параметры,
|
||||
- В суперблоке OSD, записываемом [vitastor-disk](usage/disk.ru.md) - параметры,
|
||||
связанные с дисковым форматом и с этим конкретным OSD. На самом деле,
|
||||
при запуске OSD эти параметры автоматически передаются в командную строку
|
||||
процесса vitastor-osd, то есть по "статусу" они эквивалентны параметрам
|
||||
|
@@ -33,12 +33,13 @@ Size of objects (data blocks) into which all physical and virtual drives
|
||||
in Vitastor, affects memory usage, write amplification and I/O load
|
||||
distribution effectiveness.
|
||||
|
||||
Recommended default block size is 128 KB for SSD and 4 MB for HDD. In fact,
|
||||
it's possible to use 4 MB for SSD too - it will lower memory usage, but
|
||||
Recommended default block size is 128 KB for SSD and 1 MB for HDD. In fact,
|
||||
it's possible to use 1 MB for SSD too - it will lower memory usage, but
|
||||
may increase average WA and reduce linear performance.
|
||||
|
||||
OSD memory usage is roughly (SIZE / BLOCK * 68 bytes) which is roughly
|
||||
544 MB per 1 TB of used disk space with the default 128 KB block size.
|
||||
With 1 MB it's 8 times lower.
|
||||
|
||||
## bitmap_granularity
|
||||
|
||||
|
@@ -33,14 +33,14 @@ OSD) могут сосуществовать в одном кластере Vita
|
||||
настроек, влияет на потребление памяти, объём избыточной записи (write
|
||||
amplification) и эффективность распределения нагрузки по OSD.
|
||||
|
||||
Рекомендуемые по умолчанию размеры блока - 128 килобайт для SSD и 4
|
||||
мегабайта для HDD. В принципе, для SSD можно тоже использовать 4 мегабайта,
|
||||
Рекомендуемые по умолчанию размеры блока - 128 килобайт для SSD и 1 мегабайт
|
||||
для HDD. В принципе, для SSD можно тоже использовать блок размером 1 мегабайт,
|
||||
это понизит использование памяти, но ухудшит распределение нагрузки и в
|
||||
среднем увеличит WA.
|
||||
|
||||
Потребление памяти OSD составляет примерно (РАЗМЕР / БЛОК * 68 байт),
|
||||
т.е. примерно 544 МБ памяти на 1 ТБ занятого места на диске при
|
||||
стандартном 128 КБ блоке.
|
||||
стандартном 128 КБ блоке. При 1 МБ блоке памяти нужно в 8 раз меньше.
|
||||
|
||||
## bitmap_granularity
|
||||
|
||||
|
@@ -24,6 +24,8 @@ initialization and can't be changed after it without losing data.
|
||||
- [disable_journal_fsync](#disable_journal_fsync)
|
||||
- [disable_device_lock](#disable_device_lock)
|
||||
- [disk_alignment](#disk_alignment)
|
||||
- [data_csum_type](#data_csum_type)
|
||||
- [csum_block_size](#csum_block_size)
|
||||
|
||||
## data_device
|
||||
|
||||
@@ -174,3 +176,42 @@ Intel Optane (probably, not tested yet).
|
||||
|
||||
Clients don't need to be aware of disk_alignment, so it's not required to
|
||||
put a modified value into etcd key /vitastor/config/global.
|
||||
|
||||
## data_csum_type
|
||||
|
||||
- Type: string
|
||||
- Default: none
|
||||
|
||||
Data checksum type to use. May be "crc32c" or "none". Set to "crc32c" to
|
||||
enable data checksums.
|
||||
|
||||
## csum_block_size
|
||||
|
||||
- Type: integer
|
||||
- Default: 4096
|
||||
|
||||
Checksum calculation block size.
|
||||
|
||||
Must be equal or a multiple of [bitmap_granularity](layout-cluster.en.md#bitmap_granularity)
|
||||
(which is usually 4 KB).
|
||||
|
||||
Checksums increase metadata size by 4 bytes per each csum_block_size of data.
|
||||
|
||||
Checksums are always a compromise:
|
||||
1. You either sacrifice +1 GB RAM per 1 TB of data
|
||||
2. Or you raise csum_block_size, for example, to 32k and sacrifice
|
||||
50% random write iops due to checksum read-modify-write
|
||||
3. Or you turn off [inmemory_metadata](osd.en.md#inmemory_metadata) and
|
||||
sacrifice 50% random read iops due to checksum reads
|
||||
|
||||
Option 1 (default) is recommended for all-flash setups because these usually
|
||||
have enough RAM.
|
||||
|
||||
Option 2 is recommended for HDD-only setups. HDD-only setups usually do NOT
|
||||
have enough RAM for the default 4 KB csum_block_size.
|
||||
|
||||
Option 3 is recommended for SSD+HDD setups (because metadata SSDs will handle
|
||||
extra reads without any performance drop) and also *maybe* for NVMe all-flash
|
||||
setups when you don't have enough RAM (because NVMe drives have plenty
|
||||
of read iops to spare). You may also consider enabling
|
||||
[cached_read_meta](osd.en.md#cached_read_meta) in this case.
|
||||
|
@@ -25,6 +25,8 @@
|
||||
- [disable_journal_fsync](#disable_journal_fsync)
|
||||
- [disable_device_lock](#disable_device_lock)
|
||||
- [disk_alignment](#disk_alignment)
|
||||
- [data_csum_type](#data_csum_type)
|
||||
- [csum_block_size](#csum_block_size)
|
||||
|
||||
## data_device
|
||||
|
||||
@@ -183,3 +185,52 @@ journal_block_size и meta_block_size. Однако единственные SSD
|
||||
|
||||
Клиентам не обязательно знать про disk_alignment, так что помещать значение
|
||||
этого параметра в etcd в /vitastor/config/global не нужно.
|
||||
|
||||
## data_csum_type
|
||||
|
||||
- Тип: строка
|
||||
- Значение по умолчанию: none
|
||||
|
||||
Тип используемых OSD контрольных сумм данных. Может быть "crc32c" или "none".
|
||||
Установите в "crc32c", чтобы включить расчёт и проверку контрольных сумм данных.
|
||||
|
||||
Следует понимать, что контрольные суммы в зависимости от размера блока их
|
||||
расчёта либо увеличивают потребление памяти, либо снижают производительность.
|
||||
Подробнее смотрите в описании параметра [csum_block_size](#csum_block_size).
|
||||
|
||||
## csum_block_size
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 4096
|
||||
|
||||
Размер блока расчёта контрольных сумм.
|
||||
|
||||
Должен быть равен или кратен [bitmap_granularity](layout-cluster.ru.md#bitmap_granularity)
|
||||
(который обычно равен 4 КБ).
|
||||
|
||||
Контрольные суммы увеличивают размер метаданных на 4 байта на каждые
|
||||
csum_block_size данных.
|
||||
|
||||
Контрольные суммы - это всегда компромисс:
|
||||
1. Вы либо жертвуете потреблением +1 ГБ памяти на 1 ТБ дискового пространства
|
||||
2. Либо вы повышаете csum_block_size до, скажем, 32k и жертвуете 50%
|
||||
скорости случайной записи из-за цикла чтения-изменения-записи для расчёта
|
||||
новых контрольных сумм
|
||||
3. Либо вы отключаете [inmemory_metadata](osd.ru.md#inmemory_metadata) и
|
||||
жертвуете 50% скорости случайного чтения из-за чтения контрольных сумм
|
||||
с диска
|
||||
|
||||
Вариант 1 (при настройках по умолчанию) рекомендуется для SSD (All-Flash)
|
||||
кластеров, потому что памяти в них обычно хватает.
|
||||
|
||||
Вариант 2 рекомендуется для кластеров на одних жёстких дисках (без SSD
|
||||
под метаданные). На 4 кб блок контрольной суммы памяти в таких кластерах
|
||||
обычно НЕ хватает.
|
||||
|
||||
Вариант 3 рекомендуется для гибридных кластеров (SSD+HDD), потому что
|
||||
скорости SSD под метаданными хватит, чтобы обработать дополнительные чтения
|
||||
без снижения производительности. Также вариант 3 *может* рекомендоваться
|
||||
для All-Flash кластеров на основе NVMe-дисков, когда памяти НЕ достаточно,
|
||||
потому что NVMe-диски имеют огромный запас производительности по чтению.
|
||||
В таких случаях, возможно, также имеет смысл включать параметр
|
||||
[cached_read_meta](osd.ru.md#cached_read_meta).
|
||||
|
@@ -31,6 +31,9 @@ them, even without restarting by updating configuration in etcd.
|
||||
- [max_flusher_count](#max_flusher_count)
|
||||
- [inmemory_metadata](#inmemory_metadata)
|
||||
- [inmemory_journal](#inmemory_journal)
|
||||
- [cached_read_data](#cached_read_data)
|
||||
- [cached_read_meta](#cached_read_meta)
|
||||
- [cached_read_journal](#cached_read_journal)
|
||||
- [journal_sector_buffer_count](#journal_sector_buffer_count)
|
||||
- [journal_no_same_sector_overwrites](#journal_no_same_sector_overwrites)
|
||||
- [throttle_small_writes](#throttle_small_writes)
|
||||
@@ -255,6 +258,46 @@ is typically very small because it's sufficient to have 16-32 MB journal
|
||||
for SSD OSDs. However, in theory it's possible that you'll want to turn it
|
||||
off for hybrid (HDD+SSD) OSDs with large journals on quick devices.
|
||||
|
||||
## cached_read_data
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
|
||||
Read data through Linux page cache, i.e. use a file descriptor opened without
|
||||
O_DIRECT for data reads. May improve read performance for frequently accessed
|
||||
data if it fits in RAM. Memory in page cache is shared by all processes and
|
||||
not accounted in OSD memory consumption.
|
||||
|
||||
## cached_read_meta
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
|
||||
Read metadata through Linux page cache. May be beneficial when checksums
|
||||
are enabled and [inmemory_metadata](#inmemory_metadata) is disabled, because
|
||||
in this case metadata blocks are read from disk to verify checksums on every
|
||||
read request and caching them may reduce this extra read load.
|
||||
|
||||
It is absolutely pointless to enable this when inmemory_metadata is enabled, because all
|
||||
metadata is kept in memory anyway, and likely pointless without checksums,
|
||||
because in that case, metadata blocks are read from disk only during journal
|
||||
flushing.
|
||||
|
||||
If the same device is used for data and metadata, enabling [cached_read_data](#cached_read_data)
|
||||
also enables this parameter, given that it isn't turned off explicitly.
|
||||
|
||||
## cached_read_journal
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
|
||||
Read buffered data from journal through Linux page cache. Does not make sense
|
||||
without disabling [inmemory_journal](#inmemory_journal), which, again, is
|
||||
enabled by default.
|
||||
|
||||
If the same device is used for metadata and journal, enabling [cached_read_meta](#cached_read_meta)
|
||||
also enables this parameter, given that it isn't turned off explicitly.
|
||||
|
||||
## journal_sector_buffer_count
|
||||
|
||||
- Type: integer
|
||||
|
@@ -32,6 +32,9 @@
|
||||
- [max_flusher_count](#max_flusher_count)
|
||||
- [inmemory_metadata](#inmemory_metadata)
|
||||
- [inmemory_journal](#inmemory_journal)
|
||||
- [cached_read_data](#cached_read_data)
|
||||
- [cached_read_meta](#cached_read_meta)
|
||||
- [cached_read_journal](#cached_read_journal)
|
||||
- [journal_sector_buffer_count](#journal_sector_buffer_count)
|
||||
- [journal_no_same_sector_overwrites](#journal_no_same_sector_overwrites)
|
||||
- [throttle_small_writes](#throttle_small_writes)
|
||||
@@ -263,6 +266,51 @@ Flusher - это микро-поток (корутина), которая коп
|
||||
параметра может оказаться полезным для гибридных OSD (HDD+SSD) с большими
|
||||
журналами, расположенными на быстром по сравнению с HDD устройстве.
|
||||
|
||||
## cached_read_data
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
|
||||
Читать данные через системный кэш Linux (page cache), то есть, использовать
|
||||
для чтения данных файловый дескриптор, открытый без флага O_DIRECT. Может
|
||||
улучшить производительность чтения для часто используемых данных, если они
|
||||
помещаются в память. Память кэша разделяется между всеми процессами в
|
||||
системе и не учитывается в потреблении памяти процессом OSD.
|
||||
|
||||
## cached_read_meta
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
|
||||
Читать метаданные через системный кэш Linux. Может быть полезно, когда
|
||||
включены контрольные суммы, а параметр [inmemory_metadata](#inmemory_metadata)
|
||||
отключён, так как в этом случае блоки метаданных читаются с диска при каждом
|
||||
запросе чтения для проверки контрольных сумм и их кэширование может снизить
|
||||
дополнительную нагрузку на диск.
|
||||
|
||||
Абсолютно бессмысленно включать данный параметр, если параметр
|
||||
inmemory_metadata включён (по умолчанию это так), и также вероятно
|
||||
бессмысленно включать его, если не включены контрольные суммы, так как в
|
||||
этом случае блоки метаданных читаются с диска только во время сброса
|
||||
журнала.
|
||||
|
||||
Если одно и то же устройство используется для данных и метаданных, включение
|
||||
[cached_read_data](#cached_read_data) также включает данный параметр, при
|
||||
условии, что он не отключён явным образом.
|
||||
|
||||
## cached_read_journal
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
|
||||
Читать буферизованные в журнале данные через системный кэш Linux. Не имеет
|
||||
смысла без отключения параметра [inmemory_journal](#inmemory_journal),
|
||||
который, опять же, по умолчанию включён.
|
||||
|
||||
Если одно и то же устройство используется для метаданных и журнала,
|
||||
включение [cached_read_meta](#cached_read_meta) также включает данный
|
||||
параметр, при условии, что он не отключён явным образом.
|
||||
|
||||
## journal_sector_buffer_count
|
||||
|
||||
- Тип: целое число
|
||||
|
145
docs/config/src/include.js
Executable file
145
docs/config/src/include.js
Executable file
@@ -0,0 +1,145 @@
|
||||
#!/usr/bin/nodejs
// Markdown preprocessor for the documentation build: expands {{file.md}}
// inclusion directives into one assembled document and rewrites cross-file
// links so they point at the anchors of the included sections.

const fsp = require('fs').promises;

run(process.argv).catch(console.error);

// Expand the index file given on the command line and print the assembled
// document to stdout.
//
// Directive forms recognised inside {{...}}:
//   {{path.md}}          - include the whole file (headings shifted under the
//                          heading the directive appears beneath)
//   {{path.md#Section}}  - include only the named section of the file
//   {{path.md|indent=N}} - force included headings to be shifted by N levels
async function run(argv)
{
    if (argv.length < 3)
    {
        console.log('Markdown preprocessor\nUSAGE: ./include.js file.md');
        return;
    }
    const index_file = await fsp.realpath(argv[2]);
    // Tokenizer: splits text into {{inclusion}} directives, [markdown](links)
    // and "# heading" lines; everything else stays as plain in-between chunks.
    const re = /(\{\{[\s\S]*?\}\}|\[[^\]]+\]\([^\)]+\)|(?:^|\n)#[^\n]+)/;
    let text = await fsp.readFile(index_file, { encoding: 'utf-8' });
    text = text.split(re);
    // Maps "<abs file path>[#Section]" -> "#anchor" for every inclusion;
    // used by the second pass to rewrite links into in-document anchors.
    let included = {};
    let heading = 0, heading_name = '', m;
    for (let i = 0; i < text.length; i++)
    {
        if (text[i].substr(0, 2) == '{{')
        {
            // Inclusion
            let incfile = text[i].substr(2, text[i].length-4);
            let section = null;
            let indent = heading;
            // Strip the optional "|indent=N" and "#Section" directive suffixes
            incfile = incfile.replace(/\s*\|\s*indent\s*=\s*(-?\d+)\s*$/, (m, m1) => { indent = parseInt(m1); return ''; });
            incfile = incfile.replace(/\s*#\s*([^#]+)$/, (m, m1) => { section = m1; return ''; });
            let inc_heading = section;
            incfile = rel2abs(index_file, incfile);
            let inc = await fsp.readFile(incfile, { encoding: 'utf-8' });
            inc = inc.trim().replace(/^[\s\S]+?\n#/, '#'); // remove until the first header
            inc = inc.split(re);
            const indent_str = new Array(indent+1).join('#');
            let section_start = -1, section_end = -1;
            for (let j = 0; j < inc.length; j++)
            {
                if ((m = /^(\n?)(#+\s*)([\s\S]+)$/.exec(inc[j])))
                {
                    // Heading inside the included file
                    if (!inc_heading)
                    {
                        inc_heading = m[3].trim();
                    }
                    if (section)
                    {
                        // Record the token span of the requested section: it
                        // begins at the matching heading and ends at the next
                        // heading encountered after it.
                        if (m[3].trim() == section)
                            section_start = j;
                        else if (section_start >= 0)
                        {
                            section_end = j;
                            break;
                        }
                    }
                    // Shift the heading level by prepending indent_str '#'s
                    inc[j] = m[1] + indent_str + m[2] + m[3];
                }
                else if ((m = /^(\[[^\]]+\]\()([^\)]+)(\))$/.exec(inc[j])) && !/^https?:(\/\/)|^#/.exec(m[2]))
                {
                    // Relative (non-http, non-anchor) link inside the included
                    // file: resolve it against the included file's location.
                    // abs2rel against __filename detects targets lying outside
                    // the docs tree; those become absolute repository URLs.
                    const abs_m2 = rel2abs(incfile, m[2]);
                    const rel_m = abs2rel(__filename, abs_m2);
                    if (rel_m.substr(0, 9) == '../../../') // outside docs
                        inc[j] = m[1] + 'https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/'+rel2abs('docs/config/src/include.js', rel_m) + m[3];
                    else
                        inc[j] = m[1] + abs_m2 + m[3];
                }
            }
            if (section)
            {
                inc = section_start >= 0 ? inc.slice(section_start, section_end < 0 ? inc.length : section_end) : [];
            }
            if (inc.length)
            {
                if (!inc_heading)
                    inc_heading = heading_name||'';
                // Gitea/GitHub-style anchor: lowercase, runs of non-letter
                // characters collapsed to '-', trimmed at both ends
                included[incfile+(section ? '#'+section : '')] = '#'+inc_heading.toLowerCase().replace(/\P{L}+/ug, '-').replace(/^-|-$/g, '');
                inc[0] = inc[0].replace(/^\s+/, '');
                inc[inc.length-1] = inc[inc.length-1].replace(/\s+$/, '');
            }
            // Splice the included tokens in place of the directive and advance
            // past them (included content is not re-scanned for directives)
            text.splice(i, 1, ...inc);
            i = i + inc.length - 1;
        }
        else if ((m = /^\n?(#+)\s*([\s\S]+)$/.exec(text[i])))
        {
            // Heading in the index file: remember its level and name so that
            // subsequent inclusions are indented under it by default
            heading = m[1].length;
            heading_name = m[2].trim();
        }
    }
    // Second pass: rewrite links whose targets were included above so that
    // they become in-document anchor links.
    for (let i = 0; i < text.length; i++)
    {
        if ((m = /^(\[[^\]]+\]\()([^\)]+)(\))$/.exec(text[i])) && !/^https?:(\/\/)|^#/.exec(m[2]))
        {
            const p = m[2].indexOf('#');
            if (included[m[2]])
            {
                text[i] = m[1]+included[m[2]]+m[3];
            }
            else if (p >= 0 && included[m[2].substr(0, p)])
            {
                // Link into a file that was included whole: keep only the
                // "#anchor" part of the target
                text[i] = m[1]+m[2].substr(p)+m[3];
            }
        }
    }
    console.log(text.join(''));
}
|
||||
|
||||
// Resolve the path `rel` against the directory containing `ref`.
// Returns the joined path with '.' and 'dir/..' segments collapsed.
function rel2abs(ref, rel)
{
    // Directory part of `ref`: everything before the last '/'
    // (if `ref` has no '/', the replace is a no-op and `ref` is used as-is)
    const dir = ref.replace(/^(.*)\/[^\/]+$/, '$1');
    const joined = dir.split(/\/+/).concat(rel.split(/\/+/));
    return killdots(joined).join('/');
}
|
||||
|
||||
// Compute the path of `abs` relative to the directory containing `ref`.
// Both arguments are '/'-separated path strings.
function abs2rel(ref, abs)
{
    const ref_parts = ref.split(/\/+/);
    const abs_parts = abs.split(/\/+/);
    // Strip the common leading segments; the last segment of `ref`
    // (its file name) is always kept
    while (ref_parts.length > 1 && ref_parts[0] == abs_parts[0])
    {
        ref_parts.shift();
        abs_parts.shift();
    }
    // One '..' for each remaining directory component of `ref`
    for (let k = ref_parts.length - 1; k >= 1; k--)
    {
        abs_parts.unshift('..');
    }
    return killdots(abs_parts).join('/');
}
|
||||
|
||||
function killdots(rel)
|
||||
{
|
||||
for (let i = 0; i < rel.length; i++)
|
||||
{
|
||||
if (rel[i] == '.')
|
||||
{
|
||||
rel.splice(i, 1);
|
||||
i--;
|
||||
}
|
||||
else if (i >= 1 && rel[i] == '..' && rel[i-1] != '..')
|
||||
{
|
||||
rel.splice(i-1, 2);
|
||||
i -= 2;
|
||||
}
|
||||
}
|
||||
return rel;
|
||||
}
|
65
docs/config/src/included.en.md
Normal file
65
docs/config/src/included.en.md
Normal file
@@ -0,0 +1,65 @@
|
||||
# Vitastor
|
||||
|
||||
{{../../../README.md#The Idea}}
|
||||
|
||||
{{../../../README.md#Talks and presentations}}
|
||||
|
||||
{{../../intro/features.en.md}}
|
||||
|
||||
{{../../intro/quickstart.en.md}}
|
||||
|
||||
{{../../intro/architecture.en.md}}
|
||||
|
||||
## Installation
|
||||
|
||||
{{../../installation/packages.en.md}}
|
||||
|
||||
{{../../installation/proxmox.en.md}}
|
||||
|
||||
{{../../installation/openstack.en.md}}
|
||||
|
||||
{{../../installation/kubernetes.en.md}}
|
||||
|
||||
{{../../installation/source.en.md}}
|
||||
|
||||
{{../../config.en.md|indent=1}}
|
||||
|
||||
{{../../config/common.en.md|indent=2}}
|
||||
|
||||
{{../../config/network.en.md|indent=2}}
|
||||
|
||||
{{../../config/layout-cluster.en.md|indent=2}}
|
||||
|
||||
{{../../config/layout-osd.en.md|indent=2}}
|
||||
|
||||
{{../../config/osd.en.md|indent=2}}
|
||||
|
||||
{{../../config/monitor.en.md|indent=2}}
|
||||
|
||||
{{../../config/pool.en.md|indent=2}}
|
||||
|
||||
{{../../config/inode.en.md|indent=2}}
|
||||
|
||||
## Usage
|
||||
|
||||
{{../../usage/cli.en.md}}
|
||||
|
||||
{{../../usage/disk.en.md}}
|
||||
|
||||
{{../../usage/fio.en.md}}
|
||||
|
||||
{{../../usage/nbd.en.md}}
|
||||
|
||||
{{../../usage/qemu.en.md}}
|
||||
|
||||
{{../../usage/nfs.en.md}}
|
||||
|
||||
## Performance
|
||||
|
||||
{{../../performance/understanding.en.md}}
|
||||
|
||||
{{../../performance/theoretical.en.md}}
|
||||
|
||||
{{../../performance/comparison1.en.md}}
|
||||
|
||||
{{../../intro/author.en.md|indent=1}}
|
65
docs/config/src/included.ru.md
Normal file
65
docs/config/src/included.ru.md
Normal file
@@ -0,0 +1,65 @@
|
||||
# Vitastor
|
||||
|
||||
{{../../../README-ru.md#Идея|indent=0}}
|
||||
|
||||
{{../../../README-ru.md#Презентации и записи докладов|indent=0}}
|
||||
|
||||
{{../../intro/features.ru.md}}
|
||||
|
||||
{{../../intro/quickstart.ru.md}}
|
||||
|
||||
{{../../intro/architecture.ru.md}}
|
||||
|
||||
## Установка
|
||||
|
||||
{{../../installation/packages.ru.md}}
|
||||
|
||||
{{../../installation/proxmox.ru.md}}
|
||||
|
||||
{{../../installation/openstack.ru.md}}
|
||||
|
||||
{{../../installation/kubernetes.ru.md}}
|
||||
|
||||
{{../../installation/source.ru.md}}
|
||||
|
||||
{{../../config.ru.md|indent=1}}
|
||||
|
||||
{{../../config/common.ru.md|indent=2}}
|
||||
|
||||
{{../../config/network.ru.md|indent=2}}
|
||||
|
||||
{{../../config/layout-cluster.ru.md|indent=2}}
|
||||
|
||||
{{../../config/layout-osd.ru.md|indent=2}}
|
||||
|
||||
{{../../config/osd.ru.md|indent=2}}
|
||||
|
||||
{{../../config/monitor.ru.md|indent=2}}
|
||||
|
||||
{{../../config/pool.ru.md|indent=2}}
|
||||
|
||||
{{../../config/inode.ru.md|indent=2}}
|
||||
|
||||
## Использование
|
||||
|
||||
{{../../usage/cli.ru.md}}
|
||||
|
||||
{{../../usage/disk.ru.md}}
|
||||
|
||||
{{../../usage/fio.ru.md}}
|
||||
|
||||
{{../../usage/nbd.ru.md}}
|
||||
|
||||
{{../../usage/qemu.ru.md}}
|
||||
|
||||
{{../../usage/nfs.ru.md}}
|
||||
|
||||
## Производительность
|
||||
|
||||
{{../../performance/understanding.ru.md}}
|
||||
|
||||
{{../../performance/theoretical.ru.md}}
|
||||
|
||||
{{../../performance/comparison1.ru.md}}
|
||||
|
||||
{{../../intro/author.ru.md|indent=1}}
|
@@ -7,26 +7,27 @@
|
||||
in Vitastor, affects memory usage, write amplification and I/O load
|
||||
distribution effectiveness.
|
||||
|
||||
Recommended default block size is 128 KB for SSD and 4 MB for HDD. In fact,
|
||||
it's possible to use 4 MB for SSD too - it will lower memory usage, but
|
||||
Recommended default block size is 128 KB for SSD and 1 MB for HDD. In fact,
|
||||
it's possible to use 1 MB for SSD too - it will lower memory usage, but
|
||||
may increase average WA and reduce linear performance.
|
||||
|
||||
OSD memory usage is roughly (SIZE / BLOCK * 68 bytes) which is roughly
|
||||
544 MB per 1 TB of used disk space with the default 128 KB block size.
|
||||
With 1 MB it's 8 times lower.
|
||||
info_ru: |
|
||||
Размер объектов (блоков данных), на которые делятся физические и виртуальные
|
||||
диски в Vitastor (в рамках каждого пула). Одна из ключевых на данный момент
|
||||
настроек, влияет на потребление памяти, объём избыточной записи (write
|
||||
amplification) и эффективность распределения нагрузки по OSD.
|
||||
|
||||
Рекомендуемые по умолчанию размеры блока - 128 килобайт для SSD и 4
|
||||
мегабайта для HDD. В принципе, для SSD можно тоже использовать 4 мегабайта,
|
||||
Рекомендуемые по умолчанию размеры блока - 128 килобайт для SSD и 1 мегабайт
|
||||
для HDD. В принципе, для SSD можно тоже использовать блок размером 1 мегабайт,
|
||||
это понизит использование памяти, но ухудшит распределение нагрузки и в
|
||||
среднем увеличит WA.
|
||||
|
||||
Потребление памяти OSD составляет примерно (РАЗМЕР / БЛОК * 68 байт),
|
||||
т.е. примерно 544 МБ памяти на 1 ТБ занятого места на диске при
|
||||
стандартном 128 КБ блоке.
|
||||
стандартном 128 КБ блоке. При 1 МБ блоке памяти нужно в 8 раз меньше.
|
||||
- name: bitmap_granularity
|
||||
type: int
|
||||
default: 4096
|
||||
|
@@ -204,3 +204,77 @@
|
||||
|
||||
Клиентам не обязательно знать про disk_alignment, так что помещать значение
|
||||
этого параметра в etcd в /vitastor/config/global не нужно.
|
||||
- name: data_csum_type
|
||||
type: string
|
||||
default: none
|
||||
info: |
|
||||
Data checksum type to use. May be "crc32c" or "none". Set to "crc32c" to
|
||||
enable data checksums.
|
||||
info_ru: |
|
||||
Тип используемых OSD контрольных сумм данных. Может быть "crc32c" или "none".
|
||||
Установите в "crc32c", чтобы включить расчёт и проверку контрольных сумм данных.
|
||||
|
||||
Следует понимать, что контрольные суммы в зависимости от размера блока их
|
||||
расчёта либо увеличивают потребление памяти, либо снижают производительность.
|
||||
Подробнее смотрите в описании параметра [csum_block_size](#csum_block_size).
|
||||
- name: csum_block_size
|
||||
type: int
|
||||
default: 4096
|
||||
info: |
|
||||
Checksum calculation block size.
|
||||
|
||||
Must be equal or a multiple of [bitmap_granularity](layout-cluster.en.md#bitmap_granularity)
|
||||
(which is usually 4 KB).
|
||||
|
||||
Checksums increase metadata size by 4 bytes per each csum_block_size of data.
|
||||
|
||||
Checksums are always a compromise:
|
||||
1. You either sacrifice +1 GB RAM per 1 TB of data
|
||||
2. Or you raise csum_block_size, for example, to 32k and sacrifice
|
||||
50% random write iops due to checksum read-modify-write
|
||||
3. Or you turn off [inmemory_metadata](osd.en.md#inmemory_metadata) and
|
||||
sacrifice 50% random read iops due to checksum reads
|
||||
|
||||
Option 1 (default) is recommended for all-flash setups because these usually
|
||||
have enough RAM.
|
||||
|
||||
Option 2 is recommended for HDD-only setups. HDD-only setups usually do NOT
|
||||
have enough RAM for the default 4 KB csum_block_size.
|
||||
|
||||
Option 3 is recommended for SSD+HDD setups (because metadata SSDs will handle
|
||||
extra reads without any performance drop) and also *maybe* for NVMe all-flash
|
||||
setups when you don't have enough RAM (because NVMe drives have plenty
|
||||
of read iops to spare). You may also consider enabling
|
||||
[cached_read_meta](osd.en.md#cached_read_meta) in this case.
|
||||
info_ru: |
|
||||
Размер блока расчёта контрольных сумм.
|
||||
|
||||
Должен быть равен или кратен [bitmap_granularity](layout-cluster.ru.md#bitmap_granularity)
|
||||
(который обычно равен 4 КБ).
|
||||
|
||||
Контрольные суммы увеличивают размер метаданных на 4 байта на каждые
|
||||
csum_block_size данных.
|
||||
|
||||
Контрольные суммы - это всегда компромисс:
|
||||
1. Вы либо жертвуете потреблением +1 ГБ памяти на 1 ТБ дискового пространства
|
||||
2. Либо вы повышаете csum_block_size до, скажем, 32k и жертвуете 50%
|
||||
скорости случайной записи из-за цикла чтения-изменения-записи для расчёта
|
||||
новых контрольных сумм
|
||||
3. Либо вы отключаете [inmemory_metadata](osd.ru.md#inmemory_metadata) и
|
||||
жертвуете 50% скорости случайного чтения из-за чтения контрольных сумм
|
||||
с диска
|
||||
|
||||
Вариант 1 (при настройках по умолчанию) рекомендуется для SSD (All-Flash)
|
||||
кластеров, потому что памяти в них обычно хватает.
|
||||
|
||||
Вариант 2 рекомендуется для кластеров на одних жёстких дисках (без SSD
|
||||
под метаданные). На 4 кб блок контрольной суммы памяти в таких кластерах
|
||||
обычно НЕ хватает.
|
||||
|
||||
Вариант 3 рекомендуется для гибридных кластеров (SSD+HDD), потому что
|
||||
скорости SSD под метаданными хватит, чтобы обработать дополнительные чтения
|
||||
без снижения производительности. Также вариант 3 *может* рекомендоваться
|
||||
для All-Flash кластеров на основе NVMe-дисков, когда памяти НЕ достаточно,
|
||||
потому что NVMe-диски имеют огромный запас производительности по чтению.
|
||||
В таких случаях, возможно, также имеет смысл включать параметр
|
||||
[cached_read_meta](osd.ru.md#cached_read_meta).
|
||||
|
@@ -260,6 +260,70 @@
|
||||
достаточно 16- или 32-мегабайтного журнала. Однако в теории отключение
|
||||
параметра может оказаться полезным для гибридных OSD (HDD+SSD) с большими
|
||||
журналами, расположенными на быстром по сравнению с HDD устройстве.
|
||||
- name: cached_read_data
|
||||
type: bool
|
||||
default: false
|
||||
info: |
|
||||
Read data through Linux page cache, i.e. use a file descriptor opened without
|
||||
O_DIRECT for data reads. May improve read performance for frequently accessed
|
||||
data if it fits in RAM. Memory in page cache is shared by all processes and
|
||||
not accounted in OSD memory consumption.
|
||||
info_ru: |
|
||||
Читать данные через системный кэш Linux (page cache), то есть, использовать
|
||||
для чтения данных файловый дескриптор, открытый без флага O_DIRECT. Может
|
||||
улучшить производительность чтения для часто используемых данных, если они
|
||||
помещаются в память. Память кэша разделяется между всеми процессами в
|
||||
системе и не учитывается в потреблении памяти процессом OSD.
|
||||
- name: cached_read_meta
|
||||
type: bool
|
||||
default: false
|
||||
info: |
|
||||
Read metadata through Linux page cache. May be beneficial when checksums
|
||||
are enabled and [inmemory_metadata](#inmemory_metadata) is disabled, because
|
||||
in this case metadata blocks are read from disk to verify checksums on every
|
||||
read request and caching them may reduce this extra read load.
|
||||
|
||||
It is absolutely pointless to enable this when inmemory_metadata is enabled, because all
|
||||
metadata is kept in memory anyway, and likely pointless without checksums,
|
||||
because in that case, metadata blocks are read from disk only during journal
|
||||
flushing.
|
||||
|
||||
If the same device is used for data and metadata, enabling [cached_read_data](#cached_read_data)
|
||||
also enables this parameter, given that it isn't turned off explicitly.
|
||||
info_ru: |
|
||||
Читать метаданные через системный кэш Linux. Может быть полезно, когда
|
||||
включены контрольные суммы, а параметр [inmemory_metadata](#inmemory_metadata)
|
||||
отключён, так как в этом случае блоки метаданных читаются с диска при каждом
|
||||
запросе чтения для проверки контрольных сумм и их кэширование может снизить
|
||||
дополнительную нагрузку на диск.
|
||||
|
||||
Абсолютно бессмысленно включать данный параметр, если параметр
|
||||
inmemory_metadata включён (по умолчанию это так), и также вероятно
|
||||
бессмысленно включать его, если не включены контрольные суммы, так как в
|
||||
этом случае блоки метаданных читаются с диска только во время сброса
|
||||
журнала.
|
||||
|
||||
Если одно и то же устройство используется для данных и метаданных, включение
|
||||
[cached_read_data](#cached_read_data) также включает данный параметр, при
|
||||
условии, что он не отключён явным образом.
|
||||
- name: cached_read_journal
|
||||
type: bool
|
||||
default: false
|
||||
info: |
|
||||
Read buffered data from journal through Linux page cache. Does not make sense
|
||||
without disabling [inmemory_journal](#inmemory_journal), which, again, is
|
||||
enabled by default.
|
||||
|
||||
If the same device is used for metadata and journal, enabling [cached_read_meta](#cached_read_meta)
|
||||
also enables this parameter, given that it isn't turned off explicitly.
|
||||
info_ru: |
|
||||
Читать буферизованные в журнале данные через системный кэш Linux. Не имеет
|
||||
смысла без отключения параметра [inmemory_journal](#inmemory_journal),
|
||||
который, опять же, по умолчанию включён.
|
||||
|
||||
Если одно и то же устройство используется для метаданных и журнала,
|
||||
включение [cached_read_meta](#cached_read_meta) также включает данный
|
||||
параметр, при условии, что он не отключён явным образом.
|
||||
- name: journal_sector_buffer_count
|
||||
type: int
|
||||
default: 32
|
||||
|
@@ -8,13 +8,13 @@
|
||||
|
||||
У Vitastor есть CSI-плагин для Kubernetes, поддерживающий RWO, а также блочные RWX, тома.
|
||||
|
||||
Для установки возьмите манифесты из директории [csi/deploy/](../csi/deploy/), поместите
|
||||
вашу конфигурацию подключения к Vitastor в [csi/deploy/001-csi-config-map.yaml](../csi/deploy/001-csi-config-map.yaml),
|
||||
настройте StorageClass в [csi/deploy/009-storage-class.yaml](../csi/deploy/009-storage-class.yaml)
|
||||
Для установки возьмите манифесты из директории [csi/deploy/](../../csi/deploy/), поместите
|
||||
вашу конфигурацию подключения к Vitastor в [csi/deploy/001-csi-config-map.yaml](../../csi/deploy/001-csi-config-map.yaml),
|
||||
настройте StorageClass в [csi/deploy/009-storage-class.yaml](../../csi/deploy/009-storage-class.yaml)
|
||||
и примените все `NNN-*.yaml` к вашей инсталляции Kubernetes.
|
||||
|
||||
```
|
||||
for i in ./???-*.yaml; do kubectl apply -f $i; done
|
||||
```
|
||||
|
||||
После этого вы сможете создавать PersistentVolume. Пример смотрите в файле [csi/deploy/example-pvc.yaml](../csi/deploy/example-pvc.yaml).
|
||||
После этого вы сможете создавать PersistentVolume. Пример смотрите в файле [csi/deploy/example-pvc.yaml](../../csi/deploy/example-pvc.yaml).
|
||||
|
@@ -36,5 +36,5 @@ vitastor_pool_id = 1
|
||||
image_upload_use_cinder_backend = True
|
||||
```
|
||||
|
||||
To put Glance images in Vitastor, use [https://docs.openstack.org/cinder/pike/admin/blockstorage-volume-backed-image.html](volume-backed images),
|
||||
To put Glance images in Vitastor, use [volume-backed images](https://docs.openstack.org/cinder/pike/admin/blockstorage-volume-backed-image.html),
|
||||
although the support has not been verified yet.
|
||||
|
@@ -36,5 +36,5 @@ image_upload_use_cinder_backend = True
|
||||
```
|
||||
|
||||
Чтобы помещать в Vitastor Glance-образы, нужно использовать
|
||||
[https://docs.openstack.org/cinder/pike/admin/blockstorage-volume-backed-image.html](образы на основе томов Cinder),
|
||||
[образы на основе томов Cinder](https://docs.openstack.org/cinder/pike/admin/blockstorage-volume-backed-image.html),
|
||||
однако, поддержка этой функции ещё не проверялась.
|
||||
|
@@ -11,7 +11,8 @@
|
||||
- Trust Vitastor package signing key:
|
||||
`wget https://vitastor.io/debian/pubkey.gpg -O /etc/apt/trusted.gpg.d/vitastor.gpg`
|
||||
- Add Vitastor package repository to your /etc/apt/sources.list:
|
||||
- Debian 11 (Bullseye/Sid): `deb https://vitastor.io/debian bullseye main`
|
||||
- Debian 12 (Bookworm/Sid): `deb https://vitastor.io/debian bookworm main`
|
||||
- Debian 11 (Bullseye): `deb https://vitastor.io/debian bullseye main`
|
||||
- Debian 10 (Buster): `deb https://vitastor.io/debian buster main`
|
||||
- For Debian 10 (Buster) also enable backports repository:
|
||||
`deb http://deb.debian.org/debian buster-backports main`
|
||||
|
@@ -11,7 +11,8 @@
|
||||
- Добавьте ключ репозитория Vitastor:
|
||||
`wget https://vitastor.io/debian/pubkey.gpg -O /etc/apt/trusted.gpg.d/vitastor.gpg`
|
||||
- Добавьте репозиторий Vitastor в /etc/apt/sources.list:
|
||||
- Debian 11 (Bullseye/Sid): `deb https://vitastor.io/debian bullseye main`
|
||||
- Debian 12 (Bookworm/Sid): `deb https://vitastor.io/debian bookworm main`
|
||||
- Debian 11 (Bullseye): `deb https://vitastor.io/debian bullseye main`
|
||||
- Debian 10 (Buster): `deb https://vitastor.io/debian buster main`
|
||||
- Для Debian 10 (Buster) также включите репозиторий backports:
|
||||
`deb http://deb.debian.org/debian buster-backports main`
|
||||
|
@@ -6,10 +6,10 @@
|
||||
|
||||
# Proxmox VE
|
||||
|
||||
To enable Vitastor support in Proxmox Virtual Environment (6.4-7.4 are supported):
|
||||
To enable Vitastor support in Proxmox Virtual Environment (6.4-8.0 are supported):
|
||||
|
||||
- Add the corresponding Vitastor Debian repository into sources.list on Proxmox hosts:
|
||||
buster for 6.4, bullseye for 7.4, pve7.1 for 7.1, pve7.2 for 7.2, pve7.3 for 7.3
|
||||
bookworm for 8.0, bullseye for 7.4, pve7.3 for 7.3, pve7.2 for 7.2, pve7.1 for 7.1, buster for 6.4
|
||||
- Install vitastor-client, pve-qemu-kvm, pve-storage-vitastor (* or see note) packages from Vitastor repository
|
||||
- Define storage in `/etc/pve/storage.cfg` (see below)
|
||||
- Block network access from VMs to Vitastor network (to OSDs and etcd),
|
||||
@@ -35,5 +35,5 @@ vitastor: vitastor
|
||||
vitastor_nbd 0
|
||||
```
|
||||
|
||||
\* Note: you can also manually copy [patches/VitastorPlugin.pm](patches/VitastorPlugin.pm) to Proxmox hosts
|
||||
\* Note: you can also manually copy [patches/VitastorPlugin.pm](../../patches/VitastorPlugin.pm) to Proxmox hosts
|
||||
as `/usr/share/perl5/PVE/Storage/Custom/VitastorPlugin.pm` instead of installing pve-storage-vitastor.
|
||||
|
@@ -1,15 +1,15 @@
|
||||
[Документация](../../README-ru.md#документация) → Установка → Proxmox
|
||||
[Документация](../../README-ru.md#документация) → Установка → Proxmox VE
|
||||
|
||||
-----
|
||||
|
||||
[Read in English](proxmox.en.md)
|
||||
|
||||
# Proxmox
|
||||
# Proxmox VE
|
||||
|
||||
Чтобы подключить Vitastor к Proxmox Virtual Environment (поддерживаются версии 6.4-7.4):
|
||||
Чтобы подключить Vitastor к Proxmox Virtual Environment (поддерживаются версии 6.4-8.0):
|
||||
|
||||
- Добавьте соответствующий Debian-репозиторий Vitastor в sources.list на хостах Proxmox:
|
||||
buster для 6.4, bullseye для 7.4, pve7.1 для 7.1, pve7.2 для 7.2, pve7.3 для 7.3
|
||||
bookworm для 8.0, bullseye для 7.4, pve7.3 для 7.3, pve7.2 для 7.2, pve7.1 для 7.1, buster для 6.4
|
||||
- Установите пакеты vitastor-client, pve-qemu-kvm, pve-storage-vitastor (* или см. сноску) из репозитория Vitastor
|
||||
- Определите тип хранилища в `/etc/pve/storage.cfg` (см. ниже)
|
||||
- Обязательно заблокируйте доступ от виртуальных машин к сети Vitastor (OSD и etcd), т.к. Vitastor (пока) не поддерживает аутентификацию
|
||||
@@ -35,5 +35,5 @@ vitastor: vitastor
|
||||
```
|
||||
|
||||
\* Примечание: вместо установки пакета pve-storage-vitastor вы можете вручную скопировать файл
|
||||
[patches/VitastorPlugin.pm](patches/VitastorPlugin.pm) на хосты Proxmox как
|
||||
[patches/VitastorPlugin.pm](../../patches/VitastorPlugin.pm) на хосты Proxmox как
|
||||
`/usr/share/perl5/PVE/Storage/Custom/VitastorPlugin.pm`.
|
||||
|
@@ -44,7 +44,7 @@
|
||||
depends linearly on drive capacity and data store block size which is 128 KB by default.
|
||||
With 128 KB blocks metadata takes around 512 MB per 1 TB (which is still less than Ceph wants).
|
||||
Journal is also kept in memory by default, but in SSD-only clusters it's only 32 MB, and in SSD+HDD
|
||||
clusters, where it's beneficial to increase it, [inmemory_journal](docs/config/osd.en.md#inmemory_journal) can be disabled.
|
||||
clusters, where it's beneficial to increase it, [inmemory_journal](../config/osd.en.md#inmemory_journal) can be disabled.
|
||||
- Vitastor storage layer doesn't have internal copy-on-write or redirect-write. I know that maybe
|
||||
it's possible to create a good copy-on-write storage, but it's much harder and makes performance
|
||||
less deterministic, so CoW isn't used in Vitastor.
|
||||
|
@@ -156,7 +156,7 @@
|
||||
блока хранилища (block_size, по умолчанию 128 КБ). С 128 КБ блоком потребление памяти
|
||||
составляет примерно 512 МБ на 1 ТБ данных. Журналы по умолчанию тоже хранятся в памяти,
|
||||
но в SSD-кластерах нужный размер журнала составляет всего 32 МБ, а в гибридных (SSD+HDD)
|
||||
кластерах, в которых есть смысл делать журналы больше, можно отключить [inmemory_journal](../docs/config/osd.ru.md#inmemory_journal).
|
||||
кластерах, в которых есть смысл делать журналы больше, можно отключить [inmemory_journal](../config/osd.ru.md#inmemory_journal).
|
||||
- В Vitastor нет внутреннего copy-on-write. Я считаю, что реализация CoW-хранилища гораздо сложнее,
|
||||
поэтому сложнее добиться устойчиво хороших результатов. Возможно, в один прекрасный день
|
||||
я придумаю красивый алгоритм для CoW-хранилища, но пока нет — внутреннего CoW в Vitastor не будет.
|
||||
|
@@ -30,12 +30,13 @@
|
||||
- [Write throttling to smooth random write workloads in SSD+HDD configurations](../config/osd.en.md#throttle_small_writes)
|
||||
- [RDMA/RoCEv2 support via libibverbs](../config/network.en.md#rdma_device)
|
||||
- [Scrubbing without checksums](../config/osd.en.md#auto_scrub) (verification of copies)
|
||||
- [Checksums](../config/layout-osd.en.md#data_csum_type)
|
||||
|
||||
## Plugins and tools
|
||||
|
||||
- [Debian and CentOS packages](../installation/packages.en.md)
|
||||
- [Image management CLI (vitastor-cli)](../usage/cli.en.md)
|
||||
- [Disk management CLI (vitastor-disk)](docs/usage/disk.en.md)
|
||||
- [Disk management CLI (vitastor-disk)](../usage/disk.en.md)
|
||||
- Generic user-space client library
|
||||
- [Native QEMU driver](../usage/qemu.en.md)
|
||||
- [Loadable fio engine for benchmarks](../usage/fio.en.md)
|
||||
@@ -55,7 +56,6 @@ The following features are planned for the future:
|
||||
- iSCSI proxy
|
||||
- Multi-threaded client
|
||||
- Faster failover
|
||||
- Checksums
|
||||
- Tiered storage (SSD caching)
|
||||
- NVDIMM support
|
||||
- Compression (possibly)
|
||||
|
@@ -13,7 +13,7 @@
|
||||
## Серверные функции
|
||||
|
||||
- Базовая часть - надёжное кластерное блочное хранилище без единой точки отказа
|
||||
- [Производительность](../comparison1.ru.md) ;-D
|
||||
- [Производительность](../performance/comparison1.ru.md) ;-D
|
||||
- [Несколько схем отказоустойчивости](../config/pool.ru.md#scheme): репликация, XOR n+1 (1 диск чётности), коды коррекции ошибок
|
||||
Рида-Соломона на основе библиотек jerasure и ISA-L с любым числом дисков данных и чётности в группе
|
||||
- Конфигурация через простые человекочитаемые JSON-структуры в etcd
|
||||
@@ -32,12 +32,13 @@
|
||||
- [Сглаживание производительности случайной записи в SSD+HDD конфигурациях](../config/osd.ru.md#throttle_small_writes)
|
||||
- [Поддержка RDMA/RoCEv2 через libibverbs](../config/network.ru.md#rdma_device)
|
||||
- [Фоновая проверка целостности без контрольных сумм](../config/osd.ru.md#auto_scrub) (сверка копий)
|
||||
- [Контрольные суммы](../config/layout-osd.ru.md#data_csum_type)
|
||||
|
||||
## Драйверы и инструменты
|
||||
|
||||
- [Пакеты для Debian и CentOS](../installation/packages.ru.md)
|
||||
- [Консольный интерфейс управления образами (vitastor-cli)](../usage/cli.ru.md)
|
||||
- [Инструмент управления дисками (vitastor-disk)](docs/usage/disk.ru.md)
|
||||
- [Инструмент управления дисками (vitastor-disk)](../usage/disk.ru.md)
|
||||
- Общая пользовательская клиентская библиотека для работы с кластером
|
||||
- [Драйвер диска для QEMU](../usage/qemu.ru.md)
|
||||
- [Драйвер диска для утилиты тестирования производительности fio](../usage/fio.ru.md)
|
||||
@@ -55,7 +56,6 @@
|
||||
- iSCSI-прокси
|
||||
- Многопоточный клиент
|
||||
- Более быстрое переключение при отказах
|
||||
- Контрольные суммы
|
||||
- Поддержка SSD-кэширования (tiered storage)
|
||||
- Поддержка NVDIMM
|
||||
- Возможно, сжатие
|
||||
|
@@ -7,6 +7,7 @@
|
||||
# Quick Start
|
||||
|
||||
- [Preparation](#preparation)
|
||||
- [Recommended drives](#recommended-drives)
|
||||
- [Configure monitors](#configure-monitors)
|
||||
- [Configure OSDs](#configure-osds)
|
||||
- [Create a pool](#create-a-pool)
|
||||
@@ -19,10 +20,20 @@
|
||||
- Get some SATA or NVMe SSDs with capacitors (server-grade drives). You can use desktop SSDs
|
||||
with lazy fsync, but prepare for inferior single-thread latency. Read more about capacitors
|
||||
[here](../config/layout-cluster.en.md#immediate_commit).
|
||||
- If you want to use HDDs, get modern HDDs with Media Cache or SSD Cache: HGST Ultrastar,
|
||||
Toshiba MG08, Seagate EXOS or something similar. If your drives don't have such cache then
|
||||
you also need small SSDs for journal and metadata (even 2 GB per 1 TB of HDD space is enough).
|
||||
- Get a fast network (at least 10 Gbit/s). Something like Mellanox ConnectX-4 with RoCEv2 is ideal.
|
||||
- Disable CPU powersaving: `cpupower idle-set -D 0 && cpupower frequency-set -g performance`.
|
||||
- [Install Vitastor packages](../installation/packages.en.md).
|
||||
|
||||
## Recommended drives
|
||||
|
||||
- SATA SSD: Micron 5100/5200/5300/5400, Samsung PM863/PM883/PM893, Intel D3-S4510/4520/4610/4620, Kingston DC500M
|
||||
- NVMe: Micron 9100/9200/9300/9400, Micron 7300/7450, Samsung PM983/PM9A3, Samsung PM1723/1735/1743,
|
||||
Intel DC-P3700/P4500/P4600, Intel D7-P5500/P5600, Intel Optane, Kingston DC1000B/DC1500M
|
||||
- HDD: HGST Ultrastar, Toshiba MG06/MG07/MG08, Seagate EXOS
|
||||
|
||||
## Configure monitors
|
||||
|
||||
On the monitor hosts:
|
||||
@@ -45,9 +56,10 @@ On the monitor hosts:
|
||||
}
|
||||
```
|
||||
- Initialize OSDs:
|
||||
- SSD-only: `vitastor-disk prepare /dev/sdXXX [/dev/sdYYY ...]`. You can add
|
||||
`--disable_data_fsync off` to leave disk cache enabled if you use desktop
|
||||
SSDs without capacitors.
|
||||
- SSD-only or HDD-only: `vitastor-disk prepare /dev/sdXXX [/dev/sdYYY ...]`.
|
||||
Add `--disable_data_fsync off` to leave disk write cache enabled if you use
|
||||
desktop SSDs without capacitors. Do NOT add `--disable_data_fsync off` if you
|
||||
use HDDs or SSD+HDD.
|
||||
- Hybrid, SSD+HDD: `vitastor-disk prepare --hybrid /dev/sdXXX [/dev/sdYYY ...]`.
|
||||
Pass all your devices (HDD and SSD) to this script — it will partition disks and initialize journals on its own.
|
||||
This script skips HDDs which are already partitioned so if you want to use non-empty disks for
|
||||
|
@@ -7,6 +7,7 @@
|
||||
# Быстрый старт
|
||||
|
||||
- [Подготовка](#подготовка)
|
||||
- [Рекомендуемые диски](#рекомендуемые-диски)
|
||||
- [Настройте мониторы](#настройте-мониторы)
|
||||
- [Настройте OSD](#настройте-osd)
|
||||
- [Создайте пул](#создайте-пул)
|
||||
@@ -19,10 +20,20 @@
|
||||
- Возьмите серверы с SSD (SATA или NVMe), желательно с конденсаторами (серверные SSD). Можно
|
||||
использовать и десктопные SSD, включив режим отложенного fsync, но производительность будет хуже.
|
||||
О конденсаторах читайте [здесь](../config/layout-cluster.ru.md#immediate_commit).
|
||||
- Если хотите использовать HDD, берите современные модели с Media или SSD кэшем - HGST Ultrastar,
|
||||
Toshiba MG08, Seagate EXOS или что-то похожее. Если такого кэша у ваших дисков нет,
|
||||
обязательно возьмите SSD под метаданные и журнал (маленькие, буквально 2 ГБ на 1 ТБ HDD-места).
|
||||
- Возьмите быструю сеть, минимум 10 гбит/с. Идеал - что-то вроде Mellanox ConnectX-4 с RoCEv2.
|
||||
- Для лучшей производительности отключите энергосбережение CPU: `cpupower idle-set -D 0 && cpupower frequency-set -g performance`.
|
||||
- [Установите пакеты Vitastor](../installation/packages.ru.md).
|
||||
|
||||
## Рекомендуемые диски
|
||||
|
||||
- SATA SSD: Micron 5100/5200/5300/5400, Samsung PM863/PM883/PM893, Intel D3-S4510/4520/4610/4620, Kingston DC500M
|
||||
- NVMe: Micron 9100/9200/9300/9400, Micron 7300/7450, Samsung PM983/PM9A3, Samsung PM1723/1735/1743,
|
||||
Intel DC-P3700/P4500/P4600, Intel D7-P5500/P5600, Intel Optane, Kingston DC1000B/DC1500M
|
||||
- HDD: HGST Ultrastar, Toshiba MG06/MG07/MG08, Seagate EXOS
|
||||
|
||||
## Настройте мониторы
|
||||
|
||||
На хостах, выделенных под мониторы:
|
||||
@@ -45,9 +56,10 @@
|
||||
}
|
||||
```
|
||||
- Инициализуйте OSD:
|
||||
- SSD: `vitastor-disk prepare /dev/sdXXX [/dev/sdYYY ...]`. Если вы используете
|
||||
десктопные SSD без конденсаторов, можете оставить кэш включённым, добавив
|
||||
опцию `--disable_data_fsync off`.
|
||||
- Только SSD или только HDD: `vitastor-disk prepare /dev/sdXXX [/dev/sdYYY ...]`.
|
||||
Если вы используете десктопные SSD без конденсаторов, добавьте опцию `--disable_data_fsync off`,
|
||||
чтобы оставить кэш записи диска включённым. НЕ добавляйте эту опцию, если используете
|
||||
жёсткие диски (HDD).
|
||||
- Гибридные, SSD+HDD: `vitastor-disk prepare --hybrid /dev/sdXXX [/dev/sdYYY ...]`.
|
||||
Передайте все ваши SSD и HDD скрипту в командной строке подряд, скрипт автоматически выделит
|
||||
разделы под журналы на SSD и данные на HDD. Скрипт пропускает HDD, на которых уже есть разделы
|
||||
|
@@ -86,6 +86,8 @@ Options (both modes):
|
||||
--journal_size 1G/32M Set journal size (area or partition size)
|
||||
--block_size 1M/128k Set blockstore object size
|
||||
--bitmap_granularity 4k Set bitmap granularity
|
||||
--data_csum_type none Set data checksum type (crc32c or none)
|
||||
--csum_block_size 4k Set data checksum block size
|
||||
--data_device_block 4k Override data device block size
|
||||
--meta_device_block 4k Override metadata device block size
|
||||
--journal_device_block 4k Override journal device block size
|
||||
@@ -100,8 +102,9 @@ checks the device cache status on start and tries to disable cache for SATA/SAS
|
||||
If it doesn't succeed it issues a warning in the system log.
|
||||
|
||||
You can also pass other OSD options here as arguments and they'll be persisted
|
||||
to the superblock: max_write_iodepth, max_write_iodepth, min_flusher_count,
|
||||
max_flusher_count, inmemory_metadata, inmemory_journal, journal_sector_buffer_count,
|
||||
in the superblock: cached_read_data, cached_read_meta, cached_read_journal,
|
||||
inmemory_metadata, inmemory_journal, max_write_iodepth,
|
||||
min_flusher_count, max_flusher_count, journal_sector_buffer_count,
|
||||
journal_no_same_sector_overwrites, throttle_small_writes, throttle_target_iops,
|
||||
throttle_target_mbs, throttle_target_parallelism, throttle_threshold_us.
|
||||
See [Runtime OSD Parameters](../config/osd.en.md) for details.
|
||||
@@ -249,7 +252,9 @@ Options (see also [Cluster-Wide Disk Layout Parameters](../config/layout-cluster
|
||||
```
|
||||
--object_size 128k Set blockstore block size
|
||||
--bitmap_granularity 4k Set bitmap granularity
|
||||
--journal_size 32M Set journal size
|
||||
--journal_size 16M Set journal size
|
||||
--data_csum_type none Set data checksum type (crc32c or none)
|
||||
--csum_block_size 4k Set data checksum block size
|
||||
--device_block_size 4k Set device block size
|
||||
--journal_offset 0 Set journal offset
|
||||
--device_size 0 Set device size
|
||||
|
@@ -87,6 +87,8 @@ vitastor-disk - инструмент командной строки для уп
|
||||
--journal_size 1G/32M Задать размер журнала (области или раздела журнала)
|
||||
--block_size 1M/128k Задать размер объекта хранилища
|
||||
--bitmap_granularity 4k Задать гранулярность битовых карт
|
||||
--data_csum_type none Задать тип контрольных сумм (crc32c или none)
|
||||
--csum_block_size 4k Задать размер блока расчёта контрольных сумм
|
||||
--data_device_block 4k Задать размер блока устройства данных
|
||||
--meta_device_block 4k Задать размер блока метаданных
|
||||
--journal_device_block 4k Задать размер блока журнала
|
||||
@@ -101,8 +103,9 @@ vitastor-disk - инструмент командной строки для уп
|
||||
это не удаётся, в системный журнал выводится предупреждение.
|
||||
|
||||
Вы можете передать данной команде и некоторые другие опции OSD в качестве аргументов
|
||||
и они тоже будут сохранены в суперблок: max_write_iodepth, max_write_iodepth, min_flusher_count,
|
||||
max_flusher_count, inmemory_metadata, inmemory_journal, journal_sector_buffer_count,
|
||||
и они тоже будут сохранены в суперблок: cached_read_data, cached_read_meta,
|
||||
cached_read_journal, inmemory_metadata, inmemory_journal, max_write_iodepth,
|
||||
min_flusher_count, max_flusher_count, journal_sector_buffer_count,
|
||||
journal_no_same_sector_overwrites, throttle_small_writes, throttle_target_iops,
|
||||
throttle_target_mbs, throttle_target_parallelism, throttle_threshold_us.
|
||||
Читайте об этих параметрах подробнее в разделе [Изменяемые параметры OSD](../config/osd.ru.md).
|
||||
@@ -254,7 +257,9 @@ OSD отключены fsync-и.
|
||||
```
|
||||
--object_size 128k Размер блока хранилища
|
||||
--bitmap_granularity 4k Гранулярность битовых карт
|
||||
--journal_size 32M Размер журнала
|
||||
--journal_size 16M Размер журнала
|
||||
--data_csum_type none Задать тип контрольных сумм (crc32c или none)
|
||||
--csum_block_size 4k Задать размер блока расчёта контрольных сумм
|
||||
--device_block_size 4k Размер блока устройства
|
||||
--journal_offset 0 Смещение журнала
|
||||
--device_size 0 Размер устройства
|
||||
|
@@ -13,6 +13,8 @@ remains decent (see an example [here](../performance/comparison1.en.md#vitastor-
|
||||
|
||||
Vitastor Kubernetes CSI driver is based on NBD.
|
||||
|
||||
See also [VDUSE](qemu.en.md#vduse).
|
||||
|
||||
## Map image
|
||||
|
||||
To create a local block device for a Vitastor image run:
|
||||
|
@@ -16,6 +16,8 @@ NBD немного снижает производительность из-за
|
||||
|
||||
CSI-драйвер Kubernetes Vitastor основан на NBD.
|
||||
|
||||
Смотрите также [VDUSE](qemu.ru.md#vduse).
|
||||
|
||||
## Подключить устройство
|
||||
|
||||
Чтобы создать локальное блочное устройство для образа, выполните команду:
|
||||
|
@@ -29,7 +29,7 @@ vitastor-nfs [--etcd_address ADDR] [ДРУГИЕ ОПЦИИ]
|
||||
--bind <IP> принимать соединения по адресу <IP> (по умолчанию 0.0.0.0 - на всех)
|
||||
--nfspath <PATH> установить путь NFS-экспорта в <PATH> (по умолчанию /)
|
||||
--port <PORT> использовать порт <PORT> для NFS-сервисов (по умолчанию 2049)
|
||||
--pool <POOL> исползовать пул <POOL> для новых образов (обязательно, если пул в кластере не один)
|
||||
--pool <POOL> использовать пул <POOL> для новых образов (обязательно, если пул в кластере не один)
|
||||
--foreground 1 не уходить в фон после запуска
|
||||
```
|
||||
|
||||
|
@@ -83,3 +83,43 @@ qemu-img rebase -u -b '' testimg.qcow2
|
||||
This can be used for backups. Just note that exporting an image that is currently being written to
|
||||
is of course unsafe and doesn't produce a consistent result, so only export snapshots if you do this
|
||||
on a live VM.
|
||||
|
||||
## VDUSE
|
||||
|
||||
Linux kernel, starting with version 5.15, supports a new interface for attaching virtual disks
|
||||
to the host - VDUSE (vDPA Device in Userspace). QEMU, starting with 7.2, has support for
|
||||
exporting QEMU block devices over this protocol using qemu-storage-daemon.
|
||||
|
||||
VDUSE has the same problem as other FUSE-like interfaces in Linux: if a userspace process hangs,
|
||||
for example, if it loses connectivity with Vitastor cluster - active processes doing I/O may
|
||||
hang in the D state (uninterruptible sleep) and you won't be able to kill them even with kill -9.
|
||||
In this case reboot will be the only way to remove VDUSE devices from system.
|
||||
|
||||
On the other hand, VDUSE is faster than [NBD](nbd.en.md), so you may prefer to use it if
|
||||
performance is important for you. Approximate performance numbers:
|
||||
direct fio benchmark - 115000 iops, NBD - 60000 iops, VDUSE - 90000 iops.
|
||||
|
||||
To try VDUSE you need at least Linux 5.15, built with VDUSE support
|
||||
(CONFIG_VIRTIO_VDPA=m and CONFIG_VDPA_USER=m). Debian Linux kernels have these options
|
||||
disabled by now, so if you want to try it on Debian, use a kernel from Ubuntu
|
||||
[kernel-ppa/mainline](https://kernel.ubuntu.com/~kernel-ppa/mainline/) or Proxmox.
|
||||
|
||||
Commands to attach Vitastor image as a VDUSE device:
|
||||
|
||||
```
|
||||
modprobe vduse virtio-vdpa
|
||||
qemu-storage-daemon --daemonize --blockdev '{"node-name":"test1","driver":"vitastor",\
|
||||
"etcd-host":"192.168.7.2:2379/v3","image":"testosd1","cache":{"direct":true,"no-flush":false},"discard":"unmap"}' \
|
||||
--export vduse-blk,id=test1,node-name=test1,name=test1,num-queues=16,queue-size=128,writable=true
|
||||
vdpa dev add name test1 mgmtdev vduse
|
||||
```
|
||||
|
||||
After running these commands /dev/vda device will appear in the system and you'll be able to
|
||||
use it as a normal disk.
|
||||
|
||||
To remove the device:
|
||||
|
||||
```
|
||||
vdpa dev del test1
|
||||
kill <qemu-storage-daemon_process_PID>
|
||||
```
|
||||
|
@@ -87,3 +87,43 @@ qemu-img rebase -u -b '' testimg.qcow2
|
||||
Это можно использовать для резервного копирования. Только помните, что экспортировать образ, в который
|
||||
в то же время идёт запись, небезопасно - результат чтения не будет целостным. Так что если вы работаете
|
||||
с активными виртуальными машинами, экспортируйте только их снимки, но не сам образ.
|
||||
|
||||
## VDUSE
|
||||
|
||||
В Linux, начиная с версии ядра 5.15, доступен новый интерфейс для подключения виртуальных дисков
|
||||
к системе - VDUSE (vDPA Device in Userspace), а в QEMU, начиная с версии 7.2, есть поддержка
|
||||
экспорта блочных устройств QEMU по этому протоколу через qemu-storage-daemon.
|
||||
|
||||
VDUSE страдает общей проблемой FUSE-подобных интерфейсов в Linux: если пользовательский процесс
|
||||
подвиснет, например, если будет потеряна связь с кластером Vitastor - читающие/пишущие в кластер
|
||||
процессы могут "залипнуть" в состоянии D (непрерываемый сон) и их будет невозможно убить даже
|
||||
через kill -9. В этом случае удалить из системы устройство можно только перезагрузившись.
|
||||
|
||||
С другой стороны, VDUSE быстрее по сравнению с [NBD](nbd.ru.md), поэтому его может
|
||||
быть предпочтительно использовать там, где производительность важнее. Порядок показателей:
|
||||
прямое тестирование через fio - 115000 iops, NBD - 60000 iops, VDUSE - 90000 iops.
|
||||
|
||||
Чтобы использовать VDUSE, вам нужно ядро Linux версии хотя бы 5.15, собранное с поддержкой
|
||||
VDUSE (CONFIG_VIRTIO_VDPA=m и CONFIG_VDPA_USER=m). В ядрах в Debian Linux поддержка пока
|
||||
отключена - если хотите попробовать эту функцию на Debian, поставьте ядро из Ubuntu
|
||||
[kernel-ppa/mainline](https://kernel.ubuntu.com/~kernel-ppa/mainline/) или из Proxmox.
|
||||
|
||||
Команды для подключения виртуального диска через VDUSE:
|
||||
|
||||
```
|
||||
modprobe vduse virtio-vdpa
|
||||
qemu-storage-daemon --daemonize --blockdev '{"node-name":"test1","driver":"vitastor",\
|
||||
"etcd-host":"192.168.7.2:2379/v3","image":"testosd1","cache":{"direct":true,"no-flush":false},"discard":"unmap"}' \
|
||||
--export vduse-blk,id=test1,node-name=test1,name=test1,num-queues=16,queue-size=128,writable=true
|
||||
vdpa dev add name test1 mgmtdev vduse
|
||||
```
|
||||
|
||||
После этого в системе появится устройство /dev/vda, которое можно будет использовать как
|
||||
обычный диск.
|
||||
|
||||
Для удаления устройства из системы:
|
||||
|
||||
```
|
||||
vdpa dev del test1
|
||||
kill <PID_процесса_qemu-storage-daemon>
|
||||
```
|
||||
|
@@ -63,8 +63,9 @@ Wants=network-online.target local-fs.target time-sync.target
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
ExecStart=/usr/local/bin/etcd -name etcd${num} --data-dir /var/lib/etcd${num}.etcd \\
|
||||
--advertise-client-urls http://${etcds[num]}:2379 --listen-client-urls http://${etcds[num]}:2379 \\
|
||||
Environment=GOGC=50
|
||||
ExecStart=etcd -name etcd${num} --data-dir /var/lib/etcd${num}.etcd \\
|
||||
--snapshot-count 10000 --advertise-client-urls http://${etcds[num]}:2379 --listen-client-urls http://${etcds[num]}:2379 \\
|
||||
--initial-advertise-peer-urls http://${etcds[num]}:2380 --listen-peer-urls http://${etcds[num]}:2380 \\
|
||||
--initial-cluster-token vitastor-etcd-1 --initial-cluster ${etcd_cluster} \\
|
||||
--initial-cluster-state new --max-txn-ops=100000 --max-request-bytes=104857600 \\
|
||||
|
147
mon/mon.js
147
mon/mon.js
@@ -391,6 +391,7 @@ class Mon
|
||||
this.etcd_start_timeout = (config.etcd_start_timeout || 5) * 1000;
|
||||
this.state = JSON.parse(JSON.stringify(this.constructor.etcd_tree));
|
||||
this.signals_set = false;
|
||||
this.stat_time = Date.now();
|
||||
this.ws = null;
|
||||
this.ws_alive = false;
|
||||
this.ws_keepalive_timer = null;
|
||||
@@ -1410,65 +1411,75 @@ class Mon
|
||||
}
|
||||
}
|
||||
|
||||
derive_osd_stats(st, prev)
|
||||
{
|
||||
const zero_stats = { op: { bps: 0n, iops: 0n, lat: 0n }, subop: { iops: 0n, lat: 0n }, recovery: { bps: 0n, iops: 0n } };
|
||||
const diff = { op_stats: {}, subop_stats: {}, recovery_stats: {} };
|
||||
if (!st || !st.time || prev && (prev.time || this.stat_time/1000) >= st.time)
|
||||
{
|
||||
return diff;
|
||||
}
|
||||
const timediff = BigInt(st.time*1000 - (prev && prev.time*1000 || this.stat_time));
|
||||
for (const op in st.op_stats||{})
|
||||
{
|
||||
const pr = prev && prev.op_stats && prev.op_stats[op];
|
||||
let c = st.op_stats[op];
|
||||
c = { bytes: BigInt(c.bytes||0), usec: BigInt(c.usec||0), count: BigInt(c.count||0) };
|
||||
const b = c.bytes - BigInt(pr && pr.bytes||0);
|
||||
const us = c.usec - BigInt(pr && pr.usec||0);
|
||||
const n = c.count - BigInt(pr && pr.count||0);
|
||||
if (n > 0)
|
||||
diff.op_stats[op] = { ...c, bps: b*1000n/timediff, iops: n*1000n/timediff, lat: us/n };
|
||||
}
|
||||
for (const op in st.subop_stats||{})
|
||||
{
|
||||
const pr = prev && prev.subop_stats && prev.subop_stats[op];
|
||||
let c = st.subop_stats[op];
|
||||
c = { usec: BigInt(c.usec||0), count: BigInt(c.count||0) };
|
||||
const us = c.usec - BigInt(pr && pr.usec||0);
|
||||
const n = c.count - BigInt(pr && pr.count||0);
|
||||
if (n > 0)
|
||||
diff.subop_stats[op] = { ...c, iops: n*1000n/timediff, lat: us/n };
|
||||
}
|
||||
for (const op in st.recovery_stats||{})
|
||||
{
|
||||
const pr = prev && prev.recovery_stats && prev.recovery_stats[op];
|
||||
let c = st.recovery_stats[op];
|
||||
c = { bytes: BigInt(c.bytes||0), count: BigInt(c.count||0) };
|
||||
const b = c.bytes - BigInt(pr && pr.bytes||0);
|
||||
const n = c.count - BigInt(pr && pr.count||0);
|
||||
if (n > 0)
|
||||
diff.recovery_stats[op] = { ...c, bps: b*1000n/timediff, iops: n*1000n/timediff };
|
||||
}
|
||||
return diff;
|
||||
}
|
||||
|
||||
sum_op_stats(timestamp, prev_stats)
|
||||
{
|
||||
const op_stats = {}, subop_stats = {}, recovery_stats = {};
|
||||
const sum_diff = { op_stats: {}, subop_stats: {}, recovery_stats: {} };
|
||||
if (!prev_stats || prev_stats.timestamp >= timestamp)
|
||||
{
|
||||
return sum_diff;
|
||||
}
|
||||
const tm = BigInt(timestamp - (prev_stats.timestamp || 0));
|
||||
// Sum derived values instead of deriving summed
|
||||
for (const osd in this.state.osd.stats)
|
||||
{
|
||||
const st = this.state.osd.stats[osd]||{};
|
||||
for (const op in st.op_stats||{})
|
||||
const derived = this.derive_osd_stats(this.state.osd.stats[osd],
|
||||
this.prev_stats && this.prev_stats.osd_stats && this.prev_stats.osd_stats[osd]);
|
||||
for (const type in derived)
|
||||
{
|
||||
op_stats[op] = op_stats[op] || { count: 0n, usec: 0n, bytes: 0n };
|
||||
op_stats[op].count += BigInt(st.op_stats[op].count||0);
|
||||
op_stats[op].usec += BigInt(st.op_stats[op].usec||0);
|
||||
op_stats[op].bytes += BigInt(st.op_stats[op].bytes||0);
|
||||
}
|
||||
for (const op in st.subop_stats||{})
|
||||
{
|
||||
subop_stats[op] = subop_stats[op] || { count: 0n, usec: 0n };
|
||||
subop_stats[op].count += BigInt(st.subop_stats[op].count||0);
|
||||
subop_stats[op].usec += BigInt(st.subop_stats[op].usec||0);
|
||||
}
|
||||
for (const op in st.recovery_stats||{})
|
||||
{
|
||||
recovery_stats[op] = recovery_stats[op] || { count: 0n, bytes: 0n };
|
||||
recovery_stats[op].count += BigInt(st.recovery_stats[op].count||0);
|
||||
recovery_stats[op].bytes += BigInt(st.recovery_stats[op].bytes||0);
|
||||
for (const op in derived[type])
|
||||
{
|
||||
for (const k in derived[type][op])
|
||||
{
|
||||
sum_diff[type][op] = sum_diff[type][op] || {};
|
||||
sum_diff[type][op][k] = (sum_diff[type][op][k] || 0n) + derived[type][op][k];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (prev_stats && prev_stats.timestamp >= timestamp)
|
||||
{
|
||||
prev_stats = null;
|
||||
}
|
||||
const tm = prev_stats ? BigInt(timestamp - prev_stats.timestamp) : 0;
|
||||
for (const op in op_stats)
|
||||
{
|
||||
if (prev_stats && prev_stats.op_stats && prev_stats.op_stats[op])
|
||||
{
|
||||
op_stats[op].bps = (op_stats[op].bytes - prev_stats.op_stats[op].bytes) * 1000n / tm;
|
||||
op_stats[op].iops = (op_stats[op].count - prev_stats.op_stats[op].count) * 1000n / tm;
|
||||
op_stats[op].lat = (op_stats[op].usec - prev_stats.op_stats[op].usec)
|
||||
/ ((op_stats[op].count - prev_stats.op_stats[op].count) || 1n);
|
||||
}
|
||||
}
|
||||
for (const op in subop_stats)
|
||||
{
|
||||
if (prev_stats && prev_stats.subop_stats && prev_stats.subop_stats[op])
|
||||
{
|
||||
subop_stats[op].iops = (subop_stats[op].count - prev_stats.subop_stats[op].count) * 1000n / tm;
|
||||
subop_stats[op].lat = (subop_stats[op].usec - prev_stats.subop_stats[op].usec)
|
||||
/ ((subop_stats[op].count - prev_stats.subop_stats[op].count) || 1n);
|
||||
}
|
||||
}
|
||||
for (const op in recovery_stats)
|
||||
{
|
||||
if (prev_stats && prev_stats.recovery_stats && prev_stats.recovery_stats[op])
|
||||
{
|
||||
recovery_stats[op].bps = (recovery_stats[op].bytes - prev_stats.recovery_stats[op].bytes) * 1000n / tm;
|
||||
recovery_stats[op].iops = (recovery_stats[op].count - prev_stats.recovery_stats[op].count) * 1000n / tm;
|
||||
}
|
||||
}
|
||||
return { op_stats, subop_stats, recovery_stats };
|
||||
return sum_diff;
|
||||
}
|
||||
|
||||
sum_object_counts()
|
||||
@@ -1597,7 +1608,7 @@ class Mon
|
||||
}
|
||||
}
|
||||
}
|
||||
return inode_stats;
|
||||
return { inode_stats, seen_pools };
|
||||
}
|
||||
|
||||
serialize_bigints(obj)
|
||||
@@ -1623,11 +1634,12 @@ class Mon
|
||||
const timestamp = Date.now();
|
||||
const { object_counts, object_bytes } = this.sum_object_counts();
|
||||
let stats = this.sum_op_stats(timestamp, this.prev_stats);
|
||||
let inode_stats = this.sum_inode_stats(
|
||||
let { inode_stats, seen_pools } = this.sum_inode_stats(
|
||||
this.prev_stats ? this.prev_stats.inode_stats : null,
|
||||
timestamp, this.prev_stats ? this.prev_stats.timestamp : null
|
||||
);
|
||||
this.prev_stats = { timestamp, ...stats, inode_stats };
|
||||
this.prev_stats = { timestamp, inode_stats, osd_stats: { ...this.state.osd.stats } };
|
||||
this.stat_time = Date.now();
|
||||
stats.object_counts = object_counts;
|
||||
stats.object_bytes = object_bytes;
|
||||
stats = this.serialize_bigints(stats);
|
||||
@@ -1657,12 +1669,22 @@ class Mon
|
||||
}
|
||||
for (const pool_id in this.state.pool.stats)
|
||||
{
|
||||
const pool_stats = { ...this.state.pool.stats[pool_id] };
|
||||
this.serialize_bigints(pool_stats);
|
||||
txn.push({ requestPut: {
|
||||
key: b64(this.etcd_prefix+'/pool/stats/'+pool_id),
|
||||
value: b64(JSON.stringify(pool_stats)),
|
||||
} });
|
||||
if (!seen_pools[pool_id])
|
||||
{
|
||||
txn.push({ requestDeleteRange: {
|
||||
key: b64(this.etcd_prefix+'/pool/stats/'+pool_id),
|
||||
} });
|
||||
delete this.state.pool.stats[pool_id];
|
||||
}
|
||||
else
|
||||
{
|
||||
const pool_stats = { ...this.state.pool.stats[pool_id] };
|
||||
this.serialize_bigints(pool_stats);
|
||||
txn.push({ requestPut: {
|
||||
key: b64(this.etcd_prefix+'/pool/stats/'+pool_id),
|
||||
value: b64(JSON.stringify(pool_stats)),
|
||||
} });
|
||||
}
|
||||
}
|
||||
if (txn.length)
|
||||
{
|
||||
@@ -1743,13 +1765,14 @@ class Mon
|
||||
else if (key_parts[0] === 'osd' && key_parts[1] === 'stats')
|
||||
{
|
||||
// Recheck OSD tree on OSD addition/deletion
|
||||
const osd_num = key_parts[2];
|
||||
if ((!old) != (!kv.value) || old && kv.value && old.size != kv.value.size)
|
||||
{
|
||||
this.schedule_recheck();
|
||||
}
|
||||
// Recheck PGs <osd_out_time> after last OSD statistics report
|
||||
this.schedule_next_recheck_at(
|
||||
!this.state.osd.stats[key[2]] ? 0 : this.state.osd.stats[key[2]].time+this.config.osd_out_time
|
||||
!this.state.osd.stats[osd_num] ? 0 : this.state.osd.stats[osd_num].time+this.config.osd_out_time
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@@ -388,8 +388,6 @@ sub unmap_volume
|
||||
my ($class, $storeid, $scfg, $volname, $snapname) = @_;
|
||||
my $prefix = defined $scfg->{vitastor_prefix} ? $scfg->{vitastor_prefix} : 'pve/';
|
||||
|
||||
return 1 if !$scfg->{vitastor_nbd};
|
||||
|
||||
my ($vtype, $name, $vmid) = $class->parse_volname($volname);
|
||||
$name .= '@'.$snapname if $snapname;
|
||||
|
||||
@@ -413,7 +411,7 @@ sub activate_volume
|
||||
sub deactivate_volume
|
||||
{
|
||||
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
|
||||
$class->unmap_volume($storeid, $scfg, $volname, $snapname);
|
||||
$class->unmap_volume($storeid, $scfg, $volname, $snapname) if $scfg->{vitastor_nbd};
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@@ -50,7 +50,7 @@ from cinder.volume import configuration
|
||||
from cinder.volume import driver
|
||||
from cinder.volume import volume_utils
|
||||
|
||||
VERSION = '0.9.0'
|
||||
VERSION = '0.9.3'
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
644
patches/libvirt-9.0-vitastor.diff
Normal file
644
patches/libvirt-9.0-vitastor.diff
Normal file
@@ -0,0 +1,644 @@
|
||||
commit e6f935157944279c2c0634915c3c00feeec748c9
|
||||
Author: Vitaliy Filippov <vitalif@yourcmc.ru>
|
||||
Date: Mon Jun 19 00:58:19 2023 +0300
|
||||
|
||||
Add Vitastor support
|
||||
|
||||
diff --git a/include/libvirt/libvirt-storage.h b/include/libvirt/libvirt-storage.h
|
||||
index aaad4a3..5f5daa8 100644
|
||||
--- a/include/libvirt/libvirt-storage.h
|
||||
+++ b/include/libvirt/libvirt-storage.h
|
||||
@@ -326,6 +326,7 @@ typedef enum {
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_ZFS = 1 << 17, /* (Since: 1.2.8) */
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE = 1 << 18, /* (Since: 3.1.0) */
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI_DIRECT = 1 << 19, /* (Since: 5.6.0) */
|
||||
+ VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR = 1 << 20, /* (Since: 5.0.0) */
|
||||
} virConnectListAllStoragePoolsFlags;
|
||||
|
||||
int virConnectListAllStoragePools(virConnectPtr conn,
|
||||
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
|
||||
index 45965fa..b7c23d3 100644
|
||||
--- a/src/conf/domain_conf.c
|
||||
+++ b/src/conf/domain_conf.c
|
||||
@@ -7103,7 +7103,8 @@ virDomainDiskSourceNetworkParse(xmlNodePtr node,
|
||||
src->configFile = virXPathString("string(./config/@file)", ctxt);
|
||||
|
||||
if (src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTP ||
|
||||
- src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS)
|
||||
+ src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS ||
|
||||
+ src->protocol == VIR_STORAGE_NET_PROTOCOL_VITASTOR)
|
||||
src->query = virXMLPropString(node, "query");
|
||||
|
||||
if (virDomainStorageNetworkParseHosts(node, ctxt, &src->hosts, &src->nhosts) < 0)
|
||||
@@ -30121,6 +30122,7 @@ virDomainStorageSourceTranslateSourcePool(virStorageSource *src,
|
||||
|
||||
case VIR_STORAGE_POOL_MPATH:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_SHEEPDOG:
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
diff --git a/src/conf/domain_validate.c b/src/conf/domain_validate.c
|
||||
index 5a9bf20..05058b8 100644
|
||||
--- a/src/conf/domain_validate.c
|
||||
+++ b/src/conf/domain_validate.c
|
||||
@@ -494,6 +494,7 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
@@ -541,7 +542,7 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
||||
}
|
||||
}
|
||||
|
||||
- /* internal snapshots and config files are currently supported only with rbd: */
|
||||
+ /* internal snapshots are currently supported only with rbd: */
|
||||
if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK &&
|
||||
src->protocol != VIR_STORAGE_NET_PROTOCOL_RBD) {
|
||||
if (src->snapshot) {
|
||||
@@ -550,11 +551,15 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
||||
"only with 'rbd' disks"));
|
||||
return -1;
|
||||
}
|
||||
-
|
||||
+ }
|
||||
+ /* config files are currently supported only with rbd and vitastor: */
|
||||
+ if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK &&
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_RBD &&
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_VITASTOR) {
|
||||
if (src->configFile) {
|
||||
virReportError(VIR_ERR_XML_ERROR, "%s",
|
||||
_("<config> element is currently supported "
|
||||
- "only with 'rbd' disks"));
|
||||
+ "only with 'rbd' and 'vitastor' disks"));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
diff --git a/src/conf/schemas/domaincommon.rng b/src/conf/schemas/domaincommon.rng
|
||||
index 6cb0a20..8bf7de9 100644
|
||||
--- a/src/conf/schemas/domaincommon.rng
|
||||
+++ b/src/conf/schemas/domaincommon.rng
|
||||
@@ -1972,6 +1972,35 @@
|
||||
</element>
|
||||
</define>
|
||||
|
||||
+ <define name="diskSourceNetworkProtocolVitastor">
|
||||
+ <element name="source">
|
||||
+ <interleave>
|
||||
+ <attribute name="protocol">
|
||||
+ <value>vitastor</value>
|
||||
+ </attribute>
|
||||
+ <ref name="diskSourceCommon"/>
|
||||
+ <optional>
|
||||
+ <attribute name="name"/>
|
||||
+ </optional>
|
||||
+ <optional>
|
||||
+ <attribute name="query"/>
|
||||
+ </optional>
|
||||
+ <zeroOrMore>
|
||||
+ <ref name="diskSourceNetworkHost"/>
|
||||
+ </zeroOrMore>
|
||||
+ <optional>
|
||||
+ <element name="config">
|
||||
+ <attribute name="file">
|
||||
+ <ref name="absFilePath"/>
|
||||
+ </attribute>
|
||||
+ <empty/>
|
||||
+ </element>
|
||||
+ </optional>
|
||||
+ <empty/>
|
||||
+ </interleave>
|
||||
+ </element>
|
||||
+ </define>
|
||||
+
|
||||
<define name="diskSourceNetworkProtocolISCSI">
|
||||
<element name="source">
|
||||
<attribute name="protocol">
|
||||
@@ -2264,6 +2293,7 @@
|
||||
<ref name="diskSourceNetworkProtocolSimple"/>
|
||||
<ref name="diskSourceNetworkProtocolVxHS"/>
|
||||
<ref name="diskSourceNetworkProtocolNFS"/>
|
||||
+ <ref name="diskSourceNetworkProtocolVitastor"/>
|
||||
</choice>
|
||||
</define>
|
||||
|
||||
diff --git a/src/conf/storage_conf.c b/src/conf/storage_conf.c
|
||||
index f5a9636..8339bc4 100644
|
||||
--- a/src/conf/storage_conf.c
|
||||
+++ b/src/conf/storage_conf.c
|
||||
@@ -56,7 +56,7 @@ VIR_ENUM_IMPL(virStoragePool,
|
||||
"logical", "disk", "iscsi",
|
||||
"iscsi-direct", "scsi", "mpath",
|
||||
"rbd", "sheepdog", "gluster",
|
||||
- "zfs", "vstorage",
|
||||
+ "zfs", "vstorage", "vitastor",
|
||||
);
|
||||
|
||||
VIR_ENUM_IMPL(virStoragePoolFormatFileSystem,
|
||||
@@ -242,6 +242,18 @@ static virStoragePoolTypeInfo poolTypeInfo[] = {
|
||||
.formatToString = virStorageFileFormatTypeToString,
|
||||
}
|
||||
},
|
||||
+ {.poolType = VIR_STORAGE_POOL_VITASTOR,
|
||||
+ .poolOptions = {
|
||||
+ .flags = (VIR_STORAGE_POOL_SOURCE_HOST |
|
||||
+ VIR_STORAGE_POOL_SOURCE_NETWORK |
|
||||
+ VIR_STORAGE_POOL_SOURCE_NAME),
|
||||
+ },
|
||||
+ .volOptions = {
|
||||
+ .defaultFormat = VIR_STORAGE_FILE_RAW,
|
||||
+ .formatFromString = virStorageVolumeFormatFromString,
|
||||
+ .formatToString = virStorageFileFormatTypeToString,
|
||||
+ }
|
||||
+ },
|
||||
{.poolType = VIR_STORAGE_POOL_SHEEPDOG,
|
||||
.poolOptions = {
|
||||
.flags = (VIR_STORAGE_POOL_SOURCE_HOST |
|
||||
@@ -542,6 +554,11 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
|
||||
_("element 'name' is mandatory for RBD pool"));
|
||||
return -1;
|
||||
}
|
||||
+ if (pool_type == VIR_STORAGE_POOL_VITASTOR && source->name == NULL) {
|
||||
+ virReportError(VIR_ERR_XML_ERROR, "%s",
|
||||
+ _("element 'name' is mandatory for Vitastor pool"));
|
||||
+ return -1;
|
||||
+ }
|
||||
|
||||
if (options->formatFromString) {
|
||||
g_autofree char *format = NULL;
|
||||
@@ -1132,6 +1149,7 @@ virStoragePoolDefFormatBuf(virBuffer *buf,
|
||||
/* RBD, Sheepdog, Gluster and Iscsi-direct devices are not local block devs nor
|
||||
* files, so they don't have a target */
|
||||
if (def->type != VIR_STORAGE_POOL_RBD &&
|
||||
+ def->type != VIR_STORAGE_POOL_VITASTOR &&
|
||||
def->type != VIR_STORAGE_POOL_SHEEPDOG &&
|
||||
def->type != VIR_STORAGE_POOL_GLUSTER &&
|
||||
def->type != VIR_STORAGE_POOL_ISCSI_DIRECT) {
|
||||
diff --git a/src/conf/storage_conf.h b/src/conf/storage_conf.h
|
||||
index fc67957..720c07e 100644
|
||||
--- a/src/conf/storage_conf.h
|
||||
+++ b/src/conf/storage_conf.h
|
||||
@@ -103,6 +103,7 @@ typedef enum {
|
||||
VIR_STORAGE_POOL_GLUSTER, /* Gluster device */
|
||||
VIR_STORAGE_POOL_ZFS, /* ZFS */
|
||||
VIR_STORAGE_POOL_VSTORAGE, /* Virtuozzo Storage */
|
||||
+ VIR_STORAGE_POOL_VITASTOR, /* Vitastor */
|
||||
|
||||
VIR_STORAGE_POOL_LAST,
|
||||
} virStoragePoolType;
|
||||
@@ -454,6 +455,7 @@ VIR_ENUM_DECL(virStoragePartedFs);
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_SCSI | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_MPATH | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_RBD | \
|
||||
+ VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_ZFS | \
|
||||
diff --git a/src/conf/storage_source_conf.c b/src/conf/storage_source_conf.c
|
||||
index cecd7e8..d7b79a4 100644
|
||||
--- a/src/conf/storage_source_conf.c
|
||||
+++ b/src/conf/storage_source_conf.c
|
||||
@@ -87,6 +87,7 @@ VIR_ENUM_IMPL(virStorageNetProtocol,
|
||||
"ssh",
|
||||
"vxhs",
|
||||
"nfs",
|
||||
+ "vitastor",
|
||||
);
|
||||
|
||||
|
||||
@@ -1286,6 +1287,7 @@ virStorageSourceNetworkDefaultPort(virStorageNetProtocol protocol)
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
return 24007;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
/* we don't provide a default for RBD */
|
||||
return 0;
|
||||
diff --git a/src/conf/storage_source_conf.h b/src/conf/storage_source_conf.h
|
||||
index 14a6825..eb4acac 100644
|
||||
--- a/src/conf/storage_source_conf.h
|
||||
+++ b/src/conf/storage_source_conf.h
|
||||
@@ -128,6 +128,7 @@ typedef enum {
|
||||
VIR_STORAGE_NET_PROTOCOL_SSH,
|
||||
VIR_STORAGE_NET_PROTOCOL_VXHS,
|
||||
VIR_STORAGE_NET_PROTOCOL_NFS,
|
||||
+ VIR_STORAGE_NET_PROTOCOL_VITASTOR,
|
||||
|
||||
VIR_STORAGE_NET_PROTOCOL_LAST
|
||||
} virStorageNetProtocol;
|
||||
diff --git a/src/conf/virstorageobj.c b/src/conf/virstorageobj.c
|
||||
index e6c187e..035b423 100644
|
||||
--- a/src/conf/virstorageobj.c
|
||||
+++ b/src/conf/virstorageobj.c
|
||||
@@ -1433,6 +1433,7 @@ virStoragePoolObjSourceFindDuplicateCb(const void *payload,
|
||||
return 1;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_ISCSI_DIRECT:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
@@ -1918,6 +1919,8 @@ virStoragePoolObjMatch(virStoragePoolObj *obj,
|
||||
(obj->def->type == VIR_STORAGE_POOL_MPATH)) ||
|
||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_RBD) &&
|
||||
(obj->def->type == VIR_STORAGE_POOL_RBD)) ||
|
||||
+ (MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR) &&
|
||||
+ (obj->def->type == VIR_STORAGE_POOL_VITASTOR)) ||
|
||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG) &&
|
||||
(obj->def->type == VIR_STORAGE_POOL_SHEEPDOG)) ||
|
||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER) &&
|
||||
diff --git a/src/libvirt-storage.c b/src/libvirt-storage.c
|
||||
index 8490034..ab2cdaa 100644
|
||||
--- a/src/libvirt-storage.c
|
||||
+++ b/src/libvirt-storage.c
|
||||
@@ -94,6 +94,7 @@ virStoragePoolGetConnect(virStoragePoolPtr pool)
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_SCSI
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_MPATH
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_RBD
|
||||
+ * VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_ZFS
|
||||
diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
|
||||
index 17ac880..59711b5 100644
|
||||
--- a/src/libxl/libxl_conf.c
|
||||
+++ b/src/libxl/libxl_conf.c
|
||||
@@ -970,6 +970,7 @@ libxlMakeNetworkDiskSrcStr(virStorageSource *src,
|
||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
virReportError(VIR_ERR_NO_SUPPORT,
|
||||
diff --git a/src/libxl/xen_xl.c b/src/libxl/xen_xl.c
|
||||
index 6919325..55ffc32 100644
|
||||
--- a/src/libxl/xen_xl.c
|
||||
+++ b/src/libxl/xen_xl.c
|
||||
@@ -1445,6 +1445,7 @@ xenFormatXLDiskSrcNet(virStorageSource *src)
|
||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
virReportError(VIR_ERR_NO_SUPPORT,
|
||||
diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
|
||||
index e865aa1..40162af 100644
|
||||
--- a/src/qemu/qemu_block.c
|
||||
+++ b/src/qemu/qemu_block.c
|
||||
@@ -604,6 +604,38 @@ qemuBlockStorageSourceGetRBDProps(virStorageSource *src,
|
||||
}
|
||||
|
||||
|
||||
+static virJSONValue *
|
||||
+qemuBlockStorageSourceGetVitastorProps(virStorageSource *src)
|
||||
+{
|
||||
+ virJSONValue *ret = NULL;
|
||||
+ virStorageNetHostDef *host;
|
||||
+ size_t i;
|
||||
+ g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
|
||||
+ g_autofree char *etcd = NULL;
|
||||
+
|
||||
+ for (i = 0; i < src->nhosts; i++) {
|
||||
+ host = src->hosts + i;
|
||||
+ if ((virStorageNetHostTransport)host->transport != VIR_STORAGE_NET_HOST_TRANS_TCP) {
|
||||
+ return NULL;
|
||||
+ }
|
||||
+ virBufferAsprintf(&buf, i > 0 ? ",%s:%u" : "%s:%u", host->name, host->port);
|
||||
+ }
|
||||
+ if (src->nhosts > 0) {
|
||||
+ etcd = virBufferContentAndReset(&buf);
|
||||
+ }
|
||||
+
|
||||
+ if (virJSONValueObjectAdd(&ret,
|
||||
+ "S:etcd-host", etcd,
|
||||
+ "S:etcd-prefix", src->query,
|
||||
+ "S:config-path", src->configFile,
|
||||
+ "s:image", src->path,
|
||||
+ NULL) < 0)
|
||||
+ return NULL;
|
||||
+
|
||||
+ return ret;
|
||||
+}
|
||||
+
|
||||
+
|
||||
static virJSONValue *
|
||||
qemuBlockStorageSourceGetSheepdogProps(virStorageSource *src)
|
||||
{
|
||||
@@ -917,6 +949,12 @@ qemuBlockStorageSourceGetBackendProps(virStorageSource *src,
|
||||
return NULL;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
+ driver = "vitastor";
|
||||
+ if (!(fileprops = qemuBlockStorageSourceGetVitastorProps(src)))
|
||||
+ return NULL;
|
||||
+ break;
|
||||
+
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
driver = "sheepdog";
|
||||
if (!(fileprops = qemuBlockStorageSourceGetSheepdogProps(src)))
|
||||
@@ -1860,6 +1898,7 @@ qemuBlockGetBackingStoreString(virStorageSource *src,
|
||||
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
||||
@@ -2242,6 +2281,12 @@ qemuBlockStorageSourceCreateGetStorageProps(virStorageSource *src,
|
||||
return -1;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
+ driver = "vitastor";
|
||||
+ if (!(location = qemuBlockStorageSourceGetVitastorProps(src)))
|
||||
+ return -1;
|
||||
+ break;
|
||||
+
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
driver = "sheepdog";
|
||||
if (!(location = qemuBlockStorageSourceGetSheepdogProps(src)))
|
||||
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
|
||||
index 2eb5653..60ee82d 100644
|
||||
--- a/src/qemu/qemu_domain.c
|
||||
+++ b/src/qemu/qemu_domain.c
|
||||
@@ -4958,7 +4958,8 @@ qemuDomainValidateStorageSource(virStorageSource *src,
|
||||
if (src->query &&
|
||||
(actualType != VIR_STORAGE_TYPE_NETWORK ||
|
||||
(src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTPS &&
|
||||
- src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP))) {
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP &&
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_VITASTOR))) {
|
||||
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
||||
_("query is supported only with HTTP(S) protocols"));
|
||||
return -1;
|
||||
@@ -10129,6 +10130,7 @@ qemuDomainPrepareStorageSourceTLS(virStorageSource *src,
|
||||
break;
|
||||
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
|
||||
index b841680..a6be771 100644
|
||||
--- a/src/qemu/qemu_snapshot.c
|
||||
+++ b/src/qemu/qemu_snapshot.c
|
||||
@@ -373,6 +373,7 @@ qemuSnapshotPrepareDiskExternalInactive(virDomainSnapshotDiskDef *snapdisk,
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||
@@ -578,6 +579,7 @@ qemuSnapshotPrepareDiskInternal(virDomainDiskDef *disk,
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||
diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c
|
||||
index d90c1c9..e853457 100644
|
||||
--- a/src/storage/storage_driver.c
|
||||
+++ b/src/storage/storage_driver.c
|
||||
@@ -1627,6 +1627,7 @@ storageVolLookupByPathCallback(virStoragePoolObj *obj,
|
||||
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_SHEEPDOG:
|
||||
case VIR_STORAGE_POOL_ZFS:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
diff --git a/src/storage_file/storage_source_backingstore.c b/src/storage_file/storage_source_backingstore.c
|
||||
index e48ae72..2017ccc 100644
|
||||
--- a/src/storage_file/storage_source_backingstore.c
|
||||
+++ b/src/storage_file/storage_source_backingstore.c
|
||||
@@ -284,6 +284,75 @@ virStorageSourceParseRBDColonString(const char *rbdstr,
|
||||
}
|
||||
|
||||
|
||||
+static int
|
||||
+virStorageSourceParseVitastorColonString(const char *colonstr,
|
||||
+ virStorageSource *src)
|
||||
+{
|
||||
+ char *p, *e, *next;
|
||||
+ g_autofree char *options = NULL;
|
||||
+
|
||||
+ /* optionally skip the "vitastor:" prefix if provided */
|
||||
+ if (STRPREFIX(colonstr, "vitastor:"))
|
||||
+ colonstr += strlen("vitastor:");
|
||||
+
|
||||
+ options = g_strdup(colonstr);
|
||||
+
|
||||
+ p = options;
|
||||
+ while (*p) {
|
||||
+ /* find : delimiter or end of string */
|
||||
+ for (e = p; *e && *e != ':'; ++e) {
|
||||
+ if (*e == '\\') {
|
||||
+ e++;
|
||||
+ if (*e == '\0')
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+ if (*e == '\0') {
|
||||
+ next = e; /* last kv pair */
|
||||
+ } else {
|
||||
+ next = e + 1;
|
||||
+ *e = '\0';
|
||||
+ }
|
||||
+
|
||||
+ if (STRPREFIX(p, "image=")) {
|
||||
+ src->path = g_strdup(p + strlen("image="));
|
||||
+ } else if (STRPREFIX(p, "etcd-prefix=")) {
|
||||
+ src->query = g_strdup(p + strlen("etcd-prefix="));
|
||||
+ } else if (STRPREFIX(p, "config-path=")) {
|
||||
+ src->configFile = g_strdup(p + strlen("config-path="));
|
||||
+ } else if (STRPREFIX(p, "etcd-host=")) {
|
||||
+ char *h, *sep;
|
||||
+
|
||||
+ h = p + strlen("etcd-host=");
|
||||
+ while (h < e) {
|
||||
+ for (sep = h; sep < e; ++sep) {
|
||||
+ if (*sep == '\\' && (sep[1] == ',' ||
|
||||
+ sep[1] == ';' ||
|
||||
+ sep[1] == ' ')) {
|
||||
+ *sep = '\0';
|
||||
+ sep += 2;
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (virStorageSourceRBDAddHost(src, h) < 0)
|
||||
+ return -1;
|
||||
+
|
||||
+ h = sep;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ p = next;
|
||||
+ }
|
||||
+
|
||||
+ if (!src->path) {
|
||||
+ return -1;
|
||||
+ }
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+
|
||||
static int
|
||||
virStorageSourceParseNBDColonString(const char *nbdstr,
|
||||
virStorageSource *src)
|
||||
@@ -396,6 +465,11 @@ virStorageSourceParseBackingColon(virStorageSource *src,
|
||||
return -1;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
+ if (virStorageSourceParseVitastorColonString(path, src) < 0)
|
||||
+ return -1;
|
||||
+ break;
|
||||
+
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
@@ -984,6 +1058,54 @@ virStorageSourceParseBackingJSONRBD(virStorageSource *src,
|
||||
return 0;
|
||||
}
|
||||
|
||||
+static int
|
||||
+virStorageSourceParseBackingJSONVitastor(virStorageSource *src,
|
||||
+ virJSONValue *json,
|
||||
+ const char *jsonstr G_GNUC_UNUSED,
|
||||
+ int opaque G_GNUC_UNUSED)
|
||||
+{
|
||||
+ const char *filename;
|
||||
+ const char *image = virJSONValueObjectGetString(json, "image");
|
||||
+ const char *conf = virJSONValueObjectGetString(json, "config-path");
|
||||
+ const char *etcd_prefix = virJSONValueObjectGetString(json, "etcd-prefix");
|
||||
+ virJSONValue *servers = virJSONValueObjectGetArray(json, "server");
|
||||
+ size_t nservers;
|
||||
+ size_t i;
|
||||
+
|
||||
+ src->type = VIR_STORAGE_TYPE_NETWORK;
|
||||
+ src->protocol = VIR_STORAGE_NET_PROTOCOL_VITASTOR;
|
||||
+
|
||||
+ /* legacy syntax passed via 'filename' option */
|
||||
+ if ((filename = virJSONValueObjectGetString(json, "filename")))
|
||||
+ return virStorageSourceParseVitastorColonString(filename, src);
|
||||
+
|
||||
+ if (!image) {
|
||||
+ virReportError(VIR_ERR_INVALID_ARG, "%s",
|
||||
+ _("missing image name in Vitastor backing volume "
|
||||
+ "JSON specification"));
|
||||
+ return -1;
|
||||
+ }
|
||||
+
|
||||
+ src->path = g_strdup(image);
|
||||
+ src->configFile = g_strdup(conf);
|
||||
+ src->query = g_strdup(etcd_prefix);
|
||||
+
|
||||
+ if (servers) {
|
||||
+ nservers = virJSONValueArraySize(servers);
|
||||
+
|
||||
+ src->hosts = g_new0(virStorageNetHostDef, nservers);
|
||||
+ src->nhosts = nservers;
|
||||
+
|
||||
+ for (i = 0; i < nservers; i++) {
|
||||
+ if (virStorageSourceParseBackingJSONInetSocketAddress(src->hosts + i,
|
||||
+ virJSONValueArrayGet(servers, i)) < 0)
|
||||
+ return -1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
static int
|
||||
virStorageSourceParseBackingJSONRaw(virStorageSource *src,
|
||||
virJSONValue *json,
|
||||
@@ -1162,6 +1284,7 @@ static const struct virStorageSourceJSONDriverParser jsonParsers[] = {
|
||||
{"sheepdog", false, virStorageSourceParseBackingJSONSheepdog, 0},
|
||||
{"ssh", false, virStorageSourceParseBackingJSONSSH, 0},
|
||||
{"rbd", false, virStorageSourceParseBackingJSONRBD, 0},
|
||||
+ {"vitastor", false, virStorageSourceParseBackingJSONVitastor, 0},
|
||||
{"raw", true, virStorageSourceParseBackingJSONRaw, 0},
|
||||
{"nfs", false, virStorageSourceParseBackingJSONNFS, 0},
|
||||
{"vxhs", false, virStorageSourceParseBackingJSONVxHS, 0},
|
||||
diff --git a/src/test/test_driver.c b/src/test/test_driver.c
|
||||
index bd6f063..cce34e1 100644
|
||||
--- a/src/test/test_driver.c
|
||||
+++ b/src/test/test_driver.c
|
||||
@@ -7338,6 +7338,7 @@ testStorageVolumeTypeForPool(int pooltype)
|
||||
case VIR_STORAGE_POOL_ISCSI_DIRECT:
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
return VIR_STORAGE_VOL_NETWORK;
|
||||
case VIR_STORAGE_POOL_LOGICAL:
|
||||
case VIR_STORAGE_POOL_DISK:
|
||||
diff --git a/tests/storagepoolcapsschemadata/poolcaps-fs.xml b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||
index eee75af..8bd0a57 100644
|
||||
--- a/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||
+++ b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||
@@ -204,4 +204,11 @@
|
||||
</enum>
|
||||
</volOptions>
|
||||
</pool>
|
||||
+ <pool type='vitastor' supported='no'>
|
||||
+ <volOptions>
|
||||
+ <defaultFormat type='raw'/>
|
||||
+ <enum name='targetFormatType'>
|
||||
+ </enum>
|
||||
+ </volOptions>
|
||||
+ </pool>
|
||||
</storagepoolCapabilities>
|
||||
diff --git a/tests/storagepoolcapsschemadata/poolcaps-full.xml b/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||
index 805950a..852df0d 100644
|
||||
--- a/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||
+++ b/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||
@@ -204,4 +204,11 @@
|
||||
</enum>
|
||||
</volOptions>
|
||||
</pool>
|
||||
+ <pool type='vitastor' supported='yes'>
|
||||
+ <volOptions>
|
||||
+ <defaultFormat type='raw'/>
|
||||
+ <enum name='targetFormatType'>
|
||||
+ </enum>
|
||||
+ </volOptions>
|
||||
+ </pool>
|
||||
</storagepoolCapabilities>
|
||||
diff --git a/tests/storagepoolxml2argvtest.c b/tests/storagepoolxml2argvtest.c
|
||||
index e8e40d6..db55fe5 100644
|
||||
--- a/tests/storagepoolxml2argvtest.c
|
||||
+++ b/tests/storagepoolxml2argvtest.c
|
||||
@@ -65,6 +65,7 @@ testCompareXMLToArgvFiles(bool shouldFail,
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_ZFS:
|
||||
case VIR_STORAGE_POOL_VSTORAGE:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
default:
|
||||
VIR_TEST_DEBUG("pool type '%s' has no xml2argv test", defTypeStr);
|
||||
diff --git a/tools/virsh-pool.c b/tools/virsh-pool.c
|
||||
index 8a98c6a..4b1bbd4 100644
|
||||
--- a/tools/virsh-pool.c
|
||||
+++ b/tools/virsh-pool.c
|
||||
@@ -1221,6 +1221,9 @@ cmdPoolList(vshControl *ctl, const vshCmd *cmd G_GNUC_UNUSED)
|
||||
case VIR_STORAGE_POOL_VSTORAGE:
|
||||
flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE;
|
||||
break;
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
+ flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR;
|
||||
+ break;
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
break;
|
||||
}
|
190
patches/pve-qemu-8.0-vitastor.patch
Normal file
190
patches/pve-qemu-8.0-vitastor.patch
Normal file
@@ -0,0 +1,190 @@
|
||||
diff --git a/block/meson.build b/block/meson.build
|
||||
index 382bec0e7d..af6207dbce 100644
|
||||
--- a/block/meson.build
|
||||
+++ b/block/meson.build
|
||||
@@ -114,6 +114,7 @@ foreach m : [
|
||||
[libnfs, 'nfs', files('nfs.c')],
|
||||
[libssh, 'ssh', files('ssh.c')],
|
||||
[rbd, 'rbd', files('rbd.c')],
|
||||
+ [vitastor, 'vitastor', files('vitastor.c')],
|
||||
]
|
||||
if m[0].found()
|
||||
module_ss = ss.source_set()
|
||||
diff --git a/meson.build b/meson.build
|
||||
index c44d05a13f..ebedb42843 100644
|
||||
--- a/meson.build
|
||||
+++ b/meson.build
|
||||
@@ -1028,6 +1028,26 @@ if not get_option('rbd').auto() or have_block
|
||||
endif
|
||||
endif
|
||||
|
||||
+vitastor = not_found
|
||||
+if not get_option('vitastor').auto() or have_block
|
||||
+ libvitastor_client = cc.find_library('vitastor_client', has_headers: ['vitastor_c.h'],
|
||||
+ required: get_option('vitastor'), kwargs: static_kwargs)
|
||||
+ if libvitastor_client.found()
|
||||
+ if cc.links('''
|
||||
+ #include <vitastor_c.h>
|
||||
+ int main(void) {
|
||||
+ vitastor_c_create_qemu(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
|
||||
+ return 0;
|
||||
+ }''', dependencies: libvitastor_client)
|
||||
+ vitastor = declare_dependency(dependencies: libvitastor_client)
|
||||
+ elif get_option('vitastor').enabled()
|
||||
+ error('could not link libvitastor_client')
|
||||
+ else
|
||||
+ warning('could not link libvitastor_client, disabling')
|
||||
+ endif
|
||||
+ endif
|
||||
+endif
|
||||
+
|
||||
glusterfs = not_found
|
||||
glusterfs_ftruncate_has_stat = false
|
||||
glusterfs_iocb_has_stat = false
|
||||
@@ -1882,6 +1902,7 @@ endif
|
||||
config_host_data.set('CONFIG_OPENGL', opengl.found())
|
||||
config_host_data.set('CONFIG_PROFILER', get_option('profiler'))
|
||||
config_host_data.set('CONFIG_RBD', rbd.found())
|
||||
+config_host_data.set('CONFIG_VITASTOR', vitastor.found())
|
||||
config_host_data.set('CONFIG_RDMA', rdma.found())
|
||||
config_host_data.set('CONFIG_SDL', sdl.found())
|
||||
config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found())
|
||||
@@ -4020,6 +4041,7 @@ if spice_protocol.found()
|
||||
summary_info += {' spice server support': spice}
|
||||
endif
|
||||
summary_info += {'rbd support': rbd}
|
||||
+summary_info += {'vitastor support': vitastor}
|
||||
summary_info += {'smartcard support': cacard}
|
||||
summary_info += {'U2F support': u2f}
|
||||
summary_info += {'libusb': libusb}
|
||||
diff --git a/meson_options.txt b/meson_options.txt
|
||||
index fc9447d267..c4ac55c283 100644
|
||||
--- a/meson_options.txt
|
||||
+++ b/meson_options.txt
|
||||
@@ -173,6 +173,8 @@ option('lzo', type : 'feature', value : 'auto',
|
||||
description: 'lzo compression support')
|
||||
option('rbd', type : 'feature', value : 'auto',
|
||||
description: 'Ceph block device driver')
|
||||
+option('vitastor', type : 'feature', value : 'auto',
|
||||
+ description: 'Vitastor block device driver')
|
||||
option('opengl', type : 'feature', value : 'auto',
|
||||
description: 'OpenGL support')
|
||||
option('rdma', type : 'feature', value : 'auto',
|
||||
diff --git a/qapi/block-core.json b/qapi/block-core.json
|
||||
index c05ad0c07e..f5eb701604 100644
|
||||
--- a/qapi/block-core.json
|
||||
+++ b/qapi/block-core.json
|
||||
@@ -3308,7 +3308,7 @@
|
||||
'raw', 'rbd',
|
||||
{ 'name': 'replication', 'if': 'CONFIG_REPLICATION' },
|
||||
'pbs',
|
||||
- 'ssh', 'throttle', 'vdi', 'vhdx',
|
||||
+ 'ssh', 'throttle', 'vdi', 'vhdx', 'vitastor',
|
||||
{ 'name': 'virtio-blk-vfio-pci', 'if': 'CONFIG_BLKIO' },
|
||||
{ 'name': 'virtio-blk-vhost-user', 'if': 'CONFIG_BLKIO' },
|
||||
{ 'name': 'virtio-blk-vhost-vdpa', 'if': 'CONFIG_BLKIO' },
|
||||
@@ -4338,6 +4338,28 @@
|
||||
'*key-secret': 'str',
|
||||
'*server': ['InetSocketAddressBase'] } }
|
||||
|
||||
+##
|
||||
+# @BlockdevOptionsVitastor:
|
||||
+#
|
||||
+# Driver specific block device options for vitastor
|
||||
+#
|
||||
+# @image: Image name
|
||||
+# @inode: Inode number
|
||||
+# @pool: Pool ID
|
||||
+# @size: Desired image size in bytes
|
||||
+# @config-path: Path to Vitastor configuration
|
||||
+# @etcd-host: etcd connection address(es)
|
||||
+# @etcd-prefix: etcd key/value prefix
|
||||
+##
|
||||
+{ 'struct': 'BlockdevOptionsVitastor',
|
||||
+ 'data': { '*inode': 'uint64',
|
||||
+ '*pool': 'uint64',
|
||||
+ '*size': 'uint64',
|
||||
+ '*image': 'str',
|
||||
+ '*config-path': 'str',
|
||||
+ '*etcd-host': 'str',
|
||||
+ '*etcd-prefix': 'str' } }
|
||||
+
|
||||
##
|
||||
# @ReplicationMode:
|
||||
#
|
||||
@@ -4787,6 +4809,7 @@
|
||||
'throttle': 'BlockdevOptionsThrottle',
|
||||
'vdi': 'BlockdevOptionsGenericFormat',
|
||||
'vhdx': 'BlockdevOptionsGenericFormat',
|
||||
+ 'vitastor': 'BlockdevOptionsVitastor',
|
||||
'virtio-blk-vfio-pci':
|
||||
{ 'type': 'BlockdevOptionsVirtioBlkVfioPci',
|
||||
'if': 'CONFIG_BLKIO' },
|
||||
@@ -5187,6 +5210,17 @@
|
||||
'*cluster-size' : 'size',
|
||||
'*encrypt' : 'RbdEncryptionCreateOptions' } }
|
||||
|
||||
+##
|
||||
+# @BlockdevCreateOptionsVitastor:
|
||||
+#
|
||||
+# Driver specific image creation options for Vitastor.
|
||||
+#
|
||||
+# @size: Size of the virtual disk in bytes
|
||||
+##
|
||||
+{ 'struct': 'BlockdevCreateOptionsVitastor',
|
||||
+ 'data': { 'location': 'BlockdevOptionsVitastor',
|
||||
+ 'size': 'size' } }
|
||||
+
|
||||
##
|
||||
# @BlockdevVmdkSubformat:
|
||||
#
|
||||
@@ -5385,6 +5419,7 @@
|
||||
'ssh': 'BlockdevCreateOptionsSsh',
|
||||
'vdi': 'BlockdevCreateOptionsVdi',
|
||||
'vhdx': 'BlockdevCreateOptionsVhdx',
|
||||
+ 'vitastor': 'BlockdevCreateOptionsVitastor',
|
||||
'vmdk': 'BlockdevCreateOptionsVmdk',
|
||||
'vpc': 'BlockdevCreateOptionsVpc'
|
||||
} }
|
||||
diff --git a/scripts/ci/org.centos/stream/8/x86_64/configure b/scripts/ci/org.centos/stream/8/x86_64/configure
|
||||
index 6e8983f39c..1b0b9fcf3e 100755
|
||||
--- a/scripts/ci/org.centos/stream/8/x86_64/configure
|
||||
+++ b/scripts/ci/org.centos/stream/8/x86_64/configure
|
||||
@@ -32,7 +32,7 @@
|
||||
--with-git=meson \
|
||||
--with-git-submodules=update \
|
||||
--target-list="x86_64-softmmu" \
|
||||
---block-drv-rw-whitelist="qcow2,raw,file,host_device,nbd,iscsi,rbd,blkdebug,luks,null-co,nvme,copy-on-read,throttle,gluster" \
|
||||
+--block-drv-rw-whitelist="qcow2,raw,file,host_device,nbd,iscsi,rbd,vitastor,blkdebug,luks,null-co,nvme,copy-on-read,throttle,gluster" \
|
||||
--audio-drv-list="" \
|
||||
--block-drv-ro-whitelist="vmdk,vhdx,vpc,https,ssh" \
|
||||
--with-coroutine=ucontext \
|
||||
@@ -179,6 +179,7 @@
|
||||
--enable-opengl \
|
||||
--enable-pie \
|
||||
--enable-rbd \
|
||||
+--enable-vitastor \
|
||||
--enable-rdma \
|
||||
--enable-seccomp \
|
||||
--enable-snappy \
|
||||
diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh
|
||||
index 009fab1515..95914e6ebc 100644
|
||||
--- a/scripts/meson-buildoptions.sh
|
||||
+++ b/scripts/meson-buildoptions.sh
|
||||
@@ -144,6 +144,7 @@ meson_options_help() {
|
||||
printf "%s\n" ' qed qed image format support'
|
||||
printf "%s\n" ' qga-vss build QGA VSS support (broken with MinGW)'
|
||||
printf "%s\n" ' rbd Ceph block device driver'
|
||||
+ printf "%s\n" ' vitastor Vitastor block device driver'
|
||||
printf "%s\n" ' rdma Enable RDMA-based migration'
|
||||
printf "%s\n" ' replication replication support'
|
||||
printf "%s\n" ' sdl SDL user interface'
|
||||
@@ -392,6 +393,8 @@ _meson_option_parse() {
|
||||
--disable-qom-cast-debug) printf "%s" -Dqom_cast_debug=false ;;
|
||||
--enable-rbd) printf "%s" -Drbd=enabled ;;
|
||||
--disable-rbd) printf "%s" -Drbd=disabled ;;
|
||||
+ --enable-vitastor) printf "%s" -Dvitastor=enabled ;;
|
||||
+ --disable-vitastor) printf "%s" -Dvitastor=disabled ;;
|
||||
--enable-rdma) printf "%s" -Drdma=enabled ;;
|
||||
--disable-rdma) printf "%s" -Drdma=disabled ;;
|
||||
--enable-replication) printf "%s" -Dreplication=enabled ;;
|
@@ -24,4 +24,4 @@ rm fio
|
||||
mv fio-copy fio
|
||||
FIO=`rpm -qi fio | perl -e 'while(<>) { /^Epoch[\s:]+(\S+)/ && print "$1:"; /^Version[\s:]+(\S+)/ && print $1; /^Release[\s:]+(\S+)/ && print "-$1"; }'`
|
||||
perl -i -pe 's/(Requires:\s*fio)([^\n]+)?/$1 = '$FIO'/' $VITASTOR/rpm/vitastor-el$EL.spec
|
||||
tar --transform 's#^#vitastor-0.9.0/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-0.9.0$(rpm --eval '%dist').tar.gz *
|
||||
tar --transform 's#^#vitastor-0.9.3/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-0.9.3$(rpm --eval '%dist').tar.gz *
|
||||
|
@@ -35,7 +35,7 @@ ADD . /root/vitastor
|
||||
RUN set -e; \
|
||||
cd /root/vitastor/rpm; \
|
||||
sh build-tarball.sh; \
|
||||
cp /root/vitastor-0.9.0.el7.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp /root/vitastor-0.9.3.el7.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp vitastor-el7.spec ~/rpmbuild/SPECS/vitastor.spec; \
|
||||
cd ~/rpmbuild/SPECS/; \
|
||||
rpmbuild -ba vitastor.spec; \
|
||||
|
@@ -1,11 +1,11 @@
|
||||
Name: vitastor
|
||||
Version: 0.9.0
|
||||
Version: 0.9.3
|
||||
Release: 1%{?dist}
|
||||
Summary: Vitastor, a fast software-defined clustered block storage
|
||||
|
||||
License: Vitastor Network Public License 1.1
|
||||
URL: https://vitastor.io/
|
||||
Source0: vitastor-0.9.0.el7.tar.gz
|
||||
Source0: vitastor-0.9.3.el7.tar.gz
|
||||
|
||||
BuildRequires: liburing-devel >= 0.6
|
||||
BuildRequires: gperftools-devel
|
||||
|
@@ -35,7 +35,7 @@ ADD . /root/vitastor
|
||||
RUN set -e; \
|
||||
cd /root/vitastor/rpm; \
|
||||
sh build-tarball.sh; \
|
||||
cp /root/vitastor-0.9.0.el8.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp /root/vitastor-0.9.3.el8.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp vitastor-el8.spec ~/rpmbuild/SPECS/vitastor.spec; \
|
||||
cd ~/rpmbuild/SPECS/; \
|
||||
rpmbuild -ba vitastor.spec; \
|
||||
|
@@ -1,11 +1,11 @@
|
||||
Name: vitastor
|
||||
Version: 0.9.0
|
||||
Version: 0.9.3
|
||||
Release: 1%{?dist}
|
||||
Summary: Vitastor, a fast software-defined clustered block storage
|
||||
|
||||
License: Vitastor Network Public License 1.1
|
||||
URL: https://vitastor.io/
|
||||
Source0: vitastor-0.9.0.el8.tar.gz
|
||||
Source0: vitastor-0.9.3.el8.tar.gz
|
||||
|
||||
BuildRequires: liburing-devel >= 0.6
|
||||
BuildRequires: gperftools-devel
|
||||
|
@@ -18,7 +18,7 @@ ADD . /root/vitastor
|
||||
RUN set -e; \
|
||||
cd /root/vitastor/rpm; \
|
||||
sh build-tarball.sh; \
|
||||
cp /root/vitastor-0.9.0.el9.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp /root/vitastor-0.9.3.el9.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp vitastor-el9.spec ~/rpmbuild/SPECS/vitastor.spec; \
|
||||
cd ~/rpmbuild/SPECS/; \
|
||||
rpmbuild -ba vitastor.spec; \
|
||||
|
@@ -1,11 +1,11 @@
|
||||
Name: vitastor
|
||||
Version: 0.9.0
|
||||
Version: 0.9.3
|
||||
Release: 1%{?dist}
|
||||
Summary: Vitastor, a fast software-defined clustered block storage
|
||||
|
||||
License: Vitastor Network Public License 1.1
|
||||
URL: https://vitastor.io/
|
||||
Source0: vitastor-0.9.0.el9.tar.gz
|
||||
Source0: vitastor-0.9.3.el9.tar.gz
|
||||
|
||||
BuildRequires: liburing-devel >= 0.6
|
||||
BuildRequires: gperftools-devel
|
||||
|
@@ -16,7 +16,7 @@ if("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/local/?$")
|
||||
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
|
||||
endif()
|
||||
|
||||
add_definitions(-DVERSION="0.9.0")
|
||||
add_definitions(-DVERSION="0.9.3")
|
||||
add_definitions(-Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -I ${CMAKE_SOURCE_DIR}/src)
|
||||
if (${WITH_ASAN})
|
||||
add_definitions(-fsanitize=address -fno-omit-frame-pointer)
|
||||
|
@@ -143,34 +143,83 @@ uint64_t allocator::get_free_count()
|
||||
return free;
|
||||
}
|
||||
|
||||
// FIXME: Move to utils?
|
||||
void bitmap_set(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity)
|
||||
{
|
||||
if (start == 0)
|
||||
if (start == 0 && len == 32*bitmap_granularity)
|
||||
*((uint32_t*)bitmap) = UINT32_MAX;
|
||||
else if (start == 0 && len == 64*bitmap_granularity)
|
||||
*((uint64_t*)bitmap) = UINT64_MAX;
|
||||
else
|
||||
{
|
||||
if (len == 32*bitmap_granularity)
|
||||
unsigned bit_start = start / bitmap_granularity;
|
||||
unsigned bit_end = ((start + len) + bitmap_granularity - 1) / bitmap_granularity;
|
||||
while (bit_start < bit_end)
|
||||
{
|
||||
*((uint32_t*)bitmap) = UINT32_MAX;
|
||||
return;
|
||||
}
|
||||
else if (len == 64*bitmap_granularity)
|
||||
{
|
||||
*((uint64_t*)bitmap) = UINT64_MAX;
|
||||
return;
|
||||
}
|
||||
}
|
||||
unsigned bit_start = start / bitmap_granularity;
|
||||
unsigned bit_end = ((start + len) + bitmap_granularity - 1) / bitmap_granularity;
|
||||
while (bit_start < bit_end)
|
||||
{
|
||||
if (!(bit_start & 7) && bit_end >= bit_start+8)
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] = UINT8_MAX;
|
||||
bit_start += 8;
|
||||
}
|
||||
else
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] |= 1 << (bit_start % 8);
|
||||
bit_start++;
|
||||
if (!(bit_start & 7) && bit_end >= bit_start+8)
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] = UINT8_MAX;
|
||||
bit_start += 8;
|
||||
}
|
||||
else
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] |= 1 << (bit_start % 8);
|
||||
bit_start++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void bitmap_clear(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity)
|
||||
{
|
||||
if (start == 0 && len == 32*bitmap_granularity)
|
||||
*((uint32_t*)bitmap) = 0;
|
||||
else if (start == 0 && len == 64*bitmap_granularity)
|
||||
*((uint64_t*)bitmap) = 0;
|
||||
else
|
||||
{
|
||||
unsigned bit_start = start / bitmap_granularity;
|
||||
unsigned bit_end = ((start + len) + bitmap_granularity - 1) / bitmap_granularity;
|
||||
while (bit_start < bit_end)
|
||||
{
|
||||
if (!(bit_start & 7) && bit_end >= bit_start+8)
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] = 0;
|
||||
bit_start += 8;
|
||||
}
|
||||
else
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] &= (0xFF ^ (1 << (bit_start % 8)));
|
||||
bit_start++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool bitmap_check(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity)
|
||||
{
|
||||
bool r = false;
|
||||
if (start == 0 && len == 32*bitmap_granularity)
|
||||
r = !!*((uint32_t*)bitmap);
|
||||
else if (start == 0 && len == 64*bitmap_granularity)
|
||||
r = !!*((uint64_t*)bitmap);
|
||||
else
|
||||
{
|
||||
unsigned bit_start = start / bitmap_granularity;
|
||||
unsigned bit_end = ((start + len) + bitmap_granularity - 1) / bitmap_granularity;
|
||||
while (bit_start < bit_end)
|
||||
{
|
||||
if (!(bit_start & 7) && bit_end >= bit_start+8)
|
||||
{
|
||||
r = r || !!((uint8_t*)bitmap)[bit_start / 8];
|
||||
bit_start += 8;
|
||||
}
|
||||
else
|
||||
{
|
||||
r = r || (((uint8_t*)bitmap)[bit_start / 8] & (1 << (bit_start % 8)));
|
||||
bit_start++;
|
||||
}
|
||||
}
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
@@ -23,3 +23,5 @@ public:
|
||||
};
|
||||
|
||||
void bitmap_set(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity);
|
||||
void bitmap_clear(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity);
|
||||
bool bitmap_check(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity);
|
||||
|
@@ -77,6 +77,7 @@ Output:
|
||||
-EINVAL = invalid input parameters
|
||||
-ENOENT = requested object/version does not exist for reads
|
||||
-ENOSPC = no space left in the store for writes
|
||||
-EDOM = checksum error.
|
||||
- version = the version actually read or written
|
||||
|
||||
## BS_OP_DELETE
|
||||
|
@@ -40,10 +40,31 @@ void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config
|
||||
data_block_size = parse_size(config["block_size"]);
|
||||
journal_device = config["journal_device"];
|
||||
journal_offset = parse_size(config["journal_offset"]);
|
||||
disk_alignment = strtoull(config["disk_alignment"].c_str(), NULL, 10);
|
||||
journal_block_size = strtoull(config["journal_block_size"].c_str(), NULL, 10);
|
||||
meta_block_size = strtoull(config["meta_block_size"].c_str(), NULL, 10);
|
||||
bitmap_granularity = strtoull(config["bitmap_granularity"].c_str(), NULL, 10);
|
||||
disk_alignment = parse_size(config["disk_alignment"]);
|
||||
journal_block_size = parse_size(config["journal_block_size"]);
|
||||
meta_block_size = parse_size(config["meta_block_size"]);
|
||||
bitmap_granularity = parse_size(config["bitmap_granularity"]);
|
||||
meta_format = stoull_full(config["meta_format"]);
|
||||
cached_read_data = config["cached_read_data"] == "true" || config["cached_read_data"] == "yes" || config["cached_read_data"] == "1";
|
||||
cached_read_meta = cached_read_data && (meta_device == data_device || meta_device == "") &&
|
||||
config.find("cached_read_meta") == config.end() ||
|
||||
config["cached_read_meta"] == "true" || config["cached_read_meta"] == "yes" || config["cached_read_meta"] == "1";
|
||||
cached_read_journal = cached_read_meta && (journal_device == meta_device || journal_device == "") &&
|
||||
config.find("cached_read_journal") == config.end() ||
|
||||
config["cached_read_journal"] == "true" || config["cached_read_journal"] == "yes" || config["cached_read_journal"] == "1";
|
||||
if (config["data_csum_type"] == "crc32c")
|
||||
{
|
||||
data_csum_type = BLOCKSTORE_CSUM_CRC32C;
|
||||
}
|
||||
else if (config["data_csum_type"] == "" || config["data_csum_type"] == "none")
|
||||
{
|
||||
data_csum_type = BLOCKSTORE_CSUM_NONE;
|
||||
}
|
||||
else
|
||||
{
|
||||
throw std::runtime_error("data_csum_type="+config["data_csum_type"]+" is unsupported, only \"crc32c\" and \"none\" are supported");
|
||||
}
|
||||
csum_block_size = parse_size(config["csum_block_size"]);
|
||||
// Validate
|
||||
if (!data_block_size)
|
||||
{
|
||||
@@ -91,7 +112,23 @@ void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config
|
||||
}
|
||||
if (data_block_size % bitmap_granularity)
|
||||
{
|
||||
throw std::runtime_error("Block size must be a multiple of sparse write tracking granularity");
|
||||
throw std::runtime_error("Data block size must be a multiple of sparse write tracking granularity");
|
||||
}
|
||||
if (!data_csum_type)
|
||||
{
|
||||
csum_block_size = 0;
|
||||
}
|
||||
else if (!csum_block_size)
|
||||
{
|
||||
csum_block_size = bitmap_granularity;
|
||||
}
|
||||
if (csum_block_size && (csum_block_size % bitmap_granularity))
|
||||
{
|
||||
throw std::runtime_error("Checksum block size must be a multiple of sparse write tracking granularity");
|
||||
}
|
||||
if (csum_block_size && (data_block_size % csum_block_size))
|
||||
{
|
||||
throw std::runtime_error("Checksum block size must be a divisor of data block size");
|
||||
}
|
||||
if (meta_device == "")
|
||||
{
|
||||
@@ -110,7 +147,9 @@ void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config
|
||||
throw std::runtime_error("journal_offset must be a multiple of journal_block_size = "+std::to_string(journal_block_size));
|
||||
}
|
||||
clean_entry_bitmap_size = data_block_size / bitmap_granularity / 8;
|
||||
clean_entry_size = sizeof(clean_disk_entry) + 2*clean_entry_bitmap_size;
|
||||
clean_dyn_size = clean_entry_bitmap_size*2 + (csum_block_size
|
||||
? data_block_size/csum_block_size*(data_csum_type & 0xFF) : 0);
|
||||
clean_entry_size = sizeof(clean_disk_entry) + clean_dyn_size + 4 /*entry_csum*/;
|
||||
}
|
||||
|
||||
void blockstore_disk_t::calc_lengths(bool skip_meta_check)
|
||||
@@ -160,6 +199,25 @@ void blockstore_disk_t::calc_lengths(bool skip_meta_check)
|
||||
// required metadata size
|
||||
block_count = data_len / data_block_size;
|
||||
meta_len = (1 + (block_count - 1 + meta_block_size / clean_entry_size) / (meta_block_size / clean_entry_size)) * meta_block_size;
|
||||
if (meta_format == BLOCKSTORE_META_FORMAT_V1 ||
|
||||
!meta_format && !skip_meta_check && meta_area_size < meta_len && !data_csum_type)
|
||||
{
|
||||
uint64_t clean_entry_v0_size = sizeof(clean_disk_entry) + 2*clean_entry_bitmap_size;
|
||||
uint64_t meta_v0_len = (1 + (block_count - 1 + meta_block_size / clean_entry_v0_size)
|
||||
/ (meta_block_size / clean_entry_v0_size)) * meta_block_size;
|
||||
if (meta_format == BLOCKSTORE_META_FORMAT_V1 || meta_area_size >= meta_v0_len)
|
||||
{
|
||||
// Old metadata fits.
|
||||
printf("Warning: Using old metadata format without checksums because the new format doesn't fit into provided area\n");
|
||||
clean_entry_size = clean_entry_v0_size;
|
||||
meta_len = meta_v0_len;
|
||||
meta_format = BLOCKSTORE_META_FORMAT_V1;
|
||||
}
|
||||
else
|
||||
meta_format = BLOCKSTORE_META_FORMAT_V2;
|
||||
}
|
||||
else
|
||||
meta_format = BLOCKSTORE_META_FORMAT_V2;
|
||||
if (!skip_meta_check && meta_area_size < meta_len)
|
||||
{
|
||||
throw std::runtime_error("Metadata area is too small, need at least "+std::to_string(meta_len)+" bytes");
|
||||
@@ -237,6 +295,18 @@ void blockstore_disk_t::open_data()
|
||||
{
|
||||
throw std::runtime_error(std::string("Failed to lock data device: ") + strerror(errno));
|
||||
}
|
||||
if (cached_read_data)
|
||||
{
|
||||
read_data_fd = open(data_device.c_str(), O_RDWR);
|
||||
if (read_data_fd == -1)
|
||||
{
|
||||
throw std::runtime_error("Failed to open data device "+data_device+": "+std::string(strerror(errno)));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
read_data_fd = data_fd;
|
||||
}
|
||||
}
|
||||
|
||||
void blockstore_disk_t::open_meta()
|
||||
@@ -257,6 +327,18 @@ void blockstore_disk_t::open_meta()
|
||||
{
|
||||
throw std::runtime_error(std::string("Failed to lock metadata device: ") + strerror(errno));
|
||||
}
|
||||
if (cached_read_meta)
|
||||
{
|
||||
read_meta_fd = open(meta_device.c_str(), O_RDWR);
|
||||
if (read_meta_fd == -1)
|
||||
{
|
||||
throw std::runtime_error("Failed to open metadata device "+meta_device+": "+std::string(strerror(errno)));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
read_meta_fd = meta_fd;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -275,6 +357,22 @@ void blockstore_disk_t::open_meta()
|
||||
") is not a multiple of data device sector size ("+std::to_string(meta_device_sect)+")"
|
||||
);
|
||||
}
|
||||
if (!cached_read_meta)
|
||||
{
|
||||
read_meta_fd = meta_fd;
|
||||
}
|
||||
else if (meta_device == data_device && cached_read_data)
|
||||
{
|
||||
read_meta_fd = read_data_fd;
|
||||
}
|
||||
else
|
||||
{
|
||||
read_meta_fd = open(meta_device.c_str(), O_RDWR);
|
||||
if (read_meta_fd == -1)
|
||||
{
|
||||
throw std::runtime_error("Failed to open metadata device "+meta_device+": "+std::string(strerror(errno)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void blockstore_disk_t::open_journal()
|
||||
@@ -309,6 +407,26 @@ void blockstore_disk_t::open_journal()
|
||||
") is not a multiple of journal device sector size ("+std::to_string(journal_device_sect)+")"
|
||||
);
|
||||
}
|
||||
if (!cached_read_journal)
|
||||
{
|
||||
read_journal_fd = journal_fd;
|
||||
}
|
||||
else if (journal_device == meta_device && cached_read_meta)
|
||||
{
|
||||
read_journal_fd = read_meta_fd;
|
||||
}
|
||||
else if (journal_device == data_device && cached_read_data)
|
||||
{
|
||||
read_journal_fd = read_data_fd;
|
||||
}
|
||||
else
|
||||
{
|
||||
read_journal_fd = open(journal_device.c_str(), O_RDWR);
|
||||
if (read_journal_fd == -1)
|
||||
{
|
||||
throw std::runtime_error("Failed to open journal device "+journal_device+": "+std::string(strerror(errno)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void blockstore_disk_t::close_all()
|
||||
@@ -319,5 +437,12 @@ void blockstore_disk_t::close_all()
|
||||
close(meta_fd);
|
||||
if (journal_fd >= 0 && journal_fd != meta_fd)
|
||||
close(journal_fd);
|
||||
if (read_data_fd >= 0 && read_data_fd != data_fd)
|
||||
close(read_data_fd);
|
||||
if (read_meta_fd >= 0 && read_meta_fd != meta_fd)
|
||||
close(read_meta_fd);
|
||||
if (read_journal_fd >= 0 && read_journal_fd != journal_fd)
|
||||
close(read_journal_fd);
|
||||
data_fd = meta_fd = journal_fd = -1;
|
||||
read_data_fd = read_meta_fd = read_journal_fd = -1;
|
||||
}
|
||||
|
@@ -8,6 +8,10 @@
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#define BLOCKSTORE_CSUM_NONE 0
|
||||
// Lower byte of checksum type is its length
|
||||
#define BLOCKSTORE_CSUM_CRC32C 0x104
|
||||
|
||||
struct blockstore_disk_t
|
||||
{
|
||||
std::string data_device, meta_device, journal_device;
|
||||
@@ -21,17 +25,24 @@ struct blockstore_disk_t
|
||||
uint64_t meta_block_size = 4096;
|
||||
// Sparse write tracking granularity. 4 KB is a good choice. Must be a multiple of disk_alignment
|
||||
uint64_t bitmap_granularity = 4096;
|
||||
// Data checksum type, BLOCKSTORE_CSUM_NONE or BLOCKSTORE_CSUM_CRC32C
|
||||
uint32_t data_csum_type = BLOCKSTORE_CSUM_NONE;
|
||||
// Checksum block size, must be a multiple of bitmap_granularity
|
||||
uint32_t csum_block_size = 4096;
|
||||
// By default, Blockstore locks all opened devices exclusively. This option can be used to disable locking
|
||||
bool disable_flock = false;
|
||||
// Use linux page cache for reads. If enabled, separate buffered FDs will be opened for reading
|
||||
bool cached_read_data = false, cached_read_meta = false, cached_read_journal = false;
|
||||
|
||||
int meta_fd = -1, data_fd = -1, journal_fd = -1;
|
||||
uint64_t meta_offset, meta_device_sect, meta_device_size, meta_len;
|
||||
int read_meta_fd = -1, read_data_fd = -1, read_journal_fd = -1;
|
||||
uint64_t meta_offset, meta_device_sect, meta_device_size, meta_len, meta_format = 0;
|
||||
uint64_t data_offset, data_device_sect, data_device_size, data_len;
|
||||
uint64_t journal_offset, journal_device_sect, journal_device_size, journal_len;
|
||||
|
||||
uint32_t block_order;
|
||||
uint64_t block_count;
|
||||
uint32_t clean_entry_bitmap_size = 0, clean_entry_size = 0;
|
||||
uint32_t clean_entry_bitmap_size = 0, clean_entry_size = 0, clean_dyn_size = 0;
|
||||
|
||||
void parse_config(std::map<std::string, std::string> & config);
|
||||
void open_data();
|
||||
@@ -39,4 +50,13 @@ struct blockstore_disk_t
|
||||
void open_journal();
|
||||
void calc_lengths(bool skip_meta_check = false);
|
||||
void close_all();
|
||||
|
||||
inline uint64_t dirty_dyn_size(uint64_t offset, uint64_t len)
|
||||
{
|
||||
// Checksums may be partial if write is not aligned with csum_block_size
|
||||
return clean_entry_bitmap_size + (csum_block_size && len > 0
|
||||
? ((offset+len+csum_block_size-1)/csum_block_size - offset/csum_block_size)
|
||||
* (data_csum_type & 0xFF)
|
||||
: 0);
|
||||
}
|
||||
};
|
||||
|
File diff suppressed because it is too large
Load Diff
@@ -1,10 +1,22 @@
|
||||
// Copyright (c) Vitaliy Filippov, 2019+
|
||||
// License: VNPL-1.1 (see README.md for details)
|
||||
|
||||
#define COPY_BUF_JOURNAL 1
|
||||
#define COPY_BUF_DATA 2
|
||||
#define COPY_BUF_ZERO 4
|
||||
#define COPY_BUF_CSUM_FILL 8
|
||||
#define COPY_BUF_COALESCED 16
|
||||
#define COPY_BUF_META_BLOCK 32
|
||||
#define COPY_BUF_JOURNALED_BIG 64
|
||||
|
||||
struct copy_buffer_t
|
||||
{
|
||||
uint64_t offset, len;
|
||||
int copy_flags;
|
||||
uint64_t offset, len, disk_offset;
|
||||
uint64_t journal_sector; // only for reads: sector+1 if used and !journal.inmemory, otherwise 0
|
||||
void *buf;
|
||||
uint8_t *csum_buf;
|
||||
int *dyn_data;
|
||||
};
|
||||
|
||||
struct meta_sector_t
|
||||
@@ -37,7 +49,7 @@ class journal_flusher_co
|
||||
{
|
||||
blockstore_impl_t *bs;
|
||||
journal_flusher_t *flusher;
|
||||
int wait_state, wait_count;
|
||||
int wait_state, wait_count, wait_journal_count;
|
||||
struct io_uring_sqe *sqe;
|
||||
struct ring_data_t *data;
|
||||
|
||||
@@ -46,28 +58,39 @@ class journal_flusher_co
|
||||
obj_ver_id cur;
|
||||
std::map<obj_ver_id, dirty_entry>::iterator dirty_it, dirty_start, dirty_end;
|
||||
std::map<object_id, uint64_t>::iterator repeat_it;
|
||||
std::function<void(ring_data_t*)> simple_callback_r, simple_callback_w;
|
||||
std::function<void(ring_data_t*)> simple_callback_r, simple_callback_rj, simple_callback_w;
|
||||
|
||||
bool skip_copy, has_delete, has_writes;
|
||||
std::vector<copy_buffer_t> v;
|
||||
std::vector<copy_buffer_t>::iterator it;
|
||||
int i;
|
||||
bool fill_incomplete, cleared_incomplete;
|
||||
int read_to_fill_incomplete;
|
||||
int copy_count;
|
||||
uint64_t clean_loc, old_clean_loc;
|
||||
uint64_t clean_loc, clean_ver, old_clean_loc, old_clean_ver;
|
||||
flusher_meta_write_t meta_old, meta_new;
|
||||
bool clean_init_bitmap;
|
||||
uint64_t clean_bitmap_offset, clean_bitmap_len;
|
||||
void *new_clean_bitmap;
|
||||
uint8_t *clean_init_dyn_ptr;
|
||||
uint8_t *new_clean_bitmap;
|
||||
|
||||
uint64_t new_trim_pos;
|
||||
|
||||
// local: scan_dirty()
|
||||
uint64_t offset, end_offset, submit_offset, submit_len;
|
||||
|
||||
friend class journal_flusher_t;
|
||||
bool scan_dirty(int wait_base);
|
||||
void scan_dirty();
|
||||
bool read_dirty(int wait_base);
|
||||
bool modify_meta_do_reads(int wait_base);
|
||||
bool wait_meta_reads(int wait_base);
|
||||
bool modify_meta_read(uint64_t meta_loc, flusher_meta_write_t &wr, int wait_base);
|
||||
bool clear_incomplete_csum_block_bits(int wait_base);
|
||||
void calc_block_checksums(uint32_t *new_data_csums, bool skip_overwrites);
|
||||
void update_metadata_entry();
|
||||
bool write_meta_block(flusher_meta_write_t & meta_block, int wait_base);
|
||||
void update_clean_db();
|
||||
void free_data_blocks();
|
||||
bool fsync_batch(bool fsync_meta, int wait_base);
|
||||
bool trim_journal(int wait_base);
|
||||
void free_buffers();
|
||||
public:
|
||||
journal_flusher_co();
|
||||
bool loop();
|
||||
@@ -95,14 +118,16 @@ class journal_flusher_t
|
||||
|
||||
std::map<uint64_t, meta_sector_t> meta_sectors;
|
||||
std::deque<object_id> flush_queue;
|
||||
std::map<object_id, uint64_t> flush_versions;
|
||||
std::map<object_id, uint64_t> flush_versions; // FIXME: consider unordered_map?
|
||||
|
||||
bool try_find_older(std::map<obj_ver_id, dirty_entry>::iterator & dirty_end, obj_ver_id & cur);
|
||||
bool try_find_other(std::map<obj_ver_id, dirty_entry>::iterator & dirty_end, obj_ver_id & cur);
|
||||
|
||||
public:
|
||||
journal_flusher_t(blockstore_impl_t *bs);
|
||||
~journal_flusher_t();
|
||||
void loop();
|
||||
bool is_trim_wanted() { return trim_wanted; }
|
||||
bool is_active();
|
||||
void mark_trim_possible();
|
||||
void request_trim();
|
||||
@@ -111,4 +136,5 @@ public:
|
||||
void unshift_flush(obj_ver_id oid, bool force);
|
||||
void remove_flush(object_id oid);
|
||||
void dump_diagnostics();
|
||||
bool is_mutated(uint64_t clean_loc);
|
||||
};
|
||||
|
@@ -13,6 +13,7 @@ blockstore_impl_t::blockstore_impl_t(blockstore_config_t & config, ring_loop_t *
|
||||
initialized = 0;
|
||||
parse_config(config, true);
|
||||
zero_object = (uint8_t*)memalign_or_die(MEM_ALIGNMENT, dsk.data_block_size);
|
||||
alloc_dyn_data = dsk.clean_dyn_size > sizeof(void*) || dsk.csum_block_size > 0;
|
||||
try
|
||||
{
|
||||
dsk.open_data();
|
||||
@@ -38,8 +39,8 @@ blockstore_impl_t::~blockstore_impl_t()
|
||||
dsk.close_all();
|
||||
if (metadata_buffer)
|
||||
free(metadata_buffer);
|
||||
if (clean_bitmap)
|
||||
free(clean_bitmap);
|
||||
if (clean_bitmaps)
|
||||
free(clean_bitmaps);
|
||||
}
|
||||
|
||||
bool blockstore_impl_t::is_started()
|
||||
|
@@ -93,11 +93,10 @@
|
||||
|
||||
// "VITAstor"
|
||||
#define BLOCKSTORE_META_MAGIC_V1 0x726F747341544956l
|
||||
#define BLOCKSTORE_META_VERSION_V1 1
|
||||
#define BLOCKSTORE_META_FORMAT_V1 1
|
||||
#define BLOCKSTORE_META_FORMAT_V2 2
|
||||
|
||||
// metadata header (superblock)
|
||||
// FIXME: After adding the OSD superblock, add a key to metadata
|
||||
// and journal headers to check if they belong to the same OSD
|
||||
struct __attribute__((__packed__)) blockstore_meta_header_v1_t
|
||||
{
|
||||
uint64_t zero;
|
||||
@@ -108,14 +107,29 @@ struct __attribute__((__packed__)) blockstore_meta_header_v1_t
|
||||
uint32_t bitmap_granularity;
|
||||
};
|
||||
|
||||
struct __attribute__((__packed__)) blockstore_meta_header_v2_t
|
||||
{
|
||||
uint64_t zero;
|
||||
uint64_t magic;
|
||||
uint64_t version;
|
||||
uint32_t meta_block_size;
|
||||
uint32_t data_block_size;
|
||||
uint32_t bitmap_granularity;
|
||||
uint32_t data_csum_type;
|
||||
uint32_t csum_block_size;
|
||||
uint32_t header_csum;
|
||||
};
|
||||
|
||||
// 32 bytes = 24 bytes + block bitmap (4 bytes by default) + external attributes (also bitmap, 4 bytes by default)
|
||||
// per "clean" entry on disk with fixed metadata tables
|
||||
// FIXME: maybe add crc32's to metadata
|
||||
struct __attribute__((__packed__)) clean_disk_entry
|
||||
{
|
||||
object_id oid;
|
||||
uint64_t version;
|
||||
uint8_t bitmap[];
|
||||
// Two more fields come after bitmap in metadata version 2:
|
||||
// uint32_t data_csum[];
|
||||
// uint32_t entry_csum;
|
||||
};
|
||||
|
||||
// 32 = 16 + 16 bytes per "clean" entry in memory (object_id => clean_entry)
|
||||
@@ -125,7 +139,7 @@ struct __attribute__((__packed__)) clean_entry
|
||||
uint64_t location;
|
||||
};
|
||||
|
||||
// 64 = 24 + 40 bytes per dirty entry in memory (obj_ver_id => dirty_entry)
|
||||
// 64 = 24 + 40 bytes per dirty entry in memory (obj_ver_id => dirty_entry). Plus checksums
|
||||
struct __attribute__((__packed__)) dirty_entry
|
||||
{
|
||||
uint32_t state;
|
||||
@@ -134,7 +148,7 @@ struct __attribute__((__packed__)) dirty_entry
|
||||
uint32_t offset; // data offset within object (stripe)
|
||||
uint32_t len; // data length
|
||||
uint64_t journal_sector; // journal sector used for this entry
|
||||
void* bitmap; // either external bitmap itself when it fits, or a pointer to it when it doesn't
|
||||
void* dyn_data; // dynamic data: external bitmap and data block checksums. may be a pointer to the in-memory journal
|
||||
};
|
||||
|
||||
// - Sync must be submitted after previous writes/deletes (not before!)
|
||||
@@ -163,12 +177,23 @@ struct __attribute__((__packed__)) dirty_entry
|
||||
// Suspend operation until there is some free space on the data device
|
||||
#define WAIT_FREE 5
|
||||
|
||||
struct fulfill_read_t
|
||||
struct used_clean_obj_t
|
||||
{
|
||||
uint64_t offset, len;
|
||||
uint64_t journal_sector; // sector+1 if used and !journal.inmemory, otherwise 0
|
||||
int refs;
|
||||
bool was_freed; // was freed by a parallel flush?
|
||||
bool was_changed; // was changed by a parallel flush?
|
||||
};
|
||||
|
||||
// https://github.com/algorithm-ninja/cpp-btree
|
||||
// https://github.com/greg7mdp/sparsepp/ was used previously, but it was TERRIBLY slow after resizing
|
||||
// with sparsepp, random reads dropped to ~700 iops very fast with just as much as ~32k objects in the DB
|
||||
typedef btree::btree_map<object_id, clean_entry> blockstore_clean_db_t;
|
||||
typedef std::map<obj_ver_id, dirty_entry> blockstore_dirty_db_t;
|
||||
|
||||
#include "blockstore_init.h"
|
||||
|
||||
#include "blockstore_flush.h"
|
||||
|
||||
#define PRIV(op) ((blockstore_op_private_t*)(op)->private_data)
|
||||
#define FINISH_OP(op) PRIV(op)->~blockstore_op_private_t(); std::function<void (blockstore_op_t*)>(op->callback)(op)
|
||||
|
||||
@@ -181,7 +206,8 @@ struct blockstore_op_private_t
|
||||
int op_state;
|
||||
|
||||
// Read
|
||||
std::vector<fulfill_read_t> read_vec;
|
||||
uint64_t clean_block_used;
|
||||
std::vector<copy_buffer_t> read_vec;
|
||||
|
||||
// Sync, write
|
||||
int min_flushed_journal_sector, max_flushed_journal_sector;
|
||||
@@ -197,16 +223,6 @@ struct blockstore_op_private_t
|
||||
int sync_small_checked, sync_big_checked;
|
||||
};
|
||||
|
||||
// https://github.com/algorithm-ninja/cpp-btree
|
||||
// https://github.com/greg7mdp/sparsepp/ was used previously, but it was TERRIBLY slow after resizing
|
||||
// with sparsepp, random reads dropped to ~700 iops very fast with just as much as ~32k objects in the DB
|
||||
typedef btree::btree_map<object_id, clean_entry> blockstore_clean_db_t;
|
||||
typedef std::map<obj_ver_id, dirty_entry> blockstore_dirty_db_t;
|
||||
|
||||
#include "blockstore_init.h"
|
||||
|
||||
#include "blockstore_flush.h"
|
||||
|
||||
typedef uint32_t pool_id_t;
|
||||
typedef uint64_t pool_pg_id_t;
|
||||
|
||||
@@ -253,7 +269,7 @@ class blockstore_impl_t
|
||||
|
||||
std::map<pool_id_t, pool_shard_settings_t> clean_db_settings;
|
||||
std::map<pool_pg_id_t, blockstore_clean_db_t> clean_db_shards;
|
||||
uint8_t *clean_bitmap = NULL;
|
||||
uint8_t *clean_bitmaps = NULL;
|
||||
blockstore_dirty_db_t dirty_db;
|
||||
std::vector<blockstore_op_t*> submit_queue;
|
||||
std::vector<obj_ver_id> unsynced_big_writes, unsynced_small_writes;
|
||||
@@ -267,6 +283,10 @@ class blockstore_impl_t
|
||||
journal_flusher_t *flusher;
|
||||
int big_to_flush = 0;
|
||||
int write_iodepth = 0;
|
||||
bool alloc_dyn_data = false;
|
||||
|
||||
// clean data blocks referenced by read operations
|
||||
std::map<uint64_t, used_clean_obj_t> used_clean_objects;
|
||||
|
||||
bool live = false, queue_stall = false;
|
||||
ring_loop_t *ringloop;
|
||||
@@ -310,8 +330,30 @@ class blockstore_impl_t
|
||||
|
||||
// Read
|
||||
int dequeue_read(blockstore_op_t *read_op);
|
||||
int fulfill_read(blockstore_op_t *read_op, uint64_t &fulfilled, uint32_t item_start, uint32_t item_end,
|
||||
uint32_t item_state, uint64_t item_version, uint64_t item_location, uint64_t journal_sector);
|
||||
void find_holes(std::vector<copy_buffer_t> & read_vec, uint32_t item_start, uint32_t item_end,
|
||||
std::function<int(int, bool, uint32_t, uint32_t)> callback);
|
||||
int fulfill_read(blockstore_op_t *read_op,
|
||||
uint64_t &fulfilled, uint32_t item_start, uint32_t item_end,
|
||||
uint32_t item_state, uint64_t item_version, uint64_t item_location,
|
||||
uint64_t journal_sector, uint8_t *csum, int *dyn_data);
|
||||
bool fulfill_clean_read(blockstore_op_t *read_op, uint64_t & fulfilled,
|
||||
uint8_t *clean_entry_bitmap, int *dyn_data,
|
||||
uint32_t item_start, uint32_t item_end, uint64_t clean_loc, uint64_t clean_ver);
|
||||
int fill_partial_checksum_blocks(std::vector<copy_buffer_t> & rv, uint64_t & fulfilled,
|
||||
uint8_t *clean_entry_bitmap, int *dyn_data, bool from_journal, uint8_t *read_buf, uint64_t read_offset, uint64_t read_end);
|
||||
int pad_journal_read(std::vector<copy_buffer_t> & rv, copy_buffer_t & cp,
|
||||
uint64_t dirty_offset, uint64_t dirty_end, uint64_t dirty_loc, uint8_t *csum_ptr, int *dyn_data,
|
||||
uint64_t offset, uint64_t submit_len, uint64_t & blk_begin, uint64_t & blk_end, uint8_t* & blk_buf);
|
||||
bool read_range_fulfilled(std::vector<copy_buffer_t> & rv, uint64_t & fulfilled, uint8_t *read_buf,
|
||||
uint8_t *clean_entry_bitmap, uint32_t item_start, uint32_t item_end);
|
||||
bool read_checksum_block(blockstore_op_t *op, int rv_pos, uint64_t &fulfilled, uint64_t clean_loc);
|
||||
uint8_t* read_clean_meta_block(blockstore_op_t *read_op, uint64_t clean_loc, int rv_pos);
|
||||
bool verify_padded_checksums(uint8_t *clean_entry_bitmap, uint8_t *csum_buf, uint32_t offset,
|
||||
iovec *iov, int n_iov, std::function<void(uint32_t, uint32_t, uint32_t)> bad_block_cb);
|
||||
bool verify_journal_checksums(uint8_t *csums, uint32_t offset,
|
||||
iovec *iov, int n_iov, std::function<void(uint32_t, uint32_t, uint32_t)> bad_block_cb);
|
||||
bool verify_clean_padded_checksums(blockstore_op_t *op, uint64_t clean_loc, uint8_t *dyn_data, bool from_journal,
|
||||
iovec *iov, int n_iov, std::function<void(uint32_t, uint32_t, uint32_t)> bad_block_cb);
|
||||
int fulfill_read_push(blockstore_op_t *op, void *buf, uint64_t offset, uint64_t len,
|
||||
uint32_t item_state, uint64_t item_version);
|
||||
void handle_read_event(ring_data_t *data, blockstore_op_t *op);
|
||||
@@ -342,6 +384,7 @@ class blockstore_impl_t
|
||||
int continue_rollback(blockstore_op_t *op);
|
||||
void mark_rolled_back(const obj_ver_id & ov);
|
||||
void erase_dirty(blockstore_dirty_db_t::iterator dirty_start, blockstore_dirty_db_t::iterator dirty_end, uint64_t clean_loc);
|
||||
void free_dirty_dyn_data(dirty_entry & e);
|
||||
|
||||
// List
|
||||
void process_list(blockstore_op_t *op);
|
||||
|
@@ -65,7 +65,7 @@ int blockstore_init_meta::loop()
|
||||
GET_SQE();
|
||||
data->iov = { metadata_buffer, bs->dsk.meta_block_size };
|
||||
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
|
||||
my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset);
|
||||
my_uring_prep_readv(sqe, bs->dsk.read_meta_fd, &data->iov, 1, bs->dsk.meta_offset);
|
||||
bs->ringloop->submit();
|
||||
submitted++;
|
||||
resume_1:
|
||||
@@ -77,13 +77,20 @@ resume_1:
|
||||
if (iszero((uint64_t*)metadata_buffer, bs->dsk.meta_block_size / sizeof(uint64_t)))
|
||||
{
|
||||
{
|
||||
blockstore_meta_header_v1_t *hdr = (blockstore_meta_header_v1_t *)metadata_buffer;
|
||||
blockstore_meta_header_v2_t *hdr = (blockstore_meta_header_v2_t *)metadata_buffer;
|
||||
hdr->zero = 0;
|
||||
hdr->magic = BLOCKSTORE_META_MAGIC_V1;
|
||||
hdr->version = BLOCKSTORE_META_VERSION_V1;
|
||||
hdr->version = bs->dsk.meta_format;
|
||||
hdr->meta_block_size = bs->dsk.meta_block_size;
|
||||
hdr->data_block_size = bs->dsk.data_block_size;
|
||||
hdr->bitmap_granularity = bs->dsk.bitmap_granularity;
|
||||
if (bs->dsk.meta_format >= BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
hdr->data_csum_type = bs->dsk.data_csum_type;
|
||||
hdr->csum_block_size = bs->dsk.csum_block_size;
|
||||
hdr->header_csum = 0;
|
||||
hdr->header_csum = crc32c(0, hdr, sizeof(*hdr));
|
||||
}
|
||||
}
|
||||
if (bs->readonly)
|
||||
{
|
||||
@@ -109,28 +116,62 @@ resume_1:
|
||||
}
|
||||
else
|
||||
{
|
||||
blockstore_meta_header_v1_t *hdr = (blockstore_meta_header_v1_t *)metadata_buffer;
|
||||
if (hdr->zero != 0 ||
|
||||
hdr->magic != BLOCKSTORE_META_MAGIC_V1 ||
|
||||
hdr->version != BLOCKSTORE_META_VERSION_V1)
|
||||
blockstore_meta_header_v2_t *hdr = (blockstore_meta_header_v2_t *)metadata_buffer;
|
||||
if (hdr->zero != 0 || hdr->magic != BLOCKSTORE_META_MAGIC_V1 || hdr->version < BLOCKSTORE_META_FORMAT_V1)
|
||||
{
|
||||
printf(
|
||||
"Metadata is corrupt or old version.\n"
|
||||
" If this is a new OSD please zero out the metadata area before starting it.\n"
|
||||
" If you need to upgrade from 0.5.x please request it via the issue tracker.\n"
|
||||
"Metadata is corrupt or too old (pre-0.6.x).\n"
|
||||
" If this is a new OSD, please zero out the metadata area before starting it.\n"
|
||||
" If you need to upgrade from 0.5.x, convert metadata with vitastor-disk.\n"
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
if (hdr->version == BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
uint32_t csum = hdr->header_csum;
|
||||
hdr->header_csum = 0;
|
||||
if (crc32c(0, hdr, sizeof(*hdr)) != csum)
|
||||
{
|
||||
printf("Metadata header is corrupt (checksum mismatch).\n");
|
||||
exit(1);
|
||||
}
|
||||
hdr->header_csum = csum;
|
||||
bs->dsk.meta_format = BLOCKSTORE_META_FORMAT_V2;
|
||||
}
|
||||
else if (hdr->version == BLOCKSTORE_META_FORMAT_V1)
|
||||
{
|
||||
hdr->data_csum_type = 0;
|
||||
hdr->csum_block_size = 0;
|
||||
hdr->header_csum = 0;
|
||||
// Enable compatibility mode - entries without checksums
|
||||
bs->dsk.clean_entry_size = sizeof(clean_disk_entry) + bs->dsk.clean_entry_bitmap_size*2;
|
||||
bs->dsk.meta_len = (1 + (bs->dsk.block_count - 1 + bs->dsk.meta_block_size / bs->dsk.clean_entry_size)
|
||||
/ (bs->dsk.meta_block_size / bs->dsk.clean_entry_size)) * bs->dsk.meta_block_size;
|
||||
bs->dsk.meta_format = BLOCKSTORE_META_FORMAT_V1;
|
||||
printf("Warning: Starting with metadata in the old format without checksums, as stored on disk\n");
|
||||
}
|
||||
else if (hdr->version > BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
printf(
|
||||
"Metadata format is too new for me (stored version is %lu, max supported %u).\n",
|
||||
hdr->version, BLOCKSTORE_META_FORMAT_V2
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
if (hdr->meta_block_size != bs->dsk.meta_block_size ||
|
||||
hdr->data_block_size != bs->dsk.data_block_size ||
|
||||
hdr->bitmap_granularity != bs->dsk.bitmap_granularity)
|
||||
hdr->bitmap_granularity != bs->dsk.bitmap_granularity ||
|
||||
hdr->data_csum_type != bs->dsk.data_csum_type ||
|
||||
hdr->csum_block_size != bs->dsk.csum_block_size)
|
||||
{
|
||||
printf(
|
||||
"Configuration stored in metadata superblock"
|
||||
" (meta_block_size=%u, data_block_size=%u, bitmap_granularity=%u)"
|
||||
" differs from OSD configuration (%lu/%u/%lu).\n",
|
||||
" (meta_block_size=%u, data_block_size=%u, bitmap_granularity=%u, data_csum_type=%u, csum_block_size=%u)"
|
||||
" differs from OSD configuration (%lu/%u/%lu, %u/%u).\n",
|
||||
hdr->meta_block_size, hdr->data_block_size, hdr->bitmap_granularity,
|
||||
bs->dsk.meta_block_size, bs->dsk.data_block_size, bs->dsk.bitmap_granularity
|
||||
hdr->data_csum_type, hdr->csum_block_size,
|
||||
bs->dsk.meta_block_size, bs->dsk.data_block_size, bs->dsk.bitmap_granularity,
|
||||
bs->dsk.data_csum_type, bs->dsk.csum_block_size
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
@@ -161,7 +202,7 @@ resume_2:
|
||||
data->iov = { bufs[i].buf, bufs[i].size };
|
||||
data->callback = [this, i](ring_data_t *data) { handle_event(data, i); };
|
||||
if (!zero_on_init)
|
||||
my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + bufs[i].offset);
|
||||
my_uring_prep_readv(sqe, bs->dsk.read_meta_fd, &data->iov, 1, bs->dsk.meta_offset + bufs[i].offset);
|
||||
else
|
||||
{
|
||||
// Fill metadata with zeroes
|
||||
@@ -218,7 +259,7 @@ resume_2:
|
||||
GET_SQE();
|
||||
data->iov = { metadata_buffer, bs->dsk.meta_block_size };
|
||||
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
|
||||
my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + (1+next_offset)*bs->dsk.meta_block_size);
|
||||
my_uring_prep_readv(sqe, bs->dsk.read_meta_fd, &data->iov, 1, bs->dsk.meta_offset + (1+next_offset)*bs->dsk.meta_block_size);
|
||||
submitted++;
|
||||
resume_5:
|
||||
if (submitted > 0)
|
||||
@@ -279,12 +320,22 @@ bool blockstore_init_meta::handle_meta_block(uint8_t *buf, uint64_t entries_per_
|
||||
for (uint64_t i = 0; i < max_i; i++)
|
||||
{
|
||||
clean_disk_entry *entry = (clean_disk_entry*)(buf + i*bs->dsk.clean_entry_size);
|
||||
if (!bs->inmemory_meta && bs->dsk.clean_entry_bitmap_size)
|
||||
{
|
||||
memcpy(bs->clean_bitmap + (done_cnt+i)*2*bs->dsk.clean_entry_bitmap_size, &entry->bitmap, 2*bs->dsk.clean_entry_bitmap_size);
|
||||
}
|
||||
if (entry->oid.inode > 0)
|
||||
{
|
||||
if (bs->dsk.meta_format >= BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
// Check entry crc32
|
||||
uint32_t *entry_csum = (uint32_t*)((uint8_t*)entry + bs->dsk.clean_entry_size - 4);
|
||||
if (*entry_csum != crc32c(0, entry, bs->dsk.clean_entry_size - 4))
|
||||
{
|
||||
printf("Metadata entry %lu is corrupt (checksum mismatch), skipping\n", done_cnt+i);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (!bs->inmemory_meta && bs->dsk.clean_entry_bitmap_size)
|
||||
{
|
||||
memcpy(bs->clean_bitmaps + (done_cnt+i) * 2 * bs->dsk.clean_entry_bitmap_size, &entry->bitmap, 2 * bs->dsk.clean_entry_bitmap_size);
|
||||
}
|
||||
auto & clean_db = bs->clean_db_shard(entry->oid);
|
||||
auto clean_it = clean_db.find(entry->oid);
|
||||
if (clean_it == clean_db.end() || clean_it->second.version < entry->version)
|
||||
@@ -416,7 +467,7 @@ int blockstore_init_journal::loop()
|
||||
data = ((ring_data_t*)sqe->user_data);
|
||||
data->iov = { submitted_buf, bs->journal.block_size };
|
||||
data->callback = simple_callback;
|
||||
my_uring_prep_readv(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset);
|
||||
my_uring_prep_readv(sqe, bs->dsk.read_journal_fd, &data->iov, 1, bs->journal.offset);
|
||||
bs->ringloop->submit();
|
||||
wait_count = 1;
|
||||
resume_1:
|
||||
@@ -440,7 +491,9 @@ resume_1:
|
||||
.size = sizeof(journal_entry_start),
|
||||
.reserved = 0,
|
||||
.journal_start = bs->journal.block_size,
|
||||
.version = JOURNAL_VERSION,
|
||||
.version = JOURNAL_VERSION_V2,
|
||||
.data_csum_type = bs->dsk.data_csum_type,
|
||||
.csum_block_size = bs->dsk.csum_block_size,
|
||||
};
|
||||
((journal_entry_start*)submitted_buf)->crc32 = je_crc32((journal_entry*)submitted_buf);
|
||||
if (bs->readonly)
|
||||
@@ -492,18 +545,36 @@ resume_1:
|
||||
if (je_start->magic != JOURNAL_MAGIC ||
|
||||
je_start->type != JE_START ||
|
||||
je_crc32((journal_entry*)je_start) != je_start->crc32 ||
|
||||
je_start->size != sizeof(journal_entry_start) && je_start->size != JE_START_LEGACY_SIZE)
|
||||
je_start->size != JE_START_V0_SIZE && je_start->size != JE_START_V1_SIZE && je_start->size != JE_START_V2_SIZE)
|
||||
{
|
||||
// Entry is corrupt
|
||||
fprintf(stderr, "First entry of the journal is corrupt\n");
|
||||
fprintf(stderr, "First entry of the journal is corrupt or unsupported\n");
|
||||
exit(1);
|
||||
}
|
||||
if (je_start->size == JE_START_LEGACY_SIZE || je_start->version != JOURNAL_VERSION)
|
||||
if (je_start->size == JE_START_V0_SIZE ||
|
||||
(je_start->version != JOURNAL_VERSION_V1 || je_start->size != JE_START_V1_SIZE) &&
|
||||
(je_start->version != JOURNAL_VERSION_V2 || je_start->size != JE_START_V2_SIZE))
|
||||
{
|
||||
fprintf(
|
||||
stderr, "The code only supports journal version %d, but it is %lu on disk."
|
||||
" Please use the previous version to flush the journal before upgrading OSD\n",
|
||||
JOURNAL_VERSION, je_start->size == JE_START_LEGACY_SIZE ? 0 : je_start->version
|
||||
stderr, "The code only supports journal versions 2 and 1, but it is %lu on disk."
|
||||
" Please use vitastor-disk to rewrite the journal\n",
|
||||
je_start->size == JE_START_V0_SIZE ? 0 : je_start->version
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
if (je_start->version == JOURNAL_VERSION_V1)
|
||||
{
|
||||
je_start->data_csum_type = 0;
|
||||
je_start->csum_block_size = 0;
|
||||
}
|
||||
if (je_start->data_csum_type != bs->dsk.data_csum_type ||
|
||||
je_start->csum_block_size != bs->dsk.csum_block_size)
|
||||
{
|
||||
printf(
|
||||
"Configuration stored in journal superblock (data_csum_type=%u, csum_block_size=%u)"
|
||||
" differs from OSD configuration (%u/%u).\n",
|
||||
je_start->data_csum_type, je_start->csum_block_size,
|
||||
bs->dsk.data_csum_type, bs->dsk.csum_block_size
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
@@ -536,7 +607,7 @@ resume_1:
|
||||
end - journal_pos < JOURNAL_BUFFER_SIZE ? end - journal_pos : JOURNAL_BUFFER_SIZE,
|
||||
};
|
||||
data->callback = [this](ring_data_t *data1) { handle_event(data1); };
|
||||
my_uring_prep_readv(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset + journal_pos);
|
||||
my_uring_prep_readv(sqe, bs->dsk.read_journal_fd, &data->iov, 1, bs->journal.offset + journal_pos);
|
||||
bs->ringloop->submit();
|
||||
}
|
||||
while (done.size() > 0)
|
||||
@@ -705,11 +776,14 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
snprintf(err, 1024, "BUG: calculated journal data offset (%08lx) != stored journal data offset (%08lx)", location, je->small_write.data_offset);
|
||||
throw std::runtime_error(err);
|
||||
}
|
||||
uint32_t data_crc32 = 0;
|
||||
small_write_data.clear();
|
||||
if (location >= done_pos && location+je->small_write.len <= done_pos+len)
|
||||
{
|
||||
// data is within this buffer
|
||||
data_crc32 = crc32c(0, (uint8_t*)buf + location - done_pos, je->small_write.len);
|
||||
small_write_data.push_back((iovec){
|
||||
.iov_base = (uint8_t*)buf + location - done_pos,
|
||||
.iov_len = je->small_write.len,
|
||||
});
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -724,7 +798,10 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
? location+je->small_write.len : done[i].pos+done[i].len);
|
||||
uint64_t part_begin = (location < done[i].pos ? done[i].pos : location);
|
||||
covered += part_end - part_begin;
|
||||
data_crc32 = crc32c(data_crc32, (uint8_t*)done[i].buf + part_begin - done[i].pos, part_end - part_begin);
|
||||
small_write_data.push_back((iovec){
|
||||
.iov_base = (uint8_t*)done[i].buf + part_begin - done[i].pos,
|
||||
.iov_len = part_end - part_begin,
|
||||
});
|
||||
}
|
||||
}
|
||||
if (covered < je->small_write.len)
|
||||
@@ -734,12 +811,102 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
return 2;
|
||||
}
|
||||
}
|
||||
if (data_crc32 != je->small_write.crc32_data)
|
||||
bool data_csum_valid = true;
|
||||
if (!bs->dsk.csum_block_size)
|
||||
{
|
||||
uint32_t data_crc32 = 0;
|
||||
for (auto & sd: small_write_data)
|
||||
{
|
||||
data_crc32 = crc32c(data_crc32, sd.iov_base, sd.iov_len);
|
||||
}
|
||||
data_csum_valid = data_crc32 == je->small_write.crc32_data;
|
||||
if (!data_csum_valid)
|
||||
{
|
||||
printf(
|
||||
"Journal entry data is corrupt for small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u - data crc32 %x != %x\n",
|
||||
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
|
||||
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
|
||||
je->small_write.offset, je->small_write.len,
|
||||
data_crc32, je->small_write.crc32_data
|
||||
);
|
||||
}
|
||||
}
|
||||
else if (je->small_write.len > 0)
|
||||
{
|
||||
// FIXME: deduplicate with disk_tool_journal.cpp
|
||||
// like in enqueue_write()
|
||||
uint32_t start = je->small_write.offset / bs->dsk.csum_block_size;
|
||||
uint32_t end = (je->small_write.offset+je->small_write.len-1) / bs->dsk.csum_block_size;
|
||||
uint32_t data_csum_size = (end-start+1) * (bs->dsk.data_csum_type & 0xFF);
|
||||
uint32_t required_size = sizeof(journal_entry_small_write) + bs->dsk.clean_entry_bitmap_size + data_csum_size;
|
||||
if (je->size != required_size)
|
||||
{
|
||||
printf(
|
||||
"Journal entry data has invalid size for small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u - should be %u bytes but is %u bytes\n",
|
||||
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
|
||||
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
|
||||
je->small_write.offset, je->small_write.len,
|
||||
required_size, je->size
|
||||
);
|
||||
data_csum_valid = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
int sd_num = 0;
|
||||
size_t sd_pos = 0;
|
||||
uint32_t *block_csums = (uint32_t*)((uint8_t*)je + sizeof(journal_entry_small_write) + bs->dsk.clean_entry_bitmap_size);
|
||||
for (uint32_t pos = start; pos <= end; pos++, block_csums++)
|
||||
{
|
||||
size_t block_left = (pos == start
|
||||
? (start == end
|
||||
? je->small_write.len
|
||||
: bs->dsk.csum_block_size - je->small_write.offset%bs->dsk.csum_block_size)
|
||||
: (pos < end
|
||||
? bs->dsk.csum_block_size
|
||||
: (je->small_write.offset + je->small_write.len)%bs->dsk.csum_block_size));
|
||||
if (pos > start && pos == end && block_left == 0)
|
||||
{
|
||||
// full last block
|
||||
block_left = bs->dsk.csum_block_size;
|
||||
}
|
||||
uint32_t block_crc32 = 0;
|
||||
while (block_left > 0)
|
||||
{
|
||||
assert(sd_num < small_write_data.size());
|
||||
if (small_write_data[sd_num].iov_len >= sd_pos+block_left)
|
||||
{
|
||||
block_crc32 = crc32c(block_crc32, (uint8_t*)small_write_data[sd_num].iov_base+sd_pos, block_left);
|
||||
sd_pos += block_left;
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
block_crc32 = crc32c(block_crc32, (uint8_t*)small_write_data[sd_num].iov_base+sd_pos, small_write_data[sd_num].iov_len-sd_pos);
|
||||
block_left -= (small_write_data[sd_num].iov_len-sd_pos);
|
||||
sd_pos = 0;
|
||||
sd_num++;
|
||||
}
|
||||
}
|
||||
if (block_crc32 != *block_csums)
|
||||
{
|
||||
printf(
|
||||
"Journal entry data is corrupt for small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u - block %u crc32 %x != %x\n",
|
||||
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
|
||||
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
|
||||
je->small_write.offset, je->small_write.len,
|
||||
pos, block_crc32, *block_csums
|
||||
);
|
||||
data_csum_valid = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!data_csum_valid)
|
||||
{
|
||||
// journal entry is corrupt, stop here
|
||||
// interesting thing is that we must clear the corrupt entry if we're not readonly,
|
||||
// because we don't write next entries in the same journal block
|
||||
printf("Journal entry data is corrupt (data crc32 %x != %x)\n", data_crc32, je->small_write.crc32_data);
|
||||
memset((uint8_t*)buf + proc_pos - done_pos + pos, 0, bs->journal.block_size - pos);
|
||||
bs->journal.next_free = prev_free;
|
||||
init_write_buf = (uint8_t*)buf + proc_pos - done_pos;
|
||||
@@ -755,11 +922,14 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
.oid = je->small_write.oid,
|
||||
.version = je->small_write.version,
|
||||
};
|
||||
void *bmp = NULL;
|
||||
void *bmp_from = (uint8_t*)je + sizeof(journal_entry_small_write);
|
||||
if (bs->dsk.clean_entry_bitmap_size <= sizeof(void*))
|
||||
uint64_t dyn_size = bs->dsk.dirty_dyn_size(je->small_write.offset, je->small_write.len);
|
||||
void *dyn = NULL;
|
||||
void *dyn_from = (uint8_t*)je + sizeof(journal_entry_small_write);
|
||||
if (!bs->alloc_dyn_data)
|
||||
{
|
||||
memcpy(&bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
|
||||
// Bitmap without checksum is only 4 bytes for 128k objects, save it inline
|
||||
// It can even contain 4 byte bitmap + 4 byte CRC32 for 4 kb writes :)
|
||||
memcpy(&dyn, dyn_from, dyn_size);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -767,8 +937,9 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
// allocations for entry bitmaps. This can only be fixed by using
|
||||
// a patched map with dynamic entry size, but not the btree_map,
|
||||
// because it doesn't keep iterators valid all the time.
|
||||
bmp = malloc_or_die(bs->dsk.clean_entry_bitmap_size);
|
||||
memcpy(bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
|
||||
dyn = malloc_or_die(dyn_size+sizeof(int));
|
||||
*((int*)dyn) = 1;
|
||||
memcpy((uint8_t*)dyn+sizeof(int), dyn_from, dyn_size);
|
||||
}
|
||||
bs->dirty_db.emplace(ov, (dirty_entry){
|
||||
.state = (BS_ST_SMALL_WRITE | BS_ST_SYNCED),
|
||||
@@ -777,7 +948,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
.offset = je->small_write.offset,
|
||||
.len = je->small_write.len,
|
||||
.journal_sector = proc_pos,
|
||||
.bitmap = bmp,
|
||||
.dyn_data = dyn,
|
||||
});
|
||||
bs->journal.used_sectors[proc_pos]++;
|
||||
#ifdef BLOCKSTORE_DEBUG
|
||||
@@ -836,11 +1007,13 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
.oid = je->big_write.oid,
|
||||
.version = je->big_write.version,
|
||||
};
|
||||
void *bmp = NULL;
|
||||
void *bmp_from = (uint8_t*)je + sizeof(journal_entry_big_write);
|
||||
if (bs->dsk.clean_entry_bitmap_size <= sizeof(void*))
|
||||
uint64_t dyn_size = bs->dsk.dirty_dyn_size(je->big_write.offset, je->big_write.len);
|
||||
void *dyn = NULL;
|
||||
void *dyn_from = (uint8_t*)je + sizeof(journal_entry_big_write);
|
||||
if (!bs->alloc_dyn_data)
|
||||
{
|
||||
memcpy(&bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
|
||||
// Bitmap without checksum is only 4 bytes for 128k objects, save it inline
|
||||
memcpy(&dyn, dyn_from, dyn_size);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -848,8 +1021,9 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
// allocations for entry bitmaps. This can only be fixed by using
|
||||
// a patched map with dynamic entry size, but not the btree_map,
|
||||
// because it doesn't keep iterators valid all the time.
|
||||
bmp = malloc_or_die(bs->dsk.clean_entry_bitmap_size);
|
||||
memcpy(bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
|
||||
dyn = malloc_or_die(dyn_size+sizeof(int));
|
||||
*((int*)dyn) = 1;
|
||||
memcpy((uint8_t*)dyn+sizeof(int), dyn_from, dyn_size);
|
||||
}
|
||||
auto dirty_it = bs->dirty_db.emplace(ov, (dirty_entry){
|
||||
.state = (BS_ST_BIG_WRITE | BS_ST_SYNCED),
|
||||
@@ -858,7 +1032,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
.offset = je->big_write.offset,
|
||||
.len = je->big_write.len,
|
||||
.journal_sector = proc_pos,
|
||||
.bitmap = bmp,
|
||||
.dyn_data = dyn,
|
||||
}).first;
|
||||
if (bs->data_alloc->get(je->big_write.location >> bs->dsk.block_order))
|
||||
{
|
||||
|
@@ -50,6 +50,7 @@ class blockstore_init_journal
|
||||
uint64_t next_free;
|
||||
std::vector<bs_init_journal_done> done;
|
||||
std::vector<obj_ver_id> double_allocs;
|
||||
std::vector<iovec> small_write_data;
|
||||
uint64_t journal_pos = 0;
|
||||
uint64_t continue_pos = 0;
|
||||
void *init_write_buf = NULL;
|
||||
|
@@ -17,6 +17,7 @@ blockstore_journal_check_t::blockstore_journal_check_t(blockstore_impl_t *bs)
|
||||
// Check if we can write <required> entries of <size> bytes and <data_after> data bytes after them to the journal
|
||||
int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries_required, int size, int data_after)
|
||||
{
|
||||
uint64_t prev_next = next_sector;
|
||||
int required = entries_required;
|
||||
while (1)
|
||||
{
|
||||
@@ -35,11 +36,19 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
|
||||
}
|
||||
required -= fits;
|
||||
next_in_pos += fits * size;
|
||||
sectors_to_write++;
|
||||
if (next_sector != prev_next || !sectors_to_write)
|
||||
{
|
||||
// Except the previous call to this function
|
||||
sectors_to_write++;
|
||||
}
|
||||
}
|
||||
else if (bs->journal.sector_info[next_sector].dirty)
|
||||
{
|
||||
sectors_to_write++;
|
||||
if (next_sector != prev_next || !sectors_to_write)
|
||||
{
|
||||
// Except the previous call to this function
|
||||
sectors_to_write++;
|
||||
}
|
||||
}
|
||||
if (required <= 0)
|
||||
{
|
||||
@@ -236,14 +245,6 @@ journal_t::~journal_t()
|
||||
uint64_t journal_t::get_trim_pos()
|
||||
{
|
||||
auto journal_used_it = used_sectors.lower_bound(used_start);
|
||||
#ifdef BLOCKSTORE_DEBUG
|
||||
printf(
|
||||
"Trimming journal (used_start=%08lx, next_free=%08lx, dirty_start=%08lx, new_start=%08lx, new_refcount=%ld)\n",
|
||||
used_start, next_free, dirty_start,
|
||||
journal_used_it == used_sectors.end() ? 0 : journal_used_it->first,
|
||||
journal_used_it == used_sectors.end() ? 0 : journal_used_it->second
|
||||
);
|
||||
#endif
|
||||
if (journal_used_it == used_sectors.end())
|
||||
{
|
||||
// Journal is cleared to its end, restart from the beginning
|
||||
@@ -256,12 +257,26 @@ uint64_t journal_t::get_trim_pos()
|
||||
else
|
||||
{
|
||||
// next_free does not need updating during trim
|
||||
#ifdef BLOCKSTORE_DEBUG
|
||||
printf(
|
||||
"Trimming journal (used_start=%08lx, next_free=%08lx, dirty_start=%08lx, new_start=%08lx, new_refcount=%ld)\n",
|
||||
used_start, next_free, dirty_start,
|
||||
journal_used_it->first, journal_used_it->second
|
||||
);
|
||||
#endif
|
||||
return journal_used_it->first;
|
||||
}
|
||||
}
|
||||
else if (journal_used_it->first > used_start)
|
||||
{
|
||||
// Journal is cleared up to <journal_used_it>
|
||||
#ifdef BLOCKSTORE_DEBUG
|
||||
printf(
|
||||
"Trimming journal (used_start=%08lx, next_free=%08lx, dirty_start=%08lx, new_start=%08lx, new_refcount=%ld)\n",
|
||||
used_start, next_free, dirty_start,
|
||||
journal_used_it->first, journal_used_it->second
|
||||
);
|
||||
#endif
|
||||
return journal_used_it->first;
|
||||
}
|
||||
// Can't trim journal
|
||||
@@ -283,3 +298,31 @@ void journal_t::dump_diagnostics()
|
||||
journal_used_it == used_sectors.end() ? 0 : journal_used_it->second
|
||||
);
|
||||
}
|
||||
|
||||
static uint64_t zero_page[4096];
|
||||
|
||||
uint32_t crc32c_pad(uint32_t prev_crc, const void *buf, size_t len, size_t left_pad, size_t right_pad)
|
||||
{
|
||||
uint32_t r = prev_crc;
|
||||
while (left_pad >= 4096)
|
||||
{
|
||||
r = crc32c(r, zero_page, 4096);
|
||||
left_pad -= 4096;
|
||||
}
|
||||
if (left_pad > 0)
|
||||
r = crc32c(r, zero_page, left_pad);
|
||||
r = crc32c(r, buf, len);
|
||||
while (right_pad >= 4096)
|
||||
{
|
||||
r = crc32c(r, zero_page, 4096);
|
||||
right_pad -= 4096;
|
||||
}
|
||||
if (left_pad > 0)
|
||||
r = crc32c(r, zero_page, right_pad);
|
||||
return r;
|
||||
}
|
||||
|
||||
uint32_t crc32c_nopad(uint32_t prev_crc, const void *buf, size_t len, size_t left_pad, size_t right_pad)
|
||||
{
|
||||
return crc32c(0, buf, len);
|
||||
}
|
||||
|
@@ -8,7 +8,8 @@
|
||||
|
||||
#define MIN_JOURNAL_SIZE 4*1024*1024
|
||||
#define JOURNAL_MAGIC 0x4A33
|
||||
#define JOURNAL_VERSION 1
|
||||
#define JOURNAL_VERSION_V1 1
|
||||
#define JOURNAL_VERSION_V2 2
|
||||
#define JOURNAL_BUFFER_SIZE 4*1024*1024
|
||||
#define JOURNAL_ENTRY_HEADER_SIZE 16
|
||||
|
||||
@@ -32,7 +33,7 @@
|
||||
#define JE_BIG_WRITE_INSTANT 0x08
|
||||
#define JE_MAX 0x08
|
||||
|
||||
// crc32c comes first to ease calculation and is equal to crc32()
|
||||
// crc32c comes first to ease calculation
|
||||
struct __attribute__((__packed__)) journal_entry_start
|
||||
{
|
||||
uint32_t crc32;
|
||||
@@ -42,8 +43,12 @@ struct __attribute__((__packed__)) journal_entry_start
|
||||
uint32_t reserved;
|
||||
uint64_t journal_start;
|
||||
uint64_t version;
|
||||
uint32_t data_csum_type;
|
||||
uint32_t csum_block_size;
|
||||
};
|
||||
#define JE_START_LEGACY_SIZE 24
|
||||
#define JE_START_V0_SIZE 24
|
||||
#define JE_START_V1_SIZE 32
|
||||
#define JE_START_V2_SIZE 40
|
||||
|
||||
struct __attribute__((__packed__)) journal_entry_small_write
|
||||
{
|
||||
@@ -59,10 +64,12 @@ struct __attribute__((__packed__)) journal_entry_small_write
|
||||
// small_write entries contain <len> bytes of data which is stored in next sectors
|
||||
// data_offset is its offset within journal
|
||||
uint64_t data_offset;
|
||||
uint32_t crc32_data;
|
||||
uint32_t crc32_data; // zero when data_csum_type != 0
|
||||
// small_write and big_write entries are followed by the "external" bitmap
|
||||
// its size is dynamic and included in journal entry's <size> field
|
||||
uint8_t bitmap[];
|
||||
// and then data checksums if data_csum_type != 0
|
||||
// uint32_t data_crc32c[];
|
||||
};
|
||||
|
||||
struct __attribute__((__packed__)) journal_entry_big_write
|
||||
@@ -80,6 +87,8 @@ struct __attribute__((__packed__)) journal_entry_big_write
|
||||
// small_write and big_write entries are followed by the "external" bitmap
|
||||
// its size is dynamic and included in journal entry's <size> field
|
||||
uint8_t bitmap[];
|
||||
// and then data checksums if data_csum_type != 0
|
||||
// uint32_t data_crc32c[];
|
||||
};
|
||||
|
||||
struct __attribute__((__packed__)) journal_entry_stable
|
||||
@@ -218,3 +227,6 @@ struct blockstore_journal_check_t
|
||||
};
|
||||
|
||||
journal_entry* prefill_single_journal_entry(journal_t & journal, uint16_t type, uint32_t size);
|
||||
|
||||
uint32_t crc32c_pad(uint32_t prev_crc, const void *buf, size_t len, size_t left_pad, size_t right_pad);
|
||||
uint32_t crc32c_nopad(uint32_t prev_crc, const void *buf, size_t len, size_t left_pad, size_t right_pad);
|
||||
|
@@ -133,19 +133,24 @@ void blockstore_impl_t::calc_lengths()
|
||||
{
|
||||
metadata_buffer = memalign(MEM_ALIGNMENT, dsk.meta_len);
|
||||
if (!metadata_buffer)
|
||||
throw std::runtime_error("Failed to allocate memory for the metadata");
|
||||
throw std::runtime_error("Failed to allocate memory for the metadata ("+std::to_string(dsk.meta_len/1024/1024)+" MB)");
|
||||
}
|
||||
else if (dsk.clean_entry_bitmap_size)
|
||||
else if (dsk.clean_entry_bitmap_size || dsk.data_csum_type)
|
||||
{
|
||||
clean_bitmap = (uint8_t*)malloc(dsk.block_count * 2*dsk.clean_entry_bitmap_size);
|
||||
if (!clean_bitmap)
|
||||
throw std::runtime_error("Failed to allocate memory for the metadata sparse write bitmap");
|
||||
clean_bitmaps = (uint8_t*)malloc(dsk.block_count * 2 * dsk.clean_entry_bitmap_size);
|
||||
if (!clean_bitmaps)
|
||||
{
|
||||
throw std::runtime_error(
|
||||
"Failed to allocate memory for the metadata sparse write bitmap ("+
|
||||
std::to_string(dsk.block_count * 2 * dsk.clean_entry_bitmap_size / 1024 / 1024)+" MB)"
|
||||
);
|
||||
}
|
||||
}
|
||||
if (journal.inmemory)
|
||||
{
|
||||
journal.buffer = memalign(MEM_ALIGNMENT, journal.len);
|
||||
if (!journal.buffer)
|
||||
throw std::runtime_error("Failed to allocate memory for journal");
|
||||
throw std::runtime_error("Failed to allocate memory for journal ("+std::to_string(journal.len/1024/1024)+" MB)");
|
||||
}
|
||||
else
|
||||
{
|
||||
|
File diff suppressed because it is too large
Load Diff
@@ -218,7 +218,7 @@ void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start,
|
||||
auto used = --journal.used_sectors[dirty_it->second.journal_sector];
|
||||
#ifdef BLOCKSTORE_DEBUG
|
||||
printf(
|
||||
"remove usage of journal offset %08lx by %lx:%lx v%lu (%d refs)\n", dirty_it->second.journal_sector,
|
||||
"remove usage of journal offset %08lx by %lx:%lx v%lu (%lu refs)\n", dirty_it->second.journal_sector,
|
||||
dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version, used
|
||||
);
|
||||
#endif
|
||||
@@ -227,11 +227,7 @@ void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start,
|
||||
journal.used_sectors.erase(dirty_it->second.journal_sector);
|
||||
flusher->mark_trim_possible();
|
||||
}
|
||||
if (dsk.clean_entry_bitmap_size > sizeof(void*))
|
||||
{
|
||||
free(dirty_it->second.bitmap);
|
||||
dirty_it->second.bitmap = NULL;
|
||||
}
|
||||
free_dirty_dyn_data(dirty_it->second);
|
||||
if (dirty_it == dirty_start)
|
||||
{
|
||||
break;
|
||||
@@ -240,3 +236,18 @@ void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start,
|
||||
}
|
||||
dirty_db.erase(dirty_start, dirty_end);
|
||||
}
|
||||
|
||||
void blockstore_impl_t::free_dirty_dyn_data(dirty_entry & e)
|
||||
{
|
||||
if (e.dyn_data)
|
||||
{
|
||||
if (alloc_dyn_data &&
|
||||
--*((int*)e.dyn_data) == 0) // refcount
|
||||
{
|
||||
// dyn_data contains the bitmap and checksums
|
||||
// free it if it doesn't refer to the in-memory journal
|
||||
free(e.dyn_data);
|
||||
}
|
||||
e.dyn_data = NULL;
|
||||
}
|
||||
}
|
||||
|
@@ -458,6 +458,16 @@ void blockstore_impl_t::mark_stable(const obj_ver_id & v, bool forget_dirty)
|
||||
big_to_flush++;
|
||||
}
|
||||
}
|
||||
else if (IS_IN_FLIGHT(dirty_it->second.state))
|
||||
{
|
||||
// mark_stable should never be called for in-flight or submitted writes
|
||||
printf(
|
||||
"BUG: Attempt to mark_stable object %lx:%lx v%lu state of which is %x\n",
|
||||
dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version,
|
||||
dirty_it->second.state
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
if (forget_dirty && (IS_BIG_WRITE(dirty_it->second.state) ||
|
||||
IS_DELETE(dirty_it->second.state)))
|
||||
{
|
||||
|
@@ -78,7 +78,23 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
|
||||
// 2nd step: Data device is synced, prepare & write journal entries
|
||||
// Check space in the journal and journal memory buffers
|
||||
blockstore_journal_check_t space_check(this);
|
||||
if (!space_check.check_available(op, PRIV(op)->sync_big_writes.size(),
|
||||
if (dsk.csum_block_size)
|
||||
{
|
||||
// More complex check because all journal entries have different lengths
|
||||
int left = PRIV(op)->sync_big_writes.size();
|
||||
for (auto & sbw: PRIV(op)->sync_big_writes)
|
||||
{
|
||||
left--;
|
||||
auto & dirty_entry = dirty_db.at(sbw);
|
||||
uint64_t dyn_size = dsk.dirty_dyn_size(dirty_entry.offset, dirty_entry.len);
|
||||
if (!space_check.check_available(op, 1, sizeof(journal_entry_big_write) + dyn_size,
|
||||
left == 0 ? JOURNAL_STABILIZE_RESERVATION : 0))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (!space_check.check_available(op, PRIV(op)->sync_big_writes.size(),
|
||||
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size, JOURNAL_STABILIZE_RESERVATION))
|
||||
{
|
||||
return 0;
|
||||
@@ -90,16 +106,17 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
|
||||
int s = 0;
|
||||
while (it != PRIV(op)->sync_big_writes.end())
|
||||
{
|
||||
if (!journal.entry_fits(sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size) &&
|
||||
auto & dirty_entry = dirty_db.at(*it);
|
||||
uint64_t dyn_size = dsk.dirty_dyn_size(dirty_entry.offset, dirty_entry.len);
|
||||
if (!journal.entry_fits(sizeof(journal_entry_big_write) + dyn_size) &&
|
||||
journal.sector_info[journal.cur_sector].dirty)
|
||||
{
|
||||
prepare_journal_sector_write(journal.cur_sector, op);
|
||||
s++;
|
||||
}
|
||||
auto & dirty_entry = dirty_db.at(*it);
|
||||
journal_entry_big_write *je = (journal_entry_big_write*)prefill_single_journal_entry(
|
||||
journal, (dirty_entry.state & BS_ST_INSTANT) ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
|
||||
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size
|
||||
sizeof(journal_entry_big_write) + dyn_size
|
||||
);
|
||||
dirty_entry.journal_sector = journal.sector_info[journal.cur_sector].offset;
|
||||
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
|
||||
@@ -115,8 +132,8 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
|
||||
je->offset = dirty_entry.offset;
|
||||
je->len = dirty_entry.len;
|
||||
je->location = dirty_entry.location;
|
||||
memcpy((void*)(je+1), (dsk.clean_entry_bitmap_size > sizeof(void*)
|
||||
? dirty_entry.bitmap : &dirty_entry.bitmap), dsk.clean_entry_bitmap_size);
|
||||
memcpy((void*)(je+1), (alloc_dyn_data
|
||||
? (uint8_t*)dirty_entry.dyn_data+sizeof(int) : (uint8_t*)&dirty_entry.dyn_data), dyn_size);
|
||||
je->crc32 = je_crc32((journal_entry*)je);
|
||||
journal.crc32_last = je->crc32;
|
||||
it++;
|
||||
|
@@ -8,12 +8,21 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
|
||||
// Check or assign version number
|
||||
bool found = false, deleted = false, unsynced = false, is_del = (op->opcode == BS_OP_DELETE);
|
||||
bool wait_big = false, wait_del = false;
|
||||
void *bmp = NULL;
|
||||
uint64_t version = 1;
|
||||
if (!is_del && dsk.clean_entry_bitmap_size > sizeof(void*))
|
||||
void *dyn = NULL;
|
||||
if (is_del)
|
||||
{
|
||||
bmp = calloc_or_die(1, dsk.clean_entry_bitmap_size);
|
||||
op->len = 0;
|
||||
}
|
||||
size_t dyn_size = dsk.dirty_dyn_size(op->offset, op->len);
|
||||
if (!is_del && alloc_dyn_data)
|
||||
{
|
||||
// FIXME: Working with `dyn_data` has to be refactored somehow but I first have to decide how :)
|
||||
// +sizeof(int) = refcount
|
||||
dyn = calloc_or_die(1, dyn_size+sizeof(int));
|
||||
*((int*)dyn) = 1;
|
||||
}
|
||||
uint8_t *dyn_ptr = (uint8_t*)(alloc_dyn_data ? dyn+sizeof(int) : &dyn);
|
||||
uint64_t version = 1;
|
||||
if (dirty_db.size() > 0)
|
||||
{
|
||||
auto dirty_it = dirty_db.upper_bound((obj_ver_id){
|
||||
@@ -33,10 +42,9 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
|
||||
: ((dirty_it->second.state & BS_ST_WORKFLOW_MASK) == BS_ST_WAIT_BIG);
|
||||
if (!is_del && !deleted)
|
||||
{
|
||||
if (dsk.clean_entry_bitmap_size > sizeof(void*))
|
||||
memcpy(bmp, dirty_it->second.bitmap, dsk.clean_entry_bitmap_size);
|
||||
else
|
||||
bmp = dirty_it->second.bitmap;
|
||||
void *dyn_from = alloc_dyn_data
|
||||
? (uint8_t*)dirty_it->second.dyn_data + sizeof(int) : (uint8_t*)&dirty_it->second.dyn_data;
|
||||
memcpy(dyn_ptr, dyn_from, dsk.clean_entry_bitmap_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -50,7 +58,7 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
|
||||
if (!is_del)
|
||||
{
|
||||
void *bmp_ptr = get_clean_entry_bitmap(clean_it->second.location, dsk.clean_entry_bitmap_size);
|
||||
memcpy((dsk.clean_entry_bitmap_size > sizeof(void*) ? bmp : &bmp), bmp_ptr, dsk.clean_entry_bitmap_size);
|
||||
memcpy(dyn_ptr, bmp_ptr, dsk.clean_entry_bitmap_size);
|
||||
}
|
||||
}
|
||||
else
|
||||
@@ -112,9 +120,9 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
|
||||
printf("Write %lx:%lx v%lu requested, but we already have v%lu\n", op->oid.inode, op->oid.stripe, op->version, version);
|
||||
#endif
|
||||
op->retval = -EEXIST;
|
||||
if (!is_del && dsk.clean_entry_bitmap_size > sizeof(void*))
|
||||
if (!is_del && alloc_dyn_data)
|
||||
{
|
||||
free(bmp);
|
||||
free(dyn);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@@ -158,26 +166,50 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
|
||||
if (op->bitmap)
|
||||
{
|
||||
// Only allow to overwrite part of the object bitmap respective to the write's offset/len
|
||||
uint8_t *bmp_ptr = (uint8_t*)(dsk.clean_entry_bitmap_size > sizeof(void*) ? bmp : &bmp);
|
||||
uint32_t bit = op->offset/dsk.bitmap_granularity;
|
||||
uint32_t bits_left = op->len/dsk.bitmap_granularity;
|
||||
while (!(bit % 8) && bits_left >= 8)
|
||||
{
|
||||
// Copy bytes
|
||||
bmp_ptr[bit/8] = ((uint8_t*)op->bitmap)[bit/8];
|
||||
dyn_ptr[bit/8] = ((uint8_t*)op->bitmap)[bit/8];
|
||||
bit += 8;
|
||||
bits_left -= 8;
|
||||
}
|
||||
while (bits_left > 0)
|
||||
{
|
||||
// Copy bits
|
||||
bmp_ptr[bit/8] = (bmp_ptr[bit/8] & ~(1 << (bit%8)))
|
||||
dyn_ptr[bit/8] = (dyn_ptr[bit/8] & ~(1 << (bit%8)))
|
||||
| (((uint8_t*)op->bitmap)[bit/8] & (1 << bit%8));
|
||||
bit++;
|
||||
bits_left--;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Calculate checksums
|
||||
// FIXME: Allow to receive checksums from outside?
|
||||
if (!is_del && dsk.data_csum_type && op->len > 0)
|
||||
{
|
||||
uint32_t *data_csums = (uint32_t*)(dyn_ptr + dsk.clean_entry_bitmap_size);
|
||||
uint32_t start = op->offset / dsk.csum_block_size;
|
||||
uint32_t end = (op->offset+op->len-1) / dsk.csum_block_size;
|
||||
auto fn = state & BS_ST_BIG_WRITE ? crc32c_pad : crc32c_nopad;
|
||||
if (start == end)
|
||||
data_csums[0] = fn(0, op->buf, op->len, op->offset - start*dsk.csum_block_size, end*dsk.csum_block_size - (op->offset+op->len));
|
||||
else
|
||||
{
|
||||
// First block
|
||||
data_csums[0] = fn(0, op->buf, dsk.csum_block_size*(start+1)-op->offset, op->offset - start*dsk.csum_block_size, 0);
|
||||
// Intermediate blocks
|
||||
for (uint32_t i = start+1; i < end; i++)
|
||||
data_csums[i-start] = crc32c(0, (uint8_t*)op->buf + dsk.csum_block_size*i-op->offset, dsk.csum_block_size);
|
||||
// Last block
|
||||
data_csums[end-start] = fn(
|
||||
0, (uint8_t*)op->buf + end*dsk.csum_block_size - op->offset,
|
||||
op->offset+op->len - end*dsk.csum_block_size,
|
||||
0, (end+1)*dsk.csum_block_size - (op->offset+op->len)
|
||||
);
|
||||
}
|
||||
}
|
||||
dirty_db.emplace((obj_ver_id){
|
||||
.oid = op->oid,
|
||||
.version = op->version,
|
||||
@@ -188,7 +220,7 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
|
||||
.offset = is_del ? 0 : op->offset,
|
||||
.len = is_del ? 0 : op->len,
|
||||
.journal_sector = 0,
|
||||
.bitmap = bmp,
|
||||
.dyn_data = dyn,
|
||||
});
|
||||
return true;
|
||||
}
|
||||
@@ -197,8 +229,7 @@ void blockstore_impl_t::cancel_all_writes(blockstore_op_t *op, blockstore_dirty_
|
||||
{
|
||||
while (dirty_it != dirty_db.end() && dirty_it->first.oid == op->oid)
|
||||
{
|
||||
if (dsk.clean_entry_bitmap_size > sizeof(void*))
|
||||
free(dirty_it->second.bitmap);
|
||||
free_dirty_dyn_data(dirty_it->second);
|
||||
dirty_db.erase(dirty_it++);
|
||||
}
|
||||
bool found = false;
|
||||
@@ -280,7 +311,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
|
||||
{
|
||||
blockstore_journal_check_t space_check(this);
|
||||
if (!space_check.check_available(op, unsynced_big_write_count + 1,
|
||||
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size,
|
||||
sizeof(journal_entry_big_write) + dsk.clean_dyn_size,
|
||||
(dirty_it->second.state & BS_ST_INSTANT) ? JOURNAL_INSTANT_RESERVATION : JOURNAL_STABILIZE_RESERVATION))
|
||||
{
|
||||
return 0;
|
||||
@@ -363,12 +394,13 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
|
||||
{
|
||||
// Small (journaled) write
|
||||
// First check if the journal has sufficient space
|
||||
uint64_t dyn_size = dsk.dirty_dyn_size(op->offset, op->len);
|
||||
blockstore_journal_check_t space_check(this);
|
||||
if (unsynced_big_write_count &&
|
||||
!space_check.check_available(op, unsynced_big_write_count,
|
||||
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size, 0)
|
||||
sizeof(journal_entry_big_write) + dsk.clean_dyn_size, 0)
|
||||
|| !space_check.check_available(op, 1,
|
||||
sizeof(journal_entry_small_write) + dsk.clean_entry_bitmap_size,
|
||||
sizeof(journal_entry_small_write) + dyn_size,
|
||||
op->len + ((dirty_it->second.state & BS_ST_INSTANT) ? JOURNAL_INSTANT_RESERVATION : JOURNAL_STABILIZE_RESERVATION)))
|
||||
{
|
||||
return 0;
|
||||
@@ -377,7 +409,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
|
||||
BS_SUBMIT_CHECK_SQES(
|
||||
// Write current journal sector only if it's dirty and full, or in the immediate_commit mode
|
||||
(immediate_commit != IMMEDIATE_NONE ||
|
||||
!journal.entry_fits(sizeof(journal_entry_small_write) + dsk.clean_entry_bitmap_size) ? 1 : 0) +
|
||||
!journal.entry_fits(sizeof(journal_entry_small_write) + dyn_size) ? 1 : 0) +
|
||||
(op->len > 0 ? 1 : 0)
|
||||
);
|
||||
write_iodepth++;
|
||||
@@ -385,7 +417,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
|
||||
auto cb = [this, op](ring_data_t *data) { handle_write_event(data, op); };
|
||||
if (immediate_commit == IMMEDIATE_NONE)
|
||||
{
|
||||
if (!journal.entry_fits(sizeof(journal_entry_small_write) + dsk.clean_entry_bitmap_size))
|
||||
if (!journal.entry_fits(sizeof(journal_entry_small_write) + dyn_size))
|
||||
{
|
||||
prepare_journal_sector_write(journal.cur_sector, op);
|
||||
}
|
||||
@@ -397,7 +429,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
|
||||
// Then pre-fill journal entry
|
||||
journal_entry_small_write *je = (journal_entry_small_write*)prefill_single_journal_entry(
|
||||
journal, op->opcode == BS_OP_WRITE_STABLE ? JE_SMALL_WRITE_INSTANT : JE_SMALL_WRITE,
|
||||
sizeof(journal_entry_small_write) + dsk.clean_entry_bitmap_size
|
||||
sizeof(journal_entry_small_write) + dyn_size
|
||||
);
|
||||
dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
|
||||
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
|
||||
@@ -409,14 +441,31 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
|
||||
);
|
||||
#endif
|
||||
// Figure out where data will be
|
||||
journal.next_free = (journal.next_free + op->len) <= journal.len ? journal.next_free : dsk.journal_block_size;
|
||||
auto next_next_free = (journal.next_free + op->len) <= journal.len ? journal.next_free : dsk.journal_block_size;
|
||||
if (op->len > 0)
|
||||
{
|
||||
auto journal_used_it = journal.used_sectors.lower_bound(next_next_free);
|
||||
if (journal_used_it != journal.used_sectors.end() &&
|
||||
journal_used_it->first < next_next_free + op->len)
|
||||
{
|
||||
printf(
|
||||
"BUG: Attempt to overwrite used offset (%lx, %lu refs) of the journal with the object %lx:%lx v%lu: data at %lx, len %x!"
|
||||
" Journal used_start=%08lx (%lu refs), next_free=%08lx, dirty_start=%08lx\n",
|
||||
journal_used_it->first, journal_used_it->second, op->oid.inode, op->oid.stripe, op->version, next_next_free, op->len,
|
||||
journal.used_start, journal.used_sectors[journal.used_start], journal.next_free, journal.dirty_start
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
journal.next_free = next_next_free;
|
||||
je->oid = op->oid;
|
||||
je->version = op->version;
|
||||
je->offset = op->offset;
|
||||
je->len = op->len;
|
||||
je->data_offset = journal.next_free;
|
||||
je->crc32_data = crc32c(0, op->buf, op->len);
|
||||
memcpy((void*)(je+1), (dsk.clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap), dsk.clean_entry_bitmap_size);
|
||||
je->crc32_data = dsk.csum_block_size ? 0 : crc32c(0, op->buf, op->len);
|
||||
memcpy((void*)(je+1), (alloc_dyn_data
|
||||
? (uint8_t*)dirty_it->second.dyn_data+sizeof(int) : (uint8_t*)&dirty_it->second.dyn_data), dyn_size);
|
||||
je->crc32 = je_crc32((journal_entry*)je);
|
||||
journal.crc32_last = je->crc32;
|
||||
if (immediate_commit != IMMEDIATE_NONE)
|
||||
@@ -485,9 +534,9 @@ resume_2:
|
||||
.version = op->version,
|
||||
});
|
||||
assert(dirty_it != dirty_db.end());
|
||||
uint64_t dyn_size = dsk.dirty_dyn_size(op->offset, op->len);
|
||||
blockstore_journal_check_t space_check(this);
|
||||
if (!space_check.check_available(op, 1,
|
||||
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size,
|
||||
if (!space_check.check_available(op, 1, sizeof(journal_entry_big_write) + dyn_size,
|
||||
((dirty_it->second.state & BS_ST_INSTANT) ? JOURNAL_INSTANT_RESERVATION : JOURNAL_STABILIZE_RESERVATION)))
|
||||
{
|
||||
return 0;
|
||||
@@ -495,7 +544,7 @@ resume_2:
|
||||
BS_SUBMIT_CHECK_SQES(1);
|
||||
journal_entry_big_write *je = (journal_entry_big_write*)prefill_single_journal_entry(
|
||||
journal, op->opcode == BS_OP_WRITE_STABLE ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
|
||||
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size
|
||||
sizeof(journal_entry_big_write) + dyn_size
|
||||
);
|
||||
dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
|
||||
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
|
||||
@@ -511,7 +560,8 @@ resume_2:
|
||||
je->offset = op->offset;
|
||||
je->len = op->len;
|
||||
je->location = dirty_it->second.location;
|
||||
memcpy((void*)(je+1), (dsk.clean_entry_bitmap_size > sizeof(void*) ? dirty_it->second.bitmap : &dirty_it->second.bitmap), dsk.clean_entry_bitmap_size);
|
||||
memcpy((void*)(je+1), (alloc_dyn_data
|
||||
? (uint8_t*)dirty_it->second.dyn_data+sizeof(int) : (uint8_t*)&dirty_it->second.dyn_data), dyn_size);
|
||||
je->crc32 = je_crc32((journal_entry*)je);
|
||||
journal.crc32_last = je->crc32;
|
||||
prepare_journal_sector_write(journal.cur_sector, op);
|
||||
@@ -645,8 +695,13 @@ void blockstore_impl_t::release_journal_sectors(blockstore_op_t *op)
|
||||
uint64_t s = PRIV(op)->min_flushed_journal_sector;
|
||||
while (1)
|
||||
{
|
||||
if (s != (1+journal.cur_sector) && journal.sector_info[s-1].flush_count == 0)
|
||||
if (!journal.sector_info[s-1].dirty && journal.sector_info[s-1].flush_count == 0)
|
||||
{
|
||||
if (s == (1+journal.cur_sector))
|
||||
{
|
||||
// Forcibly move to the next sector and move dirty position
|
||||
journal.in_sector_pos = journal.block_size;
|
||||
}
|
||||
// We know for sure that we won't write into this sector anymore
|
||||
uint64_t new_ds = journal.sector_info[s-1].offset + journal.block_size;
|
||||
if (new_ds >= journal.len)
|
||||
|
@@ -41,7 +41,7 @@ struct snap_merger_t
|
||||
int fsync_interval = 128;
|
||||
|
||||
// -- STATE --
|
||||
inode_t target;
|
||||
inode_t target, to_num;
|
||||
int target_rank;
|
||||
bool inside_continue = false;
|
||||
int state = 0;
|
||||
@@ -98,6 +98,7 @@ struct snap_merger_t
|
||||
state = 100;
|
||||
return;
|
||||
}
|
||||
to_num = to_cfg->num;
|
||||
// Check that to_cfg is actually a child of from_cfg and target_cfg is somewhere between them
|
||||
std::vector<inode_t> chain_list;
|
||||
inode_config_t *cur = to_cfg;
|
||||
@@ -451,7 +452,7 @@ struct snap_merger_t
|
||||
{
|
||||
cluster_op_t *op = &rwo->op;
|
||||
op->opcode = OSD_OP_READ;
|
||||
op->inode = target;
|
||||
op->inode = to_num;
|
||||
op->offset = rwo->offset;
|
||||
op->len = target_block_size;
|
||||
op->iov.push_back(rwo->buf, target_block_size);
|
||||
@@ -483,7 +484,7 @@ struct snap_merger_t
|
||||
{
|
||||
// write start->end
|
||||
rwo->todo++;
|
||||
write_subop(rwo, rwo->start*gran, rwo->end*gran, use_cas ? 1+rwo->op.version : 0);
|
||||
write_subop(rwo, rwo->start*gran, rwo->end*gran, use_cas && to_num == target ? 1+rwo->op.version : 0);
|
||||
rwo->start = rwo->end;
|
||||
if (use_cas)
|
||||
{
|
||||
@@ -502,7 +503,7 @@ struct snap_merger_t
|
||||
{
|
||||
// write start->end
|
||||
rwo->todo++;
|
||||
write_subop(rwo, rwo->start*gran, rwo->end*gran, use_cas ? 1+rwo->op.version : 0);
|
||||
write_subop(rwo, rwo->start*gran, rwo->end*gran, use_cas && to_num == target ? 1+rwo->op.version : 0);
|
||||
rwo->start = rwo->end;
|
||||
if (use_cas)
|
||||
{
|
||||
@@ -532,7 +533,7 @@ struct snap_merger_t
|
||||
if (use_cas && subop->retval == -EINTR)
|
||||
{
|
||||
// CAS failure - reread and repeat optimistically
|
||||
rwo->start = subop->offset - rwo->offset;
|
||||
rwo->start = rwo->end = 0;
|
||||
rwo_read(rwo);
|
||||
delete subop;
|
||||
return;
|
||||
@@ -542,7 +543,7 @@ struct snap_merger_t
|
||||
rwo->error_read = false;
|
||||
}
|
||||
// Increment CAS version
|
||||
rwo->op.version++;
|
||||
rwo->op.version = subop->version;
|
||||
if (use_cas)
|
||||
next_write(rwo);
|
||||
else
|
||||
|
@@ -65,6 +65,9 @@ struct snap_remover_t
|
||||
int current_child = 0;
|
||||
std::function<bool(cli_result_t &)> cb;
|
||||
|
||||
std::vector<std::string> rebased_images, deleted_images;
|
||||
std::vector<uint64_t> deleted_ids;
|
||||
std::string inverse_child_name, inverse_parent_name;
|
||||
cli_result_t result;
|
||||
|
||||
bool is_done()
|
||||
@@ -122,6 +125,7 @@ resume_1:
|
||||
{
|
||||
if (merge_children[current_child] == inverse_child)
|
||||
continue;
|
||||
rebased_images.push_back(parent->cli->st_cli.inode_config.at(merge_children[current_child]).name);
|
||||
start_merge_child(merge_children[current_child], merge_children[current_child]);
|
||||
if (state == 100)
|
||||
return;
|
||||
@@ -134,9 +138,12 @@ resume_2:
|
||||
cb = NULL;
|
||||
if (result.err)
|
||||
{
|
||||
result.data = my_result(result.data);
|
||||
state = 100;
|
||||
return;
|
||||
}
|
||||
else if (parent->progress)
|
||||
printf("%s\n", result.text.c_str());
|
||||
parent->change_parent(merge_children[current_child], new_parent, &result);
|
||||
state = 3;
|
||||
resume_3:
|
||||
@@ -144,6 +151,7 @@ resume_3:
|
||||
return;
|
||||
if (result.err)
|
||||
{
|
||||
result.data = my_result(result.data);
|
||||
state = 100;
|
||||
return;
|
||||
}
|
||||
@@ -165,9 +173,12 @@ resume_4:
|
||||
cb = NULL;
|
||||
if (result.err)
|
||||
{
|
||||
result.data = my_result(result.data);
|
||||
state = 100;
|
||||
return;
|
||||
}
|
||||
else if (parent->progress)
|
||||
printf("%s\n", result.text.c_str());
|
||||
// Delete "inverse" child data
|
||||
start_delete_source(inverse_child);
|
||||
if (state == 100)
|
||||
@@ -181,9 +192,12 @@ resume_5:
|
||||
cb = NULL;
|
||||
if (result.err)
|
||||
{
|
||||
result.data = my_result(result.data);
|
||||
state = 100;
|
||||
return;
|
||||
}
|
||||
else if (parent->progress)
|
||||
printf("%s\n", result.text.c_str());
|
||||
// Delete "inverse" child metadata, rename parent over it,
|
||||
// and also change parent links of the previous "inverse" child
|
||||
rename_inverse_parent();
|
||||
@@ -199,6 +213,12 @@ resume_6:
|
||||
{
|
||||
if (chain_list[current_child] == inverse_parent)
|
||||
continue;
|
||||
{
|
||||
auto parent_it = parent->cli->st_cli.inode_config.find(chain_list[current_child]);
|
||||
if (parent_it != parent->cli->st_cli.inode_config.end())
|
||||
deleted_images.push_back(parent_it->second.name);
|
||||
deleted_ids.push_back(chain_list[current_child]);
|
||||
}
|
||||
start_delete_source(chain_list[current_child]);
|
||||
resume_7:
|
||||
while (!cb(result))
|
||||
@@ -209,9 +229,12 @@ resume_7:
|
||||
cb = NULL;
|
||||
if (result.err)
|
||||
{
|
||||
result.data = my_result(result.data);
|
||||
state = 100;
|
||||
return;
|
||||
}
|
||||
else if (parent->progress)
|
||||
printf("%s\n", result.text.c_str());
|
||||
delete_inode_config(chain_list[current_child]);
|
||||
if (state == 100)
|
||||
return;
|
||||
@@ -221,11 +244,26 @@ resume_8:
|
||||
return;
|
||||
}
|
||||
state = 100;
|
||||
result = (cli_result_t){
|
||||
.text = "",
|
||||
.data = my_result(result.data),
|
||||
};
|
||||
resume_100:
|
||||
// Done
|
||||
return;
|
||||
}
|
||||
|
||||
json11::Json my_result(json11::Json src)
|
||||
{
|
||||
auto obj = src.object_items();
|
||||
obj["deleted_ids"] = deleted_ids;
|
||||
obj["deleted_images"] = deleted_images;
|
||||
obj["rebased_images"] = rebased_images;
|
||||
obj["renamed_from"] = inverse_parent_name;
|
||||
obj["renamed_to"] = inverse_child_name;
|
||||
return obj;
|
||||
}
|
||||
|
||||
void get_merge_children()
|
||||
{
|
||||
// Get all children of from..to
|
||||
@@ -338,7 +376,11 @@ resume_100:
|
||||
}
|
||||
for (auto inode_result: data["responses"].array_items())
|
||||
{
|
||||
auto kv = parent->cli->st_cli.parse_etcd_kv(inode_result["kvs"][0]);
|
||||
if (inode_result["response_range"]["kvs"].array_items().size() == 0)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
auto kv = parent->cli->st_cli.parse_etcd_kv(inode_result["response_range"]["kvs"][0]);
|
||||
pool_id_t pool_id = 0;
|
||||
inode_t inode = 0;
|
||||
char null_byte = 0;
|
||||
@@ -377,7 +419,7 @@ resume_100:
|
||||
inode_t child = cp.first;
|
||||
uint64_t child_used = inode_used[child];
|
||||
int rank = cp.second;
|
||||
for (int i = chain_list.size()-rank; i < chain_list.size(); i++)
|
||||
for (int i = chain_list.size()-1-rank; i < chain_list.size(); i++)
|
||||
{
|
||||
inode_t parent = chain_list[i];
|
||||
uint64_t parent_used = inode_used[parent];
|
||||
@@ -413,8 +455,8 @@ resume_100:
|
||||
}
|
||||
inode_config_t *child_cfg = &child_it->second;
|
||||
inode_config_t *target_cfg = &target_it->second;
|
||||
std::string child_name = child_cfg->name;
|
||||
std::string target_name = target_cfg->name;
|
||||
inverse_child_name = child_cfg->name;
|
||||
inverse_parent_name = target_cfg->name;
|
||||
std::string child_cfg_key = base64_encode(
|
||||
parent->cli->st_cli.etcd_prefix+
|
||||
"/config/inode/"+std::to_string(INODE_POOL(inverse_child))+
|
||||
@@ -425,6 +467,9 @@ resume_100:
|
||||
"/config/inode/"+std::to_string(INODE_POOL(inverse_parent))+
|
||||
"/"+std::to_string(INODE_NO_POOL(inverse_parent))
|
||||
);
|
||||
std::string target_idx_key = base64_encode(
|
||||
parent->cli->st_cli.etcd_prefix+"/index/image/"+inverse_parent_name
|
||||
);
|
||||
// Fill new configuration
|
||||
inode_config_t new_cfg = *child_cfg;
|
||||
new_cfg.num = target_cfg->num;
|
||||
@@ -449,6 +494,11 @@ resume_100:
|
||||
{ "key", child_cfg_key },
|
||||
} },
|
||||
},
|
||||
json11::Json::object {
|
||||
{ "request_delete_range", json11::Json::object {
|
||||
{ "key", target_idx_key },
|
||||
} },
|
||||
},
|
||||
json11::Json::object {
|
||||
{ "request_put", json11::Json::object {
|
||||
{ "key", target_cfg_key },
|
||||
@@ -495,12 +545,12 @@ resume_100:
|
||||
parent->cli->st_cli.etcd_txn_slow(json11::Json::object {
|
||||
{ "compare", cmp },
|
||||
{ "success", txn },
|
||||
}, [this, target_name, child_name](std::string err, json11::Json res)
|
||||
}, [this](std::string err, json11::Json res)
|
||||
{
|
||||
parent->waiting--;
|
||||
if (err != "")
|
||||
{
|
||||
result = (cli_result_t){ .err = EIO, .text = "Error renaming "+target_name+" to "+child_name+": "+err };
|
||||
result = (cli_result_t){ .err = EIO, .text = "Error renaming "+inverse_parent_name+" to "+inverse_child_name+": "+err };
|
||||
state = 100;
|
||||
return;
|
||||
}
|
||||
@@ -508,14 +558,14 @@ resume_100:
|
||||
{
|
||||
result = (cli_result_t){
|
||||
.err = EAGAIN,
|
||||
.text = "Parent ("+target_name+"), child ("+child_name+"), or one of its children"
|
||||
.text = "Parent ("+inverse_parent_name+"), child ("+inverse_child_name+"), or one of its children"
|
||||
" configuration was modified during rename",
|
||||
};
|
||||
state = 100;
|
||||
return;
|
||||
}
|
||||
if (parent->progress)
|
||||
printf("Layer %s renamed to %s\n", target_name.c_str(), child_name.c_str());
|
||||
printf("Layer %s renamed to %s\n", inverse_parent_name.c_str(), inverse_child_name.c_str());
|
||||
parent->ringloop->wakeup();
|
||||
});
|
||||
}
|
||||
|
@@ -28,6 +28,7 @@ struct rm_inode_t
|
||||
cli_tool_t *parent = NULL;
|
||||
inode_list_t *lister = NULL;
|
||||
std::vector<rm_pg_t*> lists;
|
||||
std::vector<osd_num_t> inactive_osds;
|
||||
uint64_t total_count = 0, total_done = 0, total_prev_pct = 0;
|
||||
uint64_t pgs_to_list = 0;
|
||||
bool lists_done = false;
|
||||
@@ -86,6 +87,16 @@ struct rm_inode_t
|
||||
state = 100;
|
||||
return;
|
||||
}
|
||||
inactive_osds = parent->cli->list_inode_get_inactive_osds(lister);
|
||||
if (inactive_osds.size() && !parent->json_output)
|
||||
{
|
||||
fprintf(stderr, "Some data may remain after delete on OSDs which are currently down: ");
|
||||
for (int i = 0; i < inactive_osds.size(); i++)
|
||||
{
|
||||
fprintf(stderr, i > 0 ? ", %lu" : "%lu", inactive_osds[i]);
|
||||
}
|
||||
fprintf(stderr, "\n");
|
||||
}
|
||||
pgs_to_list = parent->cli->list_pg_count(lister);
|
||||
parent->cli->list_inode_next(lister, parent->parallel_osds);
|
||||
}
|
||||
@@ -167,16 +178,33 @@ struct rm_inode_t
|
||||
}
|
||||
if (parent->progress && total_count > 0 && total_done*1000/total_count != total_prev_pct)
|
||||
{
|
||||
printf("\rRemoved %lu/%lu objects, %lu more PGs to list...", total_done, total_count, pgs_to_list);
|
||||
fprintf(stderr, "\rRemoved %lu/%lu objects, %lu more PGs to list...", total_done, total_count, pgs_to_list);
|
||||
total_prev_pct = total_done*1000/total_count;
|
||||
}
|
||||
if (lists_done && !lists.size())
|
||||
{
|
||||
if (parent->progress && total_count > 0)
|
||||
{
|
||||
fprintf(stderr, "\n");
|
||||
}
|
||||
if (parent->progress && (total_done < total_count || inactive_osds.size() > 0))
|
||||
{
|
||||
fprintf(
|
||||
stderr, "Warning: Pool:%u,ID:%lu inode data may not have been fully removed.\n"
|
||||
" Use `vitastor-cli rm-data --pool %u --inode %lu` if you encounter it in listings.\n",
|
||||
pool_id, INODE_NO_POOL(inode), pool_id, INODE_NO_POOL(inode)
|
||||
);
|
||||
}
|
||||
result = (cli_result_t){
|
||||
.err = error_count > 0 ? EIO : 0,
|
||||
.text = error_count > 0 ? "Some blocks were not removed" : (
|
||||
"Done, inode "+std::to_string(INODE_NO_POOL(inode))+" from pool "+
|
||||
std::to_string(pool_id)+" removed"),
|
||||
.data = json11::Json::object {
|
||||
{ "removed_objects", total_done },
|
||||
{ "total_objects", total_count },
|
||||
{ "inactive_osds", inactive_osds },
|
||||
},
|
||||
};
|
||||
state = 100;
|
||||
}
|
||||
|
@@ -12,7 +12,7 @@ static const char *obj_states[] = { "clean", "misplaced", "degraded", "incomplet
|
||||
// Print cluster status:
|
||||
// etcd, mon, osd states
|
||||
// raw/used space, object states, pool states, pg states
|
||||
// client io, recovery io, rebalance io
|
||||
// client io, recovery io, rebalance io, scrub io
|
||||
struct status_printer_t
|
||||
{
|
||||
cli_tool_t *parent;
|
||||
@@ -252,18 +252,25 @@ resume_2:
|
||||
}
|
||||
more_states.resize(more_states.size()-2);
|
||||
std::string recovery_io;
|
||||
int io_indent = 0;
|
||||
{
|
||||
uint64_t deg_bps = agg_stats["recovery_stats"]["degraded"]["bps"].uint64_value();
|
||||
uint64_t deg_iops = agg_stats["recovery_stats"]["degraded"]["iops"].uint64_value();
|
||||
uint64_t misp_bps = agg_stats["recovery_stats"]["misplaced"]["bps"].uint64_value();
|
||||
uint64_t misp_iops = agg_stats["recovery_stats"]["misplaced"]["iops"].uint64_value();
|
||||
uint64_t scrub_bps = agg_stats["op_stats"]["scrub"]["bps"].uint64_value();
|
||||
uint64_t scrub_iops = agg_stats["op_stats"]["scrub"]["iops"].uint64_value();
|
||||
if (misp_iops > 0 || misp_bps > 0 || no_rebalance)
|
||||
io_indent = 3;
|
||||
else if (deg_iops > 0 || deg_bps > 0 || no_recovery)
|
||||
io_indent = 2;
|
||||
if (deg_iops > 0 || deg_bps > 0)
|
||||
{
|
||||
recovery_io += " recovery: "+std::string(no_recovery ? "disabled, " : "")+
|
||||
recovery_io += " recovery: "+str_repeat(" ", io_indent-2)+std::string(no_recovery ? "disabled, " : "")+
|
||||
format_size(deg_bps)+"/s, "+format_size(deg_iops, true)+" op/s\n";
|
||||
}
|
||||
else if (no_recovery)
|
||||
recovery_io += " recovery: disabled\n";
|
||||
recovery_io += " recovery: disabled\n";
|
||||
if (misp_iops > 0 || misp_bps > 0)
|
||||
{
|
||||
recovery_io += " rebalance: "+std::string(no_rebalance ? "disabled, " : "")+
|
||||
@@ -271,6 +278,13 @@ resume_2:
|
||||
}
|
||||
else if (no_rebalance)
|
||||
recovery_io += " rebalance: disabled\n";
|
||||
if (scrub_iops > 0 || scrub_bps > 0)
|
||||
{
|
||||
recovery_io += " scrub: "+str_repeat(" ", io_indent+1)+std::string(no_scrub ? "disabled, " : "")+
|
||||
format_size(scrub_bps)+"/s, "+format_size(scrub_iops, true)+" op/s\n";
|
||||
}
|
||||
else if (no_scrub)
|
||||
recovery_io += " scrub: "+str_repeat(" ", io_indent+1)+"disabled\n";
|
||||
}
|
||||
printf(
|
||||
" cluster:\n"
|
||||
@@ -298,7 +312,7 @@ resume_2:
|
||||
pools_active, pool_count,
|
||||
pgs_by_state_str.c_str(),
|
||||
readonly ? " (read-only mode)" : "",
|
||||
recovery_io.size() > 0 ? " " : "",
|
||||
str_repeat(" ", io_indent).c_str(),
|
||||
format_size(agg_stats["op_stats"]["primary_read"]["bps"].uint64_value()).c_str(),
|
||||
format_size(agg_stats["op_stats"]["primary_read"]["iops"].uint64_value(), true).c_str(),
|
||||
format_size(agg_stats["op_stats"]["primary_write"]["bps"].uint64_value()).c_str(),
|
||||
|
@@ -1209,6 +1209,10 @@ void cluster_client_t::handle_op_part(cluster_op_part_t *part)
|
||||
copy_part_bitmap(op, part);
|
||||
op->version = op->parts.size() == 1 ? part->op.reply.rw.version : 0;
|
||||
}
|
||||
else if (op->opcode == OSD_OP_WRITE)
|
||||
{
|
||||
op->version = op->parts.size() == 1 ? part->op.reply.rw.version : 0;
|
||||
}
|
||||
if (op->inflight_count == 0)
|
||||
{
|
||||
if (op->opcode == OSD_OP_SYNC)
|
||||
|
@@ -130,6 +130,7 @@ public:
|
||||
inode_list_t *list_inode_start(inode_t inode,
|
||||
std::function<void(inode_list_t* lst, std::set<object_id>&& objects, pg_num_t pg_num, osd_num_t primary_osd, int status)> callback);
|
||||
int list_pg_count(inode_list_t *lst);
|
||||
const std::vector<osd_num_t> & list_inode_get_inactive_osds(inode_list_t *lst);
|
||||
void list_inode_next(inode_list_t *lst, int next_pgs);
|
||||
//inline uint32_t get_bs_bitmap_granularity() { return st_cli.global_bitmap_granularity; }
|
||||
//inline uint64_t get_bs_block_size() { return st_cli.global_block_size; }
|
||||
|
@@ -36,6 +36,7 @@ struct inode_list_t
|
||||
inode_t inode = 0;
|
||||
int done_pgs = 0;
|
||||
int want = 0;
|
||||
std::vector<osd_num_t> inactive_osds;
|
||||
std::vector<inode_list_pg_t*> pgs;
|
||||
std::function<void(inode_list_t* lst, std::set<object_id>&& objects, pg_num_t pg_num, osd_num_t primary_osd, int status)> callback;
|
||||
};
|
||||
@@ -60,6 +61,7 @@ inode_list_t* cluster_client_t::list_inode_start(inode_t inode,
|
||||
lst->inode = inode;
|
||||
lst->callback = callback;
|
||||
auto pool_cfg = st_cli.pool_config[pool_id];
|
||||
std::set<osd_num_t> inactive_osd_set;
|
||||
for (auto & pg_item: pool_cfg.pg_config)
|
||||
{
|
||||
auto & pg = pg_item.second;
|
||||
@@ -106,11 +108,18 @@ inode_list_t* cluster_client_t::list_inode_start(inode_t inode,
|
||||
}
|
||||
for (osd_num_t peer_osd: all_peers)
|
||||
{
|
||||
r->list_osds.push_back((inode_list_osd_t){
|
||||
.pg = r,
|
||||
.osd_num = peer_osd,
|
||||
.sent = false,
|
||||
});
|
||||
if (st_cli.peer_states.find(peer_osd) != st_cli.peer_states.end())
|
||||
{
|
||||
r->list_osds.push_back((inode_list_osd_t){
|
||||
.pg = r,
|
||||
.osd_num = peer_osd,
|
||||
.sent = false,
|
||||
});
|
||||
}
|
||||
else
|
||||
{
|
||||
inactive_osd_set.insert(peer_osd);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
@@ -132,6 +141,7 @@ inode_list_t* cluster_client_t::list_inode_start(inode_t inode,
|
||||
{
|
||||
lst->pgs[i]->pos = i;
|
||||
}
|
||||
lst->inactive_osds.insert(lst->inactive_osds.end(), inactive_osd_set.begin(), inactive_osd_set.end());
|
||||
lists.push_back(lst);
|
||||
return lst;
|
||||
}
|
||||
@@ -141,6 +151,11 @@ int cluster_client_t::list_pg_count(inode_list_t *lst)
|
||||
return lst->pgs.size();
|
||||
}
|
||||
|
||||
const std::vector<osd_num_t> & cluster_client_t::list_inode_get_inactive_osds(inode_list_t *lst)
|
||||
{
|
||||
return lst->inactive_osds;
|
||||
}
|
||||
|
||||
void cluster_client_t::list_inode_next(inode_list_t *lst, int next_pgs)
|
||||
{
|
||||
if (next_pgs >= 0)
|
||||
|
@@ -10,6 +10,7 @@
|
||||
#include "json11/json11.hpp"
|
||||
#include "str_util.h"
|
||||
#include "blockstore.h"
|
||||
#include "blockstore_disk.h"
|
||||
|
||||
// Calculate offsets for a block device and print OSD command line parameters
|
||||
void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
|
||||
@@ -20,23 +21,39 @@ void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
|
||||
fprintf(stderr, "Device path is missing\n");
|
||||
exit(1);
|
||||
}
|
||||
uint64_t object_size = parse_size(cfg["object_size"].string_value());
|
||||
uint64_t data_block_size = parse_size(cfg["object_size"].string_value());
|
||||
uint64_t bitmap_granularity = parse_size(cfg["bitmap_granularity"].string_value());
|
||||
uint64_t journal_size = parse_size(cfg["journal_size"].string_value());
|
||||
uint64_t device_block_size = parse_size(cfg["device_block_size"].string_value());
|
||||
uint64_t journal_offset = parse_size(cfg["journal_offset"].string_value());
|
||||
uint64_t device_size = parse_size(cfg["device_size"].string_value());
|
||||
uint32_t csum_block_size = parse_size(cfg["csum_block_size"].string_value());
|
||||
uint32_t data_csum_type = BLOCKSTORE_CSUM_NONE;
|
||||
if (cfg["data_csum_type"] == "crc32c")
|
||||
data_csum_type = BLOCKSTORE_CSUM_CRC32C;
|
||||
else if (cfg["data_csum_type"].string_value() != "" && cfg["data_csum_type"].string_value() != "none")
|
||||
{
|
||||
fprintf(
|
||||
stderr, "data_csum_type=%s is unsupported, only \"crc32c\" and \"none\" are supported",
|
||||
cfg["data_csum_type"].string_value().c_str()
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
std::string format = cfg["format"].string_value();
|
||||
if (json_output)
|
||||
format = "json";
|
||||
if (!object_size)
|
||||
object_size = 1 << DEFAULT_DATA_BLOCK_ORDER;
|
||||
if (!data_block_size)
|
||||
data_block_size = 1 << DEFAULT_DATA_BLOCK_ORDER;
|
||||
if (!bitmap_granularity)
|
||||
bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
|
||||
if (!journal_size)
|
||||
journal_size = 16*1024*1024;
|
||||
if (!device_block_size)
|
||||
device_block_size = 4096;
|
||||
if (!data_csum_type)
|
||||
csum_block_size = 0;
|
||||
else if (!csum_block_size)
|
||||
csum_block_size = bitmap_granularity;
|
||||
uint64_t orig_device_size = device_size;
|
||||
if (!device_size)
|
||||
{
|
||||
@@ -85,22 +102,30 @@ void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
|
||||
fprintf(stderr, "Invalid device block size specified: %lu\n", device_block_size);
|
||||
exit(1);
|
||||
}
|
||||
if (object_size < device_block_size || object_size > MAX_DATA_BLOCK_SIZE ||
|
||||
object_size & (object_size-1) != 0)
|
||||
if (data_block_size < device_block_size || data_block_size > MAX_DATA_BLOCK_SIZE ||
|
||||
data_block_size & (data_block_size-1) != 0)
|
||||
{
|
||||
fprintf(stderr, "Invalid object size specified: %lu\n", object_size);
|
||||
fprintf(stderr, "Invalid object size specified: %lu\n", data_block_size);
|
||||
exit(1);
|
||||
}
|
||||
if (bitmap_granularity < device_block_size || bitmap_granularity > object_size ||
|
||||
if (bitmap_granularity < device_block_size || bitmap_granularity > data_block_size ||
|
||||
bitmap_granularity & (bitmap_granularity-1) != 0)
|
||||
{
|
||||
fprintf(stderr, "Invalid bitmap granularity specified: %lu\n", bitmap_granularity);
|
||||
exit(1);
|
||||
}
|
||||
if (csum_block_size && (data_block_size % csum_block_size))
|
||||
{
|
||||
fprintf(stderr, "csum_block_size must be a divisor of data_block_size\n");
|
||||
exit(1);
|
||||
}
|
||||
journal_offset = ((journal_offset+device_block_size-1)/device_block_size)*device_block_size;
|
||||
uint64_t meta_offset = journal_offset + ((journal_size+device_block_size-1)/device_block_size)*device_block_size;
|
||||
uint64_t entries_per_block = (device_block_size / (24 + 2*object_size/bitmap_granularity/8));
|
||||
uint64_t object_count = ((device_size-meta_offset)/object_size);
|
||||
uint64_t data_csum_size = (data_csum_type ? data_block_size/csum_block_size*(data_csum_type & 0xFF) : 0);
|
||||
uint64_t clean_entry_bitmap_size = data_block_size/bitmap_granularity/8;
|
||||
uint64_t clean_entry_size = 24 /*sizeof(clean_disk_entry)*/ + 2*clean_entry_bitmap_size + data_csum_size + 4 /*entry_csum*/;
|
||||
uint64_t entries_per_block = device_block_size / clean_entry_size;
|
||||
uint64_t object_count = ((device_size-meta_offset)/data_block_size);
|
||||
uint64_t meta_size = (1 + (object_count+entries_per_block-1)/entries_per_block) * device_block_size;
|
||||
uint64_t data_offset = meta_offset + meta_size;
|
||||
if (format == "json")
|
||||
|
@@ -59,6 +59,8 @@ static const char *help_text =
|
||||
" --journal_size 32M/1G Set journal size (area or partition size)\n"
|
||||
" --block_size 128k/1M Set blockstore object size\n"
|
||||
" --bitmap_granularity 4k Set bitmap granularity\n"
|
||||
" --data_csum_type none Set data checksum type (crc32c or none)\n"
|
||||
" --csum_block_size 4k Set data checksum block size\n"
|
||||
" --data_device_block 4k Override data device block size\n"
|
||||
" --meta_device_block 4k Override metadata device block size\n"
|
||||
" --journal_device_block 4k Override journal device block size\n"
|
||||
@@ -72,8 +74,9 @@ static const char *help_text =
|
||||
" If it doesn't succeed it issues a warning in the system log.\n"
|
||||
" \n"
|
||||
" You can also pass other OSD options here as arguments and they'll be persisted\n"
|
||||
" to the superblock: max_write_iodepth, max_write_iodepth, min_flusher_count,\n"
|
||||
" max_flusher_count, inmemory_metadata, inmemory_journal, journal_sector_buffer_count,\n"
|
||||
" in the superblock: cached_read_data, cached_read_meta, cached_read_journal,\n"
|
||||
" inmemory_metadata, inmemory_journal, max_write_iodepth,\n"
|
||||
" min_flusher_count, max_flusher_count, journal_sector_buffer_count,\n"
|
||||
" journal_no_same_sector_overwrites, throttle_small_writes, throttle_target_iops,\n"
|
||||
" throttle_target_mbs, throttle_target_parallelism, throttle_threshold_us.\n"
|
||||
"\n"
|
||||
@@ -161,6 +164,8 @@ static const char *help_text =
|
||||
" --object_size 128k Set blockstore block size\n"
|
||||
" --bitmap_granularity 4k Set bitmap granularity\n"
|
||||
" --journal_size 16M Set journal size\n"
|
||||
" --data_csum_type none Set data checksum type (crc32c or none)\n"
|
||||
" --csum_block_size 4k Set data checksum block size\n"
|
||||
" --device_block_size 4k Set device block size\n"
|
||||
" --journal_offset 0 Set journal offset\n"
|
||||
" --device_size 0 Set device size\n"
|
||||
@@ -270,6 +275,19 @@ int main(int argc, char *argv[])
|
||||
fprintf(stderr, "Invalid JSON: %s\n", json_err.c_str());
|
||||
return 1;
|
||||
}
|
||||
if (entries[0]["type"] == "start")
|
||||
{
|
||||
self.dsk.data_csum_type = csum_type_from_str(entries[0]["data_csum_type"].string_value());
|
||||
self.dsk.csum_block_size = entries[0]["csum_block_size"].uint64_value();
|
||||
}
|
||||
if (self.options["data_csum_type"] != "")
|
||||
{
|
||||
self.dsk.data_csum_type = csum_type_from_str(self.options["data_csum_type"]);
|
||||
}
|
||||
if (self.options["csum_block_size"] != "")
|
||||
{
|
||||
self.dsk.csum_block_size = stoull_full(self.options["csum_block_size"], 0);
|
||||
}
|
||||
return self.write_json_journal(entries);
|
||||
}
|
||||
else if (!strcmp(cmd[0], "dump-meta"))
|
||||
|
@@ -64,17 +64,19 @@ struct disk_tool_t
|
||||
ring_loop_t *ringloop;
|
||||
ring_consumer_t ring_consumer;
|
||||
int remap_active;
|
||||
journal_entry_start je_start;
|
||||
uint8_t *new_journal_buf, *new_meta_buf, *new_journal_ptr, *new_journal_data;
|
||||
uint64_t new_journal_in_pos;
|
||||
int64_t data_idx_diff;
|
||||
uint64_t total_blocks, free_first, free_last;
|
||||
uint64_t new_clean_entry_bitmap_size, new_clean_entry_size, new_entries_per_block;
|
||||
uint64_t new_clean_entry_bitmap_size, new_data_csum_size, new_clean_entry_size, new_entries_per_block;
|
||||
int new_journal_fd, new_meta_fd;
|
||||
resizer_data_moving_t *moving_blocks;
|
||||
|
||||
bool started;
|
||||
void *small_write_data;
|
||||
uint32_t data_crc32;
|
||||
bool data_csum_valid;
|
||||
uint32_t crc32_last;
|
||||
uint32_t new_crc32_prev;
|
||||
|
||||
@@ -84,11 +86,11 @@ struct disk_tool_t
|
||||
void dump_journal_entry(int num, journal_entry *je, bool json);
|
||||
int process_journal(std::function<int(void*)> block_fn);
|
||||
int process_journal_block(void *buf, std::function<void(int, journal_entry*)> iter_fn);
|
||||
int process_meta(std::function<void(blockstore_meta_header_v1_t *)> hdr_fn,
|
||||
int process_meta(std::function<void(blockstore_meta_header_v2_t *)> hdr_fn,
|
||||
std::function<void(uint64_t, clean_disk_entry*, uint8_t*)> record_fn);
|
||||
|
||||
int dump_meta();
|
||||
void dump_meta_header(blockstore_meta_header_v1_t *hdr);
|
||||
void dump_meta_header(blockstore_meta_header_v2_t *hdr);
|
||||
void dump_meta_entry(uint64_t block_num, clean_disk_entry *entry, uint8_t *bitmap);
|
||||
|
||||
int write_json_journal(json11::Json entries);
|
||||
@@ -96,7 +98,7 @@ struct disk_tool_t
|
||||
|
||||
int resize_data();
|
||||
int resize_parse_params();
|
||||
void resize_init(blockstore_meta_header_v1_t *hdr);
|
||||
void resize_init(blockstore_meta_header_v2_t *hdr);
|
||||
int resize_remap_blocks();
|
||||
int resize_copy_data();
|
||||
int resize_rewrite_journal();
|
||||
@@ -141,3 +143,5 @@ json11::Json read_parttable(std::string dev);
|
||||
uint64_t dev_size_from_parttable(json11::Json pt);
|
||||
uint64_t free_from_parttable(json11::Json pt);
|
||||
int fix_partition_type(std::string dev_by_uuid);
|
||||
std::string csum_type_str(uint32_t data_csum_type);
|
||||
uint32_t csum_type_from_str(std::string data_csum_type);
|
||||
|
@@ -55,6 +55,23 @@ int disk_tool_t::dump_journal()
|
||||
printf("offset %08lx:\n", journal_pos);
|
||||
else
|
||||
printf(",\"entries\":[\n");
|
||||
if (journal_pos == 0)
|
||||
{
|
||||
// Fill journal header to know checksum type & size
|
||||
journal_entry *je = (journal_entry*)journal_buf;
|
||||
if (je->magic == JOURNAL_MAGIC && je->type == JE_START &&
|
||||
(je->start.version == JOURNAL_VERSION_V1 || je->start.version == JOURNAL_VERSION_V2))
|
||||
{
|
||||
memcpy(&je_start, je, sizeof(je_start));
|
||||
if (je_start.size == JE_START_V0_SIZE)
|
||||
je_start.version = 0;
|
||||
if (je_start.version < JOURNAL_VERSION_V2)
|
||||
{
|
||||
je_start.data_csum_type = 0;
|
||||
je_start.csum_block_size = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
first_entry = true;
|
||||
process_journal_block(journal_buf, [this](int num, journal_entry *je) { dump_journal_entry(num, je, json); });
|
||||
if (json)
|
||||
@@ -120,8 +137,22 @@ int disk_tool_t::process_journal(std::function<int(void*)> block_fn)
|
||||
fprintf(stderr, "offset %08lx: journal superblock is invalid\n", journal_pos);
|
||||
r = 1;
|
||||
}
|
||||
else if (je->start.size != JE_START_V0_SIZE && je->start.version != JOURNAL_VERSION_V1 && je->start.version != JOURNAL_VERSION_V2)
|
||||
{
|
||||
fprintf(stderr, "offset %08lx: journal superblock contains version %lu, but I only understand 0, 1 and 2\n",
|
||||
journal_pos, je->start.size == JE_START_V0_SIZE ? 0 : je->start.version);
|
||||
r = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
memcpy(&je_start, je, sizeof(je_start));
|
||||
if (je_start.size == JE_START_V0_SIZE)
|
||||
je_start.version = 0;
|
||||
if (je_start.version < JOURNAL_VERSION_V2)
|
||||
{
|
||||
je_start.data_csum_type = 0;
|
||||
je_start.csum_block_size = 0;
|
||||
}
|
||||
started = false;
|
||||
crc32_last = 0;
|
||||
block_fn(data);
|
||||
@@ -183,7 +214,49 @@ int disk_tool_t::process_journal_block(void *buf, std::function<void(int, journa
|
||||
}
|
||||
small_write_data = memalign_or_die(MEM_ALIGNMENT, je->small_write.len);
|
||||
assert(pread(dsk.journal_fd, small_write_data, je->small_write.len, dsk.journal_offset+je->small_write.data_offset) == je->small_write.len);
|
||||
data_crc32 = crc32c(0, small_write_data, je->small_write.len);
|
||||
data_crc32 = je_start.csum_block_size ? 0 : crc32c(0, small_write_data, je->small_write.len);
|
||||
data_csum_valid = (data_crc32 == je->small_write.crc32_data);
|
||||
if (je_start.csum_block_size && je->small_write.len > 0)
|
||||
{
|
||||
// like in enqueue_write()
|
||||
uint32_t start = je->small_write.offset / je_start.csum_block_size;
|
||||
uint32_t end = (je->small_write.offset+je->small_write.len-1) / je_start.csum_block_size;
|
||||
uint32_t data_csum_size = (end-start+1) * (je_start.data_csum_type & 0xFF);
|
||||
if (je->size < sizeof(journal_entry_small_write) + data_csum_size)
|
||||
{
|
||||
data_csum_valid = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
uint32_t calc_csum = 0;
|
||||
uint32_t *block_csums = (uint32_t*)((uint8_t*)je + je->size - data_csum_size);
|
||||
if (start == end)
|
||||
{
|
||||
calc_csum = crc32c(0, (uint8_t*)small_write_data, je->small_write.len);
|
||||
data_csum_valid = data_csum_valid && (calc_csum == *block_csums++);
|
||||
}
|
||||
else
|
||||
{
|
||||
// First block
|
||||
calc_csum = crc32c(0, (uint8_t*)small_write_data,
|
||||
je_start.csum_block_size*(start+1)-je->small_write.offset);
|
||||
data_csum_valid = data_csum_valid && (calc_csum == *block_csums++);
|
||||
// Intermediate blocks
|
||||
for (uint32_t i = start+1; i < end; i++)
|
||||
{
|
||||
calc_csum = crc32c(0, (uint8_t*)small_write_data +
|
||||
je_start.csum_block_size*i-je->small_write.offset, je_start.csum_block_size);
|
||||
data_csum_valid = data_csum_valid && (calc_csum == *block_csums++);
|
||||
}
|
||||
// Last block
|
||||
calc_csum = crc32c(
|
||||
0, (uint8_t*)small_write_data + end*je_start.csum_block_size - je->small_write.offset,
|
||||
je->small_write.offset+je->small_write.len - end*je_start.csum_block_size
|
||||
);
|
||||
data_csum_valid = data_csum_valid && (calc_csum == *block_csums++);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
iter_fn(entry, je);
|
||||
if (je->type == JE_SMALL_WRITE || je->type == JE_SMALL_WRITE_INSTANT)
|
||||
@@ -223,29 +296,40 @@ void disk_tool_t::dump_journal_entry(int num, journal_entry *je, bool json)
|
||||
if (je->type == JE_START)
|
||||
{
|
||||
printf(
|
||||
json ? ",\"type\":\"start\",\"start\":\"0x%lx\"}" : "je_start start=%08lx\n",
|
||||
json ? ",\"type\":\"start\",\"start\":\"0x%lx\"" : "je_start start=%08lx",
|
||||
je->start.journal_start
|
||||
);
|
||||
if (je->start.data_csum_type)
|
||||
{
|
||||
printf(
|
||||
json ? ",\"data_csum_type\":\"%s\",\"csum_block_size\":%u" : " data_csum_type=%s csum_block_size=%u",
|
||||
csum_type_str(je->start.data_csum_type).c_str(), je->start.csum_block_size
|
||||
);
|
||||
}
|
||||
printf(json ? "}" : "\n");
|
||||
}
|
||||
else if (je->type == JE_SMALL_WRITE || je->type == JE_SMALL_WRITE_INSTANT)
|
||||
{
|
||||
auto & sw = je->small_write;
|
||||
printf(
|
||||
json ? ",\"type\":\"small_write%s\",\"inode\":\"0x%lx\",\"stripe\":\"0x%lx\",\"ver\":\"%lu\",\"offset\":%u,\"len\":%u,\"loc\":\"0x%lx\""
|
||||
: "je_small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u loc=%08lx",
|
||||
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
|
||||
je->small_write.oid.inode, je->small_write.oid.stripe,
|
||||
je->small_write.version, je->small_write.offset, je->small_write.len,
|
||||
je->small_write.data_offset
|
||||
sw.oid.inode, sw.oid.stripe, sw.version, sw.offset, sw.len, sw.data_offset
|
||||
);
|
||||
if (journal_calc_data_pos != je->small_write.data_offset)
|
||||
if (journal_calc_data_pos != sw.data_offset)
|
||||
{
|
||||
printf(json ? ",\"bad_loc\":true,\"calc_loc\":\"0x%lx\""
|
||||
: " (mismatched, calculated = %lu)", journal_pos);
|
||||
}
|
||||
if (je->small_write.size > sizeof(journal_entry_small_write))
|
||||
uint32_t data_csum_size = (!je_start.csum_block_size
|
||||
? 0
|
||||
: ((sw.offset + sw.len - 1)/je_start.csum_block_size - sw.offset/je_start.csum_block_size + 1)
|
||||
*(je_start.data_csum_type & 0xFF));
|
||||
if (je->size > sizeof(journal_entry_small_write) + data_csum_size)
|
||||
{
|
||||
printf(json ? ",\"bitmap\":\"" : " (bitmap: ");
|
||||
for (int i = sizeof(journal_entry_small_write); i < je->small_write.size; i++)
|
||||
for (int i = sizeof(journal_entry_small_write); i < je->size - data_csum_size; i++)
|
||||
{
|
||||
printf("%02x", ((uint8_t*)je)[i]);
|
||||
}
|
||||
@@ -254,34 +338,56 @@ void disk_tool_t::dump_journal_entry(int num, journal_entry *je, bool json)
|
||||
if (dump_with_data)
|
||||
{
|
||||
printf(json ? ",\"data\":\"" : " (data: ");
|
||||
for (int i = 0; i < je->small_write.len; i++)
|
||||
for (int i = 0; i < sw.len; i++)
|
||||
{
|
||||
printf("%02x", ((uint8_t*)small_write_data)[i]);
|
||||
}
|
||||
printf(json ? "\"" : ")");
|
||||
}
|
||||
if (data_csum_size > 0 && je->size >= sizeof(journal_entry_small_write) + data_csum_size)
|
||||
{
|
||||
printf(json ? ",\"block_csums\":\"" : " block_csums=");
|
||||
uint8_t *block_csums = (uint8_t*)je + je->size - data_csum_size;
|
||||
for (int i = 0; i < data_csum_size; i++)
|
||||
printf("%02x", block_csums[i]);
|
||||
printf(json ? "\"" : "");
|
||||
}
|
||||
else
|
||||
{
|
||||
printf(json ? ",\"data_crc32\":\"%08x\"" : " data_crc32=%08x", sw.crc32_data);
|
||||
}
|
||||
printf(
|
||||
json ? ",\"data_crc32\":\"%08x\",\"data_valid\":%s}" : " data_crc32=%08x%s\n",
|
||||
je->small_write.crc32_data,
|
||||
(data_crc32 != je->small_write.crc32_data
|
||||
? (json ? "false" : " (invalid)")
|
||||
: (json ? "true" : " (valid)"))
|
||||
json ? ",\"data_valid\":%s}" : "%s\n",
|
||||
(data_csum_valid
|
||||
? (json ? "true" : " (valid)")
|
||||
: (json ? "false" : " (invalid)"))
|
||||
);
|
||||
}
|
||||
else if (je->type == JE_BIG_WRITE || je->type == JE_BIG_WRITE_INSTANT)
|
||||
{
|
||||
auto & bw = je->big_write;
|
||||
printf(
|
||||
json ? ",\"type\":\"big_write%s\",\"inode\":\"0x%lx\",\"stripe\":\"0x%lx\",\"ver\":\"%lu\",\"offset\":%u,\"len\":%u,\"loc\":\"0x%lx\""
|
||||
: "je_big_write%s oid=%lx:%lx ver=%lu offset=%u len=%u loc=%08lx",
|
||||
je->type == JE_BIG_WRITE_INSTANT ? "_instant" : "",
|
||||
je->big_write.oid.inode, je->big_write.oid.stripe,
|
||||
je->big_write.version, je->big_write.offset, je->big_write.len,
|
||||
je->big_write.location
|
||||
bw.oid.inode, bw.oid.stripe, bw.version, bw.offset, bw.len, bw.location
|
||||
);
|
||||
if (je->big_write.size > sizeof(journal_entry_big_write))
|
||||
uint32_t data_csum_size = (!je_start.csum_block_size
|
||||
? 0
|
||||
: ((bw.offset + bw.len - 1)/je_start.csum_block_size - bw.offset/je_start.csum_block_size + 1)
|
||||
*(je_start.data_csum_type & 0xFF));
|
||||
if (data_csum_size > 0 && je->size >= sizeof(journal_entry_big_write) + data_csum_size)
|
||||
{
|
||||
printf(json ? ",\"block_csums\":\"" : " block_csums=");
|
||||
uint8_t *block_csums = (uint8_t*)je + je->size - data_csum_size;
|
||||
for (int i = 0; i < data_csum_size; i++)
|
||||
printf("%02x", block_csums[i]);
|
||||
printf(json ? "\"" : "");
|
||||
}
|
||||
if (bw.size > sizeof(journal_entry_big_write) + data_csum_size)
|
||||
{
|
||||
printf(json ? ",\"bitmap\":\"" : " (bitmap: ");
|
||||
for (int i = sizeof(journal_entry_big_write); i < je->big_write.size; i++)
|
||||
for (int i = sizeof(journal_entry_big_write); i < bw.size - data_csum_size; i++)
|
||||
{
|
||||
printf("%02x", ((uint8_t*)je)[i]);
|
||||
}
|
||||
@@ -338,7 +444,9 @@ int disk_tool_t::write_json_journal(json11::Json entries)
|
||||
.type = JE_START,
|
||||
.size = sizeof(journal_entry_start),
|
||||
.journal_start = dsk.journal_block_size,
|
||||
.version = JOURNAL_VERSION,
|
||||
.version = JOURNAL_VERSION_V2,
|
||||
.data_csum_type = dsk.data_csum_type,
|
||||
.csum_block_size = dsk.csum_block_size,
|
||||
};
|
||||
((journal_entry*)new_journal_buf)->crc32 = je_crc32((journal_entry*)new_journal_buf);
|
||||
new_journal_ptr += dsk.journal_block_size;
|
||||
@@ -358,9 +466,11 @@ int disk_tool_t::write_json_journal(json11::Json entries)
|
||||
uint32_t entry_size = (type == JE_START
|
||||
? sizeof(journal_entry_start)
|
||||
: (type == JE_SMALL_WRITE || type == JE_SMALL_WRITE_INSTANT
|
||||
? sizeof(journal_entry_small_write) + dsk.clean_entry_bitmap_size
|
||||
? sizeof(journal_entry_small_write) + dsk.clean_entry_bitmap_size +
|
||||
(dsk.data_csum_type ? rec["len"].uint64_value()/dsk.csum_block_size*(dsk.data_csum_type & 0xFF) : 0)
|
||||
: (type == JE_BIG_WRITE || type == JE_BIG_WRITE_INSTANT
|
||||
? sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size
|
||||
? sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size +
|
||||
(dsk.data_csum_type ? rec["len"].uint64_value()/dsk.csum_block_size*(dsk.data_csum_type & 0xFF) : 0)
|
||||
: sizeof(journal_entry_del))));
|
||||
if (dsk.journal_block_size < new_journal_in_pos + entry_size)
|
||||
{
|
||||
@@ -402,12 +512,24 @@ int disk_tool_t::write_json_journal(json11::Json entries)
|
||||
.offset = (uint32_t)rec["offset"].uint64_value(),
|
||||
.len = (uint32_t)rec["len"].uint64_value(),
|
||||
.data_offset = (uint64_t)(new_journal_data-new_journal_buf),
|
||||
.crc32_data = (uint32_t)sscanf_json("%x", rec["data_crc32"]),
|
||||
.crc32_data = !dsk.data_csum_type ? 0 : (uint32_t)sscanf_json("%x", rec["data_crc32"]),
|
||||
};
|
||||
fromhexstr(rec["bitmap"].string_value(), dsk.clean_entry_bitmap_size, ((uint8_t*)ne) + sizeof(journal_entry_small_write));
|
||||
uint32_t data_csum_size = !dsk.data_csum_type ? 0 : ne->small_write.len/dsk.csum_block_size*(dsk.data_csum_type & 0xFF);
|
||||
fromhexstr(rec["bitmap"].string_value(), dsk.clean_entry_bitmap_size, ((uint8_t*)ne) + sizeof(journal_entry_small_write) + data_csum_size);
|
||||
fromhexstr(rec["data"].string_value(), ne->small_write.len, new_journal_data);
|
||||
if (dsk.data_csum_type)
|
||||
fromhexstr(rec["block_csums"].string_value(), data_csum_size, ((uint8_t*)ne) + sizeof(journal_entry_small_write));
|
||||
if (rec["data"].is_string())
|
||||
ne->small_write.crc32_data = crc32c(0, new_journal_data, ne->small_write.len);
|
||||
{
|
||||
if (!dsk.data_csum_type)
|
||||
ne->small_write.crc32_data = crc32c(0, new_journal_data, ne->small_write.len);
|
||||
else if (dsk.data_csum_type == BLOCKSTORE_CSUM_CRC32C)
|
||||
{
|
||||
uint32_t *block_csums = (uint32_t*)(((uint8_t*)ne) + sizeof(journal_entry_small_write));
|
||||
for (uint32_t i = 0; i < ne->small_write.len; i += dsk.csum_block_size, block_csums++)
|
||||
*block_csums = crc32c(0, new_journal_data+i, dsk.csum_block_size);
|
||||
}
|
||||
}
|
||||
new_journal_data += ne->small_write.len;
|
||||
}
|
||||
else if (type == JE_BIG_WRITE || type == JE_BIG_WRITE_INSTANT)
|
||||
@@ -426,7 +548,10 @@ int disk_tool_t::write_json_journal(json11::Json entries)
|
||||
.len = (uint32_t)rec["len"].uint64_value(),
|
||||
.location = sscanf_json(NULL, rec["loc"]),
|
||||
};
|
||||
fromhexstr(rec["bitmap"].string_value(), dsk.clean_entry_bitmap_size, ((uint8_t*)ne) + sizeof(journal_entry_big_write));
|
||||
uint32_t data_csum_size = !dsk.data_csum_type ? 0 : ne->big_write.len/dsk.csum_block_size*(dsk.data_csum_type & 0xFF);
|
||||
fromhexstr(rec["bitmap"].string_value(), dsk.clean_entry_bitmap_size, ((uint8_t*)ne) + sizeof(journal_entry_big_write) + data_csum_size);
|
||||
if (dsk.data_csum_type)
|
||||
fromhexstr(rec["block_csums"].string_value(), data_csum_size, ((uint8_t*)ne) + sizeof(journal_entry_big_write));
|
||||
}
|
||||
else if (type == JE_STABLE || type == JE_ROLLBACK || type == JE_DELETE)
|
||||
{
|
||||
|
@@ -5,7 +5,7 @@
|
||||
#include "rw_blocking.h"
|
||||
#include "osd_id.h"
|
||||
|
||||
int disk_tool_t::process_meta(std::function<void(blockstore_meta_header_v1_t *)> hdr_fn,
|
||||
int disk_tool_t::process_meta(std::function<void(blockstore_meta_header_v2_t *)> hdr_fn,
|
||||
std::function<void(uint64_t, clean_disk_entry*, uint8_t*)> record_fn)
|
||||
{
|
||||
if (dsk.meta_block_size % DIRECT_IO_ALIGNMENT)
|
||||
@@ -28,12 +28,38 @@ int disk_tool_t::process_meta(std::function<void(blockstore_meta_header_v1_t *)>
|
||||
lseek64(dsk.meta_fd, dsk.meta_offset, 0);
|
||||
read_blocking(dsk.meta_fd, data, dsk.meta_block_size);
|
||||
// Check superblock
|
||||
blockstore_meta_header_v1_t *hdr = (blockstore_meta_header_v1_t *)data;
|
||||
if (hdr->zero == 0 &&
|
||||
hdr->magic == BLOCKSTORE_META_MAGIC_V1 &&
|
||||
hdr->version == BLOCKSTORE_META_VERSION_V1)
|
||||
blockstore_meta_header_v2_t *hdr = (blockstore_meta_header_v2_t *)data;
|
||||
if (hdr->zero == 0 && hdr->magic == BLOCKSTORE_META_MAGIC_V1)
|
||||
{
|
||||
// Vitastor 0.6-0.7 - static array of clean_disk_entry with bitmaps
|
||||
if (hdr->version == BLOCKSTORE_META_FORMAT_V1)
|
||||
{
|
||||
// Vitastor 0.6-0.8 - static array of clean_disk_entry with bitmaps
|
||||
hdr->data_csum_type = 0;
|
||||
hdr->csum_block_size = 0;
|
||||
hdr->header_csum = 0;
|
||||
}
|
||||
else if (hdr->version == BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
// Vitastor 0.9 - static array of clean_disk_entry with bitmaps and checksums
|
||||
if (hdr->data_csum_type != 0 &&
|
||||
hdr->data_csum_type != BLOCKSTORE_CSUM_CRC32C)
|
||||
{
|
||||
fprintf(stderr, "I don't know checksum format %u, the only supported format is crc32c = %u.\n", hdr->data_csum_type, BLOCKSTORE_CSUM_CRC32C);
|
||||
free(data);
|
||||
close(dsk.meta_fd);
|
||||
dsk.meta_fd = -1;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Unsupported version
|
||||
fprintf(stderr, "Metadata format is too new for me (stored version is %lu, max supported %u).\n", hdr->version, BLOCKSTORE_META_FORMAT_V2);
|
||||
free(data);
|
||||
close(dsk.meta_fd);
|
||||
dsk.meta_fd = -1;
|
||||
return 1;
|
||||
}
|
||||
if (hdr->meta_block_size != dsk.meta_block_size)
|
||||
{
|
||||
fprintf(stderr, "Using block size of %u bytes based on information from the superblock\n", hdr->meta_block_size);
|
||||
@@ -45,14 +71,24 @@ int disk_tool_t::process_meta(std::function<void(blockstore_meta_header_v1_t *)>
|
||||
memcpy(new_data, data, dsk.meta_block_size);
|
||||
free(data);
|
||||
data = new_data;
|
||||
hdr = (blockstore_meta_header_v1_t *)data;
|
||||
hdr = (blockstore_meta_header_v2_t *)data;
|
||||
}
|
||||
}
|
||||
dsk.meta_format = hdr->version;
|
||||
dsk.data_block_size = hdr->data_block_size;
|
||||
dsk.csum_block_size = hdr->csum_block_size;
|
||||
dsk.data_csum_type = hdr->data_csum_type;
|
||||
dsk.bitmap_granularity = hdr->bitmap_granularity;
|
||||
dsk.clean_entry_bitmap_size = hdr->data_block_size / hdr->bitmap_granularity / 8;
|
||||
dsk.clean_entry_size = sizeof(clean_disk_entry) + 2*dsk.clean_entry_bitmap_size;
|
||||
dsk.clean_entry_bitmap_size = (hdr->data_block_size / hdr->bitmap_granularity + 7) / 8;
|
||||
dsk.clean_entry_size = sizeof(clean_disk_entry) + 2*dsk.clean_entry_bitmap_size
|
||||
+ (hdr->data_csum_type
|
||||
? ((hdr->data_block_size+hdr->csum_block_size-1)/hdr->csum_block_size
|
||||
*(hdr->data_csum_type & 0xff))
|
||||
: 0)
|
||||
+ (dsk.meta_format == BLOCKSTORE_META_FORMAT_V2 ? 4 /*entry_csum*/ : 0);
|
||||
uint64_t block_num = 0;
|
||||
hdr_fn(hdr);
|
||||
hdr = NULL;
|
||||
meta_pos = dsk.meta_block_size;
|
||||
lseek64(dsk.meta_fd, dsk.meta_offset+meta_pos, 0);
|
||||
while (meta_pos < dsk.meta_len)
|
||||
@@ -67,6 +103,15 @@ int disk_tool_t::process_meta(std::function<void(blockstore_meta_header_v1_t *)>
|
||||
clean_disk_entry *entry = (clean_disk_entry*)((uint8_t*)data + blk + ioff);
|
||||
if (entry->oid.inode)
|
||||
{
|
||||
if (dsk.data_csum_type)
|
||||
{
|
||||
uint32_t *entry_csum = (uint32_t*)((uint8_t*)entry + dsk.clean_entry_size - 4);
|
||||
if (*entry_csum != crc32c(0, entry, dsk.clean_entry_size - 4))
|
||||
{
|
||||
fprintf(stderr, "Metadata entry %lu is corrupt (checksum mismatch), skipping\n", block_num);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
record_fn(block_num, entry, entry->bitmap);
|
||||
}
|
||||
}
|
||||
@@ -107,21 +152,35 @@ int disk_tool_t::process_meta(std::function<void(blockstore_meta_header_v1_t *)>
|
||||
int disk_tool_t::dump_meta()
|
||||
{
|
||||
int r = process_meta(
|
||||
[this](blockstore_meta_header_v1_t *hdr) { dump_meta_header(hdr); },
|
||||
[this](blockstore_meta_header_v2_t *hdr) { dump_meta_header(hdr); },
|
||||
[this](uint64_t block_num, clean_disk_entry *entry, uint8_t *bitmap) { dump_meta_entry(block_num, entry, bitmap); }
|
||||
);
|
||||
printf("\n]}\n");
|
||||
if (r == 0)
|
||||
printf("\n]}\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
void disk_tool_t::dump_meta_header(blockstore_meta_header_v1_t *hdr)
|
||||
void disk_tool_t::dump_meta_header(blockstore_meta_header_v2_t *hdr)
|
||||
{
|
||||
if (hdr)
|
||||
{
|
||||
printf(
|
||||
"{\"version\":\"0.6\",\"meta_block_size\":%u,\"data_block_size\":%u,\"bitmap_granularity\":%u,\"entries\":[\n",
|
||||
hdr->meta_block_size, hdr->data_block_size, hdr->bitmap_granularity
|
||||
);
|
||||
if (hdr->version == BLOCKSTORE_META_FORMAT_V1)
|
||||
{
|
||||
printf(
|
||||
"{\"version\":\"0.6\",\"meta_block_size\":%u,\"data_block_size\":%u,\"bitmap_granularity\":%u,"
|
||||
"\"entries\":[\n",
|
||||
hdr->meta_block_size, hdr->data_block_size, hdr->bitmap_granularity
|
||||
);
|
||||
}
|
||||
else if (hdr->version == BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
printf(
|
||||
"{\"version\":\"0.9\",\"meta_block_size\":%u,\"data_block_size\":%u,\"bitmap_granularity\":%u,"
|
||||
"\"data_csum_type\":%s,\"csum_block_size\":%u,\"entries\":[\n",
|
||||
hdr->meta_block_size, hdr->data_block_size, hdr->bitmap_granularity,
|
||||
csum_type_str(hdr->data_csum_type).c_str(), hdr->csum_block_size
|
||||
);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -151,6 +210,15 @@ void disk_tool_t::dump_meta_entry(uint64_t block_num, clean_disk_entry *entry, u
|
||||
{
|
||||
printf("%02x", bitmap[dsk.clean_entry_bitmap_size + i]);
|
||||
}
|
||||
if (dsk.csum_block_size && dsk.data_csum_type)
|
||||
{
|
||||
uint8_t *csums = bitmap + dsk.clean_entry_bitmap_size*2;
|
||||
printf("\",\"block_csums\":\"");
|
||||
for (uint64_t i = 0; i < (dsk.data_block_size+dsk.csum_block_size-1)/dsk.csum_block_size*(dsk.data_csum_type & 0xFF); i++)
|
||||
{
|
||||
printf("%02x", csums[i]);
|
||||
}
|
||||
}
|
||||
printf("\"}");
|
||||
}
|
||||
else
|
||||
@@ -164,18 +232,30 @@ int disk_tool_t::write_json_meta(json11::Json meta)
|
||||
{
|
||||
new_meta_buf = (uint8_t*)memalign_or_die(MEM_ALIGNMENT, new_meta_len);
|
||||
memset(new_meta_buf, 0, new_meta_len);
|
||||
blockstore_meta_header_v1_t *new_hdr = (blockstore_meta_header_v1_t *)new_meta_buf;
|
||||
blockstore_meta_header_v2_t *new_hdr = (blockstore_meta_header_v2_t *)new_meta_buf;
|
||||
new_hdr->zero = 0;
|
||||
new_hdr->magic = BLOCKSTORE_META_MAGIC_V1;
|
||||
new_hdr->version = BLOCKSTORE_META_VERSION_V1;
|
||||
new_hdr->version = meta["version"].uint64_value() == BLOCKSTORE_META_FORMAT_V1
|
||||
? BLOCKSTORE_META_FORMAT_V1 : BLOCKSTORE_META_FORMAT_V2;
|
||||
new_hdr->meta_block_size = meta["meta_block_size"].uint64_value()
|
||||
? meta["meta_block_size"].uint64_value() : 4096;
|
||||
new_hdr->data_block_size = meta["data_block_size"].uint64_value()
|
||||
? meta["data_block_size"].uint64_value() : 131072;
|
||||
new_hdr->bitmap_granularity = meta["bitmap_granularity"].uint64_value()
|
||||
? meta["bitmap_granularity"].uint64_value() : 4096;
|
||||
new_clean_entry_bitmap_size = new_hdr->data_block_size / new_hdr->bitmap_granularity / 8;
|
||||
new_clean_entry_size = sizeof(clean_disk_entry) + 2*new_clean_entry_bitmap_size;
|
||||
new_hdr->data_csum_type = meta["data_csum_type"].is_number()
|
||||
? meta["data_csum_type"].uint64_value()
|
||||
: (meta["data_csum_type"].string_value() == "crc32c"
|
||||
? BLOCKSTORE_CSUM_CRC32C
|
||||
: BLOCKSTORE_CSUM_NONE);
|
||||
new_hdr->csum_block_size = meta["csum_block_size"].uint64_value();
|
||||
uint32_t new_clean_entry_header_size = (new_hdr->version == BLOCKSTORE_META_FORMAT_V1
|
||||
? sizeof(clean_disk_entry) : sizeof(clean_disk_entry) + 4 /*entry_csum*/);
|
||||
new_clean_entry_bitmap_size = (new_hdr->data_block_size / new_hdr->bitmap_granularity + 7) / 8;
|
||||
new_data_csum_size = (new_hdr->data_csum_type
|
||||
? ((new_hdr->data_block_size+new_hdr->csum_block_size-1)/new_hdr->csum_block_size*(new_hdr->data_csum_type & 0xFF))
|
||||
: 0);
|
||||
new_clean_entry_size = new_clean_entry_header_size + 2*new_clean_entry_bitmap_size + new_data_csum_size;
|
||||
new_entries_per_block = new_hdr->meta_block_size / new_clean_entry_size;
|
||||
for (const auto & e: meta["entries"].array_items())
|
||||
{
|
||||
@@ -194,8 +274,21 @@ int disk_tool_t::write_json_meta(json11::Json meta)
|
||||
new_entry->oid.inode = (sscanf_json(NULL, e["pool"]) << (64-POOL_ID_BITS)) | sscanf_json(NULL, e["inode"]);
|
||||
new_entry->oid.stripe = sscanf_json(NULL, e["stripe"]);
|
||||
new_entry->version = sscanf_json(NULL, e["version"]);
|
||||
fromhexstr(e["bitmap"].string_value(), new_clean_entry_bitmap_size, ((uint8_t*)new_entry) + sizeof(clean_disk_entry));
|
||||
fromhexstr(e["ext_bitmap"].string_value(), new_clean_entry_bitmap_size, ((uint8_t*)new_entry) + sizeof(clean_disk_entry) + new_clean_entry_bitmap_size);
|
||||
fromhexstr(e["bitmap"].string_value(), new_clean_entry_bitmap_size,
|
||||
((uint8_t*)new_entry) + sizeof(clean_disk_entry));
|
||||
fromhexstr(e["ext_bitmap"].string_value(), new_clean_entry_bitmap_size,
|
||||
((uint8_t*)new_entry) + sizeof(clean_disk_entry) + new_clean_entry_bitmap_size);
|
||||
if (new_hdr->version == BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
if (new_hdr->data_csum_type != 0)
|
||||
{
|
||||
fromhexstr(e["data_csum"].string_value(), new_data_csum_size,
|
||||
((uint8_t*)new_entry) + sizeof(clean_disk_entry) + 2*new_clean_entry_bitmap_size);
|
||||
}
|
||||
uint32_t *new_entry_csum = (uint32_t*)(((uint8_t*)new_entry) + sizeof(clean_disk_entry) +
|
||||
2*new_clean_entry_bitmap_size + new_data_csum_size);
|
||||
*new_entry_csum = crc32c(0, new_entry, new_clean_entry_size - 4);
|
||||
}
|
||||
}
|
||||
int r = resize_write_new_meta();
|
||||
free(new_meta_buf);
|
||||
|
@@ -8,6 +8,9 @@
|
||||
int disk_tool_t::prepare_one(std::map<std::string, std::string> options, int is_hdd)
|
||||
{
|
||||
static const char *allow_additional_params[] = {
|
||||
"cached_read_data",
|
||||
"cached_read_meta",
|
||||
"cached_read_journal",
|
||||
"max_write_iodepth",
|
||||
"max_write_iodepth",
|
||||
"min_flusher_count",
|
||||
@@ -99,15 +102,16 @@ int disk_tool_t::prepare_one(std::map<std::string, std::string> options, int is_
|
||||
if (options["journal_size"] == "")
|
||||
{
|
||||
if (options["journal_device"] == "")
|
||||
options["journal_size"] = "32M";
|
||||
options["journal_size"] = is_hdd ? "128M" : "32M";
|
||||
else if (is_hdd)
|
||||
options["journal_size"] = DEFAULT_HYBRID_JOURNAL;
|
||||
}
|
||||
bool is_hybrid = is_hdd && options["journal_device"] != "" && options["journal_device"] != options["data_device"];
|
||||
if (is_hdd)
|
||||
{
|
||||
if (options["block_size"] == "")
|
||||
options["block_size"] = "1M";
|
||||
if (options["throttle_small_writes"] == "")
|
||||
if (is_hybrid && options["throttle_small_writes"] == "")
|
||||
options["throttle_small_writes"] = "1";
|
||||
}
|
||||
json11::Json::object sb;
|
||||
@@ -115,6 +119,7 @@ int disk_tool_t::prepare_one(std::map<std::string, std::string> options, int is_
|
||||
try
|
||||
{
|
||||
dsk.parse_config(options);
|
||||
dsk.cached_read_data = dsk.cached_read_meta = dsk.cached_read_journal = false;
|
||||
dsk.open_data();
|
||||
dsk.open_meta();
|
||||
dsk.open_journal();
|
||||
@@ -134,7 +139,7 @@ int disk_tool_t::prepare_one(std::map<std::string, std::string> options, int is_
|
||||
{ "meta_offset", 4096 + (dsk.meta_device == dsk.journal_device ? dsk.journal_len : 0) },
|
||||
{ "data_offset", 4096 + (dsk.data_device == dsk.meta_device ? dsk.meta_len : 0) +
|
||||
(dsk.data_device == dsk.journal_device ? dsk.journal_len : 0) },
|
||||
{ "journal_no_same_sector_overwrites", true },
|
||||
{ "journal_no_same_sector_overwrites", !is_hdd || is_hybrid },
|
||||
{ "journal_sector_buffer_count", 1024 },
|
||||
{ "disable_data_fsync", json_is_true(options["disable_data_fsync"]) },
|
||||
{ "disable_meta_fsync", json_is_true(options["disable_meta_fsync"]) },
|
||||
@@ -478,6 +483,7 @@ int disk_tool_t::get_meta_partition(std::vector<vitastor_dev_info_t> & ssds, std
|
||||
{
|
||||
blockstore_disk_t dsk;
|
||||
dsk.parse_config(options);
|
||||
dsk.cached_read_data = dsk.cached_read_meta = dsk.cached_read_journal = false;
|
||||
dsk.open_data();
|
||||
dsk.open_meta();
|
||||
dsk.open_journal();
|
||||
|
@@ -29,7 +29,7 @@ int disk_tool_t::resize_data()
|
||||
fprintf(stderr, "Reading metadata\n");
|
||||
data_alloc = new allocator((new_data_len < dsk.data_len ? dsk.data_len : new_data_len) / dsk.data_block_size);
|
||||
r = process_meta(
|
||||
[this](blockstore_meta_header_v1_t *hdr)
|
||||
[this](blockstore_meta_header_v2_t *hdr)
|
||||
{
|
||||
resize_init(hdr);
|
||||
},
|
||||
@@ -91,6 +91,7 @@ int disk_tool_t::resize_parse_params()
|
||||
try
|
||||
{
|
||||
dsk.parse_config(options);
|
||||
dsk.cached_read_data = dsk.cached_read_meta = dsk.cached_read_journal = false;
|
||||
dsk.open_data();
|
||||
dsk.open_meta();
|
||||
dsk.open_journal();
|
||||
@@ -139,7 +140,7 @@ int disk_tool_t::resize_parse_params()
|
||||
return 0;
|
||||
}
|
||||
|
||||
void disk_tool_t::resize_init(blockstore_meta_header_v1_t *hdr)
|
||||
void disk_tool_t::resize_init(blockstore_meta_header_v2_t *hdr)
|
||||
{
|
||||
if (hdr && dsk.data_block_size != hdr->data_block_size)
|
||||
{
|
||||
@@ -149,6 +150,15 @@ void disk_tool_t::resize_init(blockstore_meta_header_v1_t *hdr)
|
||||
}
|
||||
dsk.data_block_size = hdr->data_block_size;
|
||||
}
|
||||
if (hdr && (dsk.data_csum_type != hdr->data_csum_type || dsk.csum_block_size != hdr->csum_block_size))
|
||||
{
|
||||
if (dsk.data_csum_type)
|
||||
{
|
||||
fprintf(stderr, "Using data checksum type %s from metadata superblock\n", csum_type_str(hdr->data_csum_type).c_str());
|
||||
}
|
||||
dsk.data_csum_type = hdr->data_csum_type;
|
||||
dsk.csum_block_size = hdr->csum_block_size;
|
||||
}
|
||||
if (((new_data_len-dsk.data_len) % dsk.data_block_size) ||
|
||||
((new_data_offset-dsk.data_offset) % dsk.data_block_size))
|
||||
{
|
||||
@@ -160,8 +170,12 @@ void disk_tool_t::resize_init(blockstore_meta_header_v1_t *hdr)
|
||||
free_last = (new_data_offset+new_data_len < dsk.data_offset+dsk.data_len)
|
||||
? (dsk.data_offset+dsk.data_len-new_data_offset-new_data_len) / dsk.data_block_size
|
||||
: 0;
|
||||
uint32_t new_clean_entry_header_size = sizeof(clean_disk_entry) + 4 /*entry_csum*/;
|
||||
new_clean_entry_bitmap_size = dsk.data_block_size / (hdr ? hdr->bitmap_granularity : 4096) / 8;
|
||||
new_clean_entry_size = sizeof(clean_disk_entry) + 2 * new_clean_entry_bitmap_size;
|
||||
new_data_csum_size = (dsk.data_csum_type
|
||||
? ((dsk.data_block_size+dsk.csum_block_size-1)/dsk.csum_block_size*(dsk.data_csum_type & 0xFF))
|
||||
: 0);
|
||||
new_clean_entry_size = new_clean_entry_header_size + 2*new_clean_entry_bitmap_size + new_data_csum_size;
|
||||
new_entries_per_block = dsk.meta_block_size/new_clean_entry_size;
|
||||
uint64_t new_meta_blocks = 1 + (new_data_len/dsk.data_block_size + new_entries_per_block-1) / new_entries_per_block;
|
||||
if (!new_meta_len)
|
||||
@@ -349,13 +363,25 @@ int disk_tool_t::resize_rewrite_journal()
|
||||
{
|
||||
if (je->type == JE_START)
|
||||
{
|
||||
if (je_start.data_csum_type != dsk.data_csum_type ||
|
||||
je_start.csum_block_size != dsk.csum_block_size)
|
||||
{
|
||||
fprintf(
|
||||
stderr, "Error: journal header has different checksum parameters: %s/%u vs %s/%u\n",
|
||||
csum_type_str(je_start.data_csum_type).c_str(), je_start.csum_block_size,
|
||||
csum_type_str(dsk.data_csum_type).c_str(), dsk.csum_block_size
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
journal_entry *ne = (journal_entry*)(new_journal_ptr + new_journal_in_pos);
|
||||
*((journal_entry_start*)ne) = (journal_entry_start){
|
||||
.magic = JOURNAL_MAGIC,
|
||||
.type = JE_START,
|
||||
.size = sizeof(journal_entry_start),
|
||||
.journal_start = dsk.journal_block_size,
|
||||
.version = JOURNAL_VERSION,
|
||||
.version = JOURNAL_VERSION_V2,
|
||||
.data_csum_type = dsk.data_csum_type,
|
||||
.csum_block_size = dsk.csum_block_size,
|
||||
};
|
||||
ne->crc32 = je_crc32(ne);
|
||||
new_journal_ptr += dsk.journal_block_size;
|
||||
@@ -436,15 +462,17 @@ int disk_tool_t::resize_rewrite_meta()
|
||||
new_meta_buf = (uint8_t*)memalign_or_die(MEM_ALIGNMENT, new_meta_len);
|
||||
memset(new_meta_buf, 0, new_meta_len);
|
||||
int r = process_meta(
|
||||
[this](blockstore_meta_header_v1_t *hdr)
|
||||
[this](blockstore_meta_header_v2_t *hdr)
|
||||
{
|
||||
blockstore_meta_header_v1_t *new_hdr = (blockstore_meta_header_v1_t *)new_meta_buf;
|
||||
blockstore_meta_header_v2_t *new_hdr = (blockstore_meta_header_v2_t *)new_meta_buf;
|
||||
new_hdr->zero = 0;
|
||||
new_hdr->magic = BLOCKSTORE_META_MAGIC_V1;
|
||||
new_hdr->version = BLOCKSTORE_META_VERSION_V1;
|
||||
new_hdr->version = BLOCKSTORE_META_FORMAT_V1;
|
||||
new_hdr->meta_block_size = dsk.meta_block_size;
|
||||
new_hdr->data_block_size = dsk.data_block_size;
|
||||
new_hdr->bitmap_granularity = dsk.bitmap_granularity ? dsk.bitmap_granularity : 4096;
|
||||
new_hdr->data_csum_type = dsk.data_csum_type;
|
||||
new_hdr->csum_block_size = dsk.csum_block_size;
|
||||
},
|
||||
[this](uint64_t block_num, clean_disk_entry *entry, uint8_t *bitmap)
|
||||
{
|
||||
@@ -463,7 +491,7 @@ int disk_tool_t::resize_rewrite_meta()
|
||||
new_entry->oid = entry->oid;
|
||||
new_entry->version = entry->version;
|
||||
if (bitmap)
|
||||
memcpy(new_entry->bitmap, bitmap, 2*new_clean_entry_bitmap_size);
|
||||
memcpy(new_entry->bitmap, bitmap, 2*new_clean_entry_bitmap_size + new_data_csum_size);
|
||||
else
|
||||
memset(new_entry->bitmap, 0xff, 2*new_clean_entry_bitmap_size);
|
||||
}
|
||||
|
@@ -373,3 +373,22 @@ int fix_partition_type(std::string dev_by_uuid)
|
||||
std::string out;
|
||||
return shell_exec({ "sfdisk", "--no-reread", "--force", "/dev/"+parent_dev }, script, &out, NULL);
|
||||
}
|
||||
|
||||
std::string csum_type_str(uint32_t data_csum_type)
|
||||
{
|
||||
std::string csum_type;
|
||||
if (data_csum_type == BLOCKSTORE_CSUM_NONE)
|
||||
csum_type = "none";
|
||||
else if (data_csum_type == BLOCKSTORE_CSUM_CRC32C)
|
||||
csum_type = "crc32c";
|
||||
else
|
||||
csum_type = std::to_string(data_csum_type);
|
||||
return csum_type;
|
||||
}
|
||||
|
||||
uint32_t csum_type_from_str(std::string data_csum_type)
|
||||
{
|
||||
if (data_csum_type == "crc32c")
|
||||
return BLOCKSTORE_CSUM_CRC32C;
|
||||
return stoull_full(data_csum_type, 0);
|
||||
}
|
||||
|
@@ -187,22 +187,30 @@ void etcd_state_client_t::add_etcd_url(std::string addr)
|
||||
check_addr = addr;
|
||||
if (pos == std::string::npos)
|
||||
addr += "/v3";
|
||||
bool local = false;
|
||||
int i;
|
||||
for (i = 0; i < local_ips.size(); i++)
|
||||
{
|
||||
if (local_ips[i] == check_addr)
|
||||
{
|
||||
this->etcd_local.push_back(addr);
|
||||
local = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i >= local_ips.size())
|
||||
this->etcd_addresses.push_back(addr);
|
||||
auto & to = local ? this->etcd_local : this->etcd_addresses;
|
||||
for (i = 0; i < to.size(); i++)
|
||||
{
|
||||
if (to[i] == addr)
|
||||
break;
|
||||
}
|
||||
if (i >= to.size())
|
||||
to.push_back(addr);
|
||||
}
|
||||
}
|
||||
|
||||
void etcd_state_client_t::parse_config(const json11::Json & config)
|
||||
{
|
||||
this->etcd_local.clear();
|
||||
this->etcd_addresses.clear();
|
||||
if (config["etcd_address"].is_string())
|
||||
{
|
||||
@@ -349,7 +357,7 @@ void etcd_state_client_t::start_etcd_watcher()
|
||||
watch_id == ETCD_OSD_STATE_WATCH_ID)
|
||||
etcd_watches_initialised++;
|
||||
if (etcd_watches_initialised == 4 && this->log_level > 0)
|
||||
fprintf(stderr, "Successfully subscribed to etcd at %s\n", selected_etcd_address.c_str());
|
||||
fprintf(stderr, "Successfully subscribed to etcd at %s\n", cur_addr.c_str());
|
||||
}
|
||||
if (data["result"]["canceled"].bool_value())
|
||||
{
|
||||
@@ -360,15 +368,17 @@ void etcd_state_client_t::start_etcd_watcher()
|
||||
// so we should restart from the beginning if we can
|
||||
if (on_reload_hook != NULL)
|
||||
{
|
||||
fprintf(stderr, "Revisions before %lu were compacted by etcd, reloading state\n",
|
||||
data["result"]["compact_revision"].uint64_value());
|
||||
if (etcd_watch_ws)
|
||||
// check to not trigger on_reload_hook multiple times
|
||||
if (etcd_watch_ws != NULL)
|
||||
{
|
||||
fprintf(stderr, "Revisions before %lu were compacted by etcd, reloading state\n",
|
||||
data["result"]["compact_revision"].uint64_value());
|
||||
http_close(etcd_watch_ws);
|
||||
etcd_watch_ws = NULL;
|
||||
etcd_watch_revision = 0;
|
||||
on_reload_hook();
|
||||
}
|
||||
etcd_watch_revision = 0;
|
||||
on_reload_hook();
|
||||
return;
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -415,13 +425,9 @@ void etcd_state_client_t::start_etcd_watcher()
|
||||
}
|
||||
if (msg->eof)
|
||||
{
|
||||
fprintf(stderr, "Disconnected from etcd %s\n", cur_addr.c_str());
|
||||
if (cur_addr == selected_etcd_address)
|
||||
{
|
||||
fprintf(stderr, "Disconnected from etcd %s\n", selected_etcd_address.c_str());
|
||||
selected_etcd_address = "";
|
||||
}
|
||||
else
|
||||
fprintf(stderr, "Disconnected from etcd\n");
|
||||
if (etcd_watch_ws)
|
||||
{
|
||||
http_close(etcd_watch_ws);
|
||||
@@ -438,6 +444,7 @@ void etcd_state_client_t::start_etcd_watcher()
|
||||
else if (etcd_watches_initialised > 0)
|
||||
{
|
||||
// Connection was live, retry immediately
|
||||
etcd_watches_initialised = 0;
|
||||
start_etcd_watcher();
|
||||
}
|
||||
}
|
||||
|
@@ -173,6 +173,8 @@ public:
|
||||
bool connect_rdma(int peer_fd, std::string rdma_address, uint64_t client_max_msg);
|
||||
#endif
|
||||
|
||||
void measure_exec(osd_op_t *cur_op);
|
||||
|
||||
protected:
|
||||
void try_connect_peer(uint64_t osd_num);
|
||||
void try_connect_peer_addr(osd_num_t peer_osd, const char *peer_host, int peer_port);
|
||||
@@ -184,7 +186,6 @@ protected:
|
||||
void cancel_op(osd_op_t *op);
|
||||
|
||||
bool try_send(osd_client_t *cl);
|
||||
void measure_exec(osd_op_t *cur_op);
|
||||
void handle_send(int result, osd_client_t *cl);
|
||||
|
||||
bool handle_read(int result, osd_client_t *cl);
|
||||
|
@@ -9,6 +9,10 @@ osd_op_t::~osd_op_t()
|
||||
{
|
||||
assert(!bs_op);
|
||||
assert(!op_data);
|
||||
if (bitmap_buf)
|
||||
{
|
||||
free(bitmap_buf);
|
||||
}
|
||||
if (rmw_buf)
|
||||
{
|
||||
free(rmw_buf);
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user