Compare commits
175 Commits
hier-failu ... hotfix-1.0
@@ -10,6 +10,9 @@ RUN set -e -x; \
ln -s /root/fio-build/fio-*/ ./fio; \
ln -s /root/qemu-build/qemu-*/ ./qemu; \
ls /usr/include/linux/raw.h || cp ./debian/raw.h /usr/include/linux/raw.h; \
cd mon; \
npm install; \
cd ..; \
mkdir build; \
cd build; \
cmake .. -DWITH_ASAN=yes -DWITH_QEMU=yes; \
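The added steps link a prebuilt QEMU tree into the build directory and enable ASAN and the QEMU driver in the CMake configuration. Roughly the same configuration could be reproduced outside the container like this (a sketch; the source paths and the make step are assumptions, not part of this diff):

    # Sketch of the same configuration outside the test image (paths are assumptions)
    ln -s /root/qemu-build/qemu-*/ ./qemu
    mkdir -p build && cd build
    cmake .. -DWITH_ASAN=yes -DWITH_QEMU=yes    # flags as added in the Dockerfile above
    make -j"$(nproc)"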
@@ -71,7 +71,7 @@ jobs:
steps:
- name: Run test
id: test
timeout-minutes: 3
timeout-minutes: 10
run: /root/vitastor/tests/test_add_osd.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
@@ -190,24 +190,6 @@ jobs:
echo ""
done

test_failure_domain:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_failure_domain.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_interrupted_rebalance:
runs-on: ubuntu-latest
needs: build
@@ -280,7 +262,7 @@ jobs:
echo ""
done

test_minsize_1:
test_failure_domain:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
@@ -288,115 +270,7 @@ jobs:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_minsize_1.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_move_reappear:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_move_reappear.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_rebalance_verify:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_rebalance_verify.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_rebalance_verify_imm:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: IMMEDIATE_COMMIT=1 /root/vitastor/tests/test_rebalance_verify.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_rebalance_verify_ec:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: SCHEME=ec /root/vitastor/tests/test_rebalance_verify.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_rebalance_verify_ec_imm:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: SCHEME=ec IMMEDIATE_COMMIT=1 /root/vitastor/tests/test_rebalance_verify.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_rm:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_rm.sh
run: /root/vitastor/tests/test_failure_domain.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
@@ -442,6 +316,132 @@ jobs:
echo ""
done

test_minsize_1:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_minsize_1.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_move_reappear:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_move_reappear.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_rm:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_rm.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_snapshot_chain:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_snapshot_chain.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_snapshot_chain_ec:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: SCHEME=ec /root/vitastor/tests/test_snapshot_chain.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_snapshot_down:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_snapshot_down.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_snapshot_down_ec:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: SCHEME=ec /root/vitastor/tests/test_snapshot_down.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_splitbrain:
runs-on: ubuntu-latest
needs: build
@@ -460,6 +460,78 @@ jobs:
echo ""
done

test_rebalance_verify:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 10
run: /root/vitastor/tests/test_rebalance_verify.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_rebalance_verify_imm:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 10
run: IMMEDIATE_COMMIT=1 /root/vitastor/tests/test_rebalance_verify.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_rebalance_verify_ec:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 10
run: SCHEME=ec /root/vitastor/tests/test_rebalance_verify.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_rebalance_verify_ec_imm:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 10
run: SCHEME=ec IMMEDIATE_COMMIT=1 /root/vitastor/tests/test_rebalance_verify.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_write:
runs-on: ubuntu-latest
needs: build
@@ -550,3 +622,219 @@ jobs:
echo ""
done

test_heal_csum_32k_dmj:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 10
run: TEST_NAME=csum_32k_dmj OSD_ARGS="--data_csum_type crc32c --csum_block_size 32k --inmemory_metadata false --inmemory_journal false" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_heal_csum_32k_dj:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 10
run: TEST_NAME=csum_32k_dj OSD_ARGS="--data_csum_type crc32c --csum_block_size 32k --inmemory_journal false" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_heal_csum_32k:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 10
run: TEST_NAME=csum_32k OSD_ARGS="--data_csum_type crc32c --csum_block_size 32k" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_heal_csum_4k_dmj:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 10
run: TEST_NAME=csum_4k_dmj OSD_ARGS="--data_csum_type crc32c --inmemory_metadata false --inmemory_journal false" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_heal_csum_4k_dj:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 10
run: TEST_NAME=csum_4k_dj OSD_ARGS="--data_csum_type crc32c --inmemory_journal false" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_heal_csum_4k:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 10
run: TEST_NAME=csum_4k OSD_ARGS="--data_csum_type crc32c" OFFSET_ARGS=$OSD_ARGS /root/vitastor/tests/test_heal.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_scrub:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: /root/vitastor/tests/test_scrub.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_scrub_zero_osd_2:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: ZERO_OSD=2 /root/vitastor/tests/test_scrub.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_scrub_xor:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: SCHEME=xor /root/vitastor/tests/test_scrub.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_scrub_pg_size_3:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: PG_SIZE=3 /root/vitastor/tests/test_scrub.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_scrub_pg_size_6_pg_minsize_4_osd_count_6_ec:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: PG_SIZE=6 PG_MINSIZE=4 OSD_COUNT=6 SCHEME=ec /root/vitastor/tests/test_scrub.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done

test_scrub_ec:
runs-on: ubuntu-latest
needs: build
container: ${{env.TEST_IMAGE}}:${{github.sha}}
steps:
- name: Run test
id: test
timeout-minutes: 3
run: SCHEME=ec /root/vitastor/tests/test_scrub.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'
run: |
for i in /root/vitastor/testdata/*.log /root/vitastor/testdata/*.txt; do
echo "-------- $i --------"
cat $i
echo ""
done
@@ -7,15 +7,22 @@ for my $line (<>)
if ($line =~ /\.\/(test_[^\.]+)/s)
{
chomp $line;
my $test_name = $1;
my $base_name = $1;
my $test_name = $base_name;
my $timeout = 3;
if ($test_name eq 'test_etcd_fail' || $test_name eq 'test_heal' || $test_name eq 'test_interrupted_rebalance')
if ($test_name eq 'test_etcd_fail' || $test_name eq 'test_heal' || $test_name eq 'test_add_osd' ||
$test_name eq 'test_interrupted_rebalance' || $test_name eq 'test_rebalance_verify')
{
$timeout = 10;
}
while ($line =~ /([^\s=]+)=(\S+)/gs)
{
if ($1 eq 'SCHEME' && $2 eq 'ec')
if ($1 eq 'TEST_NAME')
{
$test_name = $base_name.'_'.$2;
last;
}
elsif ($1 eq 'SCHEME' && $2 eq 'ec')
{
$test_name .= '_ec';
}
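The patched logic derives a CI job name and timeout from each test invocation line: TEST_NAME now overrides the whole suffix, SCHEME=ec still appends "_ec", and a few long-running tests get a 10-minute timeout. A rough shell illustration of the resulting mapping (a sketch only, not the actual generator script, which is not shown in this diff):

    # Illustrative sketch: mirrors the job-name/timeout rules from the Perl hunk above.
    derive_name() {
        local line="$1" base name timeout=3
        base=$(echo "$line" | grep -o '\./test_[^.]*' | head -n1 | sed 's|^\./||')
        name=$base
        case "$base" in
            test_etcd_fail|test_heal|test_add_osd|test_interrupted_rebalance|test_rebalance_verify) timeout=10;;
        esac
        if echo "$line" | grep -q 'TEST_NAME='; then
            name=${base}_$(echo "$line" | sed -n 's/.*TEST_NAME=\([^ ]*\).*/\1/p')
        elif echo "$line" | grep -q 'SCHEME=ec'; then
            name=${name}_ec
        fi
        echo "$name (timeout ${timeout}m)"
    }
    derive_name 'TEST_NAME=csum_32k_dmj ./test_heal.sh'      # -> test_heal_csum_32k_dmj (timeout 10m)
    derive_name 'SCHEME=ec ./test_rebalance_verify.sh'       # -> test_rebalance_verify_ec (timeout 10m)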
@@ -2,6 +2,6 @@ cmake_minimum_required(VERSION 2.8.12)

project(vitastor)

set(VERSION "0.8.9")
set(VERSION "1.0.0")

add_subdirectory(src)
@@ -15,7 +15,7 @@ Vitastor архитектурно похож на Ceph, что означает
и автоматическое распределение данных по любому числу дисков любого размера с настраиваемыми схемами
избыточности - репликацией или с произвольными кодами коррекции ошибок.

Vitastor нацелен на SSD и SSD+HDD кластеры с как минимум 10 Гбит/с сетью, поддерживает
Vitastor нацелен в первую очередь на SSD и SSD+HDD кластеры с как минимум 10 Гбит/с сетью, поддерживает
TCP и RDMA и на хорошем железе может достигать задержки 4 КБ чтения и записи на уровне ~0.1 мс,
что примерно в 10 раз быстрее, чем Ceph и другие популярные программные СХД.
@@ -14,8 +14,8 @@ Vitastor is architecturally similar to Ceph which means strong consistency,
primary-replication, symmetric clustering and automatic data distribution over any
number of drives of any size with configurable redundancy (replication or erasure codes/XOR).

Vitastor targets SSD and SSD+HDD clusters with at least 10 Gbit/s network, supports
TCP and RDMA and may achieve 4 KB read and write latency as low as ~0.1 ms
Vitastor targets primarily SSD and SSD+HDD clusters with at least 10 Gbit/s network,
supports TCP and RDMA and may achieve 4 KB read and write latency as low as ~0.1 ms
with proper hardware which is ~10 times faster than other popular SDS's like Ceph
or internal systems of public clouds.
@@ -1,4 +1,4 @@
VERSION ?= v0.8.9
VERSION ?= v1.0.0

all: build push
@@ -49,7 +49,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: vitalif/vitastor-csi:v0.8.9
image: vitalif/vitastor-csi:v1.0.0
args:
- "--node=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
@@ -116,7 +116,7 @@ spec:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
image: vitalif/vitastor-csi:v0.8.9
image: vitalif/vitastor-csi:v1.0.0
args:
- "--node=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
@@ -5,7 +5,7 @@ package vitastor

const (
vitastorCSIDriverName = "csi.vitastor.io"
vitastorCSIDriverVersion = "0.8.9"
vitastorCSIDriverVersion = "1.0.0"
)

// Config struct fills the parameters of request or user input
debian/build-pve-qemu.sh (vendored, new file, 58 lines)
@@ -0,0 +1,58 @@
exit

git clone https://git.yourcmc.ru/vitalif/pve-qemu .

# bookworm

docker run -it -v `pwd`/pve-qemu:/root/pve-qemu --name pve-qemu-bullseye debian:bullseye bash

perl -i -pe 's/Types: deb$/Types: deb deb-src/' /etc/apt/sources.list.d/debian.sources
echo 'deb [arch=amd64] http://download.proxmox.com/debian/pve bookworm pve-no-subscription' >> /etc/apt/sources.list
echo 'deb https://vitastor.io/debian bookworm main' >> /etc/apt/sources.list
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf
echo 'ru_RU UTF-8' >> /etc/locale.gen
echo 'en_US UTF-8' >> /etc/locale.gen
apt-get update
apt-get install wget ca-certificates
wget https://enterprise.proxmox.com/debian/proxmox-release-bookworm.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
wget https://vitastor.io/debian/pubkey.gpg -O /etc/apt/trusted.gpg.d/vitastor.gpg
apt-get update
apt-get install git devscripts equivs wget mc libjemalloc-dev vitastor-client-dev lintian locales
mk-build-deps --install ./control

# bullseye

docker run -it -v `pwd`/pve-qemu:/root/pve-qemu --name pve-qemu-bullseye debian:bullseye bash

grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb /deb-src /' >> /etc/apt/sources.list
echo 'deb [arch=amd64] http://download.proxmox.com/debian/pve bullseye pve-no-subscription' >> /etc/apt/sources.list
echo 'deb https://vitastor.io/debian bullseye main' >> /etc/apt/sources.list
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf
echo 'ru_RU UTF-8' >> /etc/locale.gen
echo 'en_US UTF-8' >> /etc/locale.gen
apt-get update
apt-get install wget
wget https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
wget https://vitastor.io/debian/pubkey.gpg -O /etc/apt/trusted.gpg.d/vitastor.gpg
apt-get update
apt-get install git devscripts equivs wget mc libjemalloc-dev vitastor-client-dev lintian locales
mk-build-deps --install ./control

# buster

docker run -it -v `pwd`/pve-qemu:/root/pve-qemu --name pve-qemu-buster debian:buster bash

grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb /deb-src /' >> /etc/apt/sources.list
echo 'deb [arch=amd64] http://download.proxmox.com/debian/pve buster pve-no-subscription' >> /etc/apt/sources.list
echo 'deb https://vitastor.io/debian buster main' >> /etc/apt/sources.list
echo 'deb http://deb.debian.org/debian buster-backports main' >> /etc/apt/sources.list
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf
echo 'ru_RU UTF-8' >> /etc/locale.gen
echo 'en_US UTF-8' >> /etc/locale.gen
apt-get update
apt-get install wget ca-certificates
wget http://download.proxmox.com/debian/proxmox-ve-release-6.x.gpg -O /etc/apt/trusted.gpg.d/proxmox-ve-release-6.x.gpg
wget https://vitastor.io/debian/pubkey.gpg -O /etc/apt/trusted.gpg.d/vitastor.gpg
apt-get update
apt-get install git devscripts equivs wget mc libjemalloc-dev vitastor-client-dev lintian locales
mk-build-deps --install ./control
debian/build-vitastor-bookworm.sh (vendored, new executable file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/bash

cat < vitastor.Dockerfile > ../Dockerfile
cd ..
mkdir -p packages
sudo podman build --build-arg REL=bookworm -v `pwd`/packages:/root/packages -f Dockerfile .
rm Dockerfile
debian/changelog (vendored, 4 lines changed)
@@ -1,10 +1,10 @@
vitastor (0.8.9-1) unstable; urgency=medium
vitastor (1.0.0-1) unstable; urgency=medium

* Bugfixes

-- Vitaliy Filippov <vitalif@yourcmc.ru> Fri, 03 Jun 2022 02:09:44 +0300

vitastor (0.8.9-1) unstable; urgency=medium
vitastor (1.0.0-1) unstable; urgency=medium

* Implement NFS proxy
* Add documentation
debian/patched-qemu.Dockerfile (vendored, 45 lines changed)
@@ -1,4 +1,4 @@
# Build patched QEMU for Debian Buster or Bullseye/Sid inside a container
# Build patched QEMU for Debian inside a container
# cd ..; podman build --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f debian/patched-qemu.Dockerfile .

ARG REL=
@@ -15,47 +15,46 @@ RUN if [ "$REL" = "buster" -o "$REL" = "bullseye" ]; then \
echo 'Pin-Priority: 500' >> /etc/apt/preferences; \
fi; \
grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
perl -i -pe 's/Types: deb$/Types: deb deb-src/' /etc/apt/sources.list.d/debian.sources || true; \
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf; \
echo 'APT::Install-Suggests false;' >> /etc/apt/apt.conf

RUN apt-get update
RUN apt-get -y install qemu fio liburing1 liburing-dev libgoogle-perftools-dev devscripts
RUN apt-get -y install fio liburing-dev libgoogle-perftools-dev devscripts
RUN apt-get -y build-dep qemu
# To build a custom version
#RUN cp /root/packages/qemu-orig/* /root
RUN apt-get --download-only source qemu

ADD patches/qemu-5.0-vitastor.patch patches/qemu-5.1-vitastor.patch patches/qemu-6.1-vitastor.patch src/qemu_driver.c /root/vitastor/patches/
ADD patches /root/vitastor/patches
ADD src/qemu_driver.c /root/vitastor/src/qemu_driver.c

#RUN set -e; \
# apt-get install -y wget; \
# wget -q -O /etc/apt/trusted.gpg.d/vitastor.gpg https://vitastor.io/debian/pubkey.gpg; \
# (echo deb http://vitastor.io/debian $REL main > /etc/apt/sources.list.d/vitastor.list); \
# (echo "APT::Install-Recommends false;" > /etc/apt/apt.conf) && \
# apt-get update; \
# apt-get install -y vitastor-client vitastor-client-dev quilt

RUN set -e; \
apt-get install -y wget; \
wget -q -O /etc/apt/trusted.gpg.d/vitastor.gpg https://vitastor.io/debian/pubkey.gpg; \
(echo deb http://vitastor.io/debian $REL main > /etc/apt/sources.list.d/vitastor.list); \
(echo "APT::Install-Recommends false;" > /etc/apt/apt.conf) && \
dpkg -i /root/packages/vitastor-$REL/vitastor-client_*.deb /root/packages/vitastor-$REL/vitastor-client-dev_*.deb; \
apt-get update; \
apt-get install -y vitastor-client vitastor-client-dev quilt; \
apt-get install -y quilt; \
mkdir -p /root/packages/qemu-$REL; \
rm -rf /root/packages/qemu-$REL/*; \
cd /root/packages/qemu-$REL; \
dpkg-source -x /root/qemu*.dsc; \
if ls -d /root/packages/qemu-$REL/qemu-5.0*; then \
D=$(ls -d /root/packages/qemu-$REL/qemu-5.0*); \
cp /root/vitastor/patches/qemu-5.0-vitastor.patch $D/debian/patches; \
echo qemu-5.0-vitastor.patch >> $D/debian/patches/series; \
elif ls /root/packages/qemu-$REL/qemu-6.1*; then \
D=$(ls -d /root/packages/qemu-$REL/qemu-6.1*); \
cp /root/vitastor/patches/qemu-6.1-vitastor.patch $D/debian/patches; \
echo qemu-6.1-vitastor.patch >> $D/debian/patches/series; \
else \
cp /root/vitastor/patches/qemu-5.1-vitastor.patch /root/packages/qemu-$REL/qemu-*/debian/patches; \
P=`ls -d /root/packages/qemu-$REL/qemu-*/debian/patches`; \
echo qemu-5.1-vitastor.patch >> $P/series; \
fi; \
QEMU_VER=$(ls -d qemu*/ | perl -pe 's!^.*(\d+\.\d+).*!$1!'); \
D=$(ls -d qemu*/); \
cp /root/vitastor/patches/qemu-$QEMU_VER-vitastor.patch ./qemu-*/debian/patches; \
echo qemu-$QEMU_VER-vitastor.patch >> $D/debian/patches/series; \
cd /root/packages/qemu-$REL/qemu-*/; \
quilt push -a; \
quilt add block/vitastor.c; \
cp /root/vitastor/patches/qemu_driver.c block/vitastor.c; \
cp /root/vitastor/src/qemu_driver.c block/vitastor.c; \
quilt refresh; \
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)(~bpo[\d\+]*)?\).*$/$1/')+vitastor1; \
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)(~bpo[\d\+]*)?\).*$/$1/')+vitastor3; \
DEBEMAIL="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v $V 'Plug Vitastor block driver'; \
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
rm -rf /root/packages/qemu-$REL/qemu-*/
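The new QEMU_VER line replaces the hard-coded 5.0/5.1/6.1 branches by deriving the patch name from whatever QEMU source tree was unpacked. For example (the directory name below is illustrative):

    # The one-liner from the Dockerfile extracts major.minor from the unpacked tree:
    ls -d qemu*/ | perl -pe 's!^.*(\d+\.\d+).*!$1!'
    # e.g. qemu-7.2+dfsg/  ->  7.2  ->  patches/qemu-7.2-vitastor.patch is applied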
debian/vitastor.Dockerfile (vendored, 13 lines changed)
@@ -1,4 +1,4 @@
# Build Vitastor packages for Debian Buster or Bullseye/Sid inside a container
# Build Vitastor packages for Debian inside a container
# cd ..; podman build --build-arg REL=bullseye -v `pwd`/packages:/root/packages -f debian/vitastor.Dockerfile .

ARG REL=
@@ -15,11 +15,12 @@ RUN if [ "$REL" = "buster" -o "$REL" = "bullseye" ]; then \
echo 'Pin-Priority: 500' >> /etc/apt/preferences; \
fi; \
grep '^deb ' /etc/apt/sources.list | perl -pe 's/^deb/deb-src/' >> /etc/apt/sources.list; \
perl -i -pe 's/Types: deb$/Types: deb deb-src/' /etc/apt/sources.list.d/debian.sources || true; \
echo 'APT::Install-Recommends false;' >> /etc/apt/apt.conf; \
echo 'APT::Install-Suggests false;' >> /etc/apt/apt.conf

RUN apt-get update
RUN apt-get -y install fio liburing1 liburing-dev libgoogle-perftools-dev devscripts
RUN apt-get -y install fio liburing-dev libgoogle-perftools-dev devscripts
RUN apt-get -y build-dep fio
RUN apt-get --download-only source fio
RUN apt-get update && apt-get -y install libjerasure-dev cmake libibverbs-dev libisal-dev
@@ -34,8 +35,8 @@ RUN set -e -x; \
mkdir -p /root/packages/vitastor-$REL; \
rm -rf /root/packages/vitastor-$REL/*; \
cd /root/packages/vitastor-$REL; \
cp -r /root/vitastor vitastor-0.8.9; \
cd vitastor-0.8.9; \
cp -r /root/vitastor vitastor-1.0.0; \
cd vitastor-1.0.0; \
ln -s /root/fio-build/fio-*/ ./fio; \
FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
ls /usr/include/linux/raw.h || cp ./debian/raw.h /usr/include/linux/raw.h; \
@@ -48,8 +49,8 @@ RUN set -e -x; \
rm -rf a b; \
echo "dep:fio=$FIO" > debian/fio_version; \
cd /root/packages/vitastor-$REL; \
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_0.8.9.orig.tar.xz vitastor-0.8.9; \
cd vitastor-0.8.9; \
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_1.0.0.orig.tar.xz vitastor-1.0.0; \
cd vitastor-1.0.0; \
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v "$V""$REL" "Rebuild for $REL"; \
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
@@ -21,7 +21,7 @@ Configuration parameters can be set in 3 places:
mon, fio and QEMU options, OpenStack/Proxmox/etc configuration. The latter
doesn't allow to set all variables directly, but it allows to override the
configuration file and set everything you need inside it.
- OSD superblocks created by [vitastor-disk](../usage/disk.en.md) contain
- OSD superblocks created by [vitastor-disk](usage/disk.en.md) contain
primarily disk layout parameters of specific OSDs. In fact, these parameters
are automatically passed into the command line of vitastor-osd process, so
they have the same "status" as command-line parameters.
@@ -23,7 +23,7 @@
монитора, опциях fio и QEMU, настроек OpenStack, Proxmox и т.п. Последние,
как правило, не включают полный набор параметров напрямую, но позволяют
определить путь к файлу конфигурации и задать любые параметры в нём.
- В суперблоке OSD, записываемом [vitastor-disk](../usage/disk.ru.md) - параметры,
- В суперблоке OSD, записываемом [vitastor-disk](usage/disk.ru.md) - параметры,
связанные с дисковым форматом и с этим конкретным OSD. На самом деле,
при запуске OSD эти параметры автоматически передаются в командную строку
процесса vitastor-osd, то есть по "статусу" они эквивалентны параметрам
@@ -25,11 +25,16 @@ running if required parameters are specified.
## etcd_address

- Type: string or array of strings
- Can be changed online: yes

etcd connection endpoint(s). Multiple endpoints may be delimited by "," or
specified in a JSON array `["10.0.115.10:2379/v3","10.0.115.11:2379/v3"]`.
Note that https is not supported for etcd connections yet.

etcd connection endpoints can be changed online by updating global
configuration in etcd itself - this allows to switch the cluster to new
etcd addresses without downtime.
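As a sketch of the two accepted forms (the /etc/vitastor/vitastor.conf path and the "/vitastor" prefix value below are assumptions, not part of this diff):

    # Sketch: etcd_address as a JSON array in the cluster config file
    cat > /etc/vitastor/vitastor.conf <<'EOF'
    {
      "etcd_address": ["10.0.115.10:2379/v3", "10.0.115.11:2379/v3"],
      "etcd_prefix": "/vitastor"
    }
    EOF
    # or, equivalently, as a single comma-delimited string:
    #   "etcd_address": "10.0.115.10:2379/v3,10.0.115.11:2379/v3"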
## etcd_prefix

- Type: string
@@ -42,5 +47,6 @@ example, use a single etcd cluster for multiple Vitastor clusters.

- Type: integer
- Default: 0
- Can be changed online: yes

Log level. Raise if you want more verbose output.
@@ -24,10 +24,14 @@
## etcd_address

- Тип: строка или массив строк
- Можно менять на лету: да

Адрес(а) подключения к etcd. Несколько адресов могут разделяться запятой
или указываться в виде JSON-массива `["10.0.115.10:2379/v3","10.0.115.11:2379/v3"]`.

Адреса подключения к etcd можно поменять на лету, обновив конфигурацию в
самом etcd - это позволяет переключить кластер на новые etcd без остановки.

## etcd_prefix

- Тип: строка
@@ -41,5 +45,6 @@

- Тип: целое число
- Значение по умолчанию: 0
- Можно менять на лету: да

Уровень логгирования. Повысьте, если хотите более подробный вывод.
@@ -33,12 +33,13 @@ Size of objects (data blocks) into which all physical and virtual drives
in Vitastor, affects memory usage, write amplification and I/O load
distribution effectiveness.

Recommended default block size is 128 KB for SSD and 4 MB for HDD. In fact,
it's possible to use 4 MB for SSD too - it will lower memory usage, but
Recommended default block size is 128 KB for SSD and 1 MB for HDD. In fact,
it's possible to use 1 MB for SSD too - it will lower memory usage, but
may increase average WA and reduce linear performance.

OSD memory usage is roughly (SIZE / BLOCK * 68 bytes) which is roughly
544 MB per 1 TB of used disk space with the default 128 KB block size.
With 1 MB it's 8 times lower.
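The numbers quoted above can be reproduced directly from the formula:

    # Worked example of SIZE / BLOCK * 68 bytes
    SIZE=$((1024*1024*1024*1024))                 # 1 TB of used space
    for BLOCK in $((128*1024)) $((1024*1024)); do
        echo "block=$((BLOCK/1024))K -> $((SIZE / BLOCK * 68 / 1024 / 1024)) MB"
    done
    # block=128K  -> 544 MB
    # block=1024K -> 68 MB (8 times lower)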

## bitmap_granularity
@@ -33,14 +33,14 @@ OSD) могут сосуществовать в одном кластере Vita
настроек, влияет на потребление памяти, объём избыточной записи (write
amplification) и эффективность распределения нагрузки по OSD.

Рекомендуемые по умолчанию размеры блока - 128 килобайт для SSD и 4
мегабайта для HDD. В принципе, для SSD можно тоже использовать 4 мегабайта,
Рекомендуемые по умолчанию размеры блока - 128 килобайт для SSD и 1 мегабайт
для HDD. В принципе, для SSD можно тоже использовать блок размером 1 мегабайт,
это понизит использование памяти, но ухудшит распределение нагрузки и в
среднем увеличит WA.

Потребление памяти OSD составляет примерно (РАЗМЕР / БЛОК * 68 байт),
т.е. примерно 544 МБ памяти на 1 ТБ занятого места на диске при
стандартном 128 КБ блоке.
стандартном 128 КБ блоке. При 1 МБ блоке памяти нужно в 8 раз меньше.

## bitmap_granularity
@@ -24,6 +24,8 @@ initialization and can't be changed after it without losing data.
- [disable_journal_fsync](#disable_journal_fsync)
- [disable_device_lock](#disable_device_lock)
- [disk_alignment](#disk_alignment)
- [data_csum_type](#data_csum_type)
- [csum_block_size](#csum_block_size)

## data_device

@@ -174,3 +176,43 @@ Intel Optane (probably, not tested yet).

Clients don't need to be aware of disk_alignment, so it's not required to
put a modified value into etcd key /vitastor/config/global.

## data_csum_type

- Type: string
- Default: none

Data checksum type to use. May be "crc32c" or "none". Set to "crc32c" to
enable data checksums.

## csum_block_size

- Type: integer
- Default: 4096

Checksum calculation block size.

Must be equal or a multiple of [bitmap_granularity](layout-cluster.en.md#bitmap_granularity)
(which is usually 4 KB).

Checksums increase metadata size by 4 bytes per each csum_block_size of data.

Checksums are always a tradeoff:
1. You either sacrifice +1 GB RAM per 1 TB of data
2. Or you raise csum_block_size, for example, to 32k and sacrifice
50% random write iops due to checksum read-modify-write
3. Or you turn off [inmemory_metadata](osd.en.md#inmemory_metadata) and
sacrifice 50% random read iops due to checksum reads

All-flash clusters usually have enough RAM to use default csum_block_size,
which uses 1 GB RAM per 1 TB of data. HDD clusters usually don't.

Thus, recommended setups are:
1. All-flash, 1 GB RAM per 1 TB data: default (csum_block_size=4k)
2. All-flash, less RAM: csum_block_size=4k + inmemory_metadata=false
3. Hybrid HDD+SSD: csum_block_size=4k + inmemory_metadata=false
4. HDD-only, faster random read: csum_block_size=32k
5. HDD-only, faster random write: csum_block_size=4k +
inmemory_metadata=false + cached_io_meta=true

See also [cached_io_meta](osd.en.md#cached_io_meta).
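The RAM figures in the tradeoff list above follow from the 4-bytes-per-csum_block_size rule, for example:

    # Worked example: checksum metadata per 1 TB of data, 4 bytes per csum_block_size
    DATA=$((1024*1024*1024*1024))
    echo "csum_block_size=4k:  $((DATA / 4096 * 4 / 1024 / 1024)) MB"    # 1024 MB (~1 GB RAM with inmemory_metadata)
    echo "csum_block_size=32k: $((DATA / 32768 * 4 / 1024 / 1024)) MB"   # 128 MB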
@@ -25,6 +25,8 @@
- [disable_journal_fsync](#disable_journal_fsync)
- [disable_device_lock](#disable_device_lock)
- [disk_alignment](#disk_alignment)
- [data_csum_type](#data_csum_type)
- [csum_block_size](#csum_block_size)

## data_device

@@ -183,3 +185,47 @@ journal_block_size и meta_block_size. Однако единственные SSD

Клиентам не обязательно знать про disk_alignment, так что помещать значение
этого параметра в etcd в /vitastor/config/global не нужно.

## data_csum_type

- Тип: строка
- Значение по умолчанию: none

Тип используемых OSD контрольных сумм данных. Может быть "crc32c" или "none".
Установите в "crc32c", чтобы включить расчёт и проверку контрольных сумм данных.

Следует понимать, что контрольные суммы в зависимости от размера блока их
расчёта либо увеличивают потребление памяти, либо снижают производительность.
Подробнее смотрите в описании параметра [csum_block_size](#csum_block_size).

## csum_block_size

- Тип: целое число
- Значение по умолчанию: 4096

Размер блока расчёта контрольных сумм.

Должен быть равен или кратен [bitmap_granularity](layout-cluster.ru.md#bitmap_granularity)
(который обычно равен 4 КБ).

Контрольные суммы увеличивают размер метаданных на 4 байта на каждые
csum_block_size данных.

Контрольные суммы - это всегда компромисс:
1. Вы либо жертвуете потреблением +1 ГБ памяти на 1 ТБ дискового пространства
2. Либо вы повышаете csum_block_size до, скажем, 32k и жертвуете 50%
скорости случайной записи из-за цикла чтения-изменения-записи для расчёта
новых контрольных сумм
3. Либо вы отключаете [inmemory_metadata](osd.ru.md#inmemory_metadata) и
жертвуете 50% скорости случайного чтения из-за чтения контрольных сумм
с диска

Таким образом, рекомендуются следующие варианты настроек:
1. All-flash, 1 ГБ памяти на 1 ТБ данных: по умолчанию (csum_block_size=4k)
2. All-flash, меньше памяти: csum_block_size=4k + inmemory_metadata=false
3. Гибридные HDD+SSD: csum_block_size=4k + inmemory_metadata=false
4. Только HDD, быстрее случайное чтение: csum_block_size=32k
5. Только HDD, быстрее случайная запись: csum_block_size=4k +
inmemory_metadata=false + cached_io_meta=true

Смотрите также [cached_io_meta](osd.ru.md#cached_io_meta).
@@ -153,6 +153,7 @@ operations.
- Type: seconds
- Default: 5
- Minimum: 1
- Can be changed online: yes

Interval before attempting to reconnect to an unavailable OSD.

@@ -161,6 +162,7 @@ Interval before attempting to reconnect to an unavailable OSD.
- Type: seconds
- Default: 5
- Minimum: 1
- Can be changed online: yes

Timeout for OSD connection attempts.

@@ -169,6 +171,7 @@ Timeout for OSD connection attempts.
- Type: seconds
- Default: 5
- Minimum: 1
- Can be changed online: yes

OSD connection inactivity time after which clients and other OSDs send
keepalive requests to check state of the connection.
@@ -178,6 +181,7 @@ keepalive requests to check state of the connection.
- Type: seconds
- Default: 5
- Minimum: 1
- Can be changed online: yes

Maximum time to wait for OSD keepalive responses. If an OSD doesn't respond
within this time, the connection to it is dropped and a reconnection attempt
@@ -188,6 +192,7 @@ is scheduled.
- Type: milliseconds
- Default: 500
- Minimum: 50
- Can be changed online: yes

OSDs respond to clients with a special error code when they receive I/O
requests for a PG that's not synchronized and started. This parameter sets
@@ -197,6 +202,7 @@ the time for the clients to wait before re-attempting such I/O requests.

- Type: integer
- Default: 5
- Can be changed online: yes

Maximum number of attempts for etcd requests which can't be retried
indefinitely.
@@ -205,6 +211,7 @@ indefinitely.

- Type: milliseconds
- Default: 1000
- Can be changed online: yes

Timeout for etcd requests which should complete quickly, like lease refresh.

@@ -212,6 +219,7 @@ Timeout for etcd requests which should complete quickly, like lease refresh.

- Type: milliseconds
- Default: 5000
- Can be changed online: yes

Timeout for etcd requests which are allowed to wait for some time.

@@ -219,6 +227,7 @@ Timeout for etcd requests which are allowed to wait for some time.

- Type: seconds
- Default: max(30, etcd_report_interval*2)
- Can be changed online: yes

Timeout for etcd connection HTTP Keep-Alive. Should be higher than
etcd_report_interval to guarantee that keepalive actually works.
@@ -227,6 +236,7 @@ etcd_report_interval to guarantee that keepalive actually works.

- Type: seconds
- Default: 30
- Can be changed online: yes

etcd websocket ping interval required to keep the connection alive and
detect disconnections quickly.
@@ -235,6 +245,7 @@ detect disconnections quickly.

- Type: integer
- Default: 33554432
- Can be changed online: yes

Without immediate_commit=all this parameter sets the limit of "dirty"
(not committed by fsync) data allowed by the client before forcing an
@@ -161,6 +161,7 @@ OSD в любом случае согласовывают реальное зн
- Тип: секунды
- Значение по умолчанию: 5
- Минимальное значение: 1
- Можно менять на лету: да

Время ожидания перед повторной попыткой соединиться с недоступным OSD.

@@ -169,6 +170,7 @@ OSD в любом случае согласовывают реальное зн
- Тип: секунды
- Значение по умолчанию: 5
- Минимальное значение: 1
- Можно менять на лету: да

Максимальное время ожидания попытки соединения с OSD.

@@ -177,6 +179,7 @@ OSD в любом случае согласовывают реальное зн
- Тип: секунды
- Значение по умолчанию: 5
- Минимальное значение: 1
- Можно менять на лету: да

Время неактивности соединения с OSD, после которого клиенты или другие OSD
посылают запрос проверки состояния соединения.
@@ -186,6 +189,7 @@ OSD в любом случае согласовывают реальное зн
- Тип: секунды
- Значение по умолчанию: 5
- Минимальное значение: 1
- Можно менять на лету: да

Максимальное время ожидания ответа на запрос проверки состояния соединения.
Если OSD не отвечает за это время, соединение отключается и производится
@@ -196,6 +200,7 @@ OSD в любом случае согласовывают реальное зн
- Тип: миллисекунды
- Значение по умолчанию: 500
- Минимальное значение: 50
- Можно менять на лету: да

Когда OSD получают от клиентов запросы ввода-вывода, относящиеся к не
поднятым на данный момент на них PG, либо к PG в процессе синхронизации,
@@ -207,6 +212,7 @@ OSD в любом случае согласовывают реальное зн

- Тип: целое число
- Значение по умолчанию: 5
- Можно менять на лету: да

Максимальное число попыток выполнения запросов к etcd для тех запросов,
которые нельзя повторять бесконечно.
@@ -215,6 +221,7 @@ OSD в любом случае согласовывают реальное зн

- Тип: миллисекунды
- Значение по умолчанию: 1000
- Можно менять на лету: да

Максимальное время выполнения запросов к etcd, которые должны завершаться
быстро, таких, как обновление резервации (lease).
@@ -223,6 +230,7 @@ OSD в любом случае согласовывают реальное зн

- Тип: миллисекунды
- Значение по умолчанию: 5000
- Можно менять на лету: да

Максимальное время выполнения запросов к etcd, для которых не обязательно
гарантировать быстрое выполнение.
@@ -231,6 +239,7 @@ OSD в любом случае согласовывают реальное зн

- Тип: секунды
- Значение по умолчанию: max(30, etcd_report_interval*2)
- Можно менять на лету: да

Таймаут для HTTP Keep-Alive в соединениях к etcd. Должен быть больше, чем
etcd_report_interval, чтобы keepalive гарантированно работал.
@@ -239,6 +248,7 @@ etcd_report_interval, чтобы keepalive гарантированно рабо

- Тип: секунды
- Значение по умолчанию: 30
- Можно менять на лету: да

Интервал проверки живости вебсокет-подключений к etcd.

@@ -246,6 +256,7 @@ etcd_report_interval, чтобы keepalive гарантированно рабо

- Тип: целое число
- Значение по умолчанию: 33554432
- Можно менять на лету: да

При работе без immediate_commit=all - это лимит объёма "грязных" (не
зафиксированных fsync-ом) данных, при достижении которого клиент будет
@@ -7,7 +7,8 @@
|
||||
# Runtime OSD Parameters
|
||||
|
||||
These parameters only apply to OSDs, are not fixed at the moment of OSD drive
|
||||
initialization and can be changed with an OSD restart.
|
||||
initialization and can be changed - either with an OSD restart or, for some of
|
||||
them, even without restarting by updating configuration in etcd.
|
||||
|
||||
- [etcd_report_interval](#etcd_report_interval)
|
||||
- [run_primary](#run_primary)
|
||||
@@ -30,6 +31,9 @@ initialization and can be changed with an OSD restart.
|
||||
- [max_flusher_count](#max_flusher_count)
|
||||
- [inmemory_metadata](#inmemory_metadata)
|
||||
- [inmemory_journal](#inmemory_journal)
|
||||
- [cached_io_data](#cached_io_data)
|
||||
- [cached_io_meta](#cached_io_meta)
|
||||
- [cached_io_journal](#cached_io_journal)
|
||||
- [journal_sector_buffer_count](#journal_sector_buffer_count)
|
||||
- [journal_no_same_sector_overwrites](#journal_no_same_sector_overwrites)
|
||||
- [throttle_small_writes](#throttle_small_writes)
|
||||
@@ -38,6 +42,14 @@ initialization and can be changed with an OSD restart.
|
||||
- [throttle_target_parallelism](#throttle_target_parallelism)
|
||||
- [throttle_threshold_us](#throttle_threshold_us)
|
||||
- [osd_memlock](#osd_memlock)
|
||||
- [auto_scrub](#auto_scrub)
|
||||
- [no_scrub](#no_scrub)
|
||||
- [scrub_interval](#scrub_interval)
|
||||
- [scrub_queue_depth](#scrub_queue_depth)
|
||||
- [scrub_sleep](#scrub_sleep)
|
||||
- [scrub_list_limit](#scrub_list_limit)
|
||||
- [scrub_find_best](#scrub_find_best)
|
||||
- [scrub_ec_max_bruteforce](#scrub_ec_max_bruteforce)
|
||||
|
||||
## etcd_report_interval
|
||||
|
||||
@@ -91,6 +103,7 @@ OSD by hand.
|
||||
|
||||
- Type: seconds
|
||||
- Default: 5
|
||||
- Can be changed online: yes
|
||||
|
||||
Time interval at which automatic fsyncs/flushes are issued by each OSD when
|
||||
the immediate_commit mode if disabled. fsyncs are required because without
|
||||
@@ -103,6 +116,7 @@ issue fsyncs at all.
|
||||
|
||||
- Type: integer
|
||||
- Default: 128
|
||||
- Can be changed online: yes
|
||||
|
||||
Same as autosync_interval, but sets the maximum number of uncommitted write
|
||||
operations before issuing an fsync operation internally.
|
||||
@@ -111,6 +125,7 @@ operations before issuing an fsync operation internally.
|
||||
|
||||
- Type: integer
|
||||
- Default: 4
|
||||
- Can be changed online: yes
|
||||
|
||||
Maximum recovery operations per one primary OSD at any given moment of time.
|
||||
Currently it's the only parameter available to tune the speed or recovery
|
||||
@@ -120,6 +135,7 @@ and rebalancing, but it's planned to implement more.
|
||||
|
||||
- Type: integer
|
||||
- Default: 128
|
||||
- Can be changed online: yes
|
||||
|
||||
Number of recovery operations before switching to recovery of the next PG.
|
||||
The idea is to mix all PGs during recovery for more even space and load
|
||||
@@ -130,6 +146,7 @@ Degraded PGs are anyway scanned first.
|
||||
|
||||
- Type: integer
|
||||
- Default: 16
|
||||
- Can be changed online: yes
|
||||
|
||||
Maximum number of recovery operations before issuing an additional fsync.
|
||||
|
||||
@@ -145,6 +162,7 @@ the underlying device. This may be useful for recovery purposes.
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
- Can be changed online: yes
|
||||
|
||||
Disable automatic background recovery of objects. Note that it doesn't
|
||||
affect implicit recovery of objects happening during writes - a write is
|
||||
@@ -154,6 +172,7 @@ always made to a full set of at least pg_minsize OSDs.
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
- Can be changed online: yes
|
||||
|
||||
Disable background movement of data between different OSDs. Disabling it
|
||||
means that PGs in the `has_misplaced` state will be left in it indefinitely.
|
||||
@@ -162,6 +181,7 @@ means that PGs in the `has_misplaced` state will be left in it indefinitely.
|
||||
|
||||
- Type: seconds
|
||||
- Default: 3
|
||||
- Can be changed online: yes
|
||||
|
||||
Time interval at which OSDs print simple human-readable operation
|
||||
statistics on stdout.
|
||||
@@ -170,6 +190,7 @@ statistics on stdout.
|
||||
|
||||
- Type: seconds
|
||||
- Default: 10
|
||||
- Can be changed online: yes
|
||||
|
||||
Time interval at which OSDs dump slow or stuck operations on stdout, if
|
||||
they're any. Also it's the time after which an operation is considered
|
||||
@@ -179,6 +200,7 @@ they're any. Also it's the time after which an operation is considered
|
||||
|
||||
- Type: seconds
|
||||
- Default: 60
|
||||
- Can be changed online: yes
|
||||
|
||||
Number of seconds after which a deleted inode is removed from OSD statistics.
|
||||
|
||||
@@ -186,6 +208,7 @@ Number of seconds after which a deleted inode is removed from OSD statistics.
|
||||
|
||||
- Type: integer
|
||||
- Default: 128
|
||||
- Can be changed online: yes
|
||||
|
||||
Parallel client write operation limit per one OSD. Operations that exceed
|
||||
this limit are pushed to a temporary queue instead of being executed
|
||||
@@ -195,6 +218,7 @@ immediately.
|
||||
|
||||
- Type: integer
|
||||
- Default: 1
|
||||
- Can be changed online: yes
|
||||
|
||||
Flusher is a micro-thread that moves data from the journal to the data
|
||||
area of the device. The number of flushers is auto-tuned between the minimum and maximum.
|
||||
@@ -204,6 +228,7 @@ Minimum number is set by this parameter.
|
||||
|
||||
- Type: integer
|
||||
- Default: 256
|
||||
- Can be changed online: yes
|
||||
|
||||
Maximum number of journal flushers (see above min_flusher_count).
|
||||
|
||||
@@ -233,6 +258,48 @@ is typically very small because it's sufficient to have 16-32 MB journal
|
||||
for SSD OSDs. However, in theory it's possible that you'll want to turn it
|
||||
off for hybrid (HDD+SSD) OSDs with large journals on quick devices.
|
||||
|
||||
## cached_io_data
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
|
||||
Read and write *data* through Linux page cache, i.e. use a file descriptor
|
||||
opened with O_SYNC, but without O_DIRECT for I/O. May improve read
|
||||
performance for hot data and slower disks - HDDs and maybe SATA SSDs.
|
||||
Not recommended for desktop SSDs without capacitors because O_SYNC flushes
|
||||
disk cache on every write.
|
||||
|
||||
## cached_io_meta
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
|
||||
Read and write *metadata* through Linux page cache. May improve read
|
||||
performance only if your drives are relatively slow (HDD, SATA SSD), and
|
||||
only if checksums are enabled and [inmemory_metadata](#inmemory_metadata)
|
||||
is disabled, because in this case metadata blocks are read from disk
|
||||
on every read request to verify checksums and caching them may reduce this
|
||||
extra read load.
|
||||
|
||||
It is pointless to enable this option when inmemory_metadata is enabled, because all
|
||||
metadata is kept in memory anyway, and likely pointless without checksums,
|
||||
because in that case, metadata blocks are read from disk only during journal
|
||||
flushing.
|
||||
|
||||
If the same device is used for data and metadata, enabling [cached_io_data](#cached_io_data)
|
||||
also enables this parameter, unless it is turned off explicitly.
|
||||
|
||||
## cached_io_journal
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
|
||||
Read and write *journal* through Linux page cache. May improve read
|
||||
performance if [inmemory_journal](#inmemory_journal) is turned off.
|
||||
|
||||
If the same device is used for metadata and journal, enabling [cached_io_meta](#cached_io_meta)
|
||||
also enables this parameter, unless it is turned off explicitly.
|
||||
|
||||
## journal_sector_buffer_count
|
||||
|
||||
- Type: integer
|
||||
@@ -260,6 +327,7 @@ Most (99%) other SSDs don't need this option.
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
- Can be changed online: yes
|
||||
|
||||
Enable soft throttling of small journaled writes. Useful for hybrid OSDs
|
||||
with fast journal/metadata devices and slow data devices. The idea is that
|
||||
@@ -277,6 +345,7 @@ fills up.
|
||||
|
||||
- Type: integer
|
||||
- Default: 100
|
||||
- Can be changed online: yes
|
||||
|
||||
Target maximum number of throttled operations per second under the condition
|
||||
of full journal. Set it to approximate random write iops of your data devices
|
||||
@@ -286,6 +355,7 @@ of full journal. Set it to approximate random write iops of your data devices
|
||||
|
||||
- Type: integer
|
||||
- Default: 100
|
||||
- Can be changed online: yes
|
||||
|
||||
Target maximum bandwidth of throttled operations in MB/s under
|
||||
the condition of full journal. Set it to approximate linear write
|
||||
@@ -295,6 +365,7 @@ performance of your data devices (HDDs).
|
||||
|
||||
- Type: integer
|
||||
- Default: 1
|
||||
- Can be changed online: yes
|
||||
|
||||
Target maximum parallelism of throttled operations under the condition of
|
||||
full journal. Set it to approximate internal parallelism of your data
|
||||
@@ -304,6 +375,7 @@ devices (1 for HDDs, 4-8 for SSDs).
|
||||
|
||||
- Type: microseconds
|
||||
- Default: 50
|
||||
- Can be changed online: yes
|
||||
|
||||
Minimal computed delay to be applied to throttled operations. Usually
|
||||
doesn't need to be changed.
|
||||
@@ -313,4 +385,103 @@ doesn't need to be changed.
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
|
||||
Lock all OSD memory to prevent it from being unloaded into swap with mlockall(). Requires sufficient ulimit -l (max locked memory).
|
||||
Lock all OSD memory to prevent it from being unloaded into swap with
|
||||
mlockall(). Requires sufficient ulimit -l (max locked memory).
|
||||
|
||||
## auto_scrub
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
- Can be changed online: yes
|
||||
|
||||
Data scrubbing is the process of background verification of copies to find
|
||||
and repair corrupted blocks. It's not run automatically by default since
|
||||
it's a new feature. Set this parameter to true to enable automatic scrubs.
|
||||
|
||||
This parameter makes OSDs automatically schedule data scrubbing of clean PGs
|
||||
every `scrub_interval` (see below). You can also start/schedule scrubbing
|
||||
manually by setting `next_scrub` JSON key to the desired UNIX time of the
|
||||
next scrub in `/pg/history/...` values in etcd.
|
||||
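For illustration, the `next_scrub` value is a plain UNIX timestamp (assumed here to be in seconds, as suggested by the wording above), so it can be produced with any tool, for example with a couple of lines of Node.js:

```js
// UNIX timestamp (seconds) for a manually scheduled scrub 6 hours from now.
// Illustrative only - write it into the next_scrub key of the corresponding
// /pg/history/... value in etcd as described above.
const next_scrub = Math.floor(Date.now() / 1000) + 6 * 3600;
console.log(next_scrub);
```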
|
||||
## no_scrub
|
||||
|
||||
- Type: boolean
|
||||
- Default: false
|
||||
- Can be changed online: yes
|
||||
|
||||
Temporarily disable scrubbing and stop running scrubs.
|
||||
|
||||
## scrub_interval
|
||||
|
||||
- Type: string
|
||||
- Default: 30d
|
||||
- Can be changed online: yes
|
||||
|
||||
Default automatic scrubbing interval for all pools. Numbers without suffix
|
||||
are treated as seconds; possible unit suffixes include 's' (seconds),
|
||||
'm' (minutes), 'h' (hours), 'd' (days), 'M' (months) and 'y' (years).
|
||||
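As an illustration of the interval format (this sketch is not the actual Vitastor parser, and month/year lengths are approximated here):

```js
// Interpret a scrub_interval-style value: a bare number means seconds,
// otherwise a number followed by one of the suffixes s/m/h/d/M/y.
const UNITS = { s: 1, m: 60, h: 3600, d: 86400, M: 30 * 86400, y: 365 * 86400 };

function parse_interval(str)
{
    const m = /^(\d+)([smhdMy])?$/.exec(str);
    if (!m)
        throw new Error('invalid interval: ' + str);
    return parseInt(m[1], 10) * (m[2] ? UNITS[m[2]] : 1);
}

console.log(parse_interval('30d')); // 2592000 seconds, i.e. the default
```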
|
||||
## scrub_queue_depth
|
||||
|
||||
- Type: integer
|
||||
- Default: 1
|
||||
- Can be changed online: yes
|
||||
|
||||
Number of parallel scrubbing operations per one OSD.
|
||||
|
||||
## scrub_sleep
|
||||
|
||||
- Type: milliseconds
|
||||
- Default: 0
|
||||
- Can be changed online: yes
|
||||
|
||||
Additional interval between two consecutive scrubbing operations on one OSD.
|
||||
Can be used to slow down scrubbing if it affects user load too much.
|
||||
|
||||
## scrub_list_limit
|
||||
|
||||
- Type: integer
|
||||
- Default: 1000
|
||||
- Can be changed online: yes
|
||||
|
||||
Number of objects to list in one listing operation during scrub.
|
||||
|
||||
## scrub_find_best
|
||||
|
||||
- Type: boolean
|
||||
- Default: true
|
||||
- Can be changed online: yes
|
||||
|
||||
Find and automatically restore best versions of objects with unmatched
|
||||
copies. In replicated setups, the best version is the version with most
|
||||
matching replicas. In EC setups, the best version is the subset of data
|
||||
and parity chunks without mismatches.
|
||||
|
||||
The hypothetical situation where you might want to disable it is when
|
||||
you have 3 replicas and you are paranoid that 2 HDDs out of 3 may silently
|
||||
corrupt an object in the same way (for example, zero it out) and only
|
||||
1 HDD will remain good. In this case disabling scrub_find_best may help
|
||||
you to recover the data! See also scrub_ec_max_bruteforce below.
|
||||
|
||||
## scrub_ec_max_bruteforce
|
||||
|
||||
- Type: integer
|
||||
- Default: 100
|
||||
- Can be changed online: yes
|
||||
|
||||
Vitastor can locate corrupted chunks in EC setups with more than 1 parity
|
||||
chunk by brute-forcing all possible error locations. This configuration
|
||||
value limits the maximum number of checked combinations. You can try to
|
||||
increase it if you have EC N+K setup with N and K large enough for
|
||||
combination count `C(N+K-1, K-1) = (N+K-1)! / (K-1)! / N!` to be greater
|
||||
than the default 100.
|
||||
|
||||
If there are too many possible combinations or if multiple combinations give
|
||||
correct results then objects are marked inconsistent and aren't recovered
|
||||
automatically.
|
||||
|
||||
In replicated setups bruteforcing isn't needed: Vitastor just assumes that
|
||||
the variant with most available equal copies is correct. For example, if
|
||||
you have 3 replicas and 1 of them differs, this one is considered to be
|
||||
corrupted. But if there is no "best" version with more copies than all
|
||||
others have then the object is also marked as inconsistent.
|
||||
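To make the combination count concrete, here is a small worked example (added for illustration, not part of the original text):

```js
// Number of possible error-location combinations for an EC N+K layout,
// C(N+K-1, K-1), as quoted above.
function binomial(n, k)
{
    let r = 1;
    for (let i = 1; i <= k; i++)
        r = r * (n - k + i) / i;
    return Math.round(r);
}

for (const [ n, k ] of [ [ 2, 2 ], [ 8, 3 ], [ 10, 4 ] ])
    console.log(`EC ${n}+${k}: C(${n + k - 1}, ${k - 1}) = ${binomial(n + k - 1, k - 1)}`);
// EC 2+2: 3, EC 8+3: 45, EC 10+4: 286
```

So a hypothetical EC 10+4 pool already has 286 possible combinations and would presumably need scrub_ec_max_bruteforce raised above the default 100 for the brute-force search to cover the worst case.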
|
@@ -8,7 +8,8 @@
|
||||
|
||||
Данные параметры используются только OSD, но, в отличие от дисковых параметров,
|
||||
не фиксируются в момент инициализации дисков OSD и могут быть изменены в любой
|
||||
момент с перезапуском OSD.
|
||||
момент с помощью перезапуска OSD, а некоторые и без перезапуска, с помощью
|
||||
изменения конфигурации в etcd.
|
||||
|
||||
- [etcd_report_interval](#etcd_report_interval)
|
||||
- [run_primary](#run_primary)
|
||||
@@ -31,6 +32,9 @@
|
||||
- [max_flusher_count](#max_flusher_count)
|
||||
- [inmemory_metadata](#inmemory_metadata)
|
||||
- [inmemory_journal](#inmemory_journal)
|
||||
- [cached_io_data](#cached_io_data)
|
||||
- [cached_io_meta](#cached_io_meta)
|
||||
- [cached_io_journal](#cached_io_journal)
|
||||
- [journal_sector_buffer_count](#journal_sector_buffer_count)
|
||||
- [journal_no_same_sector_overwrites](#journal_no_same_sector_overwrites)
|
||||
- [throttle_small_writes](#throttle_small_writes)
|
||||
@@ -39,6 +43,14 @@
|
||||
- [throttle_target_parallelism](#throttle_target_parallelism)
|
||||
- [throttle_threshold_us](#throttle_threshold_us)
|
||||
- [osd_memlock](#osd_memlock)
|
||||
- [auto_scrub](#auto_scrub)
|
||||
- [no_scrub](#no_scrub)
|
||||
- [scrub_interval](#scrub_interval)
|
||||
- [scrub_queue_depth](#scrub_queue_depth)
|
||||
- [scrub_sleep](#scrub_sleep)
|
||||
- [scrub_list_limit](#scrub_list_limit)
|
||||
- [scrub_find_best](#scrub_find_best)
|
||||
- [scrub_ec_max_bruteforce](#scrub_ec_max_bruteforce)
|
||||
|
||||
## etcd_report_interval
|
||||
|
||||
@@ -93,6 +105,7 @@ RUNNING), подходящий под заданную маску. Также н
|
||||
|
||||
- Тип: секунды
|
||||
- Значение по умолчанию: 5
|
||||
- Можно менять на лету: да
|
||||
|
||||
Временной интервал отправки автоматических fsync-ов (операций очистки кэша)
|
||||
каждым OSD для случая, когда режим immediate_commit отключён. fsync-и нужны
|
||||
@@ -105,6 +118,7 @@ OSD, чтобы успевать очищать журнал - без них OSD
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 128
|
||||
- Можно менять на лету: да
|
||||
|
||||
Аналогично autosync_interval, но задаёт не временной интервал, а
|
||||
максимальное количество незафиксированных операций записи перед
|
||||
@@ -114,6 +128,7 @@ OSD, чтобы успевать очищать журнал - без них OSD
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 4
|
||||
- Можно менять на лету: да
|
||||
|
||||
Максимальное число операций восстановления на одном первичном OSD в любой
|
||||
момент времени. На данный момент единственный параметр, который можно менять
|
||||
@@ -124,6 +139,7 @@ OSD, чтобы успевать очищать журнал - без них OSD
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 128
|
||||
- Можно менять на лету: да
|
||||
|
||||
Число операций восстановления перед переключением на восстановление другой PG.
|
||||
Идея заключается в том, чтобы восстанавливать все PG одновременно для более
|
||||
@@ -135,6 +151,7 @@ OSD, чтобы успевать очищать журнал - без них OSD
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 16
|
||||
- Можно менять на лету: да
|
||||
|
||||
Максимальное число операций восстановления перед дополнительным fsync.
|
||||
|
||||
@@ -150,6 +167,7 @@ OSD, чтобы успевать очищать журнал - без них OSD
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
- Можно менять на лету: да
|
||||
|
||||
Отключить автоматическое фоновое восстановление объектов. Обратите внимание,
|
||||
что эта опция не отключает восстановление объектов, происходящее при
|
||||
@@ -160,6 +178,7 @@ OSD.
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
- Можно менять на лету: да
|
||||
|
||||
Отключить фоновое перемещение объектов между разными OSD. Отключение
|
||||
означает, что PG, находящиеся в состоянии `has_misplaced`, будут оставлены
|
||||
@@ -169,6 +188,7 @@ OSD.
|
||||
|
||||
- Тип: секунды
|
||||
- Значение по умолчанию: 3
|
||||
- Можно менять на лету: да
|
||||
|
||||
Временной интервал, с которым OSD печатают простую человекочитаемую
|
||||
статистику выполнения операций в стандартный вывод.
|
||||
@@ -177,6 +197,7 @@ OSD.
|
||||
|
||||
- Тип: секунды
|
||||
- Значение по умолчанию: 10
|
||||
- Можно менять на лету: да
|
||||
|
||||
Временной интервал, с которым OSD выводят в стандартный вывод список
|
||||
медленных или зависших операций, если таковые имеются. Также время, при
|
||||
@@ -186,6 +207,7 @@ OSD.
|
||||
|
||||
- Тип: секунды
|
||||
- Значение по умолчанию: 60
|
||||
- Можно менять на лету: да
|
||||
|
||||
Число секунд, через которое удалённый инод удаляется и из статистики OSD.
|
||||
|
||||
@@ -193,6 +215,7 @@ OSD.
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 128
|
||||
- Можно менять на лету: да
|
||||
|
||||
Максимальное число одновременных клиентских операций записи на один OSD.
|
||||
Операции, превышающие этот лимит, не исполняются сразу, а сохраняются во
|
||||
@@ -202,6 +225,7 @@ OSD.
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 1
|
||||
- Можно менять на лету: да
|
||||
|
||||
Flusher - это микро-поток (корутина), которая копирует данные из журнала в
|
||||
основную область устройства данных. Их число настраивается динамически между
|
||||
@@ -211,6 +235,7 @@ Flusher - это микро-поток (корутина), которая коп
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 256
|
||||
- Можно менять на лету: да
|
||||
|
||||
Максимальное число микро-потоков очистки журнала (см. выше min_flusher_count).
|
||||
|
||||
@@ -241,6 +266,52 @@ Flusher - это микро-поток (корутина), которая коп
|
||||
параметра может оказаться полезным для гибридных OSD (HDD+SSD) с большими
|
||||
журналами, расположенными на быстром по сравнению с HDD устройстве.
|
||||
|
||||
## cached_io_data
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
|
||||
Читать и записывать *данные* через системный кэш Linux (page cache), то есть,
|
||||
использовать для данных файловый дескриптор, открытый без флага O_DIRECT, но
|
||||
с флагом O_SYNC. Может улучшить скорость чтения для относительно медленных
|
||||
дисков - HDD и, возможно, SATA SSD. Не рекомендуется для потребительских
|
||||
SSD без конденсаторов, так как O_SYNC сбрасывает кэш диска при каждой записи.
|
||||
|
||||
## cached_io_meta
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
|
||||
Читать и записывать *метаданные* через системный кэш Linux. Может улучшить
|
||||
скорость чтения, если у вас медленные диски, и только если контрольные суммы
|
||||
включены, а параметр [inmemory_metadata](#inmemory_metadata) отключён, так
|
||||
как в этом случае блоки метаданных читаются с диска при каждом запросе чтения
|
||||
для проверки контрольных сумм и их кэширование может снизить дополнительную
|
||||
нагрузку на диск.
|
||||
|
||||
Абсолютно бессмысленно включать данный параметр, если параметр
|
||||
inmemory_metadata включён (по умолчанию это так), и также вероятно
|
||||
бессмысленно включать его, если не включены контрольные суммы, так как в
|
||||
этом случае блоки метаданных читаются с диска только во время сброса
|
||||
журнала.
|
||||
|
||||
Если одно и то же устройство используется для данных и метаданных, включение
|
||||
[cached_io_data](#cached_io_data) также включает данный параметр, при
|
||||
условии, что он не отключён явным образом.
|
||||
|
||||
## cached_io_journal
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
|
||||
Читать и записывать *журнал* через системный кэш Linux. Может улучшить
|
||||
скорость чтения, если параметр [inmemory_journal](#inmemory_journal)
|
||||
отключён.
|
||||
|
||||
Если одно и то же устройство используется для метаданных и журнала,
|
||||
включение [cached_io_meta](#cached_io_meta) также включает данный
|
||||
параметр, при условии, что он не отключён явным образом.
|
||||
|
||||
## journal_sector_buffer_count
|
||||
|
||||
- Тип: целое число
|
||||
@@ -270,6 +341,7 @@ Flusher - это микро-поток (корутина), которая коп
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
- Можно менять на лету: да
|
||||
|
||||
Разрешить мягкое ограничение скорости журналируемой записи. Полезно для
|
||||
гибридных OSD с быстрыми устройствами метаданных и медленными устройствами
|
||||
@@ -288,6 +360,7 @@ Flusher - это микро-поток (корутина), которая коп
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 100
|
||||
- Можно менять на лету: да
|
||||
|
||||
Расчётное максимальное число ограничиваемых операций в секунду при условии
|
||||
отсутствия свободного места в журнале. Устанавливайте приблизительно равным
|
||||
@@ -298,6 +371,7 @@ Flusher - это микро-поток (корутина), которая коп
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 100
|
||||
- Можно менять на лету: да
|
||||
|
||||
Расчётный максимальный размер в МБ/с ограничиваемых операций в секунду при
|
||||
условии отсутствия свободного места в журнале. Устанавливайте приблизительно
|
||||
@@ -308,6 +382,7 @@ Flusher - это микро-поток (корутина), которая коп
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 1
|
||||
- Можно менять на лету: да
|
||||
|
||||
Расчётный максимальный параллелизм ограничиваемых операций в секунду при
|
||||
условии отсутствия свободного места в журнале. Устанавливайте приблизительно
|
||||
@@ -318,6 +393,7 @@ Flusher - это микро-поток (корутина), которая коп
|
||||
|
||||
- Тип: микросекунды
|
||||
- Значение по умолчанию: 50
|
||||
- Можно менять на лету: да
|
||||
|
||||
Минимальная применимая к ограничиваемым операциям задержка. Обычно не
|
||||
требует изменений.
|
||||
@@ -327,4 +403,113 @@ Flusher - это микро-поток (корутина), которая коп
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
|
||||
Блокировать всю память OSD с помощью mlockall, чтобы запретить её выгрузку в пространство подкачки. Требует достаточного значения ulimit -l (лимита заблокированной памяти).
|
||||
Блокировать всю память OSD с помощью mlockall, чтобы запретить её выгрузку
|
||||
в пространство подкачки. Требует достаточного значения ulimit -l (лимита
|
||||
заблокированной памяти).
|
||||
|
||||
## auto_scrub
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
- Можно менять на лету: да
|
||||
|
||||
Скраб - процесс фоновой проверки копий данных, предназначенный, чтобы
|
||||
находить и исправлять повреждённые блоки. По умолчанию эти проверки ещё не
|
||||
запускаются автоматически, так как являются новой функцией. Чтобы включить
|
||||
автоматическое планирование скрабов, установите данный параметр в true.
|
||||
|
||||
Включённый параметр заставляет OSD автоматически планировать фоновую
|
||||
проверку чистых PG раз в `scrub_interval` (см. ниже). Вы также можете
|
||||
запустить или запланировать проверку вручную, установив значение ключа JSON
|
||||
`next_scrub` внутри ключей etcd `/pg/history/...` в UNIX-время следующей
|
||||
желаемой проверки.
|
||||
|
||||
## no_scrub
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: false
|
||||
- Можно менять на лету: да
|
||||
|
||||
Временно отключить и остановить запущенные скрабы.
|
||||
|
||||
## scrub_interval
|
||||
|
||||
- Тип: строка
|
||||
- Значение по умолчанию: 30d
|
||||
- Можно менять на лету: да
|
||||
|
||||
Интервал автоматической фоновой проверки по умолчанию для всех пулов.
|
||||
Значения без указанной единицы измерения считаются в секундах, допустимые
|
||||
символы единиц измерения в конце: 's' (секунды),
|
||||
'm' (минуты), 'h' (часы), 'd' (дни), 'M' (месяцы) или 'y' (годы).
|
||||
|
||||
## scrub_queue_depth
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 1
|
||||
- Можно менять на лету: да
|
||||
|
||||
Число параллельных операций фоновой проверки на один OSD.
|
||||
|
||||
## scrub_sleep
|
||||
|
||||
- Тип: миллисекунды
|
||||
- Значение по умолчанию: 0
|
||||
- Можно менять на лету: да
|
||||
|
||||
Дополнительный интервал ожидания после фоновой проверки каждого объекта на
|
||||
одном OSD. Может использоваться для замедления скраба, если он слишком
|
||||
сильно влияет на пользовательскую нагрузку.
|
||||
|
||||
## scrub_list_limit
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 1000
|
||||
- Можно менять на лету: да
|
||||
|
||||
Размер загружаемых за одну операцию списков объектов в процессе фоновой
|
||||
проверки.
|
||||
|
||||
## scrub_find_best
|
||||
|
||||
- Тип: булево (да/нет)
|
||||
- Значение по умолчанию: true
|
||||
- Можно менять на лету: да
|
||||
|
||||
Находить и автоматически восстанавливать "лучшие версии" объектов с
|
||||
несовпадающими копиями/частями. При использовании репликации "лучшая"
|
||||
версия - версия, доступная в большем числе экземпляров, чем другие. При
|
||||
использовании кодов коррекции ошибок "лучшая" версия - это подмножество
|
||||
частей данных и чётности, полностью соответствующих друг другу.
|
||||
|
||||
Гипотетическая ситуация, в которой вы можете захотеть отключить этот
|
||||
поиск - это если у вас 3 реплики и вы боитесь, что 2 диска из 3 могут
|
||||
незаметно и одинаково повредить данные одного и того же объекта, например,
|
||||
занулив его, и только 1 диск останется неповреждённым. В этой ситуации
|
||||
отключение этого параметра поможет вам восстановить данные! Смотрите также
|
||||
описание следующего параметра - scrub_ec_max_bruteforce.
|
||||
|
||||
## scrub_ec_max_bruteforce
|
||||
|
||||
- Тип: целое число
|
||||
- Значение по умолчанию: 100
|
||||
- Можно менять на лету: да
|
||||
|
||||
Vitastor старается определить повреждённые части объектов при использовании
|
||||
EC (кодов коррекции ошибок) с более, чем 1 диском чётности, путём перебора
|
||||
всех возможных комбинаций ошибочных частей. Данное значение конфигурации
|
||||
ограничивает число перебираемых комбинаций. Вы можете попробовать поднять
|
||||
его, если используете схему кодирования EC N+K с N и K, достаточно большими
|
||||
для того, чтобы число сочетаний `C(N+K-1, K-1) = (N+K-1)! / (K-1)! / N!`
|
||||
было больше, чем стандартное значение 100.
|
||||
|
||||
Если возможных комбинаций слишком много или если корректная комбинация не
|
||||
определяется однозначно, объекты помечаются неконсистентными (inconsistent)
|
||||
и не восстанавливаются автоматически.
|
||||
|
||||
При использовании репликации перебор не нужен, Vitastor просто предполагает,
|
||||
что вариант объекта с наибольшим количеством одинаковых копий корректен.
|
||||
Например, если вы используете 3 реплики и 1 из них отличается, эта 1 копия
|
||||
считается некорректной. Однако, если "лучшую" версию с числом доступных
|
||||
копий большим, чем у всех других версий, найти невозможно, то объект тоже
|
||||
маркируется неконсистентным.
|
||||
|
@@ -40,6 +40,7 @@ Parameters:
|
||||
- [root_node](#root_node)
|
||||
- [osd_tags](#osd_tags)
|
||||
- [primary_affinity_tags](#primary_affinity_tags)
|
||||
- [scrub_interval](#scrub_interval)
|
||||
|
||||
Examples:
|
||||
|
||||
@@ -272,6 +273,13 @@ Specifies OSD tags to prefer putting primary OSDs in this pool to.
|
||||
Note that for EC/XOR pools Vitastor always prefers to put primary OSD on one
|
||||
of the OSDs containing a data chunk for a PG.
|
||||
|
||||
## scrub_interval
|
||||
|
||||
- Type: time interval (number + unit s/m/h/d/M/y)
|
||||
|
||||
Automatic scrubbing interval for this pool. Overrides
|
||||
[global scrub_interval setting](osd.en.md#scrub_interval).
|
||||
|
||||
# Examples
|
||||
|
||||
## Replicated pool
|
||||
|
@@ -39,6 +39,7 @@
|
||||
- [root_node](#root_node)
|
||||
- [osd_tags](#osd_tags)
|
||||
- [primary_affinity_tags](#primary_affinity_tags)
|
||||
- [scrub_interval](#scrub_interval)
|
||||
|
||||
Примеры:
|
||||
|
||||
@@ -276,6 +277,13 @@ PG в Vitastor эферемерны, то есть вы можете менят
|
||||
для PG этого пула. Имейте в виду, что для EC-пулов Vitastor также всегда
|
||||
предпочитает помещать первичный OSD на один из OSD с данными, а не с чётностью.
|
||||
|
||||
## scrub_interval
|
||||
|
||||
- Тип: временной интервал (число + единица измерения s/m/h/d/M/y)
|
||||
|
||||
Интервал скраба, то есть, автоматической фоновой проверки данных для данного пула.
|
||||
Переопределяет [глобальную настройку scrub_interval](osd.ru.md#scrub_interval).
|
||||
|
||||
# Примеры
|
||||
|
||||
## Реплицированный пул
|
||||
|
@@ -11,13 +11,21 @@
|
||||
- name: etcd_address
|
||||
type: string or array of strings
|
||||
type_ru: строка или массив строк
|
||||
online: true
|
||||
info: |
|
||||
etcd connection endpoint(s). Multiple endpoints may be delimited by "," or
|
||||
specified in a JSON array `["10.0.115.10:2379/v3","10.0.115.11:2379/v3"]`.
|
||||
Note that https is not supported for etcd connections yet.
|
||||
|
||||
etcd connection endpoints can be changed online by updating global
|
||||
configuration in etcd itself - this allows switching the cluster to new
|
||||
etcd addresses without downtime.
|
||||
info_ru: |
|
||||
Адрес(а) подключения к etcd. Несколько адресов могут разделяться запятой
|
||||
или указываться в виде JSON-массива `["10.0.115.10:2379/v3","10.0.115.11:2379/v3"]`.
|
||||
|
||||
Адреса подключения к etcd можно поменять на лету, обновив конфигурацию в
|
||||
самом etcd - это позволяет переключить кластер на новые etcd без остановки.
|
||||
- name: etcd_prefix
|
||||
type: string
|
||||
default: "/vitastor"
|
||||
@@ -31,5 +39,6 @@
|
||||
- name: log_level
|
||||
type: int
|
||||
default: 0
|
||||
online: true
|
||||
info: Log level. Raise if you want more verbose output.
|
||||
info_ru: Уровень логгирования. Повысьте, если хотите более подробный вывод.
|
||||
|
docs/config/src/include.js (new executable file, 145 lines)
@@ -0,0 +1,145 @@
|
||||
#!/usr/bin/nodejs
|
||||
|
||||
const fsp = require('fs').promises;
|
||||
|
||||
run(process.argv).catch(console.error);
|
||||
|
||||
async function run(argv)
|
||||
{
|
||||
if (argv.length < 3)
|
||||
{
|
||||
console.log('Markdown preprocessor\nUSAGE: ./include.js file.md');
|
||||
return;
|
||||
}
|
||||
const index_file = await fsp.realpath(argv[2]);
|
||||
const re = /(\{\{[\s\S]*?\}\}|\[[^\]]+\]\([^\)]+\)|(?:^|\n)#[^\n]+)/;
|
||||
let text = await fsp.readFile(index_file, { encoding: 'utf-8' });
|
||||
text = text.split(re);
|
||||
let included = {};
|
||||
let heading = 0, heading_name = '', m;
|
||||
for (let i = 0; i < text.length; i++)
|
||||
{
|
||||
if (text[i].substr(0, 2) == '{{')
|
||||
{
|
||||
// Inclusion
|
||||
let incfile = text[i].substr(2, text[i].length-4);
|
||||
let section = null;
|
||||
let indent = heading;
|
||||
incfile = incfile.replace(/\s*\|\s*indent\s*=\s*(-?\d+)\s*$/, (m, m1) => { indent = parseInt(m1); return ''; });
|
||||
incfile = incfile.replace(/\s*#\s*([^#]+)$/, (m, m1) => { section = m1; return ''; });
|
||||
let inc_heading = section;
|
||||
incfile = rel2abs(index_file, incfile);
|
||||
let inc = await fsp.readFile(incfile, { encoding: 'utf-8' });
|
||||
inc = inc.trim().replace(/^[\s\S]+?\n#/, '#'); // remove until the first header
|
||||
inc = inc.split(re);
|
||||
const indent_str = new Array(indent+1).join('#');
|
||||
let section_start = -1, section_end = -1;
|
||||
for (let j = 0; j < inc.length; j++)
|
||||
{
|
||||
if ((m = /^(\n?)(#+\s*)([\s\S]+)$/.exec(inc[j])))
|
||||
{
|
||||
if (!inc_heading)
|
||||
{
|
||||
inc_heading = m[3].trim();
|
||||
}
|
||||
if (section)
|
||||
{
|
||||
if (m[3].trim() == section)
|
||||
section_start = j;
|
||||
else if (section_start >= 0)
|
||||
{
|
||||
section_end = j;
|
||||
break;
|
||||
}
|
||||
}
|
||||
inc[j] = m[1] + indent_str + m[2] + m[3];
|
||||
}
|
||||
else if ((m = /^(\[[^\]]+\]\()([^\)]+)(\))$/.exec(inc[j])) && !/^https?:(\/\/)|^#/.exec(m[2]))
|
||||
{
|
||||
const abs_m2 = rel2abs(incfile, m[2]);
|
||||
const rel_m = abs2rel(__filename, abs_m2);
|
||||
if (rel_m.substr(0, 9) == '../../../') // outside docs
|
||||
inc[j] = m[1] + 'https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/'+rel2abs('docs/config/src/include.js', rel_m) + m[3];
|
||||
else
|
||||
inc[j] = m[1] + abs_m2 + m[3];
|
||||
}
|
||||
}
|
||||
if (section)
|
||||
{
|
||||
inc = section_start >= 0 ? inc.slice(section_start, section_end < 0 ? inc.length : section_end) : [];
|
||||
}
|
||||
if (inc.length)
|
||||
{
|
||||
if (!inc_heading)
|
||||
inc_heading = heading_name||'';
|
||||
included[incfile+(section ? '#'+section : '')] = '#'+inc_heading.toLowerCase().replace(/\P{L}+/ug, '-').replace(/^-|-$/g, '');
|
||||
inc[0] = inc[0].replace(/^\s+/, '');
|
||||
inc[inc.length-1] = inc[inc.length-1].replace(/\s+$/, '');
|
||||
}
|
||||
text.splice(i, 1, ...inc);
|
||||
i = i + inc.length - 1;
|
||||
}
|
||||
else if ((m = /^\n?(#+)\s*([\s\S]+)$/.exec(text[i])))
|
||||
{
|
||||
// Heading
|
||||
heading = m[1].length;
|
||||
heading_name = m[2].trim();
|
||||
}
|
||||
}
|
||||
for (let i = 0; i < text.length; i++)
|
||||
{
|
||||
if ((m = /^(\[[^\]]+\]\()([^\)]+)(\))$/.exec(text[i])) && !/^https?:(\/\/)|^#/.exec(m[2]))
|
||||
{
|
||||
const p = m[2].indexOf('#');
|
||||
if (included[m[2]])
|
||||
{
|
||||
text[i] = m[1]+included[m[2]]+m[3];
|
||||
}
|
||||
else if (p >= 0 && included[m[2].substr(0, p)])
|
||||
{
|
||||
text[i] = m[1]+m[2].substr(p)+m[3];
|
||||
}
|
||||
}
|
||||
}
|
||||
console.log(text.join(''));
|
||||
}
|
||||
|
||||
function rel2abs(ref, rel)
|
||||
{
|
||||
rel = [ ...ref.replace(/^(.*)\/[^\/]+$/, '$1').split(/\/+/), ...rel.split(/\/+/) ];
|
||||
return killdots(rel).join('/');
|
||||
}
|
||||
|
||||
function abs2rel(ref, abs)
|
||||
{
|
||||
ref = ref.split(/\/+/);
|
||||
abs = abs.split(/\/+/);
|
||||
while (ref.length > 1 && ref[0] == abs[0])
|
||||
{
|
||||
ref.shift();
|
||||
abs.shift();
|
||||
}
|
||||
for (let i = 1; i < ref.length; i++)
|
||||
{
|
||||
abs.unshift('..');
|
||||
}
|
||||
return killdots(abs).join('/');
|
||||
}
|
||||
|
||||
function killdots(rel)
|
||||
{
|
||||
for (let i = 0; i < rel.length; i++)
|
||||
{
|
||||
if (rel[i] == '.')
|
||||
{
|
||||
rel.splice(i, 1);
|
||||
i--;
|
||||
}
|
||||
else if (i >= 1 && rel[i] == '..' && rel[i-1] != '..')
|
||||
{
|
||||
rel.splice(i-1, 2);
|
||||
i -= 2;
|
||||
}
|
||||
}
|
||||
return rel;
|
||||
}
|
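Judging from the script above, it expands `{{file.md}}`, `{{file.md#Section}}` and `{{file.md|indent=N}}` inclusion directives, adjusts heading levels, rewrites relative links, and prints the assembled document to stdout, so a typical invocation is presumably something like `node docs/config/src/include.js docs/config/src/included.en.md > single-page.en.md` (the output file name here is just an example, not taken from the repository).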
docs/config/src/included.en.md (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
# Vitastor
|
||||
|
||||
{{../../../README.md#The Idea}}
|
||||
|
||||
{{../../../README.md#Talks and presentations}}
|
||||
|
||||
{{../../intro/features.en.md}}
|
||||
|
||||
{{../../intro/quickstart.en.md}}
|
||||
|
||||
{{../../intro/architecture.en.md}}
|
||||
|
||||
## Installation
|
||||
|
||||
{{../../installation/packages.en.md}}
|
||||
|
||||
{{../../installation/proxmox.en.md}}
|
||||
|
||||
{{../../installation/openstack.en.md}}
|
||||
|
||||
{{../../installation/kubernetes.en.md}}
|
||||
|
||||
{{../../installation/source.en.md}}
|
||||
|
||||
{{../../config.en.md|indent=1}}
|
||||
|
||||
{{../../config/common.en.md|indent=2}}
|
||||
|
||||
{{../../config/network.en.md|indent=2}}
|
||||
|
||||
{{../../config/layout-cluster.en.md|indent=2}}
|
||||
|
||||
{{../../config/layout-osd.en.md|indent=2}}
|
||||
|
||||
{{../../config/osd.en.md|indent=2}}
|
||||
|
||||
{{../../config/monitor.en.md|indent=2}}
|
||||
|
||||
{{../../config/pool.en.md|indent=2}}
|
||||
|
||||
{{../../config/inode.en.md|indent=2}}
|
||||
|
||||
## Usage
|
||||
|
||||
{{../../usage/cli.en.md}}
|
||||
|
||||
{{../../usage/disk.en.md}}
|
||||
|
||||
{{../../usage/fio.en.md}}
|
||||
|
||||
{{../../usage/nbd.en.md}}
|
||||
|
||||
{{../../usage/qemu.en.md}}
|
||||
|
||||
{{../../usage/nfs.en.md}}
|
||||
|
||||
## Performance
|
||||
|
||||
{{../../performance/understanding.en.md}}
|
||||
|
||||
{{../../performance/theoretical.en.md}}
|
||||
|
||||
{{../../performance/comparison1.en.md}}
|
||||
|
||||
{{../../intro/author.en.md|indent=1}}
|
docs/config/src/included.ru.md (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
# Vitastor
|
||||
|
||||
{{../../../README-ru.md#Идея|indent=0}}
|
||||
|
||||
{{../../../README-ru.md#Презентации и записи докладов|indent=0}}
|
||||
|
||||
{{../../intro/features.ru.md}}
|
||||
|
||||
{{../../intro/quickstart.ru.md}}
|
||||
|
||||
{{../../intro/architecture.ru.md}}
|
||||
|
||||
## Установка
|
||||
|
||||
{{../../installation/packages.ru.md}}
|
||||
|
||||
{{../../installation/proxmox.ru.md}}
|
||||
|
||||
{{../../installation/openstack.ru.md}}
|
||||
|
||||
{{../../installation/kubernetes.ru.md}}
|
||||
|
||||
{{../../installation/source.ru.md}}
|
||||
|
||||
{{../../config.ru.md|indent=1}}
|
||||
|
||||
{{../../config/common.ru.md|indent=2}}
|
||||
|
||||
{{../../config/network.ru.md|indent=2}}
|
||||
|
||||
{{../../config/layout-cluster.ru.md|indent=2}}
|
||||
|
||||
{{../../config/layout-osd.ru.md|indent=2}}
|
||||
|
||||
{{../../config/osd.ru.md|indent=2}}
|
||||
|
||||
{{../../config/monitor.ru.md|indent=2}}
|
||||
|
||||
{{../../config/pool.ru.md|indent=2}}
|
||||
|
||||
{{../../config/inode.ru.md|indent=2}}
|
||||
|
||||
## Использование
|
||||
|
||||
{{../../usage/cli.ru.md}}
|
||||
|
||||
{{../../usage/disk.ru.md}}
|
||||
|
||||
{{../../usage/fio.ru.md}}
|
||||
|
||||
{{../../usage/nbd.ru.md}}
|
||||
|
||||
{{../../usage/qemu.ru.md}}
|
||||
|
||||
{{../../usage/nfs.ru.md}}
|
||||
|
||||
## Производительность
|
||||
|
||||
{{../../performance/understanding.ru.md}}
|
||||
|
||||
{{../../performance/theoretical.ru.md}}
|
||||
|
||||
{{../../performance/comparison1.ru.md}}
|
||||
|
||||
{{../../intro/author.ru.md|indent=1}}
|
@@ -7,26 +7,27 @@
|
||||
in Vitastor, affects memory usage, write amplification and I/O load
|
||||
distribution effectiveness.
|
||||
|
||||
Recommended default block size is 128 KB for SSD and 4 MB for HDD. In fact,
|
||||
it's possible to use 4 MB for SSD too - it will lower memory usage, but
|
||||
Recommended default block size is 128 KB for SSD and 1 MB for HDD. In fact,
|
||||
it's possible to use 1 MB for SSD too - it will lower memory usage, but
|
||||
may increase average WA and reduce linear performance.
|
||||
|
||||
OSD memory usage is roughly (SIZE / BLOCK * 68 bytes) which is roughly
|
||||
544 MB per 1 TB of used disk space with the default 128 KB block size.
|
||||
With 1 MB it's 8 times lower.
|
||||
info_ru: |
|
||||
Размер объектов (блоков данных), на которые делятся физические и виртуальные
|
||||
диски в Vitastor (в рамках каждого пула). Одна из ключевых на данный момент
|
||||
настроек, влияет на потребление памяти, объём избыточной записи (write
|
||||
amplification) и эффективность распределения нагрузки по OSD.
|
||||
|
||||
Рекомендуемые по умолчанию размеры блока - 128 килобайт для SSD и 4
|
||||
мегабайта для HDD. В принципе, для SSD можно тоже использовать 4 мегабайта,
|
||||
Рекомендуемые по умолчанию размеры блока - 128 килобайт для SSD и 1 мегабайт
|
||||
для HDD. В принципе, для SSD можно тоже использовать блок размером 1 мегабайт,
|
||||
это понизит использование памяти, но ухудшит распределение нагрузки и в
|
||||
среднем увеличит WA.
|
||||
|
||||
Потребление памяти OSD составляет примерно (РАЗМЕР / БЛОК * 68 байт),
|
||||
т.е. примерно 544 МБ памяти на 1 ТБ занятого места на диске при
|
||||
стандартном 128 КБ блоке.
|
||||
стандартном 128 КБ блоке. При 1 МБ блоке памяти нужно в 8 раз меньше.
|
||||
- name: bitmap_granularity
|
||||
type: int
|
||||
default: 4096
|
||||
|
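As a quick sanity check of the memory figures in the block_size description above (illustrative calculation only):

```js
// Rough OSD metadata memory estimate from the text above: SIZE / BLOCK * 68 bytes.
function meta_mem_mb(data_bytes, block_bytes)
{
    return Math.round(data_bytes / block_bytes * 68 / 1024 / 1024);
}

const TB = 1024 ** 4;
console.log(meta_mem_mb(TB, 128 * 1024));  // ~544 MB per 1 TB with 128 KB blocks
console.log(meta_mem_mb(TB, 1024 * 1024)); // ~68 MB per 1 TB with 1 MB blocks, 8 times lower
```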
@@ -204,3 +204,73 @@
|
||||
|
||||
Клиентам не обязательно знать про disk_alignment, так что помещать значение
|
||||
этого параметра в etcd в /vitastor/config/global не нужно.
|
||||
- name: data_csum_type
|
||||
type: string
|
||||
default: none
|
||||
info: |
|
||||
Data checksum type to use. May be "crc32c" or "none". Set to "crc32c" to
|
||||
enable data checksums.
|
||||
info_ru: |
|
||||
Тип используемых OSD контрольных сумм данных. Может быть "crc32c" или "none".
|
||||
Установите в "crc32c", чтобы включить расчёт и проверку контрольных сумм данных.
|
||||
|
||||
Следует понимать, что контрольные суммы в зависимости от размера блока их
|
||||
расчёта либо увеличивают потребление памяти, либо снижают производительность.
|
||||
Подробнее смотрите в описании параметра [csum_block_size](#csum_block_size).
|
||||
- name: csum_block_size
|
||||
type: int
|
||||
default: 4096
|
||||
info: |
|
||||
Checksum calculation block size.
|
||||
|
||||
Must be equal or a multiple of [bitmap_granularity](layout-cluster.en.md#bitmap_granularity)
|
||||
(which is usually 4 KB).
|
||||
|
||||
Checksums increase metadata size by 4 bytes for each csum_block_size of data.
|
||||
|
||||
Checksums are always a tradeoff:
|
||||
1. You either sacrifice +1 GB RAM per 1 TB of data
|
||||
2. Or you raise csum_block_size, for example, to 32k and sacrifice
|
||||
50% random write iops due to checksum read-modify-write
|
||||
3. Or you turn off [inmemory_metadata](osd.en.md#inmemory_metadata) and
|
||||
sacrifice 50% random read iops due to checksum reads
|
||||
|
||||
All-flash clusters usually have enough RAM to use default csum_block_size,
|
||||
which uses 1 GB RAM per 1 TB of data. HDD clusters usually don't.
|
||||
|
||||
Thus, recommended setups are:
|
||||
1. All-flash, 1 GB RAM per 1 TB data: default (csum_block_size=4k)
|
||||
2. All-flash, less RAM: csum_block_size=4k + inmemory_metadata=false
|
||||
3. Hybrid HDD+SSD: csum_block_size=4k + inmemory_metadata=false
|
||||
4. HDD-only, faster random read: csum_block_size=32k
|
||||
5. HDD-only, faster random write: csum_block_size=4k +
|
||||
inmemory_metadata=false + cached_io_meta=true
|
||||
|
||||
See also [cached_io_meta](osd.en.md#cached_io_meta).
|
||||
info_ru: |
|
||||
Размер блока расчёта контрольных сумм.
|
||||
|
||||
Должен быть равен или кратен [bitmap_granularity](layout-cluster.ru.md#bitmap_granularity)
|
||||
(который обычно равен 4 КБ).
|
||||
|
||||
Контрольные суммы увеличивают размер метаданных на 4 байта на каждые
|
||||
csum_block_size данных.
|
||||
|
||||
Контрольные суммы - это всегда компромисс:
|
||||
1. Вы либо жертвуете потреблением +1 ГБ памяти на 1 ТБ дискового пространства
|
||||
2. Либо вы повышаете csum_block_size до, скажем, 32k и жертвуете 50%
|
||||
скорости случайной записи из-за цикла чтения-изменения-записи для расчёта
|
||||
новых контрольных сумм
|
||||
3. Либо вы отключаете [inmemory_metadata](osd.ru.md#inmemory_metadata) и
|
||||
жертвуете 50% скорости случайного чтения из-за чтения контрольных сумм
|
||||
с диска
|
||||
|
||||
Таким образом, рекомендуются следующие варианты настроек:
|
||||
1. All-flash, 1 ГБ памяти на 1 ТБ данных: по умолчанию (csum_block_size=4k)
|
||||
2. All-flash, меньше памяти: csum_block_size=4k + inmemory_metadata=false
|
||||
3. Гибридные HDD+SSD: csum_block_size=4k + inmemory_metadata=false
|
||||
4. Только HDD, быстрее случайное чтение: csum_block_size=32k
|
||||
5. Только HDD, быстрее случайная запись: csum_block_size=4k +
|
||||
inmemory_metadata=false + cached_io_meta=true
|
||||
|
||||
Смотрите также [cached_io_meta](osd.ru.md#cached_io_meta).
|
||||
|
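To put the "+1 GB RAM per 1 TB of data" tradeoff from the csum_block_size description into numbers (illustrative calculation only, based on the stated 4 bytes per csum_block_size):

```js
// Checksum metadata overhead: 4 bytes per csum_block_size of data.
function csum_overhead_mb(data_bytes, csum_block)
{
    return Math.round(data_bytes / csum_block * 4 / 1024 / 1024);
}

const TB = 1024 ** 4;
console.log(csum_overhead_mb(TB, 4096));  // ~1024 MB, i.e. about 1 GB per 1 TB with 4k blocks
console.log(csum_overhead_mb(TB, 32768)); // ~128 MB per 1 TB with 32k blocks
```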
@@ -14,6 +14,7 @@ const L = {
|
||||
toc_config: '[Configuration](../config.en.md)',
|
||||
toc_usage: 'Usage',
|
||||
toc_performance: 'Performance',
|
||||
online: 'Can be changed online: yes',
|
||||
},
|
||||
ru: {
|
||||
Documentation: 'Документация',
|
||||
@@ -28,6 +29,7 @@ const L = {
|
||||
toc_config: '[Конфигурация](../config.ru.md)',
|
||||
toc_usage: 'Использование',
|
||||
toc_performance: 'Производительность',
|
||||
online: 'Можно менять на лету: да',
|
||||
},
|
||||
};
|
||||
const types = {
|
||||
@@ -70,6 +72,8 @@ for (const file of params_files)
|
||||
out += `- ${L[lang]['Default'] || 'Default'}: ${c.default}\n`;
|
||||
if (c.min !== undefined)
|
||||
out += `- ${L[lang]['Minimum'] || 'Minimum'}: ${c.min}\n`;
|
||||
if (c.online)
|
||||
out += `- ${L[lang]['online'] || 'Can be changed online: yes'}\n`;
|
||||
out += `\n`+(c["info_"+lang] || c["info"]).replace(/\s+$/, '');
|
||||
}
|
||||
const head = fs.readFileSync(__dirname+'/'+file+'.'+lang+'.md', { encoding: 'utf-8' });
|
||||
|
@@ -164,18 +164,21 @@
|
||||
type: sec
|
||||
min: 1
|
||||
default: 5
|
||||
online: true
|
||||
info: Interval before attempting to reconnect to an unavailable OSD.
|
||||
info_ru: Время ожидания перед повторной попыткой соединиться с недоступным OSD.
|
||||
- name: peer_connect_timeout
|
||||
type: sec
|
||||
min: 1
|
||||
default: 5
|
||||
online: true
|
||||
info: Timeout for OSD connection attempts.
|
||||
info_ru: Максимальное время ожидания попытки соединения с OSD.
|
||||
- name: osd_idle_timeout
|
||||
type: sec
|
||||
min: 1
|
||||
default: 5
|
||||
online: true
|
||||
info: |
|
||||
OSD connection inactivity time after which clients and other OSDs send
|
||||
keepalive requests to check state of the connection.
|
||||
@@ -186,6 +189,7 @@
|
||||
type: sec
|
||||
min: 1
|
||||
default: 5
|
||||
online: true
|
||||
info: |
|
||||
Maximum time to wait for OSD keepalive responses. If an OSD doesn't respond
|
||||
within this time, the connection to it is dropped and a reconnection attempt
|
||||
@@ -198,6 +202,7 @@
|
||||
type: ms
|
||||
min: 50
|
||||
default: 500
|
||||
online: true
|
||||
info: |
|
||||
OSDs respond to clients with a special error code when they receive I/O
|
||||
requests for a PG that's not synchronized and started. This parameter sets
|
||||
@@ -211,6 +216,7 @@
|
||||
- name: max_etcd_attempts
|
||||
type: int
|
||||
default: 5
|
||||
online: true
|
||||
info: |
|
||||
Maximum number of attempts for etcd requests which can't be retried
|
||||
indefinitely.
|
||||
@@ -220,6 +226,7 @@
|
||||
- name: etcd_quick_timeout
|
||||
type: ms
|
||||
default: 1000
|
||||
online: true
|
||||
info: |
|
||||
Timeout for etcd requests which should complete quickly, like lease refresh.
|
||||
info_ru: |
|
||||
@@ -228,6 +235,7 @@
|
||||
- name: etcd_slow_timeout
|
||||
type: ms
|
||||
default: 5000
|
||||
online: true
|
||||
info: Timeout for etcd requests which are allowed to wait for some time.
|
||||
info_ru: |
|
||||
Максимальное время выполнения запросов к etcd, для которых не обязательно
|
||||
@@ -235,6 +243,7 @@
|
||||
- name: etcd_keepalive_timeout
|
||||
type: sec
|
||||
default: max(30, etcd_report_interval*2)
|
||||
online: true
|
||||
info: |
|
||||
Timeout for etcd connection HTTP Keep-Alive. Should be higher than
|
||||
etcd_report_interval to guarantee that keepalive actually works.
|
||||
@@ -244,6 +253,7 @@
|
||||
- name: etcd_ws_keepalive_timeout
|
||||
type: sec
|
||||
default: 30
|
||||
online: true
|
||||
info: |
|
||||
etcd websocket ping interval required to keep the connection alive and
|
||||
detect disconnections quickly.
|
||||
@@ -252,6 +262,7 @@
|
||||
- name: client_dirty_limit
|
||||
type: int
|
||||
default: 33554432
|
||||
online: true
|
||||
info: |
|
||||
Without immediate_commit=all this parameter sets the limit of "dirty"
|
||||
(not committed by fsync) data allowed by the client before forcing an
|
||||
|
@@ -1,4 +1,5 @@
|
||||
# Runtime OSD Parameters
|
||||
|
||||
These parameters only apply to OSDs, are not fixed at the moment of OSD drive
|
||||
initialization and can be changed with an OSD restart.
|
||||
initialization and can be changed - either with an OSD restart or, for some of
|
||||
them, even without restarting by updating configuration in etcd.
|
||||
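For the parameters marked as changeable online, the update path is presumably the cluster-wide configuration key in etcd that the documentation mentions elsewhere (`/vitastor/config/global`), e.g. editing its JSON value so that it contains `"scrub_interval": "7d"`; the exact workflow, and the need to preserve the other keys already stored in that JSON object, is an assumption here rather than a statement from the changed files.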
|
@@ -2,4 +2,5 @@
|
||||
|
||||
Данные параметры используются только OSD, но, в отличие от дисковых параметров,
|
||||
не фиксируются в момент инициализации дисков OSD и могут быть изменены в любой
|
||||
момент с перезапуском OSD.
|
||||
момент с помощью перезапуска OSD, а некоторые и без перезапуска, с помощью
|
||||
изменения конфигурации в etcd.
|
||||
|
@@ -66,6 +66,7 @@
|
||||
- name: autosync_interval
|
||||
type: sec
|
||||
default: 5
|
||||
online: true
|
||||
info: |
|
||||
Time interval at which automatic fsyncs/flushes are issued by each OSD when
|
||||
the immediate_commit mode is disabled. fsyncs are required because without
|
||||
@@ -83,6 +84,7 @@
|
||||
- name: autosync_writes
|
||||
type: int
|
||||
default: 128
|
||||
online: true
|
||||
info: |
|
||||
Same as autosync_interval, but sets the maximum number of uncommitted write
|
||||
operations before issuing an fsync operation internally.
|
||||
@@ -93,6 +95,7 @@
|
||||
- name: recovery_queue_depth
|
||||
type: int
|
||||
default: 4
|
||||
online: true
|
||||
info: |
|
||||
Maximum recovery operations per one primary OSD at any given moment of time.
|
||||
Currently it's the only parameter available to tune the speed of recovery
|
||||
@@ -105,6 +108,7 @@
|
||||
- name: recovery_pg_switch
|
||||
type: int
|
||||
default: 128
|
||||
online: true
|
||||
info: |
|
||||
Number of recovery operations before switching to recovery of the next PG.
|
||||
The idea is to mix all PGs during recovery for more even space and load
|
||||
@@ -119,6 +123,7 @@
|
||||
- name: recovery_sync_batch
|
||||
type: int
|
||||
default: 16
|
||||
online: true
|
||||
info: Maximum number of recovery operations before issuing an additional fsync.
|
||||
info_ru: Максимальное число операций восстановления перед дополнительным fsync.
|
||||
- name: readonly
|
||||
@@ -133,6 +138,7 @@
|
||||
- name: no_recovery
|
||||
type: bool
|
||||
default: false
|
||||
online: true
|
||||
info: |
|
||||
Disable automatic background recovery of objects. Note that it doesn't
|
||||
affect implicit recovery of objects happening during writes - a write is
|
||||
@@ -145,6 +151,7 @@
|
||||
- name: no_rebalance
|
||||
type: bool
|
||||
default: false
|
||||
online: true
|
||||
info: |
|
||||
Disable background movement of data between different OSDs. Disabling it
|
||||
means that PGs in the `has_misplaced` state will be left in it indefinitely.
|
||||
@@ -155,6 +162,7 @@
|
||||
- name: print_stats_interval
|
||||
type: sec
|
||||
default: 3
|
||||
online: true
|
||||
info: |
|
||||
Time interval at which OSDs print simple human-readable operation
|
||||
statistics on stdout.
|
||||
@@ -164,6 +172,7 @@
|
||||
- name: slow_log_interval
|
||||
type: sec
|
||||
default: 10
|
||||
online: true
|
||||
info: |
|
||||
Time interval at which OSDs dump slow or stuck operations on stdout, if
|
||||
there are any. It is also the time after which an operation is considered
|
||||
@@ -175,6 +184,7 @@
|
||||
- name: inode_vanish_time
|
||||
type: sec
|
||||
default: 60
|
||||
online: true
|
||||
info: |
|
||||
Number of seconds after which a deleted inode is removed from OSD statistics.
|
||||
info_ru: |
|
||||
@@ -182,6 +192,7 @@
|
||||
- name: max_write_iodepth
|
||||
type: int
|
||||
default: 128
|
||||
online: true
|
||||
info: |
|
||||
Parallel client write operation limit per one OSD. Operations that exceed
|
||||
this limit are pushed to a temporary queue instead of being executed
|
||||
@@ -193,6 +204,7 @@
|
||||
- name: min_flusher_count
|
||||
type: int
|
||||
default: 1
|
||||
online: true
|
||||
info: |
|
||||
Flusher is a micro-thread that moves data from the journal to the data
|
||||
area of the device. The number of flushers is auto-tuned between the minimum and maximum.
|
||||
@@ -204,6 +216,7 @@
|
||||
- name: max_flusher_count
|
||||
type: int
|
||||
default: 256
|
||||
online: true
|
||||
info: |
|
||||
Maximum number of journal flushers (see above min_flusher_count).
|
||||
info_ru: |
|
||||
@@ -247,6 +260,73 @@
|
||||
достаточно 16- или 32-мегабайтного журнала. Однако в теории отключение
|
||||
параметра может оказаться полезным для гибридных OSD (HDD+SSD) с большими
|
||||
журналами, расположенными на быстром по сравнению с HDD устройстве.
|
||||
- name: cached_io_data
|
||||
type: bool
|
||||
default: false
|
||||
info: |
|
||||
Read and write *data* through Linux page cache, i.e. use a file descriptor
|
||||
opened with O_SYNC, but without O_DIRECT for I/O. May improve read
|
||||
performance for hot data and slower disks - HDDs and maybe SATA SSDs.
|
||||
Not recommended for desktop SSDs without capacitors because O_SYNC flushes
|
||||
disk cache on every write.
|
||||
info_ru: |
|
||||
Читать и записывать *данные* через системный кэш Linux (page cache), то есть,
|
||||
использовать для данных файловый дескриптор, открытый без флага O_DIRECT, но
|
||||
с флагом O_SYNC. Может улучшить скорость чтения для относительно медленных
|
||||
дисков - HDD и, возможно, SATA SSD. Не рекомендуется для потребительских
|
||||
SSD без конденсаторов, так как O_SYNC сбрасывает кэш диска при каждой записи.
|
||||
- name: cached_io_meta
|
||||
type: bool
|
||||
default: false
|
||||
info: |
|
||||
Read and write *metadata* through Linux page cache. May improve read
|
||||
performance only if your drives are relatively slow (HDD, SATA SSD), and
|
||||
only if checksums are enabled and [inmemory_metadata](#inmemory_metadata)
|
||||
is disabled, because in this case metadata blocks are read from disk
|
||||
on every read request to verify checksums and caching them may reduce this
|
||||
extra read load.
|
||||
|
||||
It is pointless to enable this option when inmemory_metadata is enabled, because all
|
||||
metadata is kept in memory anyway, and likely pointless without checksums,
|
||||
because in that case, metadata blocks are read from disk only during journal
|
||||
flushing.
|
||||
|
||||
If the same device is used for data and metadata, enabling [cached_io_data](#cached_io_data)
|
||||
also enables this parameter, unless it is turned off explicitly.
|
||||
info_ru: |
|
||||
Читать и записывать *метаданные* через системный кэш Linux. Может улучшить
|
||||
скорость чтения, если у вас медленные диски, и только если контрольные суммы
|
||||
включены, а параметр [inmemory_metadata](#inmemory_metadata) отключён, так
|
||||
как в этом случае блоки метаданных читаются с диска при каждом запросе чтения
|
||||
для проверки контрольных сумм и их кэширование может снизить дополнительную
|
||||
нагрузку на диск.
|
||||
|
||||
Абсолютно бессмысленно включать данный параметр, если параметр
|
||||
inmemory_metadata включён (по умолчанию это так), и также вероятно
|
||||
бессмысленно включать его, если не включены контрольные суммы, так как в
|
||||
этом случае блоки метаданных читаются с диска только во время сброса
|
||||
журнала.
|
||||
|
||||
Если одно и то же устройство используется для данных и метаданных, включение
|
||||
[cached_io_data](#cached_io_data) также включает данный параметр, при
|
||||
условии, что он не отключён явным образом.
|
||||
- name: cached_io_journal
|
||||
type: bool
|
||||
default: false
|
||||
info: |
|
||||
Read and write *journal* through Linux page cache. May improve read
|
||||
performance if [inmemory_journal](#inmemory_journal) is turned off.
|
||||
|
||||
If the same device is used for metadata and journal, enabling [cached_io_meta](#cached_io_meta)
|
||||
also enables this parameter, unless it is turned off explicitly.
|
||||
info_ru: |
|
||||
Читать и записывать *журнал* через системный кэш Linux. Может улучшить
|
||||
скорость чтения, если параметр [inmemory_journal](#inmemory_journal)
|
||||
отключён.
|
||||
|
||||
Если одно и то же устройство используется для метаданных и журнала,
|
||||
включение [cached_io_meta](#cached_io_meta) также включает данный
|
||||
параметр, при условии, что он не отключён явным образом.
|
||||
- name: journal_sector_buffer_count
|
||||
type: int
|
||||
default: 32
|
||||
@@ -284,6 +364,7 @@
|
||||
- name: throttle_small_writes
|
||||
type: bool
|
||||
default: false
|
||||
online: true
|
||||
info: |
|
||||
Enable soft throttling of small journaled writes. Useful for hybrid OSDs
|
||||
with fast journal/metadata devices and slow data devices. The idea is that
|
||||
@@ -312,6 +393,7 @@
|
||||
- name: throttle_target_iops
|
||||
type: int
|
||||
default: 100
|
||||
online: true
|
||||
info: |
|
||||
Target maximum number of throttled operations per second under the condition
|
||||
of full journal. Set it to approximate random write iops of your data devices
|
||||
@@ -324,6 +406,7 @@
|
||||
- name: throttle_target_mbs
|
||||
type: int
|
||||
default: 100
|
||||
online: true
|
||||
info: |
|
||||
Target maximum bandwidth of throttled operations in MB/s under
|
||||
the condition of full journal. Set it to approximate linear write
|
||||
@@ -336,6 +419,7 @@
|
||||
- name: throttle_target_parallelism
|
||||
type: int
|
||||
default: 1
|
||||
online: true
|
||||
info: |
|
||||
Target maximum parallelism of throttled operations under the condition of
|
||||
full journal. Set it to approximate internal parallelism of your data
|
||||
@@ -348,6 +432,7 @@
|
||||
- name: throttle_threshold_us
|
||||
type: us
|
||||
default: 50
|
||||
online: true
|
||||
info: |
|
||||
Minimal computed delay to be applied to throttled operations. Usually
|
||||
doesn't need to be changed.
|
||||
@@ -357,10 +442,151 @@
|
||||
- name: osd_memlock
|
||||
type: bool
|
||||
default: false
|
||||
info: >
|
||||
info: |
|
||||
Lock all OSD memory to prevent it from being unloaded into swap with
|
||||
mlockall(). Requires sufficient ulimit -l (max locked memory).
|
||||
info_ru: >
|
||||
info_ru: |
|
||||
Блокировать всю память OSD с помощью mlockall, чтобы запретить её выгрузку
|
||||
в пространство подкачки. Требует достаточного значения ulimit -l (лимита
|
||||
заблокированной памяти).
|
||||
- name: auto_scrub
|
||||
type: bool
|
||||
default: false
|
||||
online: true
|
||||
info: |
|
||||
Data scrubbing is the process of background verification of copies to find
|
||||
and repair corrupted blocks. It's not run automatically by default since
|
||||
it's a new feature. Set this parameter to true to enable automatic scrubs.
|
||||
|
||||
This parameter makes OSDs automatically schedule data scrubbing of clean PGs
|
||||
every `scrub_interval` (see below). You can also start/schedule scrubbing
|
||||
manually by setting `next_scrub` JSON key to the desired UNIX time of the
|
||||
next scrub in `/pg/history/...` values in etcd.
|
||||
info_ru: |
|
||||
Скраб - процесс фоновой проверки копий данных, предназначенный, чтобы
|
||||
находить и исправлять повреждённые блоки. По умолчанию эти проверки ещё не
|
||||
запускаются автоматически, так как являются новой функцией. Чтобы включить
|
||||
автоматическое планирование скрабов, установите данный параметр в true.
|
||||
|
||||
Включённый параметр заставляет OSD автоматически планировать фоновую
|
||||
проверку чистых PG раз в `scrub_interval` (см. ниже). Вы также можете
|
||||
запустить или запланировать проверку вручную, установив значение ключа JSON
|
||||
`next_scrub` внутри ключей etcd `/pg/history/...` в UNIX-время следующей
|
||||
желаемой проверки.
|
||||
- name: no_scrub
|
||||
type: bool
|
||||
default: false
|
||||
online: true
|
||||
info: |
|
||||
Temporarily disable scrubbing and stop running scrubs.
|
||||
info_ru: |
|
||||
Временно отключить и остановить запущенные скрабы.
|
||||
- name: scrub_interval
|
||||
type: string
|
||||
default: 30d
|
||||
online: true
|
||||
info: |
|
||||
Default automatic scrubbing interval for all pools. Numbers without suffix
|
||||
are treated as seconds; possible unit suffixes include 's' (seconds),
|
||||
'm' (minutes), 'h' (hours), 'd' (days), 'M' (months) and 'y' (years).
|
||||
info_ru: |
|
||||
Интервал автоматической фоновой проверки по умолчанию для всех пулов.
|
||||
Значения без указанной единицы измерения считаются в секундах, допустимые
|
||||
символы единиц измерения в конце: 's' (секунды),
|
||||
'm' (минуты), 'h' (часы), 'd' (дни), 'M' (месяцы) или 'y' (годы).
- name: scrub_queue_depth
type: int
default: 1
online: true
info: |
Number of parallel scrubbing operations per one OSD.
info_ru: |
Число параллельных операций фоновой проверки на один OSD.
- name: scrub_sleep
type: ms
default: 0
online: true
info: |
Additional interval between two consecutive scrubbing operations on one OSD.
Can be used to slow down scrubbing if it affects user load too much.
info_ru: |
Дополнительный интервал ожидания после фоновой проверки каждого объекта на
одном OSD. Может использоваться для замедления скраба, если он слишком
сильно влияет на пользовательскую нагрузку.
- name: scrub_list_limit
type: int
default: 1000
online: true
info: |
Number of objects to list in one listing operation during scrub.
info_ru: |
Размер загружаемых за одну операцию списков объектов в процессе фоновой
проверки.
- name: scrub_find_best
type: bool
default: true
online: true
info: |
Find and automatically restore best versions of objects with unmatched
copies. In replicated setups, the best version is the version with most
matching replicas. In EC setups, the best version is the subset of data
and parity chunks without mismatches.

The hypothetical situation where you might want to disable it is when
you have 3 replicas and you are paranoid that 2 HDDs out of 3 may silently
corrupt an object in the same way (for example, zero it out) and only
1 HDD will remain good. In this case disabling scrub_find_best may help
you to recover the data! See also scrub_ec_max_bruteforce below.
info_ru: |
Находить и автоматически восстанавливать "лучшие версии" объектов с
несовпадающими копиями/частями. При использовании репликации "лучшая"
версия - версия, доступная в большем числе экземпляров, чем другие. При
использовании кодов коррекции ошибок "лучшая" версия - это подмножество
частей данных и чётности, полностью соответствующих друг другу.

Гипотетическая ситуация, в которой вы можете захотеть отключить этот
поиск - это если у вас 3 реплики и вы боитесь, что 2 диска из 3 могут
незаметно и одинаково повредить данные одного и того же объекта, например,
занулив его, и только 1 диск останется неповреждённым. В этой ситуации
отключение этого параметра поможет вам восстановить данные! Смотрите также
описание следующего параметра - scrub_ec_max_bruteforce.
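A toy illustration of the "best version" rule for replicated pools described above: the copy seen on the largest number of replicas wins, and a tie means the object stays inconsistent. This is only a sketch of the idea, not the actual OSD code:

```js
// Pick the "best" replica by majority of identical copies; null means inconsistent.
function pickBestReplica(copies) // copies: array of Buffers read from each replica
{
    const counts = new Map();
    for (const c of copies)
    {
        const k = c.toString('hex');
        counts.set(k, (counts.get(k) || 0) + 1);
    }
    const sorted = [...counts.entries()].sort((a, b) => b[1] - a[1]);
    if (!sorted.length)
        return null;
    // require a strict winner, otherwise report the object as inconsistent
    if (sorted.length > 1 && sorted[0][1] === sorted[1][1])
        return null;
    return Buffer.from(sorted[0][0], 'hex');
}
```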
- name: scrub_ec_max_bruteforce
type: int
default: 100
online: true
info: |
Vitastor can locate corrupted chunks in EC setups with more than 1 parity
chunk by brute-forcing all possible error locations. This configuration
value limits the maximum number of checked combinations. You can try to
increase it if you have EC N+K setup with N and K large enough for
combination count `C(N+K-1, K-1) = (N+K-1)! / (K-1)! / N!` to be greater
than the default 100.

If there are too many possible combinations or if multiple combinations give
correct results then objects are marked inconsistent and aren't recovered
automatically.

In replicated setups bruteforcing isn't needed, Vitastor just assumes that
the variant with most available equal copies is correct. For example, if
you have 3 replicas and 1 of them differs, this one is considered to be
corrupted. But if there is no "best" version with more copies than all
others have then the object is also marked as inconsistent.
info_ru: |
Vitastor старается определить повреждённые части объектов при использовании
EC (кодов коррекции ошибок) с более, чем 1 диском чётности, путём перебора
всех возможных комбинаций ошибочных частей. Данное значение конфигурации
ограничивает число перебираемых комбинаций. Вы можете попробовать поднять
его, если используете схему кодирования EC N+K с N и K, достаточно большими
для того, чтобы число сочетаний `C(N+K-1, K-1) = (N+K-1)! / (K-1)! / N!`
было больше, чем стандартное значение 100.

Если возможных комбинаций слишком много или если корректная комбинаций не
определяется однозначно, объекты помечаются неконсистентными (inconsistent)
и не восстанавливаются автоматически.

При использовании репликации перебор не нужен, Vitastor просто предполагает,
что вариант объекта с наибольшим количеством одинаковых копий корректен.
Например, если вы используете 3 реплики и 1 из них отличается, эта 1 копия
считается некорректной. Однако, если "лучшую" версию с числом доступных
копий большим, чем у всех других версий, найти невозможно, то объект тоже
маркируется неконсистентным.
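To decide whether the default scrub_ec_max_bruteforce of 100 is enough for a given EC N+K scheme, the combination count `C(N+K-1, K-1)` from the description above can be computed directly. A small sketch:

```js
// C(N+K-1, K-1) = (N+K-1)! / (K-1)! / N! - the number of error-location
// combinations that scrub may have to check for an EC N+K pool.
function ecBruteforceCombinations(n, k)
{
    let c = 1n;
    for (let i = 1n; i < BigInt(k); i++)
        c = c * (BigInt(n) + i) / i; // builds C(n+i, i) step by step, division is exact
    return c;
}

console.log(ecBruteforceCombinations(2, 2));  // 3n   - fits in the default 100
console.log(ecBruteforceCombinations(10, 4)); // 286n - needs a higher limit
```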
|
||||
|
@@ -8,13 +8,13 @@
|
||||
|
||||
У Vitastor есть CSI-плагин для Kubernetes, поддерживающий RWO, а также блочные RWX, тома.
|
||||
|
||||
Для установки возьмите манифесты из директории [csi/deploy/](../csi/deploy/), поместите
|
||||
вашу конфигурацию подключения к Vitastor в [csi/deploy/001-csi-config-map.yaml](../csi/deploy/001-csi-config-map.yaml),
|
||||
настройте StorageClass в [csi/deploy/009-storage-class.yaml](../csi/deploy/009-storage-class.yaml)
|
||||
Для установки возьмите манифесты из директории [csi/deploy/](../../csi/deploy/), поместите
|
||||
вашу конфигурацию подключения к Vitastor в [csi/deploy/001-csi-config-map.yaml](../../csi/deploy/001-csi-config-map.yaml),
|
||||
настройте StorageClass в [csi/deploy/009-storage-class.yaml](../../csi/deploy/009-storage-class.yaml)
|
||||
и примените все `NNN-*.yaml` к вашей инсталляции Kubernetes.
|
||||
|
||||
```
|
||||
for i in ./???-*.yaml; do kubectl apply -f $i; done
|
||||
```
|
||||
|
||||
После этого вы сможете создавать PersistentVolume. Пример смотрите в файле [csi/deploy/example-pvc.yaml](../csi/deploy/example-pvc.yaml).
|
||||
После этого вы сможете создавать PersistentVolume. Пример смотрите в файле [csi/deploy/example-pvc.yaml](../../csi/deploy/example-pvc.yaml).
|
||||
|
@@ -36,5 +36,5 @@ vitastor_pool_id = 1
image_upload_use_cinder_backend = True
```

To put Glance images in Vitastor, use [https://docs.openstack.org/cinder/pike/admin/blockstorage-volume-backed-image.html](volume-backed images),
To put Glance images in Vitastor, use [volume-backed images](https://docs.openstack.org/cinder/pike/admin/blockstorage-volume-backed-image.html),
although the support has not been verified yet.
|
||||
|
@@ -36,5 +36,5 @@ image_upload_use_cinder_backend = True
|
||||
```
|
||||
|
||||
Чтобы помещать в Vitastor Glance-образы, нужно использовать
|
||||
[https://docs.openstack.org/cinder/pike/admin/blockstorage-volume-backed-image.html](образы на основе томов Cinder),
|
||||
[образы на основе томов Cinder](https://docs.openstack.org/cinder/pike/admin/blockstorage-volume-backed-image.html),
|
||||
однако, поддержка этой функции ещё не проверялась.
|
||||
|
@@ -11,8 +11,11 @@
- Trust Vitastor package signing key:
`wget https://vitastor.io/debian/pubkey.gpg -O /etc/apt/trusted.gpg.d/vitastor.gpg`
- Add Vitastor package repository to your /etc/apt/sources.list:
- Debian 11 (Bullseye/Sid): `deb https://vitastor.io/debian bullseye main`
- Debian 12 (Bookworm/Sid): `deb https://vitastor.io/debian bookworm main`
- Debian 11 (Bullseye): `deb https://vitastor.io/debian bullseye main`
- Debian 10 (Buster): `deb https://vitastor.io/debian buster main`
- Add `-oldstable` to bookworm/bullseye/buster in this line to install the last
stable version from 0.9.x branch instead of 1.x
- For Debian 10 (Buster) also enable backports repository:
`deb http://deb.debian.org/debian buster-backports main`
- Install packages: `apt update; apt install vitastor lp-solve etcd linux-image-amd64 qemu`
@@ -45,3 +48,10 @@
- etcd 3.4.15 or newer. Earlier versions won't work because of various bugs,
for example [#12402](https://github.com/etcd-io/etcd/pull/12402).
- node.js 10 or newer

## Version archive

All previous Vitastor and other components (QEMU, etcd...) package builds
can be found here:

https://vitastor.io/archive/
|
||||
|
@@ -11,8 +11,11 @@
|
||||
- Добавьте ключ репозитория Vitastor:
|
||||
`wget https://vitastor.io/debian/pubkey.gpg -O /etc/apt/trusted.gpg.d/vitastor.gpg`
|
||||
- Добавьте репозиторий Vitastor в /etc/apt/sources.list:
|
||||
- Debian 11 (Bullseye/Sid): `deb https://vitastor.io/debian bullseye main`
|
||||
- Debian 12 (Bookworm/Sid): `deb https://vitastor.io/debian bookworm main`
|
||||
- Debian 11 (Bullseye): `deb https://vitastor.io/debian bullseye main`
|
||||
- Debian 10 (Buster): `deb https://vitastor.io/debian buster main`
|
||||
- Добавьте `-oldstable` к слову bookworm/bullseye/buster в этой строке, чтобы
|
||||
установить последнюю стабильную версию из ветки 0.9.x вместо 1.x
|
||||
- Для Debian 10 (Buster) также включите репозиторий backports:
|
||||
`deb http://deb.debian.org/debian buster-backports main`
|
||||
- Установите пакеты: `apt update; apt install vitastor lp-solve etcd linux-image-amd64 qemu`
|
||||
@@ -44,3 +47,10 @@
|
||||
- etcd 3.4.15 или новее. Более старые версии не будут работать из-за разных багов,
|
||||
например, [#12402](https://github.com/etcd-io/etcd/pull/12402).
|
||||
- node.js 10 или новее
|
||||
|
||||
## Архив предыдущих версий
|
||||
|
||||
Все предыдущие сборки пакетов Vitastor и других компонентов, таких, как QEMU
|
||||
и etcd, можно скачать по следующей ссылке:
|
||||
|
||||
https://vitastor.io/archive/
|
||||
|
@@ -6,10 +6,10 @@

# Proxmox VE

To enable Vitastor support in Proxmox Virtual Environment (6.4-7.4 are supported):
To enable Vitastor support in Proxmox Virtual Environment (6.4-8.0 are supported):

- Add the corresponding Vitastor Debian repository into sources.list on Proxmox hosts:
buster for 6.4, bullseye for 7.4, pve7.1 for 7.1, pve7.2 for 7.2, pve7.3 for 7.3
bookworm for 8.0, bullseye for 7.4, pve7.3 for 7.3, pve7.2 for 7.2, pve7.1 for 7.1, buster for 6.4
- Install vitastor-client, pve-qemu-kvm, pve-storage-vitastor (* or see note) packages from Vitastor repository
- Define storage in `/etc/pve/storage.cfg` (see below)
- Block network access from VMs to Vitastor network (to OSDs and etcd),
@@ -35,5 +35,5 @@ vitastor: vitastor
vitastor_nbd 0
```

\* Note: you can also manually copy [patches/VitastorPlugin.pm](patches/VitastorPlugin.pm) to Proxmox hosts
\* Note: you can also manually copy [patches/VitastorPlugin.pm](../../patches/VitastorPlugin.pm) to Proxmox hosts
as `/usr/share/perl5/PVE/Storage/Custom/VitastorPlugin.pm` instead of installing pve-storage-vitastor.
|
||||
|
@@ -1,15 +1,15 @@
|
||||
[Документация](../../README-ru.md#документация) → Установка → Proxmox
|
||||
[Документация](../../README-ru.md#документация) → Установка → Proxmox VE
|
||||
|
||||
-----
|
||||
|
||||
[Read in English](proxmox.en.md)
|
||||
|
||||
# Proxmox
|
||||
# Proxmox VE
|
||||
|
||||
Чтобы подключить Vitastor к Proxmox Virtual Environment (поддерживаются версии 6.4-7.4):
|
||||
Чтобы подключить Vitastor к Proxmox Virtual Environment (поддерживаются версии 6.4-8.0):
|
||||
|
||||
- Добавьте соответствующий Debian-репозиторий Vitastor в sources.list на хостах Proxmox:
|
||||
buster для 6.4, bullseye для 7.4, pve7.1 для 7.1, pve7.2 для 7.2, pve7.3 для 7.3
|
||||
bookworm для 8.0, bullseye для 7.4, pve7.3 для 7.3, pve7.2 для 7.2, pve7.1 для 7.1, buster для 6.4
|
||||
- Установите пакеты vitastor-client, pve-qemu-kvm, pve-storage-vitastor (* или см. сноску) из репозитория Vitastor
|
||||
- Определите тип хранилища в `/etc/pve/storage.cfg` (см. ниже)
|
||||
- Обязательно заблокируйте доступ от виртуальных машин к сети Vitastor (OSD и etcd), т.к. Vitastor (пока) не поддерживает аутентификацию
|
||||
@@ -35,5 +35,5 @@ vitastor: vitastor
|
||||
```
|
||||
|
||||
\* Примечание: вместо установки пакета pve-storage-vitastor вы можете вручную скопировать файл
|
||||
[patches/VitastorPlugin.pm](patches/VitastorPlugin.pm) на хосты Proxmox как
|
||||
[patches/VitastorPlugin.pm](../../patches/VitastorPlugin.pm) на хосты Proxmox как
|
||||
`/usr/share/perl5/PVE/Storage/Custom/VitastorPlugin.pm`.
|
||||
|
@@ -21,7 +21,7 @@

## Basic instructions

Download source, for example using git: `git clone --recurse-submodules https://yourcmc.ru/git/vitalif/vitastor/`
Download source, for example using git: `git clone --recurse-submodules https://git.yourcmc.ru/vitalif/vitastor/`

Get `fio` source and symlink it into `<vitastor>/fio`. If you don't want to build fio engine,
you can disable it by passing `-DWITH_FIO=no` to cmake.
@@ -41,7 +41,7 @@ It's recommended to build the QEMU driver (qemu_driver.c) in-tree, as a part of
QEMU build process. To do that:
- Install vitastor client library headers (from source or from vitastor-client-dev package)
- Take a corresponding patch from `patches/qemu-*-vitastor.patch` and apply it to QEMU source
- Copy `src/qemu_driver.c` to QEMU source directory as `block/block-vitastor.c`
- Copy `src/qemu_driver.c` to QEMU source directory as `block/vitastor.c`
- Build QEMU as usual

But it is also possible to build it out-of-tree. To do that:
|
||||
|
@@ -21,7 +21,7 @@
|
||||
|
||||
## Базовая инструкция
|
||||
|
||||
Скачайте исходные коды, например, из git: `git clone --recurse-submodules https://yourcmc.ru/git/vitalif/vitastor/`
|
||||
Скачайте исходные коды, например, из git: `git clone --recurse-submodules https://git.yourcmc.ru/vitalif/vitastor/`
|
||||
|
||||
Скачайте исходные коды пакета `fio`, распакуйте их и создайте символическую ссылку на них
|
||||
в директории исходников Vitastor: `<vitastor>/fio`. Либо, если вы не хотите собирать плагин fio,
|
||||
@@ -41,7 +41,7 @@ cmake .. && make -j8 install
|
||||
Драйвер QEMU (qemu_driver.c) рекомендуется собирать вместе с самим QEMU. Для этого:
|
||||
- Установите заголовки клиентской библиотеки Vitastor (из исходников или из пакета vitastor-client-dev)
|
||||
- Возьмите соответствующий патч из `patches/qemu-*-vitastor.patch` и примените его к исходникам QEMU
|
||||
- Скопируйте [src/qemu_driver.c](../../src/qemu_driver.c) в директорию исходников QEMU как `block/block-vitastor.c`
|
||||
- Скопируйте [src/qemu_driver.c](../../src/qemu_driver.c) в директорию исходников QEMU как `block/vitastor.c`
|
||||
- Соберите QEMU как обычно
|
||||
|
||||
Однако в целях отладки драйвер также можно собирать отдельно от QEMU. Для этого:
|
||||
@@ -60,7 +60,7 @@ cmake .. && make -j8 install
|
||||
* Для QEMU 2.0+: `<qemu>/qapi-types.h` → `<vitastor>/qemu/b/qemu/qapi-types.h`
|
||||
- `config-host.h` и `qapi` нужны, т.к. в них содержатся автогенерируемые заголовки
|
||||
- Сконфигурируйте cmake Vitastor с `WITH_QEMU=yes` (`cmake .. -DWITH_QEMU=yes`) и, если вы
|
||||
используете RHEL-подобый дистрибутив, также с `QEMU_PLUGINDIR=qemu-kvm`.
|
||||
используете RHEL-подобный дистрибутив, также с `QEMU_PLUGINDIR=qemu-kvm`.
|
||||
- После этого в процессе сборки Vitastor также будет собираться подходящий для вашей
|
||||
версии QEMU `block-vitastor.so`.
|
||||
- Таким образом можно использовать драйвер даже с немодифицированным QEMU, но в этом случае
|
||||
|
@@ -44,7 +44,7 @@
depends linearly on drive capacity and data store block size which is 128 KB by default.
With 128 KB blocks metadata takes around 512 MB per 1 TB (which is still less than Ceph wants).
Journal is also kept in memory by default, but in SSD-only clusters it's only 32 MB, and in SSD+HDD
clusters, where it's beneficial to increase it, [inmemory_journal](docs/config/osd.en.md#inmemory_journal) can be disabled.
clusters, where it's beneficial to increase it, [inmemory_journal](../config/osd.en.md#inmemory_journal) can be disabled.
- Vitastor storage layer doesn't have internal copy-on-write or redirect-write. I know that maybe
it's possible to create a good copy-on-write storage, but it's much harder and makes performance
less deterministic, so CoW isn't used in Vitastor.
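A back-of-the-envelope check of the memory figure quoted in the list above: the "~512 MB per 1 TB at 128 KB blocks" statement implies roughly 64 bytes of metadata per stored object; that per-entry size is an inference of this sketch, not a number taken from the diff:

```js
// Rough OSD metadata RAM estimate: one in-memory entry per data block.
function metadataRamBytes(driveBytes, blockSize = 128 * 1024, bytesPerEntry = 64)
{
    return driveBytes / blockSize * bytesPerEntry;
}

const TB = 1024 ** 4;
console.log(metadataRamBytes(1 * TB) / 1024 ** 2, 'MB per 1 TB'); // 512 MB per 1 TB
console.log(metadataRamBytes(4 * TB) / 1024 ** 2, 'MB per 4 TB'); // 2048 MB per 4 TB
```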
|
||||
|
@@ -156,7 +156,7 @@
|
||||
блока хранилища (block_size, по умолчанию 128 КБ). С 128 КБ блоком потребление памяти
|
||||
составляет примерно 512 МБ на 1 ТБ данных. Журналы по умолчанию тоже хранятся в памяти,
|
||||
но в SSD-кластерах нужный размер журнала составляет всего 32 МБ, а в гибридных (SSD+HDD)
|
||||
кластерах, в которых есть смысл делать журналы больше, можно отключить [inmemory_journal](../docs/config/osd.ru.md#inmemory_journal).
|
||||
кластерах, в которых есть смысл делать журналы больше, можно отключить [inmemory_journal](../config/osd.ru.md#inmemory_journal).
|
||||
- В Vitastor нет внутреннего copy-on-write. Я считаю, что реализация CoW-хранилища гораздо сложнее,
|
||||
поэтому сложнее добиться устойчиво хороших результатов. Возможно, в один прекрасный день
|
||||
я придумаю красивый алгоритм для CoW-хранилища, но пока нет — внутреннего CoW в Vitastor не будет.
|
||||
|
@@ -29,12 +29,14 @@
- Snapshots and copy-on-write image clones
- [Write throttling to smooth random write workloads in SSD+HDD configurations](../config/osd.en.md#throttle_small_writes)
- [RDMA/RoCEv2 support via libibverbs](../config/network.en.md#rdma_device)
- [Scrubbing](../config/osd.en.md#auto_scrub) (verification of copies)
- [Checksums](../config/layout-osd.en.md#data_csum_type)

## Plugins and tools

- [Debian and CentOS packages](../installation/packages.en.md)
- [Image management CLI (vitastor-cli)](../usage/cli.en.md)
- [Disk management CLI (vitastor-disk)](docs/usage/disk.en.md)
- [Disk management CLI (vitastor-disk)](../usage/disk.en.md)
- Generic user-space client library
- [Native QEMU driver](../usage/qemu.en.md)
- [Loadable fio engine for benchmarks](../usage/fio.en.md)
@@ -54,8 +56,6 @@ The following features are planned for the future:
- iSCSI proxy
- Multi-threaded client
- Faster failover
- Scrubbing without checksums (verification of replicas)
- Checksums
- Tiered storage (SSD caching)
- NVDIMM support
- Compression (possibly)
|
||||
|
@@ -13,7 +13,7 @@
|
||||
## Серверные функции
|
||||
|
||||
- Базовая часть - надёжное кластерное блочное хранилище без единой точки отказа
|
||||
- [Производительность](../comparison1.ru.md) ;-D
|
||||
- [Производительность](../performance/comparison1.ru.md) ;-D
|
||||
- [Несколько схем отказоустойчивости](../config/pool.ru.md#scheme): репликация, XOR n+1 (1 диск чётности), коды коррекции ошибок
|
||||
Рида-Соломона на основе библиотек jerasure и ISA-L с любым числом дисков данных и чётности в группе
|
||||
- Конфигурация через простые человекочитаемые JSON-структуры в etcd
|
||||
@@ -31,12 +31,14 @@
|
||||
- Снапшоты и copy-on-write клоны
|
||||
- [Сглаживание производительности случайной записи в SSD+HDD конфигурациях](../config/osd.ru.md#throttle_small_writes)
|
||||
- [Поддержка RDMA/RoCEv2 через libibverbs](../config/network.ru.md#rdma_device)
|
||||
- [Фоновая проверка целостности](../config/osd.ru.md#auto_scrub) (сверка копий)
|
||||
- [Контрольные суммы](../config/layout-osd.ru.md#data_csum_type)
|
||||
|
||||
## Драйверы и инструменты
|
||||
|
||||
- [Пакеты для Debian и CentOS](../installation/packages.ru.md)
|
||||
- [Консольный интерфейс управления образами (vitastor-cli)](../usage/cli.ru.md)
|
||||
- [Инструмент управления дисками (vitastor-disk)](docs/usage/disk.ru.md)
|
||||
- [Инструмент управления дисками (vitastor-disk)](../usage/disk.ru.md)
|
||||
- Общая пользовательская клиентская библиотека для работы с кластером
|
||||
- [Драйвер диска для QEMU](../usage/qemu.ru.md)
|
||||
- [Драйвер диска для утилиты тестирования производительности fio](../usage/fio.ru.md)
|
||||
@@ -54,8 +56,6 @@
|
||||
- iSCSI-прокси
|
||||
- Многопоточный клиент
|
||||
- Более быстрое переключение при отказах
|
||||
- Фоновая проверка целостности без контрольных сумм (сверка реплик)
|
||||
- Контрольные суммы
|
||||
- Поддержка SSD-кэширования (tiered storage)
|
||||
- Поддержка NVDIMM
|
||||
- Возможно, сжатие
|
||||
|
@@ -7,6 +7,7 @@
# Quick Start

- [Preparation](#preparation)
- [Recommended drives](#recommended-drives)
- [Configure monitors](#configure-monitors)
- [Configure OSDs](#configure-osds)
- [Create a pool](#create-a-pool)
@@ -19,10 +20,20 @@
- Get some SATA or NVMe SSDs with capacitors (server-grade drives). You can use desktop SSDs
with lazy fsync, but prepare for inferior single-thread latency. Read more about capacitors
[here](../config/layout-cluster.en.md#immediate_commit).
- If you want to use HDDs, get modern HDDs with Media Cache or SSD Cache: HGST Ultrastar,
Toshiba MG08, Seagate EXOS or something similar. If your drives don't have such cache then
you also need small SSDs for journal and metadata (even 2 GB per 1 TB of HDD space is enough).
- Get a fast network (at least 10 Gbit/s). Something like Mellanox ConnectX-4 with RoCEv2 is ideal.
- Disable CPU powersaving: `cpupower idle-set -D 0 && cpupower frequency-set -g performance`.
- [Install Vitastor packages](../installation/packages.en.md).

## Recommended drives

- SATA SSD: Micron 5100/5200/5300/5400, Samsung PM863/PM883/PM893, Intel D3-S4510/4520/4610/4620, Kingston DC500M
- NVMe: Micron 9100/9200/9300/9400, Micron 7300/7450, Samsung PM983/PM9A3, Samsung PM1723/1735/1743,
Intel DC-P3700/P4500/P4600, Intel D7-P5500/P5600, Intel Optane, Kingston DC1000B/DC1500M
- HDD: HGST Ultrastar, Toshiba MG06/MG07/MG08, Seagate EXOS

## Configure monitors

On the monitor hosts:
@@ -45,9 +56,10 @@ On the monitor hosts:
}
```
- Initialize OSDs:
- SSD-only: `vitastor-disk prepare /dev/sdXXX [/dev/sdYYY ...]`. You can add
`--disable_data_fsync off` to leave disk cache enabled if you use desktop
SSDs without capacitors.
- SSD-only or HDD-only: `vitastor-disk prepare /dev/sdXXX [/dev/sdYYY ...]`.
Add `--disable_data_fsync off` to leave disk write cache enabled if you use
desktop SSDs without capacitors. Do NOT add `--disable_data_fsync off` if you
use HDDs or SSD+HDD.
- Hybrid, SSD+HDD: `vitastor-disk prepare --hybrid /dev/sdXXX [/dev/sdYYY ...]`.
Pass all your devices (HDD and SSD) to this script — it will partition disks and initialize journals on its own.
This script skips HDDs which are already partitioned so if you want to use non-empty disks for
|
||||
|
@@ -7,6 +7,7 @@
|
||||
# Быстрый старт
|
||||
|
||||
- [Подготовка](#подготовка)
|
||||
- [Рекомендуемые диски](#рекомендуемые-диски)
|
||||
- [Настройте мониторы](#настройте-мониторы)
|
||||
- [Настройте OSD](#настройте-osd)
|
||||
- [Создайте пул](#создайте-пул)
|
||||
@@ -19,10 +20,20 @@
|
||||
- Возьмите серверы с SSD (SATA или NVMe), желательно с конденсаторами (серверные SSD). Можно
|
||||
использовать и десктопные SSD, включив режим отложенного fsync, но производительность будет хуже.
|
||||
О конденсаторах читайте [здесь](../config/layout-cluster.ru.md#immediate_commit).
|
||||
- Если хотите использовать HDD, берите современные модели с Media или SSD кэшем - HGST Ultrastar,
|
||||
Toshiba MG08, Seagate EXOS или что-то похожее. Если такого кэша у ваших дисков нет,
|
||||
обязательно возьмите SSD под метаданные и журнал (маленькие, буквально 2 ГБ на 1 ТБ HDD-места).
|
||||
- Возьмите быструю сеть, минимум 10 гбит/с. Идеал - что-то вроде Mellanox ConnectX-4 с RoCEv2.
|
||||
- Для лучшей производительности отключите энергосбережение CPU: `cpupower idle-set -D 0 && cpupower frequency-set -g performance`.
|
||||
- [Установите пакеты Vitastor](../installation/packages.ru.md).
|
||||
|
||||
## Рекомендуемые диски
|
||||
|
||||
- SATA SSD: Micron 5100/5200/5300/5400, Samsung PM863/PM883/PM893, Intel D3-S4510/4520/4610/4620, Kingston DC500M
|
||||
- NVMe: Micron 9100/9200/9300/9400, Micron 7300/7450, Samsung PM983/PM9A3, Samsung PM1723/1735/1743,
|
||||
Intel DC-P3700/P4500/P4600, Intel D7-P5500/P5600, Intel Optane, Kingston DC1000B/DC1500M
|
||||
- HDD: HGST Ultrastar, Toshiba MG06/MG07/MG08, Seagate EXOS
|
||||
|
||||
## Настройте мониторы
|
||||
|
||||
На хостах, выделенных под мониторы:
|
||||
@@ -45,9 +56,10 @@
|
||||
}
|
||||
```
|
||||
- Инициализуйте OSD:
|
||||
- SSD: `vitastor-disk prepare /dev/sdXXX [/dev/sdYYY ...]`. Если вы используете
|
||||
десктопные SSD без конденсаторов, можете оставить кэш включённым, добавив
|
||||
опцию `--disable_data_fsync off`.
|
||||
- Только SSD или только HDD: `vitastor-disk prepare /dev/sdXXX [/dev/sdYYY ...]`.
|
||||
Если вы используете десктопные SSD без конденсаторов, добавьте опцию `--disable_data_fsync off`,
|
||||
чтобы оставить кэш записи диска включённым. НЕ добавляйте эту опцию, если используете
|
||||
жёсткие диски (HDD).
|
||||
- Гибридные, SSD+HDD: `vitastor-disk prepare --hybrid /dev/sdXXX [/dev/sdYYY ...]`.
|
||||
Передайте все ваши SSD и HDD скрипту в командной строке подряд, скрипт автоматически выделит
|
||||
разделы под журналы на SSD и данные на HDD. Скрипт пропускает HDD, на которых уже есть разделы
|
||||
|
@@ -20,6 +20,8 @@ It supports the following commands:
- [flatten](#flatten)
- [rm-data](#rm-data)
- [merge-data](#merge-data)
- [describe](#describe)
- [fix](#fix)
- [alloc-osd](#alloc-osd)
- [rm-osd](#rm-osd)

@@ -174,6 +176,51 @@ Merge layer data without changing metadata. Merge `<from>`..`<to>` to `<target>`
`<to>` must be a child of `<from>` and `<target>` may be one of the layers between
`<from>` and `<to>`, including `<from>` and `<to>`.

## describe

`vitastor-cli describe [--osds <osds>] [--object-state <states>] [--pool <pool>]
[--inode <ino>] [--min-inode <ino>] [--max-inode <ino>]
[--min-offset <offset>] [--max-offset <offset>]`

Describe unclean object locations in the cluster.

```
--osds <osds>
Only list objects from primary OSD(s) <osds>.
--object-state <states>
Only list objects in given state(s). State(s) may include:
degraded, misplaced, incomplete, corrupted, inconsistent.
--pool <pool name or number>
Only list objects in the given pool.
--inode, --min-inode, --max-inode
Restrict listing to specific inode numbers.
--min-offset, --max-offset
Restrict listing to specific offsets inside inodes.
```

## fix

`vitastor-cli fix [--objects <objects>] [--bad-osds <osds>] [--part <part>] [--check no]`

Fix inconsistent objects in the cluster by deleting some copies.

```
--objects <objects>
Objects to fix, either in plain text or JSON format. If not specified,
object list will be read from STDIN in one of the same formats.
Plain text format: 0x<inode>:0x<stripe> <any delimiter> 0x<inode>:0x<stripe> ...
JSON format: [{"inode":"0x...","stripe":"0x..."},...]
--bad-osds <osds>
Remove inconsistent copies/parts of objects from these OSDs, effectively
marking them bad and allowing Vitastor to recover objects from other copies.
--part <number>
Only remove EC part <number> (from 0 to pg_size-1), required for extreme
edge cases where one OSD has multiple parts of a EC object.
--check no
Do not recheck that requested objects are actually inconsistent,
delete requested copies/parts anyway.
```
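Since `vitastor-cli fix` accepts the object list either as plain text or as JSON (both formats are shown above), here is a small sketch converting the plain-text form into the JSON form, e.g. to post-process output from other tooling; the inode/stripe values in the example are made up:

```js
// Convert "0x<inode>:0x<stripe>" pairs (any delimiter) into the JSON form
// accepted by `vitastor-cli fix --objects`.
function objectsToJson(text)
{
    const objects = [];
    for (const m of text.matchAll(/(0x[0-9a-fA-F]+):(0x[0-9a-fA-F]+)/g))
        objects.push({ inode: m[1], stripe: m[2] });
    return JSON.stringify(objects);
}

console.log(objectsToJson('0x10000000001:0x20000 0x10000000001:0x60000'));
// [{"inode":"0x10000000001","stripe":"0x20000"},{"inode":"0x10000000001","stripe":"0x60000"}]
```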

## alloc-osd

`vitastor-cli alloc-osd`
|
||||
|
@@ -184,6 +184,59 @@ vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>
|
||||
в целевой образ `<target>`. `<to>` должен быть дочерним образом `<from>`, а `<target>`
|
||||
должен быть одним из слоёв между `<from>` и `<to>`, включая сами `<from>` и `<to>`.
|
||||
|
||||
## describe
|
||||
|
||||
`vitastor-cli describe [--osds <osds>] [--object-state <состояния>] [--pool <пул>]
|
||||
[--inode <номер>] [--min-inode <номер>] [--max-inode <номер>]
|
||||
[--min-offset <смещение>] [--max-offset <смещение>]`
|
||||
|
||||
Описать состояние "грязных" объектов в кластере, то есть таких объектов, копии
|
||||
или части которых хранятся на наборе OSD, не равном целевому.
|
||||
|
||||
```
|
||||
--osds <osds>
|
||||
Перечислять только объекты с первичных OSD из списка <osds>.
|
||||
--object-state <состояния>
|
||||
Перечислять только объекты в указанных состояниях. Возможные состояния
|
||||
объектов:
|
||||
- degraded - деградированная избыточность
|
||||
- misplaced - перемещённый
|
||||
- incomplete - нечитаемый из-за потери большего числа частей, чем допустимо
|
||||
- corrupted - с одной или более повреждённой частью
|
||||
- inconsistent - неконсистентный, с неоднозначным расхождением копий/частей
|
||||
--pool <имя или ID пула>
|
||||
Перечислять только объекты из заданного пула.
|
||||
--inode, --min-inode, --max-inode
|
||||
Перечислять только объекты из указанных номеров инодов (образов).
|
||||
--min-offset, --max-offset
|
||||
Перечислять только объекты с заданных смещений внутри образов.
|
||||
```
|
||||
|
||||
## fix
|
||||
|
||||
`vitastor-cli fix [--objects <объекты>] [--bad-osds <osds>] [--part <номер>] [--check no]`
|
||||
|
||||
Исправить неконсистентные (неоднозначные) объекты путём удаления части копий.
|
||||
|
||||
```
|
||||
--objects <объекты>
|
||||
Объекты для исправления - в простом текстовом или JSON формате. Если опция
|
||||
не указана, список объектов читается со стандартного ввода в тех же форматах.
|
||||
Простой формат: 0x<инод>:0x<смещение> <любой разделитель> 0x<инод>:0x<смещение> ...
|
||||
Формат JSON: [{"inode":"0x<инод>","stripe":"0x<смещение>"},...]
|
||||
--bad-osds <osds>
|
||||
Удалить неконсистентные копии/части объектов с данных OSD, таким образом
|
||||
признавая потерю этих копий и позволяя Vitastor-у восстановить объекты из
|
||||
других копий.
|
||||
--part <номер>
|
||||
Удалить только части EC с заданным номером (от 0 до pg_size-1). Нужно только
|
||||
в редких граничных случаях, когда один и тот же OSD содержит несколько частей
|
||||
одного EC-объекта.
|
||||
--check no
|
||||
Не перепроверять, что заданные объекты действительно в неконсистентном
|
||||
состоянии и просто удалять заданные части.
|
||||
```
|
||||
|
||||
## alloc-osd
|
||||
|
||||
`vitastor-cli alloc-osd`
|
||||
|
@@ -86,6 +86,8 @@ Options (both modes):
|
||||
--journal_size 1G/32M Set journal size (area or partition size)
|
||||
--block_size 1M/128k Set blockstore object size
|
||||
--bitmap_granularity 4k Set bitmap granularity
|
||||
--data_csum_type none Set data checksum type (crc32c or none)
|
||||
--csum_block_size 4k Set data checksum block size
|
||||
--data_device_block 4k Override data device block size
|
||||
--meta_device_block 4k Override metadata device block size
|
||||
--journal_device_block 4k Override journal device block size
|
||||
@@ -100,8 +102,9 @@ checks the device cache status on start and tries to disable cache for SATA/SAS
|
||||
If it doesn't succeed it issues a warning in the system log.
|
||||
|
||||
You can also pass other OSD options here as arguments and they'll be persisted
|
||||
to the superblock: max_write_iodepth, max_write_iodepth, min_flusher_count,
|
||||
max_flusher_count, inmemory_metadata, inmemory_journal, journal_sector_buffer_count,
|
||||
in the superblock: cached_io_data, cached_io_meta, cached_io_journal,
|
||||
inmemory_metadata, inmemory_journal, max_write_iodepth,
|
||||
min_flusher_count, max_flusher_count, journal_sector_buffer_count,
|
||||
journal_no_same_sector_overwrites, throttle_small_writes, throttle_target_iops,
|
||||
throttle_target_mbs, throttle_target_parallelism, throttle_threshold_us.
|
||||
See [Runtime OSD Parameters](../config/osd.en.md) for details.
|
||||
@@ -249,7 +252,9 @@ Options (see also [Cluster-Wide Disk Layout Parameters](../config/layout-cluster
|
||||
```
|
||||
--object_size 128k Set blockstore block size
|
||||
--bitmap_granularity 4k Set bitmap granularity
|
||||
--journal_size 32M Set journal size
|
||||
--journal_size 16M Set journal size
|
||||
--data_csum_type none Set data checksum type (crc32c or none)
|
||||
--csum_block_size 4k Set data checksum block size
|
||||
--device_block_size 4k Set device block size
|
||||
--journal_offset 0 Set journal offset
|
||||
--device_size 0 Set device size
|
||||
|
@@ -87,6 +87,8 @@ vitastor-disk - инструмент командной строки для уп
|
||||
--journal_size 1G/32M Задать размер журнала (области или раздела журнала)
|
||||
--block_size 1M/128k Задать размер объекта хранилища
|
||||
--bitmap_granularity 4k Задать гранулярность битовых карт
|
||||
--data_csum_type none Задать тип контрольных сумм (crc32c или none)
|
||||
--csum_block_size 4k Задать размер блока расчёта контрольных сумм
|
||||
--data_device_block 4k Задать размер блока устройства данных
|
||||
--meta_device_block 4k Задать размер блока метаданных
|
||||
--journal_device_block 4k Задать размер блока журнала
|
||||
@@ -101,8 +103,9 @@ vitastor-disk - инструмент командной строки для уп
|
||||
это не удаётся, в системный журнал выводится предупреждение.
|
||||
|
||||
Вы можете передать данной команде и некоторые другие опции OSD в качестве аргументов
|
||||
и они тоже будут сохранены в суперблок: max_write_iodepth, max_write_iodepth, min_flusher_count,
|
||||
max_flusher_count, inmemory_metadata, inmemory_journal, journal_sector_buffer_count,
|
||||
и они тоже будут сохранены в суперблок: cached_io_data, cached_io_meta,
|
||||
cached_io_journal, inmemory_metadata, inmemory_journal, max_write_iodepth,
|
||||
min_flusher_count, max_flusher_count, journal_sector_buffer_count,
|
||||
journal_no_same_sector_overwrites, throttle_small_writes, throttle_target_iops,
|
||||
throttle_target_mbs, throttle_target_parallelism, throttle_threshold_us.
|
||||
Читайте об этих параметрах подробнее в разделе [Изменяемые параметры OSD](../config/osd.ru.md).
|
||||
@@ -254,7 +257,9 @@ OSD отключены fsync-и.
|
||||
```
|
||||
--object_size 128k Размер блока хранилища
|
||||
--bitmap_granularity 4k Гранулярность битовых карт
|
||||
--journal_size 32M Размер журнала
|
||||
--journal_size 16M Размер журнала
|
||||
--data_csum_type none Задать тип контрольных сумм (crc32c или none)
|
||||
--csum_block_size 4k Задать размер блока расчёта контрольных сумм
|
||||
--device_block_size 4k Размер блока устройства
|
||||
--journal_offset 0 Смещение журнала
|
||||
--device_size 0 Размер устройства
|
||||
|
@@ -13,6 +13,8 @@ remains decent (see an example [here](../performance/comparison1.en.md#vitastor-

Vitastor Kubernetes CSI driver is based on NBD.

See also [VDUSE](qemu.en.md#vduse).

## Map image

To create a local block device for a Vitastor image run:
@@ -25,6 +27,23 @@ It will output a block device name like /dev/nbd0 which you can then use as a no

You can also use `--pool <POOL> --inode <INODE> --size <SIZE>` instead of `--image <IMAGE>` if you want.

Additional options for map command:

* `--nbd_timeout 30` \
Timeout for I/O operations in seconds after exceeding which the kernel stops
the device. You can set it to 0 to disable the timeout, but beware that you
won't be able to stop the device at all if vitastor-nbd process dies.
* `--nbd_max_devices 64 --nbd_max_part 3` \
Options for the `nbd` kernel module when modprobing it (`nbds_max` and `max_part`).
note that maximum allowed (nbds_max)*(1+max_part) is 256.
* `--logfile /path/to/log/file.txt` \
Write log messages to the specified file instead of dropping them (in background mode)
or printing them to the standard output (in foreground mode).
* `--dev_num N` \
Use the specified device /dev/nbdN instead of automatic selection.
* `--foreground 1` \
Stay in foreground, do not daemonize.

## Unmap image

To unmap the device run:
@@ -32,3 +51,27 @@ To unmap the device run:
```
vitastor-nbd unmap /dev/nbd0
```

## List mapped images

```
vitastor-nbd ls [--json]
```

Example output (normal format):

```
/dev/nbd0
image: bench
pid: 584536

/dev/nbd1
image: bench1
pid: 584546
```

Example output (JSON format):

```
{"/dev/nbd0": {"image": "bench", "pid": 584536}, "/dev/nbd1": {"image": "bench1", "pid": 584546}}
```
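The JSON form shown above is a plain object keyed by device path, so it is easy to consume from scripts. A sketch that lists mapped devices from the `vitastor-nbd ls --json` output; invoking the command via child_process is the assumption of this example:

```js
// List NBD devices mapped by vitastor-nbd, using the JSON output format shown above.
const { execSync } = require('child_process');

const devices = JSON.parse(execSync('vitastor-nbd ls --json').toString());
for (const [dev, info] of Object.entries(devices))
    console.log(`${dev}: image ${info.image}, pid ${info.pid}`);
// /dev/nbd0: image bench, pid 584536
```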
|
||||
|
@@ -16,6 +16,8 @@ NBD немного снижает производительность из-за
|
||||
|
||||
CSI-драйвер Kubernetes Vitastor основан на NBD.
|
||||
|
||||
Смотрите также [VDUSE](qemu.ru.md#vduse).
|
||||
|
||||
## Подключить устройство
|
||||
|
||||
Чтобы создать локальное блочное устройство для образа, выполните команду:
|
||||
@@ -30,6 +32,27 @@ vitastor-nbd map --etcd_address 10.115.0.10:2379/v3 --image testimg
|
||||
Для обращения по номеру инода, аналогично другим командам, можно использовать опции
|
||||
`--pool <POOL> --inode <INODE> --size <SIZE>` вместо `--image testimg`.
|
||||
|
||||
Дополнительные опции для команды подключения NBD-устройства:
|
||||
|
||||
* `--nbd_timeout 30` \
|
||||
Максимальное время выполнения любой операции чтения/записи в секундах, при
|
||||
превышении которого ядро остановит NBD-устройство. Вы можете установить опцию
|
||||
в 0, чтобы отключить ограничение времени, но имейте в виду, что в этом случае
|
||||
вы вообще не сможете отключить NBD-устройство при нештатном завершении процесса
|
||||
vitastor-nbd.
|
||||
* `--nbd_max_devices 64 --nbd_max_part 3` \
|
||||
Опции, передаваемые модулю ядра nbd, если его загружает vitastor-nbd
|
||||
(`nbds_max` и `max_part`). Имейте в виду, что (nbds_max)*(1+max_part)
|
||||
обычно не должно превышать 256.
|
||||
* `--logfile /path/to/log/file.txt` \
|
||||
Писать сообщения о процессе работы в заданный файл, вместо пропуска их
|
||||
при фоновом режиме запуска или печати на стандартный вывод при запуске
|
||||
в консоли с `--foreground 1`.
|
||||
* `--dev_num N` \
|
||||
Использовать заданное устройство `/dev/nbdN` вместо автоматического подбора.
|
||||
* `--foreground 1` \
|
||||
Не уводить процесс в фоновый режим.
|
||||
|
||||
## Отключить устройство
|
||||
|
||||
Для отключения устройства выполните:
|
||||
@@ -37,3 +60,27 @@ vitastor-nbd map --etcd_address 10.115.0.10:2379/v3 --image testimg
|
||||
```
|
||||
vitastor-nbd unmap /dev/nbd0
|
||||
```
|
||||
|
||||
## Вывести подключённые устройства
|
||||
|
||||
```
|
||||
vitastor-nbd ls [--json]
|
||||
```
|
||||
|
||||
Пример вывода в обычном формате:
|
||||
|
||||
```
|
||||
/dev/nbd0
|
||||
image: bench
|
||||
pid: 584536
|
||||
|
||||
/dev/nbd1
|
||||
image: bench1
|
||||
pid: 584546
|
||||
```
|
||||
|
||||
Пример вывода в JSON-формате:
|
||||
|
||||
```
|
||||
{"/dev/nbd0": {"image": "bench", "pid": 584536}, "/dev/nbd1": {"image": "bench1", "pid": 584546}}
|
||||
```
|
||||
|
@@ -29,7 +29,7 @@ vitastor-nfs [--etcd_address ADDR] [ДРУГИЕ ОПЦИИ]
|
||||
--bind <IP> принимать соединения по адресу <IP> (по умолчанию 0.0.0.0 - на всех)
|
||||
--nfspath <PATH> установить путь NFS-экспорта в <PATH> (по умолчанию /)
|
||||
--port <PORT> использовать порт <PORT> для NFS-сервисов (по умолчанию 2049)
|
||||
--pool <POOL> исползовать пул <POOL> для новых образов (обязательно, если пул в кластере не один)
|
||||
--pool <POOL> использовать пул <POOL> для новых образов (обязательно, если пул в кластере не один)
|
||||
--foreground 1 не уходить в фон после запуска
|
||||
```
|
||||
|
||||
|
@@ -83,3 +83,44 @@ qemu-img rebase -u -b '' testimg.qcow2
This can be used for backups. Just note that exporting an image that is currently being written to
is of course unsafe and doesn't produce a consistent result, so only export snapshots if you do this
on a live VM.

## VDUSE

Linux kernel, starting with version 5.15, supports a new interface for attaching virtual disks
to the host - VDUSE (vDPA Device in Userspace). QEMU, starting with 7.2, has support for
exporting QEMU block devices over this protocol using qemu-storage-daemon.

VDUSE has the same problem as other FUSE-like interfaces in Linux: if a userspace process hangs,
for example, if it loses connectivity with Vitastor cluster - active processes doing I/O may
hang in the D state (uninterruptible sleep) and you won't be able to kill them even with kill -9.
In this case reboot will be the only way to remove VDUSE devices from system.

On the other hand, VDUSE is faster than [NBD](nbd.en.md), so you may prefer to use it if
performance is important for you. Approximate performance numbers:
direct fio benchmark - 115000 iops, NBD - 60000 iops, VDUSE - 90000 iops.

To try VDUSE you need at least Linux 5.15, built with VDUSE support
(CONFIG_VIRTIO_VDPA=m and CONFIG_VDPA_USER=m). Debian Linux kernels have these options
disabled by now, so if you want to try it on Debian, use a kernel from Ubuntu
[kernel-ppa/mainline](https://kernel.ubuntu.com/~kernel-ppa/mainline/) or Proxmox.

Commands to attach Vitastor image as a VDUSE device:

```
modprobe vduse
modprobe virtio-vdpa
qemu-storage-daemon --daemonize --blockdev '{"node-name":"test1","driver":"vitastor",\
"etcd-host":"192.168.7.2:2379/v3","image":"testosd1","cache":{"direct":true,"no-flush":false},"discard":"unmap"}' \
--export vduse-blk,id=test1,node-name=test1,name=test1,num-queues=16,queue-size=128,writable=true
vdpa dev add name test1 mgmtdev vduse
```

After running these commands /dev/vda device will appear in the system and you'll be able to
use it as a normal disk.

To remove the device:

```
vdpa dev del test1
kill <qemu-storage-daemon_process_PID>
```
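The `--blockdev` argument in the attach example above is ordinary JSON, so it can be generated instead of hand-edited when exporting several images. A sketch of that; the etcd address and image name are the same placeholders as in the example, and only option names that appear in it are used (shell quoting of the generated arguments is not handled here):

```js
// Build qemu-storage-daemon arguments for a VDUSE export of a Vitastor image,
// mirroring the attach example above.
function vduseArgs(name, image, etcd)
{
    const blockdev = {
        'node-name': name, driver: 'vitastor', 'etcd-host': etcd, image,
        cache: { direct: true, 'no-flush': false }, discard: 'unmap',
    };
    return [
        '--daemonize',
        '--blockdev', JSON.stringify(blockdev),
        '--export', `vduse-blk,id=${name},node-name=${name},name=${name},num-queues=16,queue-size=128,writable=true`,
    ];
}

console.log(vduseArgs('test1', 'testosd1', '192.168.7.2:2379/v3'));
```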
|
||||
|
@@ -87,3 +87,44 @@ qemu-img rebase -u -b '' testimg.qcow2
|
||||
Это можно использовать для резервного копирования. Только помните, что экспортировать образ, в который
|
||||
в то же время идёт запись, небезопасно - результат чтения не будет целостным. Так что если вы работаете
|
||||
с активными виртуальными машинами, экспортируйте только их снимки, но не сам образ.
|
||||
|
||||
## VDUSE
|
||||
|
||||
В Linux, начиная с версии ядра 5.15, доступен новый интерфейс для подключения виртуальных дисков
|
||||
к системе - VDUSE (vDPA Device in Userspace), а в QEMU, начиная с версии 7.2, есть поддержка
|
||||
экспорта блочных устройств QEMU по этому протоколу через qemu-storage-daemon.
|
||||
|
||||
VDUSE страдает общей проблемой FUSE-подобных интерфейсов в Linux: если пользовательский процесс
|
||||
подвиснет, например, если будет потеряна связь с кластером Vitastor - читающие/пишущие в кластер
|
||||
процессы могут "залипнуть" в состоянии D (непрерываемый сон) и их будет невозможно убить даже
|
||||
через kill -9. В этом случае удалить из системы устройство можно только перезагрузившись.
|
||||
|
||||
С другой стороны, VDUSE быстрее по сравнению с [NBD](nbd.ru.md), поэтому его может
|
||||
быть предпочтительно использовать там, где производительность важнее. Порядок показателей:
|
||||
прямое тестирование через fio - 115000 iops, NBD - 60000 iops, VDUSE - 90000 iops.
|
||||
|
||||
Чтобы использовать VDUSE, вам нужно ядро Linux версии хотя бы 5.15, собранное с поддержкой
|
||||
VDUSE (CONFIG_VIRTIO_VDPA=m и CONFIG_VDPA_USER=m). В ядрах в Debian Linux поддержка пока
|
||||
отключена - если хотите попробовать эту функцию на Debian, поставьте ядро из Ubuntu
|
||||
[kernel-ppa/mainline](https://kernel.ubuntu.com/~kernel-ppa/mainline/) или из Proxmox.
|
||||
|
||||
Команды для подключения виртуального диска через VDUSE:
|
||||
|
||||
```
|
||||
modprobe vduse
|
||||
modprobe virtio-vdpa
|
||||
qemu-storage-daemon --daemonize --blockdev '{"node-name":"test1","driver":"vitastor",\
|
||||
"etcd-host":"192.168.7.2:2379/v3","image":"testosd1","cache":{"direct":true,"no-flush":false},"discard":"unmap"}' \
|
||||
--export vduse-blk,id=test1,node-name=test1,name=test1,num-queues=16,queue-size=128,writable=true
|
||||
vdpa dev add name test1 mgmtdev vduse
|
||||
```
|
||||
|
||||
После этого в системе появится устройство /dev/vda, которое можно будет использовать как
|
||||
обычный диск.
|
||||
|
||||
Для удаления устройства из системы:
|
||||
|
||||
```
|
||||
vdpa dev del test1
|
||||
kill <PID_процесса_qemu-storage-daemon>
|
||||
```
|
||||
|
@@ -63,8 +63,9 @@ Wants=network-online.target local-fs.target time-sync.target
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
ExecStart=/usr/local/bin/etcd -name etcd${num} --data-dir /var/lib/etcd${num}.etcd \\
|
||||
--advertise-client-urls http://${etcds[num]}:2379 --listen-client-urls http://${etcds[num]}:2379 \\
|
||||
Environment=GOGC=50
|
||||
ExecStart=etcd -name etcd${num} --data-dir /var/lib/etcd${num}.etcd \\
|
||||
--snapshot-count 10000 --advertise-client-urls http://${etcds[num]}:2379 --listen-client-urls http://${etcds[num]}:2379 \\
|
||||
--initial-advertise-peer-urls http://${etcds[num]}:2380 --listen-peer-urls http://${etcds[num]}:2380 \\
|
||||
--initial-cluster-token vitastor-etcd-1 --initial-cluster ${etcd_cluster} \\
|
||||
--initial-cluster-state new --max-txn-ops=100000 --max-request-bytes=104857600 \\
|
||||
|
mon/mon.js
@@ -104,12 +104,21 @@ const etcd_tree = {
|
||||
autosync_writes: 128,
|
||||
client_queue_depth: 128, // unused
|
||||
recovery_queue_depth: 4,
|
||||
recovery_pg_switch: 128,
|
||||
recovery_sync_batch: 16,
|
||||
no_recovery: false,
|
||||
no_rebalance: false,
|
||||
print_stats_interval: 3,
|
||||
slow_log_interval: 10,
|
||||
inode_vanish_time: 60,
|
||||
auto_scrub: false,
|
||||
no_scrub: false,
|
||||
scrub_interval: '30d', // 1s/1m/1h/1d
|
||||
scrub_queue_depth: 1,
|
||||
scrub_sleep: 0, // milliseconds
|
||||
scrub_list_limit: 1000, // objects to list on one scrub iteration
|
||||
scrub_find_best: true,
|
||||
scrub_ec_max_bruteforce: 100, // maximum EC error locator brute-force iterators
|
||||
// blockstore - fixed in superblock
|
||||
block_size,
|
||||
disk_alignment,
|
||||
@@ -172,6 +181,8 @@ const etcd_tree = {
|
||||
osd_tags?: 'nvme' | [ 'nvme', ... ],
|
||||
// prefer to put primary on OSD with these tags
|
||||
primary_affinity_tags?: 'nvme' | [ 'nvme', ... ],
|
||||
// scrub interval
|
||||
scrub_interval?: '30d',
|
||||
},
|
||||
...
|
||||
}, */
|
||||
@@ -267,7 +278,7 @@ const etcd_tree = {
|
||||
primary: osd_num_t,
|
||||
state: ("starting"|"peering"|"incomplete"|"active"|"repeering"|"stopping"|"offline"|
|
||||
"degraded"|"has_incomplete"|"has_degraded"|"has_misplaced"|"has_unclean"|
|
||||
"has_invalid"|"left_on_dead")[],
|
||||
"has_invalid"|"has_inconsistent"|"has_corrupted"|"left_on_dead"|"scrubbing")[],
|
||||
}
|
||||
}, */
|
||||
},
|
||||
@@ -289,6 +300,7 @@ const etcd_tree = {
|
||||
osd_sets: osd_num_t[][],
|
||||
all_peers: osd_num_t[],
|
||||
epoch: uint64_t,
|
||||
next_scrub: uint64_t,
|
||||
},
|
||||
}, */
|
||||
},
|
||||
@@ -379,6 +391,7 @@ class Mon
|
||||
this.etcd_start_timeout = (config.etcd_start_timeout || 5) * 1000;
|
||||
this.state = JSON.parse(JSON.stringify(this.constructor.etcd_tree));
|
||||
this.signals_set = false;
|
||||
this.stat_time = Date.now();
|
||||
this.ws = null;
|
||||
this.ws_alive = false;
|
||||
this.ws_keepalive_timer = null;
|
||||
@@ -526,10 +539,18 @@ class Mon
|
||||
{
|
||||
retries = 1;
|
||||
}
|
||||
const tried = {};
|
||||
while (retries < 0 || retry < retries)
|
||||
{
|
||||
const cur_addr = this.pick_next_etcd();
|
||||
const base = 'ws'+cur_addr.substr(4);
|
||||
let now = Date.now();
|
||||
if (tried[base] && now-tried[base] < timeout)
|
||||
{
|
||||
await new Promise(ok => setTimeout(ok, timeout-(now-tried[base])));
|
||||
now = Date.now();
|
||||
}
|
||||
tried[base] = now;
|
||||
const ok = await new Promise((ok, no) =>
|
||||
{
|
||||
const timer_id = setTimeout(() =>
|
||||
@@ -1398,65 +1419,75 @@ class Mon
|
||||
}
|
||||
}
|
||||
|
||||
derive_osd_stats(st, prev)
|
||||
{
|
||||
const zero_stats = { op: { bps: 0n, iops: 0n, lat: 0n }, subop: { iops: 0n, lat: 0n }, recovery: { bps: 0n, iops: 0n } };
|
||||
const diff = { op_stats: {}, subop_stats: {}, recovery_stats: {} };
|
||||
if (!st || !st.time || prev && (prev.time || this.stat_time/1000) >= st.time)
|
||||
{
|
||||
return diff;
|
||||
}
|
||||
const timediff = BigInt(st.time*1000 - (prev && prev.time*1000 || this.stat_time));
|
||||
for (const op in st.op_stats||{})
|
||||
{
|
||||
const pr = prev && prev.op_stats && prev.op_stats[op];
|
||||
let c = st.op_stats[op];
|
||||
c = { bytes: BigInt(c.bytes||0), usec: BigInt(c.usec||0), count: BigInt(c.count||0) };
|
||||
const b = c.bytes - BigInt(pr && pr.bytes||0);
|
||||
const us = c.usec - BigInt(pr && pr.usec||0);
|
||||
const n = c.count - BigInt(pr && pr.count||0);
|
||||
if (n > 0)
|
||||
diff.op_stats[op] = { ...c, bps: b*1000n/timediff, iops: n*1000n/timediff, lat: us/n };
|
||||
}
|
||||
for (const op in st.subop_stats||{})
|
||||
{
|
||||
const pr = prev && prev.subop_stats && prev.subop_stats[op];
|
||||
let c = st.subop_stats[op];
|
||||
c = { usec: BigInt(c.usec||0), count: BigInt(c.count||0) };
|
||||
const us = c.usec - BigInt(pr && pr.usec||0);
|
||||
const n = c.count - BigInt(pr && pr.count||0);
|
||||
if (n > 0)
|
||||
diff.subop_stats[op] = { ...c, iops: n*1000n/timediff, lat: us/n };
|
||||
}
|
||||
for (const op in st.recovery_stats||{})
|
||||
{
|
||||
const pr = prev && prev.recovery_stats && prev.recovery_stats[op];
|
||||
let c = st.recovery_stats[op];
|
||||
c = { bytes: BigInt(c.bytes||0), count: BigInt(c.count||0) };
|
||||
const b = c.bytes - BigInt(pr && pr.bytes||0);
|
||||
const n = c.count - BigInt(pr && pr.count||0);
|
||||
if (n > 0)
|
||||
diff.recovery_stats[op] = { ...c, bps: b*1000n/timediff, iops: n*1000n/timediff };
|
||||
}
|
||||
return diff;
|
||||
}
|
||||
|
||||
sum_op_stats(timestamp, prev_stats)
|
||||
{
|
||||
const op_stats = {}, subop_stats = {}, recovery_stats = {};
|
||||
const sum_diff = { op_stats: {}, subop_stats: {}, recovery_stats: {} };
|
||||
if (!prev_stats || prev_stats.timestamp >= timestamp)
|
||||
{
|
||||
return sum_diff;
|
||||
}
|
||||
const tm = BigInt(timestamp - (prev_stats.timestamp || 0));
|
||||
// Sum derived values instead of deriving summed
|
||||
for (const osd in this.state.osd.stats)
|
||||
{
|
||||
const st = this.state.osd.stats[osd]||{};
|
||||
for (const op in st.op_stats||{})
|
||||
const derived = this.derive_osd_stats(this.state.osd.stats[osd],
|
||||
this.prev_stats && this.prev_stats.osd_stats && this.prev_stats.osd_stats[osd]);
|
||||
for (const type in derived)
|
||||
{
|
||||
op_stats[op] = op_stats[op] || { count: 0n, usec: 0n, bytes: 0n };
|
||||
op_stats[op].count += BigInt(st.op_stats[op].count||0);
|
||||
op_stats[op].usec += BigInt(st.op_stats[op].usec||0);
|
||||
op_stats[op].bytes += BigInt(st.op_stats[op].bytes||0);
|
||||
}
|
||||
for (const op in st.subop_stats||{})
|
||||
{
|
||||
subop_stats[op] = subop_stats[op] || { count: 0n, usec: 0n };
|
||||
subop_stats[op].count += BigInt(st.subop_stats[op].count||0);
|
||||
subop_stats[op].usec += BigInt(st.subop_stats[op].usec||0);
|
||||
}
|
||||
for (const op in st.recovery_stats||{})
|
||||
{
|
||||
recovery_stats[op] = recovery_stats[op] || { count: 0n, bytes: 0n };
|
||||
recovery_stats[op].count += BigInt(st.recovery_stats[op].count||0);
|
||||
recovery_stats[op].bytes += BigInt(st.recovery_stats[op].bytes||0);
|
||||
for (const op in derived[type])
|
||||
{
|
||||
for (const k in derived[type][op])
|
||||
{
|
||||
sum_diff[type][op] = sum_diff[type][op] || {};
|
||||
sum_diff[type][op][k] = (sum_diff[type][op][k] || 0n) + derived[type][op][k];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (prev_stats && prev_stats.timestamp >= timestamp)
|
||||
{
|
||||
prev_stats = null;
|
||||
}
|
||||
const tm = prev_stats ? BigInt(timestamp - prev_stats.timestamp) : 0;
|
||||
for (const op in op_stats)
|
||||
{
|
||||
if (prev_stats && prev_stats.op_stats && prev_stats.op_stats[op])
|
||||
{
|
||||
op_stats[op].bps = (op_stats[op].bytes - prev_stats.op_stats[op].bytes) * 1000n / tm;
|
||||
op_stats[op].iops = (op_stats[op].count - prev_stats.op_stats[op].count) * 1000n / tm;
|
||||
op_stats[op].lat = (op_stats[op].usec - prev_stats.op_stats[op].usec)
|
||||
/ ((op_stats[op].count - prev_stats.op_stats[op].count) || 1n);
|
||||
}
|
||||
}
|
||||
for (const op in subop_stats)
|
||||
{
|
||||
if (prev_stats && prev_stats.subop_stats && prev_stats.subop_stats[op])
|
||||
{
|
||||
subop_stats[op].iops = (subop_stats[op].count - prev_stats.subop_stats[op].count) * 1000n / tm;
|
||||
subop_stats[op].lat = (subop_stats[op].usec - prev_stats.subop_stats[op].usec)
|
||||
/ ((subop_stats[op].count - prev_stats.subop_stats[op].count) || 1n);
|
||||
}
|
||||
}
|
||||
for (const op in recovery_stats)
|
||||
{
|
||||
if (prev_stats && prev_stats.recovery_stats && prev_stats.recovery_stats[op])
|
||||
{
|
||||
recovery_stats[op].bps = (recovery_stats[op].bytes - prev_stats.recovery_stats[op].bytes) * 1000n / tm;
|
||||
recovery_stats[op].iops = (recovery_stats[op].count - prev_stats.recovery_stats[op].count) * 1000n / tm;
|
||||
}
|
||||
}
|
||||
return { op_stats, subop_stats, recovery_stats };
|
||||
return sum_diff;
|
||||
}
|
||||
|
||||
sum_object_counts()
|
||||
@@ -1474,10 +1505,14 @@ class Mon
|
||||
break;
|
||||
}
|
||||
}
|
||||
const pool_cfg = (this.state.config.pools[pool_id]||{});
|
||||
if (!object_size)
|
||||
{
|
||||
object_size = (this.state.config.pools[pool_id]||{}).block_size ||
|
||||
this.config.block_size || 131072;
|
||||
object_size = pool_cfg.block_size || this.config.block_size || 131072;
|
||||
}
|
||||
if (pool_cfg.scheme !== 'replicated')
|
||||
{
|
||||
object_size *= ((pool_cfg.pg_size||0) - (pool_cfg.parity_chunks||0));
|
||||
}
|
||||
object_size = BigInt(object_size);
|
||||
for (const pg_num in this.state.pg.stats[pool_id])
|
||||
@@ -1585,7 +1620,7 @@ class Mon
|
||||
}
|
||||
}
|
||||
}
|
||||
return inode_stats;
|
||||
return { inode_stats, seen_pools };
|
||||
}
|
||||
|
||||
serialize_bigints(obj)
|
||||
@@ -1611,11 +1646,12 @@ class Mon
|
||||
const timestamp = Date.now();
|
||||
const { object_counts, object_bytes } = this.sum_object_counts();
|
||||
let stats = this.sum_op_stats(timestamp, this.prev_stats);
|
||||
let inode_stats = this.sum_inode_stats(
|
||||
let { inode_stats, seen_pools } = this.sum_inode_stats(
|
||||
this.prev_stats ? this.prev_stats.inode_stats : null,
|
||||
timestamp, this.prev_stats ? this.prev_stats.timestamp : null
|
||||
);
|
||||
this.prev_stats = { timestamp, ...stats, inode_stats };
|
||||
this.prev_stats = { timestamp, inode_stats, osd_stats: { ...this.state.osd.stats } };
|
||||
this.stat_time = Date.now();
|
||||
stats.object_counts = object_counts;
|
||||
stats.object_bytes = object_bytes;
|
||||
stats = this.serialize_bigints(stats);
|
||||
@@ -1645,12 +1681,22 @@ class Mon
|
||||
}
|
||||
for (const pool_id in this.state.pool.stats)
|
||||
{
|
||||
const pool_stats = { ...this.state.pool.stats[pool_id] };
|
||||
this.serialize_bigints(pool_stats);
|
||||
txn.push({ requestPut: {
|
||||
key: b64(this.etcd_prefix+'/pool/stats/'+pool_id),
|
||||
value: b64(JSON.stringify(pool_stats)),
|
||||
} });
|
||||
if (!seen_pools[pool_id])
|
||||
{
|
||||
txn.push({ requestDeleteRange: {
|
||||
key: b64(this.etcd_prefix+'/pool/stats/'+pool_id),
|
||||
} });
|
||||
delete this.state.pool.stats[pool_id];
|
||||
}
|
||||
else
|
||||
{
|
||||
const pool_stats = { ...this.state.pool.stats[pool_id] };
|
||||
this.serialize_bigints(pool_stats);
|
||||
txn.push({ requestPut: {
|
||||
key: b64(this.etcd_prefix+'/pool/stats/'+pool_id),
|
||||
value: b64(JSON.stringify(pool_stats)),
|
||||
} });
|
||||
}
|
||||
}
|
||||
if (txn.length)
|
||||
{
|
||||
@@ -1731,13 +1777,14 @@ class Mon
else if (key_parts[0] === 'osd' && key_parts[1] === 'stats')
{
// Recheck OSD tree on OSD addition/deletion
const osd_num = key_parts[2];
if ((!old) != (!kv.value) || old && kv.value && old.size != kv.value.size)
{
this.schedule_recheck();
}
// Recheck PGs <osd_out_time> after last OSD statistics report
this.schedule_next_recheck_at(
!this.state.osd.stats[key[2]] ? 0 : this.state.osd.stats[key[2]].time+this.config.osd_out_time
!this.state.osd.stats[osd_num] ? 0 : this.state.osd.stats[osd_num].time+this.config.osd_out_time
);
}
}
@@ -1749,10 +1796,18 @@ class Mon
{
retries = 1;
}
const tried = {};
while (retries < 0 || retry < retries)
{
retry++;
const base = this.pick_next_etcd();
let now = Date.now();
if (tried[base] && now-tried[base] < timeout)
{
await new Promise(ok => setTimeout(ok, timeout-(now-tried[base])));
now = Date.now();
}
tried[base] = now;
const res = await POST(base+path, body, timeout);
if (res.error)
{
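Note: the last hunk adds a per-endpoint cooldown to the etcd retry loop - an address recorded in `tried` is not reused until `timeout` ms have elapsed, which spreads retries across etcd members instead of hammering a single one. A sketch of the pattern, with hypothetical pick_next()/request() helpers standing in for pick_next_etcd()/POST():

// Retry a request across endpoints, waiting out a per-endpoint cooldown.
async function retry_request(pick_next, request, path, body, timeout, retries)
{
    const tried = {};
    for (let retry = 0; retries < 0 || retry < retries; retry++)
    {
        const base = pick_next();
        let now = Date.now();
        if (tried[base] && now - tried[base] < timeout)
        {
            // endpoint used too recently - wait out the remaining cooldown
            await new Promise(ok => setTimeout(ok, timeout - (now - tried[base])));
            now = Date.now();
        }
        tried[base] = now;
        const res = await request(base + path, body, timeout);
        if (!res.error)
            return res;
    }
    return { error: 'request failed after all retries' };
}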
@@ -388,8 +388,6 @@ sub unmap_volume
my ($class, $storeid, $scfg, $volname, $snapname) = @_;
my $prefix = defined $scfg->{vitastor_prefix} ? $scfg->{vitastor_prefix} : 'pve/';

return 1 if !$scfg->{vitastor_nbd};

my ($vtype, $name, $vmid) = $class->parse_volname($volname);
$name .= '@'.$snapname if $snapname;

@@ -413,7 +411,7 @@ sub activate_volume
sub deactivate_volume
{
my ($class, $storeid, $scfg, $volname, $snapname, $cache) = @_;
$class->unmap_volume($storeid, $scfg, $volname, $snapname);
$class->unmap_volume($storeid, $scfg, $volname, $snapname) if $scfg->{vitastor_nbd};
return 1;
}
@@ -50,7 +50,7 @@ from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import volume_utils

VERSION = '0.8.9'
VERSION = '1.0.0'

LOG = logging.getLogger(__name__)
644  patches/libvirt-9.0-vitastor.diff  Normal file
@@ -0,0 +1,644 @@
|
||||
commit e6f935157944279c2c0634915c3c00feeec748c9
|
||||
Author: Vitaliy Filippov <vitalif@yourcmc.ru>
|
||||
Date: Mon Jun 19 00:58:19 2023 +0300
|
||||
|
||||
Add Vitastor support
|
||||
|
||||
diff --git a/include/libvirt/libvirt-storage.h b/include/libvirt/libvirt-storage.h
|
||||
index aaad4a3..5f5daa8 100644
|
||||
--- a/include/libvirt/libvirt-storage.h
|
||||
+++ b/include/libvirt/libvirt-storage.h
|
||||
@@ -326,6 +326,7 @@ typedef enum {
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_ZFS = 1 << 17, /* (Since: 1.2.8) */
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE = 1 << 18, /* (Since: 3.1.0) */
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI_DIRECT = 1 << 19, /* (Since: 5.6.0) */
|
||||
+ VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR = 1 << 20, /* (Since: 5.0.0) */
|
||||
} virConnectListAllStoragePoolsFlags;
|
||||
|
||||
int virConnectListAllStoragePools(virConnectPtr conn,
|
||||
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
|
||||
index 45965fa..b7c23d3 100644
|
||||
--- a/src/conf/domain_conf.c
|
||||
+++ b/src/conf/domain_conf.c
|
||||
@@ -7103,7 +7103,8 @@ virDomainDiskSourceNetworkParse(xmlNodePtr node,
|
||||
src->configFile = virXPathString("string(./config/@file)", ctxt);
|
||||
|
||||
if (src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTP ||
|
||||
- src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS)
|
||||
+ src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS ||
|
||||
+ src->protocol == VIR_STORAGE_NET_PROTOCOL_VITASTOR)
|
||||
src->query = virXMLPropString(node, "query");
|
||||
|
||||
if (virDomainStorageNetworkParseHosts(node, ctxt, &src->hosts, &src->nhosts) < 0)
|
||||
@@ -30121,6 +30122,7 @@ virDomainStorageSourceTranslateSourcePool(virStorageSource *src,
|
||||
|
||||
case VIR_STORAGE_POOL_MPATH:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_SHEEPDOG:
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
diff --git a/src/conf/domain_validate.c b/src/conf/domain_validate.c
|
||||
index 5a9bf20..05058b8 100644
|
||||
--- a/src/conf/domain_validate.c
|
||||
+++ b/src/conf/domain_validate.c
|
||||
@@ -494,6 +494,7 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
@@ -541,7 +542,7 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
||||
}
|
||||
}
|
||||
|
||||
- /* internal snapshots and config files are currently supported only with rbd: */
|
||||
+ /* internal snapshots are currently supported only with rbd: */
|
||||
if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK &&
|
||||
src->protocol != VIR_STORAGE_NET_PROTOCOL_RBD) {
|
||||
if (src->snapshot) {
|
||||
@@ -550,11 +551,15 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
||||
"only with 'rbd' disks"));
|
||||
return -1;
|
||||
}
|
||||
-
|
||||
+ }
|
||||
+ /* config files are currently supported only with rbd and vitastor: */
|
||||
+ if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK &&
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_RBD &&
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_VITASTOR) {
|
||||
if (src->configFile) {
|
||||
virReportError(VIR_ERR_XML_ERROR, "%s",
|
||||
_("<config> element is currently supported "
|
||||
- "only with 'rbd' disks"));
|
||||
+ "only with 'rbd' and 'vitastor' disks"));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
diff --git a/src/conf/schemas/domaincommon.rng b/src/conf/schemas/domaincommon.rng
|
||||
index 6cb0a20..8bf7de9 100644
|
||||
--- a/src/conf/schemas/domaincommon.rng
|
||||
+++ b/src/conf/schemas/domaincommon.rng
|
||||
@@ -1972,6 +1972,35 @@
|
||||
</element>
|
||||
</define>
|
||||
|
||||
+ <define name="diskSourceNetworkProtocolVitastor">
|
||||
+ <element name="source">
|
||||
+ <interleave>
|
||||
+ <attribute name="protocol">
|
||||
+ <value>vitastor</value>
|
||||
+ </attribute>
|
||||
+ <ref name="diskSourceCommon"/>
|
||||
+ <optional>
|
||||
+ <attribute name="name"/>
|
||||
+ </optional>
|
||||
+ <optional>
|
||||
+ <attribute name="query"/>
|
||||
+ </optional>
|
||||
+ <zeroOrMore>
|
||||
+ <ref name="diskSourceNetworkHost"/>
|
||||
+ </zeroOrMore>
|
||||
+ <optional>
|
||||
+ <element name="config">
|
||||
+ <attribute name="file">
|
||||
+ <ref name="absFilePath"/>
|
||||
+ </attribute>
|
||||
+ <empty/>
|
||||
+ </element>
|
||||
+ </optional>
|
||||
+ <empty/>
|
||||
+ </interleave>
|
||||
+ </element>
|
||||
+ </define>
|
||||
+
|
||||
<define name="diskSourceNetworkProtocolISCSI">
|
||||
<element name="source">
|
||||
<attribute name="protocol">
|
||||
@@ -2264,6 +2293,7 @@
|
||||
<ref name="diskSourceNetworkProtocolSimple"/>
|
||||
<ref name="diskSourceNetworkProtocolVxHS"/>
|
||||
<ref name="diskSourceNetworkProtocolNFS"/>
|
||||
+ <ref name="diskSourceNetworkProtocolVitastor"/>
|
||||
</choice>
|
||||
</define>
|
||||
|
||||
diff --git a/src/conf/storage_conf.c b/src/conf/storage_conf.c
|
||||
index f5a9636..8339bc4 100644
|
||||
--- a/src/conf/storage_conf.c
|
||||
+++ b/src/conf/storage_conf.c
|
||||
@@ -56,7 +56,7 @@ VIR_ENUM_IMPL(virStoragePool,
|
||||
"logical", "disk", "iscsi",
|
||||
"iscsi-direct", "scsi", "mpath",
|
||||
"rbd", "sheepdog", "gluster",
|
||||
- "zfs", "vstorage",
|
||||
+ "zfs", "vstorage", "vitastor",
|
||||
);
|
||||
|
||||
VIR_ENUM_IMPL(virStoragePoolFormatFileSystem,
|
||||
@@ -242,6 +242,18 @@ static virStoragePoolTypeInfo poolTypeInfo[] = {
|
||||
.formatToString = virStorageFileFormatTypeToString,
|
||||
}
|
||||
},
|
||||
+ {.poolType = VIR_STORAGE_POOL_VITASTOR,
|
||||
+ .poolOptions = {
|
||||
+ .flags = (VIR_STORAGE_POOL_SOURCE_HOST |
|
||||
+ VIR_STORAGE_POOL_SOURCE_NETWORK |
|
||||
+ VIR_STORAGE_POOL_SOURCE_NAME),
|
||||
+ },
|
||||
+ .volOptions = {
|
||||
+ .defaultFormat = VIR_STORAGE_FILE_RAW,
|
||||
+ .formatFromString = virStorageVolumeFormatFromString,
|
||||
+ .formatToString = virStorageFileFormatTypeToString,
|
||||
+ }
|
||||
+ },
|
||||
{.poolType = VIR_STORAGE_POOL_SHEEPDOG,
|
||||
.poolOptions = {
|
||||
.flags = (VIR_STORAGE_POOL_SOURCE_HOST |
|
||||
@@ -542,6 +554,11 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
|
||||
_("element 'name' is mandatory for RBD pool"));
|
||||
return -1;
|
||||
}
|
||||
+ if (pool_type == VIR_STORAGE_POOL_VITASTOR && source->name == NULL) {
|
||||
+ virReportError(VIR_ERR_XML_ERROR, "%s",
|
||||
+ _("element 'name' is mandatory for Vitastor pool"));
|
||||
+ return -1;
|
||||
+ }
|
||||
|
||||
if (options->formatFromString) {
|
||||
g_autofree char *format = NULL;
|
||||
@@ -1132,6 +1149,7 @@ virStoragePoolDefFormatBuf(virBuffer *buf,
|
||||
/* RBD, Sheepdog, Gluster and Iscsi-direct devices are not local block devs nor
|
||||
* files, so they don't have a target */
|
||||
if (def->type != VIR_STORAGE_POOL_RBD &&
|
||||
+ def->type != VIR_STORAGE_POOL_VITASTOR &&
|
||||
def->type != VIR_STORAGE_POOL_SHEEPDOG &&
|
||||
def->type != VIR_STORAGE_POOL_GLUSTER &&
|
||||
def->type != VIR_STORAGE_POOL_ISCSI_DIRECT) {
|
||||
diff --git a/src/conf/storage_conf.h b/src/conf/storage_conf.h
|
||||
index fc67957..720c07e 100644
|
||||
--- a/src/conf/storage_conf.h
|
||||
+++ b/src/conf/storage_conf.h
|
||||
@@ -103,6 +103,7 @@ typedef enum {
|
||||
VIR_STORAGE_POOL_GLUSTER, /* Gluster device */
|
||||
VIR_STORAGE_POOL_ZFS, /* ZFS */
|
||||
VIR_STORAGE_POOL_VSTORAGE, /* Virtuozzo Storage */
|
||||
+ VIR_STORAGE_POOL_VITASTOR, /* Vitastor */
|
||||
|
||||
VIR_STORAGE_POOL_LAST,
|
||||
} virStoragePoolType;
|
||||
@@ -454,6 +455,7 @@ VIR_ENUM_DECL(virStoragePartedFs);
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_SCSI | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_MPATH | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_RBD | \
|
||||
+ VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER | \
|
||||
VIR_CONNECT_LIST_STORAGE_POOLS_ZFS | \
|
||||
diff --git a/src/conf/storage_source_conf.c b/src/conf/storage_source_conf.c
|
||||
index cecd7e8..d7b79a4 100644
|
||||
--- a/src/conf/storage_source_conf.c
|
||||
+++ b/src/conf/storage_source_conf.c
|
||||
@@ -87,6 +87,7 @@ VIR_ENUM_IMPL(virStorageNetProtocol,
|
||||
"ssh",
|
||||
"vxhs",
|
||||
"nfs",
|
||||
+ "vitastor",
|
||||
);
|
||||
|
||||
|
||||
@@ -1286,6 +1287,7 @@ virStorageSourceNetworkDefaultPort(virStorageNetProtocol protocol)
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
return 24007;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
/* we don't provide a default for RBD */
|
||||
return 0;
|
||||
diff --git a/src/conf/storage_source_conf.h b/src/conf/storage_source_conf.h
|
||||
index 14a6825..eb4acac 100644
|
||||
--- a/src/conf/storage_source_conf.h
|
||||
+++ b/src/conf/storage_source_conf.h
|
||||
@@ -128,6 +128,7 @@ typedef enum {
|
||||
VIR_STORAGE_NET_PROTOCOL_SSH,
|
||||
VIR_STORAGE_NET_PROTOCOL_VXHS,
|
||||
VIR_STORAGE_NET_PROTOCOL_NFS,
|
||||
+ VIR_STORAGE_NET_PROTOCOL_VITASTOR,
|
||||
|
||||
VIR_STORAGE_NET_PROTOCOL_LAST
|
||||
} virStorageNetProtocol;
|
||||
diff --git a/src/conf/virstorageobj.c b/src/conf/virstorageobj.c
|
||||
index e6c187e..035b423 100644
|
||||
--- a/src/conf/virstorageobj.c
|
||||
+++ b/src/conf/virstorageobj.c
|
||||
@@ -1433,6 +1433,7 @@ virStoragePoolObjSourceFindDuplicateCb(const void *payload,
|
||||
return 1;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_ISCSI_DIRECT:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
@@ -1918,6 +1919,8 @@ virStoragePoolObjMatch(virStoragePoolObj *obj,
|
||||
(obj->def->type == VIR_STORAGE_POOL_MPATH)) ||
|
||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_RBD) &&
|
||||
(obj->def->type == VIR_STORAGE_POOL_RBD)) ||
|
||||
+ (MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR) &&
|
||||
+ (obj->def->type == VIR_STORAGE_POOL_VITASTOR)) ||
|
||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG) &&
|
||||
(obj->def->type == VIR_STORAGE_POOL_SHEEPDOG)) ||
|
||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER) &&
|
||||
diff --git a/src/libvirt-storage.c b/src/libvirt-storage.c
|
||||
index 8490034..ab2cdaa 100644
|
||||
--- a/src/libvirt-storage.c
|
||||
+++ b/src/libvirt-storage.c
|
||||
@@ -94,6 +94,7 @@ virStoragePoolGetConnect(virStoragePoolPtr pool)
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_SCSI
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_MPATH
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_RBD
|
||||
+ * VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER
|
||||
* VIR_CONNECT_LIST_STORAGE_POOLS_ZFS
|
||||
diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
|
||||
index 17ac880..59711b5 100644
|
||||
--- a/src/libxl/libxl_conf.c
|
||||
+++ b/src/libxl/libxl_conf.c
|
||||
@@ -970,6 +970,7 @@ libxlMakeNetworkDiskSrcStr(virStorageSource *src,
|
||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
virReportError(VIR_ERR_NO_SUPPORT,
|
||||
diff --git a/src/libxl/xen_xl.c b/src/libxl/xen_xl.c
|
||||
index 6919325..55ffc32 100644
|
||||
--- a/src/libxl/xen_xl.c
|
||||
+++ b/src/libxl/xen_xl.c
|
||||
@@ -1445,6 +1445,7 @@ xenFormatXLDiskSrcNet(virStorageSource *src)
|
||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
virReportError(VIR_ERR_NO_SUPPORT,
|
||||
diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
|
||||
index e865aa1..40162af 100644
|
||||
--- a/src/qemu/qemu_block.c
|
||||
+++ b/src/qemu/qemu_block.c
|
||||
@@ -604,6 +604,38 @@ qemuBlockStorageSourceGetRBDProps(virStorageSource *src,
|
||||
}
|
||||
|
||||
|
||||
+static virJSONValue *
|
||||
+qemuBlockStorageSourceGetVitastorProps(virStorageSource *src)
|
||||
+{
|
||||
+ virJSONValue *ret = NULL;
|
||||
+ virStorageNetHostDef *host;
|
||||
+ size_t i;
|
||||
+ g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
|
||||
+ g_autofree char *etcd = NULL;
|
||||
+
|
||||
+ for (i = 0; i < src->nhosts; i++) {
|
||||
+ host = src->hosts + i;
|
||||
+ if ((virStorageNetHostTransport)host->transport != VIR_STORAGE_NET_HOST_TRANS_TCP) {
|
||||
+ return NULL;
|
||||
+ }
|
||||
+ virBufferAsprintf(&buf, i > 0 ? ",%s:%u" : "%s:%u", host->name, host->port);
|
||||
+ }
|
||||
+ if (src->nhosts > 0) {
|
||||
+ etcd = virBufferContentAndReset(&buf);
|
||||
+ }
|
||||
+
|
||||
+ if (virJSONValueObjectAdd(&ret,
|
||||
+ "S:etcd-host", etcd,
|
||||
+ "S:etcd-prefix", src->query,
|
||||
+ "S:config-path", src->configFile,
|
||||
+ "s:image", src->path,
|
||||
+ NULL) < 0)
|
||||
+ return NULL;
|
||||
+
|
||||
+ return ret;
|
||||
+}
|
||||
+
|
||||
+
|
||||
static virJSONValue *
|
||||
qemuBlockStorageSourceGetSheepdogProps(virStorageSource *src)
|
||||
{
|
||||
@@ -917,6 +949,12 @@ qemuBlockStorageSourceGetBackendProps(virStorageSource *src,
|
||||
return NULL;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
+ driver = "vitastor";
|
||||
+ if (!(fileprops = qemuBlockStorageSourceGetVitastorProps(src)))
|
||||
+ return NULL;
|
||||
+ break;
|
||||
+
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
driver = "sheepdog";
|
||||
if (!(fileprops = qemuBlockStorageSourceGetSheepdogProps(src)))
|
||||
@@ -1860,6 +1898,7 @@ qemuBlockGetBackingStoreString(virStorageSource *src,
|
||||
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
||||
@@ -2242,6 +2281,12 @@ qemuBlockStorageSourceCreateGetStorageProps(virStorageSource *src,
|
||||
return -1;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
+ driver = "vitastor";
|
||||
+ if (!(location = qemuBlockStorageSourceGetVitastorProps(src)))
|
||||
+ return -1;
|
||||
+ break;
|
||||
+
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
driver = "sheepdog";
|
||||
if (!(location = qemuBlockStorageSourceGetSheepdogProps(src)))
|
||||
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
|
||||
index 2eb5653..60ee82d 100644
|
||||
--- a/src/qemu/qemu_domain.c
|
||||
+++ b/src/qemu/qemu_domain.c
|
||||
@@ -4958,7 +4958,8 @@ qemuDomainValidateStorageSource(virStorageSource *src,
|
||||
if (src->query &&
|
||||
(actualType != VIR_STORAGE_TYPE_NETWORK ||
|
||||
(src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTPS &&
|
||||
- src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP))) {
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP &&
|
||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_VITASTOR))) {
|
||||
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
||||
_("query is supported only with HTTP(S) protocols"));
|
||||
return -1;
|
||||
@@ -10129,6 +10130,7 @@ qemuDomainPrepareStorageSourceTLS(virStorageSource *src,
|
||||
break;
|
||||
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
|
||||
index b841680..a6be771 100644
|
||||
--- a/src/qemu/qemu_snapshot.c
|
||||
+++ b/src/qemu/qemu_snapshot.c
|
||||
@@ -373,6 +373,7 @@ qemuSnapshotPrepareDiskExternalInactive(virDomainSnapshotDiskDef *snapdisk,
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||
@@ -578,6 +579,7 @@ qemuSnapshotPrepareDiskInternal(virDomainDiskDef *disk,
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
||||
diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c
|
||||
index d90c1c9..e853457 100644
|
||||
--- a/src/storage/storage_driver.c
|
||||
+++ b/src/storage/storage_driver.c
|
||||
@@ -1627,6 +1627,7 @@ storageVolLookupByPathCallback(virStoragePoolObj *obj,
|
||||
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_SHEEPDOG:
|
||||
case VIR_STORAGE_POOL_ZFS:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
diff --git a/src/storage_file/storage_source_backingstore.c b/src/storage_file/storage_source_backingstore.c
|
||||
index e48ae72..2017ccc 100644
|
||||
--- a/src/storage_file/storage_source_backingstore.c
|
||||
+++ b/src/storage_file/storage_source_backingstore.c
|
||||
@@ -284,6 +284,75 @@ virStorageSourceParseRBDColonString(const char *rbdstr,
|
||||
}
|
||||
|
||||
|
||||
+static int
|
||||
+virStorageSourceParseVitastorColonString(const char *colonstr,
|
||||
+ virStorageSource *src)
|
||||
+{
|
||||
+ char *p, *e, *next;
|
||||
+ g_autofree char *options = NULL;
|
||||
+
|
||||
+ /* optionally skip the "vitastor:" prefix if provided */
|
||||
+ if (STRPREFIX(colonstr, "vitastor:"))
|
||||
+ colonstr += strlen("vitastor:");
|
||||
+
|
||||
+ options = g_strdup(colonstr);
|
||||
+
|
||||
+ p = options;
|
||||
+ while (*p) {
|
||||
+ /* find : delimiter or end of string */
|
||||
+ for (e = p; *e && *e != ':'; ++e) {
|
||||
+ if (*e == '\\') {
|
||||
+ e++;
|
||||
+ if (*e == '\0')
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+ if (*e == '\0') {
|
||||
+ next = e; /* last kv pair */
|
||||
+ } else {
|
||||
+ next = e + 1;
|
||||
+ *e = '\0';
|
||||
+ }
|
||||
+
|
||||
+ if (STRPREFIX(p, "image=")) {
|
||||
+ src->path = g_strdup(p + strlen("image="));
|
||||
+ } else if (STRPREFIX(p, "etcd-prefix=")) {
|
||||
+ src->query = g_strdup(p + strlen("etcd-prefix="));
|
||||
+ } else if (STRPREFIX(p, "config-path=")) {
|
||||
+ src->configFile = g_strdup(p + strlen("config-path="));
|
||||
+ } else if (STRPREFIX(p, "etcd-host=")) {
|
||||
+ char *h, *sep;
|
||||
+
|
||||
+ h = p + strlen("etcd-host=");
|
||||
+ while (h < e) {
|
||||
+ for (sep = h; sep < e; ++sep) {
|
||||
+ if (*sep == '\\' && (sep[1] == ',' ||
|
||||
+ sep[1] == ';' ||
|
||||
+ sep[1] == ' ')) {
|
||||
+ *sep = '\0';
|
||||
+ sep += 2;
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (virStorageSourceRBDAddHost(src, h) < 0)
|
||||
+ return -1;
|
||||
+
|
||||
+ h = sep;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ p = next;
|
||||
+ }
|
||||
+
|
||||
+ if (!src->path) {
|
||||
+ return -1;
|
||||
+ }
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+
|
||||
static int
|
||||
virStorageSourceParseNBDColonString(const char *nbdstr,
|
||||
virStorageSource *src)
|
||||
@@ -396,6 +465,11 @@ virStorageSourceParseBackingColon(virStorageSource *src,
|
||||
return -1;
|
||||
break;
|
||||
|
||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
||||
+ if (virStorageSourceParseVitastorColonString(path, src) < 0)
|
||||
+ return -1;
|
||||
+ break;
|
||||
+
|
||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
||||
@@ -984,6 +1058,54 @@ virStorageSourceParseBackingJSONRBD(virStorageSource *src,
|
||||
return 0;
|
||||
}
|
||||
|
||||
+static int
|
||||
+virStorageSourceParseBackingJSONVitastor(virStorageSource *src,
|
||||
+ virJSONValue *json,
|
||||
+ const char *jsonstr G_GNUC_UNUSED,
|
||||
+ int opaque G_GNUC_UNUSED)
|
||||
+{
|
||||
+ const char *filename;
|
||||
+ const char *image = virJSONValueObjectGetString(json, "image");
|
||||
+ const char *conf = virJSONValueObjectGetString(json, "config-path");
|
||||
+ const char *etcd_prefix = virJSONValueObjectGetString(json, "etcd-prefix");
|
||||
+ virJSONValue *servers = virJSONValueObjectGetArray(json, "server");
|
||||
+ size_t nservers;
|
||||
+ size_t i;
|
||||
+
|
||||
+ src->type = VIR_STORAGE_TYPE_NETWORK;
|
||||
+ src->protocol = VIR_STORAGE_NET_PROTOCOL_VITASTOR;
|
||||
+
|
||||
+ /* legacy syntax passed via 'filename' option */
|
||||
+ if ((filename = virJSONValueObjectGetString(json, "filename")))
|
||||
+ return virStorageSourceParseVitastorColonString(filename, src);
|
||||
+
|
||||
+ if (!image) {
|
||||
+ virReportError(VIR_ERR_INVALID_ARG, "%s",
|
||||
+ _("missing image name in Vitastor backing volume "
|
||||
+ "JSON specification"));
|
||||
+ return -1;
|
||||
+ }
|
||||
+
|
||||
+ src->path = g_strdup(image);
|
||||
+ src->configFile = g_strdup(conf);
|
||||
+ src->query = g_strdup(etcd_prefix);
|
||||
+
|
||||
+ if (servers) {
|
||||
+ nservers = virJSONValueArraySize(servers);
|
||||
+
|
||||
+ src->hosts = g_new0(virStorageNetHostDef, nservers);
|
||||
+ src->nhosts = nservers;
|
||||
+
|
||||
+ for (i = 0; i < nservers; i++) {
|
||||
+ if (virStorageSourceParseBackingJSONInetSocketAddress(src->hosts + i,
|
||||
+ virJSONValueArrayGet(servers, i)) < 0)
|
||||
+ return -1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
static int
|
||||
virStorageSourceParseBackingJSONRaw(virStorageSource *src,
|
||||
virJSONValue *json,
|
||||
@@ -1162,6 +1284,7 @@ static const struct virStorageSourceJSONDriverParser jsonParsers[] = {
|
||||
{"sheepdog", false, virStorageSourceParseBackingJSONSheepdog, 0},
|
||||
{"ssh", false, virStorageSourceParseBackingJSONSSH, 0},
|
||||
{"rbd", false, virStorageSourceParseBackingJSONRBD, 0},
|
||||
+ {"vitastor", false, virStorageSourceParseBackingJSONVitastor, 0},
|
||||
{"raw", true, virStorageSourceParseBackingJSONRaw, 0},
|
||||
{"nfs", false, virStorageSourceParseBackingJSONNFS, 0},
|
||||
{"vxhs", false, virStorageSourceParseBackingJSONVxHS, 0},
|
||||
diff --git a/src/test/test_driver.c b/src/test/test_driver.c
|
||||
index bd6f063..cce34e1 100644
|
||||
--- a/src/test/test_driver.c
|
||||
+++ b/src/test/test_driver.c
|
||||
@@ -7338,6 +7338,7 @@ testStorageVolumeTypeForPool(int pooltype)
|
||||
case VIR_STORAGE_POOL_ISCSI_DIRECT:
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_RBD:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
return VIR_STORAGE_VOL_NETWORK;
|
||||
case VIR_STORAGE_POOL_LOGICAL:
|
||||
case VIR_STORAGE_POOL_DISK:
|
||||
diff --git a/tests/storagepoolcapsschemadata/poolcaps-fs.xml b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||
index eee75af..8bd0a57 100644
|
||||
--- a/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||
+++ b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
||||
@@ -204,4 +204,11 @@
|
||||
</enum>
|
||||
</volOptions>
|
||||
</pool>
|
||||
+ <pool type='vitastor' supported='no'>
|
||||
+ <volOptions>
|
||||
+ <defaultFormat type='raw'/>
|
||||
+ <enum name='targetFormatType'>
|
||||
+ </enum>
|
||||
+ </volOptions>
|
||||
+ </pool>
|
||||
</storagepoolCapabilities>
|
||||
diff --git a/tests/storagepoolcapsschemadata/poolcaps-full.xml b/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||
index 805950a..852df0d 100644
|
||||
--- a/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||
+++ b/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
||||
@@ -204,4 +204,11 @@
|
||||
</enum>
|
||||
</volOptions>
|
||||
</pool>
|
||||
+ <pool type='vitastor' supported='yes'>
|
||||
+ <volOptions>
|
||||
+ <defaultFormat type='raw'/>
|
||||
+ <enum name='targetFormatType'>
|
||||
+ </enum>
|
||||
+ </volOptions>
|
||||
+ </pool>
|
||||
</storagepoolCapabilities>
|
||||
diff --git a/tests/storagepoolxml2argvtest.c b/tests/storagepoolxml2argvtest.c
|
||||
index e8e40d6..db55fe5 100644
|
||||
--- a/tests/storagepoolxml2argvtest.c
|
||||
+++ b/tests/storagepoolxml2argvtest.c
|
||||
@@ -65,6 +65,7 @@ testCompareXMLToArgvFiles(bool shouldFail,
|
||||
case VIR_STORAGE_POOL_GLUSTER:
|
||||
case VIR_STORAGE_POOL_ZFS:
|
||||
case VIR_STORAGE_POOL_VSTORAGE:
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
default:
|
||||
VIR_TEST_DEBUG("pool type '%s' has no xml2argv test", defTypeStr);
|
||||
diff --git a/tools/virsh-pool.c b/tools/virsh-pool.c
|
||||
index 8a98c6a..4b1bbd4 100644
|
||||
--- a/tools/virsh-pool.c
|
||||
+++ b/tools/virsh-pool.c
|
||||
@@ -1221,6 +1221,9 @@ cmdPoolList(vshControl *ctl, const vshCmd *cmd G_GNUC_UNUSED)
|
||||
case VIR_STORAGE_POOL_VSTORAGE:
|
||||
flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE;
|
||||
break;
|
||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
||||
+ flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR;
|
||||
+ break;
|
||||
case VIR_STORAGE_POOL_LAST:
|
||||
break;
|
||||
}
|
190  patches/pve-qemu-8.0-vitastor.patch  Normal file
@@ -0,0 +1,190 @@
|
||||
diff --git a/block/meson.build b/block/meson.build
|
||||
index 382bec0e7d..af6207dbce 100644
|
||||
--- a/block/meson.build
|
||||
+++ b/block/meson.build
|
||||
@@ -114,6 +114,7 @@ foreach m : [
|
||||
[libnfs, 'nfs', files('nfs.c')],
|
||||
[libssh, 'ssh', files('ssh.c')],
|
||||
[rbd, 'rbd', files('rbd.c')],
|
||||
+ [vitastor, 'vitastor', files('vitastor.c')],
|
||||
]
|
||||
if m[0].found()
|
||||
module_ss = ss.source_set()
|
||||
diff --git a/meson.build b/meson.build
|
||||
index c44d05a13f..ebedb42843 100644
|
||||
--- a/meson.build
|
||||
+++ b/meson.build
|
||||
@@ -1028,6 +1028,26 @@ if not get_option('rbd').auto() or have_block
|
||||
endif
|
||||
endif
|
||||
|
||||
+vitastor = not_found
|
||||
+if not get_option('vitastor').auto() or have_block
|
||||
+ libvitastor_client = cc.find_library('vitastor_client', has_headers: ['vitastor_c.h'],
|
||||
+ required: get_option('vitastor'), kwargs: static_kwargs)
|
||||
+ if libvitastor_client.found()
|
||||
+ if cc.links('''
|
||||
+ #include <vitastor_c.h>
|
||||
+ int main(void) {
|
||||
+ vitastor_c_create_qemu(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
|
||||
+ return 0;
|
||||
+ }''', dependencies: libvitastor_client)
|
||||
+ vitastor = declare_dependency(dependencies: libvitastor_client)
|
||||
+ elif get_option('vitastor').enabled()
|
||||
+ error('could not link libvitastor_client')
|
||||
+ else
|
||||
+ warning('could not link libvitastor_client, disabling')
|
||||
+ endif
|
||||
+ endif
|
||||
+endif
|
||||
+
|
||||
glusterfs = not_found
|
||||
glusterfs_ftruncate_has_stat = false
|
||||
glusterfs_iocb_has_stat = false
|
||||
@@ -1882,6 +1902,7 @@ endif
|
||||
config_host_data.set('CONFIG_OPENGL', opengl.found())
|
||||
config_host_data.set('CONFIG_PROFILER', get_option('profiler'))
|
||||
config_host_data.set('CONFIG_RBD', rbd.found())
|
||||
+config_host_data.set('CONFIG_VITASTOR', vitastor.found())
|
||||
config_host_data.set('CONFIG_RDMA', rdma.found())
|
||||
config_host_data.set('CONFIG_SDL', sdl.found())
|
||||
config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found())
|
||||
@@ -4020,6 +4041,7 @@ if spice_protocol.found()
|
||||
summary_info += {' spice server support': spice}
|
||||
endif
|
||||
summary_info += {'rbd support': rbd}
|
||||
+summary_info += {'vitastor support': vitastor}
|
||||
summary_info += {'smartcard support': cacard}
|
||||
summary_info += {'U2F support': u2f}
|
||||
summary_info += {'libusb': libusb}
|
||||
diff --git a/meson_options.txt b/meson_options.txt
|
||||
index fc9447d267..c4ac55c283 100644
|
||||
--- a/meson_options.txt
|
||||
+++ b/meson_options.txt
|
||||
@@ -173,6 +173,8 @@ option('lzo', type : 'feature', value : 'auto',
|
||||
description: 'lzo compression support')
|
||||
option('rbd', type : 'feature', value : 'auto',
|
||||
description: 'Ceph block device driver')
|
||||
+option('vitastor', type : 'feature', value : 'auto',
|
||||
+ description: 'Vitastor block device driver')
|
||||
option('opengl', type : 'feature', value : 'auto',
|
||||
description: 'OpenGL support')
|
||||
option('rdma', type : 'feature', value : 'auto',
|
||||
diff --git a/qapi/block-core.json b/qapi/block-core.json
|
||||
index c05ad0c07e..f5eb701604 100644
|
||||
--- a/qapi/block-core.json
|
||||
+++ b/qapi/block-core.json
|
||||
@@ -3308,7 +3308,7 @@
|
||||
'raw', 'rbd',
|
||||
{ 'name': 'replication', 'if': 'CONFIG_REPLICATION' },
|
||||
'pbs',
|
||||
- 'ssh', 'throttle', 'vdi', 'vhdx',
|
||||
+ 'ssh', 'throttle', 'vdi', 'vhdx', 'vitastor',
|
||||
{ 'name': 'virtio-blk-vfio-pci', 'if': 'CONFIG_BLKIO' },
|
||||
{ 'name': 'virtio-blk-vhost-user', 'if': 'CONFIG_BLKIO' },
|
||||
{ 'name': 'virtio-blk-vhost-vdpa', 'if': 'CONFIG_BLKIO' },
|
||||
@@ -4338,6 +4338,28 @@
|
||||
'*key-secret': 'str',
|
||||
'*server': ['InetSocketAddressBase'] } }
|
||||
|
||||
+##
|
||||
+# @BlockdevOptionsVitastor:
|
||||
+#
|
||||
+# Driver specific block device options for vitastor
|
||||
+#
|
||||
+# @image: Image name
|
||||
+# @inode: Inode number
|
||||
+# @pool: Pool ID
|
||||
+# @size: Desired image size in bytes
|
||||
+# @config-path: Path to Vitastor configuration
|
||||
+# @etcd-host: etcd connection address(es)
|
||||
+# @etcd-prefix: etcd key/value prefix
|
||||
+##
|
||||
+{ 'struct': 'BlockdevOptionsVitastor',
|
||||
+ 'data': { '*inode': 'uint64',
|
||||
+ '*pool': 'uint64',
|
||||
+ '*size': 'uint64',
|
||||
+ '*image': 'str',
|
||||
+ '*config-path': 'str',
|
||||
+ '*etcd-host': 'str',
|
||||
+ '*etcd-prefix': 'str' } }
|
||||
+
|
||||
##
|
||||
# @ReplicationMode:
|
||||
#
|
||||
@@ -4787,6 +4809,7 @@
|
||||
'throttle': 'BlockdevOptionsThrottle',
|
||||
'vdi': 'BlockdevOptionsGenericFormat',
|
||||
'vhdx': 'BlockdevOptionsGenericFormat',
|
||||
+ 'vitastor': 'BlockdevOptionsVitastor',
|
||||
'virtio-blk-vfio-pci':
|
||||
{ 'type': 'BlockdevOptionsVirtioBlkVfioPci',
|
||||
'if': 'CONFIG_BLKIO' },
|
||||
@@ -5187,6 +5210,17 @@
|
||||
'*cluster-size' : 'size',
|
||||
'*encrypt' : 'RbdEncryptionCreateOptions' } }
|
||||
|
||||
+##
|
||||
+# @BlockdevCreateOptionsVitastor:
|
||||
+#
|
||||
+# Driver specific image creation options for Vitastor.
|
||||
+#
|
||||
+# @size: Size of the virtual disk in bytes
|
||||
+##
|
||||
+{ 'struct': 'BlockdevCreateOptionsVitastor',
|
||||
+ 'data': { 'location': 'BlockdevOptionsVitastor',
|
||||
+ 'size': 'size' } }
|
||||
+
|
||||
##
|
||||
# @BlockdevVmdkSubformat:
|
||||
#
|
||||
@@ -5385,6 +5419,7 @@
|
||||
'ssh': 'BlockdevCreateOptionsSsh',
|
||||
'vdi': 'BlockdevCreateOptionsVdi',
|
||||
'vhdx': 'BlockdevCreateOptionsVhdx',
|
||||
+ 'vitastor': 'BlockdevCreateOptionsVitastor',
|
||||
'vmdk': 'BlockdevCreateOptionsVmdk',
|
||||
'vpc': 'BlockdevCreateOptionsVpc'
|
||||
} }
|
||||
diff --git a/scripts/ci/org.centos/stream/8/x86_64/configure b/scripts/ci/org.centos/stream/8/x86_64/configure
|
||||
index 6e8983f39c..1b0b9fcf3e 100755
|
||||
--- a/scripts/ci/org.centos/stream/8/x86_64/configure
|
||||
+++ b/scripts/ci/org.centos/stream/8/x86_64/configure
|
||||
@@ -32,7 +32,7 @@
|
||||
--with-git=meson \
|
||||
--with-git-submodules=update \
|
||||
--target-list="x86_64-softmmu" \
|
||||
---block-drv-rw-whitelist="qcow2,raw,file,host_device,nbd,iscsi,rbd,blkdebug,luks,null-co,nvme,copy-on-read,throttle,gluster" \
|
||||
+--block-drv-rw-whitelist="qcow2,raw,file,host_device,nbd,iscsi,rbd,vitastor,blkdebug,luks,null-co,nvme,copy-on-read,throttle,gluster" \
|
||||
--audio-drv-list="" \
|
||||
--block-drv-ro-whitelist="vmdk,vhdx,vpc,https,ssh" \
|
||||
--with-coroutine=ucontext \
|
||||
@@ -179,6 +179,7 @@
|
||||
--enable-opengl \
|
||||
--enable-pie \
|
||||
--enable-rbd \
|
||||
+--enable-vitastor \
|
||||
--enable-rdma \
|
||||
--enable-seccomp \
|
||||
--enable-snappy \
|
||||
diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh
|
||||
index 009fab1515..95914e6ebc 100644
|
||||
--- a/scripts/meson-buildoptions.sh
|
||||
+++ b/scripts/meson-buildoptions.sh
|
||||
@@ -144,6 +144,7 @@ meson_options_help() {
|
||||
printf "%s\n" ' qed qed image format support'
|
||||
printf "%s\n" ' qga-vss build QGA VSS support (broken with MinGW)'
|
||||
printf "%s\n" ' rbd Ceph block device driver'
|
||||
+ printf "%s\n" ' vitastor Vitastor block device driver'
|
||||
printf "%s\n" ' rdma Enable RDMA-based migration'
|
||||
printf "%s\n" ' replication replication support'
|
||||
printf "%s\n" ' sdl SDL user interface'
|
||||
@@ -392,6 +393,8 @@ _meson_option_parse() {
|
||||
--disable-qom-cast-debug) printf "%s" -Dqom_cast_debug=false ;;
|
||||
--enable-rbd) printf "%s" -Drbd=enabled ;;
|
||||
--disable-rbd) printf "%s" -Drbd=disabled ;;
|
||||
+ --enable-vitastor) printf "%s" -Dvitastor=enabled ;;
|
||||
+ --disable-vitastor) printf "%s" -Dvitastor=disabled ;;
|
||||
--enable-rdma) printf "%s" -Drdma=enabled ;;
|
||||
--disable-rdma) printf "%s" -Drdma=disabled ;;
|
||||
--enable-replication) printf "%s" -Dreplication=enabled ;;
|
176  patches/qemu-2.12-vitastor.patch  Normal file
@@ -0,0 +1,176 @@
|
||||
diff --git a/block/Makefile.objs b/block/Makefile.objs
|
||||
index d644bac60a..e404236291 100644
|
||||
--- a/block/Makefile.objs
|
||||
+++ b/block/Makefile.objs
|
||||
@@ -19,6 +19,7 @@ block-obj-$(if $(CONFIG_LIBISCSI),y,n) += iscsi-opts.o
|
||||
block-obj-$(CONFIG_LIBNFS) += nfs.o
|
||||
block-obj-$(CONFIG_CURL) += curl.o
|
||||
block-obj-$(CONFIG_RBD) += rbd.o
|
||||
+block-obj-$(CONFIG_VITASTOR) += vitastor.o
|
||||
block-obj-$(CONFIG_GLUSTERFS) += gluster.o
|
||||
block-obj-$(CONFIG_VXHS) += vxhs.o
|
||||
block-obj-$(CONFIG_LIBSSH2) += ssh.o
|
||||
@@ -39,6 +40,8 @@ curl.o-cflags := $(CURL_CFLAGS)
|
||||
curl.o-libs := $(CURL_LIBS)
|
||||
rbd.o-cflags := $(RBD_CFLAGS)
|
||||
rbd.o-libs := $(RBD_LIBS)
|
||||
+vitastor.o-cflags := $(VITASTOR_CFLAGS)
|
||||
+vitastor.o-libs := $(VITASTOR_LIBS)
|
||||
gluster.o-cflags := $(GLUSTERFS_CFLAGS)
|
||||
gluster.o-libs := $(GLUSTERFS_LIBS)
|
||||
vxhs.o-libs := $(VXHS_LIBS)
|
||||
diff --git a/configure b/configure
|
||||
index 0a19b033bc..58b7fbf24c 100755
|
||||
--- a/configure
|
||||
+++ b/configure
|
||||
@@ -398,6 +398,7 @@ trace_backends="log"
|
||||
trace_file="trace"
|
||||
spice=""
|
||||
rbd=""
|
||||
+vitastor=""
|
||||
smartcard=""
|
||||
libusb=""
|
||||
usb_redir=""
|
||||
@@ -1213,6 +1214,10 @@ for opt do
|
||||
;;
|
||||
--enable-rbd) rbd="yes"
|
||||
;;
|
||||
+ --disable-vitastor) vitastor="no"
|
||||
+ ;;
|
||||
+ --enable-vitastor) vitastor="yes"
|
||||
+ ;;
|
||||
--disable-xfsctl) xfs="no"
|
||||
;;
|
||||
--enable-xfsctl) xfs="yes"
|
||||
@@ -1601,6 +1606,7 @@ disabled with --disable-FEATURE, default is enabled if available:
|
||||
vhost-crypto vhost-crypto acceleration support
|
||||
spice spice
|
||||
rbd rados block device (rbd)
|
||||
+ vitastor vitastor block device
|
||||
libiscsi iscsi support
|
||||
libnfs nfs support
|
||||
smartcard smartcard support (libcacard)
|
||||
@@ -3594,6 +3600,27 @@ EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
+##########################################
|
||||
+# vitastor probe
|
||||
+if test "$vitastor" != "no" ; then
|
||||
+ cat > $TMPC <<EOF
|
||||
+#include <vitastor_c.h>
|
||||
+int main(void) {
|
||||
+ vitastor_c_create_qemu(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
|
||||
+ return 0;
|
||||
+}
|
||||
+EOF
|
||||
+ vitastor_libs="-lvitastor_client"
|
||||
+ if compile_prog "" "$vitastor_libs" ; then
|
||||
+ vitastor=yes
|
||||
+ else
|
||||
+ if test "$vitastor" = "yes" ; then
|
||||
+ feature_not_found "vitastor block device" "Install vitastor-client-dev"
|
||||
+ fi
|
||||
+ vitastor=no
|
||||
+ fi
|
||||
+fi
|
||||
+
|
||||
##########################################
|
||||
# libssh2 probe
|
||||
min_libssh2_version=1.2.8
|
||||
@@ -5837,6 +5864,7 @@ echo "Trace output file $trace_file-<pid>"
|
||||
fi
|
||||
echo "spice support $spice $(echo_version $spice $spice_protocol_version/$spice_server_version)"
|
||||
echo "rbd support $rbd"
|
||||
+echo "vitastor support $vitastor"
|
||||
echo "xfsctl support $xfs"
|
||||
echo "smartcard support $smartcard"
|
||||
echo "libusb $libusb"
|
||||
@@ -6416,6 +6444,11 @@ if test "$rbd" = "yes" ; then
|
||||
echo "RBD_CFLAGS=$rbd_cflags" >> $config_host_mak
|
||||
echo "RBD_LIBS=$rbd_libs" >> $config_host_mak
|
||||
fi
|
||||
+if test "$vitastor" = "yes" ; then
|
||||
+ echo "CONFIG_VITASTOR=m" >> $config_host_mak
|
||||
+ echo "VITASTOR_CFLAGS=$vitastor_cflags" >> $config_host_mak
|
||||
+ echo "VITASTOR_LIBS=$vitastor_libs" >> $config_host_mak
|
||||
+fi
|
||||
|
||||
echo "CONFIG_COROUTINE_BACKEND=$coroutine" >> $config_host_mak
|
||||
if test "$coroutine_pool" = "yes" ; then
|
||||
diff --git a/qapi/block-core.json b/qapi/block-core.json
|
||||
index c50517bff3..c780bb2c1c 100644
|
||||
--- a/qapi/block-core.json
|
||||
+++ b/qapi/block-core.json
|
||||
@@ -2514,7 +2514,7 @@
|
||||
'dmg', 'file', 'ftp', 'ftps', 'gluster', 'host_cdrom',
|
||||
'host_device', 'http', 'https', 'iscsi', 'luks', 'nbd', 'nfs',
|
||||
'null-aio', 'null-co', 'nvme', 'parallels', 'qcow', 'qcow2', 'qed',
|
||||
- 'quorum', 'raw', 'rbd', 'replication', 'sheepdog', 'ssh',
|
||||
+ 'quorum', 'raw', 'rbd', 'vitastor', 'replication', 'sheepdog', 'ssh',
|
||||
'throttle', 'vdi', 'vhdx', 'vmdk', 'vpc', 'vvfat', 'vxhs' ] }
|
||||
|
||||
##
|
||||
@@ -3217,6 +3217,28 @@
|
||||
'*snap-id': 'uint32',
|
||||
'*tag': 'str' } }
|
||||
|
||||
+##
|
||||
+# @BlockdevOptionsVitastor:
|
||||
+#
|
||||
+# Driver specific block device options for vitastor
|
||||
+#
|
||||
+# @image: Image name
|
||||
+# @inode: Inode number
|
||||
+# @pool: Pool ID
|
||||
+# @size: Desired image size in bytes
|
||||
+# @config-path: Path to Vitastor configuration
|
||||
+# @etcd-host: etcd connection address(es)
|
||||
+# @etcd-prefix: etcd key/value prefix
|
||||
+##
|
||||
+{ 'struct': 'BlockdevOptionsVitastor',
|
||||
+ 'data': { '*inode': 'uint64',
|
||||
+ '*pool': 'uint64',
|
||||
+ '*size': 'uint64',
|
||||
+ '*image': 'str',
|
||||
+ '*config-path': 'str',
|
||||
+ '*etcd-host': 'str',
|
||||
+ '*etcd-prefix': 'str' } }
|
||||
+
|
||||
##
|
||||
# @ReplicationMode:
|
||||
#
|
||||
@@ -3547,6 +3569,7 @@
|
||||
'rbd': 'BlockdevOptionsRbd',
|
||||
'replication':'BlockdevOptionsReplication',
|
||||
'sheepdog': 'BlockdevOptionsSheepdog',
|
||||
+ 'vitastor': 'BlockdevOptionsVitastor',
|
||||
'ssh': 'BlockdevOptionsSsh',
|
||||
'throttle': 'BlockdevOptionsThrottle',
|
||||
'vdi': 'BlockdevOptionsGenericFormat',
|
||||
@@ -3991,6 +4014,17 @@
|
||||
'*subformat': 'BlockdevVhdxSubformat',
|
||||
'*block-state-zero': 'bool' } }
|
||||
|
||||
+##
|
||||
+# @BlockdevCreateOptionsVitastor:
|
||||
+#
|
||||
+# Driver specific image creation options for Vitastor.
|
||||
+#
|
||||
+# @size: Size of the virtual disk in bytes
|
||||
+##
|
||||
+{ 'struct': 'BlockdevCreateOptionsVitastor',
|
||||
+ 'data': { 'location': 'BlockdevOptionsVitastor',
|
||||
+ 'size': 'size' } }
|
||||
+
|
||||
##
|
||||
# @BlockdevVpcSubformat:
|
||||
#
|
||||
@@ -4074,6 +4108,7 @@
|
||||
'rbd': 'BlockdevCreateOptionsRbd',
|
||||
'replication': 'BlockdevCreateNotSupported',
|
||||
'sheepdog': 'BlockdevCreateOptionsSheepdog',
|
||||
+ 'vitastor': 'BlockdevCreateOptionsVitastor',
|
||||
'ssh': 'BlockdevCreateOptionsSsh',
|
||||
'throttle': 'BlockdevCreateNotSupported',
|
||||
'vdi': 'BlockdevCreateOptionsVdi',
|
181  patches/qemu-5.2-vitastor.patch  Normal file
@@ -0,0 +1,181 @@
|
||||
Index: qemu-5.2+dfsg/qapi/block-core.json
|
||||
===================================================================
|
||||
--- qemu-5.2+dfsg.orig/qapi/block-core.json
|
||||
+++ qemu-5.2+dfsg/qapi/block-core.json
|
||||
@@ -2831,7 +2831,7 @@
|
||||
'luks', 'nbd', 'nfs', 'null-aio', 'null-co', 'nvme', 'parallels',
|
||||
'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'rbd',
|
||||
{ 'name': 'replication', 'if': 'defined(CONFIG_REPLICATION)' },
|
||||
- 'sheepdog',
|
||||
+ 'sheepdog', 'vitastor',
|
||||
'ssh', 'throttle', 'vdi', 'vhdx', 'vmdk', 'vpc', 'vvfat' ] }
|
||||
|
||||
##
|
||||
@@ -3668,6 +3668,28 @@
|
||||
'*tag': 'str' } }
|
||||
|
||||
##
|
||||
+# @BlockdevOptionsVitastor:
|
||||
+#
|
||||
+# Driver specific block device options for vitastor
|
||||
+#
|
||||
+# @image: Image name
|
||||
+# @inode: Inode number
|
||||
+# @pool: Pool ID
|
||||
+# @size: Desired image size in bytes
|
||||
+# @config-path: Path to Vitastor configuration
|
||||
+# @etcd-host: etcd connection address(es)
|
||||
+# @etcd-prefix: etcd key/value prefix
|
||||
+##
|
||||
+{ 'struct': 'BlockdevOptionsVitastor',
|
||||
+ 'data': { '*inode': 'uint64',
|
||||
+ '*pool': 'uint64',
|
||||
+ '*size': 'uint64',
|
||||
+ '*image': 'str',
|
||||
+ '*config-path': 'str',
|
||||
+ '*etcd-host': 'str',
|
||||
+ '*etcd-prefix': 'str' } }
|
||||
+
|
||||
+##
|
||||
# @ReplicationMode:
|
||||
#
|
||||
# An enumeration of replication modes.
|
||||
@@ -4015,6 +4037,7 @@
|
||||
'replication': { 'type': 'BlockdevOptionsReplication',
|
||||
'if': 'defined(CONFIG_REPLICATION)' },
|
||||
'sheepdog': 'BlockdevOptionsSheepdog',
|
||||
+ 'vitastor': 'BlockdevOptionsVitastor',
|
||||
'ssh': 'BlockdevOptionsSsh',
|
||||
'throttle': 'BlockdevOptionsThrottle',
|
||||
'vdi': 'BlockdevOptionsGenericFormat',
|
||||
@@ -4404,6 +4427,17 @@
|
||||
'*cluster-size' : 'size' } }
|
||||
|
||||
##
|
||||
+# @BlockdevCreateOptionsVitastor:
|
||||
+#
|
||||
+# Driver specific image creation options for Vitastor.
|
||||
+#
|
||||
+# @size: Size of the virtual disk in bytes
|
||||
+##
|
||||
+{ 'struct': 'BlockdevCreateOptionsVitastor',
|
||||
+ 'data': { 'location': 'BlockdevOptionsVitastor',
|
||||
+ 'size': 'size' } }
|
||||
+
|
||||
+##
|
||||
# @BlockdevVmdkSubformat:
|
||||
#
|
||||
# Subformat options for VMDK images
|
||||
@@ -4665,6 +4699,7 @@
|
||||
'qed': 'BlockdevCreateOptionsQed',
|
||||
'rbd': 'BlockdevCreateOptionsRbd',
|
||||
'sheepdog': 'BlockdevCreateOptionsSheepdog',
|
||||
+ 'vitastor': 'BlockdevCreateOptionsVitastor',
|
||||
'ssh': 'BlockdevCreateOptionsSsh',
|
||||
'vdi': 'BlockdevCreateOptionsVdi',
|
||||
'vhdx': 'BlockdevCreateOptionsVhdx',
|
||||
Index: qemu-5.2+dfsg/block/meson.build
|
||||
===================================================================
|
||||
--- qemu-5.2+dfsg.orig/block/meson.build
|
||||
+++ qemu-5.2+dfsg/block/meson.build
|
||||
@@ -76,6 +76,7 @@ foreach m : [
|
||||
['CONFIG_LIBNFS', 'nfs', libnfs, 'nfs.c'],
|
||||
['CONFIG_LIBSSH', 'ssh', libssh, 'ssh.c'],
|
||||
['CONFIG_RBD', 'rbd', rbd, 'rbd.c'],
|
||||
+ ['CONFIG_VITASTOR', 'vitastor', vitastor, 'vitastor.c'],
|
||||
]
|
||||
if config_host.has_key(m[0])
|
||||
if enable_modules
|
||||
Index: qemu-5.2+dfsg/configure
|
||||
===================================================================
|
||||
--- qemu-5.2+dfsg.orig/configure
|
||||
+++ qemu-5.2+dfsg/configure
|
||||
@@ -372,6 +372,7 @@ trace_backends="log"
|
||||
trace_file="trace"
|
||||
spice=""
|
||||
rbd=""
|
||||
+vitastor=""
|
||||
smartcard=""
|
||||
u2f="auto"
|
||||
libusb=""
|
||||
@@ -1263,6 +1264,10 @@ for opt do
|
||||
;;
|
||||
--enable-rbd) rbd="yes"
|
||||
;;
|
||||
+ --disable-vitastor) vitastor="no"
|
||||
+ ;;
|
||||
+ --enable-vitastor) vitastor="yes"
|
||||
+ ;;
|
||||
--disable-xfsctl) xfs="no"
|
||||
;;
|
||||
--enable-xfsctl) xfs="yes"
|
||||
@@ -1827,6 +1832,7 @@ disabled with --disable-FEATURE, default
|
||||
vhost-vdpa vhost-vdpa kernel backend support
|
||||
spice spice
|
||||
rbd rados block device (rbd)
|
||||
+ vitastor vitastor block device
|
||||
libiscsi iscsi support
|
||||
libnfs nfs support
|
||||
smartcard smartcard support (libcacard)
|
||||
@@ -3719,6 +3725,27 @@ EOF
|
||||
fi
|
||||
|
||||
##########################################
|
||||
+# vitastor probe
|
||||
+if test "$vitastor" != "no" ; then
|
||||
+ cat > $TMPC <<EOF
|
||||
+#include <vitastor_c.h>
|
||||
+int main(void) {
|
||||
+ vitastor_c_create_qemu(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
|
||||
+ return 0;
|
||||
+}
|
||||
+EOF
|
||||
+ vitastor_libs="-lvitastor_client"
|
||||
+ if compile_prog "" "$vitastor_libs" ; then
|
||||
+ vitastor=yes
|
||||
+ else
|
||||
+ if test "$vitastor" = "yes" ; then
|
||||
+ feature_not_found "vitastor block device" "Install vitastor-client-dev"
|
||||
+ fi
|
||||
+ vitastor=no
|
||||
+ fi
|
||||
+fi
|
||||
+
|
||||
+##########################################
|
||||
# libssh probe
|
||||
if test "$libssh" != "no" ; then
|
||||
if $pkg_config --exists libssh; then
|
||||
@@ -6456,6 +6483,10 @@ if test "$rbd" = "yes" ; then
|
||||
echo "CONFIG_RBD=y" >> $config_host_mak
|
||||
echo "RBD_LIBS=$rbd_libs" >> $config_host_mak
|
||||
fi
|
||||
+if test "$vitastor" = "yes" ; then
|
||||
+ echo "CONFIG_VITASTOR=y" >> $config_host_mak
|
||||
+ echo "VITASTOR_LIBS=$vitastor_libs" >> $config_host_mak
|
||||
+fi
|
||||
|
||||
echo "CONFIG_COROUTINE_BACKEND=$coroutine" >> $config_host_mak
|
||||
if test "$coroutine_pool" = "yes" ; then
|
||||
Index: qemu-5.2+dfsg/meson.build
|
||||
===================================================================
|
||||
--- qemu-5.2+dfsg.orig/meson.build
|
||||
+++ qemu-5.2+dfsg/meson.build
|
||||
@@ -596,6 +596,10 @@ rbd = not_found
|
||||
if 'CONFIG_RBD' in config_host
|
||||
rbd = declare_dependency(link_args: config_host['RBD_LIBS'].split())
|
||||
endif
|
||||
+vitastor = not_found
|
||||
+if 'CONFIG_VITASTOR' in config_host
|
||||
+ vitastor = declare_dependency(link_args: config_host['VITASTOR_LIBS'].split())
|
||||
+endif
|
||||
glusterfs = not_found
|
||||
if 'CONFIG_GLUSTERFS' in config_host
|
||||
glusterfs = declare_dependency(compile_args: config_host['GLUSTERFS_CFLAGS'].split(),
|
||||
@@ -2145,6 +2149,7 @@ endif
|
||||
# TODO: add back protocol and server version
|
||||
summary_info += {'spice support': config_host.has_key('CONFIG_SPICE')}
|
||||
summary_info += {'rbd support': config_host.has_key('CONFIG_RBD')}
|
||||
+summary_info += {'vitastor support': config_host.has_key('CONFIG_VITASTOR')}
|
||||
summary_info += {'xfsctl support': config_host.has_key('CONFIG_XFS')}
|
||||
summary_info += {'smartcard support': config_host.has_key('CONFIG_SMARTCARD')}
|
||||
summary_info += {'U2F support': u2f.found()}
|
@@ -24,4 +24,4 @@ rm fio
mv fio-copy fio
FIO=`rpm -qi fio | perl -e 'while(<>) { /^Epoch[\s:]+(\S+)/ && print "$1:"; /^Version[\s:]+(\S+)/ && print $1; /^Release[\s:]+(\S+)/ && print "-$1"; }'`
perl -i -pe 's/(Requires:\s*fio)([^\n]+)?/$1 = '$FIO'/' $VITASTOR/rpm/vitastor-el$EL.spec
tar --transform 's#^#vitastor-0.8.9/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-0.8.9$(rpm --eval '%dist').tar.gz *
tar --transform 's#^#vitastor-1.0.0/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-1.0.0$(rpm --eval '%dist').tar.gz *
@@ -22,7 +22,7 @@
Name: qemu-kvm
Version: 4.2.0
-Release: 29.vitastor%{?dist}.6
+Release: 32.vitastor%{?dist}.6
+Release: 34.vitastor%{?dist}.6
# Epoch because we pushed a qemu-1.0 package. AIUI this can't ever be dropped
Epoch: 15
License: GPLv2 and GPLv2+ and CC-BY
@@ -13,7 +13,7 @@
Name: qemu-kvm
Version: 4.2.0
-Release: 29%{?dist}.6
+Release: 32.vitastor%{?dist}.6
+Release: 33.vitastor%{?dist}.6
# Epoch because we pushed a qemu-1.0 package. AIUI this can't ever be dropped
Epoch: 15
License: GPLv2 and GPLv2+ and CC-BY
103  rpm/qemu-kvm-6.2-el8.spec.patch  Normal file
@@ -0,0 +1,103 @@
|
||||
--- qemu-kvm-6.2.spec.orig 2023-07-18 13:52:57.636625440 +0000
|
||||
+++ qemu-kvm-6.2.spec 2023-07-18 13:52:19.011683886 +0000
|
||||
@@ -73,6 +73,7 @@ Requires: %{name}-hw-usbredir = %{epoch}
|
||||
%endif \
|
||||
Requires: %{name}-block-iscsi = %{epoch}:%{version}-%{release} \
|
||||
Requires: %{name}-block-rbd = %{epoch}:%{version}-%{release} \
|
||||
+Requires: %{name}-block-vitastor = %{epoch}:%{version}-%{release}\
|
||||
Requires: %{name}-block-ssh = %{epoch}:%{version}-%{release}
|
||||
|
||||
# Macro to properly setup RHEL/RHEV conflict handling
|
||||
@@ -83,7 +84,7 @@ Obsoletes: %1-rhev <= %{epoch}:%{version
|
||||
Summary: QEMU is a machine emulator and virtualizer
|
||||
Name: qemu-kvm
|
||||
Version: 6.2.0
|
||||
-Release: 32%{?rcrel}%{?dist}
|
||||
+Release: 32.vitastor%{?rcrel}%{?dist}
|
||||
# Epoch because we pushed a qemu-1.0 package. AIUI this can't ever be dropped
|
||||
Epoch: 15
|
||||
License: GPLv2 and GPLv2+ and CC-BY
|
||||
@@ -122,6 +123,7 @@ Source37: tests_data_acpi_pc_SSDT.dimmpx
|
||||
Source38: tests_data_acpi_q35_FACP.slic
|
||||
Source39: tests_data_acpi_q35_SSDT.dimmpxm
|
||||
Source40: tests_data_acpi_virt_SSDT.memhp
|
||||
+Source41: qemu-vitastor.c
|
||||
|
||||
Patch0001: 0001-redhat-Adding-slirp-to-the-exploded-tree.patch
|
||||
Patch0005: 0005-Initial-redhat-build.patch
|
||||
@@ -652,6 +654,7 @@ Patch255: kvm-scsi-protect-req-aiocb-wit
|
||||
Patch256: kvm-dma-helpers-prevent-dma_blk_cb-vs-dma_aio_cancel-rac.patch
|
||||
# For bz#2090990 - qemu crash with error scsi_req_unref(SCSIRequest *): Assertion `req->refcount > 0' failed or scsi_dma_complete(void *, int): Assertion `r->req.aiocb != NULL' failed [8.7.0]
|
||||
Patch257: kvm-virtio-scsi-reset-SCSI-devices-from-main-loop-thread.patch
|
||||
+Patch258: qemu-6.2-vitastor.patch
|
||||
|
||||
BuildRequires: wget
|
||||
BuildRequires: rpm-build
|
||||
@@ -689,6 +692,7 @@ BuildRequires: libcurl-devel
|
||||
BuildRequires: libssh-devel
|
||||
BuildRequires: librados-devel
|
||||
BuildRequires: librbd-devel
|
||||
+BuildRequires: vitastor-client-devel
|
||||
%if %{have_gluster}
|
||||
# For gluster block driver
|
||||
BuildRequires: glusterfs-api-devel
|
||||
@@ -926,6 +930,14 @@ Install this package if you want to acce
|
||||
using the rbd protocol.
|
||||
|
||||
|
||||
+%package block-vitastor
|
||||
+Summary: QEMU Vitastor block driver
|
||||
+Requires: %{name}-common%{?_isa} = %{epoch}:%{version}-%{release}
|
||||
+
|
||||
+%description block-vitastor
|
||||
+This package provides the additional Vitastor block driver for QEMU.
|
||||
+
|
||||
+
|
||||
%package block-ssh
|
||||
Summary: QEMU SSH block driver
|
||||
Requires: %{name}-common%{?_isa} = %{epoch}:%{version}-%{release}
|
||||
@@ -979,6 +991,7 @@ This package provides usbredir support.
|
||||
rm -fr slirp
|
||||
mkdir slirp
|
||||
%autopatch -p1
|
||||
+cp %{SOURCE41} ./block/vitastor.c
|
||||
|
||||
%global qemu_kvm_build qemu_kvm_build
|
||||
mkdir -p %{qemu_kvm_build}
|
||||
@@ -994,7 +1007,7 @@ cp -f %{SOURCE40} tests/data/acpi/virt/S
|
||||
# --build-id option is used for giving info to the debug packages.
|
||||
buildldflags="VL_LDFLAGS=-Wl,--build-id"
|
||||
|
||||
-%global block_drivers_list qcow2,raw,file,host_device,nbd,iscsi,rbd,blkdebug,luks,null-co,nvme,copy-on-read,throttle
|
||||
+%global block_drivers_list qcow2,raw,file,host_device,nbd,iscsi,rbd,vitastor,blkdebug,luks,null-co,nvme,copy-on-read,throttle
|
||||
|
||||
%if 0%{have_gluster}
|
||||
%global block_drivers_list %{block_drivers_list},gluster
|
||||
@@ -1149,9 +1162,7 @@ pushd %{qemu_kvm_build}
|
||||
--firmwarepath=%{_prefix}/share/qemu-firmware \
|
||||
--meson="git" \
|
||||
--target-list="%{buildarch}" \
|
||||
- --block-drv-rw-whitelist=%{block_drivers_list} \
|
||||
--audio-drv-list= \
|
||||
- --block-drv-ro-whitelist=vmdk,vhdx,vpc,https,ssh \
|
||||
--with-coroutine=ucontext \
|
||||
--with-git=git \
|
||||
--tls-priority=@QEMU,SYSTEM \
|
||||
@@ -1197,6 +1208,7 @@ pushd %{qemu_kvm_build}
|
||||
%endif
|
||||
--enable-pie \
|
||||
--enable-rbd \
|
||||
+ --enable-vitastor \
|
||||
%if 0%{have_librdma}
|
||||
--enable-rdma \
|
||||
%endif
|
||||
@@ -1794,6 +1806,9 @@ sh %{_sysconfdir}/sysconfig/modules/kvm.
|
||||
%files block-rbd
|
||||
%{_libdir}/qemu-kvm/block-rbd.so
|
||||
|
||||
+%files block-vitastor
|
||||
+%{_libdir}/qemu-kvm/block-vitastor.so
|
||||
+
|
||||
%files block-ssh
|
||||
%{_libdir}/qemu-kvm/block-ssh.so
|
||||
|
93  rpm/qemu-kvm-7.2-el9.spec.patch  Normal file
@@ -0,0 +1,93 @@
|
||||
--- qemu-kvm-7.2.spec.orig 2023-06-22 13:56:19.000000000 +0000
|
||||
+++ qemu-kvm-7.2.spec 2023-07-18 07:55:22.347090196 +0000
|
||||
@@ -100,8 +100,6 @@
|
||||
%endif
|
||||
|
||||
%global target_list %{kvm_target}-softmmu
|
||||
-%global block_drivers_rw_list qcow2,raw,file,host_device,nbd,iscsi,rbd,blkdebug,luks,null-co,nvme,copy-on-read,throttle,compress
|
||||
-%global block_drivers_ro_list vdi,vmdk,vhdx,vpc,https
|
||||
%define qemudocdir %{_docdir}/%{name}
|
||||
%global firmwaredirs "%{_datadir}/qemu-firmware:%{_datadir}/ipxe/qemu:%{_datadir}/seavgabios:%{_datadir}/seabios"
|
||||
|
||||
@@ -126,6 +124,7 @@ Requires: %{name}-device-usb-host = %{ep
|
||||
Requires: %{name}-device-usb-redirect = %{epoch}:%{version}-%{release} \
|
||||
%endif \
|
||||
Requires: %{name}-block-rbd = %{epoch}:%{version}-%{release} \
|
||||
+Requires: %{name}-block-vitastor = %{epoch}:%{version}-%{release}\
|
||||
Requires: %{name}-audio-pa = %{epoch}:%{version}-%{release}
|
||||
|
||||
# Since SPICE is removed from RHEL-9, the following Obsoletes:
|
||||
@@ -148,7 +147,7 @@ Obsoletes: %{name}-block-ssh <= %{epoch}
|
||||
Summary: QEMU is a machine emulator and virtualizer
|
||||
Name: qemu-kvm
|
||||
Version: 7.2.0
|
||||
-Release: 14%{?rcrel}%{?dist}%{?cc_suffix}.1
|
||||
+Release: 14.vitastor%{?rcrel}%{?dist}%{?cc_suffix}.1
|
||||
# Epoch because we pushed a qemu-1.0 package. AIUI this can't ever be dropped
|
||||
# Epoch 15 used for RHEL 8
|
||||
# Epoch 17 used for RHEL 9 (due to release versioning offset in RHEL 8.5)
|
||||
@@ -171,6 +170,7 @@ Source28: 95-kvm-memlock.conf
|
||||
Source30: kvm-s390x.conf
|
||||
Source31: kvm-x86.conf
|
||||
Source36: README.tests
|
||||
+Source37: qemu-vitastor.c
|
||||
|
||||
|
||||
Patch0004: 0004-Initial-redhat-build.patch
|
||||
@@ -418,6 +418,7 @@ Patch134: kvm-target-i386-Fix-BZHI-instr
|
||||
Patch135: kvm-intel-iommu-fail-DEVIOTLB_UNMAP-without-dt-mode.patch
|
||||
# For bz#2203745 - Disk detach is unsuccessful while the guest is still booting [rhel-9.2.0.z]
|
||||
Patch136: kvm-acpi-pcihp-allow-repeating-hot-unplug-requests.patch
|
||||
+Patch137: qemu-7.2-vitastor.patch
|
||||
|
||||
%if %{have_clang}
|
||||
BuildRequires: clang
|
||||
@@ -449,6 +450,7 @@ BuildRequires: libcurl-devel
|
||||
%if %{have_block_rbd}
|
||||
BuildRequires: librbd-devel
|
||||
%endif
|
||||
+BuildRequires: vitastor-client-devel
|
||||
# We need both because the 'stap' binary is probed for by configure
|
||||
BuildRequires: systemtap
|
||||
BuildRequires: systemtap-sdt-devel
|
||||
@@ -642,6 +644,14 @@ using the rbd protocol.
|
||||
%endif
|
||||
|
||||
|
||||
+%package block-vitastor
|
||||
+Summary: QEMU Vitastor block driver
|
||||
+Requires: %{name}-common%{?_isa} = %{epoch}:%{version}-%{release}
|
||||
+
|
||||
+%description block-vitastor
|
||||
+This package provides the additional Vitastor block driver for QEMU.
|
||||
+
|
||||
+
|
||||
%package audio-pa
|
||||
Summary: QEMU PulseAudio audio driver
|
||||
Requires: %{name}-common%{?_isa} = %{epoch}:%{version}-%{release}
|
||||
@@ -719,6 +729,7 @@ This package provides usbredir support.
|
||||
%prep
|
||||
%setup -q -n qemu-%{version}%{?rcstr}
|
||||
%autopatch -p1
|
||||
+cp %{SOURCE37} ./block/vitastor.c
|
||||
|
||||
%global qemu_kvm_build qemu_kvm_build
|
||||
mkdir -p %{qemu_kvm_build}
|
||||
@@ -946,6 +957,7 @@ run_configure \
|
||||
%if %{have_block_rbd}
|
||||
--enable-rbd \
|
||||
%endif
|
||||
+ --enable-vitastor \
|
||||
%if %{have_librdma}
|
||||
--enable-rdma \
|
||||
%endif
|
||||
@@ -1426,6 +1438,9 @@ useradd -r -u 107 -g qemu -G kvm -d / -s
|
||||
%files block-rbd
|
||||
%{_libdir}/%{name}/block-rbd.so
|
||||
%endif
|
||||
+%files block-vitastor
|
||||
+%{_libdir}/%{name}/block-vitastor.so
|
||||
+
|
||||
%files audio-pa
|
||||
%{_libdir}/%{name}/audio-pa.so
|
||||
|
@@ -35,7 +35,7 @@ ADD . /root/vitastor
|
||||
RUN set -e; \
|
||||
cd /root/vitastor/rpm; \
|
||||
sh build-tarball.sh; \
|
||||
cp /root/vitastor-0.8.9.el7.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp /root/vitastor-1.0.0.el7.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp vitastor-el7.spec ~/rpmbuild/SPECS/vitastor.spec; \
|
||||
cd ~/rpmbuild/SPECS/; \
|
||||
rpmbuild -ba vitastor.spec; \
|
||||
|
@@ -1,11 +1,11 @@
|
||||
Name: vitastor
|
||||
Version: 0.8.9
|
||||
Version: 1.0.0
|
||||
Release: 1%{?dist}
|
||||
Summary: Vitastor, a fast software-defined clustered block storage
|
||||
|
||||
License: Vitastor Network Public License 1.1
|
||||
URL: https://vitastor.io/
|
||||
Source0: vitastor-0.8.9.el7.tar.gz
|
||||
Source0: vitastor-1.0.0.el7.tar.gz
|
||||
|
||||
BuildRequires: liburing-devel >= 0.6
|
||||
BuildRequires: gperftools-devel
|
||||
|
@@ -35,7 +35,7 @@ ADD . /root/vitastor
|
||||
RUN set -e; \
|
||||
cd /root/vitastor/rpm; \
|
||||
sh build-tarball.sh; \
|
||||
cp /root/vitastor-0.8.9.el8.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp /root/vitastor-1.0.0.el8.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp vitastor-el8.spec ~/rpmbuild/SPECS/vitastor.spec; \
|
||||
cd ~/rpmbuild/SPECS/; \
|
||||
rpmbuild -ba vitastor.spec; \
|
||||
|
@@ -1,11 +1,11 @@
|
||||
Name: vitastor
|
||||
Version: 0.8.9
|
||||
Version: 1.0.0
|
||||
Release: 1%{?dist}
|
||||
Summary: Vitastor, a fast software-defined clustered block storage
|
||||
|
||||
License: Vitastor Network Public License 1.1
|
||||
URL: https://vitastor.io/
|
||||
Source0: vitastor-0.8.9.el8.tar.gz
|
||||
Source0: vitastor-1.0.0.el8.tar.gz
|
||||
|
||||
BuildRequires: liburing-devel >= 0.6
|
||||
BuildRequires: gperftools-devel
|
||||
|
@@ -18,7 +18,7 @@ ADD . /root/vitastor
|
||||
RUN set -e; \
|
||||
cd /root/vitastor/rpm; \
|
||||
sh build-tarball.sh; \
|
||||
cp /root/vitastor-0.8.9.el9.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp /root/vitastor-1.0.0.el9.tar.gz ~/rpmbuild/SOURCES; \
|
||||
cp vitastor-el9.spec ~/rpmbuild/SPECS/vitastor.spec; \
|
||||
cd ~/rpmbuild/SPECS/; \
|
||||
rpmbuild -ba vitastor.spec; \
|
||||
|
@@ -1,11 +1,11 @@
|
||||
Name: vitastor
|
||||
Version: 0.8.9
|
||||
Version: 1.0.0
|
||||
Release: 1%{?dist}
|
||||
Summary: Vitastor, a fast software-defined clustered block storage
|
||||
|
||||
License: Vitastor Network Public License 1.1
|
||||
URL: https://vitastor.io/
|
||||
Source0: vitastor-0.8.9.el9.tar.gz
|
||||
Source0: vitastor-1.0.0.el9.tar.gz
|
||||
|
||||
BuildRequires: liburing-devel >= 0.6
|
||||
BuildRequires: gperftools-devel
|
||||
@@ -73,7 +73,7 @@ Vitastor library headers for development.
|
||||
Summary: Vitastor - fio drivers
|
||||
Group: Development/Libraries
|
||||
Requires: vitastor-client = %{version}-%{release}
|
||||
Requires: fio = 3.27-7.el9
|
||||
Requires: fio = 3.27-8.el9
|
||||
|
||||
|
||||
%description -n vitastor-fio
|
||||
|
@@ -16,7 +16,7 @@ if("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/local/?$")
|
||||
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
|
||||
endif()
|
||||
|
||||
add_definitions(-DVERSION="0.8.9")
|
||||
add_definitions(-DVERSION="1.0.0")
|
||||
add_definitions(-Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -I ${CMAKE_SOURCE_DIR}/src)
|
||||
if (${WITH_ASAN})
|
||||
add_definitions(-fsanitize=address -fno-omit-frame-pointer)
|
||||
@@ -111,7 +111,7 @@ target_compile_options(vitastor_common PUBLIC -fPIC)
|
||||
add_executable(vitastor-osd
|
||||
osd_main.cpp osd.cpp osd_secondary.cpp osd_peering.cpp osd_flush.cpp osd_peering_pg.cpp
|
||||
osd_primary.cpp osd_primary_chain.cpp osd_primary_sync.cpp osd_primary_write.cpp osd_primary_subops.cpp
|
||||
osd_cluster.cpp osd_rmw.cpp
|
||||
osd_cluster.cpp osd_rmw.cpp osd_scrub.cpp osd_primary_describe.cpp
|
||||
)
|
||||
target_link_libraries(vitastor-osd
|
||||
vitastor_common
|
||||
@@ -141,6 +141,8 @@ add_library(vitastor_client SHARED
|
||||
cli_common.cpp
|
||||
cli_alloc_osd.cpp
|
||||
cli_status.cpp
|
||||
cli_describe.cpp
|
||||
cli_fix.cpp
|
||||
cli_df.cpp
|
||||
cli_ls.cpp
|
||||
cli_create.cpp
|
||||
@@ -299,7 +301,7 @@ add_executable(test_cluster_client
|
||||
EXCLUDE_FROM_ALL
|
||||
test_cluster_client.cpp
|
||||
pg_states.cpp osd_ops.cpp cluster_client.cpp cluster_client_list.cpp msgr_op.cpp mock/messenger.cpp msgr_stop.cpp
|
||||
etcd_state_client.cpp timerfd_manager.cpp ../json11/json11.cpp
|
||||
etcd_state_client.cpp timerfd_manager.cpp str_util.cpp ../json11/json11.cpp
|
||||
)
|
||||
target_compile_definitions(test_cluster_client PUBLIC -D__MOCK__)
|
||||
target_include_directories(test_cluster_client PUBLIC ${CMAKE_SOURCE_DIR}/src/mock)
|
||||
|
@@ -19,8 +19,8 @@ bool string_to_addr(std::string str, bool parse_port, int default_port, struct s
|
||||
if (p != std::string::npos && !(str.length() > 0 && str[p-1] == ']')) // "[ipv6]" which contains ':'
|
||||
{
|
||||
char null_byte = 0;
|
||||
int n = sscanf(str.c_str()+p+1, "%d%c", &default_port, &null_byte);
|
||||
if (n != 1 || default_port >= 0x10000)
|
||||
int scanned = sscanf(str.c_str()+p+1, "%d%c", &default_port, &null_byte);
|
||||
if (scanned != 1 || default_port >= 0x10000)
|
||||
return false;
|
||||
str = str.substr(0, p);
|
||||
}
|
||||
|
@@ -143,34 +143,83 @@ uint64_t allocator::get_free_count()
|
||||
return free;
|
||||
}
|
||||
|
||||
// FIXME: Move to utils?
|
||||
void bitmap_set(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity)
|
||||
{
|
||||
if (start == 0)
|
||||
if (start == 0 && len == 32*bitmap_granularity)
|
||||
*((uint32_t*)bitmap) = UINT32_MAX;
|
||||
else if (start == 0 && len == 64*bitmap_granularity)
|
||||
*((uint64_t*)bitmap) = UINT64_MAX;
|
||||
else
|
||||
{
|
||||
if (len == 32*bitmap_granularity)
|
||||
unsigned bit_start = start / bitmap_granularity;
|
||||
unsigned bit_end = ((start + len) + bitmap_granularity - 1) / bitmap_granularity;
|
||||
while (bit_start < bit_end)
|
||||
{
|
||||
*((uint32_t*)bitmap) = UINT32_MAX;
|
||||
return;
|
||||
}
|
||||
else if (len == 64*bitmap_granularity)
|
||||
{
|
||||
*((uint64_t*)bitmap) = UINT64_MAX;
|
||||
return;
|
||||
}
|
||||
}
|
||||
unsigned bit_start = start / bitmap_granularity;
|
||||
unsigned bit_end = ((start + len) + bitmap_granularity - 1) / bitmap_granularity;
|
||||
while (bit_start < bit_end)
|
||||
{
|
||||
if (!(bit_start & 7) && bit_end >= bit_start+8)
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] = UINT8_MAX;
|
||||
bit_start += 8;
|
||||
}
|
||||
else
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] |= 1 << (bit_start % 8);
|
||||
bit_start++;
|
||||
if (!(bit_start & 7) && bit_end >= bit_start+8)
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] = UINT8_MAX;
|
||||
bit_start += 8;
|
||||
}
|
||||
else
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] |= 1 << (bit_start % 8);
|
||||
bit_start++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void bitmap_clear(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity)
|
||||
{
|
||||
if (start == 0 && len == 32*bitmap_granularity)
|
||||
*((uint32_t*)bitmap) = 0;
|
||||
else if (start == 0 && len == 64*bitmap_granularity)
|
||||
*((uint64_t*)bitmap) = 0;
|
||||
else
|
||||
{
|
||||
unsigned bit_start = start / bitmap_granularity;
|
||||
unsigned bit_end = ((start + len) + bitmap_granularity - 1) / bitmap_granularity;
|
||||
while (bit_start < bit_end)
|
||||
{
|
||||
if (!(bit_start & 7) && bit_end >= bit_start+8)
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] = 0;
|
||||
bit_start += 8;
|
||||
}
|
||||
else
|
||||
{
|
||||
((uint8_t*)bitmap)[bit_start / 8] &= (0xFF ^ (1 << (bit_start % 8)));
|
||||
bit_start++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool bitmap_check(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity)
|
||||
{
|
||||
bool r = false;
|
||||
if (start == 0 && len == 32*bitmap_granularity)
|
||||
r = !!*((uint32_t*)bitmap);
|
||||
else if (start == 0 && len == 64*bitmap_granularity)
|
||||
r = !!*((uint64_t*)bitmap);
|
||||
else
|
||||
{
|
||||
unsigned bit_start = start / bitmap_granularity;
|
||||
unsigned bit_end = ((start + len) + bitmap_granularity - 1) / bitmap_granularity;
|
||||
while (bit_start < bit_end)
|
||||
{
|
||||
if (!(bit_start & 7) && bit_end >= bit_start+8)
|
||||
{
|
||||
r = r || !!((uint8_t*)bitmap)[bit_start / 8];
|
||||
bit_start += 8;
|
||||
}
|
||||
else
|
||||
{
|
||||
r = r || (((uint8_t*)bitmap)[bit_start / 8] & (1 << (bit_start % 8)));
|
||||
bit_start++;
|
||||
}
|
||||
}
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
@@ -23,3 +23,5 @@ public:
|
||||
};
|
||||
|
||||
void bitmap_set(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity);
|
||||
void bitmap_clear(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity);
|
||||
bool bitmap_check(void *bitmap, uint64_t start, uint64_t len, uint64_t bitmap_granularity);
|
||||
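A minimal usage sketch for the bitmap helpers declared above (not part of the diff); the header name and the 128 KiB / 4 KiB sizes are assumptions matching the defaults mentioned elsewhere in the blockstore headers:

// Minimal usage sketch, not from the repository: track an 8 KiB write at offset
// 4 KiB inside a 128 KiB object with 4 KiB granularity, then test and clear it.
#include "allocator.h"   // header name assumed; it declares the helpers above
#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t granularity = 4096;             // bitmap_granularity
    const uint64_t object_size = 128*1024;         // data block size
    alignas(uint64_t) uint8_t bits[4] = {};        // 32 bits, one per 4 KiB

    bitmap_set(bits, 4096, 8192, granularity);     // sets bits 1 and 2
    assert(bitmap_check(bits, 0, object_size, granularity));   // something is set
    bitmap_clear(bits, 4096, 8192, granularity);
    assert(!bitmap_check(bits, 0, object_size, granularity));  // fully clear again
    return 0;
}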
|
@@ -73,7 +73,11 @@ Input:
|
||||
write request is copied into the metadata area bitwise and stored there.
|
||||
|
||||
Output:
|
||||
- retval = number of bytes actually read/written or negative error number (-EINVAL or -ENOSPC)
|
||||
- retval = number of bytes actually read/written or negative error number
|
||||
-EINVAL = invalid input parameters
|
||||
-ENOENT = requested object/version does not exist for reads
|
||||
-ENOSPC = no space left in the store for writes
|
||||
-EDOM = checksum error.
|
||||
- version = the version actually read or written
|
||||
|
||||
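A hedged sketch (not part of the documented interface itself) of a completion callback that interprets the retval convention above; the field names come from the blockstore_op_t definition shown further down in this diff:

#include <cerrno>
#include <cstdio>

// Interprets op->retval according to the Output section above.
static void on_rw_done(blockstore_op_t *op)
{
    if (op->retval == (int)op->len)
        printf("done, version %lu\n", op->version);
    else if (op->retval == -ENOENT)
        printf("requested object/version does not exist (read)\n");
    else if (op->retval == -ENOSPC)
        printf("no space left in the store (write)\n");
    else if (op->retval == -EDOM)
        printf("checksum error\n");
    else
        printf("invalid parameters or other error: %d\n", op->retval);
}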
## BS_OP_DELETE
|
||||
@@ -122,11 +126,14 @@ Output:
|
||||
Get a list of all objects in this Blockstore.
|
||||
|
||||
Input:
|
||||
- oid.stripe = PG alignment
|
||||
- len = PG count or 0 to list all objects
|
||||
- offset = PG number
|
||||
- oid.inode = min inode number or 0 to list all inodes
|
||||
- version = max inode number or 0 to list all inodes
|
||||
- pg_alignment = PG alignment
|
||||
- pg_count = PG count or 0 to list all objects
|
||||
- pg_number = PG number
|
||||
- list_stable_limit = max number of clean objects in the reply
|
||||
it's guaranteed that dirty objects are returned from the same interval,
|
||||
i.e. from (min_oid .. min(max_oid, max(returned stable OIDs)))
|
||||
- min_oid = min inode/stripe or 0 to list all objects
|
||||
- max_oid = max inode/stripe or 0 to list all objects
|
||||
|
||||
Output:
|
||||
- retval = total obj_ver_id count
|
||||
@@ -143,10 +150,27 @@ struct blockstore_op_t
|
||||
uint64_t opcode;
|
||||
// finish callback
|
||||
std::function<void (blockstore_op_t*)> callback;
|
||||
object_id oid;
|
||||
uint64_t version;
|
||||
uint32_t offset;
|
||||
uint32_t len;
|
||||
union __attribute__((__packed__))
|
||||
{
|
||||
// R/W
|
||||
struct __attribute__((__packed__))
|
||||
{
|
||||
object_id oid;
|
||||
uint64_t version;
|
||||
uint32_t offset;
|
||||
uint32_t len;
|
||||
};
|
||||
// List
|
||||
struct __attribute__((__packed__))
|
||||
{
|
||||
object_id min_oid;
|
||||
object_id max_oid;
|
||||
uint32_t pg_alignment;
|
||||
uint32_t pg_count;
|
||||
uint32_t pg_number;
|
||||
uint32_t list_stable_limit;
|
||||
};
|
||||
};
|
||||
void *buf;
|
||||
void *bitmap;
|
||||
int retval;
|
||||
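A hedged sketch (not from the diff) of filling the new per-PG listing fields of blockstore_op_t as documented above; the BS_OP_LIST opcode constant name is assumed and the numeric values are illustrative:

// Fill a listing request using the new union fields shown above.
static void fill_list_op(blockstore_op_t *op)
{
    op->opcode = BS_OP_LIST;          // opcode constant name assumed
    op->pg_alignment = 4*1024*1024;   // PG stripe size
    op->pg_count = 256;               // total PG count
    op->pg_number = 17;               // 0-based PG index (the code checks pg_number+1 <= pg_count)
    op->list_stable_limit = 1000;     // cap on clean objects in the reply
    op->min_oid = {};                 // list all inodes/stripes
    op->max_oid = {};
    op->callback = [](blockstore_op_t *cb_op)
    {
        // cb_op->retval = total obj_ver_id count (see the Output section above)
    };
}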
|
@@ -40,10 +40,31 @@ void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config
|
||||
data_block_size = parse_size(config["block_size"]);
|
||||
journal_device = config["journal_device"];
|
||||
journal_offset = parse_size(config["journal_offset"]);
|
||||
disk_alignment = strtoull(config["disk_alignment"].c_str(), NULL, 10);
|
||||
journal_block_size = strtoull(config["journal_block_size"].c_str(), NULL, 10);
|
||||
meta_block_size = strtoull(config["meta_block_size"].c_str(), NULL, 10);
|
||||
bitmap_granularity = strtoull(config["bitmap_granularity"].c_str(), NULL, 10);
|
||||
disk_alignment = parse_size(config["disk_alignment"]);
|
||||
journal_block_size = parse_size(config["journal_block_size"]);
|
||||
meta_block_size = parse_size(config["meta_block_size"]);
|
||||
bitmap_granularity = parse_size(config["bitmap_granularity"]);
|
||||
meta_format = stoull_full(config["meta_format"]);
|
||||
cached_io_data = config["cached_io_data"] == "true" || config["cached_io_data"] == "yes" || config["cached_io_data"] == "1";
|
||||
cached_io_meta = cached_io_data && (meta_device == data_device || meta_device == "") &&
|
||||
config.find("cached_io_meta") == config.end() ||
|
||||
config["cached_io_meta"] == "true" || config["cached_io_meta"] == "yes" || config["cached_io_meta"] == "1";
|
||||
cached_io_journal = cached_io_meta && (journal_device == meta_device || journal_device == "") &&
|
||||
config.find("cached_io_journal") == config.end() ||
|
||||
config["cached_io_journal"] == "true" || config["cached_io_journal"] == "yes" || config["cached_io_journal"] == "1";
|
||||
if (config["data_csum_type"] == "crc32c")
|
||||
{
|
||||
data_csum_type = BLOCKSTORE_CSUM_CRC32C;
|
||||
}
|
||||
else if (config["data_csum_type"] == "" || config["data_csum_type"] == "none")
|
||||
{
|
||||
data_csum_type = BLOCKSTORE_CSUM_NONE;
|
||||
}
|
||||
else
|
||||
{
|
||||
throw std::runtime_error("data_csum_type="+config["data_csum_type"]+" is unsupported, only \"crc32c\" and \"none\" are supported");
|
||||
}
|
||||
csum_block_size = parse_size(config["csum_block_size"]);
|
||||
// Validate
|
||||
if (!data_block_size)
|
||||
{
|
||||
@@ -91,7 +112,23 @@ void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config
|
||||
}
|
||||
if (data_block_size % bitmap_granularity)
|
||||
{
|
||||
throw std::runtime_error("Block size must be a multiple of sparse write tracking granularity");
|
||||
throw std::runtime_error("Data block size must be a multiple of sparse write tracking granularity");
|
||||
}
|
||||
if (!data_csum_type)
|
||||
{
|
||||
csum_block_size = 0;
|
||||
}
|
||||
else if (!csum_block_size)
|
||||
{
|
||||
csum_block_size = bitmap_granularity;
|
||||
}
|
||||
if (csum_block_size && (csum_block_size % bitmap_granularity))
|
||||
{
|
||||
throw std::runtime_error("Checksum block size must be a multiple of sparse write tracking granularity");
|
||||
}
|
||||
if (csum_block_size && (data_block_size % csum_block_size))
|
||||
{
|
||||
throw std::runtime_error("Checksum block size must be a divisor of data block size");
|
||||
}
|
||||
if (meta_device == "")
|
||||
{
|
||||
@@ -110,7 +147,9 @@ void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config
|
||||
throw std::runtime_error("journal_offset must be a multiple of journal_block_size = "+std::to_string(journal_block_size));
|
||||
}
|
||||
clean_entry_bitmap_size = data_block_size / bitmap_granularity / 8;
|
||||
clean_entry_size = sizeof(clean_disk_entry) + 2*clean_entry_bitmap_size;
|
||||
clean_dyn_size = clean_entry_bitmap_size*2 + (csum_block_size
|
||||
? data_block_size/csum_block_size*(data_csum_type & 0xFF) : 0);
|
||||
clean_entry_size = sizeof(clean_disk_entry) + clean_dyn_size + 4 /*entry_csum*/;
|
||||
}
|
||||
|
||||
void blockstore_disk_t::calc_lengths(bool skip_meta_check)
|
||||
@@ -160,6 +199,25 @@ void blockstore_disk_t::calc_lengths(bool skip_meta_check)
|
||||
// required metadata size
|
||||
block_count = data_len / data_block_size;
|
||||
meta_len = (1 + (block_count - 1 + meta_block_size / clean_entry_size) / (meta_block_size / clean_entry_size)) * meta_block_size;
|
||||
if (meta_format == BLOCKSTORE_META_FORMAT_V1 ||
|
||||
!meta_format && !skip_meta_check && meta_area_size < meta_len && !data_csum_type)
|
||||
{
|
||||
uint64_t clean_entry_v0_size = sizeof(clean_disk_entry) + 2*clean_entry_bitmap_size;
|
||||
uint64_t meta_v0_len = (1 + (block_count - 1 + meta_block_size / clean_entry_v0_size)
|
||||
/ (meta_block_size / clean_entry_v0_size)) * meta_block_size;
|
||||
if (meta_format == BLOCKSTORE_META_FORMAT_V1 || meta_area_size >= meta_v0_len)
|
||||
{
|
||||
// Old metadata fits.
|
||||
printf("Warning: Using old metadata format without checksums because the new format doesn't fit into provided area\n");
|
||||
clean_entry_size = clean_entry_v0_size;
|
||||
meta_len = meta_v0_len;
|
||||
meta_format = BLOCKSTORE_META_FORMAT_V1;
|
||||
}
|
||||
else
|
||||
meta_format = BLOCKSTORE_META_FORMAT_V2;
|
||||
}
|
||||
else
|
||||
meta_format = BLOCKSTORE_META_FORMAT_V2;
|
||||
if (!skip_meta_check && meta_area_size < meta_len)
|
||||
{
|
||||
throw std::runtime_error("Metadata area is too small, need at least "+std::to_string(meta_len)+" bytes");
|
||||
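A worked example of the meta_len formula above, under assumed values: 4 KiB metadata blocks, 128 KiB data blocks over 1 TiB of data, no data checksums, so clean_entry_size = 24 + 2*4 + 4 = 36 bytes:

// entries_per_block = meta_block_size / clean_entry_size = 4096 / 36 = 113
// block_count       = 1 TiB / 128 KiB = 8388608
// meta_len          = (1 + (8388608 - 1 + 113) / 113) * 4096
//                   = (1 + 74236) * 4096 = 304074752 bytes ≈ 290 MiB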
@@ -216,7 +274,7 @@ static void check_size(int fd, uint64_t *size, uint64_t *sectsize, std::string n
|
||||
|
||||
void blockstore_disk_t::open_data()
|
||||
{
|
||||
data_fd = open(data_device.c_str(), O_DIRECT|O_RDWR);
|
||||
data_fd = open(data_device.c_str(), (cached_io_data ? O_SYNC : O_DIRECT) | O_RDWR);
|
||||
if (data_fd == -1)
|
||||
{
|
||||
throw std::runtime_error("Failed to open data device "+data_device+": "+std::string(strerror(errno)));
|
||||
@@ -241,9 +299,9 @@ void blockstore_disk_t::open_data()
|
||||
|
||||
void blockstore_disk_t::open_meta()
|
||||
{
|
||||
if (meta_device != data_device)
|
||||
if (meta_device != data_device || cached_io_meta != cached_io_data)
|
||||
{
|
||||
meta_fd = open(meta_device.c_str(), O_DIRECT|O_RDWR);
|
||||
meta_fd = open(meta_device.c_str(), (cached_io_meta ? O_SYNC : O_DIRECT) | O_RDWR);
|
||||
if (meta_fd == -1)
|
||||
{
|
||||
throw std::runtime_error("Failed to open metadata device "+meta_device+": "+std::string(strerror(errno)));
|
||||
@@ -253,7 +311,7 @@ void blockstore_disk_t::open_meta()
|
||||
{
|
||||
throw std::runtime_error("meta_offset exceeds device size = "+std::to_string(meta_device_size));
|
||||
}
|
||||
if (!disable_flock && flock(meta_fd, LOCK_EX|LOCK_NB) != 0)
|
||||
if (!disable_flock && meta_device != data_device && flock(meta_fd, LOCK_EX|LOCK_NB) != 0)
|
||||
{
|
||||
throw std::runtime_error(std::string("Failed to lock metadata device: ") + strerror(errno));
|
||||
}
|
||||
@@ -279,15 +337,15 @@ void blockstore_disk_t::open_meta()
|
||||
|
||||
void blockstore_disk_t::open_journal()
|
||||
{
|
||||
if (journal_device != meta_device)
|
||||
if (journal_device != meta_device || cached_io_journal != cached_io_meta)
|
||||
{
|
||||
journal_fd = open(journal_device.c_str(), O_DIRECT|O_RDWR);
|
||||
journal_fd = open(journal_device.c_str(), (cached_io_journal ? O_SYNC : O_DIRECT) | O_RDWR);
|
||||
if (journal_fd == -1)
|
||||
{
|
||||
throw std::runtime_error("Failed to open journal device "+journal_device+": "+std::string(strerror(errno)));
|
||||
}
|
||||
check_size(journal_fd, &journal_device_size, &journal_device_sect, "journal device");
|
||||
if (!disable_flock && flock(journal_fd, LOCK_EX|LOCK_NB) != 0)
|
||||
if (!disable_flock && journal_device != meta_device && flock(journal_fd, LOCK_EX|LOCK_NB) != 0)
|
||||
{
|
||||
throw std::runtime_error(std::string("Failed to lock journal device: ") + strerror(errno));
|
||||
}
|
||||
|
@@ -8,6 +8,10 @@
|
||||
#include <string>
|
||||
#include <map>
|
||||
|
||||
#define BLOCKSTORE_CSUM_NONE 0
|
||||
// Lower byte of checksum type is its length
|
||||
#define BLOCKSTORE_CSUM_CRC32C 0x104
|
||||
|
||||
struct blockstore_disk_t
|
||||
{
|
||||
std::string data_device, meta_device, journal_device;
|
||||
@@ -21,17 +25,23 @@ struct blockstore_disk_t
|
||||
uint64_t meta_block_size = 4096;
|
||||
// Sparse write tracking granularity. 4 KB is a good choice. Must be a multiple of disk_alignment
|
||||
uint64_t bitmap_granularity = 4096;
|
||||
// Data checksum type, BLOCKSTORE_CSUM_NONE or BLOCKSTORE_CSUM_CRC32C
|
||||
uint32_t data_csum_type = BLOCKSTORE_CSUM_NONE;
|
||||
// Checksum block size, must be a multiple of bitmap_granularity
|
||||
uint32_t csum_block_size = 4096;
|
||||
// By default, Blockstore locks all opened devices exclusively. This option can be used to disable locking
|
||||
bool disable_flock = false;
|
||||
// Use Linux page cache for reads and writes, i.e. open FDs with O_SYNC instead of O_DIRECT
|
||||
bool cached_io_data = false, cached_io_meta = false, cached_io_journal = false;
|
||||
|
||||
int meta_fd = -1, data_fd = -1, journal_fd = -1;
|
||||
uint64_t meta_offset, meta_device_sect, meta_device_size, meta_len;
|
||||
uint64_t meta_offset, meta_device_sect, meta_device_size, meta_len, meta_format = 0;
|
||||
uint64_t data_offset, data_device_sect, data_device_size, data_len;
|
||||
uint64_t journal_offset, journal_device_sect, journal_device_size, journal_len;
|
||||
|
||||
uint32_t block_order;
|
||||
uint64_t block_count;
|
||||
uint32_t clean_entry_bitmap_size = 0, clean_entry_size = 0;
|
||||
uint32_t clean_entry_bitmap_size = 0, clean_entry_size = 0, clean_dyn_size = 0;
|
||||
|
||||
void parse_config(std::map<std::string, std::string> & config);
|
||||
void open_data();
|
||||
@@ -39,4 +49,13 @@ struct blockstore_disk_t
|
||||
void open_journal();
|
||||
void calc_lengths(bool skip_meta_check = false);
|
||||
void close_all();
|
||||
|
||||
inline uint64_t dirty_dyn_size(uint64_t offset, uint64_t len)
|
||||
{
|
||||
// Checksums may be partial if write is not aligned with csum_block_size
|
||||
return clean_entry_bitmap_size + (csum_block_size && len > 0
|
||||
? ((offset+len+csum_block_size-1)/csum_block_size - offset/csum_block_size)
|
||||
* (data_csum_type & 0xFF)
|
||||
: 0);
|
||||
}
|
||||
};
|
||||
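A worked example of dirty_dyn_size() above, under assumed settings (clean_entry_bitmap_size = 4, csum_block_size = 4096, crc32c so the checksum length is data_csum_type & 0xFF = 4):

// offset = 6144, len = 10240  ->  the write covers bytes [6144, 16384)
// csum blocks touched = (6144+10240+4095)/4096 - 6144/4096 = 4 - 1 = 3
// dirty_dyn_size      = 4 + 3*4 = 16 bytes (bitmap + three partial-block checksums)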
|
File diff suppressed because it is too large
@@ -1,10 +1,22 @@
|
||||
// Copyright (c) Vitaliy Filippov, 2019+
|
||||
// License: VNPL-1.1 (see README.md for details)
|
||||
|
||||
#define COPY_BUF_JOURNAL 1
|
||||
#define COPY_BUF_DATA 2
|
||||
#define COPY_BUF_ZERO 4
|
||||
#define COPY_BUF_CSUM_FILL 8
|
||||
#define COPY_BUF_COALESCED 16
|
||||
#define COPY_BUF_META_BLOCK 32
|
||||
#define COPY_BUF_JOURNALED_BIG 64
|
||||
|
||||
struct copy_buffer_t
|
||||
{
|
||||
uint64_t offset, len;
|
||||
int copy_flags;
|
||||
uint64_t offset, len, disk_offset;
|
||||
uint64_t journal_sector; // only for reads: sector+1 if used and !journal.inmemory, otherwise 0
|
||||
void *buf;
|
||||
uint8_t *csum_buf;
|
||||
int *dyn_data;
|
||||
};
|
||||
|
||||
struct meta_sector_t
|
||||
@@ -37,7 +49,7 @@ class journal_flusher_co
|
||||
{
|
||||
blockstore_impl_t *bs;
|
||||
journal_flusher_t *flusher;
|
||||
int wait_state, wait_count;
|
||||
int wait_state, wait_count, wait_journal_count;
|
||||
struct io_uring_sqe *sqe;
|
||||
struct ring_data_t *data;
|
||||
|
||||
@@ -46,28 +58,39 @@ class journal_flusher_co
|
||||
obj_ver_id cur;
|
||||
std::map<obj_ver_id, dirty_entry>::iterator dirty_it, dirty_start, dirty_end;
|
||||
std::map<object_id, uint64_t>::iterator repeat_it;
|
||||
std::function<void(ring_data_t*)> simple_callback_r, simple_callback_w;
|
||||
std::function<void(ring_data_t*)> simple_callback_r, simple_callback_rj, simple_callback_w;
|
||||
|
||||
bool skip_copy, has_delete, has_writes;
|
||||
std::vector<copy_buffer_t> v;
|
||||
std::vector<copy_buffer_t>::iterator it;
|
||||
int i;
|
||||
bool fill_incomplete, cleared_incomplete;
|
||||
int read_to_fill_incomplete;
|
||||
int copy_count;
|
||||
uint64_t clean_loc, old_clean_loc;
|
||||
uint64_t clean_loc, clean_ver, old_clean_loc, old_clean_ver;
|
||||
flusher_meta_write_t meta_old, meta_new;
|
||||
bool clean_init_bitmap;
|
||||
uint64_t clean_bitmap_offset, clean_bitmap_len;
|
||||
void *new_clean_bitmap;
|
||||
uint8_t *clean_init_dyn_ptr;
|
||||
uint8_t *new_clean_bitmap;
|
||||
|
||||
uint64_t new_trim_pos;
|
||||
|
||||
// local: scan_dirty()
|
||||
uint64_t offset, end_offset, submit_offset, submit_len;
|
||||
|
||||
friend class journal_flusher_t;
|
||||
bool scan_dirty(int wait_base);
|
||||
void scan_dirty();
|
||||
bool read_dirty(int wait_base);
|
||||
bool modify_meta_do_reads(int wait_base);
|
||||
bool wait_meta_reads(int wait_base);
|
||||
bool modify_meta_read(uint64_t meta_loc, flusher_meta_write_t &wr, int wait_base);
|
||||
bool clear_incomplete_csum_block_bits(int wait_base);
|
||||
void calc_block_checksums(uint32_t *new_data_csums, bool skip_overwrites);
|
||||
void update_metadata_entry();
|
||||
bool write_meta_block(flusher_meta_write_t & meta_block, int wait_base);
|
||||
void update_clean_db();
|
||||
void free_data_blocks();
|
||||
bool fsync_batch(bool fsync_meta, int wait_base);
|
||||
bool trim_journal(int wait_base);
|
||||
void free_buffers();
|
||||
public:
|
||||
journal_flusher_co();
|
||||
bool loop();
|
||||
@@ -95,14 +118,16 @@ class journal_flusher_t
|
||||
|
||||
std::map<uint64_t, meta_sector_t> meta_sectors;
|
||||
std::deque<object_id> flush_queue;
|
||||
std::map<object_id, uint64_t> flush_versions;
|
||||
std::map<object_id, uint64_t> flush_versions; // FIXME: consider unordered_map?
|
||||
|
||||
bool try_find_older(std::map<obj_ver_id, dirty_entry>::iterator & dirty_end, obj_ver_id & cur);
|
||||
bool try_find_other(std::map<obj_ver_id, dirty_entry>::iterator & dirty_end, obj_ver_id & cur);
|
||||
|
||||
public:
|
||||
journal_flusher_t(blockstore_impl_t *bs);
|
||||
~journal_flusher_t();
|
||||
void loop();
|
||||
bool is_trim_wanted() { return trim_wanted; }
|
||||
bool is_active();
|
||||
void mark_trim_possible();
|
||||
void request_trim();
|
||||
@@ -111,4 +136,5 @@ public:
|
||||
void unshift_flush(obj_ver_id oid, bool force);
|
||||
void remove_flush(object_id oid);
|
||||
void dump_diagnostics();
|
||||
bool is_mutated(uint64_t clean_loc);
|
||||
};
|
||||
|
@@ -13,6 +13,7 @@ blockstore_impl_t::blockstore_impl_t(blockstore_config_t & config, ring_loop_t *
|
||||
initialized = 0;
|
||||
parse_config(config, true);
|
||||
zero_object = (uint8_t*)memalign_or_die(MEM_ALIGNMENT, dsk.data_block_size);
|
||||
alloc_dyn_data = dsk.clean_dyn_size > sizeof(void*) || dsk.csum_block_size > 0;
|
||||
try
|
||||
{
|
||||
dsk.open_data();
|
||||
@@ -38,8 +39,8 @@ blockstore_impl_t::~blockstore_impl_t()
|
||||
dsk.close_all();
|
||||
if (metadata_buffer)
|
||||
free(metadata_buffer);
|
||||
if (clean_bitmap)
|
||||
free(clean_bitmap);
|
||||
if (clean_bitmaps)
|
||||
free(clean_bitmaps);
|
||||
}
|
||||
|
||||
bool blockstore_impl_t::is_started()
|
||||
@@ -392,6 +393,7 @@ void blockstore_impl_t::init_op(blockstore_op_t *op)
|
||||
{
|
||||
// Call constructor without allocating memory. We'll call destructor before returning op back
|
||||
new ((void*)op->private_data) blockstore_op_private_t;
|
||||
PRIV(op)->min_flushed_journal_sector = PRIV(op)->max_flushed_journal_sector = 0;
|
||||
PRIV(op)->wait_for = 0;
|
||||
PRIV(op)->op_state = 0;
|
||||
PRIV(op)->pending_ops = 0;
|
||||
@@ -462,11 +464,11 @@ void blockstore_impl_t::reshard_clean_db(pool_id_t pool, uint32_t pg_count, uint
|
||||
|
||||
void blockstore_impl_t::process_list(blockstore_op_t *op)
|
||||
{
|
||||
uint32_t list_pg = op->offset+1;
|
||||
uint32_t pg_count = op->len;
|
||||
uint64_t pg_stripe_size = op->oid.stripe;
|
||||
uint64_t min_inode = op->oid.inode;
|
||||
uint64_t max_inode = op->version;
|
||||
uint32_t list_pg = op->pg_number+1;
|
||||
uint32_t pg_count = op->pg_count;
|
||||
uint64_t pg_stripe_size = op->pg_alignment;
|
||||
uint64_t min_inode = op->min_oid.inode;
|
||||
uint64_t max_inode = op->max_oid.inode;
|
||||
// Check PG
|
||||
if (pg_count != 0 && (pg_stripe_size < MIN_DATA_BLOCK_SIZE || list_pg > pg_count))
|
||||
{
|
||||
@@ -513,7 +515,13 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
|
||||
stable_alloc += clean_db.size();
|
||||
}
|
||||
}
|
||||
else
|
||||
if (op->list_stable_limit > 0)
|
||||
{
|
||||
stable_alloc = op->list_stable_limit;
|
||||
if (stable_alloc > 1024*1024)
|
||||
stable_alloc = 1024*1024;
|
||||
}
|
||||
if (stable_alloc < 32768)
|
||||
{
|
||||
stable_alloc = 32768;
|
||||
}
|
||||
@@ -524,22 +532,22 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
|
||||
FINISH_OP(op);
|
||||
return;
|
||||
}
|
||||
auto max_oid = op->max_oid;
|
||||
bool limited = false;
|
||||
pool_pg_id_t last_shard_id = 0;
|
||||
for (auto shard_it = clean_db_shards.lower_bound(first_shard);
|
||||
shard_it != clean_db_shards.end() && shard_it->first <= last_shard;
|
||||
shard_it++)
|
||||
{
|
||||
auto & clean_db = shard_it->second;
|
||||
auto clean_it = clean_db.begin(), clean_end = clean_db.end();
|
||||
if ((min_inode != 0 || max_inode != 0) && min_inode <= max_inode)
|
||||
if (op->min_oid.inode != 0 || op->min_oid.stripe != 0)
|
||||
{
|
||||
clean_it = clean_db.lower_bound({
|
||||
.inode = min_inode,
|
||||
.stripe = 0,
|
||||
});
|
||||
clean_end = clean_db.upper_bound({
|
||||
.inode = max_inode,
|
||||
.stripe = UINT64_MAX,
|
||||
});
|
||||
clean_it = clean_db.lower_bound(op->min_oid);
|
||||
}
|
||||
if ((max_oid.inode != 0 || max_oid.stripe != 0) && !(max_oid < op->min_oid))
|
||||
{
|
||||
clean_end = clean_db.upper_bound(max_oid);
|
||||
}
|
||||
for (; clean_it != clean_end; clean_it++)
|
||||
{
|
||||
@@ -558,11 +566,29 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
|
||||
.oid = clean_it->first,
|
||||
.version = clean_it->second.version,
|
||||
};
|
||||
if (op->list_stable_limit > 0 && stable_count >= op->list_stable_limit)
|
||||
{
|
||||
if (!limited)
|
||||
{
|
||||
limited = true;
|
||||
max_oid = stable[stable_count-1].oid;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (op->list_stable_limit > 0)
|
||||
{
|
||||
// To maintain the order, we have to include objects in the same range from other shards
|
||||
if (last_shard_id != 0 && last_shard_id != shard_it->first)
|
||||
std::sort(stable, stable+stable_count);
|
||||
if (stable_count > op->list_stable_limit)
|
||||
stable_count = op->list_stable_limit;
|
||||
}
|
||||
last_shard_id = shard_it->first;
|
||||
}
|
||||
if (first_shard != last_shard)
|
||||
if (op->list_stable_limit == 0 && first_shard != last_shard)
|
||||
{
|
||||
// If that's not a per-PG listing, sort clean entries
|
||||
// If that's not a per-PG listing, sort clean entries (already sorted if list_stable_limit != 0)
|
||||
std::sort(stable, stable+stable_count);
|
||||
}
|
||||
int clean_stable_count = stable_count;
|
||||
@@ -571,20 +597,17 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
|
||||
obj_ver_id *unstable = NULL;
|
||||
{
|
||||
auto dirty_it = dirty_db.begin(), dirty_end = dirty_db.end();
|
||||
if ((min_inode != 0 || max_inode != 0) && min_inode <= max_inode)
|
||||
if (op->min_oid.inode != 0 || op->min_oid.stripe != 0)
|
||||
{
|
||||
dirty_it = dirty_db.lower_bound({
|
||||
.oid = {
|
||||
.inode = min_inode,
|
||||
.stripe = 0,
|
||||
},
|
||||
.oid = op->min_oid,
|
||||
.version = 0,
|
||||
});
|
||||
}
|
||||
if ((max_oid.inode != 0 || max_oid.stripe != 0) && !(max_oid < op->min_oid))
|
||||
{
|
||||
dirty_end = dirty_db.upper_bound({
|
||||
.oid = {
|
||||
.inode = max_inode,
|
||||
.stripe = UINT64_MAX,
|
||||
},
|
||||
.oid = max_oid,
|
||||
.version = UINT64_MAX,
|
||||
});
|
||||
}
|
||||
@@ -628,6 +651,11 @@ void blockstore_impl_t::process_list(blockstore_op_t *op)
|
||||
stable[stable_count++] = dirty_it->first;
|
||||
}
|
||||
}
|
||||
if (op->list_stable_limit > 0 && stable_count >= op->list_stable_limit)
|
||||
{
|
||||
// Stop here
|
||||
break;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@@ -93,11 +93,10 @@
|
||||
|
||||
// "VITAstor"
|
||||
#define BLOCKSTORE_META_MAGIC_V1 0x726F747341544956l
|
||||
#define BLOCKSTORE_META_VERSION_V1 1
|
||||
#define BLOCKSTORE_META_FORMAT_V1 1
|
||||
#define BLOCKSTORE_META_FORMAT_V2 2
|
||||
|
||||
// metadata header (superblock)
|
||||
// FIXME: After adding the OSD superblock, add a key to metadata
|
||||
// and journal headers to check if they belong to the same OSD
|
||||
struct __attribute__((__packed__)) blockstore_meta_header_v1_t
|
||||
{
|
||||
uint64_t zero;
|
||||
@@ -108,14 +107,29 @@ struct __attribute__((__packed__)) blockstore_meta_header_v1_t
|
||||
uint32_t bitmap_granularity;
|
||||
};
|
||||
|
||||
struct __attribute__((__packed__)) blockstore_meta_header_v2_t
|
||||
{
|
||||
uint64_t zero;
|
||||
uint64_t magic;
|
||||
uint64_t version;
|
||||
uint32_t meta_block_size;
|
||||
uint32_t data_block_size;
|
||||
uint32_t bitmap_granularity;
|
||||
uint32_t data_csum_type;
|
||||
uint32_t csum_block_size;
|
||||
uint32_t header_csum;
|
||||
};
|
||||
|
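A small sketch (not part of the diff) of the header_csum convention used by the init code later in this diff: the CRC is taken over the whole v2 header with header_csum temporarily zeroed:

static uint32_t meta_header_csum(blockstore_meta_header_v2_t *hdr)
{
    uint32_t saved = hdr->header_csum;
    hdr->header_csum = 0;
    uint32_t csum = crc32c(0, hdr, sizeof(*hdr));   // same call as in blockstore_init
    hdr->header_csum = saved;
    return csum;
}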
||||
// 32 bytes = 24 bytes + block bitmap (4 bytes by default) + external attributes (also bitmap, 4 bytes by default)
|
||||
// per "clean" entry on disk with fixed metadata tables
|
||||
// FIXME: maybe add crc32's to metadata
|
||||
struct __attribute__((__packed__)) clean_disk_entry
|
||||
{
|
||||
object_id oid;
|
||||
uint64_t version;
|
||||
uint8_t bitmap[];
|
||||
// Two more fields come after bitmap in metadata version 2:
|
||||
// uint32_t data_csum[];
|
||||
// uint32_t entry_csum;
|
||||
};
|
||||
|
||||
// 32 = 16 + 16 bytes per "clean" entry in memory (object_id => clean_entry)
|
||||
@@ -125,7 +139,7 @@ struct __attribute__((__packed__)) clean_entry
|
||||
uint64_t location;
|
||||
};
|
||||
|
||||
// 64 = 24 + 40 bytes per dirty entry in memory (obj_ver_id => dirty_entry)
|
||||
// 64 = 24 + 40 bytes per dirty entry in memory (obj_ver_id => dirty_entry). Plus checksums
|
||||
struct __attribute__((__packed__)) dirty_entry
|
||||
{
|
||||
uint32_t state;
|
||||
@@ -134,7 +148,7 @@ struct __attribute__((__packed__)) dirty_entry
|
||||
uint32_t offset; // data offset within object (stripe)
|
||||
uint32_t len; // data length
|
||||
uint64_t journal_sector; // journal sector used for this entry
|
||||
void* bitmap; // either external bitmap itself when it fits, or a pointer to it when it doesn't
|
||||
void* dyn_data; // dynamic data: external bitmap and data block checksums. may be a pointer to the in-memory journal
|
||||
};
|
||||
|
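A hedged illustration of the dyn_data layout implied by the journal replay code later in this diff: when the dynamic data does not fit into the pointer itself, it is heap-allocated with a leading int reference counter, followed by the external bitmap and the per-block checksums:

#include <cstdint>
#include <cstdlib>
#include <cstring>

// Mirrors the malloc_or_die()/refcount pattern shown in blockstore_init below.
static void *make_dyn_data(const void *src, size_t dyn_size)
{
    void *dyn = malloc(dyn_size + sizeof(int));          // the project uses malloc_or_die()
    *((int*)dyn) = 1;                                     // initial reference count
    memcpy((uint8_t*)dyn + sizeof(int), src, dyn_size);  // bitmap + checksums follow
    return dyn;
}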
||||
// - Sync must be submitted after previous writes/deletes (not before!)
|
||||
@@ -163,12 +177,23 @@ struct __attribute__((__packed__)) dirty_entry
|
||||
// Suspend operation until there is some free space on the data device
|
||||
#define WAIT_FREE 5
|
||||
|
||||
struct fulfill_read_t
|
||||
struct used_clean_obj_t
|
||||
{
|
||||
uint64_t offset, len;
|
||||
uint64_t journal_sector; // sector+1 if used and !journal.inmemory, otherwise 0
|
||||
int refs;
|
||||
bool was_freed; // was freed by a parallel flush?
|
||||
bool was_changed; // was changed by a parallel flush?
|
||||
};
|
||||
|
||||
// https://github.com/algorithm-ninja/cpp-btree
|
||||
// https://github.com/greg7mdp/sparsepp/ was used previously, but it was TERRIBLY slow after resizing
|
||||
// with sparsepp, random reads dropped to ~700 iops very fast with just as much as ~32k objects in the DB
|
||||
typedef btree::btree_map<object_id, clean_entry> blockstore_clean_db_t;
|
||||
typedef std::map<obj_ver_id, dirty_entry> blockstore_dirty_db_t;
|
||||
|
||||
#include "blockstore_init.h"
|
||||
|
||||
#include "blockstore_flush.h"
|
||||
|
||||
#define PRIV(op) ((blockstore_op_private_t*)(op)->private_data)
|
||||
#define FINISH_OP(op) PRIV(op)->~blockstore_op_private_t(); std::function<void (blockstore_op_t*)>(op->callback)(op)
|
||||
|
||||
@@ -181,10 +206,11 @@ struct blockstore_op_private_t
|
||||
int op_state;
|
||||
|
||||
// Read
|
||||
std::vector<fulfill_read_t> read_vec;
|
||||
uint64_t clean_block_used;
|
||||
std::vector<copy_buffer_t> read_vec;
|
||||
|
||||
// Sync, write
|
||||
int min_flushed_journal_sector, max_flushed_journal_sector;
|
||||
uint64_t min_flushed_journal_sector, max_flushed_journal_sector;
|
||||
|
||||
// Write
|
||||
struct iovec iov_zerofill[3];
|
||||
@@ -194,19 +220,8 @@ struct blockstore_op_private_t
|
||||
|
||||
// Sync
|
||||
std::vector<obj_ver_id> sync_big_writes, sync_small_writes;
|
||||
int sync_small_checked, sync_big_checked;
|
||||
};
|
||||
|
||||
// https://github.com/algorithm-ninja/cpp-btree
|
||||
// https://github.com/greg7mdp/sparsepp/ was used previously, but it was TERRIBLY slow after resizing
|
||||
// with sparsepp, random reads dropped to ~700 iops very fast with just as much as ~32k objects in the DB
|
||||
typedef btree::btree_map<object_id, clean_entry> blockstore_clean_db_t;
|
||||
typedef std::map<obj_ver_id, dirty_entry> blockstore_dirty_db_t;
|
||||
|
||||
#include "blockstore_init.h"
|
||||
|
||||
#include "blockstore_flush.h"
|
||||
|
||||
typedef uint32_t pool_id_t;
|
||||
typedef uint64_t pool_pg_id_t;
|
||||
|
||||
@@ -253,7 +268,7 @@ class blockstore_impl_t
|
||||
|
||||
std::map<pool_id_t, pool_shard_settings_t> clean_db_settings;
|
||||
std::map<pool_pg_id_t, blockstore_clean_db_t> clean_db_shards;
|
||||
uint8_t *clean_bitmap = NULL;
|
||||
uint8_t *clean_bitmaps = NULL;
|
||||
blockstore_dirty_db_t dirty_db;
|
||||
std::vector<blockstore_op_t*> submit_queue;
|
||||
std::vector<obj_ver_id> unsynced_big_writes, unsynced_small_writes;
|
||||
@@ -267,6 +282,10 @@ class blockstore_impl_t
|
||||
journal_flusher_t *flusher;
|
||||
int big_to_flush = 0;
|
||||
int write_iodepth = 0;
|
||||
bool alloc_dyn_data = false;
|
||||
|
||||
// clean data blocks referenced by read operations
|
||||
std::map<uint64_t, used_clean_obj_t> used_clean_objects;
|
||||
|
||||
bool live = false, queue_stall = false;
|
||||
ring_loop_t *ringloop;
|
||||
@@ -310,8 +329,30 @@ class blockstore_impl_t
|
||||
|
||||
// Read
|
||||
int dequeue_read(blockstore_op_t *read_op);
|
||||
int fulfill_read(blockstore_op_t *read_op, uint64_t &fulfilled, uint32_t item_start, uint32_t item_end,
|
||||
uint32_t item_state, uint64_t item_version, uint64_t item_location, uint64_t journal_sector);
|
||||
void find_holes(std::vector<copy_buffer_t> & read_vec, uint32_t item_start, uint32_t item_end,
|
||||
std::function<int(int, bool, uint32_t, uint32_t)> callback);
|
||||
int fulfill_read(blockstore_op_t *read_op,
|
||||
uint64_t &fulfilled, uint32_t item_start, uint32_t item_end,
|
||||
uint32_t item_state, uint64_t item_version, uint64_t item_location,
|
||||
uint64_t journal_sector, uint8_t *csum, int *dyn_data);
|
||||
bool fulfill_clean_read(blockstore_op_t *read_op, uint64_t & fulfilled,
|
||||
uint8_t *clean_entry_bitmap, int *dyn_data,
|
||||
uint32_t item_start, uint32_t item_end, uint64_t clean_loc, uint64_t clean_ver);
|
||||
int fill_partial_checksum_blocks(std::vector<copy_buffer_t> & rv, uint64_t & fulfilled,
|
||||
uint8_t *clean_entry_bitmap, int *dyn_data, bool from_journal, uint8_t *read_buf, uint64_t read_offset, uint64_t read_end);
|
||||
int pad_journal_read(std::vector<copy_buffer_t> & rv, copy_buffer_t & cp,
|
||||
uint64_t dirty_offset, uint64_t dirty_end, uint64_t dirty_loc, uint8_t *csum_ptr, int *dyn_data,
|
||||
uint64_t offset, uint64_t submit_len, uint64_t & blk_begin, uint64_t & blk_end, uint8_t* & blk_buf);
|
||||
bool read_range_fulfilled(std::vector<copy_buffer_t> & rv, uint64_t & fulfilled, uint8_t *read_buf,
|
||||
uint8_t *clean_entry_bitmap, uint32_t item_start, uint32_t item_end);
|
||||
bool read_checksum_block(blockstore_op_t *op, int rv_pos, uint64_t &fulfilled, uint64_t clean_loc);
|
||||
uint8_t* read_clean_meta_block(blockstore_op_t *read_op, uint64_t clean_loc, int rv_pos);
|
||||
bool verify_padded_checksums(uint8_t *clean_entry_bitmap, uint8_t *csum_buf, uint32_t offset,
|
||||
iovec *iov, int n_iov, std::function<void(uint32_t, uint32_t, uint32_t)> bad_block_cb);
|
||||
bool verify_journal_checksums(uint8_t *csums, uint32_t offset,
|
||||
iovec *iov, int n_iov, std::function<void(uint32_t, uint32_t, uint32_t)> bad_block_cb);
|
||||
bool verify_clean_padded_checksums(blockstore_op_t *op, uint64_t clean_loc, uint8_t *dyn_data, bool from_journal,
|
||||
iovec *iov, int n_iov, std::function<void(uint32_t, uint32_t, uint32_t)> bad_block_cb);
|
||||
int fulfill_read_push(blockstore_op_t *op, void *buf, uint64_t offset, uint64_t len,
|
||||
uint32_t item_state, uint64_t item_version);
|
||||
void handle_read_event(ring_data_t *data, blockstore_op_t *op);
|
||||
@@ -342,6 +383,7 @@ class blockstore_impl_t
|
||||
int continue_rollback(blockstore_op_t *op);
|
||||
void mark_rolled_back(const obj_ver_id & ov);
|
||||
void erase_dirty(blockstore_dirty_db_t::iterator dirty_start, blockstore_dirty_db_t::iterator dirty_end, uint64_t clean_loc);
|
||||
void free_dirty_dyn_data(dirty_entry & e);
|
||||
|
||||
// List
|
||||
void process_list(blockstore_op_t *op);
|
||||
|
@@ -77,13 +77,20 @@ resume_1:
|
||||
if (iszero((uint64_t*)metadata_buffer, bs->dsk.meta_block_size / sizeof(uint64_t)))
|
||||
{
|
||||
{
|
||||
blockstore_meta_header_v1_t *hdr = (blockstore_meta_header_v1_t *)metadata_buffer;
|
||||
blockstore_meta_header_v2_t *hdr = (blockstore_meta_header_v2_t *)metadata_buffer;
|
||||
hdr->zero = 0;
|
||||
hdr->magic = BLOCKSTORE_META_MAGIC_V1;
|
||||
hdr->version = BLOCKSTORE_META_VERSION_V1;
|
||||
hdr->version = bs->dsk.meta_format;
|
||||
hdr->meta_block_size = bs->dsk.meta_block_size;
|
||||
hdr->data_block_size = bs->dsk.data_block_size;
|
||||
hdr->bitmap_granularity = bs->dsk.bitmap_granularity;
|
||||
if (bs->dsk.meta_format >= BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
hdr->data_csum_type = bs->dsk.data_csum_type;
|
||||
hdr->csum_block_size = bs->dsk.csum_block_size;
|
||||
hdr->header_csum = 0;
|
||||
hdr->header_csum = crc32c(0, hdr, sizeof(*hdr));
|
||||
}
|
||||
}
|
||||
if (bs->readonly)
|
||||
{
|
||||
@@ -109,28 +116,62 @@ resume_1:
|
||||
}
|
||||
else
|
||||
{
|
||||
blockstore_meta_header_v1_t *hdr = (blockstore_meta_header_v1_t *)metadata_buffer;
|
||||
if (hdr->zero != 0 ||
|
||||
hdr->magic != BLOCKSTORE_META_MAGIC_V1 ||
|
||||
hdr->version != BLOCKSTORE_META_VERSION_V1)
|
||||
blockstore_meta_header_v2_t *hdr = (blockstore_meta_header_v2_t *)metadata_buffer;
|
||||
if (hdr->zero != 0 || hdr->magic != BLOCKSTORE_META_MAGIC_V1 || hdr->version < BLOCKSTORE_META_FORMAT_V1)
|
||||
{
|
||||
printf(
|
||||
"Metadata is corrupt or old version.\n"
|
||||
" If this is a new OSD please zero out the metadata area before starting it.\n"
|
||||
" If you need to upgrade from 0.5.x please request it via the issue tracker.\n"
|
||||
"Metadata is corrupt or too old (pre-0.6.x).\n"
|
||||
" If this is a new OSD, please zero out the metadata area before starting it.\n"
|
||||
" If you need to upgrade from 0.5.x, convert metadata with vitastor-disk.\n"
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
if (hdr->version == BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
uint32_t csum = hdr->header_csum;
|
||||
hdr->header_csum = 0;
|
||||
if (crc32c(0, hdr, sizeof(*hdr)) != csum)
|
||||
{
|
||||
printf("Metadata header is corrupt (checksum mismatch).\n");
|
||||
exit(1);
|
||||
}
|
||||
hdr->header_csum = csum;
|
||||
bs->dsk.meta_format = BLOCKSTORE_META_FORMAT_V2;
|
||||
}
|
||||
else if (hdr->version == BLOCKSTORE_META_FORMAT_V1)
|
||||
{
|
||||
hdr->data_csum_type = 0;
|
||||
hdr->csum_block_size = 0;
|
||||
hdr->header_csum = 0;
|
||||
// Enable compatibility mode - entries without checksums
|
||||
bs->dsk.clean_entry_size = sizeof(clean_disk_entry) + bs->dsk.clean_entry_bitmap_size*2;
|
||||
bs->dsk.meta_len = (1 + (bs->dsk.block_count - 1 + bs->dsk.meta_block_size / bs->dsk.clean_entry_size)
|
||||
/ (bs->dsk.meta_block_size / bs->dsk.clean_entry_size)) * bs->dsk.meta_block_size;
|
||||
bs->dsk.meta_format = BLOCKSTORE_META_FORMAT_V1;
|
||||
printf("Warning: Starting with metadata in the old format without checksums, as stored on disk\n");
|
||||
}
|
||||
else if (hdr->version > BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
printf(
|
||||
"Metadata format is too new for me (stored version is %lu, max supported %u).\n",
|
||||
hdr->version, BLOCKSTORE_META_FORMAT_V2
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
if (hdr->meta_block_size != bs->dsk.meta_block_size ||
|
||||
hdr->data_block_size != bs->dsk.data_block_size ||
|
||||
hdr->bitmap_granularity != bs->dsk.bitmap_granularity)
|
||||
hdr->bitmap_granularity != bs->dsk.bitmap_granularity ||
|
||||
hdr->data_csum_type != bs->dsk.data_csum_type ||
|
||||
hdr->csum_block_size != bs->dsk.csum_block_size)
|
||||
{
|
||||
printf(
|
||||
"Configuration stored in metadata superblock"
|
||||
" (meta_block_size=%u, data_block_size=%u, bitmap_granularity=%u)"
|
||||
" differs from OSD configuration (%lu/%u/%lu).\n",
|
||||
" (meta_block_size=%u, data_block_size=%u, bitmap_granularity=%u, data_csum_type=%u, csum_block_size=%u)"
|
||||
" differs from OSD configuration (%lu/%u/%lu, %u/%u).\n",
|
||||
hdr->meta_block_size, hdr->data_block_size, hdr->bitmap_granularity,
|
||||
bs->dsk.meta_block_size, bs->dsk.data_block_size, bs->dsk.bitmap_granularity
|
||||
hdr->data_csum_type, hdr->csum_block_size,
|
||||
bs->dsk.meta_block_size, bs->dsk.data_block_size, bs->dsk.bitmap_granularity,
|
||||
bs->dsk.data_csum_type, bs->dsk.csum_block_size
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
@@ -279,12 +320,22 @@ bool blockstore_init_meta::handle_meta_block(uint8_t *buf, uint64_t entries_per_
|
||||
for (uint64_t i = 0; i < max_i; i++)
|
||||
{
|
||||
clean_disk_entry *entry = (clean_disk_entry*)(buf + i*bs->dsk.clean_entry_size);
|
||||
if (!bs->inmemory_meta && bs->dsk.clean_entry_bitmap_size)
|
||||
{
|
||||
memcpy(bs->clean_bitmap + (done_cnt+i)*2*bs->dsk.clean_entry_bitmap_size, &entry->bitmap, 2*bs->dsk.clean_entry_bitmap_size);
|
||||
}
|
||||
if (entry->oid.inode > 0)
|
||||
{
|
||||
if (bs->dsk.meta_format >= BLOCKSTORE_META_FORMAT_V2)
|
||||
{
|
||||
// Check entry crc32
|
||||
uint32_t *entry_csum = (uint32_t*)((uint8_t*)entry + bs->dsk.clean_entry_size - 4);
|
||||
if (*entry_csum != crc32c(0, entry, bs->dsk.clean_entry_size - 4))
|
||||
{
|
||||
printf("Metadata entry %lu is corrupt (checksum mismatch), skipping\n", done_cnt+i);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (!bs->inmemory_meta && bs->dsk.clean_entry_bitmap_size)
|
||||
{
|
||||
memcpy(bs->clean_bitmaps + (done_cnt+i) * 2 * bs->dsk.clean_entry_bitmap_size, &entry->bitmap, 2 * bs->dsk.clean_entry_bitmap_size);
|
||||
}
|
||||
auto & clean_db = bs->clean_db_shard(entry->oid);
|
||||
auto clean_it = clean_db.find(entry->oid);
|
||||
if (clean_it == clean_db.end() || clean_it->second.version < entry->version)
|
||||
@@ -440,7 +491,9 @@ resume_1:
|
||||
.size = sizeof(journal_entry_start),
|
||||
.reserved = 0,
|
||||
.journal_start = bs->journal.block_size,
|
||||
.version = JOURNAL_VERSION,
|
||||
.version = JOURNAL_VERSION_V2,
|
||||
.data_csum_type = bs->dsk.data_csum_type,
|
||||
.csum_block_size = bs->dsk.csum_block_size,
|
||||
};
|
||||
((journal_entry_start*)submitted_buf)->crc32 = je_crc32((journal_entry*)submitted_buf);
|
||||
if (bs->readonly)
|
||||
@@ -492,18 +545,36 @@ resume_1:
|
||||
if (je_start->magic != JOURNAL_MAGIC ||
|
||||
je_start->type != JE_START ||
|
||||
je_crc32((journal_entry*)je_start) != je_start->crc32 ||
|
||||
je_start->size != sizeof(journal_entry_start) && je_start->size != JE_START_LEGACY_SIZE)
|
||||
je_start->size != JE_START_V0_SIZE && je_start->size != JE_START_V1_SIZE && je_start->size != JE_START_V2_SIZE)
|
||||
{
|
||||
// Entry is corrupt
|
||||
fprintf(stderr, "First entry of the journal is corrupt\n");
|
||||
fprintf(stderr, "First entry of the journal is corrupt or unsupported\n");
|
||||
exit(1);
|
||||
}
|
||||
if (je_start->size == JE_START_LEGACY_SIZE || je_start->version != JOURNAL_VERSION)
|
||||
if (je_start->size == JE_START_V0_SIZE ||
|
||||
(je_start->version != JOURNAL_VERSION_V1 || je_start->size != JE_START_V1_SIZE) &&
|
||||
(je_start->version != JOURNAL_VERSION_V2 || je_start->size != JE_START_V2_SIZE))
|
||||
{
|
||||
fprintf(
|
||||
stderr, "The code only supports journal version %d, but it is %lu on disk."
|
||||
" Please use the previous version to flush the journal before upgrading OSD\n",
|
||||
JOURNAL_VERSION, je_start->size == JE_START_LEGACY_SIZE ? 0 : je_start->version
|
||||
stderr, "The code only supports journal versions 2 and 1, but it is %lu on disk."
|
||||
" Please use vitastor-disk to rewrite the journal\n",
|
||||
je_start->size == JE_START_V0_SIZE ? 0 : je_start->version
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
if (je_start->version == JOURNAL_VERSION_V1)
|
||||
{
|
||||
je_start->data_csum_type = 0;
|
||||
je_start->csum_block_size = 0;
|
||||
}
|
||||
if (je_start->data_csum_type != bs->dsk.data_csum_type ||
|
||||
je_start->csum_block_size != bs->dsk.csum_block_size)
|
||||
{
|
||||
printf(
|
||||
"Configuration stored in journal superblock (data_csum_type=%u, csum_block_size=%u)"
|
||||
" differs from OSD configuration (%u/%u).\n",
|
||||
je_start->data_csum_type, je_start->csum_block_size,
|
||||
bs->dsk.data_csum_type, bs->dsk.csum_block_size
|
||||
);
|
||||
exit(1);
|
||||
}
|
||||
@@ -705,11 +776,14 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
snprintf(err, 1024, "BUG: calculated journal data offset (%08lx) != stored journal data offset (%08lx)", location, je->small_write.data_offset);
|
||||
throw std::runtime_error(err);
|
||||
}
|
||||
uint32_t data_crc32 = 0;
|
||||
small_write_data.clear();
|
||||
if (location >= done_pos && location+je->small_write.len <= done_pos+len)
|
||||
{
|
||||
// data is within this buffer
|
||||
data_crc32 = crc32c(0, (uint8_t*)buf + location - done_pos, je->small_write.len);
|
||||
small_write_data.push_back((iovec){
|
||||
.iov_base = (uint8_t*)buf + location - done_pos,
|
||||
.iov_len = je->small_write.len,
|
||||
});
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -724,7 +798,10 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
? location+je->small_write.len : done[i].pos+done[i].len);
|
||||
uint64_t part_begin = (location < done[i].pos ? done[i].pos : location);
|
||||
covered += part_end - part_begin;
|
||||
data_crc32 = crc32c(data_crc32, (uint8_t*)done[i].buf + part_begin - done[i].pos, part_end - part_begin);
|
||||
small_write_data.push_back((iovec){
|
||||
.iov_base = (uint8_t*)done[i].buf + part_begin - done[i].pos,
|
||||
.iov_len = part_end - part_begin,
|
||||
});
|
||||
}
|
||||
}
|
||||
if (covered < je->small_write.len)
|
||||
@@ -734,12 +811,102 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
return 2;
|
||||
}
|
||||
}
|
||||
if (data_crc32 != je->small_write.crc32_data)
|
||||
bool data_csum_valid = true;
|
||||
if (!bs->dsk.csum_block_size)
|
||||
{
|
||||
uint32_t data_crc32 = 0;
|
||||
for (auto & sd: small_write_data)
|
||||
{
|
||||
data_crc32 = crc32c(data_crc32, sd.iov_base, sd.iov_len);
|
||||
}
|
||||
data_csum_valid = data_crc32 == je->small_write.crc32_data;
|
||||
if (!data_csum_valid)
|
||||
{
|
||||
printf(
|
||||
"Journal entry data is corrupt for small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u - data crc32 %x != %x\n",
|
||||
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
|
||||
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
|
||||
je->small_write.offset, je->small_write.len,
|
||||
data_crc32, je->small_write.crc32_data
|
||||
);
|
||||
}
|
||||
}
|
||||
else if (je->small_write.len > 0)
|
||||
{
|
||||
// FIXME: deduplicate with disk_tool_journal.cpp
|
||||
// like in enqueue_write()
|
||||
uint32_t start = je->small_write.offset / bs->dsk.csum_block_size;
|
||||
uint32_t end = (je->small_write.offset+je->small_write.len-1) / bs->dsk.csum_block_size;
|
||||
uint32_t data_csum_size = (end-start+1) * (bs->dsk.data_csum_type & 0xFF);
|
||||
uint32_t required_size = sizeof(journal_entry_small_write) + bs->dsk.clean_entry_bitmap_size + data_csum_size;
|
||||
if (je->size != required_size)
|
||||
{
|
||||
printf(
|
||||
"Journal entry data has invalid size for small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u - should be %u bytes but is %u bytes\n",
|
||||
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
|
||||
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
|
||||
je->small_write.offset, je->small_write.len,
|
||||
required_size, je->size
|
||||
);
|
||||
data_csum_valid = false;
|
||||
}
|
||||
else
|
||||
{
|
||||
int sd_num = 0;
|
||||
size_t sd_pos = 0;
|
||||
uint32_t *block_csums = (uint32_t*)((uint8_t*)je + sizeof(journal_entry_small_write) + bs->dsk.clean_entry_bitmap_size);
|
||||
for (uint32_t pos = start; pos <= end; pos++, block_csums++)
|
||||
{
|
||||
size_t block_left = (pos == start
|
||||
? (start == end
|
||||
? je->small_write.len
|
||||
: bs->dsk.csum_block_size - je->small_write.offset%bs->dsk.csum_block_size)
|
||||
: (pos < end
|
||||
? bs->dsk.csum_block_size
|
||||
: (je->small_write.offset + je->small_write.len)%bs->dsk.csum_block_size));
|
||||
if (pos > start && pos == end && block_left == 0)
|
||||
{
|
||||
// full last block
|
||||
block_left = bs->dsk.csum_block_size;
|
||||
}
|
||||
uint32_t block_crc32 = 0;
|
||||
while (block_left > 0)
|
||||
{
|
||||
assert(sd_num < small_write_data.size());
|
||||
if (small_write_data[sd_num].iov_len >= sd_pos+block_left)
|
||||
{
|
||||
block_crc32 = crc32c(block_crc32, (uint8_t*)small_write_data[sd_num].iov_base+sd_pos, block_left);
|
||||
sd_pos += block_left;
|
||||
break;
|
||||
}
|
||||
else
|
||||
{
|
||||
block_crc32 = crc32c(block_crc32, (uint8_t*)small_write_data[sd_num].iov_base+sd_pos, small_write_data[sd_num].iov_len-sd_pos);
|
||||
block_left -= (small_write_data[sd_num].iov_len-sd_pos);
|
||||
sd_pos = 0;
|
||||
sd_num++;
|
||||
}
|
||||
}
|
||||
if (block_crc32 != *block_csums)
|
||||
{
|
||||
printf(
|
||||
"Journal entry data is corrupt for small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u - block %u crc32 %x != %x\n",
|
||||
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
|
||||
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
|
||||
je->small_write.offset, je->small_write.len,
|
||||
pos, block_crc32, *block_csums
|
||||
);
|
||||
data_csum_valid = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!data_csum_valid)
|
||||
{
|
||||
// journal entry is corrupt, stop here
|
||||
// interesting thing is that we must clear the corrupt entry if we're not readonly,
|
||||
// because we don't write next entries in the same journal block
|
||||
printf("Journal entry data is corrupt (data crc32 %x != %x)\n", data_crc32, je->small_write.crc32_data);
|
||||
memset((uint8_t*)buf + proc_pos - done_pos + pos, 0, bs->journal.block_size - pos);
|
||||
bs->journal.next_free = prev_free;
|
||||
init_write_buf = (uint8_t*)buf + proc_pos - done_pos;
|
||||
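A worked example of the per-block checksum arithmetic in the small_write verification above, with assumed csum_block_size = 4096 and crc32c (4-byte checksums):

// offset = 1024, len = 8192:
//   start = 1024/4096 = 0, end = (1024+8192-1)/4096 = 2  ->  3 checksummed blocks
//   data_csum_size = 3 * 4 = 12, so je->size must equal
//   sizeof(journal_entry_small_write) + clean_entry_bitmap_size + 12
//   block_left per block: 3072 (first, partial), 4096 (middle), 1024 (last, partial)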
@@ -755,11 +922,14 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
.oid = je->small_write.oid,
|
||||
.version = je->small_write.version,
|
||||
};
|
||||
void *bmp = NULL;
|
||||
void *bmp_from = (uint8_t*)je + sizeof(journal_entry_small_write);
|
||||
if (bs->dsk.clean_entry_bitmap_size <= sizeof(void*))
|
||||
uint64_t dyn_size = bs->dsk.dirty_dyn_size(je->small_write.offset, je->small_write.len);
|
||||
void *dyn = NULL;
|
||||
void *dyn_from = (uint8_t*)je + sizeof(journal_entry_small_write);
|
||||
if (!bs->alloc_dyn_data)
|
||||
{
|
||||
memcpy(&bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
|
||||
// Bitmap without checksum is only 4 bytes for 128k objects, save it inline
|
||||
// It can even contain 4 byte bitmap + 4 byte CRC32 for 4 kb writes :)
|
||||
memcpy(&dyn, dyn_from, dyn_size);
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -767,8 +937,9 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
|
||||
// allocations for entry bitmaps. This can only be fixed by using
|
||||
// a patched map with dynamic entry size, but not the btree_map,
|
||||
// because it doesn't keep iterators valid all the time.
|
||||
bmp = malloc_or_die(bs->dsk.clean_entry_bitmap_size);
|
||||
memcpy(bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
|
||||
dyn = malloc_or_die(dyn_size+sizeof(int));
|
||||
*((int*)dyn) = 1;
|
||||
memcpy((uint8_t*)dyn+sizeof(int), dyn_from, dyn_size);
|
||||
}
            bs->dirty_db.emplace(ov, (dirty_entry){
                .state = (BS_ST_SMALL_WRITE | BS_ST_SYNCED),
@@ -777,7 +948,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
                .offset = je->small_write.offset,
                .len = je->small_write.len,
                .journal_sector = proc_pos,
                .bitmap = bmp,
                .dyn_data = dyn,
            });
            bs->journal.used_sectors[proc_pos]++;
#ifdef BLOCKSTORE_DEBUG
@@ -836,11 +1007,13 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
                .oid = je->big_write.oid,
                .version = je->big_write.version,
            };
            void *bmp = NULL;
            void *bmp_from = (uint8_t*)je + sizeof(journal_entry_big_write);
            if (bs->dsk.clean_entry_bitmap_size <= sizeof(void*))
            uint64_t dyn_size = bs->dsk.dirty_dyn_size(je->big_write.offset, je->big_write.len);
            void *dyn = NULL;
            void *dyn_from = (uint8_t*)je + sizeof(journal_entry_big_write);
            if (!bs->alloc_dyn_data)
            {
                memcpy(&bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
                // Bitmap without checksum is only 4 bytes for 128k objects, save it inline
                memcpy(&dyn, dyn_from, dyn_size);
            }
            else
            {
@@ -848,8 +1021,9 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
                // allocations for entry bitmaps. This can only be fixed by using
                // a patched map with dynamic entry size, but not the btree_map,
                // because it doesn't keep iterators valid all the time.
                bmp = malloc_or_die(bs->dsk.clean_entry_bitmap_size);
                memcpy(bmp, bmp_from, bs->dsk.clean_entry_bitmap_size);
                dyn = malloc_or_die(dyn_size+sizeof(int));
                *((int*)dyn) = 1;
                memcpy((uint8_t*)dyn+sizeof(int), dyn_from, dyn_size);
            }
            auto dirty_it = bs->dirty_db.emplace(ov, (dirty_entry){
                .state = (BS_ST_BIG_WRITE | BS_ST_SYNCED),
@@ -858,7 +1032,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
                .offset = je->big_write.offset,
                .len = je->big_write.len,
                .journal_sector = proc_pos,
                .bitmap = bmp,
                .dyn_data = dyn,
            }).first;
            if (bs->data_alloc->get(je->big_write.location >> bs->dsk.block_order))
            {
@@ -50,6 +50,7 @@ class blockstore_init_journal
    uint64_t next_free;
    std::vector<bs_init_journal_done> done;
    std::vector<obj_ver_id> double_allocs;
    std::vector<iovec> small_write_data;
    uint64_t journal_pos = 0;
    uint64_t continue_pos = 0;
    void *init_write_buf = NULL;

@@ -17,6 +17,7 @@ blockstore_journal_check_t::blockstore_journal_check_t(blockstore_impl_t *bs)
// Check if we can write <required> entries of <size> bytes and <data_after> data bytes after them to the journal
int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries_required, int size, int data_after)
{
    uint64_t prev_next = next_sector;
    int required = entries_required;
    while (1)
    {
@@ -35,11 +36,19 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
            }
            required -= fits;
            next_in_pos += fits * size;
            sectors_to_write++;
            if (next_sector != prev_next || !sectors_to_write)
            {
                // Except the previous call to this function
                sectors_to_write++;
            }
        }
        else if (bs->journal.sector_info[next_sector].dirty)
        {
            sectors_to_write++;
            if (next_sector != prev_next || !sectors_to_write)
            {
                // Except the previous call to this function
                sectors_to_write++;
            }
        }
        if (required <= 0)
        {
@@ -189,6 +198,7 @@ void blockstore_impl_t::prepare_journal_sector_write(int cur_sector, blockstore_
    priv->pending_ops++;
    if (!priv->min_flushed_journal_sector)
        priv->min_flushed_journal_sector = 1+cur_sector;
    assert(priv->min_flushed_journal_sector <= journal.sector_count);
    priv->max_flushed_journal_sector = 1+cur_sector;
}

@@ -236,14 +246,6 @@ journal_t::~journal_t()
uint64_t journal_t::get_trim_pos()
{
    auto journal_used_it = used_sectors.lower_bound(used_start);
#ifdef BLOCKSTORE_DEBUG
    printf(
        "Trimming journal (used_start=%08lx, next_free=%08lx, dirty_start=%08lx, new_start=%08lx, new_refcount=%ld)\n",
        used_start, next_free, dirty_start,
        journal_used_it == used_sectors.end() ? 0 : journal_used_it->first,
        journal_used_it == used_sectors.end() ? 0 : journal_used_it->second
    );
#endif
    if (journal_used_it == used_sectors.end())
    {
        // Journal is cleared to its end, restart from the beginning
@@ -256,12 +258,26 @@ uint64_t journal_t::get_trim_pos()
        else
        {
            // next_free does not need updating during trim
#ifdef BLOCKSTORE_DEBUG
            printf(
                "Trimming journal (used_start=%08lx, next_free=%08lx, dirty_start=%08lx, new_start=%08lx, new_refcount=%ld)\n",
                used_start, next_free, dirty_start,
                journal_used_it->first, journal_used_it->second
            );
#endif
            return journal_used_it->first;
        }
    }
    else if (journal_used_it->first > used_start)
    {
        // Journal is cleared up to <journal_used_it>
#ifdef BLOCKSTORE_DEBUG
        printf(
            "Trimming journal (used_start=%08lx, next_free=%08lx, dirty_start=%08lx, new_start=%08lx, new_refcount=%ld)\n",
            used_start, next_free, dirty_start,
            journal_used_it->first, journal_used_it->second
        );
#endif
        return journal_used_it->first;
    }
    // Can't trim journal
@@ -283,3 +299,31 @@ void journal_t::dump_diagnostics()
        journal_used_it == used_sectors.end() ? 0 : journal_used_it->second
    );
}

static uint64_t zero_page[4096];

// CRC of <buf> as if it was surrounded by <left_pad> zero bytes on the left and <right_pad> zero bytes on the right
uint32_t crc32c_pad(uint32_t prev_crc, const void *buf, size_t len, size_t left_pad, size_t right_pad)
{
    uint32_t r = prev_crc;
    while (left_pad >= 4096)
    {
        r = crc32c(r, zero_page, 4096);
        left_pad -= 4096;
    }
    if (left_pad > 0)
        r = crc32c(r, zero_page, left_pad);
    r = crc32c(r, buf, len);
    while (right_pad >= 4096)
    {
        r = crc32c(r, zero_page, 4096);
        right_pad -= 4096;
    }
    if (right_pad > 0)
        r = crc32c(r, zero_page, right_pad);
    return r;
}

// Same signature, but the padding is ignored and only the data itself is checksummed
uint32_t crc32c_nopad(uint32_t prev_crc, const void *buf, size_t len, size_t left_pad, size_t right_pad)
{
    return crc32c(0, buf, len);
}
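A quick way to read crc32c_pad(): it yields the same CRC as checksumming an explicitly zero-padded buffer. The sketch below only illustrates that equivalence for a write covering bytes [1024, 2048) of a 4 KB checksum block; it assumes crc32c() is the project's checksum routine provided elsewhere and crc32c_pad() is the function defined above.

```cpp
#include <cstdint>
#include <cstring>

uint32_t crc32c(uint32_t prev_crc, const void *buf, size_t len);                                     // provided elsewhere in the project
uint32_t crc32c_pad(uint32_t prev_crc, const void *buf, size_t len, size_t left_pad, size_t right_pad); // defined above

// Compare the CRC of an explicitly zero-padded 4096-byte block with crc32c_pad()
// for a 1024-byte write placed at offset 1024 within that block.
bool pad_equivalence(const uint8_t *data_1k)
{
    uint8_t full[4096] = {};
    memcpy(full + 1024, data_1k, 1024);
    return crc32c(0, full, 4096) == crc32c_pad(0, data_1k, 1024, 1024, 2048);
}
```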
@@ -8,7 +8,8 @@

#define MIN_JOURNAL_SIZE 4*1024*1024
#define JOURNAL_MAGIC 0x4A33
#define JOURNAL_VERSION 1
#define JOURNAL_VERSION_V1 1
#define JOURNAL_VERSION_V2 2
#define JOURNAL_BUFFER_SIZE 4*1024*1024
#define JOURNAL_ENTRY_HEADER_SIZE 16

@@ -32,7 +33,7 @@
#define JE_BIG_WRITE_INSTANT 0x08
#define JE_MAX 0x08

// crc32c comes first to ease calculation and is equal to crc32()
// crc32c comes first to ease calculation
struct __attribute__((__packed__)) journal_entry_start
{
    uint32_t crc32;
@@ -42,8 +43,12 @@ struct __attribute__((__packed__)) journal_entry_start
    uint32_t reserved;
    uint64_t journal_start;
    uint64_t version;
    uint32_t data_csum_type;
    uint32_t csum_block_size;
};
#define JE_START_LEGACY_SIZE 24
#define JE_START_V0_SIZE 24
#define JE_START_V1_SIZE 32
#define JE_START_V2_SIZE 40
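The start entry grew with each journal format revision, so its on-disk size doubles as a version hint: 24 bytes for the legacy header, 32 bytes once the version field was added, 40 bytes with the data checksum parameters. The helper below is only an illustration of that idea; its name and the exact acceptance rules of the real reader are assumptions.

```cpp
#include <cstdint>

// Illustrative only: map a start entry's size and version field to a journal format version
static int je_start_version(uint32_t entry_size, uint64_t version_field)
{
    if (entry_size == 24)                       // JE_START_V0_SIZE, legacy header without a version field
        return 0;
    if (entry_size == 32 && version_field == 1) // JE_START_V1_SIZE + JOURNAL_VERSION_V1
        return 1;
    if (entry_size == 40 && version_field == 2) // JE_START_V2_SIZE + JOURNAL_VERSION_V2, carries data_csum_type/csum_block_size
        return 2;
    return -1;                                  // unknown or unsupported journal format
}
```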

struct __attribute__((__packed__)) journal_entry_small_write
{
@@ -59,10 +64,12 @@ struct __attribute__((__packed__)) journal_entry_small_write
    // small_write entries contain <len> bytes of data which is stored in next sectors
    // data_offset is its offset within journal
    uint64_t data_offset;
    uint32_t crc32_data;
    uint32_t crc32_data; // zero when data_csum_type != 0
    // small_write and big_write entries are followed by the "external" bitmap
    // its size is dynamic and included in journal entry's <size> field
    uint8_t bitmap[];
    // and then data checksums if data_csum_type != 0
    // uint32_t data_crc32c[];
};
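With data checksums enabled, the entry carries one CRC32 per checksum block touched by the write, stored after the bitmap. A hedged sketch of how many such CRCs a small_write covering [offset, offset+len) would need; the helper name and the exact rounding are illustrative assumptions, not taken from the source.

```cpp
#include <cstdint>

// Count the checksum blocks covered by a write of <len> bytes at <offset>
static uint32_t small_write_csum_count(uint32_t offset, uint32_t len, uint32_t csum_block_size)
{
    if (!len || !csum_block_size)
        return 0;
    uint32_t first = offset / csum_block_size;                                 // first covered block
    uint32_t past = (offset + len + csum_block_size - 1) / csum_block_size;    // one past the last covered block
    return past - first;
}

// e.g. with a 4 KB csum_block_size, a 6 KB write at offset 1 KB covers blocks 0 and 1, so 2 CRCs are stored
```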

struct __attribute__((__packed__)) journal_entry_big_write
@@ -80,6 +87,8 @@ struct __attribute__((__packed__)) journal_entry_big_write
    // small_write and big_write entries are followed by the "external" bitmap
    // its size is dynamic and included in journal entry's <size> field
    uint8_t bitmap[];
    // and then data checksums if data_csum_type != 0
    // uint32_t data_crc32c[];
};

struct __attribute__((__packed__)) journal_entry_stable
@@ -218,3 +227,6 @@ struct blockstore_journal_check_t
};

journal_entry* prefill_single_journal_entry(journal_t & journal, uint16_t type, uint32_t size);

uint32_t crc32c_pad(uint32_t prev_crc, const void *buf, size_t len, size_t left_pad, size_t right_pad);
uint32_t crc32c_nopad(uint32_t prev_crc, const void *buf, size_t len, size_t left_pad, size_t right_pad);

@@ -85,11 +85,13 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config, bool init)
        immediate_commit = IMMEDIATE_SMALL;
    }
    metadata_buf_size = strtoull(config["meta_buf_size"].c_str(), NULL, 10);
    inmemory_meta = config["inmemory_metadata"] != "false";
    inmemory_meta = config["inmemory_metadata"] != "false" && config["inmemory_metadata"] != "0" &&
        config["inmemory_metadata"] != "no";
    journal.sector_count = strtoull(config["journal_sector_buffer_count"].c_str(), NULL, 10);
    journal.no_same_sector_overwrites = config["journal_no_same_sector_overwrites"] == "true" ||
        config["journal_no_same_sector_overwrites"] == "1" || config["journal_no_same_sector_overwrites"] == "yes";
    journal.inmemory = config["inmemory_journal"] != "false";
    journal.inmemory = config["inmemory_journal"] != "false" && config["inmemory_journal"] != "0" &&
        config["inmemory_journal"] != "no";
    // Validate
    if (journal.sector_count < 2)
    {
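The boolean options above default to enabled and are switched off only by an explicit "false", "0" or "no". A tiny illustrative helper capturing that convention; parse_bool_opt is a hypothetical name, not from the source.

```cpp
#include <string>

// True unless the option is explicitly disabled with "false", "0" or "no"
static bool parse_bool_opt(const std::string & v)
{
    return v != "false" && v != "0" && v != "no";
}

// e.g. inmemory_meta = parse_bool_opt(config["inmemory_metadata"]);
```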
@@ -133,19 +135,24 @@ void blockstore_impl_t::calc_lengths()
    {
        metadata_buffer = memalign(MEM_ALIGNMENT, dsk.meta_len);
        if (!metadata_buffer)
            throw std::runtime_error("Failed to allocate memory for the metadata");
            throw std::runtime_error("Failed to allocate memory for the metadata ("+std::to_string(dsk.meta_len/1024/1024)+" MB)");
    }
    else if (dsk.clean_entry_bitmap_size)
    else if (dsk.clean_entry_bitmap_size || dsk.data_csum_type)
    {
        clean_bitmap = (uint8_t*)malloc(dsk.block_count * 2*dsk.clean_entry_bitmap_size);
        if (!clean_bitmap)
            throw std::runtime_error("Failed to allocate memory for the metadata sparse write bitmap");
        clean_bitmaps = (uint8_t*)malloc(dsk.block_count * 2 * dsk.clean_entry_bitmap_size);
        if (!clean_bitmaps)
        {
            throw std::runtime_error(
                "Failed to allocate memory for the metadata sparse write bitmap ("+
                std::to_string(dsk.block_count * 2 * dsk.clean_entry_bitmap_size / 1024 / 1024)+" MB)"
            );
        }
    }
    if (journal.inmemory)
    {
        journal.buffer = memalign(MEM_ALIGNMENT, journal.len);
        if (!journal.buffer)
            throw std::runtime_error("Failed to allocate memory for journal");
            throw std::runtime_error("Failed to allocate memory for journal ("+std::to_string(journal.len/1024/1024)+" MB)");
    }
    else
    {
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff