Compare commits

..

33 Commits

Author SHA1 Message Date
Vitaliy Filippov e18e33944b Fix eviction when random_pos selects the end 2024-02-04 14:09:03 +03:00
Vitaliy Filippov dce1a557cf Implement min/max list_count to make listings during performance test reasonable 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 1f5c613926 Fix and improve parallel allocation
- Do not try to allocate more DB blocks in an inode block until it's "confirmed" and "locked" by the first write
- Do not recheck for new zero DB blocks on first write into an inode block - a CAS failure means someone else is already writing into it
- Throw new allocation blocks away regardless of whether the known_version is 0 on a CAS failure
2024-02-04 14:09:03 +03:00
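A minimal standalone sketch of the rule this commit message describes (not Vitastor source; cas_write and claim_new_block are hypothetical stand-ins, and an in-memory map simulates per-block versions): the first CAS write at expected version 0 is what "confirms" and "locks" a freshly allocated inode block, and a CAS failure on it means another writer got there first, so the tentative allocation is thrown away regardless of the locally known version.

```
// Sketch only (not Vitastor source): in-memory CAS simulating the
// "first write locks a new inode block" rule from the commit above.
#include <cstdint>
#include <cstdio>
#include <map>

static std::map<uint64_t, uint64_t> block_versions; // simulated per-block versions

// Simulated CAS write: succeeds only if the stored version matches the expected one.
static bool cas_write(uint64_t block, uint64_t expected_version)
{
    uint64_t & v = block_versions[block];
    if (v != expected_version)
        return false;
    v++;
    return true;
}

// A writer claims a tentatively allocated empty block by writing at expected version 0.
// On CAS failure the tentative allocation is discarded unconditionally - someone else
// is already writing into that block - and the caller retries with another block.
static bool claim_new_block(uint64_t block)
{
    return cas_write(block, 0);
}

int main()
{
    printf("writer A: %d\n", claim_new_block(100) ? 1 : 0); // 1 - block 100 is now "locked"
    printf("writer B: %d\n", claim_new_block(100) ? 1 : 0); // 0 - must pick another block
}
```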
Vitaliy Filippov 612972d4e0 Implement key_prefix for K/V stress test 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 6ca1bb61b7 More fixes
- do not overwrite a block with older version if known version is newer
  (read may start before update and end after update)
- invalidated block versions can't be remembered and trusted
- right boundary for split blocks is right_half when diving down, not key_lt
- restart update also when block is "invalidated", not just on version mismatch
- copy callback in listings to avoid closure destruction bugs too
2024-02-04 14:09:03 +03:00
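A minimal sketch of the first bullet of this commit message (not Vitastor source; the cache structure and install_read_result are illustrative): a read that started before a parallel update can complete after it, so a block read from disk is only installed into the cache when it is not older than the version already cached.

```
// Sketch only: keep the newest known version of a block in the cache.
#include <cstdint>
#include <map>
#include <string>

struct cached_block { uint64_t version; std::string data; };

static std::map<uint64_t, cached_block> block_cache;

// Install a freshly read block unless the cache already holds a newer version.
static void install_read_result(uint64_t offset, uint64_t version, std::string data)
{
    auto it = block_cache.find(offset);
    if (it != block_cache.end() && it->second.version > version)
        return; // stale read result - ignore it
    block_cache[offset] = cached_block{ version, std::move(data) };
}

int main()
{
    install_read_result(0, 2, "new");   // a parallel update finished first
    install_read_result(0, 1, "stale"); // a slow read returns older data - dropped
    return block_cache[0].data == "new" ? 0 : 1;
}
```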
Vitaliy Filippov 19d312805e Add logging and one more assert 2024-02-04 14:09:03 +03:00
Vitaliy Filippov bbb7665e84 Make get_block() wait for updating when unrelated block is found along the path 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 1d0837fcbf Fix a race condition where changed blocks were parsed over existing cached blocks and getting a mix of data 2024-02-04 14:09:03 +03:00
Vitaliy Filippov e4f9d39823 Simplify code by removing an unneeded "optimisation" 2024-02-04 14:09:03 +03:00
Vitaliy Filippov a47ebc4b8e Add kv_log_level, print warnings on level 1, trace ops on level 10 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 9c0e7e8710 Fix duplicate keys in listings on parallel updates -- do not rewind key "iterator position" 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 797a27063d Implement key suffix to avoid collisions of multiple test workers 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 59d9a5706a Do not complain on empty first block 2024-02-04 14:09:03 +03:00
Vitaliy Filippov a872d087ad Add JSON output for stress-tester 2024-02-04 14:09:03 +03:00
Vitaliy Filippov c3f0550c9c Print total stats 2024-02-04 14:09:03 +03:00
Vitaliy Filippov c70dc313ef Do not send more than op_count operations (fix segfault on finish) 2024-02-04 14:09:03 +03:00
Vitaliy Filippov c5950468d5 Add some more resiliency to serialize() 2024-02-04 14:09:03 +03:00
Vitaliy Filippov d562547991 Invalidate blocks being updated too 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 04925057ad Change new block allocation method: make each writer choose multiple empty PG blocks and place blocks in them 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 91d665a2f6 Remove blocks from cache on unsuccessful updates 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 0167ebf5c9 Allow to track multiple updates per block (it should never happen though) 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 2b6394aeac Do not call stop_updating after failed write_new_block and after clear_block (both delete the item) 2024-02-04 14:09:03 +03:00
Vitaliy Filippov d33d0875b7 Track versions of parent blocks and recheck if changed during update 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 4afc5bf299 Fix resume_split condition (key_lt can also be "") 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 3a8fee5a80 Experiment: transform offsets for better sharding 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 170f91ccf2 More post-stress-test fixes
- Prevent _split types of new blocks
- Stop updating new blocks only after the whole update, otherwise pointers
  may become invalid
- Use recheck_none for updates initially
- Use UINT64_MAX as initial block version when postponing ops, otherwise the
  check fails when the block is initially empty. This for example leads to
  writing both leaf items & block pointers (which is incorrect) into the root
  block when starting stress-test with --parallelism 32
- Fix -EINTR comparison
2024-02-04 14:09:03 +03:00
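A tiny sketch of the UINT64_MAX point in the list above (not Vitastor source; names are illustrative): a postponed operation remembers "version unknown" for the block it waits on, and UINT64_MAX can never equal a real version, so the recheck always fires - whereas 0 as the sentinel would wrongly match an initially empty block.

```
// Sketch only: UINT64_MAX as the "unknown" initial block version for postponed ops.
#include <cstdint>
#include <cstdio>

static const uint64_t VERSION_UNKNOWN = UINT64_MAX;

// A postponed op is rechecked whenever the block version differs from the remembered one.
static bool must_recheck(uint64_t remembered, uint64_t current)
{
    return remembered != current;
}

int main()
{
    printf("%d\n", must_recheck(VERSION_UNKNOWN, 0) ? 1 : 0); // 1 - empty block is rechecked
    printf("%d\n", must_recheck(0, 0) ? 1 : 0);               // 0 - a 0 sentinel would skip the recheck
}
```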
Vitaliy Filippov b9f959b6c8 Print operation statistics 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 1a75e308e2 K/V fixes after stress-test :-)
- track block versions correctly - per inode block (128kb) instead of tree block (4kb)
- prevent multiple parallel CAS writes of the same inode block
- add logging for EILSEQ which means invalid data in the tree
- fix get_block updated flag which was true for blocks already in cache and was leading to infinite loops on "unrelated block" errors
- apply changes to blocks in cache only after successful writes (using "virtual changes")
- do not replace cached block with an older version from disk
- recheck "unrelated blocks" (read/update collisions) until data stops changing
- track tree path correctly - do not treat split block as parent of its right half
- correctly move blocks when finding new empty place on disk
- restart updates from the beginning when one of blocks is changed by a parallel update
- fix delete using SET opcode and setting key to the empty value instead
- prevent changing the same key more than 1 time in parallel
- fix listing verification
- resume continue_updates in update_find (required because it uses continue_update itself)
- add allow_old_cached parameter to get()
2024-02-04 14:09:03 +03:00
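A minimal sketch of the first bullet above (not Vitastor source; the 128 KiB and 4 KiB sizes come from the commit message, everything else is illustrative): CAS writes bump versions per inode block, so version tracking is keyed by the containing inode block rather than by the 4 KiB tree block.

```
// Sketch only: map a tree-block offset to the inode block whose version is tracked.
#include <cstdint>

static const uint64_t INODE_BLOCK_SIZE = 128*1024; // CAS / versioning granularity
static const uint64_t TREE_BLOCK_SIZE  = 4*1024;   // B-tree node size

// Versions change per inode block, so two tree blocks inside the same inode
// block must share one version key.
static uint64_t version_key(uint64_t tree_block_offset)
{
    return tree_block_offset / INODE_BLOCK_SIZE * INODE_BLOCK_SIZE;
}

int main()
{
    return version_key(5*TREE_BLOCK_SIZE) == version_key(20*TREE_BLOCK_SIZE) ? 0 : 1;
}
```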
Vitaliy Filippov 448bd292d5 Implement K/V DB stress tester 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 7be4771edb Evict blocks based on memory limit & block usage 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 5c613bdd53 Track blocks per level 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 2ef2b1f1a0 Track block level 2024-02-04 14:09:03 +03:00
Vitaliy Filippov 3529683dd1 Experimental B-Tree Vitastor embedded K/V database implementation! 2024-02-04 14:09:03 +03:00
104 changed files with 512 additions and 795 deletions

View File

@ -395,7 +395,7 @@ jobs:
steps:
- name: Run test
id: test
timeout-minutes: 6
timeout-minutes: 3
run: SCHEME=ec /root/vitastor/tests/test_snapshot_chain.sh
- name: Print logs
if: always() && steps.test.outcome == 'failure'

View File

@ -39,10 +39,6 @@ for my $line (<>)
$test_name .= '_'.lc($1).'_'.$2;
}
}
if ($test_name eq 'test_snapshot_chain_ec')
{
$timeout = 6;
}
$line =~ s!\./test_!/root/vitastor/tests/test_!;
# Gitea CI doesn't support artifacts yet, lol
#- name: Upload results

View File

@ -2,6 +2,6 @@ cmake_minimum_required(VERSION 2.8.12)
project(vitastor)
set(VERSION "1.4.7")
set(VERSION "1.4.2")
add_subdirectory(src)

View File

@ -1,4 +1,4 @@
VERSION ?= v1.4.7
VERSION ?= v1.4.2
all: build push

View File

@ -49,7 +49,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: vitalif/vitastor-csi:v1.4.7
image: vitalif/vitastor-csi:v1.4.2
args:
- "--node=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"

View File

@ -121,7 +121,7 @@ spec:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
image: vitalif/vitastor-csi:v1.4.7
image: vitalif/vitastor-csi:v1.4.2
args:
- "--node=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"

View File

@ -5,7 +5,7 @@ package vitastor
const (
vitastorCSIDriverName = "csi.vitastor.io"
vitastorCSIDriverVersion = "1.4.7"
vitastorCSIDriverVersion = "1.4.2"
)
// Config struct fills the parameters of request or user input

debian/changelog vendored
View File

@ -1,4 +1,4 @@
vitastor (1.4.7-1) unstable; urgency=medium
vitastor (1.4.2-1) unstable; urgency=medium
* Bugfixes

View File

@ -35,8 +35,8 @@ RUN set -e -x; \
mkdir -p /root/packages/vitastor-$REL; \
rm -rf /root/packages/vitastor-$REL/*; \
cd /root/packages/vitastor-$REL; \
cp -r /root/vitastor vitastor-1.4.7; \
cd vitastor-1.4.7; \
cp -r /root/vitastor vitastor-1.4.2; \
cd vitastor-1.4.2; \
ln -s /root/fio-build/fio-*/ ./fio; \
FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
ls /usr/include/linux/raw.h || cp ./debian/raw.h /usr/include/linux/raw.h; \
@ -49,8 +49,8 @@ RUN set -e -x; \
rm -rf a b; \
echo "dep:fio=$FIO" > debian/fio_version; \
cd /root/packages/vitastor-$REL; \
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_1.4.7.orig.tar.xz vitastor-1.4.7; \
cd vitastor-1.4.7; \
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_1.4.2.orig.tar.xz vitastor-1.4.2; \
cd vitastor-1.4.2; \
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v "$V""$REL" "Rebuild for $REL"; \
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \

View File

@ -19,8 +19,8 @@ These parameters only apply to Monitors.
## etcd_mon_ttl
- Type: seconds
- Default: 1
- Minimum: 5
- Default: 30
- Minimum: 10
Monitor etcd lease refresh interval in seconds

View File

@ -19,8 +19,8 @@
## etcd_mon_ttl
- Тип: секунды
- Значение по умолчанию: 1
- Минимальное значение: 5
- Значение по умолчанию: 30
- Минимальное значение: 10
Интервал обновления etcd резервации (lease) монитором

View File

@ -215,8 +215,8 @@ is scheduled.
## up_wait_retry_interval
- Type: milliseconds
- Default: 50
- Minimum: 10
- Default: 500
- Minimum: 50
- Can be changed online: yes
OSDs respond to clients with a special error code when they receive I/O

View File

@ -224,8 +224,8 @@ OSD в любом случае согласовывают реальное зн
## up_wait_retry_interval
- Тип: миллисекунды
- Значение по умолчанию: 50
- Минимальное значение: 10
- Значение по умолчанию: 500
- Минимальное значение: 50
- Можно менять на лету: да
Когда OSD получают от клиентов запросы ввода-вывода, относящиеся к не

View File

@ -59,7 +59,6 @@ them, even without restarting by updating configuration in etcd.
- [recovery_tune_client_util_high](#recovery_tune_client_util_high)
- [recovery_tune_agg_interval](#recovery_tune_agg_interval)
- [recovery_tune_sleep_min_us](#recovery_tune_sleep_min_us)
- [recovery_tune_sleep_cutoff_us](#recovery_tune_sleep_cutoff_us)
## etcd_report_interval
@ -605,14 +604,5 @@ is usually fine.
- Default: 10
- Can be changed online: yes
Minimum possible value for auto-tuned recovery_sleep_us. Lower values
are changed to 0.
## recovery_tune_sleep_cutoff_us
- Type: microseconds
- Default: 10000000
- Can be changed online: yes
Maximum possible value for auto-tuned recovery_sleep_us. Higher values
are treated as outliers and ignored in aggregation.
Minimum possible value for auto-tuned recovery_sleep_us. Values lower
than this value are changed to 0.

View File

@ -60,7 +60,6 @@
- [recovery_tune_client_util_high](#recovery_tune_client_util_high)
- [recovery_tune_agg_interval](#recovery_tune_agg_interval)
- [recovery_tune_sleep_min_us](#recovery_tune_sleep_min_us)
- [recovery_tune_sleep_cutoff_us](#recovery_tune_sleep_cutoff_us)
## etcd_report_interval
@ -635,14 +634,4 @@ EC (кодов коррекции ошибок) с более, чем 1 диск
- Можно менять на лету: да
Минимальное возможное значение авто-подстроенного recovery_sleep_us.
Меньшие значения заменяются на 0.
## recovery_tune_sleep_cutoff_us
- Тип: микросекунды
- Значение по умолчанию: 10000000
- Можно менять на лету: да
Максимальное возможное значение авто-подстроенного recovery_sleep_us.
Большие значения считаются случайными выбросами и игнорируются в
усреднении.
Значения ниже данного заменяются на 0.

View File

@ -154,9 +154,6 @@ That is, if it becomes impossible to place PG data on at least (pg_minsize)
OSDs, PG is deactivated for both read and write. So you know that a fresh
write always goes to at least (pg_minsize) OSDs (disks).
That is, pg_size minus pg_minsize sets the number of disk failures to tolerate
without temporary downtime (for [osd_out_time](monitor.en.md#osd_out_time)).
FIXME: pg_minsize behaviour may be changed in the future to only make PGs
read-only instead of deactivating them.
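For example (an illustration of the removed rule, not text from the file): with pg_size=3 and pg_minsize=2, a PG stays active for reads and writes with one of its three OSDs down, and only a second simultaneous failure deactivates it, so the pool tolerates 3 - 2 = 1 disk failure without temporary downtime.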

View File

@ -157,10 +157,6 @@
OSD, PG деактивируется на чтение и запись. Иными словами, всегда известно,
что новые блоки данных всегда записываются как минимум на pg_minsize дисков.
По сути, разница pg_size и pg_minsize задаёт число отказов дисков, которые пул
может пережить без временной (на [osd_out_time](monitor.ru.md#osd_out_time))
остановки обслуживания.
FIXME: Поведение pg_minsize может быть изменено в будущем с полной деактивации
PG на перевод их в режим только для чтения.

View File

@ -731,19 +731,8 @@
default: 10
online: true
info: |
Minimum possible value for auto-tuned recovery_sleep_us. Lower values
are changed to 0.
Minimum possible value for auto-tuned recovery_sleep_us. Values lower
than this value are changed to 0.
info_ru: |
Минимальное возможное значение авто-подстроенного recovery_sleep_us.
Меньшие значения заменяются на 0.
- name: recovery_tune_sleep_cutoff_us
type: us
default: 10000000
online: true
info: |
Maximum possible value for auto-tuned recovery_sleep_us. Higher values
are treated as outliers and ignored in aggregation.
info_ru: |
Максимальное возможное значение авто-подстроенного recovery_sleep_us.
Большие значения считаются случайными выбросами и игнорируются в
усреднении.
Значения ниже данного заменяются на 0.

View File

@ -261,7 +261,7 @@ Options (see also [Cluster-Wide Disk Layout Parameters](../config/layout-cluster
```
--object_size 128k Set blockstore block size
--bitmap_granularity 4k Set bitmap granularity
--journal_size 32M Set journal size
--journal_size 16M Set journal size
--data_csum_type none Set data checksum type (crc32c or none)
--csum_block_size 4k Set data checksum block size
--device_block_size 4k Set device block size

View File

@ -267,7 +267,7 @@ OSD отключены fsync-и.
```
--object_size 128k Размер блока хранилища
--bitmap_granularity 4k Гранулярность битовых карт
--journal_size 32M Размер журнала
--journal_size 16M Размер журнала
--data_csum_type none Задать тип контрольных сумм (crc32c или none)
--csum_block_size 4k Задать размер блока расчёта контрольных сумм
--device_block_size 4k Размер блока устройства

View File

@ -675,12 +675,7 @@ class Mon
{
this.parse_kv(e.kv);
const key = e.kv.key.substr(this.etcd_prefix.length);
if (key.substr(0, 11) == '/osd/state/')
{
stats_changed = true;
changed = true;
}
else if (key.substr(0, 11) == '/osd/stats/' || key.substr(0, 10) == '/pg/stats/' || key.substr(0, 16) == '/osd/inodestats/')
if (key.substr(0, 11) == '/osd/stats/' || key.substr(0, 10) == '/pg/stats/' || key.substr(0, 16) == '/osd/inodestats/')
{
stats_changed = true;
}
@ -1640,13 +1635,9 @@ class Mon
}
const sum_diff = { op_stats: {}, subop_stats: {}, recovery_stats: {} };
// Sum derived values instead of deriving summed
for (const osd in this.state.osd.state)
for (const osd in this.state.osd.stats)
{
const derived = this.prev_stats.osd_diff[osd];
if (!this.state.osd.state[osd] || !derived)
{
continue;
}
for (const type in sum_diff)
{
for (const op in derived[type]||{})
@ -1747,13 +1738,9 @@ class Mon
const used = this.state.pool.stats[pool_id].used_raw_tb;
this.state.pool.stats[pool_id].used_raw_tb = Number(used)/1024/1024/1024/1024;
}
for (const osd_num in this.state.osd.state)
for (const osd_num in this.state.osd.inodestats)
{
const ist = this.state.osd.inodestats[osd_num];
if (!ist || !this.state.osd.state[osd_num])
{
continue;
}
for (const pool_id in ist)
{
inode_stats[pool_id] = inode_stats[pool_id] || {};
@ -1769,14 +1756,9 @@ class Mon
}
}
}
for (const osd in this.state.osd.state)
for (const osd in this.prev_stats.osd_diff)
{
const osd_diff = this.prev_stats.osd_diff[osd];
if (!osd_diff || !this.state.osd.state[osd])
{
continue;
}
for (const pool_id in osd_diff.inode_stats)
for (const pool_id in this.prev_stats.osd_diff[osd].inode_stats)
{
for (const inode_num in this.prev_stats.osd_diff[osd].inode_stats[pool_id])
{

View File

@ -1,6 +1,6 @@
{
"name": "vitastor-mon",
"version": "1.4.7",
"version": "1.4.2",
"description": "Vitastor SDS monitor service",
"main": "mon-main.js",
"scripts": {

View File

@ -50,7 +50,7 @@ from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import volume_utils
VERSION = '1.4.7'
VERSION = '1.4.2'
LOG = logging.getLogger(__name__)

View File

@ -24,4 +24,4 @@ rm fio
mv fio-copy fio
FIO=`rpm -qi fio | perl -e 'while(<>) { /^Epoch[\s:]+(\S+)/ && print "$1:"; /^Version[\s:]+(\S+)/ && print $1; /^Release[\s:]+(\S+)/ && print "-$1"; }'`
perl -i -pe 's/(Requires:\s*fio)([^\n]+)?/$1 = '$FIO'/' $VITASTOR/rpm/vitastor-el$EL.spec
tar --transform 's#^#vitastor-1.4.7/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-1.4.7$(rpm --eval '%dist').tar.gz *
tar --transform 's#^#vitastor-1.4.2/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-1.4.2$(rpm --eval '%dist').tar.gz *

View File

@ -36,7 +36,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
cp /root/vitastor-1.4.7.el7.tar.gz ~/rpmbuild/SOURCES; \
cp /root/vitastor-1.4.2.el7.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el7.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \

View File

@ -1,11 +1,11 @@
Name: vitastor
Version: 1.4.7
Version: 1.4.2
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
Source0: vitastor-1.4.7.el7.tar.gz
Source0: vitastor-1.4.2.el7.tar.gz
BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel

View File

@ -35,7 +35,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
cp /root/vitastor-1.4.7.el8.tar.gz ~/rpmbuild/SOURCES; \
cp /root/vitastor-1.4.2.el8.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el8.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \

View File

@ -1,11 +1,11 @@
Name: vitastor
Version: 1.4.7
Version: 1.4.2
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
Source0: vitastor-1.4.7.el8.tar.gz
Source0: vitastor-1.4.2.el8.tar.gz
BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel

View File

@ -18,7 +18,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
cp /root/vitastor-1.4.7.el9.tar.gz ~/rpmbuild/SOURCES; \
cp /root/vitastor-1.4.2.el9.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el9.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \

View File

@ -1,11 +1,11 @@
Name: vitastor
Version: 1.4.7
Version: 1.4.2
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
Source0: vitastor-1.4.7.el9.tar.gz
Source0: vitastor-1.4.2.el9.tar.gz
BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel

View File

@ -16,8 +16,8 @@ if("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/local/?$")
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
endif()
add_definitions(-DVERSION="1.4.7")
add_definitions(-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -fno-omit-frame-pointer -I ${CMAKE_SOURCE_DIR}/src)
add_definitions(-DVERSION="1.4.2")
add_definitions(-Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -fno-omit-frame-pointer -I ${CMAKE_SOURCE_DIR}/src)
add_link_options(-fno-omit-frame-pointer)
if (${WITH_ASAN})
add_definitions(-fsanitize=address)

View File

@ -108,10 +108,6 @@ void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config
{
throw std::runtime_error("journal_block_size must be a multiple of "+std::to_string(DIRECT_IO_ALIGNMENT));
}
else if (journal_block_size > MAX_DATA_BLOCK_SIZE)
{
throw std::runtime_error("journal_block_size must not exceed "+std::to_string(MAX_DATA_BLOCK_SIZE));
}
if (!meta_block_size)
{
meta_block_size = 4096;
@ -120,10 +116,6 @@ void blockstore_disk_t::parse_config(std::map<std::string, std::string> & config
{
throw std::runtime_error("meta_block_size must be a multiple of "+std::to_string(DIRECT_IO_ALIGNMENT));
}
else if (meta_block_size > MAX_DATA_BLOCK_SIZE)
{
throw std::runtime_error("meta_block_size must not exceed "+std::to_string(MAX_DATA_BLOCK_SIZE));
}
if (data_offset % disk_alignment)
{
throw std::runtime_error("data_offset must be a multiple of disk_alignment = "+std::to_string(disk_alignment));

View File

@ -19,6 +19,7 @@ journal_flusher_t::journal_flusher_t(blockstore_impl_t *bs)
syncing_flushers = 0;
// FIXME: allow to configure flusher_start_threshold and journal_trim_interval
flusher_start_threshold = bs->dsk.journal_block_size / sizeof(journal_entry_stable);
journal_trim_interval = 512;
journal_trim_counter = bs->journal.flush_journal ? 1 : 0;
trim_wanted = bs->journal.flush_journal ? 1 : 0;
journal_superblock = bs->journal.inmemory ? bs->journal.buffer : memalign_or_die(MEM_ALIGNMENT, bs->dsk.journal_block_size);
@ -93,7 +94,7 @@ void journal_flusher_t::loop()
void journal_flusher_t::enqueue_flush(obj_ver_id ov)
{
#ifdef BLOCKSTORE_DEBUG
printf("enqueue_flush %jx:%jx v%ju\n", ov.oid.inode, ov.oid.stripe, ov.version);
printf("enqueue_flush %lx:%lx v%lu\n", ov.oid.inode, ov.oid.stripe, ov.version);
#endif
auto it = flush_versions.find(ov.oid);
if (it != flush_versions.end())
@ -116,7 +117,7 @@ void journal_flusher_t::enqueue_flush(obj_ver_id ov)
void journal_flusher_t::unshift_flush(obj_ver_id ov, bool force)
{
#ifdef BLOCKSTORE_DEBUG
printf("unshift_flush %jx:%jx v%ju\n", ov.oid.inode, ov.oid.stripe, ov.version);
printf("unshift_flush %lx:%lx v%lu\n", ov.oid.inode, ov.oid.stripe, ov.version);
#endif
auto it = flush_versions.find(ov.oid);
if (it != flush_versions.end())
@ -142,7 +143,7 @@ void journal_flusher_t::unshift_flush(obj_ver_id ov, bool force)
void journal_flusher_t::remove_flush(object_id oid)
{
#ifdef BLOCKSTORE_DEBUG
printf("undo_flush %jx:%jx\n", oid.inode, oid.stripe);
printf("undo_flush %lx:%lx\n", oid.inode, oid.stripe);
#endif
auto v_it = flush_versions.find(oid);
if (v_it != flush_versions.end())
@ -183,7 +184,8 @@ void journal_flusher_t::mark_trim_possible()
if (trim_wanted > 0)
{
dequeuing = true;
journal_trim_counter = 0;
if (!journal_trim_counter)
journal_trim_counter = journal_trim_interval;
bs->ringloop->wakeup();
}
}
@ -233,7 +235,7 @@ void journal_flusher_t::dump_diagnostics()
break;
}
printf(
"Flusher: queued=%zd first=%s%jx:%jx trim_wanted=%d dequeuing=%d trimming=%d cur=%d target=%d active=%d syncing=%d\n",
"Flusher: queued=%ld first=%s%lx:%lx trim_wanted=%d dequeuing=%d trimming=%d cur=%d target=%d active=%d syncing=%d\n",
flush_queue.size(), unflushable_type, unflushable.oid.inode, unflushable.oid.stripe,
trim_wanted, dequeuing, trimming, cur_flusher_count, target_flusher_count,
active_flushers, syncing_flushers
@ -266,7 +268,7 @@ bool journal_flusher_t::try_find_other(std::map<obj_ver_id, dirty_entry>::iterat
{
int search_left = flush_queue.size() - 1;
#ifdef BLOCKSTORE_DEBUG
printf("Flusher overran writers (%jx:%jx v%ju, dirty_start=%08jx) - searching for older flushes (%d left)\n",
printf("Flusher overran writers (%lx:%lx v%lu, dirty_start=%08lx) - searching for older flushes (%d left)\n",
cur.oid.inode, cur.oid.stripe, cur.version, bs->journal.dirty_start, search_left);
#endif
while (search_left > 0)
@ -283,7 +285,7 @@ bool journal_flusher_t::try_find_other(std::map<obj_ver_id, dirty_entry>::iterat
dirty_end->second.journal_sector < bs->journal.used_start))
{
#ifdef BLOCKSTORE_DEBUG
printf("Write %jx:%jx v%ju is too new: offset=%08jx\n", cur.oid.inode, cur.oid.stripe, cur.version, dirty_end->second.journal_sector);
printf("Write %lx:%lx v%lu is too new: offset=%08lx\n", cur.oid.inode, cur.oid.stripe, cur.version, dirty_end->second.journal_sector);
#endif
enqueue_flush(cur);
}
@ -364,10 +366,9 @@ resume_0:
!flusher->flush_queue.size() || !flusher->dequeuing)
{
stop_flusher:
if (flusher->trim_wanted > 0 && cur.oid.inode != 0)
if (flusher->trim_wanted > 0 && flusher->journal_trim_counter > 0)
{
// Attempt forced trim
cur.oid = {};
flusher->active_flushers++;
goto trim_journal;
}
@ -386,7 +387,7 @@ stop_flusher:
if (repeat_it != flusher->sync_to_repeat.end())
{
#ifdef BLOCKSTORE_DEBUG
printf("Postpone %jx:%jx v%ju\n", cur.oid.inode, cur.oid.stripe, cur.version);
printf("Postpone %lx:%lx v%lu\n", cur.oid.inode, cur.oid.stripe, cur.version);
#endif
// We don't flush different parts of history of the same object in parallel
// So we check if someone is already flushing this object
@ -415,13 +416,12 @@ stop_flusher:
flusher->sync_to_repeat.erase(cur.oid);
if (!flusher->try_find_other(dirty_end, cur))
{
cur.oid = {};
goto stop_flusher;
}
}
}
#ifdef BLOCKSTORE_DEBUG
printf("Flushing %jx:%jx v%ju\n", cur.oid.inode, cur.oid.stripe, cur.version);
printf("Flushing %lx:%lx v%lu\n", cur.oid.inode, cur.oid.stripe, cur.version);
#endif
flusher->active_flushers++;
// Find it in clean_db
@ -448,7 +448,7 @@ stop_flusher:
// Object not allocated. This is a bug.
char err[1024];
snprintf(
err, 1024, "BUG: Object %jx:%jx v%ju that we are trying to flush is not allocated on the data device",
err, 1024, "BUG: Object %lx:%lx v%lu that we are trying to flush is not allocated on the data device",
cur.oid.inode, cur.oid.stripe, cur.version
);
throw std::runtime_error(err);
@ -538,7 +538,7 @@ resume_2:
clean_disk_entry *old_entry = (clean_disk_entry*)((uint8_t*)meta_old.buf + meta_old.pos*bs->dsk.clean_entry_size);
if (old_entry->oid.inode != 0 && old_entry->oid != cur.oid)
{
printf("Fatal error (metadata corruption or bug): tried to wipe metadata entry %ju (%jx:%jx v%ju) as old location of %jx:%jx\n",
printf("Fatal error (metadata corruption or bug): tried to wipe metadata entry %lu (%lx:%lx v%lu) as old location of %lx:%lx\n",
old_clean_loc >> bs->dsk.block_order, old_entry->oid.inode, old_entry->oid.stripe,
old_entry->version, cur.oid.inode, cur.oid.stripe);
exit(1);
@ -571,7 +571,7 @@ resume_2:
// Erase dirty_db entries
bs->erase_dirty(dirty_start, std::next(dirty_end), clean_loc);
#ifdef BLOCKSTORE_DEBUG
printf("Flushed %jx:%jx v%ju (%d copies, wr:%d, del:%d), %jd left\n", cur.oid.inode, cur.oid.stripe, cur.version,
printf("Flushed %lx:%lx v%lu (%d copies, wr:%d, del:%d), %ld left\n", cur.oid.inode, cur.oid.stripe, cur.version,
copy_count, has_writes, has_delete, flusher->flush_queue.size());
#endif
release_oid:
@ -584,8 +584,7 @@ resume_2:
flusher->sync_to_repeat.erase(repeat_it);
trim_journal:
// Clear unused part of the journal every <journal_trim_interval> flushes
if (bs->journal_trim_interval && !((++flusher->journal_trim_counter) % bs->journal_trim_interval) ||
flusher->trim_wanted > 0)
if (!((++flusher->journal_trim_counter) % flusher->journal_trim_interval) || flusher->trim_wanted > 0)
{
resume_26:
resume_27:
@ -610,8 +609,8 @@ void journal_flusher_co::update_metadata_entry()
{
printf(
has_delete
? "Fatal error (metadata corruption or bug): tried to delete metadata entry %ju (%jx:%jx v%ju) while deleting %jx:%jx v%ju\n"
: "Fatal error (metadata corruption or bug): tried to overwrite non-zero metadata entry %ju (%jx:%jx v%ju) with %jx:%jx v%ju\n",
? "Fatal error (metadata corruption or bug): tried to delete metadata entry %lu (%lx:%lx v%lu) while deleting %lx:%lx v%lu\n"
: "Fatal error (metadata corruption or bug): tried to overwrite non-zero metadata entry %lu (%lx:%lx v%lu) with %lx:%lx v%lu\n",
clean_loc >> bs->dsk.block_order, new_entry->oid.inode, new_entry->oid.stripe,
new_entry->version, cur.oid.inode, cur.oid.stripe, cur.version
);
@ -711,7 +710,7 @@ bool journal_flusher_co::write_meta_block(flusher_meta_write_t & meta_block, int
if (wait_state == wait_base)
goto resume_0;
await_sqe(0);
data->iov = (struct iovec){ meta_block.buf, (size_t)bs->dsk.meta_block_size };
data->iov = (struct iovec){ meta_block.buf, bs->dsk.meta_block_size };
data->callback = simple_callback_w;
my_uring_prep_writev(
sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + bs->dsk.meta_block_size + meta_block.sector
@ -761,7 +760,7 @@ bool journal_flusher_co::clear_incomplete_csum_block_bits(int wait_base)
{
// If we encounter bad checksums during flush, we still update the bad block,
// but intentionally mangle checksums to avoid hiding the corruption.
iovec iov = { .iov_base = v[i].buf, .iov_len = (size_t)v[i].len };
iovec iov = { .iov_base = v[i].buf, .iov_len = v[i].len };
if (!(v[i].copy_flags & COPY_BUF_JOURNAL))
{
assert(!(v[i].offset % bs->dsk.csum_block_size));
@ -769,7 +768,7 @@ bool journal_flusher_co::clear_incomplete_csum_block_bits(int wait_base)
bs->verify_padded_checksums(new_clean_bitmap, new_clean_bitmap + 2*bs->dsk.clean_entry_bitmap_size,
v[i].offset, &iov, 1, [&](uint32_t bad_block, uint32_t calc_csum, uint32_t stored_csum)
{
printf("Checksum mismatch in object %jx:%jx v%ju in data area at offset 0x%jx+0x%x: got %08x, expected %08x\n",
printf("Checksum mismatch in object %lx:%lx v%lu in data area at offset 0x%lx+0x%x: got %08x, expected %08x\n",
cur.oid.inode, cur.oid.stripe, old_clean_ver, old_clean_loc, bad_block, calc_csum, stored_csum);
for (uint32_t j = 0; j < bs->dsk.csum_block_size; j += bs->dsk.bitmap_granularity)
{
@ -782,7 +781,7 @@ bool journal_flusher_co::clear_incomplete_csum_block_bits(int wait_base)
{
bs->verify_journal_checksums(v[i].csum_buf, v[i].offset, &iov, 1, [&](uint32_t bad_block, uint32_t calc_csum, uint32_t stored_csum)
{
printf("Checksum mismatch in object %jx:%jx v%ju in journal at offset 0x%jx+0x%x (block offset 0x%jx): got %08x, expected %08x\n",
printf("Checksum mismatch in object %lx:%lx v%lu in journal at offset 0x%lx+0x%x (block offset 0x%lx): got %08x, expected %08x\n",
cur.oid.inode, cur.oid.stripe, old_clean_ver,
v[i].disk_offset, bad_block, v[i].offset, calc_csum, stored_csum);
bad_block += (v[i].offset/bs->dsk.csum_block_size) * bs->dsk.csum_block_size;
@ -806,7 +805,7 @@ bool journal_flusher_co::clear_incomplete_csum_block_bits(int wait_base)
if (new_entry->oid != cur.oid)
{
printf(
"Fatal error (metadata corruption or bug): tried to make holes in %ju (%jx:%jx v%ju) with %jx:%jx v%ju\n",
"Fatal error (metadata corruption or bug): tried to make holes in %lu (%lx:%lx v%lu) with %lx:%lx v%lu\n",
clean_loc >> bs->dsk.block_order, new_entry->oid.inode, new_entry->oid.stripe,
new_entry->version, cur.oid.inode, cur.oid.stripe, cur.version
);
@ -926,7 +925,7 @@ void journal_flusher_co::scan_dirty()
{
char err[1024];
snprintf(
err, 1024, "BUG: Unexpected dirty_entry %jx:%jx v%ju unstable state during flush: 0x%x",
err, 1024, "BUG: Unexpected dirty_entry %lx:%lx v%lu unstable state during flush: 0x%x",
dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version, dirty_it->second.state
);
throw std::runtime_error(err);
@ -1022,7 +1021,7 @@ void journal_flusher_co::scan_dirty()
// May happen if the metadata entry is corrupt, but journal isn't
// FIXME: Report corrupted object to the upper layer (OSD)
printf(
"Warning: object %jx:%jx has overwrites, but doesn't have a clean version."
"Warning: object %lx:%lx has overwrites, but doesn't have a clean version."
" Metadata is likely corrupted. Dropping object from the DB.\n",
cur.oid.inode, cur.oid.stripe
);
@ -1057,7 +1056,7 @@ void journal_flusher_co::scan_dirty()
flusher->enqueue_flush(cur);
cur.version = dirty_end->first.version;
#ifdef BLOCKSTORE_DEBUG
printf("Partial checksum block overwrites found - rewinding flush back to %jx:%jx v%ju\n", cur.oid.inode, cur.oid.stripe, cur.version);
printf("Partial checksum block overwrites found - rewinding flush back to %lx:%lx v%lu\n", cur.oid.inode, cur.oid.stripe, cur.version);
#endif
v.clear();
copy_count = 0;
@ -1085,7 +1084,7 @@ bool journal_flusher_co::read_dirty(int wait_base)
auto & vi = v[v.size()-i];
assert(vi.len != 0);
vi.buf = memalign_or_die(MEM_ALIGNMENT, vi.len);
data->iov = (struct iovec){ vi.buf, (size_t)vi.len };
data->iov = (struct iovec){ vi.buf, vi.len };
data->callback = simple_callback_r;
my_uring_prep_readv(
sqe, bs->dsk.data_fd, &data->iov, 1, bs->dsk.data_offset + old_clean_loc + vi.offset
@ -1209,7 +1208,7 @@ bool journal_flusher_co::modify_meta_read(uint64_t meta_loc, flusher_meta_write_
.usage_count = 1,
}).first;
await_sqe(0);
data->iov = (struct iovec){ wr.it->second.buf, (size_t)bs->dsk.meta_block_size };
data->iov = (struct iovec){ wr.it->second.buf, bs->dsk.meta_block_size };
data->callback = simple_callback_r;
wr.submitted = true;
my_uring_prep_readv(
@ -1248,7 +1247,7 @@ void journal_flusher_co::free_data_blocks()
auto uo_it = bs->used_clean_objects.find(old_clean_loc);
bool used = uo_it != bs->used_clean_objects.end();
#ifdef BLOCKSTORE_DEBUG
printf("%s block %ju from %jx:%jx v%ju (new location is %ju)\n",
printf("%s block %lu from %lx:%lx v%lu (new location is %lu)\n",
used ? "Postpone free" : "Free",
old_clean_loc >> bs->dsk.block_order,
cur.oid.inode, cur.oid.stripe, cur.version,
@ -1265,7 +1264,7 @@ void journal_flusher_co::free_data_blocks()
auto uo_it = bs->used_clean_objects.find(old_clean_loc);
bool used = uo_it != bs->used_clean_objects.end();
#ifdef BLOCKSTORE_DEBUG
printf("%s block %ju from %jx:%jx v%ju (delete)\n",
printf("%s block %lu from %lx:%lx v%lu (delete)\n",
used ? "Postpone free" : "Free",
old_clean_loc >> bs->dsk.block_order,
cur.oid.inode, cur.oid.stripe, cur.version);
@ -1347,6 +1346,7 @@ bool journal_flusher_co::trim_journal(int wait_base)
else if (wait_state == wait_base+2) goto resume_2;
else if (wait_state == wait_base+3) goto resume_3;
else if (wait_state == wait_base+4) goto resume_4;
flusher->journal_trim_counter = 0;
new_trim_pos = bs->journal.get_trim_pos();
if (new_trim_pos != bs->journal.used_start)
{
@ -1378,7 +1378,7 @@ bool journal_flusher_co::trim_journal(int wait_base)
.csum_block_size = bs->dsk.csum_block_size,
};
((journal_entry_start*)flusher->journal_superblock)->crc32 = je_crc32((journal_entry*)flusher->journal_superblock);
data->iov = (struct iovec){ flusher->journal_superblock, (size_t)bs->dsk.journal_block_size };
data->iov = (struct iovec){ flusher->journal_superblock, bs->dsk.journal_block_size };
data->callback = simple_callback_w;
my_uring_prep_writev(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset);
wait_count++;
@ -1410,7 +1410,7 @@ bool journal_flusher_co::trim_journal(int wait_base)
}
bs->journal.used_start = new_trim_pos;
#ifdef BLOCKSTORE_DEBUG
printf("Journal trimmed to %08jx (next_free=%08jx dirty_start=%08jx)\n", bs->journal.used_start, bs->journal.next_free, bs->journal.dirty_start);
printf("Journal trimmed to %08lx (next_free=%08lx dirty_start=%08lx)\n", bs->journal.used_start, bs->journal.next_free, bs->journal.dirty_start);
#endif
if (bs->journal.flush_journal && !flusher->flush_queue.size())
{
@ -1419,7 +1419,6 @@ bool journal_flusher_co::trim_journal(int wait_base)
exit(0);
}
}
flusher->journal_trim_counter = 0;
flusher->trimming = false;
}
return true;
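A standalone sketch of the trim-interval pattern visible in the hunk above (not the flusher code itself; do_trim and maybe_trim are stand-ins, 512 is the interval value shown in the diff): the journal is trimmed on every journal_trim_interval-th flush, or right away when a trim is explicitly wanted.

```
// Sketch only: trim the journal every Nth flush or when a trim is forced.
#include <cstdio>

static int journal_trim_counter = 0;
static const int journal_trim_interval = 512; // interval shown in the diff above

static void do_trim() { printf("trimming journal\n"); } // stand-in for the real trim

// Called after each flush.
static void maybe_trim(bool trim_wanted)
{
    if (!((++journal_trim_counter) % journal_trim_interval) || trim_wanted)
    {
        do_trim();
        journal_trim_counter = 0;
    }
}

int main()
{
    for (int i = 0; i < 1024; i++)
        maybe_trim(false); // prints twice: after the 512th and the 1024th flush
}
```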

View File

@ -107,7 +107,7 @@ class journal_flusher_t
blockstore_impl_t *bs;
friend class journal_flusher_co;
int journal_trim_counter;
int journal_trim_counter, journal_trim_interval;
bool trimming;
void* journal_superblock;

View File

@ -195,10 +195,6 @@ void blockstore_impl_t::loop()
// ring is full, stop submission
break;
}
else if (PRIV(op)->wait_for == WAIT_JOURNAL)
{
PRIV(op)->wait_detail2 = (unstable_writes.size()+unstable_unsynced);
}
}
}
if (op_idx != new_idx)
@ -269,7 +265,7 @@ void blockstore_impl_t::check_wait(blockstore_op_t *op)
{
// stop submission if there's still no free space
#ifdef BLOCKSTORE_DEBUG
printf("Still waiting for %ju SQE(s)\n", PRIV(op)->wait_detail);
printf("Still waiting for %lu SQE(s)\n", PRIV(op)->wait_detail);
#endif
return;
}
@ -277,15 +273,15 @@ void blockstore_impl_t::check_wait(blockstore_op_t *op)
}
else if (PRIV(op)->wait_for == WAIT_JOURNAL)
{
if (journal.used_start == PRIV(op)->wait_detail &&
(unstable_writes.size()+unstable_unsynced) == PRIV(op)->wait_detail2)
if (journal.used_start == PRIV(op)->wait_detail && !unstable_count_changed)
{
// do not submit
#ifdef BLOCKSTORE_DEBUG
printf("Still waiting to flush journal offset %08jx\n", PRIV(op)->wait_detail);
printf("Still waiting to flush journal offset %08lx\n", PRIV(op)->wait_detail);
#endif
return;
}
unstable_count_changed = false;
flusher->release_trim();
PRIV(op)->wait_for = 0;
}
@ -357,6 +353,7 @@ void blockstore_impl_t::enqueue_op(blockstore_op_t *op)
};
}
unstable_writes.clear();
unstable_count_changed = true;
op->callback = [old_callback](blockstore_op_t *op)
{
obj_ver_id *vers = (obj_ver_id*)op->buf;

View File

@ -202,7 +202,7 @@ struct blockstore_op_private_t
{
// Wait status
int wait_for;
uint64_t wait_detail, wait_detail2;
uint64_t wait_detail;
int pending_ops;
int op_state;
@ -253,7 +253,6 @@ class blockstore_impl_t
bool inmemory_meta = false;
// Maximum and minimum flusher count
unsigned max_flusher_count, min_flusher_count;
unsigned journal_trim_interval;
// Maximum queue depth
unsigned max_write_iodepth = 128;
// Enable small (journaled) write throttling, useful for the SSD+HDD case
@ -277,6 +276,7 @@ class blockstore_impl_t
std::vector<blockstore_op_t*> submit_queue;
std::vector<obj_ver_id> unsynced_big_writes, unsynced_small_writes;
int unsynced_big_write_count = 0, unstable_unsynced = 0;
bool unstable_count_changed = false;
int unsynced_queued_ops = 0;
allocator *data_alloc = NULL;
uint64_t used_blocks = 0;

View File

@ -63,7 +63,7 @@ int blockstore_init_meta::loop()
throw std::runtime_error("Failed to allocate metadata read buffer");
// Read superblock
GET_SQE();
data->iov = { metadata_buffer, (size_t)bs->dsk.meta_block_size };
data->iov = { metadata_buffer, bs->dsk.meta_block_size };
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset);
bs->ringloop->submit();
@ -100,7 +100,7 @@ resume_1:
{
printf("Initializing metadata area\n");
GET_SQE();
data->iov = (struct iovec){ metadata_buffer, (size_t)bs->dsk.meta_block_size };
data->iov = (struct iovec){ metadata_buffer, bs->dsk.meta_block_size };
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
my_uring_prep_writev(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset);
bs->ringloop->submit();
@ -153,7 +153,7 @@ resume_1:
else if (hdr->version > BLOCKSTORE_META_FORMAT_V2)
{
printf(
"Metadata format is too new for me (stored version is %ju, max supported %u).\n",
"Metadata format is too new for me (stored version is %lu, max supported %u).\n",
hdr->version, BLOCKSTORE_META_FORMAT_V2
);
exit(1);
@ -167,7 +167,7 @@ resume_1:
printf(
"Configuration stored in metadata superblock"
" (meta_block_size=%u, data_block_size=%u, bitmap_granularity=%u, data_csum_type=%u, csum_block_size=%u)"
" differs from OSD configuration (%ju/%u/%ju, %u/%u).\n",
" differs from OSD configuration (%lu/%u/%lu, %u/%u).\n",
hdr->meta_block_size, hdr->data_block_size, hdr->bitmap_granularity,
hdr->data_csum_type, hdr->csum_block_size,
bs->dsk.meta_block_size, bs->dsk.data_block_size, bs->dsk.bitmap_granularity,
@ -199,8 +199,7 @@ resume_2:
submitted++;
next_offset += bufs[i].size;
GET_SQE();
assert(bufs[i].size <= 0x7fffffff);
data->iov = { bufs[i].buf, (size_t)bufs[i].size };
data->iov = { bufs[i].buf, bufs[i].size };
data->callback = [this, i](ring_data_t *data) { handle_event(data, i); };
if (!zero_on_init)
my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + bufs[i].offset);
@ -232,8 +231,7 @@ resume_2:
{
// write the modified buffer back
GET_SQE();
assert(bufs[i].size <= 0x7fffffff);
data->iov = { bufs[i].buf, (size_t)bufs[i].size };
data->iov = { bufs[i].buf, bufs[i].size };
data->callback = [this, i](ring_data_t *data) { handle_event(data, i); };
my_uring_prep_writev(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + bufs[i].offset);
bufs[i].state = INIT_META_WRITING;
@ -259,7 +257,7 @@ resume_2:
next_offset = entries_to_zero[i]/entries_per_block;
for (j = i; j < entries_to_zero.size() && entries_to_zero[j]/entries_per_block == next_offset; j++) {}
GET_SQE();
data->iov = { metadata_buffer, (size_t)bs->dsk.meta_block_size };
data->iov = { metadata_buffer, bs->dsk.meta_block_size };
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
my_uring_prep_readv(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + (1+next_offset)*bs->dsk.meta_block_size);
submitted++;
@ -275,7 +273,7 @@ resume_5:
memset((uint8_t*)metadata_buffer + pos*bs->dsk.clean_entry_size, 0, bs->dsk.clean_entry_size);
}
GET_SQE();
data->iov = { metadata_buffer, (size_t)bs->dsk.meta_block_size };
data->iov = { metadata_buffer, bs->dsk.meta_block_size };
data->callback = [this](ring_data_t *data) { handle_event(data, -1); };
my_uring_prep_writev(sqe, bs->dsk.meta_fd, &data->iov, 1, bs->dsk.meta_offset + (1+next_offset)*bs->dsk.meta_block_size);
submitted++;
@ -289,7 +287,7 @@ resume_6:
entries_to_zero.clear();
}
// metadata read finished
printf("Metadata entries loaded: %ju, free blocks: %ju / %ju\n", entries_loaded, bs->data_alloc->get_free_count(), bs->dsk.block_count);
printf("Metadata entries loaded: %lu, free blocks: %lu / %lu\n", entries_loaded, bs->data_alloc->get_free_count(), bs->dsk.block_count);
if (!bs->inmemory_meta)
{
free(metadata_buffer);
@ -330,7 +328,7 @@ bool blockstore_init_meta::handle_meta_block(uint8_t *buf, uint64_t entries_per_
uint32_t *entry_csum = (uint32_t*)((uint8_t*)entry + bs->dsk.clean_entry_size - 4);
if (*entry_csum != crc32c(0, entry, bs->dsk.clean_entry_size - 4))
{
printf("Metadata entry %ju is corrupt (checksum mismatch), skipping\n", done_cnt+i);
printf("Metadata entry %lu is corrupt (checksum mismatch), skipping\n", done_cnt+i);
continue;
}
}
@ -368,7 +366,7 @@ bool blockstore_init_meta::handle_meta_block(uint8_t *buf, uint64_t entries_per_
entries_to_zero.push_back(clean_it->second.location >> bs->dsk.block_order);
}
#ifdef BLOCKSTORE_DEBUG
printf("Free block %ju from %jx:%jx v%ju (new location is %ju)\n",
printf("Free block %lu from %lx:%lx v%lu (new location is %lu)\n",
old_clean_loc,
clean_it->first.inode, clean_it->first.stripe, clean_it->second.version,
done_cnt+i);
@ -382,7 +380,7 @@ bool blockstore_init_meta::handle_meta_block(uint8_t *buf, uint64_t entries_per_
}
entries_loaded++;
#ifdef BLOCKSTORE_DEBUG
printf("Allocate block (clean entry) %ju: %jx:%jx v%ju\n", done_cnt+i, entry->oid.inode, entry->oid.stripe, entry->version);
printf("Allocate block (clean entry) %lu: %lx:%lx v%lu\n", done_cnt+i, entry->oid.inode, entry->oid.stripe, entry->version);
#endif
bs->data_alloc->set(done_cnt+i, true);
clean_db[entry->oid] = (struct clean_entry){
@ -396,7 +394,7 @@ bool blockstore_init_meta::handle_meta_block(uint8_t *buf, uint64_t entries_per_
updated = true;
memset(entry, 0, bs->dsk.clean_entry_size);
#ifdef BLOCKSTORE_DEBUG
printf("Old clean entry %ju: %jx:%jx v%ju\n", done_cnt+i, entry->oid.inode, entry->oid.stripe, entry->version);
printf("Old clean entry %lu: %lx:%lx v%lu\n", done_cnt+i, entry->oid.inode, entry->oid.stripe, entry->version);
#endif
}
}
@ -468,7 +466,7 @@ int blockstore_init_journal::loop()
if (!sqe)
throw std::runtime_error("io_uring is full while trying to read journal");
data = ((ring_data_t*)sqe->user_data);
data->iov = { submitted_buf, (size_t)bs->journal.block_size };
data->iov = { submitted_buf, bs->journal.block_size };
data->callback = simple_callback;
my_uring_prep_readv(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset);
bs->ringloop->submit();
@ -509,7 +507,7 @@ resume_1:
// FIXME: Randomize initial crc32. Track crc32 when trimming.
printf("Resetting journal\n");
GET_SQE();
data->iov = (struct iovec){ submitted_buf, (size_t)(2*bs->journal.block_size) };
data->iov = (struct iovec){ submitted_buf, 2*bs->journal.block_size };
data->callback = simple_callback;
my_uring_prep_writev(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset);
wait_count++;
@ -559,7 +557,7 @@ resume_1:
(je_start->version != JOURNAL_VERSION_V2 || je_start->size != JE_START_V2_SIZE && je_start->size != JE_START_V1_SIZE))
{
fprintf(
stderr, "The code only supports journal versions 2 and 1, but it is %ju on disk."
stderr, "The code only supports journal versions 2 and 1, but it is %lu on disk."
" Please use vitastor-disk to rewrite the journal\n",
je_start->size == JE_START_V0_SIZE ? 0 : je_start->version
);
@ -608,7 +606,7 @@ resume_1:
submitted_buf = (uint8_t*)bs->journal.buffer + journal_pos;
data->iov = {
submitted_buf,
(size_t)(end - journal_pos < JOURNAL_BUFFER_SIZE ? end - journal_pos : JOURNAL_BUFFER_SIZE),
end - journal_pos < JOURNAL_BUFFER_SIZE ? end - journal_pos : JOURNAL_BUFFER_SIZE,
};
data->callback = [this](ring_data_t *data1) { handle_event(data1); };
my_uring_prep_readv(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset + journal_pos);
@ -624,7 +622,7 @@ resume_1:
if (init_write_buf && !bs->readonly)
{
GET_SQE();
data->iov = { init_write_buf, (size_t)bs->journal.block_size };
data->iov = { init_write_buf, bs->journal.block_size };
data->callback = simple_callback;
my_uring_prep_writev(sqe, bs->dsk.journal_fd, &data->iov, 1, bs->journal.offset + init_write_sector);
wait_count++;
@ -693,7 +691,7 @@ resume_1:
IS_BIG_WRITE(dirty_it->second.state) &&
dirty_it->second.location == UINT64_MAX)
{
printf("Fatal error (bug): %jx:%jx v%ju big_write journal_entry was allocated over another object\n",
printf("Fatal error (bug): %lx:%lx v%lu big_write journal_entry was allocated over another object\n",
dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version);
exit(1);
}
@ -701,7 +699,7 @@ resume_1:
bs->flusher->mark_trim_possible();
bs->journal.dirty_start = bs->journal.next_free;
printf(
"Journal entries loaded: %ju, free journal space: %ju bytes (%08jx..%08jx is used), free blocks: %ju / %ju\n",
"Journal entries loaded: %lu, free journal space: %lu bytes (%08lx..%08lx is used), free blocks: %lu / %lu\n",
entries_loaded,
(bs->journal.next_free >= bs->journal.used_start
? bs->journal.len-bs->journal.block_size - (bs->journal.next_free-bs->journal.used_start)
@ -756,7 +754,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
{
#ifdef BLOCKSTORE_DEBUG
printf(
"je_small_write%s oid=%jx:%jx ver=%ju offset=%u len=%u\n",
"je_small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u\n",
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
je->small_write.offset, je->small_write.len
@ -778,7 +776,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
if (location != je->small_write.data_offset)
{
char err[1024];
snprintf(err, 1024, "BUG: calculated journal data offset (%08jx) != stored journal data offset (%08jx)", location, je->small_write.data_offset);
snprintf(err, 1024, "BUG: calculated journal data offset (%08lx) != stored journal data offset (%08lx)", location, je->small_write.data_offset);
throw std::runtime_error(err);
}
small_write_data.clear();
@ -805,7 +803,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
covered += part_end - part_begin;
small_write_data.push_back((iovec){
.iov_base = (uint8_t*)done[i].buf + part_begin - done[i].pos,
.iov_len = (size_t)(part_end - part_begin),
.iov_len = part_end - part_begin,
});
}
}
@ -828,7 +826,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
if (!data_csum_valid)
{
printf(
"Journal entry data is corrupt for small_write%s oid=%jx:%jx ver=%ju offset=%u len=%u - data crc32 %x != %x\n",
"Journal entry data is corrupt for small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u - data crc32 %x != %x\n",
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
je->small_write.offset, je->small_write.len,
@ -847,7 +845,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
if (je->size != required_size)
{
printf(
"Journal entry data has invalid size for small_write%s oid=%jx:%jx ver=%ju offset=%u len=%u - should be %u bytes but is %u bytes\n",
"Journal entry data has invalid size for small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u - should be %u bytes but is %u bytes\n",
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
je->small_write.offset, je->small_write.len,
@ -895,7 +893,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
if (block_crc32 != *block_csums)
{
printf(
"Journal entry data is corrupt for small_write%s oid=%jx:%jx ver=%ju offset=%u len=%u - block %u crc32 %x != %x\n",
"Journal entry data is corrupt for small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u - block %u crc32 %x != %x\n",
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
je->small_write.oid.inode, je->small_write.oid.stripe, je->small_write.version,
je->small_write.offset, je->small_write.len,
@ -958,7 +956,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
bs->journal.used_sectors[proc_pos]++;
#ifdef BLOCKSTORE_DEBUG
printf(
"journal offset %08jx is used by %jx:%jx v%ju (%ju refs)\n",
"journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
proc_pos, ov.oid.inode, ov.oid.stripe, ov.version, bs->journal.used_sectors[proc_pos]
);
#endif
@ -974,7 +972,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
{
#ifdef BLOCKSTORE_DEBUG
printf(
"je_big_write%s oid=%jx:%jx ver=%ju loc=%ju\n",
"je_big_write%s oid=%lx:%lx ver=%lu loc=%lu\n",
je->type == JE_BIG_WRITE_INSTANT ? "_instant" : "",
je->big_write.oid.inode, je->big_write.oid.stripe, je->big_write.version, je->big_write.location >> bs->dsk.block_order
);
@ -1051,7 +1049,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
{
#ifdef BLOCKSTORE_DEBUG
printf(
"Allocate block (journal) %ju: %jx:%jx v%ju\n",
"Allocate block (journal) %lu: %lx:%lx v%lu\n",
je->big_write.location >> bs->dsk.block_order,
ov.oid.inode, ov.oid.stripe, ov.version
);
@ -1061,7 +1059,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
bs->journal.used_sectors[proc_pos]++;
#ifdef BLOCKSTORE_DEBUG
printf(
"journal offset %08jx is used by %jx:%jx v%ju (%ju refs)\n",
"journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
proc_pos, ov.oid.inode, ov.oid.stripe, ov.version, bs->journal.used_sectors[proc_pos]
);
#endif
@ -1076,7 +1074,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
else if (je->type == JE_STABLE)
{
#ifdef BLOCKSTORE_DEBUG
printf("je_stable oid=%jx:%jx ver=%ju\n", je->stable.oid.inode, je->stable.oid.stripe, je->stable.version);
printf("je_stable oid=%lx:%lx ver=%lu\n", je->stable.oid.inode, je->stable.oid.stripe, je->stable.version);
#endif
// oid, version
obj_ver_id ov = {
@ -1088,7 +1086,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
else if (je->type == JE_ROLLBACK)
{
#ifdef BLOCKSTORE_DEBUG
printf("je_rollback oid=%jx:%jx ver=%ju\n", je->rollback.oid.inode, je->rollback.oid.stripe, je->rollback.version);
printf("je_rollback oid=%lx:%lx ver=%lu\n", je->rollback.oid.inode, je->rollback.oid.stripe, je->rollback.version);
#endif
// rollback dirty writes of <oid> up to <version>
obj_ver_id ov = {
@ -1100,7 +1098,7 @@ int blockstore_init_journal::handle_journal_part(void *buf, uint64_t done_pos, u
else if (je->type == JE_DELETE)
{
#ifdef BLOCKSTORE_DEBUG
printf("je_delete oid=%jx:%jx ver=%ju\n", je->del.oid.inode, je->del.oid.stripe, je->del.version);
printf("je_delete oid=%lx:%lx ver=%lu\n", je->del.oid.inode, je->del.oid.stripe, je->del.version);
#endif
bool dirty_exists = false;
auto dirty_it = bs->dirty_db.upper_bound((obj_ver_id){

View File

@ -90,8 +90,8 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
}
// In fact, it's even more rare than "ran out of journal space", so print a warning
printf(
"Ran out of journal sector buffers: %d/%ju buffers used (%d dirty), next buffer (%jd)"
" is %s and flushed %ju times. Consider increasing \'journal_sector_buffer_count\'\n",
"Ran out of journal sector buffers: %d/%lu buffers used (%d dirty), next buffer (%ld)"
" is %s and flushed %lu times. Consider increasing \'journal_sector_buffer_count\'\n",
used, bs->journal.sector_count, dirty, next_sector,
bs->journal.sector_info[next_sector].dirty ? "dirty" : "not dirty",
bs->journal.sector_info[next_sector].flush_count
@ -103,7 +103,7 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
if (data_after > 0)
{
next_pos = next_pos + data_after;
if (next_pos >= bs->journal.len)
if (next_pos > bs->journal.len)
{
if (right_dir)
next_pos = bs->journal.block_size + data_after;
@ -114,7 +114,7 @@ int blockstore_journal_check_t::check_available(blockstore_op_t *op, int entries
{
// No space in the journal. Wait until used_start changes.
printf(
"Ran out of journal space (used_start=%08jx, next_free=%08jx, dirty_start=%08jx)\n",
"Ran out of journal space (used_start=%08lx, next_free=%08lx, dirty_start=%08lx)\n",
bs->journal.used_start, bs->journal.next_free, bs->journal.dirty_start
);
PRIV(op)->wait_for = WAIT_JOURNAL;
@ -146,7 +146,7 @@ journal_entry* prefill_single_journal_entry(journal_t & journal, uint16_t type,
journal.in_sector_pos = 0;
auto next_next_free = (journal.next_free+journal.block_size) < journal.len ? journal.next_free + journal.block_size : journal.block_size;
// double check that next_free doesn't cross used_start from the left
assert(journal.next_free >= journal.used_start && next_next_free >= journal.next_free || next_next_free < journal.used_start);
assert(journal.next_free >= journal.used_start || next_next_free < journal.used_start);
journal.next_free = next_next_free;
memset(journal.inmemory
? (uint8_t*)journal.buffer + journal.sector_info[journal.cur_sector].offset
@ -183,7 +183,7 @@ void blockstore_impl_t::prepare_journal_sector_write(int cur_sector, blockstore_
(journal.inmemory
? (uint8_t*)journal.buffer + journal.sector_info[cur_sector].offset
: (uint8_t*)journal.sector_buf + journal.block_size*cur_sector),
(size_t)journal.block_size
journal.block_size
};
data->callback = [this, flush_id = journal.submit_id](ring_data_t *data) { handle_journal_write(data, flush_id); };
my_uring_prep_writev(
@ -263,7 +263,7 @@ uint64_t journal_t::get_trim_pos()
// next_free does not need updating during trim
#ifdef BLOCKSTORE_DEBUG
printf(
"Trimming journal (used_start=%08jx, next_free=%08jx, dirty_start=%08jx, new_start=%08jx, new_refcount=%jd)\n",
"Trimming journal (used_start=%08lx, next_free=%08lx, dirty_start=%08lx, new_start=%08lx, new_refcount=%ld)\n",
used_start, next_free, dirty_start,
journal_used_it->first, journal_used_it->second
);
@ -276,7 +276,7 @@ uint64_t journal_t::get_trim_pos()
// Journal is cleared up to <journal_used_it>
#ifdef BLOCKSTORE_DEBUG
printf(
"Trimming journal (used_start=%08jx, next_free=%08jx, dirty_start=%08jx, new_start=%08jx, new_refcount=%jd)\n",
"Trimming journal (used_start=%08lx, next_free=%08lx, dirty_start=%08lx, new_start=%08lx, new_refcount=%ld)\n",
used_start, next_free, dirty_start,
journal_used_it->first, journal_used_it->second
);
@ -296,7 +296,7 @@ void journal_t::dump_diagnostics()
journal_used_it = used_sectors.begin();
}
printf(
"Journal: used_start=%08jx next_free=%08jx dirty_start=%08jx trim_to=%08jx trim_to_refs=%jd\n",
"Journal: used_start=%08lx next_free=%08lx dirty_start=%08lx trim_to=%08lx trim_to_refs=%ld\n",
used_start, next_free, dirty_start,
journal_used_it == used_sectors.end() ? 0 : journal_used_it->first,
journal_used_it == used_sectors.end() ? 0 : journal_used_it->second
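
Editor's note: the two assert variants in this file guard the same ring-buffer invariant — advancing the journal write pointer must never jump over used_start, the oldest offset still referenced. A minimal sketch of that advance step, assuming a simplified journal model (not the real blockstore_journal structures):

#include <cassert>
#include <cstdint>

// Simplified journal model: a ring whose first block is reserved for the superblock,
// used_start = oldest still-referenced offset, next_free = next offset to fill.
struct journal_model
{
    uint64_t len, block_size, used_start, next_free;
};

// Advance next_free by one block, wrapping to block_size at the end of the ring,
// and assert that the advance does not cross used_start "from the left"
// (i.e. does not overwrite still-referenced journal data).
static void advance_one_block(journal_model & j)
{
    uint64_t next_next_free = (j.next_free + j.block_size) < j.len
        ? j.next_free + j.block_size
        : j.block_size;
    assert((j.next_free >= j.used_start && next_next_free >= j.next_free)
        || next_next_free < j.used_start);
    j.next_free = next_next_free;
}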

View File

@ -13,7 +13,6 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config, bool init)
max_flusher_count = strtoull(config["flusher_count"].c_str(), NULL, 10);
}
min_flusher_count = strtoull(config["min_flusher_count"].c_str(), NULL, 10);
journal_trim_interval = strtoull(config["journal_trim_interval"].c_str(), NULL, 10);
max_write_iodepth = strtoull(config["max_write_iodepth"].c_str(), NULL, 10);
throttle_small_writes = config["throttle_small_writes"] == "true" || config["throttle_small_writes"] == "1" || config["throttle_small_writes"] == "yes";
throttle_target_iops = strtoull(config["throttle_target_iops"].c_str(), NULL, 10);
@ -32,10 +31,6 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config, bool init)
{
min_flusher_count = 1;
}
if (!journal_trim_interval)
{
journal_trim_interval = 512;
}
if (!max_write_iodepth)
{
max_write_iodepth = 128;
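
Editor's note: the options added or removed above all follow one parsing pattern — read the value with strtoull() and substitute a default when it is missing or zero. The same pattern as a standalone helper (hypothetical, not part of the real blockstore_impl_t):

#include <cstdint>
#include <cstdlib>
#include <map>
#include <string>

// Parse a numeric config option, falling back to a default when absent or zero.
static uint64_t config_u64(std::map<std::string, std::string> & config,
    const std::string & key, uint64_t def)
{
    uint64_t v = strtoull(config[key].c_str(), NULL, 10);
    return v ? v : def;
}

// Usage matching the diff: journal_trim_interval defaults to 512 when unset.
// uint64_t journal_trim_interval = config_u64(config, "journal_trim_interval", 512);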

View File

@ -25,7 +25,7 @@ int blockstore_impl_t::fulfill_read_push(blockstore_op_t *op, void *buf, uint64_
return 1;
}
BS_SUBMIT_GET_SQE(sqe, data);
data->iov = (struct iovec){ buf, (size_t)len };
data->iov = (struct iovec){ buf, len };
PRIV(op)->pending_ops++;
my_uring_prep_readv(
sqe,
@ -505,7 +505,7 @@ int blockstore_impl_t::dequeue_read(blockstore_op_t *read_op)
for (auto & rv: PRIV(read_op)->read_vec)
{
if (rv.journal_sector)
journal.used_sectors.at(rv.journal_sector-1)++;
journal.used_sectors[rv.journal_sector-1]++;
}
}
read_op->retval = 0;
@ -700,7 +700,7 @@ uint8_t* blockstore_impl_t::read_clean_meta_block(blockstore_op_t *op, uint64_t
.buf = buf,
});
BS_SUBMIT_GET_SQE(sqe, data);
data->iov = (struct iovec){ buf, (size_t)dsk.meta_block_size };
data->iov = (struct iovec){ buf, dsk.meta_block_size };
PRIV(op)->pending_ops++;
my_uring_prep_readv(sqe, dsk.meta_fd, &data->iov, 1, dsk.meta_offset + dsk.meta_block_size + sector);
data->callback = [this, op](ring_data_t *data) { handle_read_event(data, op); };
@ -855,7 +855,7 @@ void blockstore_impl_t::handle_read_event(ring_data_t *data, blockstore_op_t *op
{
ok = false;
printf(
"Checksum mismatch in object %jx:%jx v%ju in journal at 0x%jx, checksum block #%u: got %08x, expected %08x\n",
"Checksum mismatch in object %lx:%lx v%lu in journal at 0x%lx, checksum block #%u: got %08x, expected %08x\n",
op->oid.inode, op->oid.stripe, op->version,
rv[i].disk_offset, bad_block / dsk.csum_block_size, calc_csum, stored_csum
);
@ -875,7 +875,7 @@ void blockstore_impl_t::handle_read_event(ring_data_t *data, blockstore_op_t *op
{
ok = false;
printf(
"Checksum mismatch in object %jx:%jx v%ju in %s data at 0x%jx, checksum block #%u: got %08x, expected %08x\n",
"Checksum mismatch in object %lx:%lx v%lu in %s data at 0x%lx, checksum block #%u: got %08x, expected %08x\n",
op->oid.inode, op->oid.stripe, op->version,
(rv[i].copy_flags & COPY_BUF_JOURNALED_BIG ? "redirect-write" : "clean"),
rv[i].disk_offset, bad_block / dsk.csum_block_size, calc_csum, stored_csum
@ -918,7 +918,7 @@ void blockstore_impl_t::handle_read_event(ring_data_t *data, blockstore_op_t *op
{
// checksum error
printf(
"Checksum mismatch in object %jx:%jx v%ju in %s area at offset 0x%jx+0x%zx: %08x vs %08x\n",
"Checksum mismatch in object %lx:%lx v%lu in %s area at offset 0x%lx+0x%lx: %08x vs %08x\n",
op->oid.inode, op->oid.stripe, op->version,
(vec.copy_flags & COPY_BUF_JOURNAL) ? "journal" : "data", vec.disk_offset, p,
crc32c(0, (uint8_t*)op->buf + vec.offset - op->offset + p, dsk.csum_block_size), *csum
@ -966,7 +966,7 @@ void blockstore_impl_t::handle_read_event(ring_data_t *data, blockstore_op_t *op
{
if (rv.journal_sector)
{
auto used = --journal.used_sectors.at(rv.journal_sector-1);
auto used = --journal.used_sectors[rv.journal_sector-1];
if (used == 0)
{
journal.used_sectors.erase(rv.journal_sector-1);
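
Editor's note: the increments and decrements of journal.used_sectors above pin journal sectors for the lifetime of a read — while a read still references small-write data living in the journal, that sector cannot be trimmed; the stored value is offset by one so that zero can mean "not in the journal". A sketch of the pin/unpin pattern (illustrative only):

#include <cstdint>
#include <map>

// used_sectors maps journal sector offset -> number of live references.
struct journal_sector_pins
{
    std::map<uint64_t, uint64_t> used_sectors;

    // Called when a read (or dirty entry) starts referencing a journal sector.
    void pin(uint64_t sector_offset)
    {
        used_sectors[sector_offset]++;
    }

    // Called when the reference goes away; the entry is erased at refcount zero,
    // which is what later allows the flusher to trim that part of the journal.
    void unpin(uint64_t sector_offset)
    {
        auto it = used_sectors.find(sector_offset);
        if (it != used_sectors.end() && --it->second == 0)
            used_sectors.erase(it);
    }
};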

View File

@ -162,6 +162,7 @@ void blockstore_impl_t::mark_rolled_back(const obj_ver_id & ov)
unstable_writes.erase(unstab_it);
else
unstab_it->second = max_unstable;
unstable_count_changed = true;
}
}
}
@ -179,7 +180,7 @@ void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start,
{
object_id oid = dirty_it->first.oid;
#ifdef BLOCKSTORE_DEBUG
printf("Unblock writes-after-delete %jx:%jx v%ju\n", oid.inode, oid.stripe, dirty_it->first.version);
printf("Unblock writes-after-delete %lx:%lx v%lu\n", oid.inode, oid.stripe, dirty_it->first.version);
#endif
dirty_it = dirty_end;
// Unblock operations blocked by delete flushing
@ -210,26 +211,21 @@ void blockstore_impl_t::erase_dirty(blockstore_dirty_db_t::iterator dirty_start,
dirty_it->second.location != UINT64_MAX)
{
#ifdef BLOCKSTORE_DEBUG
printf("Free block %ju from %jx:%jx v%ju\n", dirty_it->second.location >> dsk.block_order,
printf("Free block %lu from %lx:%lx v%lu\n", dirty_it->second.location >> dsk.block_order,
dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version);
#endif
data_alloc->set(dirty_it->second.location >> dsk.block_order, false);
}
auto used = --journal.used_sectors.at(dirty_it->second.journal_sector);
auto used = --journal.used_sectors[dirty_it->second.journal_sector];
#ifdef BLOCKSTORE_DEBUG
printf(
"remove usage of journal offset %08jx by %jx:%jx v%ju (%ju refs)\n", dirty_it->second.journal_sector,
"remove usage of journal offset %08lx by %lx:%lx v%lu (%lu refs)\n", dirty_it->second.journal_sector,
dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version, used
);
#endif
if (used == 0)
{
journal.used_sectors.erase(dirty_it->second.journal_sector);
if (dirty_it->second.journal_sector == journal.sector_info[journal.cur_sector].offset)
{
// Mark current sector as "full" to select the new one
journal.in_sector_pos = dsk.journal_block_size;
}
flusher->mark_trim_possible();
}
free_dirty_dyn_data(dirty_it->second);

View File

@ -298,7 +298,7 @@ int blockstore_impl_t::dequeue_stable(blockstore_op_t *op)
if (clean_it == clean_db.end() || clean_it->second.version < ov.version)
{
// No such object version
printf("Error: %jx:%jx v%ju not found while stabilizing\n", ov.oid.inode, ov.oid.stripe, ov.version);
printf("Error: %lx:%lx v%lu not found while stabilizing\n", ov.oid.inode, ov.oid.stripe, ov.version);
return -ENOENT;
}
else
@ -307,49 +307,35 @@ int blockstore_impl_t::dequeue_stable(blockstore_op_t *op)
return STAB_SPLIT_DONE;
}
}
else if (IS_IN_FLIGHT(dirty_it->second.state))
{
// Object write is still in progress. Wait until the write request completes
return STAB_SPLIT_WAIT;
}
else if (!IS_SYNCED(dirty_it->second.state))
{
// Object not synced yet - sync it
// In previous versions we returned EBUSY here and required
// the caller (OSD) to issue a global sync first. But a global sync
// waits for all writes in the queue including inflight writes. And
// inflight writes may themselves be blocked by unstable writes being
// still present in the journal and not flushed away from it.
// So we must sync specific objects here.
//
// Even more, we have to process "stabilize" request in parts. That is,
// we must stabilize all objects which are already synced. Otherwise
// they may block objects which are NOT synced yet.
return STAB_SPLIT_SYNC;
}
else if (IS_STABLE(dirty_it->second.state))
{
// Already stable
return STAB_SPLIT_DONE;
}
while (true)
else
{
if (IS_IN_FLIGHT(dirty_it->second.state))
{
// Object write is still in progress. Wait until the write request completes
return STAB_SPLIT_WAIT;
}
else if (!IS_SYNCED(dirty_it->second.state))
{
// Object not synced yet - sync it
// In previous versions we returned EBUSY here and required
// the caller (OSD) to issue a global sync first. But a global sync
// waits for all writes in the queue including inflight writes. And
// inflight writes may themselves be blocked by unstable writes being
// still present in the journal and not flushed away from it.
// So we must sync specific objects here.
//
// Even more, we have to process "stabilize" request in parts. That is,
// we must stabilize all objects which are already synced. Otherwise
// they may block objects which are NOT synced yet.
return STAB_SPLIT_SYNC;
}
else if (IS_STABLE(dirty_it->second.state))
{
break;
}
// Check previous versions too
if (dirty_it == dirty_db.begin())
{
break;
}
dirty_it--;
if (dirty_it->first.oid != ov.oid)
{
break;
}
return STAB_SPLIT_TODO;
}
return STAB_SPLIT_TODO;
});
if (r != 1)
{
@ -416,7 +402,7 @@ resume_4:
{
// Mark all dirty_db entries up to op->version as stable
#ifdef BLOCKSTORE_DEBUG
printf("Stabilize %jx:%jx v%ju\n", v->oid.inode, v->oid.stripe, v->version);
printf("Stabilize %lx:%lx v%lu\n", v->oid.inode, v->oid.stripe, v->version);
#endif
mark_stable(*v);
}
@ -507,7 +493,7 @@ void blockstore_impl_t::mark_stable(obj_ver_id v, bool forget_dirty)
{
// mark_stable should never be called for in-flight or submitted writes
printf(
"BUG: Attempt to mark_stable object %jx:%jx v%ju state of which is %x\n",
"BUG: Attempt to mark_stable object %lx:%lx v%lu state of which is %x\n",
dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version,
dirty_it->second.state
);
@ -551,5 +537,6 @@ void blockstore_impl_t::mark_stable(obj_ver_id v, bool forget_dirty)
unstab_it->second <= v.version)
{
unstable_writes.erase(unstab_it);
unstable_count_changed = true;
}
}
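
Editor's note: the restructured loop above walks an object's dirty versions and classifies each one; the long comment explains why an unsynced object is now synced individually instead of failing with EBUSY. The decision ladder, reduced to a standalone function (names mirror the diff, but this is an illustration, not the real code):

enum stab_split_action { STAB_SPLIT_DONE, STAB_SPLIT_WAIT, STAB_SPLIT_SYNC, STAB_SPLIT_TODO };

// Classify one dirty version of an object during a split "stabilize" request.
static stab_split_action classify_version(bool in_flight, bool synced, bool stable)
{
    if (in_flight)
        return STAB_SPLIT_WAIT; // write still running - retry after it completes
    if (!synced)
        return STAB_SPLIT_SYNC; // sync this specific object, not the whole queue
    if (stable)
        return STAB_SPLIT_DONE; // already stable - nothing to stabilize
    return STAB_SPLIT_TODO;     // synced but unstable - stabilize it
}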

View File

@ -92,7 +92,8 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
}
}
else if (!space_check.check_available(op, PRIV(op)->sync_big_writes.size(),
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size, 0))
sizeof(journal_entry_big_write) + dsk.clean_entry_bitmap_size,
(unstable_writes.size()+unstable_unsynced)*journal.block_size))
{
return 0;
}
@ -115,14 +116,11 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
journal, (dirty_entry.state & BS_ST_INSTANT) ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
sizeof(journal_entry_big_write) + dyn_size
);
auto jsec = dirty_entry.journal_sector = journal.sector_info[journal.cur_sector].offset;
assert(journal.next_free >= journal.used_start
? (jsec >= journal.used_start && jsec < journal.next_free)
: (jsec >= journal.used_start || jsec < journal.next_free));
dirty_entry.journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
#ifdef BLOCKSTORE_DEBUG
printf(
"journal offset %08jx is used by %jx:%jx v%ju (%ju refs)\n",
"journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
dirty_entry.journal_sector, it->oid.inode, it->oid.stripe, it->version,
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]
);
@ -176,7 +174,7 @@ void blockstore_impl_t::ack_sync(blockstore_op_t *op)
for (auto it = PRIV(op)->sync_big_writes.begin(); it != PRIV(op)->sync_big_writes.end(); it++)
{
#ifdef BLOCKSTORE_DEBUG
printf("Ack sync big %jx:%jx v%ju\n", it->oid.inode, it->oid.stripe, it->version);
printf("Ack sync big %lx:%lx v%lu\n", it->oid.inode, it->oid.stripe, it->version);
#endif
auto & unstab = unstable_writes[it->oid];
unstab = unstab < it->version ? it->version : unstab;
@ -204,7 +202,7 @@ void blockstore_impl_t::ack_sync(blockstore_op_t *op)
for (auto it = PRIV(op)->sync_small_writes.begin(); it != PRIV(op)->sync_small_writes.end(); it++)
{
#ifdef BLOCKSTORE_DEBUG
printf("Ack sync small %jx:%jx v%ju\n", it->oid.inode, it->oid.stripe, it->version);
printf("Ack sync small %lx:%lx v%lu\n", it->oid.inode, it->oid.stripe, it->version);
#endif
auto & unstab = unstable_writes[it->oid];
unstab = unstab < it->version ? it->version : unstab;
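
Editor's note: ack_sync() above records, per object, the highest version that has been synced but not yet stabilized; this unstable_writes map is what the space_check calls in the same file reserve journal blocks for. A reduced sketch of that bookkeeping (types are placeholders):

#include <cstdint>
#include <map>

struct oid_t
{
    uint64_t inode, stripe;
    bool operator<(const oid_t & other) const
    {
        return inode < other.inode || (inode == other.inode && stripe < other.stripe);
    }
};

// Remember the maximum unstable (synced but not stabilized) version per object.
static void remember_unstable(std::map<oid_t, uint64_t> & unstable_writes,
    const oid_t & oid, uint64_t version)
{
    auto & unstab = unstable_writes[oid];
    unstab = unstab < version ? version : unstab;
}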

View File

@ -85,7 +85,7 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
// It's allowed to write versions with low numbers over deletes
// However, we have to flush those deletes first as we use version number for ordering
#ifdef BLOCKSTORE_DEBUG
printf("Write %jx:%jx v%ju over delete (real v%ju) offset=%u len=%u\n", op->oid.inode, op->oid.stripe, version, op->version, op->offset, op->len);
printf("Write %lx:%lx v%lu over delete (real v%lu) offset=%u len=%u\n", op->oid.inode, op->oid.stripe, version, op->version, op->offset, op->len);
#endif
wait_del = true;
PRIV(op)->real_version = op->version;
@ -95,13 +95,11 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
// Issue an additional sync so the delete reaches the journal
blockstore_op_t *sync_op = new blockstore_op_t;
sync_op->opcode = BS_OP_SYNC;
sync_op->oid = op->oid;
sync_op->version = op->version;
sync_op->callback = [this](blockstore_op_t *sync_op)
sync_op->callback = [this, op](blockstore_op_t *sync_op)
{
flusher->unshift_flush((obj_ver_id){
.oid = sync_op->oid,
.version = sync_op->version-1,
.oid = op->oid,
.version = op->version-1,
}, true);
delete sync_op;
};
@ -119,7 +117,7 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
{
// Invalid version requested
#ifdef BLOCKSTORE_DEBUG
printf("Write %jx:%jx v%ju requested, but we already have v%ju\n", op->oid.inode, op->oid.stripe, op->version, version);
printf("Write %lx:%lx v%lu requested, but we already have v%lu\n", op->oid.inode, op->oid.stripe, op->version, version);
#endif
op->retval = -EEXIST;
if (!is_del && alloc_dyn_data)
@ -146,9 +144,9 @@ bool blockstore_impl_t::enqueue_write(blockstore_op_t *op)
unsynced_queued_ops++;
#ifdef BLOCKSTORE_DEBUG
if (is_del)
printf("Delete %jx:%jx v%ju\n", op->oid.inode, op->oid.stripe, op->version);
printf("Delete %lx:%lx v%lu\n", op->oid.inode, op->oid.stripe, op->version);
else if (!wait_del)
printf("Write %jx:%jx v%ju offset=%u len=%u\n", op->oid.inode, op->oid.stripe, op->version, op->offset, op->len);
printf("Write %lx:%lx v%lu offset=%u len=%u\n", op->oid.inode, op->oid.stripe, op->version, op->offset, op->len);
#endif
// No strict need to add it into dirty_db here except maybe for listings to return
// correct data when there are inflight operations in the queue
@ -288,7 +286,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
}
// Restore original low version number for unblocked operations
#ifdef BLOCKSTORE_DEBUG
printf("Restoring %jx:%jx version: v%ju -> v%ju\n", op->oid.inode, op->oid.stripe, op->version, PRIV(op)->real_version);
printf("Restoring %lx:%lx version: v%lu -> v%lu\n", op->oid.inode, op->oid.stripe, op->version, PRIV(op)->real_version);
#endif
auto prev_it = dirty_it;
if (prev_it != dirty_db.begin())
@ -298,7 +296,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
{
// Original version is still invalid
// All subsequent writes to the same object must be canceled too
printf("Tried to write %jx:%jx v%ju after delete (old version v%ju), but already have v%ju\n",
printf("Tried to write %lx:%lx v%lu after delete (old version v%lu), but already have v%lu\n",
op->oid.inode, op->oid.stripe, PRIV(op)->real_version, op->version, prev_it->first.version);
cancel_all_writes(op, dirty_it, -EEXIST);
return 2;
@ -322,7 +320,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, unsynced_big_write_count + 1,
sizeof(journal_entry_big_write) + dsk.clean_dyn_size,
(unstable_writes.size()+unstable_unsynced+((dirty_it->second.state & BS_ST_INSTANT) ? 0 : 1))*journal.block_size))
(unstable_writes.size()+unstable_unsynced)*journal.block_size))
{
return 0;
}
@ -350,8 +348,8 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
if (entry->oid.inode || entry->oid.stripe || entry->version)
{
printf(
"Fatal error (metadata corruption or bug): tried to write object %jx:%jx v%ju"
" over a non-zero metadata entry %ju with %jx:%jx v%ju\n", op->oid.inode,
"Fatal error (metadata corruption or bug): tried to write object %lx:%lx v%lu"
" over a non-zero metadata entry %lu with %lx:%lx v%lu\n", op->oid.inode,
op->oid.stripe, op->version, loc, entry->oid.inode, entry->oid.stripe, entry->version
);
exit(1);
@ -363,7 +361,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
dirty_it->second.state = (dirty_it->second.state & ~BS_ST_WORKFLOW_MASK) | BS_ST_SUBMITTED;
#ifdef BLOCKSTORE_DEBUG
printf(
"Allocate block %ju for %jx:%jx v%ju\n",
"Allocate block %lu for %lx:%lx v%lu\n",
loc, op->oid.inode, op->oid.stripe, op->version
);
#endif
@ -374,13 +372,13 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
int vcnt = 0;
if (stripe_offset)
{
PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ zero_object, (size_t)stripe_offset };
PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ zero_object, stripe_offset };
}
PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ op->buf, op->len };
if (stripe_end)
{
stripe_end = dsk.bitmap_granularity - stripe_end;
PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ zero_object, (size_t)stripe_end };
PRIV(op)->iov_zerofill[vcnt++] = (struct iovec){ zero_object, stripe_end };
}
data->iov.iov_len = op->len + stripe_offset + stripe_end; // to check it in the callback
data->callback = [this, op](ring_data_t *data) { handle_write_event(data, op); };
@ -414,7 +412,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
sizeof(journal_entry_big_write) + dsk.clean_dyn_size, 0)
|| !space_check.check_available(op, 1,
sizeof(journal_entry_small_write) + dyn_size,
op->len + (unstable_writes.size()+unstable_unsynced+((dirty_it->second.state & BS_ST_INSTANT) ? 0 : 1))*journal.block_size))
op->len + (unstable_writes.size()+unstable_unsynced)*journal.block_size))
{
return 0;
}
@ -438,23 +436,11 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
journal, op->opcode == BS_OP_WRITE_STABLE ? JE_SMALL_WRITE_INSTANT : JE_SMALL_WRITE,
sizeof(journal_entry_small_write) + dyn_size
);
auto jsec = dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
if (!(journal.next_free >= journal.used_start
? (jsec >= journal.used_start && jsec < journal.next_free)
: (jsec >= journal.used_start || jsec < journal.next_free)))
{
printf(
"BUG: journal offset %08jx is used by %jx:%jx v%ju (%ju refs) BUT used_start=%jx next_free=%jx\n",
dirty_it->second.journal_sector, dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version,
journal.used_sectors[journal.sector_info[journal.cur_sector].offset],
journal.used_start, journal.next_free
);
abort();
}
dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
#ifdef BLOCKSTORE_DEBUG
printf(
"journal offset %08jx is used by %jx:%jx v%ju (%ju refs)\n",
"journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
dirty_it->second.journal_sector, dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version,
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]
);
@ -468,8 +454,8 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
journal_used_it->first < next_next_free + op->len)
{
printf(
"BUG: Attempt to overwrite used offset (%jx, %ju refs) of the journal with the object %jx:%jx v%ju: data at %jx, len %x!"
" Journal used_start=%08jx (%ju refs), next_free=%08jx, dirty_start=%08jx\n",
"BUG: Attempt to overwrite used offset (%lx, %lu refs) of the journal with the object %lx:%lx v%lu: data at %lx, len %x!"
" Journal used_start=%08lx (%lu refs), next_free=%08lx, dirty_start=%08lx\n",
journal_used_it->first, journal_used_it->second, op->oid.inode, op->oid.stripe, op->version, next_next_free, op->len,
journal.used_start, journal.used_sectors[journal.used_start], journal.next_free, journal.dirty_start
);
@ -477,7 +463,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
}
}
// double check that next_free doesn't cross used_start from the left
assert(journal.next_free >= journal.used_start && next_next_free >= journal.next_free || next_next_free < journal.used_start);
assert(journal.next_free >= journal.used_start || next_next_free < journal.used_start);
journal.next_free = next_next_free;
je->oid = op->oid;
je->version = op->version;
@ -519,7 +505,7 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
if (next_next_free >= journal.len)
next_next_free = dsk.journal_block_size;
// double check that next_free doesn't cross used_start from the left
assert(journal.next_free >= journal.used_start && next_next_free >= journal.next_free || next_next_free < journal.used_start);
assert(journal.next_free >= journal.used_start || next_next_free < journal.used_start);
journal.next_free = next_next_free;
if (!(dirty_it->second.state & BS_ST_INSTANT))
{
@ -563,7 +549,7 @@ resume_2:
uint64_t dyn_size = dsk.dirty_dyn_size(op->offset, op->len);
blockstore_journal_check_t space_check(this);
if (!space_check.check_available(op, 1, sizeof(journal_entry_big_write) + dyn_size,
(unstable_writes.size()+unstable_unsynced+((dirty_it->second.state & BS_ST_INSTANT) ? 0 : 1))*journal.block_size))
(unstable_writes.size()+unstable_unsynced)*journal.block_size))
{
return 0;
}
@ -572,23 +558,11 @@ resume_2:
journal, op->opcode == BS_OP_WRITE_STABLE ? JE_BIG_WRITE_INSTANT : JE_BIG_WRITE,
sizeof(journal_entry_big_write) + dyn_size
);
auto jsec = dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
if (!(journal.next_free >= journal.used_start
? (jsec >= journal.used_start && jsec < journal.next_free)
: (jsec >= journal.used_start || jsec < journal.next_free)))
{
printf(
"BUG: journal offset %08jx is used by %jx:%jx v%ju (%ju refs) BUT used_start=%jx next_free=%jx\n",
dirty_it->second.journal_sector, dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version,
journal.used_sectors[journal.sector_info[journal.cur_sector].offset],
journal.used_start, journal.next_free
);
abort();
}
dirty_it->second.journal_sector = journal.sector_info[journal.cur_sector].offset;
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
#ifdef BLOCKSTORE_DEBUG
printf(
"journal offset %08jx is used by %jx:%jx v%ju (%ju refs)\n",
"journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
journal.sector_info[journal.cur_sector].offset, op->oid.inode, op->oid.stripe, op->version,
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]
);
@ -615,7 +589,7 @@ resume_4:
});
assert(dirty_it != dirty_db.end());
#ifdef BLOCKSTORE_DEBUG
printf("Ack write %jx:%jx v%ju = state 0x%x\n", op->oid.inode, op->oid.stripe, op->version, dirty_it->second.state);
printf("Ack write %lx:%lx v%lu = state 0x%x\n", op->oid.inode, op->oid.stripe, op->version, dirty_it->second.state);
#endif
bool is_big = (dirty_it->second.state & BS_ST_TYPE_MASK) == BS_ST_BIG_WRITE;
bool imm = is_big ? (immediate_commit == IMMEDIATE_ALL) : (immediate_commit != IMMEDIATE_NONE);
@ -808,7 +782,7 @@ int blockstore_impl_t::dequeue_del(blockstore_op_t *op)
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]++;
#ifdef BLOCKSTORE_DEBUG
printf(
"journal offset %08jx is used by %jx:%jx v%ju (%ju refs)\n",
"journal offset %08lx is used by %lx:%lx v%lu (%lu refs)\n",
dirty_it->second.journal_sector, dirty_it->first.oid.inode, dirty_it->first.oid.stripe, dirty_it->first.version,
journal.used_sectors[journal.sector_info[journal.cur_sector].offset]
);
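
Editor's note: when a big write in this file is not aligned to bitmap_granularity, the submit path pads the head and tail of the iovec list with a shared zero buffer so the whole granule is written. The same assembly as a standalone helper (a sketch; zero_object is assumed to be a buffer of at least bitmap_granularity zero bytes):

#include <cstddef>
#include <sys/uio.h>

// Build up to three iovecs: [head zero padding][payload][tail zero padding].
// Returns the number of iovecs filled; the total length is
// stripe_offset + len + (granularity - stripe_end) when both paddings are present.
static int build_padded_iov(struct iovec *iov, void *zero_object, void *buf,
    size_t len, size_t stripe_offset, size_t stripe_end, size_t granularity)
{
    int vcnt = 0;
    if (stripe_offset)
        iov[vcnt++] = { zero_object, stripe_offset };             // unaligned head
    iov[vcnt++] = { buf, len };                                   // actual payload
    if (stripe_end)
        iov[vcnt++] = { zero_object, granularity - stripe_end };  // unaligned tail
    return vcnt;
}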

View File

@ -77,7 +77,7 @@ struct alloc_osd_t
std::string key = base64_decode(kv["key"].string_value());
osd_num_t cur_osd;
char null_byte = 0;
int scanned = sscanf(key.c_str() + parent->cli->st_cli.etcd_prefix.length(), "/osd/stats/%ju%c", &cur_osd, &null_byte);
int scanned = sscanf(key.c_str() + parent->cli->st_cli.etcd_prefix.length(), "/osd/stats/%lu%c", &cur_osd, &null_byte);
if (scanned != 1 || !cur_osd)
{
fprintf(stderr, "Invalid key in etcd: %s\n", key.c_str());
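
Editor's note: most of the hunks in this comparison only swap %lx/%lu/%ld for %jx/%ju/%jd (or back) in printf/sscanf format strings. The width matters for uint64_t fields such as inode, stripe and version: %lu expects unsigned long, which is 32-bit on some ABIs, while %ju expects uintmax_t and the PRIu64 macros always match uint64_t exactly. A small illustration:

#include <cinttypes>
#include <cstdio>

int main()
{
    uint64_t inode = 0x1000000000001, version = 42;
    // Portable via the C99 %j length modifier (argument must be uintmax_t):
    printf("oid=%jx v%ju\n", (uintmax_t)inode, (uintmax_t)version);
    // Portable via <cinttypes> macros that always match uint64_t:
    printf("oid=%" PRIx64 " v%" PRIu64 "\n", inode, version);
    return 0;
}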

View File

@ -11,7 +11,7 @@ void cli_tool_t::change_parent(inode_t cur, inode_t new_parent, cli_result_t *re
if (cur_cfg_it == cli->st_cli.inode_config.end())
{
char buf[128];
snprintf(buf, 128, "Inode 0x%jx disappeared", cur);
snprintf(buf, 128, "Inode 0x%lx disappeared", cur);
*result = (cli_result_t){ .err = EIO, .text = buf };
return;
}

View File

@ -160,14 +160,14 @@ struct cli_describe_t
if (op->reply.hdr.retval < 0)
{
fprintf(
stderr, "Failed to describe objects on OSD %ju (retval=%jd)\n",
stderr, "Failed to describe objects on OSD %lu (retval=%ld)\n",
osd_num, op->reply.hdr.retval
);
}
else if (op->reply.describe.result_bytes != op->reply.hdr.retval * sizeof(osd_reply_describe_item_t))
{
fprintf(
stderr, "Invalid response size from OSD %ju (expected %ju bytes, got %ju bytes)\n",
stderr, "Invalid response size from OSD %lu (expected %lu bytes, got %lu bytes)\n",
osd_num, op->reply.hdr.retval * sizeof(osd_reply_describe_item_t), op->reply.describe.result_bytes
);
}
@ -178,11 +178,11 @@ struct cli_describe_t
{
if (!parent->json_output || parent->is_command_line)
{
#define FMT "{\"inode\":\"0x%jx\",\"stripe\":\"0x%jx\",\"part\":%u,\"osd_num\":%ju%s%s%s}"
#define FMT "{\"inode\":\"0x%lx\",\"stripe\":\"0x%lx\",\"part\":%u,\"osd_num\":%lu%s%s%s}"
printf(
(parent->json_output
? (count > 0 ? ",\n " FMT : " " FMT)
: "%jx:%jx part %u on OSD %ju%s%s%s\n"),
: "%lx:%lx part %u on OSD %lu%s%s%s\n"),
#undef FMT
items[i].inode, items[i].stripe,
items[i].role, items[i].osd_num,

View File

@ -82,7 +82,7 @@ resume_1:
// osd ID
osd_num_t osd_num;
char null_byte = 0;
int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/osd/stats/%ju%c", &osd_num, &null_byte);
int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(), "/osd/stats/%lu%c", &osd_num, &null_byte);
if (scanned != 1 || !osd_num || osd_num >= POOL_ID_MAX)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());

View File

@ -136,7 +136,7 @@ struct cli_fix_t
auto pool_cfg_it = parent->cli->st_cli.pool_config.find(INODE_POOL(obj.inode));
if (pool_cfg_it == parent->cli->st_cli.pool_config.end())
{
fprintf(stderr, "Object %jx:%jx is from unknown pool\n", obj.inode, obj.stripe);
fprintf(stderr, "Object %lx:%lx is from unknown pool\n", obj.inode, obj.stripe);
continue;
}
auto & pool_cfg = pool_cfg_it->second;
@ -146,7 +146,7 @@ struct cli_fix_t
!pg_it->second.cur_primary || !(pg_it->second.cur_state & PG_ACTIVE))
{
fprintf(
stderr, "Object %jx:%jx is from PG %u/%u which is not currently active\n",
stderr, "Object %lx:%lx is from PG %u/%u which is not currently active\n",
obj.inode, obj.stripe, pool_cfg_it->first, pg_num
);
continue;
@ -171,7 +171,7 @@ struct cli_fix_t
{
if (op->reply.hdr.retval < 0 || op->reply.describe.result_bytes != op->reply.hdr.retval * sizeof(osd_reply_describe_item_t))
{
fprintf(stderr, "Failed to describe objects on OSD %ju (retval=%jd)\n", primary_osd, op->reply.hdr.retval);
fprintf(stderr, "Failed to describe objects on OSD %lu (retval=%ld)\n", primary_osd, op->reply.hdr.retval);
parent->waiting--;
loop();
}
@ -209,7 +209,7 @@ struct cli_fix_t
if (rm_op->reply.hdr.retval < 0)
{
fprintf(
stderr, "Failed to remove object %jx:%jx from OSD %ju (retval=%jd)\n",
stderr, "Failed to remove object %lx:%lx from OSD %lu (retval=%ld)\n",
rm_op->req.sec_del.oid.inode, rm_op->req.sec_del.oid.stripe,
rm_osd_num, rm_op->reply.hdr.retval
);
@ -226,7 +226,7 @@ struct cli_fix_t
else
{
printf(
"Removed %jx:%jx (part %ju) from OSD %ju\n",
"Removed %lx:%lx (part %lu) from OSD %lu\n",
rm_op->req.sec_del.oid.inode, rm_op->req.sec_del.oid.stripe & ~STRIPE_MASK,
rm_op->req.sec_del.oid.stripe & STRIPE_MASK, rm_osd_num
);
@ -254,7 +254,7 @@ struct cli_fix_t
if (scrub_op->reply.hdr.retval < 0 && scrub_op->reply.hdr.retval != -ENOENT)
{
fprintf(
stderr, "Failed to scrub %jx:%jx on OSD %ju (retval=%jd)\n",
stderr, "Failed to scrub %lx:%lx on OSD %lu (retval=%ld)\n",
obj.inode, obj.stripe, primary_osd, scrub_op->reply.hdr.retval
);
}
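
Editor's note: the "Removed %jx:%jx (part %ju)" message above splits the stripe field with STRIPE_MASK — the low bits select the part (role) of an erasure-coded or replicated object, the high bits the stripe offset itself. A toy version of that split (the actual STRIPE_MASK value is not shown in this diff):

#include <cstdint>

// Split oid.stripe into the stripe offset (high bits) and the part number (low bits).
static void split_stripe(uint64_t stripe, uint64_t stripe_mask,
    uint64_t *stripe_offset, uint64_t *part)
{
    *stripe_offset = stripe & ~stripe_mask;
    *part = stripe & stripe_mask;
}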

View File

@ -150,7 +150,7 @@ resume_1:
inode_t only_inode_num;
char null_byte = 0;
int scanned = sscanf(kv.key.substr(parent->cli->st_cli.etcd_prefix.length()).c_str(),
"/inode/stats/%u/%ju%c", &pool_id, &only_inode_num, &null_byte);
"/inode/stats/%u/%lu%c", &pool_id, &only_inode_num, &null_byte);
if (scanned != 2 || !pool_id || pool_id >= POOL_ID_MAX || INODE_POOL(only_inode_num) != 0)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
@ -456,7 +456,7 @@ std::string format_lat(uint64_t lat)
char buf[256];
int l = 0;
if (lat < 100)
l = snprintf(buf, sizeof(buf), "%ju us", lat);
l = snprintf(buf, sizeof(buf), "%lu us", lat);
else if (lat < 500000)
l = snprintf(buf, sizeof(buf), "%.2f ms", (double)lat/1000);
else

View File

@ -202,7 +202,7 @@ struct snap_merger_t
if (parent->progress)
{
printf(
"Merging %zd layer(s) into target %s%s (inode %ju in pool %u)\n",
"Merging %ld layer(s) into target %s%s (inode %lu in pool %u)\n",
sources.size(), target_cfg->name.c_str(),
use_cas ? " online (with CAS)" : "", INODE_NO_POOL(target), INODE_POOL(target)
);
@ -275,7 +275,7 @@ struct snap_merger_t
processed++;
if (parent->progress && !(processed % 128))
{
printf("\rFiltering target blocks: %ju/%ju", processed, to_process);
printf("\rFiltering target blocks: %lu/%lu", processed, to_process);
}
}
if (in_flight > 0 || oit != merge_offsets.end())
@ -285,7 +285,7 @@ struct snap_merger_t
}
if (parent->progress)
{
printf("\r%ju full blocks of target filtered out\n", to_process-merge_offsets.size());
printf("\r%lu full blocks of target filtered out\n", to_process-merge_offsets.size());
}
}
state = 3;
@ -320,7 +320,7 @@ struct snap_merger_t
processed++;
if (parent->progress && !(processed % 128))
{
printf("\rOverwriting blocks: %ju/%ju", processed, to_process);
printf("\rOverwriting blocks: %lu/%lu", processed, to_process);
}
}
if (in_flight == 0 && rwo_error.size())
@ -339,7 +339,7 @@ struct snap_merger_t
}
if (parent->progress)
{
printf("\rOverwriting blocks: %ju/%ju\n", to_process, to_process);
printf("\rOverwriting blocks: %lu/%lu\n", to_process, to_process);
}
// Done
result = (cli_result_t){ .text = "Done, layers from "+from_name+" to "+to_name+" merged into "+target_name };
@ -384,7 +384,7 @@ struct snap_merger_t
auto & name = parent->cli->st_cli.inode_config.at(src).name;
if (parent->progress)
{
printf("Got listing of layer %s (inode %ju in pool %u)\n", name.c_str(), INODE_NO_POOL(src), INODE_POOL(src));
printf("Got listing of layer %s (inode %lu in pool %u)\n", name.c_str(), INODE_NO_POOL(src), INODE_POOL(src));
}
if (delete_source)
{
@ -416,7 +416,7 @@ struct snap_merger_t
{
if (op->retval < 0)
{
fprintf(stderr, "error reading target bitmap at offset %jx: %s\n", op->offset, strerror(-op->retval));
fprintf(stderr, "error reading target bitmap at offset %lx: %s\n", op->offset, strerror(-op->retval));
}
else
{
@ -571,7 +571,7 @@ struct snap_merger_t
{
if (subop->retval != 0)
{
fprintf(stderr, "error deleting from layer 0x%jx at offset %jx: %s", subop->inode, subop->offset, strerror(-subop->retval));
fprintf(stderr, "error deleting from layer 0x%lx at offset %lx: %s", subop->inode, subop->offset, strerror(-subop->retval));
}
delete subop;
};
@ -620,7 +620,7 @@ struct snap_merger_t
if (rwo->error_code)
{
char buf[1024];
snprintf(buf, 1024, "Error %s target at offset %jx: %s",
snprintf(buf, 1024, "Error %s target at offset %lx: %s",
rwo->error_read ? "reading" : "writing", rwo->error_offset, strerror(rwo->error_code));
rwo_error = std::string(buf);
}

View File

@ -291,7 +291,7 @@ resume_100:
if (it == parent->cli->st_cli.inode_config.end())
{
char buf[1024];
snprintf(buf, 1024, "Parent inode of layer %s (id 0x%jx) not found", cur->name.c_str(), cur->parent_id);
snprintf(buf, 1024, "Parent inode of layer %s (id 0x%lx) not found", cur->name.c_str(), cur->parent_id);
state = 100;
return;
}
@ -384,7 +384,7 @@ resume_100:
pool_id_t pool_id = 0;
inode_t inode = 0;
char null_byte = 0;
int scanned = sscanf(kv.key.c_str() + parent->cli->st_cli.etcd_prefix.length()+13, "%u/%ju%c", &pool_id, &inode, &null_byte);
int scanned = sscanf(kv.key.c_str() + parent->cli->st_cli.etcd_prefix.length()+13, "%u/%lu%c", &pool_id, &inode, &null_byte);
if (scanned != 2 || !inode)
{
result = (cli_result_t){ .err = EIO, .text = "Bad key returned from etcd: "+kv.key };
@ -439,7 +439,7 @@ resume_100:
if (child_it == parent->cli->st_cli.inode_config.end())
{
char buf[1024];
snprintf(buf, 1024, "Inode 0x%jx disappeared", inverse_child);
snprintf(buf, 1024, "Inode 0x%lx disappeared", inverse_child);
result = (cli_result_t){ .err = EIO, .text = std::string(buf) };
state = 100;
return;
@ -448,7 +448,7 @@ resume_100:
if (target_it == parent->cli->st_cli.inode_config.end())
{
char buf[1024];
snprintf(buf, 1024, "Inode 0x%jx disappeared", inverse_parent);
snprintf(buf, 1024, "Inode 0x%lx disappeared", inverse_parent);
result = (cli_result_t){ .err = EIO, .text = std::string(buf) };
state = 100;
return;
@ -576,7 +576,7 @@ resume_100:
if (cur_cfg_it == parent->cli->st_cli.inode_config.end())
{
char buf[1024];
snprintf(buf, 1024, "Inode 0x%jx disappeared", cur);
snprintf(buf, 1024, "Inode 0x%lx disappeared", cur);
result = (cli_result_t){ .err = EIO, .text = std::string(buf) };
state = 100;
return;
@ -640,7 +640,7 @@ resume_100:
if (child_it == parent->cli->st_cli.inode_config.end())
{
char buf[1024];
snprintf(buf, 1024, "Inode 0x%jx disappeared", child_inode);
snprintf(buf, 1024, "Inode 0x%lx disappeared", child_inode);
result = (cli_result_t){ .err = EIO, .text = std::string(buf) };
state = 100;
return;
@ -649,7 +649,7 @@ resume_100:
if (target_it == parent->cli->st_cli.inode_config.end())
{
char buf[1024];
snprintf(buf, 1024, "Inode 0x%jx disappeared", target_inode);
snprintf(buf, 1024, "Inode 0x%lx disappeared", target_inode);
result = (cli_result_t){ .err = EIO, .text = std::string(buf) };
state = 100;
return;
@ -670,7 +670,7 @@ resume_100:
if (source == parent->cli->st_cli.inode_config.end())
{
char buf[1024];
snprintf(buf, 1024, "Inode 0x%jx disappeared", inode);
snprintf(buf, 1024, "Inode 0x%lx disappeared", inode);
result = (cli_result_t){ .err = EIO, .text = std::string(buf) };
state = 100;
return;

View File

@ -95,7 +95,7 @@ struct rm_inode_t
fprintf(stderr, "Some data may remain after delete on OSDs which are currently down: ");
for (int i = 0; i < inactive_osds.size(); i++)
{
fprintf(stderr, i > 0 ? ", %ju" : "%ju", inactive_osds[i]);
fprintf(stderr, i > 0 ? ", %lu" : "%lu", inactive_osds[i]);
}
fprintf(stderr, "\n");
}
@ -138,7 +138,7 @@ struct rm_inode_t
cur_list->in_flight--;
if (op->reply.hdr.retval < 0)
{
fprintf(stderr, "Failed to remove object %jx:%jx from PG %u (OSD %ju) (retval=%jd)\n",
fprintf(stderr, "Failed to remove object %lx:%lx from PG %u (OSD %lu) (retval=%ld)\n",
op->req.rw.inode, op->req.rw.offset,
cur_list->pg_num, cur_list->rm_osd_num, op->reply.hdr.retval);
error_count++;
@ -174,7 +174,7 @@ struct rm_inode_t
cur_list->synced = true;
if (op->reply.hdr.retval < 0)
{
fprintf(stderr, "Failed to sync OSD %ju (retval=%jd)\n",
fprintf(stderr, "Failed to sync OSD %lu (retval=%ld)\n",
cur_list->rm_osd_num, op->reply.hdr.retval);
error_count++;
}
@ -212,7 +212,7 @@ struct rm_inode_t
}
if (parent->progress && total_count > 0 && total_done*1000/total_count != total_prev_pct)
{
fprintf(stderr, "\rRemoved %ju/%ju objects, %ju more PGs to list...", total_done, total_count, pgs_to_list);
fprintf(stderr, "\rRemoved %lu/%lu objects, %lu more PGs to list...", total_done, total_count, pgs_to_list);
total_prev_pct = total_done*1000/total_count;
}
if (lists_done && !lists.size())
@ -224,8 +224,8 @@ struct rm_inode_t
if (parent->progress && (total_done < total_count || inactive_osds.size() > 0 || error_count > 0))
{
fprintf(
stderr, "Warning: Pool:%u,ID:%ju inode data may not have been fully removed.\n"
" Use `vitastor-cli rm-data --pool %u --inode %ju` if you encounter it in listings.\n",
stderr, "Warning: Pool:%u,ID:%lu inode data may not have been fully removed.\n"
" Use `vitastor-cli rm-data --pool %u --inode %lu` if you encounter it in listings.\n",
pool_id, INODE_NO_POOL(inode), pool_id, INODE_NO_POOL(inode)
);
}

View File

@ -106,7 +106,7 @@ resume_2:
if (etcd_states[i]["error"].is_null())
{
etcd_alive++;
etcd_db_size = etcd_states[i]["dbSize"].uint64_value();
etcd_db_size = etcd_states[i]["dbSizeInUse"].uint64_value();
}
}
int mon_count = 0;
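
Editor's note: the one-line change above switches between two fields of etcd's member status response — dbSize is the physical size of the backend database file (including free pages), while dbSizeInUse is the logically used size after compaction, so the two can differ a lot after large deletions until a defrag. A small accessor in the json11 style used by the surrounding code (illustrative; the include path is an assumption):

#include "json11/json11.hpp" // include path may differ in the actual tree
#include <cstdint>

// Pick the reported etcd database size from a member status response.
static uint64_t etcd_db_size(const json11::Json & status, bool logical)
{
    // "dbSizeInUse" = logically used bytes, "dbSize" = physically allocated bytes
    return logical
        ? status["dbSizeInUse"].uint64_value()
        : status["dbSize"].uint64_value();
}
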
@ -132,7 +132,7 @@ resume_2:
auto kv = parent->cli->st_cli.parse_etcd_kv(osd_stats[i]);
osd_num_t stat_osd_num = 0;
char null_byte = 0;
int scanned = sscanf(kv.key.c_str() + parent->cli->st_cli.etcd_prefix.size(), "/osd/stats/%ju%c", &stat_osd_num, &null_byte);
int scanned = sscanf(kv.key.c_str() + parent->cli->st_cli.etcd_prefix.size(), "/osd/stats/%lu%c", &stat_osd_num, &null_byte);
if (scanned != 1 || !stat_osd_num)
{
fprintf(stderr, "Invalid key in etcd: %s\n", kv.key.c_str());
@ -283,7 +283,7 @@ resume_2:
}
printf(
" cluster:\n"
" etcd: %d / %zd up, %s database size\n"
" etcd: %d / %ld up, %s database size\n"
" mon: %d up%s\n"
" osd: %d / %d up\n"
" \n"

View File

@ -1156,7 +1156,7 @@ void cluster_client_t::handle_op_part(cluster_op_part_t *part)
if (op->retval != -EPIPE || log_level > 0)
{
fprintf(
stderr, "%s operation failed on OSD %ju: retval=%jd (expected %d), dropping connection\n",
stderr, "%s operation failed on OSD %lu: retval=%ld (expected %d), dropping connection\n",
osd_op_names[part->op.req.hdr.opcode], part->osd_num, part->op.reply.hdr.retval, expected
);
}
@ -1164,7 +1164,7 @@ void cluster_client_t::handle_op_part(cluster_op_part_t *part)
else if (log_level > 0)
{
fprintf(
stderr, "%s operation failed on OSD %ju: retval=%jd (expected %d)\n",
stderr, "%s operation failed on OSD %lu: retval=%ld (expected %d)\n",
osd_op_names[part->op.req.hdr.opcode], part->osd_num, part->op.reply.hdr.retval, expected
);
}

View File

@ -226,7 +226,7 @@ void cluster_client_t::send_list(inode_list_osd_t *cur_list)
{
if (op->reply.hdr.retval < 0)
{
fprintf(stderr, "Failed to get PG %u/%u object list from OSD %ju (retval=%jd), skipping\n",
fprintf(stderr, "Failed to get PG %u/%u object list from OSD %lu (retval=%ld), skipping\n",
cur_list->pg->lst->pool_id, cur_list->pg->pg_num, cur_list->osd_num, op->reply.hdr.retval);
}
else
@ -236,7 +236,7 @@ void cluster_client_t::send_list(inode_list_osd_t *cur_list)
// Unstable objects, if present, mean that someone still writes into the inode. Warn the user about it.
cur_list->pg->has_unstable = true;
fprintf(
stderr, "[PG %u/%u] Inode still has %ju unstable object versions out of total %ju - is it still open?\n",
stderr, "[PG %u/%u] Inode still has %lu unstable object versions out of total %lu - is it still open?\n",
cur_list->pg->lst->pool_id, cur_list->pg->pg_num, op->reply.hdr.retval - op->reply.sec_list.stable_count,
op->reply.hdr.retval
);
@ -244,7 +244,7 @@ void cluster_client_t::send_list(inode_list_osd_t *cur_list)
if (log_level > 0)
{
fprintf(
stderr, "[PG %u/%u] Got inode object list from OSD %ju: %jd object versions\n",
stderr, "[PG %u/%u] Got inode object list from OSD %lu: %ld object versions\n",
cur_list->pg->lst->pool_id, cur_list->pg->pg_num, cur_list->osd_num, op->reply.hdr.retval
);
}

View File

@ -47,7 +47,7 @@ void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
if (!bitmap_granularity)
bitmap_granularity = DEFAULT_BITMAP_GRANULARITY;
if (!journal_size)
journal_size = 32*1024*1024;
journal_size = 16*1024*1024;
if (!device_block_size)
device_block_size = 4096;
if (!data_csum_type)
@ -75,9 +75,9 @@ void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
if (st.st_blksize < device_block_size)
{
fprintf(
stderr, "Warning: %s reports %ju byte blocks, but we use %ju."
" Set --device_block_size=%ju if you're sure it works well with %ju byte blocks.\n",
device.c_str(), (uint64_t)st.st_blksize, device_block_size, (uint64_t)st.st_blksize, (uint64_t)st.st_blksize
stderr, "Warning: %s reports %lu byte blocks, but we use %lu."
" Set --device_block_size=%lu if you're sure it works well with %lu byte blocks.\n",
device.c_str(), st.st_blksize, device_block_size, st.st_blksize, st.st_blksize
);
}
}
@ -99,19 +99,19 @@ void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
if (device_block_size < 512 || device_block_size > 1048576 ||
device_block_size & (device_block_size-1) != 0)
{
fprintf(stderr, "Invalid device block size specified: %ju\n", device_block_size);
fprintf(stderr, "Invalid device block size specified: %lu\n", device_block_size);
exit(1);
}
if (data_block_size < device_block_size || data_block_size > MAX_DATA_BLOCK_SIZE ||
data_block_size & (data_block_size-1) != 0)
{
fprintf(stderr, "Invalid object size specified: %ju\n", data_block_size);
fprintf(stderr, "Invalid object size specified: %lu\n", data_block_size);
exit(1);
}
if (bitmap_granularity < device_block_size || bitmap_granularity > data_block_size ||
bitmap_granularity & (bitmap_granularity-1) != 0)
{
fprintf(stderr, "Invalid bitmap granularity specified: %ju\n", bitmap_granularity);
fprintf(stderr, "Invalid bitmap granularity specified: %lu\n", bitmap_granularity);
exit(1);
}
if (csum_block_size && (data_block_size % csum_block_size))
@ -145,8 +145,8 @@ void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
{
// Env
printf(
"meta_block_size=%ju\njournal_block_size=%ju\ndata_size=%ju\n"
"data_device=%s\njournal_offset=%ju\nmeta_offset=%ju\ndata_offset=%ju\n",
"meta_block_size=%lu\njournal_block_size=%lu\ndata_size=%lu\n"
"data_device=%s\njournal_offset=%lu\nmeta_offset=%lu\ndata_offset=%lu\n",
device_block_size, device_block_size, device_size-data_offset,
device.c_str(), journal_offset, meta_offset, data_offset
);
@ -160,14 +160,14 @@ void disk_tool_simple_offsets(json11::Json cfg, bool json_output)
}
if (device_block_size != 4096)
{
printf("--meta_block_size %ju\n--journal_block_size %ju\n", device_block_size, device_block_size);
printf("--meta_block_size %lu\n--journal_block_size %lu\n", device_block_size, device_block_size);
}
if (orig_device_size)
{
printf("--data_size %ju\n", device_size-data_offset);
printf("--data_size %lu\n", device_size-data_offset);
}
printf(
"--data_device %s\n--journal_offset %ju\n--meta_offset %ju\n--data_offset %ju\n",
"--data_device %s\n--journal_offset %lu\n--meta_offset %lu\n--data_offset %lu\n",
device.c_str(), journal_offset, meta_offset, data_offset
);
}
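
Editor's note: the validations in this file check block size, object size and bitmap granularity for range and power-of-two-ness. Worth noting that in C++ `!=` binds tighter than bitwise `&`, so `x & (x-1) != 0` as written only tests the lowest bit of x; an explicitly parenthesized helper makes the intent unambiguous (a sketch, not a proposed patch):

#include <cstdint>

// Explicit power-of-two check with unambiguous parentheses.
static bool is_pow2(uint64_t x)
{
    return x != 0 && (x & (x - 1)) == 0;
}

// Example: the device block size constraint from the diff, restated.
static bool valid_device_block_size(uint64_t bs)
{
    return bs >= 512 && bs <= 1048576 && is_pow2(bs);
}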

View File

@ -167,7 +167,7 @@ static const char *help_text =
" Calculate offsets for old simple&stupid (no superblock) OSD deployment. Options:\n"
" --object_size 128k Set blockstore block size\n"
" --bitmap_granularity 4k Set bitmap granularity\n"
" --journal_size 32M Set journal size\n"
" --journal_size 16M Set journal size\n"
" --data_csum_type none Set data checksum type (crc32c or none)\n"
" --csum_block_size 4k Set data checksum block size\n"
" --device_block_size 4k Set device block size\n"

View File

@ -4,7 +4,7 @@
#pragma once
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE 1
#endif
#include <map>

View File

@ -38,7 +38,7 @@ int disk_tool_t::dump_journal()
}
if (json)
{
printf("%s{\"offset\":\"0x%jx\"", first_block ? "" : ",\n", journal_pos);
printf("%s{\"offset\":\"0x%lx\"", first_block ? "" : ",\n", journal_pos);
first_block = false;
}
if (s == dsk.journal_block_size)
@ -46,13 +46,13 @@ int disk_tool_t::dump_journal()
if (json)
printf(",\"type\":\"zero\"}");
else
printf("offset %08jx: zeroes\n", journal_pos);
printf("offset %08lx: zeroes\n", journal_pos);
journal_pos += dsk.journal_block_size;
}
else if (((journal_entry*)journal_buf)->magic == JOURNAL_MAGIC)
{
if (!json)
printf("offset %08jx:\n", journal_pos);
printf("offset %08lx:\n", journal_pos);
else
printf(",\"entries\":[\n");
if (journal_pos == 0)
@ -80,9 +80,9 @@ int disk_tool_t::dump_journal()
else
{
if (json)
printf(",\"type\":\"data\",\"pattern\":\"%08jx\"}", *((uint64_t*)journal_buf));
printf(",\"type\":\"data\",\"pattern\":\"%08lx\"}", *((uint64_t*)journal_buf));
else
printf("offset %08jx: no magic in the beginning, looks like random data (pattern=%08jx)\n", journal_pos, *((uint64_t*)journal_buf));
printf("offset %08lx: no magic in the beginning, looks like random data (pattern=%08lx)\n", journal_pos, *((uint64_t*)journal_buf));
journal_pos += dsk.journal_block_size;
}
}
@ -98,12 +98,12 @@ int disk_tool_t::dump_journal()
if (json && dump_with_blocks)
first_entry = true;
if (!json)
printf("offset %08jx:\n", journal_pos);
printf("offset %08lx:\n", journal_pos);
auto pos = journal_pos;
int r = process_journal_block(data, [this, pos](int num, journal_entry *je)
{
if (json && dump_with_blocks && first_entry)
printf("%s{\"offset\":\"0x%jx\",\"entries\":[\n", first_block ? "" : ",\n", pos);
printf("%s{\"offset\":\"0x%lx\",\"entries\":[\n", first_block ? "" : ",\n", pos);
dump_journal_entry(num, je, json);
first_block = false;
});
@ -134,12 +134,12 @@ int disk_tool_t::process_journal(std::function<int(void*)> block_fn)
journal_entry *je = (journal_entry*)(data);
if (je->magic != JOURNAL_MAGIC || je->type != JE_START || je_crc32(je) != je->crc32)
{
fprintf(stderr, "offset %08jx: journal superblock is invalid\n", journal_pos);
fprintf(stderr, "offset %08lx: journal superblock is invalid\n", journal_pos);
r = 1;
}
else if (je->start.size != JE_START_V0_SIZE && je->start.version != JOURNAL_VERSION_V1 && je->start.version != JOURNAL_VERSION_V2)
{
fprintf(stderr, "offset %08jx: journal superblock contains version %ju, but I only understand 0, 1 and 2\n",
fprintf(stderr, "offset %08lx: journal superblock contains version %lu, but I only understand 0, 1 and 2\n",
journal_pos, je->start.size == JE_START_V0_SIZE ? 0 : je->start.version);
r = 1;
}
@ -296,7 +296,7 @@ void disk_tool_t::dump_journal_entry(int num, journal_entry *je, bool json)
if (je->type == JE_START)
{
printf(
json ? ",\"type\":\"start\",\"start\":\"0x%jx\"" : "je_start start=%08jx",
json ? ",\"type\":\"start\",\"start\":\"0x%lx\"" : "je_start start=%08lx",
je->start.journal_start
);
if (je->start.data_csum_type)
@ -312,15 +312,15 @@ void disk_tool_t::dump_journal_entry(int num, journal_entry *je, bool json)
{
auto & sw = je->small_write;
printf(
json ? ",\"type\":\"small_write%s\",\"inode\":\"0x%jx\",\"stripe\":\"0x%jx\",\"ver\":\"%ju\",\"offset\":%u,\"len\":%u,\"loc\":\"0x%jx\""
: "je_small_write%s oid=%jx:%jx ver=%ju offset=%u len=%u loc=%08jx",
json ? ",\"type\":\"small_write%s\",\"inode\":\"0x%lx\",\"stripe\":\"0x%lx\",\"ver\":\"%lu\",\"offset\":%u,\"len\":%u,\"loc\":\"0x%lx\""
: "je_small_write%s oid=%lx:%lx ver=%lu offset=%u len=%u loc=%08lx",
je->type == JE_SMALL_WRITE_INSTANT ? "_instant" : "",
sw.oid.inode, sw.oid.stripe, sw.version, sw.offset, sw.len, sw.data_offset
);
if (journal_calc_data_pos != sw.data_offset)
{
printf(json ? ",\"bad_loc\":true,\"calc_loc\":\"0x%jx\""
: " (mismatched, calculated = %08jx)", journal_pos);
printf(json ? ",\"bad_loc\":true,\"calc_loc\":\"0x%lx\""
: " (mismatched, calculated = %08lx)", journal_pos);
}
uint32_t data_csum_size = (!je_start.csum_block_size
? 0
@ -367,8 +367,8 @@ void disk_tool_t::dump_journal_entry(int num, journal_entry *je, bool json)
{
auto & bw = je->big_write;
printf(
json ? ",\"type\":\"big_write%s\",\"inode\":\"0x%jx\",\"stripe\":\"0x%jx\",\"ver\":\"%ju\",\"offset\":%u,\"len\":%u,\"loc\":\"0x%jx\""
: "je_big_write%s oid=%jx:%jx ver=%ju offset=%u len=%u loc=%08jx",
json ? ",\"type\":\"big_write%s\",\"inode\":\"0x%lx\",\"stripe\":\"0x%lx\",\"ver\":\"%lu\",\"offset\":%u,\"len\":%u,\"loc\":\"0x%lx\""
: "je_big_write%s oid=%lx:%lx ver=%lu offset=%u len=%u loc=%08lx",
je->type == JE_BIG_WRITE_INSTANT ? "_instant" : "",
bw.oid.inode, bw.oid.stripe, bw.version, bw.offset, bw.len, bw.location
);
@ -398,24 +398,24 @@ void disk_tool_t::dump_journal_entry(int num, journal_entry *je, bool json)
else if (je->type == JE_STABLE)
{
printf(
json ? ",\"type\":\"stable\",\"inode\":\"0x%jx\",\"stripe\":\"0x%jx\",\"ver\":\"%ju\"}"
: "je_stable oid=%jx:%jx ver=%ju\n",
json ? ",\"type\":\"stable\",\"inode\":\"0x%lx\",\"stripe\":\"0x%lx\",\"ver\":\"%lu\"}"
: "je_stable oid=%lx:%lx ver=%lu\n",
je->stable.oid.inode, je->stable.oid.stripe, je->stable.version
);
}
else if (je->type == JE_ROLLBACK)
{
printf(
json ? ",\"type\":\"rollback\",\"inode\":\"0x%jx\",\"stripe\":\"0x%jx\",\"ver\":\"%ju\"}"
: "je_rollback oid=%jx:%jx ver=%ju\n",
json ? ",\"type\":\"rollback\",\"inode\":\"0x%lx\",\"stripe\":\"0x%lx\",\"ver\":\"%lu\"}"
: "je_rollback oid=%lx:%lx ver=%lu\n",
je->rollback.oid.inode, je->rollback.oid.stripe, je->rollback.version
);
}
else if (je->type == JE_DELETE)
{
printf(
json ? ",\"type\":\"delete\",\"inode\":\"0x%jx\",\"stripe\":\"0x%jx\",\"ver\":\"%ju\"}"
: "je_delete oid=%jx:%jx ver=%ju\n",
json ? ",\"type\":\"delete\",\"inode\":\"0x%lx\",\"stripe\":\"0x%lx\",\"ver\":\"%lu\"}"
: "je_delete oid=%lx:%lx ver=%lu\n",
je->del.oid.inode, je->del.oid.stripe, je->del.version
);
}
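
Editor's note: dump_journal() above classifies each journal block before printing it — an all-zero block, a block that starts with a valid journal entry (magic matches), or leftover data dumped with its leading 8-byte pattern. The same decision as a standalone sketch (the journal_entry layout is not reproduced; a callback stands in for the real magic check):

#include <cstddef>
#include <cstdint>
#include <functional>

enum block_kind { BLOCK_ZEROES, BLOCK_ENTRIES, BLOCK_RANDOM_DATA };

static bool is_all_zero(const uint8_t *buf, size_t len)
{
    for (size_t i = 0; i < len; i++)
        if (buf[i])
            return false;
    return true;
}

// Decide how a journal block should be dumped.
static block_kind classify_journal_block(const uint8_t *buf, size_t block_size,
    const std::function<bool(const uint8_t*)> & has_entry_magic)
{
    if (is_all_zero(buf, block_size))
        return BLOCK_ZEROES;      // printed as {"type":"zero"} / "zeroes"
    if (has_entry_magic(buf))
        return BLOCK_ENTRIES;     // parsed and dumped entry by entry
    return BLOCK_RANDOM_DATA;     // printed with its leading 8-byte pattern
}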

View File

@ -54,7 +54,7 @@ int disk_tool_t::process_meta(std::function<void(blockstore_meta_header_v2_t *)>
else
{
// Unsupported version
fprintf(stderr, "Metadata format is too new for me (stored version is %ju, max supported %u).\n", hdr->version, BLOCKSTORE_META_FORMAT_V2);
fprintf(stderr, "Metadata format is too new for me (stored version is %lu, max supported %u).\n", hdr->version, BLOCKSTORE_META_FORMAT_V2);
free(data);
close(dsk.meta_fd);
dsk.meta_fd = -1;
@ -108,7 +108,7 @@ int disk_tool_t::process_meta(std::function<void(blockstore_meta_header_v2_t *)>
uint32_t *entry_csum = (uint32_t*)((uint8_t*)entry + dsk.clean_entry_size - 4);
if (*entry_csum != crc32c(0, entry, dsk.clean_entry_size - 4))
{
fprintf(stderr, "Metadata entry %ju is corrupt (checksum mismatch), skipping\n", block_num);
fprintf(stderr, "Metadata entry %lu is corrupt (checksum mismatch), skipping\n", block_num);
continue;
}
}
@ -184,7 +184,7 @@ void disk_tool_t::dump_meta_header(blockstore_meta_header_v2_t *hdr)
}
else
{
printf("{\"version\":\"0.5\",\"meta_block_size\":%ju,\"entries\":[\n", dsk.meta_block_size);
printf("{\"version\":\"0.5\",\"meta_block_size\":%lu,\"entries\":[\n", dsk.meta_block_size);
}
first_entry = true;
}
@ -192,7 +192,7 @@ void disk_tool_t::dump_meta_header(blockstore_meta_header_v2_t *hdr)
void disk_tool_t::dump_meta_entry(uint64_t block_num, clean_disk_entry *entry, uint8_t *bitmap)
{
printf(
#define ENTRY_FMT "{\"block\":%ju,\"pool\":%u,\"inode\":\"0x%jx\",\"stripe\":\"0x%jx\",\"version\":%ju"
#define ENTRY_FMT "{\"block\":%lu,\"pool\":%u,\"inode\":\"0x%lx\",\"stripe\":\"0x%lx\",\"version\":%lu"
(first_entry ? ENTRY_FMT : (",\n" ENTRY_FMT)),
#undef ENTRY_FMT
block_num, INODE_POOL(entry->oid.inode), INODE_NO_POOL(entry->oid.inode),
@ -265,7 +265,7 @@ int disk_tool_t::write_json_meta(json11::Json meta)
{
free(new_meta_buf);
new_meta_buf = NULL;
fprintf(stderr, "Metadata (data block %ju) doesn't fit into the new area\n", data_block);
fprintf(stderr, "Metadata (data block %lu) doesn't fit into the new area\n", data_block);
return 1;
}
clean_disk_entry *new_entry = (clean_disk_entry*)(new_meta_buf +

View File

@ -8,7 +8,6 @@
int disk_tool_t::prepare_one(std::map<std::string, std::string> options, int is_hdd)
{
static const char *allow_additional_params[] = {
"autosync_writes",
"data_io",
"meta_io",
"journal_io",
@ -100,9 +99,12 @@ int disk_tool_t::prepare_one(std::map<std::string, std::string> options, int is_
options["disable_journal_fsync"] = options["disable_data_fsync"];
}
// Calculate offsets if the same device is used for two or more of data, meta, and journal
if (options["journal_size"] == "" && (options["journal_device"] == "" || options["journal_device"] == options["data_device"]))
if (options["journal_size"] == "")
{
options["journal_size"] = is_hdd || !json_is_true(options["disable_data_fsync"]) ? "128M" : "32M";
if (options["journal_device"] == "")
options["journal_size"] = is_hdd ? "128M" : "32M";
else if (is_hdd)
options["journal_size"] = DEFAULT_HYBRID_JOURNAL;
}
bool is_hybrid = is_hdd && options["journal_device"] != "" && options["journal_device"] != options["data_device"];
if (is_hdd)
@ -112,15 +114,6 @@ int disk_tool_t::prepare_one(std::map<std::string, std::string> options, int is_
if (is_hybrid && options["throttle_small_writes"] == "")
options["throttle_small_writes"] = "1";
}
else if (!json_is_true(options["disable_data_fsync"]))
{
if (options.find("min_flusher_count") == options.end())
options["min_flusher_count"] = "32";
if (options.find("max_flusher_count") == options.end())
options["max_flusher_count"] = "256";
if (options.find("autosync_writes") == options.end())
options["autosync_writes"] = "512";
}
json11::Json::object sb;
blockstore_disk_t dsk;
try
@ -210,10 +203,10 @@ int disk_tool_t::prepare_one(std::map<std::string, std::string> options, int is_
desc += " with metadata on "+realpath_str(options["meta_device"]);
if (sep_j)
desc += (sep_m ? " and journal on " : " with journal on ") + realpath_str(options["journal_device"]);
fprintf(stderr, "Initialized OSD %ju on %s\n", osd_num, desc.c_str());
fprintf(stderr, "Initialized OSD %lu on %s\n", osd_num, desc.c_str());
if (shell_exec({ "systemctl", "enable", "--now", "vitastor-osd@"+std::to_string(osd_num) }, "", NULL, NULL) != 0)
{
fprintf(stderr, "Failed to enable systemd unit vitastor-osd@%ju\n", osd_num);
fprintf(stderr, "Failed to enable systemd unit vitastor-osd@%lu\n", osd_num);
return 1;
}
return 0;
@ -337,7 +330,7 @@ json11::Json disk_tool_t::add_partitions(vitastor_dev_info_t & devinfo, std::vec
std::string out;
if (shell_exec({ "sfdisk", "--no-reread", "--force", devinfo.path }, script, &out, NULL) != 0)
{
fprintf(stderr, "Failed to add %zu partition(s) with sfdisk\n", sizes.size());
fprintf(stderr, "Failed to add %lu partition(s) with sfdisk\n", sizes.size());
return {};
}
// Get new partition table and find created partitions
@ -352,7 +345,7 @@ json11::Json disk_tool_t::add_partitions(vitastor_dev_info_t & devinfo, std::vec
}
if (new_parts.size() != sizes.size())
{
fprintf(stderr, "Failed to add %zu partition(s) with sfdisk: new partitions not found in table\n", sizes.size());
fprintf(stderr, "Failed to add %lu partition(s) with sfdisk: new partitions not found in table\n", sizes.size());
return {};
}
// Check if new nodes exist and run partprobe if not
@ -456,7 +449,7 @@ std::vector<std::string> disk_tool_t::get_new_data_parts(vitastor_dev_info_t & d
bool is_journal = sb["params"]["journal_device"].string_value() == part_path;
bool is_data = sb["params"]["data_device"].string_value() == part_path;
fprintf(
stderr, "%s is already initialized for OSD %ju%s, skipping\n",
stderr, "%s is already initialized for OSD %lu%s, skipping\n",
part["node"].string_value().c_str(), sb["params"]["osd_num"].uint64_value(),
(is_data ? " data" : (is_meta ? " meta" : (is_journal ? " journal" : "")))
);
@ -539,7 +532,7 @@ int disk_tool_t::get_meta_partition(std::vector<vitastor_dev_info_t> & ssds, std
if (sel < 0)
{
fprintf(
stderr, "Could not find free space for new SSD journal and metadata (need %ju + %ju MiB)\n",
stderr, "Could not find free space for new SSD journal and metadata (need %lu + %lu MiB)\n",
meta_size/1024/1024, journal_size/1024/1024
);
return 1;
@ -623,7 +616,6 @@ int disk_tool_t::prepare(std::vector<std::string> devices)
options.erase("disable_meta_fsync");
options.erase("disable_journal_fsync");
}
auto journal_size = options["journal_size"];
for (auto & dev: devinfo)
{
if (!hybrid || dev.is_hdd)
@ -641,13 +633,11 @@ int disk_tool_t::prepare(std::vector<std::string> devices)
{
return 1;
}
options.erase("journal_size");
}
// Treat all disks as SSDs if not in the hybrid mode
prepare_one(options, dev.is_hdd ? 1 : 0);
if (hybrid)
{
options["journal_size"] = journal_size;
options.erase("journal_device");
options.erase("meta_device");
}
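
Editor's note: the prepare_one() hunks above differ in how the journal_size default is chosen when the user does not set it. Both variants condensed into plain functions for comparison (json_is_true() and DEFAULT_HYBRID_JOURNAL are taken from the diff context; which variant is the newer one is not visible from this listing):

#include <string>

// Variant A: only fills the default when the journal lives on the data device;
// 128M whenever the drive is an HDD or data fsync is NOT disabled, else 32M.
static std::string journal_size_variant_a(bool is_hdd, bool data_fsync_disabled,
    bool journal_on_data_device, const std::string & unchanged)
{
    if (!journal_on_data_device)
        return unchanged;
    return (is_hdd || !data_fsync_disabled) ? "128M" : "32M";
}

// Variant B: depends only on disk type and whether a separate journal device is set;
// default_hybrid_journal stands for DEFAULT_HYBRID_JOURNAL.
static std::string journal_size_variant_b(bool is_hdd, bool has_journal_device,
    const std::string & default_hybrid_journal, const std::string & unchanged)
{
    if (!has_journal_device)
        return is_hdd ? "128M" : "32M";
    if (is_hdd)
        return default_hybrid_journal;
    return unchanged; // journal_size left as-is in this case
}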

View File

@ -184,7 +184,7 @@ void disk_tool_t::resize_init(blockstore_meta_header_v2_t *hdr)
}
if (new_meta_len < dsk.meta_block_size*new_meta_blocks)
{
fprintf(stderr, "New metadata area size is too small, should be at least %ju bytes\n", dsk.meta_block_size*new_meta_blocks);
fprintf(stderr, "New metadata area size is too small, should be at least %lu bytes\n", dsk.meta_block_size*new_meta_blocks);
exit(1);
}
// Check that new metadata, journal and data areas don't overlap
@ -289,7 +289,7 @@ int disk_tool_t::resize_copy_data()
if (data->res != dsk.data_block_size)
{
fprintf(
stderr, "Failed to read %u bytes at %ju from %s: %s\n", dsk.data_block_size,
stderr, "Failed to read %u bytes at %lu from %s: %s\n", dsk.data_block_size,
dsk.data_offset + moving_blocks[i].old_loc*dsk.data_block_size, dsk.data_device.c_str(),
data->res < 0 ? strerror(-data->res) : "short read"
);
@ -314,7 +314,7 @@ int disk_tool_t::resize_copy_data()
if (data->res != dsk.data_block_size)
{
fprintf(
stderr, "Failed to write %u bytes at %ju to %s: %s\n", dsk.data_block_size,
stderr, "Failed to write %u bytes at %lu to %s: %s\n", dsk.data_block_size,
dsk.data_offset + moving_blocks[i].new_loc*dsk.data_block_size, dsk.data_device.c_str(),
data->res < 0 ? strerror(-data->res) : "short write"
);

View File

@ -43,8 +43,8 @@ int disk_tool_t::udev_import(std::string device)
}
uint64_t osd_num = sb["params"]["osd_num"].uint64_value();
// Print variables for udev
printf("VITASTOR_OSD_NUM=%ju\n", osd_num);
printf("VITASTOR_ALIAS=osd%ju-%s\n", osd_num, sb["device_type"].string_value().c_str());
printf("VITASTOR_OSD_NUM=%lu\n", osd_num);
printf("VITASTOR_ALIAS=osd%lu-%s\n", osd_num, sb["device_type"].string_value().c_str());
printf("VITASTOR_DATA_DEVICE=%s\n", udev_escape(sb["params"]["data_device"].string_value()).c_str());
if (sb["real_meta_device"].string_value() != "" && sb["real_meta_device"] != sb["real_data_device"])
printf("VITASTOR_META_DEVICE=%s\n", udev_escape(sb["params"]["meta_device"].string_value()).c_str());
@ -466,12 +466,12 @@ int disk_tool_t::purge_devices(const std::vector<std::string> & devices)
close(fd);
if (r != 0)
{
fprintf(stderr, "Failed to clear OSD %ju %s device %s superblock: %s\n",
fprintf(stderr, "Failed to clear OSD %lu %s device %s superblock: %s\n",
sb["params"]["osd_num"].uint64_value(), dev_type.c_str(), dev.c_str(), strerror(errno));
}
else
{
fprintf(stderr, "OSD %ju %s device %s superblock cleared\n",
fprintf(stderr, "OSD %lu %s device %s superblock cleared\n",
sb["params"]["osd_num"].uint64_value(), dev_type.c_str(), dev.c_str());
}
if (sb["params"][dev_type+"_device"].string_value().substr(0, 22) == "/dev/disk/by-partuuid/")

View File

@ -12,9 +12,9 @@ uint64_t sscanf_json(const char *fmt, const json11::Json & str)
{
uint64_t value = 0;
if (fmt)
sscanf(str.string_value().c_str(), "%jx", &value);
sscanf(str.string_value().c_str(), "%lx", &value);
else if (str.string_value().size() > 2 && (str.string_value()[0] == '0' && str.string_value()[1] == 'x'))
sscanf(str.string_value().c_str(), "0x%jx", &value);
sscanf(str.string_value().c_str(), "0x%lx", &value);
else
value = str.uint64_value();
return value;


@ -333,7 +333,7 @@ void etcd_state_client_t::start_etcd_watcher()
etcd_watch_ws = NULL;
}
if (this->log_level > 1)
fprintf(stderr, "Trying to connect to etcd websocket at %s, watch from revision %ju\n", etcd_address.c_str(), etcd_watch_revision);
fprintf(stderr, "Trying to connect to etcd websocket at %s, watch from revision %lu\n", etcd_address.c_str(), etcd_watch_revision);
etcd_watch_ws = open_websocket(tfd, etcd_address, etcd_api_path+"/watch", etcd_slow_timeout,
[this, cur_addr = selected_etcd_address](const http_response_t *msg)
{
@ -357,7 +357,7 @@ void etcd_state_client_t::start_etcd_watcher()
watch_id == ETCD_OSD_STATE_WATCH_ID)
etcd_watches_initialised++;
if (etcd_watches_initialised == ETCD_TOTAL_WATCHES && this->log_level > 0)
fprintf(stderr, "Successfully subscribed to etcd at %s, revision %ju\n", cur_addr.c_str(), etcd_watch_revision);
fprintf(stderr, "Successfully subscribed to etcd at %s, revision %lu\n", cur_addr.c_str(), etcd_watch_revision);
}
if (data["result"]["canceled"].bool_value())
{
@ -371,7 +371,7 @@ void etcd_state_client_t::start_etcd_watcher()
// check to not trigger on_reload_hook multiple times
if (etcd_watch_ws != NULL)
{
fprintf(stderr, "Revisions before %ju were compacted by etcd, reloading state\n",
fprintf(stderr, "Revisions before %lu were compacted by etcd, reloading state\n",
data["result"]["compact_revision"].uint64_value());
http_close(etcd_watch_ws);
etcd_watch_ws = NULL;
@ -382,7 +382,7 @@ void etcd_state_client_t::start_etcd_watcher()
}
else
{
fprintf(stderr, "Revisions before %ju were compacted by etcd, exiting\n",
fprintf(stderr, "Revisions before %lu were compacted by etcd, exiting\n",
data["result"]["compact_revision"].uint64_value());
exit(1);
}
@ -646,7 +646,7 @@ void etcd_state_client_t::load_pgs()
etcd_watch_revision = data["header"]["revision"].uint64_value()+1;
if (this->log_level > 3)
{
fprintf(stderr, "Loaded revision %ju of PG configuration\n", etcd_watch_revision-1);
fprintf(stderr, "Loaded revision %lu of PG configuration\n", etcd_watch_revision-1);
}
}
for (auto & res: data["responses"].array_items())
@ -740,7 +740,7 @@ void etcd_state_client_t::clean_nonexistent_pgs()
{
if (seen_peers.find(peer_item.first) == seen_peers.end())
{
fprintf(stderr, "OSD %ju state disappeared after reload, forgetting it\n", peer_item.first);
fprintf(stderr, "OSD %lu state disappeared after reload, forgetting it\n", peer_item.first);
parse_state((etcd_kv_t){
.key = etcd_prefix+"/osd/state/"+std::to_string(peer_item.first),
});
@ -890,7 +890,7 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
{
if (pg_item.second.target_set.size() != parsed_cfg.pg_size)
{
fprintf(stderr, "Pool %u PG %u configuration is invalid: osd_set size %zu != pool pg_size %ju\n",
fprintf(stderr, "Pool %u PG %u configuration is invalid: osd_set size %lu != pool pg_size %lu\n",
pool_id, pg_item.first, pg_item.second.target_set.size(), parsed_cfg.pg_size);
pg_item.second.pause = true;
}
@ -936,7 +936,7 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
}
if (parsed_cfg.target_set.size() != pool_config[pool_id].pg_size)
{
fprintf(stderr, "Pool %u PG %u configuration is invalid: osd_set size %zu != pool pg_size %ju\n",
fprintf(stderr, "Pool %u PG %u configuration is invalid: osd_set size %lu != pool pg_size %lu\n",
pool_id, pg_num, parsed_cfg.target_set.size(), pool_config[pool_id].pg_size);
parsed_cfg.pause = true;
}
@ -950,7 +950,7 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
if (pg_it->second.config_exists && pg_it->first != ++n)
{
fprintf(
stderr, "Invalid pool %u PG configuration: PG numbers don't cover whole 1..%zu range\n",
stderr, "Invalid pool %u PG configuration: PG numbers don't cover whole 1..%lu range\n",
pool_item.second.id, pool_item.second.pg_config.size()
);
for (pg_it = pool_item.second.pg_config.begin(); pg_it != pool_item.second.pg_config.end(); pg_it++)
@ -1066,7 +1066,7 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
(state & PG_PEERING) && state != PG_PEERING ||
(state & PG_INCOMPLETE) && state != PG_INCOMPLETE)
{
fprintf(stderr, "Unexpected pool %u PG %u state in etcd: primary=%ju, state=%s\n", pool_id, pg_num, cur_primary, value["state"].dump().c_str());
fprintf(stderr, "Unexpected pool %u PG %u state in etcd: primary=%lu, state=%s\n", pool_id, pg_num, cur_primary, value["state"].dump().c_str());
return;
}
pg_cfg.cur_primary = cur_primary;
@ -1102,7 +1102,7 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
uint64_t pool_id = 0;
uint64_t inode_num = 0;
char null_byte = 0;
int scanned = sscanf(key.c_str() + etcd_prefix.length()+14, "%ju/%ju%c", &pool_id, &inode_num, &null_byte);
int scanned = sscanf(key.c_str() + etcd_prefix.length()+14, "%lu/%lu%c", &pool_id, &inode_num, &null_byte);
if (scanned != 2 || !pool_id || pool_id >= POOL_ID_MAX || !inode_num || (inode_num >> (64-POOL_ID_BITS)))
{
fprintf(stderr, "Bad etcd key %s, ignoring\n", key.c_str());
@ -1145,7 +1145,7 @@ void etcd_state_client_t::parse_state(const etcd_kv_t & kv)
else if (parent_pool_id >= POOL_ID_MAX)
{
fprintf(
stderr, "Inode %ju/%ju parent_pool value is invalid, ignoring parent setting\n",
stderr, "Inode %lu/%lu parent_pool value is invalid, ignoring parent setting\n",
inode_num >> (64-POOL_ID_BITS), inode_num & (((uint64_t)1 << (64-POOL_ID_BITS)) - 1)
);
parent_inode_num = 0;


@ -377,7 +377,7 @@ static void io_callback(void *opaque, long retval)
bsd->completed.push_back(io);
if (bsd->trace)
{
printf("--- %s 0x%jx retval=%ld\n", io->ddir == DDIR_READ ? "READ" :
printf("--- %s 0x%lx retval=%ld\n", io->ddir == DDIR_READ ? "READ" :
(io->ddir == DDIR_WRITE ? "WRITE" : "SYNC"), (uint64_t)io, retval);
}
}
@ -405,11 +405,10 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
bsd->inflight++;
uint64_t inode = opt->image ? vitastor_c_inode_get_num(bsd->watch) : opt->inode;
assert(io->xfer_buflen < (size_t)-1);
switch (io->ddir)
{
case DDIR_READ:
iov = { .iov_base = io->xfer_buf, .iov_len = (size_t)io->xfer_buflen };
iov = { .iov_base = io->xfer_buf, .iov_len = io->xfer_buflen };
vitastor_c_read(bsd->cli, inode, io->offset, io->xfer_buflen, &iov, 1, read_callback, io);
bsd->last_sync = false;
break;
@ -437,7 +436,7 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
io->error = EROFS;
return FIO_Q_COMPLETED;
}
iov = { .iov_base = io->xfer_buf, .iov_len = (size_t)io->xfer_buflen };
iov = { .iov_base = io->xfer_buf, .iov_len = io->xfer_buflen };
vitastor_c_write(bsd->cli, inode, io->offset, io->xfer_buflen, 0, &iov, 1, io_callback, io);
bsd->last_sync = false;
break;
@ -454,11 +453,11 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
{
if (io->ddir == DDIR_SYNC)
{
printf("+++ SYNC 0x%jx\n", (uint64_t)io);
printf("+++ SYNC 0x%lx\n", (uint64_t)io);
}
else
{
printf("+++ %s 0x%jx 0x%llx+%jx\n",
printf("+++ %s 0x%lx 0x%llx+%lx\n",
io->ddir == DDIR_READ ? "READ" : "WRITE",
(uint64_t)io, io->offset, (uint64_t)io->xfer_buflen);
}


@ -310,8 +310,7 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
int iovcnt = 1, wtotal = OSD_PACKET_SIZE;
if (io->ddir == DDIR_WRITE)
{
assert(io->xfer_buflen <= 0x7fffffff);
iov[iovcnt++] = { .iov_base = io->xfer_buf, .iov_len = (size_t)io->xfer_buflen };
iov[iovcnt++] = { .iov_base = io->xfer_buf, .iov_len = io->xfer_buflen };
wtotal += io->xfer_buflen;
}
if (sendv_blocking(bsd->connect_fd, iov, iovcnt,
@ -342,13 +341,13 @@ static int sec_getevents(struct thread_data *td, unsigned int min, unsigned int
read_blocking(bsd->connect_fd, reply.buf, OSD_PACKET_SIZE);
if (reply.hdr.magic != SECONDARY_OSD_REPLY_MAGIC)
{
fprintf(stderr, "bad reply: magic = %jx instead of %jx\n", reply.hdr.magic, SECONDARY_OSD_REPLY_MAGIC);
fprintf(stderr, "bad reply: magic = %lx instead of %lx\n", reply.hdr.magic, SECONDARY_OSD_REPLY_MAGIC);
exit(1);
}
auto it = bsd->queue.find(reply.hdr.id);
if (it == bsd->queue.end())
{
fprintf(stderr, "bad reply: op id %jx missing in local queue\n", reply.hdr.id);
fprintf(stderr, "bad reply: op id %lx missing in local queue\n", reply.hdr.id);
exit(1);
}
io_u* io = it->second->fio_op;
@ -358,7 +357,7 @@ static int sec_getevents(struct thread_data *td, unsigned int min, unsigned int
{
if (reply.hdr.retval != io->xfer_buflen)
{
fprintf(stderr, "Short read: retval = %jd instead of %ju\n", reply.hdr.retval, (uint64_t)io->xfer_buflen);
fprintf(stderr, "Short read: retval = %ld instead of %lu\n", reply.hdr.retval, (uint64_t)io->xfer_buflen);
exit(1);
}
// Support bitmap
@ -372,8 +371,7 @@ static int sec_getevents(struct thread_data *td, unsigned int min, unsigned int
else
iov[iovcnt++] = { .iov_base = (void*)(bitmap = (uint64_t)malloc(reply.sec_rw.attr_len)), .iov_len = reply.sec_rw.attr_len };
}
assert(io->xfer_buflen <= 0x7FFFFFFF);
iov[iovcnt++] = { .iov_base = io->xfer_buf, .iov_len = (size_t)io->xfer_buflen };
iov[iovcnt++] = { .iov_base = io->xfer_buf, .iov_len = io->xfer_buflen };
readv_blocking(bsd->connect_fd, iov, iovcnt);
if (reply.sec_rw.attr_len > 8)
{
@ -384,7 +382,7 @@ static int sec_getevents(struct thread_data *td, unsigned int min, unsigned int
{
if (reply.hdr.retval != io->xfer_buflen)
{
fprintf(stderr, "Short write: retval = %jd instead of %ju\n", reply.hdr.retval, (uint64_t)io->xfer_buflen);
fprintf(stderr, "Short write: retval = %ld instead of %lu\n", reply.hdr.retval, (uint64_t)io->xfer_buflen);
exit(1);
}
}
@ -392,13 +390,13 @@ static int sec_getevents(struct thread_data *td, unsigned int min, unsigned int
{
if (reply.hdr.retval != 0)
{
fprintf(stderr, "Sync failed: retval = %jd\n", reply.hdr.retval);
fprintf(stderr, "Sync failed: retval = %ld\n", reply.hdr.retval);
exit(1);
}
}
if (opt->trace)
{
printf("--- %s # %ju\n", io->ddir == DDIR_READ ? "READ" :
printf("--- %s # %ld\n", io->ddir == DDIR_READ ? "READ" :
(io->ddir == DDIR_WRITE ? "WRITE" : "SYNC"), reply.hdr.id);
}
bsd->completed.push_back(io);


@ -11,7 +11,7 @@ inline void* memalign_or_die(size_t alignment, size_t size)
void *buf = memalign(alignment, size);
if (!buf)
{
printf("Failed to allocate %zu bytes\n", size);
printf("Failed to allocate %lu bytes\n", size);
exit(1);
}
return buf;
@ -22,7 +22,7 @@ inline void* malloc_or_die(size_t size)
void *buf = malloc(size);
if (!buf)
{
printf("Failed to allocate %zu bytes\n", size);
printf("Failed to allocate %lu bytes\n", size);
exit(1);
}
return buf;
@ -33,7 +33,7 @@ inline void* realloc_or_die(void *ptr, size_t size)
void *buf = realloc(ptr, size);
if (!buf)
{
printf("Failed to allocate %zu bytes\n", size);
printf("Failed to allocate %lu bytes\n", size);
exit(1);
}
return buf;
@ -44,7 +44,7 @@ inline void* calloc_or_die(size_t nmemb, size_t size)
void *buf = calloc(nmemb, size);
if (!buf)
{
printf("Failed to allocate %zu bytes\n", size * nmemb);
printf("Failed to allocate %lu bytes\n", size * nmemb);
exit(1);
}
return buf;


@ -27,13 +27,13 @@ void osd_messenger_t::init()
if (!rdma_context)
{
if (log_level > 0)
fprintf(stderr, "[OSD %ju] Couldn't initialize RDMA, proceeding with TCP only\n", osd_num);
fprintf(stderr, "[OSD %lu] Couldn't initialize RDMA, proceeding with TCP only\n", osd_num);
}
else
{
rdma_max_sge = rdma_max_sge < rdma_context->attrx.orig_attr.max_sge
? rdma_max_sge : rdma_context->attrx.orig_attr.max_sge;
fprintf(stderr, "[OSD %ju] RDMA initialized successfully\n", osd_num);
fprintf(stderr, "[OSD %lu] RDMA initialized successfully\n", osd_num);
fcntl(rdma_context->channel->fd, F_SETFL, fcntl(rdma_context->channel->fd, F_GETFL, 0) | O_NONBLOCK);
tfd->set_fd_handler(rdma_context->channel->fd, false, [this](int notify_fd, int epoll_events)
{
@ -45,12 +45,11 @@ void osd_messenger_t::init()
#endif
keepalive_timer_id = tfd->set_timer(1000, true, [this](int)
{
auto cl_it = clients.begin();
while (cl_it != clients.end())
std::vector<int> to_stop;
std::vector<osd_op_t*> to_ping;
for (auto cl_it = clients.begin(); cl_it != clients.end(); cl_it++)
{
auto cl = cl_it->second;
cl_it++;
auto peer_fd = cl->peer_fd;
if (!cl->osd_num || cl->peer_state != PEER_CONNECTED && cl->peer_state != PEER_RDMA)
{
// Do not run keepalive on regular clients
@ -62,10 +61,8 @@ void osd_messenger_t::init()
if (!cl->ping_time_remaining)
{
// Ping timed out, stop the client
fprintf(stderr, "Ping timed out for OSD %ju (client %d), disconnecting peer\n", cl->osd_num, cl->peer_fd);
stop_client(peer_fd, true);
// Restart iterator because it may be invalidated
cl_it = clients.upper_bound(peer_fd);
fprintf(stderr, "Ping timed out for OSD %lu (client %d), disconnecting peer\n", cl->osd_num, cl->peer_fd);
to_stop.push_back(cl->peer_fd);
}
}
else if (cl->idle_time_remaining > 0)
@ -99,15 +96,13 @@ void osd_messenger_t::init()
delete op;
if (fail_fd >= 0)
{
fprintf(stderr, "Ping failed for OSD %ju (client %d), disconnecting peer\n", fail_osd_num, fail_fd);
fprintf(stderr, "Ping failed for OSD %lu (client %d), disconnecting peer\n", fail_osd_num, fail_fd);
stop_client(fail_fd, true);
}
};
to_ping.push_back(op);
cl->ping_time_remaining = osd_ping_timeout;
cl->idle_time_remaining = osd_idle_timeout;
outbox_push(op);
// Restart iterator because it may be invalidated
cl_it = clients.upper_bound(peer_fd);
}
}
else
@ -115,6 +110,15 @@ void osd_messenger_t::init()
cl->idle_time_remaining = osd_idle_timeout;
}
}
// Don't stop clients while a 'clients' iterator is still active
for (int peer_fd: to_stop)
{
stop_client(peer_fd, true);
}
for (auto op: to_ping)
{
outbox_push(op);
}
});
}
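The keepalive timer hunk above shows two ways of removing entries from the clients map while walking it: one side restarts the iterator with clients.upper_bound(peer_fd) after every stop_client(), the other collects the affected descriptors and ping ops into to_stop/to_ping and applies them only after the loop. A minimal sketch of the deferred-mutation variant, with placeholder names (client_t, timed_out) that are not Vitastor types:

    #include <map>
    #include <vector>

    struct client_t { bool timed_out = false; };

    static void stop_client(std::map<int, client_t*> & clients, int fd)
    {
        delete clients.at(fd);
        clients.erase(fd);   // erasing here would invalidate an iterator pointing at fd
    }

    static void check_keepalive(std::map<int, client_t*> & clients)
    {
        std::vector<int> to_stop;
        for (auto & kv: clients)          // only read the map while iterating it
        {
            if (kv.second->timed_out)
                to_stop.push_back(kv.first);
        }
        for (int fd: to_stop)             // mutate it afterwards, when no iterator is live
            stop_client(clients, fd);
    }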
@ -253,7 +257,7 @@ void osd_messenger_t::try_connect_peer_addr(osd_num_t peer_osd, const char *peer
clients[peer_fd] = new osd_client_t();
if (log_level > 0)
{
fprintf(stderr, "Connecting to OSD %ju at %s:%d (client %d)\n", peer_osd, peer_host, peer_port, peer_fd);
fprintf(stderr, "Connecting to OSD %lu at %s:%d (client %d)\n", peer_osd, peer_host, peer_port, peer_fd);
}
clients[peer_fd]->peer_addr = addr;
clients[peer_fd]->peer_port = peer_port;
@ -319,7 +323,7 @@ void osd_messenger_t::handle_peer_epoll(int peer_fd, int epoll_events)
// Stop client
if (log_level > 0)
{
fprintf(stderr, "[OSD %ju] client %d disconnected\n", this->osd_num, peer_fd);
fprintf(stderr, "[OSD %lu] client %d disconnected\n", this->osd_num, peer_fd);
}
stop_client(peer_fd, true);
}
@ -345,7 +349,7 @@ void osd_messenger_t::on_connect_peer(osd_num_t peer_osd, int peer_fd)
wp.connecting = false;
if (peer_fd < 0)
{
fprintf(stderr, "Failed to connect to peer OSD %ju address %s port %d: %s\n", peer_osd, wp.cur_addr.c_str(), wp.cur_port, strerror(-peer_fd));
fprintf(stderr, "Failed to connect to peer OSD %lu address %s port %d: %s\n", peer_osd, wp.cur_addr.c_str(), wp.cur_port, strerror(-peer_fd));
if (wp.address_changed)
{
wp.address_changed = false;
@ -372,7 +376,7 @@ void osd_messenger_t::on_connect_peer(osd_num_t peer_osd, int peer_fd)
}
if (log_level > 0)
{
fprintf(stderr, "[OSD %ju] Connected with peer OSD %ju (client %d)\n", osd_num, peer_osd, peer_fd);
fprintf(stderr, "[OSD %lu] Connected with peer OSD %lu (client %d)\n", osd_num, peer_osd, peer_fd);
}
wanted_peers.erase(peer_osd);
repeer_pgs(peer_osd);
@ -418,7 +422,7 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
if (op->reply.hdr.retval < 0)
{
err = true;
fprintf(stderr, "Failed to get config from OSD %ju (retval=%jd), disconnecting peer\n", cl->osd_num, op->reply.hdr.retval);
fprintf(stderr, "Failed to get config from OSD %lu (retval=%ld), disconnecting peer\n", cl->osd_num, op->reply.hdr.retval);
}
else
{
@ -426,18 +430,18 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
if (json_err != "")
{
err = true;
fprintf(stderr, "Failed to get config from OSD %ju: bad JSON: %s, disconnecting peer\n", cl->osd_num, json_err.c_str());
fprintf(stderr, "Failed to get config from OSD %lu: bad JSON: %s, disconnecting peer\n", cl->osd_num, json_err.c_str());
}
else if (config["osd_num"].uint64_value() != cl->osd_num)
{
err = true;
fprintf(stderr, "Connected to OSD %ju instead of OSD %ju, peer state is outdated, disconnecting peer\n", config["osd_num"].uint64_value(), cl->osd_num);
fprintf(stderr, "Connected to OSD %lu instead of OSD %lu, peer state is outdated, disconnecting peer\n", config["osd_num"].uint64_value(), cl->osd_num);
}
else if (config["protocol_version"].uint64_value() != OSD_PROTOCOL_VERSION)
{
err = true;
fprintf(
stderr, "OSD %ju protocol version is %ju, but only version %u is supported.\n"
stderr, "OSD %lu protocol version is %lu, but only version %u is supported.\n"
" If you need to upgrade from 0.5.x please request it via the issue tracker.\n",
cl->osd_num, config["protocol_version"].uint64_value(), OSD_PROTOCOL_VERSION
);
@ -463,7 +467,7 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
cl->rdma_conn->connect(&addr) != 0)
{
fprintf(
stderr, "Failed to connect to OSD %ju (address %s) using RDMA\n",
stderr, "Failed to connect to OSD %lu (address %s) using RDMA\n",
cl->osd_num, config["rdma_address"].string_value().c_str()
);
delete cl->rdma_conn;
@ -484,7 +488,7 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
}
if (log_level > 0)
{
fprintf(stderr, "Connected to OSD %ju using RDMA\n", cl->osd_num);
fprintf(stderr, "Connected to OSD %lu using RDMA\n", cl->osd_num);
}
cl->peer_state = PEER_RDMA;
tfd->set_fd_handler(cl->peer_fd, false, [this](int peer_fd, int epoll_events)
@ -516,7 +520,7 @@ void osd_messenger_t::accept_connections(int listen_fd)
while ((peer_fd = accept(listen_fd, (sockaddr*)&addr, &peer_addr_size)) >= 0)
{
assert(peer_fd != 0);
fprintf(stderr, "[OSD %ju] new client %d: connection from %s\n", this->osd_num, peer_fd,
fprintf(stderr, "[OSD %lu] new client %d: connection from %s\n", this->osd_num, peer_fd,
addr_to_string(addr).c_str());
fcntl(peer_fd, F_SETFL, fcntl(peer_fd, F_GETFL, 0) | O_NONBLOCK);
int one = 1;


@ -76,7 +76,7 @@ struct osd_op_buf_list_t
buf = (iovec*)malloc(sizeof(iovec) * alloc);
if (!buf)
{
fprintf(stderr, "Failed to allocate %u bytes\n", (int)sizeof(iovec) * alloc);
fprintf(stderr, "Failed to allocate %lu bytes\n", sizeof(iovec) * alloc);
exit(1);
}
memcpy(buf, inline_buf, sizeof(iovec) * old);
@ -87,7 +87,7 @@ struct osd_op_buf_list_t
buf = (iovec*)realloc(buf, sizeof(iovec) * alloc);
if (!buf)
{
fprintf(stderr, "Failed to allocate %u bytes\n", (int)sizeof(iovec) * alloc);
fprintf(stderr, "Failed to allocate %lu bytes\n", sizeof(iovec) * alloc);
exit(1);
}
}
@ -109,7 +109,7 @@ struct osd_op_buf_list_t
buf = (iovec*)malloc(sizeof(iovec) * alloc);
if (!buf)
{
fprintf(stderr, "Failed to allocate %u bytes\n", (int)sizeof(iovec) * alloc);
fprintf(stderr, "Failed to allocate %lu bytes\n", sizeof(iovec) * alloc);
exit(1);
}
memcpy(buf, inline_buf, sizeof(iovec)*old);
@ -120,7 +120,7 @@ struct osd_op_buf_list_t
buf = (iovec*)realloc(buf, sizeof(iovec) * alloc);
if (!buf)
{
fprintf(stderr, "Failed to allocate %u bytes\n", (int)sizeof(iovec) * alloc);
fprintf(stderr, "Failed to allocate %lu bytes\n", sizeof(iovec) * alloc);
exit(1);
}
}
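The osd_op_buf_list_t hunks above only touch the allocation-failure message, but the surrounding code shows the structure's small-buffer approach: iovecs live in a fixed inline array until it overflows, then move to a heap buffer grown with realloc(). A simplified sketch of that idea (iov_list_sketch_t, its capacity and growth factor are illustrative, not the real structure; error handling is omitted):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    struct iov_list_sketch_t
    {
        int count = 0, alloc = 4;
        iovec inline_buf[4];
        iovec *buf = inline_buf;
        void push_back(void *base, size_t len)
        {
            if (count >= alloc)
            {
                alloc *= 2;
                if (buf == inline_buf)
                {
                    // First spill to the heap: copy the inline entries over
                    buf = (iovec*)malloc(sizeof(iovec) * alloc);
                    memcpy(buf, inline_buf, sizeof(iovec) * count);
                }
                else
                    buf = (iovec*)realloc(buf, sizeof(iovec) * alloc);
            }
            buf[count++] = { .iov_base = base, .iov_len = len };
        }
        ~iov_list_sketch_t() { if (buf != inline_buf) free(buf); }
    };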


@ -10,7 +10,7 @@ std::string msgr_rdma_address_t::to_string()
{
char msg[sizeof "0000:00000000:00000000:00000000000000000000000000000000"];
sprintf(
msg, "%04x:%06x:%06x:%016jx%016jx", lid, qpn, psn,
msg, "%04x:%06x:%06x:%016lx%016lx", lid, qpn, psn,
htobe64(((uint64_t*)&gid)[0]), htobe64(((uint64_t*)&gid)[1])
);
return std::string(msg);
@ -20,7 +20,7 @@ bool msgr_rdma_address_t::from_string(const char *str, msgr_rdma_address_t *dest
{
uint64_t* gid = (uint64_t*)&dest->gid;
int scanned = sscanf(
str, "%hx:%x:%x:%16jx%16jx", &dest->lid, &dest->qpn, &dest->psn, gid, gid+1
str, "%hx:%x:%x:%16lx%16lx", &dest->lid, &dest->qpn, &dest->psn, gid, gid+1
);
gid[0] = be64toh(gid[0]);
gid[1] = be64toh(gid[1]);
@ -594,7 +594,7 @@ void osd_messenger_t::handle_rdma_events()
fprintf(stderr, "RDMA work request failed for client %d", client_id);
if (cl->osd_num)
{
fprintf(stderr, " (OSD %ju)", cl->osd_num);
fprintf(stderr, " (OSD %lu)", cl->osd_num);
}
fprintf(stderr, " with status: %s, stopping client\n", ibv_wc_status_str(wc[i].status));
stop_client(client_id);


@ -180,7 +180,7 @@ bool osd_messenger_t::handle_finished_read(osd_client_t *cl)
handle_op_hdr(cl);
else
{
fprintf(stderr, "Received garbage: magic=%jx id=%ju opcode=%jx from %d\n", cl->read_op->req.hdr.magic, cl->read_op->req.hdr.id, cl->read_op->req.hdr.opcode, cl->peer_fd);
fprintf(stderr, "Received garbage: magic=%lx id=%lu opcode=%lx from %d\n", cl->read_op->req.hdr.magic, cl->read_op->req.hdr.id, cl->read_op->req.hdr.opcode, cl->peer_fd);
stop_client(cl->peer_fd);
return false;
}
@ -297,7 +297,7 @@ bool osd_messenger_t::handle_reply_hdr(osd_client_t *cl)
if (req_it == cl->sent_ops.end())
{
// Command out of sync. Drop connection
fprintf(stderr, "Client %d command out of sync: id %ju\n", cl->peer_fd, cl->read_op->req.hdr.id);
fprintf(stderr, "Client %d command out of sync: id %lu\n", cl->peer_fd, cl->read_op->req.hdr.id);
stop_client(cl->peer_fd);
return false;
}
@ -312,7 +312,7 @@ bool osd_messenger_t::handle_reply_hdr(osd_client_t *cl)
if (op->reply.hdr.retval >= 0 && (op->reply.hdr.retval != expected_size || bmp_len > op->bitmap_len))
{
// Check reply length to not overflow the buffer
fprintf(stderr, "Client %d read reply of different length: expected %u+%u, got %jd+%u\n",
fprintf(stderr, "Client %d read reply of different length: expected %u+%u, got %ld+%u\n",
cl->peer_fd, expected_size, op->bitmap_len, op->reply.hdr.retval, bmp_len);
cl->sent_ops[op->req.hdr.id] = op;
stop_client(cl->peer_fd);


@ -61,11 +61,11 @@ void osd_messenger_t::stop_client(int peer_fd, bool force, bool force_delete)
{
if (cl->osd_num)
{
fprintf(stderr, "[OSD %ju] Stopping client %d (OSD peer %ju)\n", osd_num, peer_fd, cl->osd_num);
fprintf(stderr, "[OSD %lu] Stopping client %d (OSD peer %lu)\n", osd_num, peer_fd, cl->osd_num);
}
else
{
fprintf(stderr, "[OSD %ju] Stopping client %d (regular client)\n", osd_num, peer_fd);
fprintf(stderr, "[OSD %lu] Stopping client %d (regular client)\n", osd_num, peer_fd);
}
}
// First set state to STOPPED so another stop_client() call doesn't try to free it again


@ -738,7 +738,7 @@ protected:
}
uint64_t handle = *((uint64_t*)cur_req.handle);
#ifdef DEBUG
printf("request %jx +%x %jx\n", be64toh(cur_req.from), be32toh(cur_req.len), handle);
printf("request %lx +%x %lx\n", be64toh(cur_req.from), be32toh(cur_req.len), handle);
#endif
void *buf = NULL;
cluster_op_t *op = new cluster_op_t;
@ -759,7 +759,7 @@ protected:
op->callback = [this, buf, handle](cluster_op_t *op)
{
#ifdef DEBUG
printf("reply %jx e=%d\n", handle, op->retval);
printf("reply %lx e=%d\n", handle, op->retval);
#endif
nbd_reply *reply = (nbd_reply*)buf;
reply->magic = htobe32(NBD_REPLY_MAGIC);
@ -769,7 +769,7 @@ protected:
if (op->retval < 0 || op->opcode != OSD_OP_READ)
to_list.push_back({ .iov_base = buf, .iov_len = sizeof(nbd_reply) });
else
to_list.push_back({ .iov_base = buf, .iov_len = sizeof(nbd_reply) + (size_t)op->len });
to_list.push_back({ .iov_base = buf, .iov_len = sizeof(nbd_reply) + op->len });
to_free.push_back(buf);
delete op;
ringloop->wakeup();


@ -517,7 +517,7 @@ static void extend_inode(nfs_client_t *self, uint64_t inode, uint64_t new_size)
auto & ext = self->extends[inode];
if (r.err)
{
fprintf(stderr, "Error extending inode %ju to %ju bytes: %s\n", inode, new_size, r.text.c_str());
fprintf(stderr, "Error extending inode %lu to %lu bytes: %s\n", inode, new_size, r.text.c_str());
}
if (r.err == EAGAIN || ext.next_extend > ext.cur_extend)
{


@ -350,7 +350,7 @@ void nfs_proxy_t::parse_stats(etcd_kv_t & kv)
pool_id_t pool_id = 0;
inode_t inode_num = 0;
char null_byte = 0;
int scanned = sscanf(key.c_str() + cli->st_cli.etcd_prefix.length()+13, "%u/%ju%c", &pool_id, &inode_num, &null_byte);
int scanned = sscanf(key.c_str() + cli->st_cli.etcd_prefix.length()+13, "%u/%lu%c", &pool_id, &inode_num, &null_byte);
if (scanned != 2 || !pool_id || pool_id >= POOL_ID_MAX || !inode_num)
{
fprintf(stderr, "Bad etcd key %s, ignoring\n", key.c_str());
@ -387,7 +387,7 @@ void nfs_proxy_t::check_default_pool()
}
else
{
fprintf(stderr, "There are %zu pools. Please select default pool with --pool option\n", cli->st_cli.pool_config.size());
fprintf(stderr, "There are %lu pools. Please select default pool with --pool option\n", cli->st_cli.pool_config.size());
exit(1);
}
}


@ -233,8 +233,6 @@ void osd_t::parse_config(bool init)
? 10 : config["recovery_tune_agg_interval"].uint64_value();
recovery_tune_sleep_min_us = config["recovery_tune_sleep_min_us"].is_null()
? 10 : config["recovery_tune_sleep_min_us"].uint64_value();
recovery_tune_sleep_cutoff_us = config["recovery_tune_sleep_cutoff_us"].is_null()
? 10000000 : config["recovery_tune_sleep_cutoff_us"].uint64_value();
recovery_pg_switch = config["recovery_pg_switch"].uint64_value();
if (recovery_pg_switch < 1)
recovery_pg_switch = DEFAULT_RECOVERY_PG_SWITCH;
@ -475,14 +473,14 @@ void osd_t::print_stats()
if (msgr.stats.op_stat_bytes[i] != 0)
{
printf(
"[OSD %ju] avg latency for op %d (%s): %ju us, B/W: %.2f %s\n", osd_num, i, osd_op_names[i], avg,
"[OSD %lu] avg latency for op %d (%s): %lu us, B/W: %.2f %s\n", osd_num, i, osd_op_names[i], avg,
(bw > 1024*1024*1024 ? bw/1024.0/1024/1024 : (bw > 1024*1024 ? bw/1024.0/1024 : bw/1024.0)),
(bw > 1024*1024*1024 ? "GB/s" : (bw > 1024*1024 ? "MB/s" : "KB/s"))
);
}
else
{
printf("[OSD %ju] avg latency for op %d (%s): %ju us\n", osd_num, i, osd_op_names[i], avg);
printf("[OSD %lu] avg latency for op %d (%s): %lu us\n", osd_num, i, osd_op_names[i], avg);
}
prev_stats.op_stat_count[i] = msgr.stats.op_stat_count[i];
prev_stats.op_stat_sum[i] = msgr.stats.op_stat_sum[i];
@ -494,7 +492,7 @@ void osd_t::print_stats()
if (msgr.stats.subop_stat_count[i] != prev_stats.subop_stat_count[i])
{
uint64_t avg = (msgr.stats.subop_stat_sum[i] - prev_stats.subop_stat_sum[i])/(msgr.stats.subop_stat_count[i] - prev_stats.subop_stat_count[i]);
printf("[OSD %ju] avg latency for subop %d (%s): %jd us\n", osd_num, i, osd_op_names[i], avg);
printf("[OSD %lu] avg latency for subop %d (%s): %ld us\n", osd_num, i, osd_op_names[i], avg);
prev_stats.subop_stat_count[i] = msgr.stats.subop_stat_count[i];
prev_stats.subop_stat_sum[i] = msgr.stats.subop_stat_sum[i];
}
@ -505,7 +503,7 @@ void osd_t::print_stats()
{
uint64_t bw = (recovery_stat[i].bytes - recovery_print_prev[i].bytes) / print_stats_interval;
printf(
"[OSD %ju] %s recovery: %.1f op/s, B/W: %.2f %s, avg latency %jd us, delay %jd us\n", osd_num, recovery_stat_names[i],
"[OSD %lu] %s recovery: %.1f op/s, B/W: %.2f %s, avg latency %ld us, delay %ld us\n", osd_num, recovery_stat_names[i],
(recovery_stat[i].count - recovery_print_prev[i].count) * 1.0 / print_stats_interval,
(bw > 1024*1024*1024 ? bw/1024.0/1024/1024 : (bw > 1024*1024 ? bw/1024.0/1024 : bw/1024.0)),
(bw > 1024*1024*1024 ? "GB/s" : (bw > 1024*1024 ? "MB/s" : "KB/s")),
@ -517,19 +515,19 @@ void osd_t::print_stats()
memcpy(recovery_print_prev, recovery_stat, sizeof(recovery_stat));
if (corrupted_objects > 0)
{
printf("[OSD %ju] %ju object(s) corrupted\n", osd_num, corrupted_objects);
printf("[OSD %lu] %lu object(s) corrupted\n", osd_num, corrupted_objects);
}
if (incomplete_objects > 0)
{
printf("[OSD %ju] %ju object(s) incomplete\n", osd_num, incomplete_objects);
printf("[OSD %lu] %lu object(s) incomplete\n", osd_num, incomplete_objects);
}
if (degraded_objects > 0)
{
printf("[OSD %ju] %ju object(s) degraded\n", osd_num, degraded_objects);
printf("[OSD %lu] %lu object(s) degraded\n", osd_num, degraded_objects);
}
if (misplaced_objects > 0)
{
printf("[OSD %ju] %ju object(s) misplaced\n", osd_num, misplaced_objects);
printf("[OSD %lu] %lu object(s) misplaced\n", osd_num, misplaced_objects);
}
}
@ -548,27 +546,27 @@ void osd_t::print_slow()
int l = sizeof(alloc), n;
char *buf = alloc;
#define bufprintf(s, ...) { n = snprintf(buf, l, s, __VA_ARGS__); n = n < 0 ? 0 : n; buf += n; l -= n; }
bufprintf("[OSD %ju] Slow op %jx", osd_num, (uint64_t)op);
bufprintf("[OSD %lu] Slow op %lx", osd_num, (unsigned long)op);
if (kv.second->osd_num)
{
bufprintf(" from peer OSD %ju (client %d)", kv.second->osd_num, kv.second->peer_fd);
bufprintf(" from peer OSD %lu (client %d)", kv.second->osd_num, kv.second->peer_fd);
}
else
{
bufprintf(" from client %d", kv.second->peer_fd);
}
bufprintf(": %s id=%ju", osd_op_names[op->req.hdr.opcode], op->req.hdr.id);
bufprintf(": %s id=%lu", osd_op_names[op->req.hdr.opcode], op->req.hdr.id);
if (op->req.hdr.opcode == OSD_OP_SEC_READ || op->req.hdr.opcode == OSD_OP_SEC_WRITE ||
op->req.hdr.opcode == OSD_OP_SEC_WRITE_STABLE || op->req.hdr.opcode == OSD_OP_SEC_DELETE)
{
bufprintf(" %jx:%jx v", op->req.sec_rw.oid.inode, op->req.sec_rw.oid.stripe);
bufprintf(" %lx:%lx v", op->req.sec_rw.oid.inode, op->req.sec_rw.oid.stripe);
if (op->req.sec_rw.version == UINT64_MAX)
{
bufprintf("%s", "max");
}
else
{
bufprintf("%ju", op->req.sec_rw.version);
bufprintf("%lu", op->req.sec_rw.version);
}
if (op->req.hdr.opcode != OSD_OP_SEC_DELETE)
{
@ -580,17 +578,17 @@ void osd_t::print_slow()
for (uint64_t i = 0; i < op->req.sec_stab.len && i < sizeof(obj_ver_id)*12; i += sizeof(obj_ver_id))
{
obj_ver_id *ov = (obj_ver_id*)((uint8_t*)op->buf + i);
bufprintf(i == 0 ? " %jx:%jx v%ju" : ", %jx:%jx v%ju", ov->oid.inode, ov->oid.stripe, ov->version);
bufprintf(i == 0 ? " %lx:%lx v%lu" : ", %lx:%lx v%lu", ov->oid.inode, ov->oid.stripe, ov->version);
}
if (op->req.sec_stab.len > sizeof(obj_ver_id)*12)
{
bufprintf(", ... (%ju items)", op->req.sec_stab.len/sizeof(obj_ver_id));
bufprintf(", ... (%lu items)", op->req.sec_stab.len/sizeof(obj_ver_id));
}
}
else if (op->req.hdr.opcode == OSD_OP_SEC_LIST)
{
bufprintf(
" oid=%jx/%jx-%jx/%jx pg=%u/%u, stripe=%ju, limit=%u",
" oid=%lx/%lx-%lx/%lx pg=%u/%u, stripe=%lu, limit=%u",
op->req.sec_list.min_inode, op->req.sec_list.min_stripe,
op->req.sec_list.max_inode, op->req.sec_list.max_stripe,
op->req.sec_list.list_pg, op->req.sec_list.pg_count,
@ -600,7 +598,7 @@ void osd_t::print_slow()
else if (op->req.hdr.opcode == OSD_OP_READ || op->req.hdr.opcode == OSD_OP_WRITE ||
op->req.hdr.opcode == OSD_OP_DELETE)
{
bufprintf(" inode=%jx offset=%jx len=%x", op->req.rw.inode, op->req.rw.offset, op->req.rw.len);
bufprintf(" inode=%lx offset=%lx len=%x", op->req.rw.inode, op->req.rw.offset, op->req.rw.len);
}
if (op->req.hdr.opcode == OSD_OP_SEC_READ || op->req.hdr.opcode == OSD_OP_SEC_WRITE ||
op->req.hdr.opcode == OSD_OP_SEC_WRITE_STABLE || op->req.hdr.opcode == OSD_OP_SEC_DELETE ||
@ -612,7 +610,7 @@ void osd_t::print_slow()
int wait_for = op->bs_op ? PRIV(op->bs_op)->wait_for : 0;
if (wait_for)
{
bufprintf(" wait=%d (detail=%ju)", wait_for, PRIV(op->bs_op)->wait_detail);
bufprintf(" wait=%d (detail=%lu)", wait_for, PRIV(op->bs_op)->wait_detail);
}
}
else if (op->req.hdr.opcode == OSD_OP_READ || op->req.hdr.opcode == OSD_OP_WRITE ||


@ -125,7 +125,6 @@ class osd_t
int recovery_tune_interval = 1;
int recovery_tune_agg_interval = 10;
int recovery_tune_sleep_min_us = 10;
int recovery_tune_sleep_cutoff_us = 10000000;
int recovery_pg_switch = DEFAULT_RECOVERY_PG_SWITCH;
int recovery_sync_batch = DEFAULT_RECOVERY_BATCH;
int inode_vanish_time = 60;
@ -283,7 +282,6 @@ class osd_t
void exec_sync_stab_all(osd_op_t *cur_op);
void exec_show_config(osd_op_t *cur_op);
void exec_secondary(osd_op_t *cur_op);
void exec_secondary_real(osd_op_t *cur_op);
void secondary_op_callback(osd_op_t *cur_op);
// primary ops


@ -117,7 +117,7 @@ bool osd_t::check_peer_config(osd_client_t *cl, json11::Json conf)
conf["immediate_commit"].is_null())
{
printf(
"[OSD %ju] Warning: peer OSD %ju does not report block_size/bitmap_granularity/immediate_commit."
"[OSD %lu] Warning: peer OSD %lu does not report block_size/bitmap_granularity/immediate_commit."
" Is it older than 0.6.3?\n", this->osd_num, cl->osd_num
);
}
@ -129,7 +129,7 @@ bool osd_t::check_peer_config(osd_client_t *cl, json11::Json conf)
immediate_commit == IMMEDIATE_SMALL && peer_immediate_commit == IMMEDIATE_NONE)
{
printf(
"[OSD %ju] My immediate_commit is \"%s\", but peer OSD %ju has \"%s\". We can't work together\n",
"[OSD %lu] My immediate_commit is \"%s\", but peer OSD %lu has \"%s\". We can't work together\n",
this->osd_num, immediate_commit == IMMEDIATE_ALL ? "all" : "small",
cl->osd_num, conf["immediate_commit"].string_value().c_str()
);
@ -138,7 +138,7 @@ bool osd_t::check_peer_config(osd_client_t *cl, json11::Json conf)
else if (conf["block_size"].uint64_value() != (uint64_t)this->bs_block_size)
{
printf(
"[OSD %ju] My block_size is %u, but peer OSD %ju has %ju. We can't work together\n",
"[OSD %lu] My block_size is %u, but peer OSD %lu has %lu. We can't work together\n",
this->osd_num, this->bs_block_size, cl->osd_num, conf["block_size"].uint64_value()
);
return false;
@ -146,7 +146,7 @@ bool osd_t::check_peer_config(osd_client_t *cl, json11::Json conf)
else if (conf["bitmap_granularity"].uint64_value() != (uint64_t)this->bs_bitmap_granularity)
{
printf(
"[OSD %ju] My bitmap_granularity is %u, but peer OSD %ju has %ju. We can't work together\n",
"[OSD %lu] My bitmap_granularity is %u, but peer OSD %lu has %lu. We can't work together\n",
this->osd_num, this->bs_bitmap_granularity, cl->osd_num, conf["bitmap_granularity"].uint64_value()
);
return false;
@ -181,7 +181,7 @@ json11::Json osd_t::get_statistics()
timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
char time_str[50] = { 0 };
sprintf(time_str, "%jd.%03ld", (uint64_t)ts.tv_sec, ts.tv_nsec/1000000);
sprintf(time_str, "%ld.%03ld", ts.tv_sec, ts.tv_nsec/1000000);
st["time"] = time_str;
if (bs)
{
@ -358,7 +358,7 @@ void osd_t::report_statistics()
etcd_reporting_stats = false;
if (err != "")
{
printf("[OSD %ju] Error reporting state to etcd: %s\n", this->osd_num, err.c_str());
printf("[OSD %lu] Error reporting state to etcd: %s\n", this->osd_num, err.c_str());
// Retry indefinitely
tfd->set_timer(st_cli.etcd_slow_timeout, false, [this](int timer_id)
{
@ -367,7 +367,7 @@ void osd_t::report_statistics()
}
else if (res["error"].string_value() != "")
{
printf("[OSD %ju] Error reporting state to etcd: %s\n", this->osd_num, res["error"].string_value().c_str());
printf("[OSD %lu] Error reporting state to etcd: %s\n", this->osd_num, res["error"].string_value().c_str());
force_stop(1);
}
});
@ -432,7 +432,7 @@ void osd_t::acquire_lease()
create_osd_state();
});
printf(
"[OSD %ju] reporting to etcd at %s every %d seconds (statistics every %d seconds)\n", this->osd_num,
"[OSD %lu] reporting to etcd at %s every %d seconds (statistics every %d seconds)\n", this->osd_num,
(config["etcd_address"].is_string() ? config["etcd_address"].string_value() : config["etcd_address"].dump()).c_str(),
etcd_report_interval, etcd_stats_interval
);
@ -499,11 +499,11 @@ void osd_t::create_osd_state()
{
// OSD is already up
auto kv = st_cli.parse_etcd_kv(data["responses"][0]["response_range"]["kvs"][0]);
printf("Key %s already exists in etcd, OSD %ju is still up\n", kv.key.c_str(), this->osd_num);
printf("Key %s already exists in etcd, OSD %lu is still up\n", kv.key.c_str(), this->osd_num);
int64_t port = kv.value["port"].int64_value();
for (auto & addr: kv.value["addresses"].array_items())
{
printf(" listening at: %s:%jd\n", addr.string_value().c_str(), port);
printf(" listening at: %s:%ld\n", addr.string_value().c_str(), port);
}
force_stop(0);
return;
@ -569,13 +569,13 @@ void osd_t::force_stop(int exitcode)
{
printf("Error revoking etcd lease: %s\n", err.c_str());
}
printf("[OSD %ju] Force stopping\n", this->osd_num);
printf("[OSD %lu] Force stopping\n", this->osd_num);
exit(exitcode);
});
}
else
{
printf("[OSD %ju] Force stopping\n", this->osd_num);
printf("[OSD %lu] Force stopping\n", this->osd_num);
exit(exitcode);
}
}
@ -629,7 +629,7 @@ void osd_t::apply_pg_count()
if (still_active > 0)
{
printf(
"[OSD %ju] PG count change detected for pool %u (new is %ju, old is %u),"
"[OSD %lu] PG count change detected for pool %u (new is %lu, old is %u),"
" but %u PG(s) are still active. This is not allowed. Exiting\n",
this->osd_num, pool_item.first, pool_item.second.real_pg_count, pg_counts[pool_item.first], still_active
);
@ -663,7 +663,7 @@ void osd_t::apply_pg_config()
if (!warned_block_size)
{
printf(
"[OSD %ju] My block_size and bitmap_granularity are %u/%u"
"[OSD %lu] My block_size and bitmap_granularity are %u/%u"
", but pool %u has %u/%u. Refusing to start PGs of this pool\n",
this->osd_num, bs_block_size, bs_bitmap_granularity,
pool_id, pool_item.second.data_block_size, pool_item.second.bitmap_granularity
@ -843,13 +843,7 @@ void osd_t::report_pg_states()
pg_state_exists = true;
if (pg.state == PG_OFFLINE && pg_it->second.cur_primary != this->osd_num)
{
// Nothing to report, PG is already taken over by another OSD
checks.push_back(json11::Json::object {
{ "target", "MOD" },
{ "key", state_key_base64 },
{ "result", "LESS" },
{ "mod_revision", st_cli.etcd_watch_revision+1 },
});
// Nothing to check or report, PG is already taken over by another OSD
continue;
}
}
@ -857,6 +851,11 @@ void osd_t::report_pg_states()
}
if (!pg_state_exists)
{
if (pg.state == PG_OFFLINE)
{
// Nothing to check or report, PG is already stopped
continue;
}
// Check that the PG key does not exist
// Failed check indicates an unsuccessful PG lock attempt in this case
checks.push_back(json11::Json::object {
@ -985,7 +984,7 @@ void osd_t::report_pg_states()
kv.value["primary"].uint64_value() != this->osd_num)
{
// PG is somehow captured by another OSD
printf("BUG: OSD %ju captured our PG %u/%u. Race condition detected, exiting\n",
printf("BUG: OSD %lu captured our PG %u/%u. Race condition detected, exiting\n",
kv.value["primary"].uint64_value(), pool_id, pg_num);
force_stop(1);
return;


@ -66,7 +66,7 @@ void osd_t::handle_flush_op(bool rollback, pool_id_t pool_id, pg_num_t pg_num, p
{
if (log_level > 2)
{
printf("[PG %u/%u] flush batch %jx completed on OSD %ju with result %d\n",
printf("[PG %u/%u] flush batch %lx completed on OSD %lu with result %d\n",
pool_id, pg_num, (uint64_t)fb, peer_osd, retval);
}
pool_pg_num_t pg_id = { .pool_id = pool_id, .pg_num = pg_num };
@ -88,7 +88,7 @@ void osd_t::handle_flush_op(bool rollback, pool_id_t pool_id, pg_num_t pg_num, p
}
else
{
printf("Error while doing flush on OSD %ju: %d (%s)\n", osd_num, retval, strerror(-retval));
printf("Error while doing flush on OSD %lu: %d (%s)\n", osd_num, retval, strerror(-retval));
auto fd_it = msgr.osd_peer_fds.find(peer_osd);
if (fd_it != msgr.osd_peer_fds.end())
{
@ -122,7 +122,7 @@ void osd_t::handle_flush_op(bool rollback, pool_id_t pool_id, pg_num_t pg_num, p
{
if (log_level > 2)
{
printf("[PG %u/%u] continuing write %jx to object %jx:%jx after flush\n",
printf("[PG %u/%u] continuing write %lx to object %lx:%lx after flush\n",
pool_id, pg_num, (uint64_t)wr_it->second, wr_it->first.inode, wr_it->first.stripe);
}
continue_ops.push_back(wr_it->second);
@ -169,12 +169,12 @@ bool osd_t::submit_flush_op(pool_id_t pool_id, pg_num_t pg_num, pg_flush_batch_t
if (log_level > 2)
{
printf(
"[PG %u/%u] flush batch %jx on OSD %ju: %s objects: ",
"[PG %u/%u] flush batch %lx on OSD %lu: %s objects: ",
pool_id, pg_num, (uint64_t)fb, peer_osd, rollback ? "rollback" : "stabilize"
);
for (int i = 0; i < count; i++)
{
printf(i > 0 ? ", %jx:%jx v%ju" : "%jx:%jx v%ju", data[i].oid.inode, data[i].oid.stripe, data[i].version);
printf(i > 0 ? ", %lx:%lx v%lu" : "%lx:%lx v%lu", data[i].oid.inode, data[i].oid.stripe, data[i].version);
}
printf("\n");
}
@ -305,7 +305,7 @@ void osd_t::submit_recovery_op(osd_recovery_op_t *op)
};
if (log_level > 2)
{
printf("Submitting recovery operation for %jx:%jx (%s)\n", op->oid.inode, op->oid.stripe, op->degraded ? "degraded" : "misplaced");
printf("Submitting recovery operation for %lx:%lx (%s)\n", op->oid.inode, op->oid.stripe, op->degraded ? "degraded" : "misplaced");
}
op->osd_op->peer_fd = -1;
op->osd_op->callback = [this, op](osd_op_t *osd_op)
@ -315,7 +315,7 @@ void osd_t::submit_recovery_op(osd_recovery_op_t *op)
// Error recovering object
// EPIPE is totally harmless (peer is gone), others like EIO/EDOM may be not
printf(
"[PG %u/%u] Recovery operation failed with object %jx:%jx: error %jd\n",
"[PG %u/%u] Recovery operation failed with object %lx:%lx: error %ld\n",
INODE_POOL(op->oid.inode),
map_to_pg(op->oid, st_cli.pool_config.at(INODE_POOL(op->oid.inode)).pg_stripe_size),
op->oid.inode, op->oid.stripe, osd_op->reply.hdr.retval
@ -323,7 +323,7 @@ void osd_t::submit_recovery_op(osd_recovery_op_t *op)
}
else if (log_level > 2)
{
printf("Recovery operation done for %jx:%jx\n", op->oid.inode, op->oid.stripe);
printf("Recovery operation done for %lx:%lx\n", op->oid.inode, op->oid.stripe);
}
finish_recovery_op(op);
};
@ -422,10 +422,6 @@ void osd_t::tune_recovery()
rtune_avg_lat = total_recovery_usec/recovery_count;
uint64_t target_lat = rtune_avg_lat * rtune_avg_lat/1000000.0 * recovery_count/recovery_tune_interval / rtune_target_util;
auto sleep_us = target_lat > rtune_avg_lat+recovery_tune_sleep_min_us ? target_lat-rtune_avg_lat : 0;
if (sleep_us > recovery_tune_sleep_cutoff_us)
{
return;
}
if (recovery_target_sleep_items.size() != recovery_tune_agg_interval)
{
recovery_target_sleep_items.resize(recovery_tune_agg_interval);
@ -442,10 +438,10 @@ void osd_t::tune_recovery()
if (recovery_target_sleep_count < recovery_tune_agg_interval)
recovery_target_sleep_count++;
recovery_target_sleep_us = recovery_target_sleep_total / recovery_target_sleep_count;
if (log_level > 1)
if (log_level > 4)
{
printf(
"[OSD %ju] auto-tune: client util: %.2f, recovery util: %.2f, lat: %ju us -> target util %.2f, delay %ju us\n",
"[OSD %lu] auto-tune: client util: %.2f, recovery util: %.2f, lat: %lu us -> target util %.2f, delay %lu us\n",
osd_num, rtune_client_util, total_recovery_usec/1000000.0/recovery_tune_interval,
rtune_avg_lat, rtune_target_util, recovery_target_sleep_us
);
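Reading the formula a few lines above: rtune_avg_lat/1000000.0 * recovery_count/recovery_tune_interval is the fraction of each second currently spent in recovery, so target_lat is the measured latency scaled by current utilisation over rtune_target_util. A rough worked example with invented numbers: if rtune_avg_lat = 5000 us, 100 recovery ops completed in a 1-second interval and rtune_target_util = 0.1, then current utilisation is 0.005 * 100 = 0.5, target_lat = 5000 * 0.5 / 0.1 = 25000 us, and the injected delay comes out as target_lat - rtune_avg_lat = 20000 us per recovery operation.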


@ -113,7 +113,7 @@ void osd_t::repeer_pgs(osd_num_t peer_osd)
if (repeer)
{
// Repeer this pg
printf("[PG %u/%u] Repeer because of OSD %ju\n", pg.pool_id, pg.pg_num, peer_osd);
printf("[PG %u/%u] Repeer because of OSD %lu\n", pg.pool_id, pg.pg_num, peer_osd);
if (!(pg.state & (PG_ACTIVE | PG_REPEERING)) || pg.inflight == 0 && !pg.flush_batch)
{
start_pg_peering(pg);
@ -222,9 +222,6 @@ void osd_t::start_pg_peering(pg_t & pg)
}
if (pg.pg_cursize < pg.pg_minsize)
{
// FIXME: Incomplete EC PGs may currently easily lead to write hangs ("slow ops" in OSD logs)
// because such PGs don't flush unstable entries on secondary OSDs so they can't remove these
// entries from their journals...
pg.state = PG_INCOMPLETE;
report_pg_state(pg);
return;
@ -347,7 +344,7 @@ void osd_t::submit_list_subop(osd_num_t role_osd, pg_peering_state_t *ps)
}
add_bs_subop_stats(op);
printf(
"[PG %u/%u] Got object list from OSD %ju (local): %d object versions (%ju of them stable)\n",
"[PG %u/%u] Got object list from OSD %lu (local): %d object versions (%lu of them stable)\n",
ps->pool_id, ps->pg_num, role_osd, bs_op->retval, bs_op->version
);
ps->list_results[role_osd] = {
@ -387,7 +384,7 @@ void osd_t::submit_list_subop(osd_num_t role_osd, pg_peering_state_t *ps)
{
if (op->reply.hdr.retval < 0)
{
printf("Failed to get object list from OSD %ju (retval=%jd), disconnecting peer\n", role_osd, op->reply.hdr.retval);
printf("Failed to get object list from OSD %lu (retval=%ld), disconnecting peer\n", role_osd, op->reply.hdr.retval);
int fail_fd = op->peer_fd;
ps->list_ops.erase(role_osd);
delete op;
@ -395,7 +392,7 @@ void osd_t::submit_list_subop(osd_num_t role_osd, pg_peering_state_t *ps)
return;
}
printf(
"[PG %u/%u] Got object list from OSD %ju: %jd object versions (%ju of them stable)\n",
"[PG %u/%u] Got object list from OSD %lu: %ld object versions (%lu of them stable)\n",
ps->pool_id, ps->pg_num, role_osd, op->reply.hdr.retval, op->reply.sec_list.stable_count
);
ps->list_results[role_osd] = {


@ -239,7 +239,7 @@ void pg_obj_state_check_t::finish_object()
{
if (log_level > 1)
{
printf("Object is incomplete: %jx:%jx version=%ju/%ju\n", oid.inode, oid.stripe, target_ver, max_ver);
printf("Object is incomplete: %lx:%lx version=%lu/%lu\n", oid.inode, oid.stripe, target_ver, max_ver);
}
state = OBJ_INCOMPLETE;
pg->state = pg->state | PG_HAS_INCOMPLETE;
@ -248,7 +248,7 @@ void pg_obj_state_check_t::finish_object()
{
if (log_level > 1)
{
printf("Object is degraded: %jx:%jx version=%ju/%ju\n", oid.inode, oid.stripe, target_ver, max_ver);
printf("Object is degraded: %lx:%lx version=%lu/%lu\n", oid.inode, oid.stripe, target_ver, max_ver);
}
state = OBJ_DEGRADED;
pg->state = pg->state | PG_HAS_DEGRADED;
@ -257,7 +257,7 @@ void pg_obj_state_check_t::finish_object()
{
if (log_level > 2)
{
printf("Object is misplaced: %jx:%jx version=%ju/%ju\n", oid.inode, oid.stripe, target_ver, max_ver);
printf("Object is misplaced: %lx:%lx version=%lu/%lu\n", oid.inode, oid.stripe, target_ver, max_ver);
}
state |= OBJ_MISPLACED;
pg->state = pg->state | PG_HAS_MISPLACED;
@ -267,7 +267,7 @@ void pg_obj_state_check_t::finish_object()
{
for (int i = obj_start; i < obj_end; i++)
{
printf("v%ju present on: osd %ju, role %jd%s\n", list[i].version, list[i].osd_num,
printf("v%lu present on: osd %lu, role %ld%s\n", list[i].version, list[i].osd_num,
(list[i].oid.stripe & STRIPE_MASK), list[i].is_stable ? " (stable)" : "");
}
}
@ -445,7 +445,7 @@ void pg_t::calc_object_states(int log_level)
osd_set_desc += (osd_set_desc == "" ? "" : ", ")+std::to_string(osd_num);
}
printf(
"[PG %u/%u] %ju clean objects on target OSD set %s\n",
"[PG %u/%u] %lu clean objects on target OSD set %s\n",
pool_id, pg_num, clean_count, osd_set_desc.c_str()
);
for (auto & stp: state_dict)
@ -460,7 +460,7 @@ void pg_t::calc_object_states(int log_level)
(loc.loc_bad & LOC_CORRUPTED ? "(corrupted)" : "")+
(loc.loc_bad & LOC_INCONSISTENT ? "(inconsistent)" : "");
}
printf("[PG %u/%u] %ju objects on OSD set %s\n", pool_id, pg_num, stp.second.object_count, osd_set_desc.c_str());
printf("[PG %u/%u] %lu objects on OSD set %s\n", pool_id, pg_num, stp.second.object_count, osd_set_desc.c_str());
}
}
}
@ -468,7 +468,7 @@ void pg_t::calc_object_states(int log_level)
void pg_t::print_state()
{
printf(
"[PG %u/%u] is %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s (%ju objects)\n", pool_id, pg_num,
"[PG %u/%u] is %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s (%lu objects)\n", pool_id, pg_num,
(state & PG_STARTING) ? "starting" : "",
(state & PG_OFFLINE) ? "offline" : "",
(state & PG_PEERING) ? "peering" : "",


@ -49,10 +49,10 @@ int main(int argc, char *argv[])
pg.peering_state->list_results[osd_num] = r;
}
pg.calc_object_states(0);
printf("deviation variants=%jd clean=%ju\n", pg.state_dict.size(), pg.clean_count);
printf("deviation variants=%ld clean=%lu\n", pg.state_dict.size(), pg.clean_count);
for (auto it: pg.state_dict)
{
printf("dev: state=%jx\n", it.second.state);
printf("dev: state=%lx\n", it.second.state);
}
delete pg.peering_state;
return 0;


@ -473,7 +473,7 @@ pg_osd_set_state_t* osd_t::add_object_to_set(pg_t & pg, const object_id oid, con
}
if (this->log_level >= log_at_level)
{
printf("Marking object %jx:%jx ", oid.inode, oid.stripe);
printf("Marking object %lx:%lx ", oid.inode, oid.stripe);
for (int i = 0, j = 0; i < object_state_bit_count; i++)
{
if ((obj_state & object_state_bits[i]) || object_state_bits[i] == 0 && obj_state == 0)
@ -483,31 +483,31 @@ pg_osd_set_state_t* osd_t::add_object_to_set(pg_t & pg, const object_id oid, con
}
if (pg.scheme == POOL_SCHEME_REPLICATED)
{
printf(": %ju copies available", n_copies);
printf(": %lu copies available", n_copies);
}
else
{
printf(": %ju parts / %ju copies available", n_roles, n_copies);
printf(": %lu parts / %lu copies available", n_roles, n_copies);
}
if (n_invalid > 0)
{
printf(", %ju invalid", n_invalid);
printf(", %lu invalid", n_invalid);
}
if (n_outdated > 0)
{
printf(", %ju outdated", n_outdated);
printf(", %lu outdated", n_outdated);
}
if (n_misplaced > 0)
{
printf(", %ju misplaced", n_misplaced);
printf(", %lu misplaced", n_misplaced);
}
if (n_corrupted > 0)
{
printf(", %ju corrupted", n_corrupted);
printf(", %lu corrupted", n_corrupted);
}
if (n_inconsistent > 0)
{
printf(", %ju inconsistent", n_inconsistent);
printf(", %lu inconsistent", n_inconsistent);
}
printf("\n");
}


@ -25,7 +25,7 @@ void osd_t::autosync()
{
if (op->reply.hdr.retval < 0)
{
printf("Warning: automatic sync resulted in an error: %jd (%s)\n", -op->reply.hdr.retval, strerror(-op->reply.hdr.retval));
printf("Warning: automatic sync resulted in an error: %ld (%s)\n", -op->reply.hdr.retval, strerror(-op->reply.hdr.retval));
}
delete autosync_op;
autosync_op = NULL;
@ -197,7 +197,7 @@ int osd_t::submit_primary_subop_batch(int submit_type, inode_t inode, uint64_t o
});
#ifdef OSD_DEBUG
printf(
"Submit %s to local: %jx:%jx v%ju %u-%u\n", wr ? "write" : "read",
"Submit %s to local: %lx:%lx v%lu %u-%u\n", wr ? "write" : "read",
inode, op_data->oid.stripe | stripe_num, op_version,
subop->bs_op->offset, subop->bs_op->len
);
@ -225,7 +225,7 @@ int osd_t::submit_primary_subop_batch(int submit_type, inode_t inode, uint64_t o
};
#ifdef OSD_DEBUG
printf(
"Submit %s to osd %ju: %jx:%jx v%ju %u-%u\n", wr ? "write" : "read", role_osd_num,
"Submit %s to osd %lu: %lx:%lx v%lu %u-%u\n", wr ? "write" : "read", role_osd_num,
inode, op_data->oid.stripe | stripe_num, op_version,
subop->req.sec_rw.offset, subop->req.sec_rw.len
);
@ -369,14 +369,14 @@ void osd_t::handle_primary_subop(osd_op_t *subop, osd_op_t *cur_op)
#ifdef OSD_DEBUG
uint64_t peer_osd = msgr.clients.find(subop->peer_fd) != msgr.clients.end()
? msgr.clients[subop->peer_fd]->osd_num : osd_num;
printf("subop %s %jx:%jx from osd %ju: version = %ju\n", osd_op_names[opcode], subop->req.sec_rw.oid.inode, subop->req.sec_rw.oid.stripe, peer_osd, version);
printf("subop %s %lx:%lx from osd %lu: version = %lu\n", osd_op_names[opcode], subop->req.sec_rw.oid.inode, subop->req.sec_rw.oid.stripe, peer_osd, version);
#endif
if (op_data->fact_ver != UINT64_MAX)
{
if (op_data->fact_ver != 0 && op_data->fact_ver != version)
{
fprintf(
stderr, "different fact_versions returned from %s subops: %ju vs %ju\n",
stderr, "different fact_versions returned from %s subops: %lu vs %lu\n",
osd_op_names[opcode], version, op_data->fact_ver
);
retval = -ERANGE;
@ -391,8 +391,8 @@ void osd_t::handle_primary_subop(osd_op_t *subop, osd_op_t *cur_op)
{
printf(
subop->peer_fd >= 0
? "%1$s subop to %2$jx:%3$jx v%4$ju failed on peer %7$d: retval = %5$d (expected %6$d)\n"
: "%1$s subop to %2$jx:%3$jx v%4$ju failed locally: retval = %5$d (expected %6$d)\n",
? "%1$s subop to %2$lx:%3$lx v%4$lu failed on peer %7$d: retval = %5$d (expected %6$d)\n"
: "%1$s subop to %2$lx:%3$lx v%4$lu failed locally: retval = %5$d (expected %6$d)\n",
osd_op_names[opcode], subop->req.sec_rw.oid.inode, subop->req.sec_rw.oid.stripe, subop->req.sec_rw.version,
retval, expected, subop->peer_fd
);


@ -861,15 +861,15 @@ static void calc_rmw_parity_copy_mod(osd_rmw_stripe_t *stripes, int pg_size, int
static void calc_rmw_parity_copy_parity(osd_rmw_stripe_t *stripes, int pg_size, int pg_minsize,
uint64_t *read_osd_set, uint64_t *write_osd_set, uint32_t chunk_size, uint32_t start, uint32_t end)
{
if (write_osd_set != read_osd_set && end != 0)
if (write_osd_set != read_osd_set)
{
for (int role = pg_minsize; role < pg_size; role++)
{
if (write_osd_set[role] != read_osd_set[role] && write_osd_set[role] != 0 && (start != 0 || end != chunk_size))
if (write_osd_set[role] != read_osd_set[role] && (start != 0 || end != chunk_size))
{
// Copy new parity into the read buffer to write it back
memcpy(
(uint8_t*)stripes[role].read_buf + start - stripes[role].read_start,
(uint8_t*)stripes[role].read_buf + start,
stripes[role].write_buf,
end - start
);
@ -885,7 +885,7 @@ static void calc_rmw_parity_copy_parity(osd_rmw_stripe_t *stripes, int pg_size,
{
auto & s = stripes[role];
printf(
"Tr=%ju Tw=%ju Q=%x-%x R=%x-%x W=%x-%x Rb=%jx Wb=%jx\n",
"Tr=%lu Tw=%lu Q=%x-%x R=%x-%x W=%x-%x Rb=%lx Wb=%lx\n",
read_osd_set[role], write_osd_set[role],
s.req_start, s.req_end,
s.read_start, s.read_end,


@ -30,7 +30,6 @@ void test16();
void test_recover_22_d2();
void test_ec43_error_bruteforce();
void test_recover_53_d5();
void test_recover_22();
int main(int narg, char *args[])
{
@ -71,8 +70,6 @@ int main(int narg, char *args[])
test_ec43_error_bruteforce();
// Test 19
test_recover_53_d5();
// Test 20
test_recover_22();
// End
printf("all ok\n");
return 0;
@ -1247,99 +1244,3 @@ void test_recover_53_d5()
// Done
use_ec(8, 5, false);
}
void test_recover_22()
{
const int bmp = 128*1024 / 4096 / 8;
use_ec(4, 2, true);
osd_num_t osd_set[4] = { 1, 2, 3, 4 };
osd_num_t write_osd_set[4] = { 5, 0, 3, 0 };
osd_rmw_stripe_t stripes[4] = {};
unsigned bitmaps[4] = { 0 };
// split
void *write_buf = (uint8_t*)malloc_or_die(4096);
set_pattern(write_buf, 4096, PATTERN0);
split_stripes(2, 128*1024, 120*1024, 4096, stripes);
assert(stripes[0].req_start == 120*1024 && stripes[0].req_end == 124*1024);
assert(stripes[1].req_start == 0 && stripes[1].req_end == 0);
assert(stripes[2].req_start == 0 && stripes[2].req_end == 0);
assert(stripes[3].req_start == 0 && stripes[3].req_end == 0);
// calc_rmw
void *rmw_buf = calc_rmw(write_buf, stripes, osd_set, 4, 2, 2, write_osd_set, 128*1024, bmp);
for (int i = 0; i < 4; i++)
stripes[i].bmp_buf = bitmaps+i;
assert(rmw_buf);
assert(stripes[0].read_start == 0 && stripes[0].read_end == 128*1024);
assert(stripes[1].read_start == 120*1024 && stripes[1].read_end == 124*1024);
assert(stripes[2].read_start == 0 && stripes[2].read_end == 0);
assert(stripes[3].read_start == 0 && stripes[3].read_end == 0);
assert(stripes[0].write_start == 120*1024 && stripes[0].write_end == 124*1024);
assert(stripes[1].write_start == 0 && stripes[1].write_end == 0);
assert(stripes[2].write_start == 120*1024 && stripes[2].write_end == 124*1024);
assert(stripes[3].write_start == 0 && stripes[3].write_end == 0);
assert(stripes[0].read_buf == (uint8_t*)rmw_buf+4*1024);
assert(stripes[1].read_buf == (uint8_t*)rmw_buf+132*1024);
assert(stripes[2].read_buf == NULL);
assert(stripes[3].read_buf == NULL);
assert(stripes[0].write_buf == write_buf);
assert(stripes[1].write_buf == NULL);
assert(stripes[2].write_buf == (uint8_t*)rmw_buf);
assert(stripes[3].write_buf == NULL);
// encode
set_pattern(stripes[0].read_buf, 128*1024, PATTERN1);
set_pattern(stripes[1].read_buf, 4*1024, PATTERN2);
memset(stripes[0].bmp_buf, 0xff, bmp);
memset(stripes[1].bmp_buf, 0xff, bmp);
calc_rmw_parity_ec(stripes, 4, 2, osd_set, write_osd_set, 128*1024, bmp);
assert(*(uint32_t*)stripes[2].bmp_buf == 0);
assert(stripes[0].write_start == 0 && stripes[0].write_end == 128*1024);
assert(stripes[1].write_start == 0 && stripes[1].write_end == 0);
assert(stripes[2].write_start == 120*1024 && stripes[2].write_end == 124*1024);
assert(stripes[3].write_start == 0 && stripes[3].write_end == 0);
assert(stripes[0].write_buf == stripes[0].read_buf);
assert(stripes[1].write_buf == NULL);
assert(stripes[2].write_buf == (uint8_t*)rmw_buf);
assert(stripes[3].write_buf == NULL);
check_pattern(stripes[2].write_buf, 4*1024, PATTERN0^PATTERN2);
// decode and verify
memset(stripes, 0, sizeof(stripes));
split_stripes(2, 128*1024, 0, 256*1024, stripes);
assert(stripes[0].req_start == 0 && stripes[0].req_end == 128*1024);
assert(stripes[1].req_start == 0 && stripes[1].req_end == 128*1024);
assert(stripes[2].req_start == 0 && stripes[2].req_end == 0);
assert(stripes[3].req_start == 0 && stripes[3].req_end == 0);
for (int role = 0; role < 4; role++)
{
stripes[role].read_start = stripes[role].req_start;
stripes[role].read_end = stripes[role].req_end;
}
assert(extend_missing_stripes(stripes, write_osd_set, 2, 4) == 0);
assert(stripes[0].read_start == 0 && stripes[0].read_end == 128*1024);
assert(stripes[1].read_start == 0 && stripes[1].read_end == 128*1024);
assert(stripes[2].read_start == 0 && stripes[2].read_end == 128*1024);
assert(stripes[3].read_start == 0 && stripes[3].read_end == 0);
void *read_buf = alloc_read_buffer(stripes, 4, 0);
for (int i = 0; i < 4; i++)
stripes[i].bmp_buf = bitmaps+i;
assert(read_buf);
assert(stripes[0].read_buf == read_buf);
assert(stripes[1].read_buf == (uint8_t*)read_buf+128*1024);
assert(stripes[2].read_buf == (uint8_t*)read_buf+2*128*1024);
set_pattern(stripes[0].read_buf, 128*1024, PATTERN1);
set_pattern(stripes[0].read_buf+120*1024, 4*1024, PATTERN0);
set_pattern(stripes[2].read_buf, 128*1024, PATTERN1^PATTERN2);
set_pattern(stripes[2].read_buf+120*1024, 4*1024, PATTERN0^PATTERN2);
memset(stripes[0].bmp_buf, 0xff, bmp);
memset(stripes[2].bmp_buf, 0, bmp);
bitmaps[1] = 0;
bitmaps[3] = 0;
reconstruct_stripes_ec(stripes, 4, 2, bmp);
assert(bitmaps[0] == 0xFFFFFFFF);
assert(*(uint32_t*)stripes[1].bmp_buf == 0xFFFFFFFF);
check_pattern(stripes[1].read_buf, 128*1024, PATTERN2);
free(read_buf);
// Done
free(rmw_buf);
free(write_buf);
use_ec(4, 2, false);
}

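The PATTERN0^PATTERN2 check in test_recover_22() above relies on the first parity chunk of this 2+2 layout behaving as the plain XOR of the two data chunks, so after the 4 KB overwrite the parity over that range must equal the new chunk-0 data XORed with the unchanged chunk-1 data. A minimal sketch of that identity, using made-up buffers and hypothetical helpers rather than the routines under test:

#include <assert.h>
#include <stdint.h>
#include <string.h>

// Hypothetical helper: XOR two equal-sized buffers into dst.
static void xor_bufs(uint8_t *dst, const uint8_t *a, const uint8_t *b, size_t len)
{
    for (size_t i = 0; i < len; i++)
        dst[i] = a[i] ^ b[i];
}

int main()
{
    uint8_t d0_old[16], d0_new[16], d1[16], p_old[16], p_upd[16], p_full[16];
    memset(d0_old, 0x11, sizeof(d0_old)); // data chunk 0 before the overwrite
    memset(d0_new, 0x22, sizeof(d0_new)); // data chunk 0 after the overwrite
    memset(d1, 0x33, sizeof(d1));         // data chunk 1, untouched
    xor_bufs(p_old, d0_old, d1, sizeof(p_old));   // parity before the overwrite: d0 ^ d1
    // Incremental parity update: fold the old data out and the new data in.
    for (size_t i = 0; i < sizeof(p_upd); i++)
        p_upd[i] = p_old[i] ^ d0_old[i] ^ d0_new[i];
    xor_bufs(p_full, d0_new, d1, sizeof(p_full)); // parity recomputed from scratch
    assert(memcmp(p_upd, p_full, sizeof(p_upd)) == 0);
    return 0;
}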

@@ -82,7 +82,7 @@ void osd_t::scrub_list(pool_pg_num_t pg_id, osd_num_t role_osd, object_id min_oi
scrub_list_op = NULL;
if (op->reply.hdr.retval < 0)
{
printf("Failed to get object list from OSD %ju (retval=%jd), disconnecting peer\n", role_osd, op->reply.hdr.retval);
printf("Failed to get object list from OSD %lu (retval=%ld), disconnecting peer\n", role_osd, op->reply.hdr.retval);
int fail_fd = op->peer_fd;
delete op;
msgr.stop_client(fail_fd);
@@ -239,7 +239,7 @@ void osd_t::submit_scrub_op(object_id oid)
};
if (log_level > 2)
{
printf("Submitting scrub for %jx:%jx\n", oid.inode, oid.stripe);
printf("Submitting scrub for %lx:%lx\n", oid.inode, oid.stripe);
}
osd_op->callback = [this](osd_op_t *osd_op)
{
@@ -248,7 +248,7 @@ void osd_t::submit_scrub_op(object_id oid)
{
// Scrub error
printf(
"Scrub failed with object %jx:%jx (PG %u/%u): error %jd\n",
"Scrub failed with object %lx:%lx (PG %u/%u): error %ld\n",
oid.inode, oid.stripe, INODE_POOL(oid.inode),
map_to_pg(oid, st_cli.pool_config.at(INODE_POOL(oid.inode)).pg_stripe_size),
osd_op->reply.hdr.retval
@@ -256,7 +256,7 @@ void osd_t::submit_scrub_op(object_id oid)
}
else if (log_level > 2)
{
printf("Scrubbed %jx:%jx\n", oid.inode, oid.stripe);
printf("Scrubbed %lx:%lx\n", oid.inode, oid.stripe);
}
delete osd_op;
if (scrub_sleep_ms)
@@ -518,7 +518,7 @@ resume_2:
if (votes[role] > 0 && votes[role] < votes[best])
{
printf(
"[PG %u/%u] Object %jx:%jx v%ju copy on OSD %ju doesn't match %d other copies%s\n",
"[PG %u/%u] Object %lx:%lx v%lu copy on OSD %lu doesn't match %d other copies%s\n",
INODE_POOL(op_data->oid.inode), op_data->pg_num,
op_data->oid.inode, op_data->oid.stripe, op_data->fact_ver,
op_data->stripes[role].osd_num, votes[best],
@@ -541,7 +541,7 @@ resume_2:
best = -1;
inconsistent = true;
printf(
"[PG %u/%u] Object %jx:%jx v%ju is inconsistent: copies don't match. Use vitastor-cli fix to fix it\n",
"[PG %u/%u] Object %lx:%lx v%lu is inconsistent: copies don't match. Use vitastor-cli fix to fix it\n",
INODE_POOL(op_data->oid.inode), op_data->pg_num,
op_data->oid.inode, op_data->oid.stripe, op_data->fact_ver
);
@@ -559,7 +559,7 @@ resume_2:
{
inconsistent = true;
printf(
"[PG %u/%u] Object %jx:%jx v%ju is inconsistent: parity chunks don't match data. Use vitastor-cli fix to fix it\n",
"[PG %u/%u] Object %lx:%lx v%lu is inconsistent: parity chunks don't match data. Use vitastor-cli fix to fix it\n",
INODE_POOL(op_data->oid.inode), op_data->pg_num,
op_data->oid.inode, op_data->oid.stripe, op_data->fact_ver
);
@@ -584,7 +584,7 @@ resume_2:
if (!op_data->stripes[role].missing && op_data->stripes[role].read_error)
{
printf(
"[PG %u/%u] Object %jx:%jx v%ju chunk %d on OSD %ju doesn't match other chunks%s\n",
"[PG %u/%u] Object %lx:%lx v%lu chunk %d on OSD %lu doesn't match other chunks%s\n",
INODE_POOL(op_data->oid.inode), op_data->pg_num,
op_data->oid.inode, op_data->oid.stripe, op_data->fact_ver,
role, op_data->stripes[role].osd_num,
@@ -596,7 +596,7 @@ resume_2:
{
inconsistent = true;
printf(
"[PG %u/%u] Object %jx:%jx v%ju is marked as inconsistent because scrub_find_best is turned off. Use vitastor-cli fix to fix it\n",
"[PG %u/%u] Object %lx:%lx v%lu is marked as inconsistent because scrub_find_best is turned off. Use vitastor-cli fix to fix it\n",
INODE_POOL(op_data->oid.inode), op_data->pg_num,
op_data->oid.inode, op_data->oid.stripe, op_data->fact_ver
);


@@ -42,10 +42,8 @@ void osd_t::secondary_op_callback(osd_op_t *op)
int retval = op->bs_op->retval;
delete op->bs_op;
op->bs_op = NULL;
if (op->is_recovery_related() && recovery_target_sleep_us &&
op->req.hdr.opcode == OSD_OP_SEC_STABILIZE)
if (op->is_recovery_related() && recovery_target_sleep_us)
{
// Apply pause AFTER commit. Do not apply pause to SYNC at all
if (!op->tv_end.tv_sec)
{
clock_gettime(CLOCK_REALTIME, &op->tv_end);
@@ -61,25 +59,7 @@ void osd_t::secondary_op_callback(osd_op_t *op)
}
}
void osd_t::exec_secondary(osd_op_t *op)
{
if (op->is_recovery_related() && recovery_target_sleep_us &&
op->req.hdr.opcode != OSD_OP_SEC_STABILIZE && op->req.hdr.opcode != OSD_OP_SEC_SYNC)
{
// Apply pause BEFORE write/delete
tfd->set_timer_us(recovery_target_sleep_us, false, [this, op](int timer_id)
{
clock_gettime(CLOCK_REALTIME, &op->tv_begin);
exec_secondary_real(op);
});
}
else
{
exec_secondary_real(op);
}
}
void osd_t::exec_secondary_real(osd_op_t *cur_op)
void osd_t::exec_secondary(osd_op_t *cur_op)
{
if (cur_op->req.hdr.opcode == OSD_OP_SEC_READ_BMP)
{

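The two hunks above differ in where the recovery throttling pause is applied: one variant defers recovery-related writes and deletes with a timer before executing them (and pauses only OSD_OP_SEC_STABILIZE after completion), while the other applies the pause after any recovery-related operation completes. A minimal sketch contrasting the two placements, with a hypothetical run_op() and sleep budget rather than vitastor's timer interface:

#include <chrono>
#include <cstdio>
#include <thread>

// Hypothetical stand-in for executing a secondary operation.
static void run_op(int id)
{
    std::printf("op %d executed\n", id);
}

// Pause BEFORE executing: the operation itself is held back by the budget.
static void throttled_before(int id, int budget_us)
{
    std::this_thread::sleep_for(std::chrono::microseconds(budget_us));
    run_op(id);
}

// Pause AFTER executing: the operation runs at once, but its completion
// (and therefore the next recovery operation) is delayed by the same budget.
static void throttled_after(int id, int budget_us)
{
    run_op(id);
    std::this_thread::sleep_for(std::chrono::microseconds(budget_us));
}

int main()
{
    throttled_before(1, 100000);
    throttled_after(2, 100000);
    return 0;
}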

@@ -174,7 +174,7 @@ bool check_reply(int r, osd_any_op_t & op, osd_any_reply_t & reply, int expected
}
if (expected >= 0 && reply.hdr.retval != expected)
{
printf("operation failed, retval=%jd\n", reply.hdr.retval);
printf("operation failed, retval=%ld\n", reply.hdr.retval);
return false;
}
return true;
@@ -210,7 +210,7 @@ uint64_t test_read(int connect_fd, uint64_t inode, uint64_t stripe, uint64_t ver
return 0;
}
free(data);
printf("Read %jx:%jx v%ju = v%ju\n", inode, stripe, version, reply.sec_rw.version);
printf("Read %lx:%lx v%lu = v%lu\n", inode, stripe, version, reply.sec_rw.version);
op.hdr.opcode = OSD_OP_SEC_LIST;
op.sec_list.list_pg = 1;
op.sec_list.pg_count = 1;
@@ -234,7 +234,7 @@ uint64_t test_read(int connect_fd, uint64_t inode, uint64_t stripe, uint64_t ver
{
if (ov[i].oid.inode == inode && (ov[i].oid.stripe & ~(4096-1)) == (stripe & ~(4096-1)))
{
printf("list: %jx:%jx v%ju stable=%d\n", ov[i].oid.inode, ov[i].oid.stripe, ov[i].version, i < reply.sec_list.stable_count ? 1 : 0);
printf("list: %lx:%lx v%lu stable=%d\n", ov[i].oid.inode, ov[i].oid.stripe, ov[i].version, i < reply.sec_list.stable_count ? 1 : 0);
}
}
return 0;


@@ -35,9 +35,9 @@ static uint64_t sync_sum = 0, sync_count = 0;
void handle_sigint(int sig)
{
printf("4k randread: %ju us avg\n", read_count ? read_sum/read_count : 0);
printf("4k randwrite: %ju us avg\n", write_count ? write_sum/write_count : 0);
printf("sync: %ju us avg\n", sync_count ? sync_sum/sync_count : 0);
printf("4k randread: %lu us avg\n", read_count ? read_sum/read_count : 0);
printf("4k randwrite: %lu us avg\n", write_count ? write_sum/write_count : 0);
printf("sync: %lu us avg\n", sync_count ? sync_sum/sync_count : 0);
exit(0);
}
@@ -106,7 +106,7 @@ bool check_reply(int r, osd_any_op_t & op, osd_any_reply_t & reply, int expected
}
if (reply.hdr.retval != expected)
{
printf("operation failed, retval=%jd (%s)\n", reply.hdr.retval, strerror(-reply.hdr.retval));
printf("operation failed, retval=%ld (%s)\n", reply.hdr.retval, strerror(-reply.hdr.retval));
return false;
}
return true;


@@ -127,7 +127,7 @@ void run_stub(int peer_fd)
}
else
{
printf("client %d: unsupported stub opcode: %ju\n", peer_fd, op.hdr.opcode);
printf("client %d: unsupported stub opcode: %lu\n", peer_fd, op.hdr.opcode);
break;
}
}


@@ -89,7 +89,7 @@ void stub_exec_op(osd_messenger_t *msgr, osd_op_t *op)
}
else
{
printf("client %d: unsupported stub opcode: %ju\n", op->peer_fd, op->req.hdr.opcode);
printf("client %d: unsupported stub opcode: %lu\n", op->peer_fd, op->req.hdr.opcode);
op->reply.hdr.retval = -EINVAL;
}
msgr->outbox_push(op);


@@ -18,7 +18,7 @@ void alloc_all(int size)
}
if (x != i)
{
printf("incorrect block allocated: expected %d, got %ju\n", i, x);
printf("incorrect block allocated: expected %d, got %lu\n", i, x);
}
if (a->get(x))
{
@@ -33,7 +33,7 @@ void alloc_all(int size)
uint64_t x = a->find_free();
if (x != UINT64_MAX)
{
printf("extra free space found: %jx (%d)\n", x, size);
printf("extra free space found: %lx (%d)\n", x, size);
exit(1);
}
delete a;


@@ -53,7 +53,7 @@ int main(int narg, char *args[])
}
else if (main_state == 2)
{
printf("version %ju written, syncing\n", op.version);
printf("version %lu written, syncing\n", op.version);
version = op.version;
op.opcode = BS_OP_SYNC;
bs->enqueue_op(&op);
@@ -61,7 +61,7 @@ int main(int narg, char *args[])
}
else if (main_state == 4)
{
printf("stabilizing version %ju\n", version);
printf("stabilizing version %lu\n", version);
op.opcode = BS_OP_STABLE;
op.len = 1;
*((obj_ver_id*)op.buf) = {
@@ -73,7 +73,7 @@ int main(int narg, char *args[])
}
else if (main_state == 6)
{
printf("stabilizing version %ju\n", version);
printf("stabilizing version %lu\n", version);
op.opcode = BS_OP_STABLE;
op.len = 1;
*((obj_ver_id*)op.buf) = {


@@ -49,7 +49,7 @@ void configure_single_pg_pool(cluster_client_t *cli)
int *test_write(cluster_client_t *cli, uint64_t offset, uint64_t len, uint8_t c, std::function<void()> cb = NULL, bool instant = false)
{
printf("Post write %jx+%jx\n", offset, len);
printf("Post write %lx+%lx\n", offset, len);
int *r = new int;
*r = instant ? -2 : -1;
cluster_op_t *op = new cluster_op_t();
@@ -66,7 +66,7 @@ int *test_write(cluster_client_t *cli, uint64_t offset, uint64_t len, uint8_t c,
assert(*r != -1);
*r = op->retval == op->len ? 1 : 0;
free(op->iov.buf[0].iov_base);
printf("Done write %jx+%jx r=%d\n", op->offset, op->len, op->retval);
printf("Done write %lx+%lx r=%d\n", op->offset, op->len, op->retval);
delete op;
if (cb != NULL)
cb();
@@ -117,7 +117,7 @@ void check_completed(int *r)
void pretend_connected(cluster_client_t *cli, osd_num_t osd_num)
{
printf("OSD %ju connected\n", osd_num);
printf("OSD %lu connected\n", osd_num);
int peer_fd = cli->msgr.clients.size() ? std::prev(cli->msgr.clients.end())->first+1 : 10;
cli->msgr.osd_peer_fds[osd_num] = peer_fd;
cli->msgr.clients[peer_fd] = new osd_client_t();
@@ -129,7 +129,7 @@ void pretend_connected(cluster_client_t *cli, osd_num_t osd_num)
void pretend_disconnected(cluster_client_t *cli, osd_num_t osd_num)
{
printf("OSD %ju disconnected\n", osd_num);
printf("OSD %lu disconnected\n", osd_num);
cli->msgr.stop_client(cli->msgr.osd_peer_fds.at(osd_num));
}
@@ -137,7 +137,7 @@ void check_disconnected(cluster_client_t *cli, osd_num_t osd_num)
{
if (cli->msgr.osd_peer_fds.find(osd_num) != cli->msgr.osd_peer_fds.end())
{
printf("OSD %ju not disconnected as it ought to be\n", osd_num);
printf("OSD %lu not disconnected as it ought to be\n", osd_num);
assert(0);
}
}
@@ -170,17 +170,17 @@ osd_op_t *find_op(cluster_client_t *cli, osd_num_t osd_num, uint64_t opcode, uin
op_it = cli->msgr.clients[peer_fd]->sent_ops.begin();
while (op_it != cli->msgr.clients[peer_fd]->sent_ops.end())
{
printf("Found opcode %ju offset %jx size %x\n", op_it->second->req.hdr.opcode, op_it->second->req.rw.offset, op_it->second->req.rw.len);
printf("Found opcode %lu offset %lx size %x\n", op_it->second->req.hdr.opcode, op_it->second->req.rw.offset, op_it->second->req.rw.len);
op_it++;
}
printf("Not found opcode %ju offset %jx size %jx\n", opcode, offset, len);
printf("Not found opcode %lu offset %lx size %lx\n", opcode, offset, len);
return NULL;
}
void pretend_op_completed(cluster_client_t *cli, osd_op_t *op, int64_t retval)
{
assert(op);
printf("Pretend completed %s %jx+%x\n", op->req.hdr.opcode == OSD_OP_SYNC
printf("Pretend completed %s %lx+%x\n", op->req.hdr.opcode == OSD_OP_SYNC
? "sync" : (op->req.hdr.opcode == OSD_OP_WRITE ? "write" : "read"), op->req.rw.offset, op->req.rw.len);
uint64_t op_id = op->req.hdr.id;
int peer_fd = op->peer_fd;
@@ -281,7 +281,7 @@ void test1()
uint8_t c = offset < 0xE000 ? 0x56 : (offset < 0x10000 ? 0x57 : 0x58);
if (((uint8_t*)op->iov.buf[buf_idx].iov_base)[i] != c)
{
printf("Write replay: mismatch at %ju\n", offset-op->req.rw.offset);
printf("Write replay: mismatch at %lu\n", offset-op->req.rw.offset);
goto fail;
}
}
@@ -292,7 +292,7 @@ void test1()
}
if (replay_start != 0 || replay_end != 0x14000)
{
printf("Write replay: range mismatch: %jx-%jx\n", replay_start, replay_end);
printf("Write replay: range mismatch: %lx-%lx\n", replay_start, replay_end);
assert(0);
}
for (auto op: replay_ops)


@@ -12,4 +12,4 @@
#define PATTERN3 0x426bd7854eb08509
#define set_pattern(buf, len, pattern) for (uint64_t i = 0; i < len; i += 8) { *(uint64_t*)((uint8_t*)buf + i) = pattern; }
#define check_pattern(buf, len, pattern) { uint64_t bad = UINT64_MAX; for (uint64_t i = 0; i < len; i += 8) { if ((*(uint64_t*)((uint8_t*)buf + i)) != (pattern)) { bad = i; break; } } if (bad != UINT64_MAX) { printf("mismatch at %jx\n", bad); } assert(bad == UINT64_MAX); }
#define check_pattern(buf, len, pattern) { uint64_t bad = UINT64_MAX; for (uint64_t i = 0; i < len; i += 8) { if ((*(uint64_t*)((uint8_t*)buf + i)) != (pattern)) { bad = i; break; } } if (bad != UINT64_MAX) { printf("mismatch at %lx\n", bad); } assert(bad == UINT64_MAX); }


@@ -563,12 +563,12 @@ int main(int argc, char *argv[])
}
uniformity[pg-1] = u/host_count;
printf("pg %ju: hosts %ju, %ju, %ju ; avg deviation = %.2f\n", pg, r[0], r[1], r[2], u/host_count);
printf("pg %lu: hosts %lu, %lu, %lu ; avg deviation = %.2f\n", pg, r[0], r[1], r[2], u/host_count);
}
printf("total PGs: ");
for (int i = 0; i < host_count; i++)
{
printf(i > 0 ? ", %ju (%.2f)" : "%ju (%.2f)", total_pgs[i], total_pgs[i]/3.0/pg_count * total_weight/host_weights[i]);
printf(i > 0 ? ", %lu (%.2f)" : "%lu (%.2f)", total_pgs[i], total_pgs[i]/3.0/pg_count * total_weight/host_weights[i]);
}
printf("\n");
return 0;


@@ -6,7 +6,7 @@ includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
Name: Vitastor
Description: Vitastor client library
Version: 1.4.7
Version: 1.4.2
Libs: -L${libdir} -lvitastor_client
Cflags: -I${includedir}

Some files were not shown because too many files have changed in this diff.