Compare commits


28 Commits

Author SHA1 Message Date
ff91288288 Implement CSI volume staging (one device per volume on each host) 2023-10-31 01:57:12 +03:00
ff2ccb6ab8 Implement CSI volume expansion 2023-10-31 01:39:46 +03:00
b3f1fd3848 Implement CSI volume snapshots 2023-10-31 01:00:48 +03:00
a378789f10 Rollback erroneous go.mod changes in 1.1.0 O:-) 2023-10-30 18:47:48 +03:00
1fe678e57b Add --no-block to udev rule 2023-10-30 12:18:29 +03:00
2e592a2f22 Fix undefined variable "timeout" 2023-10-29 01:30:55 +03:00
b92f644e3a Fix statistics aggregation, calculate inode stats by first deriving per-OSD stats, too 2023-10-29 01:30:55 +03:00
890ea3dbc0 Forgot to add new parameter page to README 2023-10-28 13:39:53 +03:00
06630369bf Plans++ 2023-10-28 13:38:04 +03:00
b4740acf62 Fix operations paused for 0.5-1 second when it happens that io_uring submit is not triggered 2023-10-28 13:18:21 +03:00
eae81bbda6 Fix typo 2023-10-28 01:09:20 +03:00
8222e3c77d Release 1.1.0
New features:

- Implement [client writeback cache](docs/config/client.en.md#client_enable_writeback)
- Add the third I/O mode: [O_DIRECT|O_SYNC](docs/config/osd.en.md#data_io) (good for Optane)
- Reduce load on etcd by splitting OSD lease and statistics reporting intervals:
  [etcd_stats_interval](docs/config/osd.en.md#etcd_stats_interval) (default 30 sec)
- Make MON automatically filter OSDs by layout (block_size/immediate_commit/bitmap_granularity)
  to prevent "refusing to start PGs of this pool" errors on misconfiguration
- Support running fio benchmarks on systems without io_uring
- Make QEMU driver compatible with QEMU 8.1
- Document usage of [vhost-user-blk](docs/usage/qemu.en.md#vhost-user-blk)

Bug fixes:

- Fix resizing disks in QEMU driver (for example, in Proxmox)
- Fix "unexpected result" in Proxmox driver by making CLI flush output on exit
- Remove unneeded block_size mismatch warnings on pools without matching PGs
- Fix possible segfault in vitastor-cli ls -l (usually with deleted pools)
- Fix QEMU driver compatibility with systems without io_uring
- Fix monitor eating 100% CPU when etcd is down (caused by infinite retries)
- Fix potential incorrect write processing with snapshots (not caught in tests
  but could probably lead to client hangs)
- Fix buffer insertion in cluster_client (not caught in tests but could
  probably lead to incorrect writes in rare cases)
- Fix rare OSD crash during sync operation processing
- Fix a reenterability issue in cluster_client not reproducible in QEMU/fio,
  but reproducible with the currently developed K/V database implementation
- Fix deletion of the first modified object - OSDs could crash if you modified
  the same object a lot of times, then deleted it, and then modified it again
- Fix the fio_sec_osd test tool
2023-10-28 00:33:06 +03:00
29cbe70e74 Bump qemu version to vitastor4 2023-10-28 00:33:06 +03:00
a883e79507 Make docs to add etcd_stats_interval 2023-10-27 14:09:26 +03:00
be7e76f849 Split etcd_stats_interval out of etcd_report_interval 2023-10-27 01:26:26 +03:00
6fd2cf5df6 Add documentation for the write-back cache 2023-10-27 01:26:26 +03:00
294a754c9e Allow write-back by default in NBD & NFS 2023-10-27 01:26:26 +03:00
8bfea6e7de Support vitastor_c_create_epoll() in fio driver 2023-10-26 22:57:36 +03:00
bac9e34836 Allow to create vitastor_c with plain epoll without uring :-) 2023-10-26 22:57:36 +03:00
8aa4d492c1 Allow to use epoll_manager without ringloop 2023-10-26 22:57:36 +03:00
9336ee5476 Correctly free manual "small vector" in cluster_client %-) 2023-10-26 22:57:36 +03:00
ad30b11519 Add the missing ringloop creation check to vitastor_c_create_uring_json() 2023-10-26 18:07:23 +03:00
a061246997 Do not attempt to initialize QEMU driver via vitastor_c_create_qemu_uring()
It doesn't add any compatibility because vitastor_c_uring_register_eventfd()
is added in the same VITASTOR_C_API_VERSION 2.
2023-10-26 17:46:19 +03:00
5066e35a49 Fix write-over-delete failing for the very first entry in dirty_db 2023-10-21 17:00:14 +03:00
93dc31f3fc Fix possible segfault in vitastor-cli ls -l 2023-10-18 11:11:41 +03:00
f245b56176 Fix another possible reenterability issue in cluster_client
Non-reproducible in QEMU/FIO, only caught during K/V DB debugging
2023-10-08 11:02:53 +03:00
befca06f18 Support any OSD count in test_heal 2023-10-08 11:02:53 +03:00
fbf0263625 Add qemu-storage-daemon to documentation 2023-09-16 18:40:52 +03:00
72 changed files with 1105 additions and 449 deletions


@@ -2,6 +2,6 @@ cmake_minimum_required(VERSION 2.8.12)
project(vitastor)
set(VERSION "1.0.0")
set(VERSION "1.1.0")
add_subdirectory(src)


@@ -50,6 +50,7 @@ Vitastor поддерживает QEMU-драйвер, протоколы NBD и
- Параметры
- [Общие](docs/config/common.ru.md)
- [Сетевые](docs/config/network.ru.md)
- [Клиентский код](docs/config/client.en.md)
- [Глобальные дисковые параметры](docs/config/layout-cluster.ru.md)
- [Дисковые параметры OSD](docs/config/layout-osd.ru.md)
- [Прочие параметры OSD](docs/config/osd.ru.md)


@@ -50,6 +50,7 @@ Read more details below in the documentation.
- Parameter Reference
- [Common](docs/config/common.en.md)
- [Network](docs/config/network.en.md)
- [Client](docs/config/client.en.md)
- [Global Disk Layout](docs/config/layout-cluster.en.md)
- [OSD Disk Layout](docs/config/layout-osd.en.md)
- [OSD Runtime Parameters](docs/config/osd.en.md)


@@ -1,4 +1,4 @@
VERSION ?= v1.0.0
VERSION ?= v1.1.0
all: build push


@@ -49,7 +49,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: vitalif/vitastor-csi:v1.0.0
image: vitalif/vitastor-csi:v1.1.0
args:
- "--node=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"


@@ -116,7 +116,7 @@ spec:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
image: vitalif/vitastor-csi:v1.0.0
image: vitalif/vitastor-csi:v1.1.0
args:
- "--node=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"


@@ -9,6 +9,7 @@ require (
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/grpc v1.33.1
google.golang.org/protobuf v1.24.0
k8s.io/klog v1.0.0
k8s.io/utils v0.0.0-20210305010621-2afb4311ab10
)


@@ -5,7 +5,7 @@ package vitastor
const (
vitastorCSIDriverName = "csi.vitastor.io"
vitastorCSIDriverVersion = "1.0.0"
vitastorCSIDriverVersion = "1.1.0"
)
// Config struct fills the parameters of request or user input


@@ -20,6 +20,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/container-storage-interface/spec/lib/go/csi"
)
@@ -45,6 +46,7 @@ type InodeConfig struct
ParentPool uint64 `json:"parent_pool,omitempty"`
ParentId uint64 `json:"parent_id,omitempty"`
Readonly bool `json:"readonly,omitempty"`
CreateTs uint64 `json:"create_ts,omitempty"`
}
type ControllerServer struct
@@ -184,21 +186,11 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
{
if (strings.Index(err.Error(), "already exists") > 0)
{
stat, err := invokeCLI(ctxVars, []string{ "ls", "--json", volName })
inodeCfg, err := invokeList(ctxVars, volName, true)
if (err != nil)
{
return nil, err
}
var inodeCfg []InodeConfig
err = json.Unmarshal(stat, &inodeCfg)
if (err != nil)
{
return nil, status.Error(codes.Internal, "Invalid JSON in vitastor-cli ls: "+err.Error())
}
if (len(inodeCfg) == 0)
{
return nil, status.Error(codes.Internal, "vitastor-cli create said that image already exists, but ls can't find it")
}
if (inodeCfg[0].Size < uint64(volSize))
{
return nil, status.Error(codes.Internal, "image "+volName+" is already created, but size is less than expected")
@@ -230,15 +222,15 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
return nil, status.Error(codes.InvalidArgument, "request cannot be empty")
}
ctxVars := make(map[string]string)
err := json.Unmarshal([]byte(req.VolumeId), &ctxVars)
volVars := make(map[string]string)
err := json.Unmarshal([]byte(req.VolumeId), &volVars)
if (err != nil)
{
return nil, status.Error(codes.Internal, "volume ID not in JSON format")
}
volName := ctxVars["name"]
volName := volVars["name"]
ctxVars, _, _ = GetConnectionParams(ctxVars)
ctxVars, _, _ := GetConnectionParams(volVars)
_, err = invokeCLI(ctxVars, []string{ "rm", volName })
if (err != nil)
@@ -344,6 +336,8 @@ func (cs *ControllerServer) ControllerGetCapabilities(ctx context.Context, req *
csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
// TODO: csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
} {
controllerServerCapabilities = append(controllerServerCapabilities, functionControllerServerCapabilities(capability))
}
@@ -353,28 +347,219 @@ func (cs *ControllerServer) ControllerGetCapabilities(ctx context.Context, req *
}, nil
}
func invokeList(ctxVars map[string]string, pattern string, expectExist bool) ([]InodeConfig, error)
{
stat, err := invokeCLI(ctxVars, []string{ "ls", "--json", pattern })
if (err != nil)
{
return nil, err
}
var inodeCfg []InodeConfig
err = json.Unmarshal(stat, &inodeCfg)
if (err != nil)
{
return nil, status.Error(codes.Internal, "Invalid JSON in vitastor-cli ls: "+err.Error())
}
if (expectExist && len(inodeCfg) == 0)
{
return nil, status.Error(codes.Internal, "Can't find expected image "+pattern+" via vitastor-cli ls")
}
return inodeCfg, nil
}
// CreateSnapshot create snapshot of an existing PV
func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error)
{
return nil, status.Error(codes.Unimplemented, "")
klog.Infof("received controller create snapshot request %+v", protosanitizer.StripSecrets(req))
if (req == nil)
{
return nil, status.Errorf(codes.InvalidArgument, "request cannot be empty")
}
if (req.SourceVolumeId == "" || req.Name == "")
{
return nil, status.Error(codes.InvalidArgument, "source volume ID and snapshot name are required fields")
}
// snapshot name
snapName := req.Name
// req.VolumeId is an ugly json string in our case :)
ctxVars := make(map[string]string)
err := json.Unmarshal([]byte(req.SourceVolumeId), &ctxVars)
if (err != nil)
{
return nil, status.Error(codes.Internal, "volume ID not in JSON format")
}
volName := ctxVars["name"]
// Create image using vitastor-cli
_, err = invokeCLI(ctxVars, []string{ "create", "--snapshot", snapName, volName })
if (err != nil && strings.Index(err.Error(), "already exists") <= 0)
{
return nil, err
}
// Check created snapshot
inodeCfg, err := invokeList(ctxVars, volName+"@"+snapName, true)
if (err != nil)
{
return nil, err
}
// Use ugly JSON snapshot ID again, DeleteSnapshot doesn't have context :-(
ctxVars["snapshot"] = snapName
snapIdJson, _ := json.Marshal(ctxVars)
return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SizeBytes: int64(inodeCfg[0].Size),
SnapshotId: string(snapIdJson),
SourceVolumeId: req.SourceVolumeId,
CreationTime: &timestamppb.Timestamp{ Seconds: int64(inodeCfg[0].CreateTs) },
ReadyToUse: true,
},
}, nil
}
// DeleteSnapshot delete provided snapshot of a PV
func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error)
{
return nil, status.Error(codes.Unimplemented, "")
klog.Infof("received controller delete snapshot request %+v", protosanitizer.StripSecrets(req))
if (req == nil)
{
return nil, status.Errorf(codes.InvalidArgument, "request cannot be empty")
}
if (req.SnapshotId == "")
{
return nil, status.Error(codes.InvalidArgument, "snapshot ID is a required field")
}
volVars := make(map[string]string)
err := json.Unmarshal([]byte(req.SnapshotId), &volVars)
if (err != nil)
{
return nil, status.Error(codes.Internal, "snapshot ID not in JSON format")
}
volName := volVars["name"]
snapName := volVars["snapshot"]
ctxVars, _, _ := GetConnectionParams(volVars)
_, err = invokeCLI(ctxVars, []string{ "rm", volName+"@"+snapName })
if (err != nil)
{
return nil, err
}
return &csi.DeleteSnapshotResponse{}, nil
}
// ListSnapshots list the snapshots of a PV
func (cs *ControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error)
{
return nil, status.Error(codes.Unimplemented, "")
klog.Infof("received controller list snapshots request %+v", protosanitizer.StripSecrets(req))
if (req == nil)
{
return nil, status.Error(codes.InvalidArgument, "request cannot be empty")
}
volVars := make(map[string]string)
err := json.Unmarshal([]byte(req.SourceVolumeId), &volVars)
if (err != nil)
{
return nil, status.Error(codes.Internal, "volume ID not in JSON format")
}
volName := volVars["name"]
ctxVars, _, _ := GetConnectionParams(volVars)
inodeCfg, err := invokeList(ctxVars, volName+"@*", false)
if (err != nil)
{
return nil, err
}
resp := &csi.ListSnapshotsResponse{}
for _, ino := range inodeCfg
{
snapName := ino.Name[len(volName)+1:]
if (len(req.StartingToken) > 0 && snapName < req.StartingToken)
{
}
else if (req.MaxEntries == 0 || len(resp.Entries) < int(req.MaxEntries))
{
volVars["snapshot"] = snapName
snapIdJson, _ := json.Marshal(volVars)
resp.Entries = append(resp.Entries, &csi.ListSnapshotsResponse_Entry{
Snapshot: &csi.Snapshot{
SizeBytes: int64(ino.Size),
SnapshotId: string(snapIdJson),
SourceVolumeId: req.SourceVolumeId,
CreationTime: &timestamppb.Timestamp{ Seconds: int64(ino.CreateTs) },
ReadyToUse: true,
},
})
}
else
{
resp.NextToken = snapName
break
}
}
return resp, nil
}
// ControllerExpandVolume resizes a volume
// ControllerExpandVolume increases the size of a volume
func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error)
{
return nil, status.Error(codes.Unimplemented, "")
klog.Infof("received controller expand volume request %+v", protosanitizer.StripSecrets(req))
if (req == nil)
{
return nil, status.Error(codes.InvalidArgument, "request cannot be empty")
}
if (req.VolumeId == "" || req.CapacityRange == nil || req.CapacityRange.RequiredBytes == 0)
{
return nil, status.Error(codes.InvalidArgument, "VolumeId, CapacityRange and RequiredBytes are required fields")
}
volVars := make(map[string]string)
err := json.Unmarshal([]byte(req.VolumeId), &volVars)
if (err != nil)
{
return nil, status.Error(codes.Internal, "volume ID not in JSON format")
}
volName := volVars["name"]
ctxVars, _, _ := GetConnectionParams(volVars)
inodeCfg, err := invokeList(ctxVars, volName, true)
if (err != nil)
{
return nil, err
}
if (req.CapacityRange.RequiredBytes > 0 && inodeCfg[0].Size < uint64(req.CapacityRange.RequiredBytes))
{
sz := ((req.CapacityRange.RequiredBytes+4095)/4096)*4096
_, err := invokeCLI(ctxVars, []string{ "modify", "--inc_size", "1", "--resize", fmt.Sprintf("%d", sz), volName })
if (err != nil)
{
return nil, err
}
inodeCfg, err := invokeList(ctxVars, volName, true)
if (err != nil)
{
return nil, err
}
return &csi.ControllerExpandVolumeResponse{
CapacityBytes: int64(inodeCfg[0].Size),
// Node expansion is required for FS
NodeExpansionRequired: req.VolumeCapability == nil || req.VolumeCapability.GetBlock() == nil,
}, nil
}
return &csi.ControllerExpandVolumeResponse{
CapacityBytes: int64(inodeCfg[0].Size),
NodeExpansionRequired: false,
}, nil
}
// ControllerGetVolume get volume info
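The snapshot handlers above delegate to `vitastor-cli create --snapshot`, `vitastor-cli rm` and `vitastor-cli ls --json`, so from Kubernetes the feature is consumed through the standard snapshot API. A hedged usage sketch — the class and PVC names are illustrative; only the `csi.vitastor.io` driver name is taken from this changeset:

```
cat <<'EOF' | kubectl apply -f -
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: vitastor-snapclass          # illustrative name
driver: csi.vitastor.io
deletionPolicy: Delete
---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: test-pvc-snap
spec:
  volumeSnapshotClassName: vitastor-snapclass
  source:
    persistentVolumeClaimName: test-pvc   # existing PVC backed by the vitastor CSI driver
EOF
# ReadyToUse flips to true once CreateSnapshot returns
kubectl get volumesnapshot test-pvc-snap
```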


@@ -49,6 +49,13 @@ func (is *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.Ge
},
},
},
{
Type: &csi.PluginCapability_VolumeExpansion_{
VolumeExpansion: &csi.PluginCapability_VolumeExpansion{
Type: csi.PluginCapability_VolumeExpansion_OFFLINE,
},
},
},
},
}, nil
}
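With the OFFLINE VolumeExpansion capability advertised here (plus `allowVolumeExpansion: true` on the StorageClass), a volume is grown by editing its PVC. A rough sketch, assuming a PVC named `test-pvc`:

```
# Request a larger size; the controller rounds it up to a 4096-byte multiple
kubectl patch pvc test-pvc --patch '{"spec":{"resources":{"requests":{"storage":"20Gi"}}}}'
# The new size shows up in the PVC status once (offline) expansion completes
kubectl get pvc test-pvc -o jsonpath='{.status.capacity.storage}'
```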


@@ -37,43 +37,13 @@ func NewNodeServer(driver *Driver) *NodeServer
}
}
// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error)
func (ns *NodeServer) checkMountPoint(targetPath string, isBlock bool)
{
return &csi.NodeStageVolumeResponse{}, nil
}
// NodeUnstageVolume unstages the volume from the staging path
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error)
{
return &csi.NodeUnstageVolumeResponse{}, nil
}
func Contains(list []string, s string) bool
{
for i := 0; i < len(list); i++
{
if (list[i] == s)
{
return true
}
}
return false
}
// NodePublishVolume mounts the volume mounted to the staging path to the target path
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error)
{
klog.Infof("received node publish volume request %+v", protosanitizer.StripSecrets(req))
targetPath := req.GetTargetPath()
isBlock := req.GetVolumeCapability().GetBlock() != nil
// Check that it's not already mounted
_, error := mount.IsNotMountPoint(ns.mounter, targetPath)
if (error != nil)
_, err := mount.IsNotMountPoint(ns.mounter, targetPath)
if (err != nil)
{
if (os.IsNotExist(error))
if (os.IsNotExist(err))
{
if (isBlock)
{
@@ -81,13 +51,13 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
if (err != nil)
{
klog.Errorf("failed to create block device mount target %s with error: %v", targetPath, err)
return nil, status.Error(codes.Internal, err.Error())
return status.Error(codes.Internal, err.Error("failed to create block device mount target"))
}
err = pathFile.Close()
if (err != nil)
{
klog.Errorf("failed to close %s with error: %v", targetPath, err)
return nil, status.Error(codes.Internal, err.Error())
return status.Error(codes.Internal, err.Error("failed to create block device mount target"))
}
}
else
@@ -96,15 +66,32 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
if (err != nil)
{
klog.Errorf("failed to create fs mount target %s with error: %v", targetPath, err)
return nil, status.Error(codes.Internal, err.Error())
return status.Error(codes.Internal, err.Error("failed to create fs mount target"))
}
}
}
else
{
return nil, status.Error(codes.Internal, error.Error())
return status.Error(codes.Internal, err)
}
}
return nil
}
// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error)
{
klog.Infof("received node stage volume request %+v", protosanitizer.StripSecrets(req))
targetPath := req.GetStagingTargetPath()
isBlock := req.GetVolumeCapability().GetBlock() != nil
// Check that it's not already mounted
err := ns.checkMountPoint(targetPath, isBlock)
if (err != nil)
{
return nil, err
}
ctxVars := make(map[string]string)
err := json.Unmarshal([]byte(req.VolumeId), &ctxVars)
@@ -121,7 +108,6 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
}
// Map NBD device
// FIXME: Check if already mapped
args := []string{
"map", "--etcd_address", strings.Join(etcdUrl, ","),
"--etcd_prefix", etcdPrefix,
@@ -226,6 +212,60 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
}
return nil, status.Error(codes.Internal, err.Error())
}
return &csi.NodeStageVolumeResponse{}, nil
}
// NodeUnstageVolume unstages the volume from the staging path
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error)
{
klog.Infof("received node unstage volume request %+v", protosanitizer.StripSecrets(req))
err := ns.unmount(req.GetStagingTargetPath())
if (err != nil)
{
return nil, err
}
return &csi.NodeUnstageVolumeResponse{}, nil
}
func Contains(list []string, s string) bool
{
for i := 0; i < len(list); i++
{
if (list[i] == s)
{
return true
}
}
return false
}
// NodePublishVolume mounts the volume mounted to the staging path to the target path
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error)
{
klog.Infof("received node publish volume request %+v", protosanitizer.StripSecrets(req))
stagingTargetPath := req.GetStagingTargetPath()
targetPath := req.GetTargetPath()
isBlock := req.GetVolumeCapability().GetBlock() != nil
err := ns.checkMountPoint(targetPath, isBlock)
if (err != nil)
{
return nil, err
}
diskMounter := &mount.SafeFormatAndMount{Interface: ns.mounter, Exec: utilexec.New()}
err = diskMounter.Mount(stagingTargetPath, targetPath, "", []string{"bind"})
if (err != nil)
{
klog.Errorf(
"failed to bind staging path (%s) to path (%s) for volume (%s) error: %s",
stagingPath, targetPath, volName, err,
)
return nil, status.Error(codes.Internal, err.Error("failed to bind staging path"))
}
return &csi.NodePublishVolumeResponse{}, nil
}
@@ -233,25 +273,34 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error)
{
klog.Infof("received node unpublish volume request %+v", protosanitizer.StripSecrets(req))
targetPath := req.GetTargetPath()
err := ns.unmount(req.GetTargetPath())
if (err != nil)
{
return nil, err
}
return &csi.NodeUnpublishVolumeResponse{}, nil
}
func (ns *NodeServer) unmount(targetPath string) error
{
devicePath, refCount, err := mount.GetDeviceNameFromMount(ns.mounter, targetPath)
if (err != nil)
{
if (os.IsNotExist(err))
{
return nil, status.Error(codes.NotFound, "Target path not found")
return status.Error(codes.NotFound, "Target path not found")
}
return nil, status.Error(codes.Internal, err.Error())
return status.Error(codes.Internal, err.Error())
}
if (devicePath == "")
{
return nil, status.Error(codes.NotFound, "Volume not mounted")
return status.Error(codes.NotFound, "Volume not mounted")
}
// unmount
err = mount.CleanupMountPoint(targetPath, ns.mounter, false)
if (err != nil)
{
return nil, status.Error(codes.Internal, err.Error())
return status.Error(codes.Internal, err.Error())
}
// unmap NBD device
if (refCount == 1)
@@ -262,7 +311,7 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
klog.Errorf("failed to unmap NBD device %s: %s, error: %v", devicePath, unmapOut, unmapErr)
}
}
return &csi.NodeUnpublishVolumeResponse{}, nil
return nil
}
// NodeGetVolumeStats returns volume capacity statistics available for the volume
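With staging implemented, NodeStageVolume maps and mounts the volume once per node and NodePublishVolume only bind-mounts the staged path into each pod. A rough manual equivalent of what the staging step does (the device, image name and paths are examples, and the CSI plugin passes the etcd options itself):

```
# Map the Vitastor image as a local block device (prints e.g. /dev/nbd0)
vitastor-nbd map --image testimg
# For filesystem volumes, staging formats the device (if empty) and mounts it once per node
mkfs.ext4 /dev/nbd0
mount /dev/nbd0 /mnt/staging          # hypothetical staging path
# Publishing the volume into a pod is then just a bind mount of the staged path
mount --bind /mnt/staging /mnt/pod-target
```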

debian/changelog

@@ -1,10 +1,10 @@
vitastor (1.0.0-1) unstable; urgency=medium
vitastor (1.1.0-1) unstable; urgency=medium
* Bugfixes
-- Vitaliy Filippov <vitalif@yourcmc.ru> Fri, 03 Jun 2022 02:09:44 +0300
vitastor (1.0.0-1) unstable; urgency=medium
vitastor (1.1.0-1) unstable; urgency=medium
* Implement NFS proxy
* Add documentation


@@ -54,7 +54,8 @@ RUN set -e; \
quilt add block/vitastor.c; \
cp /root/vitastor/src/qemu_driver.c block/vitastor.c; \
quilt refresh; \
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)(~bpo[\d\+]*)?\).*$/$1/')+vitastor3; \
V=$(head -n1 debian/changelog | perl -pe 's/5\.2\+dfsg-9/5.2+dfsg-11/; s/^.*\((.*?)(~bpo[\d\+]*)?\).*$/$1/')+vitastor4; \
if [ "$REL" = bullseye ]; then V=${V}bullseye; fi; \
DEBEMAIL="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v $V 'Plug Vitastor block driver'; \
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \
rm -rf /root/packages/qemu-$REL/qemu-*/


@@ -35,8 +35,8 @@ RUN set -e -x; \
mkdir -p /root/packages/vitastor-$REL; \
rm -rf /root/packages/vitastor-$REL/*; \
cd /root/packages/vitastor-$REL; \
cp -r /root/vitastor vitastor-1.0.0; \
cd vitastor-1.0.0; \
cp -r /root/vitastor vitastor-1.1.0; \
cd vitastor-1.1.0; \
ln -s /root/fio-build/fio-*/ ./fio; \
FIO=$(head -n1 fio/debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
ls /usr/include/linux/raw.h || cp ./debian/raw.h /usr/include/linux/raw.h; \
@@ -49,8 +49,8 @@ RUN set -e -x; \
rm -rf a b; \
echo "dep:fio=$FIO" > debian/fio_version; \
cd /root/packages/vitastor-$REL; \
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_1.0.0.orig.tar.xz vitastor-1.0.0; \
cd vitastor-1.0.0; \
tar --sort=name --mtime='2020-01-01' --owner=0 --group=0 --exclude=debian -cJf vitastor_1.1.0.orig.tar.xz vitastor-1.1.0; \
cd vitastor-1.1.0; \
V=$(head -n1 debian/changelog | perl -pe 's/^.*\((.*?)\).*$/$1/'); \
DEBFULLNAME="Vitaliy Filippov <vitalif@yourcmc.ru>" dch -D $REL -v "$V""$REL" "Rebuild for $REL"; \
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage --jobs=auto -sa; \


@@ -33,6 +33,7 @@ In the future, additional configuration methods may be added:
- [Common](config/common.en.md)
- [Network](config/network.en.md)
- [Client](config/client.en.md)
- [Global Disk Layout](config/layout-cluster.en.md)
- [OSD Disk Layout](config/layout-osd.en.md)
- [OSD Runtime Parameters](config/osd.en.md)


@@ -36,6 +36,7 @@
- [Общие](config/common.ru.md)
- [Сеть](config/network.ru.md)
- [Клиентский код](config/client.ru.md)
- [Глобальные дисковые параметры](config/layout-cluster.ru.md)
- [Дисковые параметры OSD](config/layout-osd.ru.md)
- [Прочие параметры OSD](config/osd.ru.md)

docs/config/client.en.md

@@ -0,0 +1,103 @@
[Documentation](../../README.md#documentation) → [Configuration](../config.en.md) → Client Parameters
-----
[Читать на русском](client.ru.md)
# Client Parameters
These parameters apply only to clients and affect their interaction with
the cluster.
- [client_max_dirty_bytes](#client_max_dirty_bytes)
- [client_max_dirty_ops](#client_max_dirty_ops)
- [client_enable_writeback](#client_enable_writeback)
- [client_max_buffered_bytes](#client_max_buffered_bytes)
- [client_max_buffered_ops](#client_max_buffered_ops)
- [client_max_writeback_iodepth](#client_max_writeback_iodepth)
## client_max_dirty_bytes
- Type: integer
- Default: 33554432
- Can be changed online: yes
Without [immediate_commit](layout-cluster.en.md#immediate_commit)=all this parameter sets the limit of "dirty"
(not committed by fsync) data allowed by the client before forcing an
additional fsync and committing the data. Also note that the client always
holds a copy of uncommitted data in memory so this setting also affects
RAM usage of clients.
## client_max_dirty_ops
- Type: integer
- Default: 1024
- Can be changed online: yes
Same as client_max_dirty_bytes, but instead of total size, limits the number
of uncommitted write operations.
## client_enable_writeback
- Type: boolean
- Default: false
- Can be changed online: yes
This parameter enables client-side write buffering. This means that write
requests are accumulated in memory for a short time before being sent to
a Vitastor cluster, which allows sending them in parallel and increases
performance of some applications. Writes are buffered until client forces
a flush with fsync() or until the amount of buffered writes exceeds the
limit.
Write buffering significantly increases performance of some applications,
for example, CrystalDiskMark under Windows (LOL :-D), but also any other
applications if they do writes in one of two non-optimal ways: either if
they do a lot of small (4 kb or so) sequential writes, or if they do a lot
of small random writes, but without any parallelism or asynchrony, and also
without calling fsync().
With write buffering enabled, you can expect around 22000 T1Q1 random write
iops in QEMU more or less regardless of the quality of your SSDs, and this
number is in fact bound by QEMU itself rather than Vitastor (check it
yourself by adding a "driver=null-co" disk in QEMU). Without write
buffering, the current record is 9900 iops, but the number is usually
even lower with non-ideal hardware, for example, it may be 5000 iops.
Even when this parameter is enabled, write buffering isn't enabled until
the client explicitly allows it, because enabling it without the client
being aware that its writes may be buffered may lead to data
loss. Because of this, older versions of clients don't support write
buffering at all, newer versions of the QEMU driver allow write buffering
only if it's enabled in disk settings with `-blockdev cache.direct=false`,
and newer versions of FIO only allow write buffering if you don't specify
`-direct=1`. NBD and NFS drivers allow write buffering by default.
You can overcome this restriction too with the `client_writeback_allowed`
parameter, but you shouldn't do that unless you **really** know what you
are doing.
## client_max_buffered_bytes
- Type: integer
- Default: 33554432
- Can be changed online: yes
Maximum total size of buffered writes which triggers write-back when reached.
## client_max_buffered_ops
- Type: integer
- Default: 1024
- Can be changed online: yes
Maximum number of buffered writes which triggers write-back when reached.
Multiple consecutive modified data regions are counted as 1 write here.
## client_max_writeback_iodepth
- Type: integer
- Default: 256
- Can be changed online: yes
Maximum number of parallel writes when flushing buffered data to the server.
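To illustrate how these parameters fit together: a minimal sketch of actually getting write-back in QEMU, assuming `client_enable_writeback` is set to `true` in the cluster or client configuration (for example in `/etc/vitastor/vitastor.conf`), and following the blockdev syntax used in the QEMU examples elsewhere in this changeset:

```
# The client allows buffering only when the disk is opened with cache.direct=false
qemu-system-x86_64 -enable-kvm -m 1024 \
 -blockdev '{"node-name":"drive-virtio-disk0","driver":"vitastor","image":"debian9",
    "cache":{"direct":false,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \
 -device 'virtio-blk-pci,scsi=off,bus=pci.0,addr=0x5,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1' \
 -vnc 0.0.0.0:0
# fio with the Vitastor engine buffers writes only when -direct=1 is NOT specified
```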

docs/config/client.ru.md

@@ -0,0 +1,103 @@
[Документация](../../README-ru.md#документация) → [Конфигурация](../config.ru.md) → Параметры клиентского кода
-----
[Read in English](client.en.md)
# Параметры клиентского кода
Данные параметры применяются только к клиентам Vitastor (QEMU, fio, NBD) и
затрагивают логику их работы с кластером.
- [client_max_dirty_bytes](#client_max_dirty_bytes)
- [client_max_dirty_ops](#client_max_dirty_ops)
- [client_enable_writeback](#client_enable_writeback)
- [client_max_buffered_bytes](#client_max_buffered_bytes)
- [client_max_buffered_ops](#client_max_buffered_ops)
- [client_max_writeback_iodepth](#client_max_writeback_iodepth)
## client_max_dirty_bytes
- Тип: целое число
- Значение по умолчанию: 33554432
- Можно менять на лету: да
При работе без [immediate_commit](layout-cluster.ru.md#immediate_commit)=all - это лимит объёма "грязных" (не
зафиксированных fsync-ом) данных, при достижении которого клиент будет
принудительно вызывать fsync и фиксировать данные. Также стоит иметь в виду,
что в этом случае до момента fsync клиент хранит копию незафиксированных
данных в памяти, то есть, настройка влияет на потребление памяти клиентами.
## client_max_dirty_ops
- Тип: целое число
- Значение по умолчанию: 1024
- Можно менять на лету: да
Аналогично client_max_dirty_bytes, но ограничивает количество
незафиксированных операций записи вместо их общего объёма.
## client_enable_writeback
- Тип: булево (да/нет)
- Значение по умолчанию: false
- Можно менять на лету: да
Данный параметр разрешает включать буферизацию записи в памяти. Буферизация
означает, что операции записи отправляются на кластер Vitastor не сразу, а
могут небольшое время накапливаться в памяти и сбрасываться сразу пакетами,
до тех пор, пока либо не будет превышен лимит неотправленных записей, либо
пока клиент не вызовет fsync.
Буферизация значительно повышает производительность некоторых приложений,
например, CrystalDiskMark в Windows (ха-ха :-D), но также и любых других,
которые пишут на диск неоптимально: либо последовательно, но мелкими блоками
(например, по 4 кб), либо случайно, но без параллелизма и без fsync - то
есть, например, отправляя 128 операций записи в разные места диска, но не
все сразу с помощью асинхронного I/O, а по одной.
В QEMU с буферизацией записи можно ожидать показателя примерно 22000
операций случайной записи в секунду в 1 поток и с глубиной очереди 1 (T1Q1)
без fsync, почти вне зависимости от того, насколько хороши ваши диски - эта
цифра упирается в сам QEMU. Без буферизации рекорд пока что - 9900 операций
в секунду, но на железе похуже может быть и поменьше, например, 5000 операций
в секунду.
При этом, даже если данный параметр включён, буферизация не включается, если
явно не разрешена клиентом, т.к. если клиент не знает, что запросы записи
буферизуются, это может приводить к потере данных. Поэтому в старых версиях
клиентских драйверов буферизация записи не включается вообще, в новых
версиях QEMU-драйвера включается, только если разрешена опцией диска
`-blockdev cache.direct=false`, а в fio - только если нет опции `-direct=1`.
В NBD и NFS драйверах буферизация записи разрешена по умолчанию.
Можно обойти и это ограничение с помощью параметра `client_writeback_allowed`,
но делать так не надо, если только вы не уверены в том, что делаете, на все
100%. :-)
## client_max_buffered_bytes
- Тип: целое число
- Значение по умолчанию: 33554432
- Можно менять на лету: да
Максимальный общий размер буферизованных записей, при достижении которого
начинается процесс сброса данных на сервер.
## client_max_buffered_ops
- Тип: целое число
- Значение по умолчанию: 1024
- Можно менять на лету: да
Максимальное количество буферизованных записей, при достижении которого
начинается процесс сброса данных на сервер. При этом несколько
последовательных изменённых областей здесь считаются 1 записью.
## client_max_writeback_iodepth
- Тип: целое число
- Значение по умолчанию: 256
- Можно менять на лету: да
Максимальное число параллельных операций записи при сбросе буферов на сервер.


@@ -30,7 +30,6 @@ between clients, OSDs and etcd.
- [etcd_slow_timeout](#etcd_slow_timeout)
- [etcd_keepalive_timeout](#etcd_keepalive_timeout)
- [etcd_ws_keepalive_timeout](#etcd_ws_keepalive_timeout)
- [client_dirty_limit](#client_dirty_limit)
## tcp_header_buffer_size
@@ -240,17 +239,3 @@ etcd_report_interval to guarantee that keepalive actually works.
etcd websocket ping interval required to keep the connection alive and
detect disconnections quickly.
## client_dirty_limit
- Type: integer
- Default: 33554432
- Can be changed online: yes
Without immediate_commit=all this parameter sets the limit of "dirty"
(not committed by fsync) data allowed by the client before forcing an
additional fsync and committing the data. Also note that the client always
holds a copy of uncommitted data in memory so this setting also affects
RAM usage of clients.
This parameter doesn't affect OSDs themselves.


@@ -30,7 +30,6 @@
- [etcd_slow_timeout](#etcd_slow_timeout)
- [etcd_keepalive_timeout](#etcd_keepalive_timeout)
- [etcd_ws_keepalive_timeout](#etcd_ws_keepalive_timeout)
- [client_dirty_limit](#client_dirty_limit)
## tcp_header_buffer_size
@@ -251,17 +250,3 @@ etcd_report_interval, чтобы keepalive гарантированно рабо
- Можно менять на лету: да
Интервал проверки живости вебсокет-подключений к etcd.
## client_dirty_limit
- Тип: целое число
- Значение по умолчанию: 33554432
- Можно менять на лету: да
При работе без immediate_commit=all - это лимит объёма "грязных" (не
зафиксированных fsync-ом) данных, при достижении которого клиент будет
принудительно вызывать fsync и фиксировать данные. Также стоит иметь в виду,
что в этом случае до момента fsync клиент хранит копию незафиксированных
данных в памяти, то есть, настройка влияет на потребление памяти клиентами.
Параметр не влияет на сами OSD.


@@ -11,6 +11,7 @@ initialization and can be changed - either with an OSD restart or, for some of
them, even without restarting by updating configuration in etcd.
- [etcd_report_interval](#etcd_report_interval)
- [etcd_stats_interval](#etcd_stats_interval)
- [run_primary](#run_primary)
- [osd_network](#osd_network)
- [bind_address](#bind_address)
@@ -56,11 +57,21 @@ them, even without restarting by updating configuration in etcd.
- Type: seconds
- Default: 5
Interval at which OSDs report their state to etcd. Affects OSD lease time
Interval at which OSDs report their liveness to etcd. Affects OSD lease time
and thus the failover speed. Lease time is equal to this parameter value
plus max_etcd_attempts * etcd_quick_timeout because it should be guaranteed
that every OSD always refreshes its lease in time.
## etcd_stats_interval
- Type: seconds
- Default: 30
Interval at which OSDs report their statistics to etcd. Highly affects the
imposed load on etcd, because statistics include a key for every OSD and
for every PG. At the same time, low statistic intervals make `vitastor-cli`
statistics more responsive.
## run_primary
- Type: boolean
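Both intervals are OSD parameters read from the global configuration, so a change can be rolled out by updating the config key in etcd (and restarting OSDs if the parameter is not applied online). A sketch, assuming the default `/vitastor` etcd_prefix and an example endpoint; note that this key holds the whole global config object, so merge new settings with the existing ones:

```
# Report statistics every 60 seconds instead of 30 to lower the load on etcd
etcdctl --endpoints=http://10.0.0.1:2379 put /vitastor/config/global \
  '{"etcd_stats_interval": 60}'
```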


@@ -12,6 +12,7 @@
изменения конфигурации в etcd.
- [etcd_report_interval](#etcd_report_interval)
- [etcd_stats_interval](#etcd_stats_interval)
- [run_primary](#run_primary)
- [osd_network](#osd_network)
- [bind_address](#bind_address)
@@ -57,11 +58,21 @@
- Тип: секунды
- Значение по умолчанию: 5
Интервал, с которым OSD обновляет своё состояние в etcd. Значение параметра
влияет на время резервации (lease) OSD и поэтому на скорость переключения
Интервал, с которым OSD сообщает о том, что жив, в etcd. Значение параметра
влияет на время резервации (lease) OSD и поэтому - на скорость переключения
при падении OSD. Время lease равняется значению этого параметра плюс
max_etcd_attempts * etcd_quick_timeout.
## etcd_stats_interval
- Тип: секунды
- Значение по умолчанию: 30
Интервал, с которым OSD обновляет свою статистику в etcd. Сильно влияет на
создаваемую нагрузку на etcd, потому что статистика содержит по ключу на
каждый OSD и на каждую PG. В то же время низкий интервал делает
статистику, печатаемую `vitastor-cli`, отзывчивей.
## run_primary
- Тип: булево (да/нет)


@@ -0,0 +1,4 @@
# Client Parameters
These parameters apply only to clients and affect their interaction with
the cluster.


@@ -0,0 +1,4 @@
# Параметры клиентского кода
Данные параметры применяются только к клиентам Vitastor (QEMU, fio, NBD) и
затрагивают логику их работы с кластером.

docs/config/src/client.yml

@@ -0,0 +1,124 @@
- name: client_max_dirty_bytes
type: int
default: 33554432
online: true
info: |
Without [immediate_commit](layout-cluster.en.md#immediate_commit)=all this parameter sets the limit of "dirty"
(not committed by fsync) data allowed by the client before forcing an
additional fsync and committing the data. Also note that the client always
holds a copy of uncommitted data in memory so this setting also affects
RAM usage of clients.
info_ru: |
При работе без [immediate_commit](layout-cluster.ru.md#immediate_commit)=all - это лимит объёма "грязных" (не
зафиксированных fsync-ом) данных, при достижении которого клиент будет
принудительно вызывать fsync и фиксировать данные. Также стоит иметь в виду,
что в этом случае до момента fsync клиент хранит копию незафиксированных
данных в памяти, то есть, настройка влияет на потребление памяти клиентами.
- name: client_max_dirty_ops
type: int
default: 1024
online: true
info: |
Same as client_max_dirty_bytes, but instead of total size, limits the number
of uncommitted write operations.
info_ru: |
Аналогично client_max_dirty_bytes, но ограничивает количество
незафиксированных операций записи вместо их общего объёма.
- name: client_enable_writeback
type: bool
default: false
online: true
info: |
This parameter enables client-side write buffering. This means that write
requests are accumulated in memory for a short time before being sent to
a Vitastor cluster, which allows sending them in parallel and increases
performance of some applications. Writes are buffered until client forces
a flush with fsync() or until the amount of buffered writes exceeds the
limit.
Write buffering significantly increases performance of some applications,
for example, CrystalDiskMark under Windows (LOL :-D), but also any other
applications if they do writes in one of two non-optimal ways: either if
they do a lot of small (4 kb or so) sequential writes, or if they do a lot
of small random writes, but without any parallelism or asynchrony, and also
without calling fsync().
With write buffering enabled, you can expect around 22000 T1Q1 random write
iops in QEMU more or less regardless of the quality of your SSDs, and this
number is in fact bound by QEMU itself rather than Vitastor (check it
yourself by adding a "driver=null-co" disk in QEMU). Without write
buffering, the current record is 9900 iops, but the number is usually
even lower with non-ideal hardware, for example, it may be 5000 iops.
Even when this parameter is enabled, write buffering isn't enabled until
the client explicitly allows it, because enabling it without the client
being aware that its writes may be buffered may lead to data
loss. Because of this, older versions of clients don't support write
buffering at all, newer versions of the QEMU driver allow write buffering
only if it's enabled in disk settings with `-blockdev cache.direct=false`,
and newer versions of FIO only allow write buffering if you don't specify
`-direct=1`. NBD and NFS drivers allow write buffering by default.
You can overcome this restriction too with the `client_writeback_allowed`
parameter, but you shouldn't do that unless you **really** know what you
are doing.
info_ru: |
Данный параметр разрешает включать буферизацию записи в памяти. Буферизация
означает, что операции записи отправляются на кластер Vitastor не сразу, а
могут небольшое время накапливаться в памяти и сбрасываться сразу пакетами,
до тех пор, пока либо не будет превышен лимит неотправленных записей, либо
пока клиент не вызовет fsync.
Буферизация значительно повышает производительность некоторых приложений,
например, CrystalDiskMark в Windows (ха-ха :-D), но также и любых других,
которые пишут на диск неоптимально: либо последовательно, но мелкими блоками
(например, по 4 кб), либо случайно, но без параллелизма и без fsync - то
есть, например, отправляя 128 операций записи в разные места диска, но не
все сразу с помощью асинхронного I/O, а по одной.
В QEMU с буферизацией записи можно ожидать показателя примерно 22000
операций случайной записи в секунду в 1 поток и с глубиной очереди 1 (T1Q1)
без fsync, почти вне зависимости от того, насколько хороши ваши диски - эта
цифра упирается в сам QEMU. Без буферизации рекорд пока что - 9900 операций
в секунду, но на железе похуже может быть и поменьше, например, 5000 операций
в секунду.
При этом, даже если данный параметр включён, буферизация не включается, если
явно не разрешена клиентом, т.к. если клиент не знает, что запросы записи
буферизуются, это может приводить к потере данных. Поэтому в старых версиях
клиентских драйверов буферизация записи не включается вообще, в новых
версиях QEMU-драйвера включается, только если разрешена опцией диска
`-blockdev cache.direct=false`, а в fio - только если нет опции `-direct=1`.
В NBD и NFS драйверах буферизация записи разрешена по умолчанию.
Можно обойти и это ограничение с помощью параметра `client_writeback_allowed`,
но делать так не надо, если только вы не уверены в том, что делаете, на все
100%. :-)
- name: client_max_buffered_bytes
type: int
default: 33554432
online: true
info: |
Maximum total size of buffered writes which triggers write-back when reached.
info_ru: |
Максимальный общий размер буферизованных записей, при достижении которого
начинается процесс сброса данных на сервер.
- name: client_max_buffered_ops
type: int
default: 1024
online: true
info: |
Maximum number of buffered writes which triggers write-back when reached.
Multiple consecutive modified data regions are counted as 1 write here.
info_ru: |
Максимальное количество буферизованных записей, при достижении которого
начинается процесс сброса данных на сервер. При этом несколько
последовательных изменённых областей здесь считаются 1 записью.
- name: client_max_writeback_iodepth
type: int
default: 256
online: true
info: |
Maximum number of parallel writes when flushing buffered data to the server.
info_ru: |
Максимальное число параллельных операций записи при сбросе буферов на сервер.


@@ -28,6 +28,8 @@
{{../../config/network.en.md|indent=2}}
{{../../config/client.en.md|indent=2}}
{{../../config/layout-cluster.en.md|indent=2}}
{{../../config/layout-osd.en.md|indent=2}}


@@ -28,6 +28,8 @@
{{../../config/network.ru.md|indent=2}}
{{../../config/client.ru.md|indent=2}}
{{../../config/layout-cluster.ru.md|indent=2}}
{{../../config/layout-osd.ru.md|indent=2}}


@@ -259,23 +259,3 @@
detect disconnections quickly.
info_ru: |
Интервал проверки живости вебсокет-подключений к etcd.
- name: client_dirty_limit
type: int
default: 33554432
online: true
info: |
Without immediate_commit=all this parameter sets the limit of "dirty"
(not committed by fsync) data allowed by the client before forcing an
additional fsync and committing the data. Also note that the client always
holds a copy of uncommitted data in memory so this setting also affects
RAM usage of clients.
This parameter doesn't affect OSDs themselves.
info_ru: |
При работе без immediate_commit=all - это лимит объёма "грязных" (не
зафиксированных fsync-ом) данных, при достижении которого клиент будет
принудительно вызывать fsync и фиксировать данные. Также стоит иметь в виду,
что в этом случае до момента fsync клиент хранит копию незафиксированных
данных в памяти, то есть, настройка влияет на потребление памяти клиентами.
Параметр не влияет на сами OSD.


@@ -2,15 +2,28 @@
type: sec
default: 5
info: |
Interval at which OSDs report their state to etcd. Affects OSD lease time
Interval at which OSDs report their liveness to etcd. Affects OSD lease time
and thus the failover speed. Lease time is equal to this parameter value
plus max_etcd_attempts * etcd_quick_timeout because it should be guaranteed
that every OSD always refreshes its lease in time.
info_ru: |
Интервал, с которым OSD обновляет своё состояние в etcd. Значение параметра
влияет на время резервации (lease) OSD и поэтому на скорость переключения
Интервал, с которым OSD сообщает о том, что жив, в etcd. Значение параметра
влияет на время резервации (lease) OSD и поэтому - на скорость переключения
при падении OSD. Время lease равняется значению этого параметра плюс
max_etcd_attempts * etcd_quick_timeout.
- name: etcd_stats_interval
type: sec
default: 30
info: |
Interval at which OSDs report their statistics to etcd. Highly affects the
imposed load on etcd, because statistics include a key for every OSD and
for every PG. At the same time, low statistic intervals make `vitastor-cli`
statistics more responsive.
info_ru: |
Интервал, с которым OSD обновляет свою статистику в etcd. Сильно влияет на
создаваемую нагрузку на etcd, потому что статистика содержит по ключу на
каждый OSD и на каждую PG. В то же время низкий интервал делает
статистику, печатаемую `vitastor-cli`, отзывчивей.
- name: run_primary
type: bool
default: true


@@ -31,6 +31,7 @@
- [RDMA/RoCEv2 support via libibverbs](../config/network.en.md#rdma_device)
- [Scrubbing](../config/osd.en.md#auto_scrub) (verification of copies)
- [Checksums](../config/layout-osd.en.md#data_csum_type)
- [Client write-back cache](../config/client.en.md#client_enable_writeback)
## Plugins and tools
@@ -50,13 +51,15 @@
The following features are planned for the future:
- File system
- Control plane optimisation
- Other administrative tools
- Web GUI
- OpenNebula plugin
- iSCSI proxy
- iSCSI and NVMeoF gateways
- Multi-threaded client
- Faster failover
- S3
- Tiered storage (SSD caching)
- NVDIMM support
- Compression (possibly)
- Read caching using system page cache (possibly)


@@ -33,6 +33,7 @@
- [Поддержка RDMA/RoCEv2 через libibverbs](../config/network.ru.md#rdma_device)
- [Фоновая проверка целостности](../config/osd.ru.md#auto_scrub) (сверка копий)
- [Контрольные суммы](../config/layout-osd.ru.md#data_csum_type)
- [Буферизация записи на стороне клиента](../config/client.ru.md#client_enable_writeback)
## Драйверы и инструменты
@@ -50,12 +51,15 @@
## Планы развития
- Файловая система
- Оптимизация слоя управления
- Другие инструменты администрирования
- Web-интерфейс
- Плагин для OpenNebula
- iSCSI-прокси
- iSCSI и NVMeoF прокси
- Многопоточный клиент
- Более быстрое переключение при отказах
- S3
- Поддержка SSD-кэширования (tiered storage)
- Поддержка NVDIMM
- Возможно, сжатие


@@ -34,6 +34,20 @@ qemu-system-x86_64 -enable-kvm -m 1024 \
-vnc 0.0.0.0:0
```
With a separate I/O thread:
```
qemu-system-x86_64 -enable-kvm -m 1024 \
-object iothread,id=vitastor1 \
-blockdev '{"node-name":"drive-virtio-disk0","driver":"vitastor","image":"debian9",
"cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \
-device 'virtio-blk-pci,iothread=vitastor1,scsi=off,bus=pci.0,addr=0x5,drive=drive-virtio-disk0,
id=virtio-disk0,bootindex=1,write-cache=off' \
-vnc 0.0.0.0:0
```
You can also specify inode ID, pool and size manually instead of `:image=<IMAGE>` option: `:pool=<POOL>:inode=<INODE>:size=<SIZE>`.
## qemu-img
For qemu-img, you should use `vitastor:etcd_host=<HOST>:image=<IMAGE>` as filename.
@@ -84,6 +98,29 @@ This can be used for backups. Just note that exporting an image that is currentl
is of course unsafe and doesn't produce a consistent result, so only export snapshots if you do this
on a live VM.
## vhost-user-blk
QEMU, starting with 6.0, includes support for attaching disks via a separate
userspace worker process, called `vhost-user-blk`. It usually has slightly (20-30 us)
lower latency.
Example commands to use it with Vitastor:
```
qemu-storage-daemon \
--daemonize \
--blockdev '{"node-name":"drive-virtio-disk1","driver":"vitastor","image":"testosd1","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \
--export type=vhost-user-blk,id=vitastor1,node-name=drive-virtio-disk1,addr.type=unix,addr.path=/run/vitastor1-user-blk.sock,writable=on,num-queues=1
qemu-system-x86_64 -enable-kvm -m 2048 -M accel=kvm,memory-backend=mem \
-object memory-backend-memfd,id=mem,size=2G,share=on \
-chardev socket,id=vitastor1,reconnect=1,path=/run/vitastor1-user-blk.sock \
-device vhost-user-blk-pci,chardev=vitastor1,num-queues=1,config-wce=off \
-vnc 0.0.0.0:0
```
The memfd memory-backend is crucial; vhost-user-blk does not work without it.
## VDUSE
Linux kernel, starting with version 5.15, supports a new interface for attaching virtual disks


@@ -36,6 +36,18 @@ qemu-system-x86_64 -enable-kvm -m 1024 \
-vnc 0.0.0.0:0
```
С отдельным потоком ввода-вывода:
```
qemu-system-x86_64 -enable-kvm -m 1024 \
-object iothread,id=vitastor1 \
-blockdev '{"node-name":"drive-virtio-disk0","driver":"vitastor","image":"debian9",
"cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \
-device 'virtio-blk-pci,iothread=vitastor1,scsi=off,bus=pci.0,addr=0x5,drive=drive-virtio-disk0,
id=virtio-disk0,bootindex=1,write-cache=off' \
-vnc 0.0.0.0:0
```
Вместо `:image=<IMAGE>` также можно указывать номер инода, пул и размер: `:pool=<POOL>:inode=<INODE>:size=<SIZE>`.
## qemu-img
@@ -88,6 +100,29 @@ qemu-img rebase -u -b '' testimg.qcow2
в то же время идёт запись, небезопасно - результат чтения не будет целостным. Так что если вы работаете
с активными виртуальными машинами, экспортируйте только их снимки, но не сам образ.
## vhost-user-blk
QEMU, начиная с 6.0, позволяет подключать диски через отдельный рабочий процесс.
Этот метод подключения называется `vhost-user-blk` и обычно имеет чуть меньшую
задержку (ниже на 20-30 микросекунд, чем при обычном методе).
Пример команд для использования vhost-user-blk с Vitastor:
```
qemu-storage-daemon \
--daemonize \
--blockdev '{"node-name":"drive-virtio-disk1","driver":"vitastor","image":"testosd1","cache":{"direct":true,"no-flush":false},"auto-read-only":true,"discard":"unmap"}' \
--export type=vhost-user-blk,id=vitastor1,node-name=drive-virtio-disk1,addr.type=unix,addr.path=/run/vitastor1-user-blk.sock,writable=on,num-queues=1
qemu-system-x86_64 -enable-kvm -m 2048 -M accel=kvm,memory-backend=mem \
-object memory-backend-memfd,id=mem,size=2G,share=on \
-chardev socket,id=vitastor1,reconnect=1,path=/run/vitastor1-user-blk.sock \
-device vhost-user-blk-pci,chardev=vitastor1,num-queues=1,config-wce=off \
-vnc 0.0.0.0:0
```
Здесь критична опция memory-backend-memfd, vhost-user-blk без неё не работает.
## VDUSE
В Linux, начиная с версии ядра 5.15, доступен новый интерфейс для подключения виртуальных дисков


@@ -3,5 +3,5 @@ SUBSYSTEM=="block", ENV{ID_PART_ENTRY_TYPE}=="e7009fac-a5a1-4d72-af72-53de130599
IMPORT{program}="/usr/bin/vitastor-disk udev $devnode", \
SYMLINK+="vitastor/$env{VITASTOR_ALIAS}"
ENV{VITASTOR_OSD_NUM}!="", ACTION=="add", RUN{program}+="/usr/bin/systemctl enable --now vitastor-osd@$env{VITASTOR_OSD_NUM}"
ENV{VITASTOR_OSD_NUM}!="", ACTION=="remove", RUN{program}+="/usr/bin/systemctl disable --now vitastor-osd@$env{VITASTOR_OSD_NUM}"
ENV{VITASTOR_OSD_NUM}!="", ACTION=="add", RUN{program}+="/usr/bin/systemctl enable --now --no-block vitastor-osd@$env{VITASTOR_OSD_NUM}"
ENV{VITASTOR_OSD_NUM}!="", ACTION=="remove", RUN{program}+="/usr/bin/systemctl disable --now --no-block vitastor-osd@$env{VITASTOR_OSD_NUM}"


@@ -99,6 +99,7 @@ const etcd_tree = {
etcd_ws_keepalive_interval: 30, // seconds
// osd
etcd_report_interval: 5, // seconds
etcd_stats_interval: 30, // seconds
run_primary: true,
osd_network: null, // "192.168.7.0/24" or an array of masks
bind_address: "0.0.0.0",
@@ -396,8 +397,8 @@ class Mon
this.etcd_prefix = this.etcd_prefix.replace(/\/\/+/g, '/').replace(/^\/?(.*[^\/])\/?$/, '/$1');
this.etcd_start_timeout = (config.etcd_start_timeout || 5) * 1000;
this.state = JSON.parse(JSON.stringify(this.constructor.etcd_tree));
this.prev_stats = { osd_stats: {}, osd_diff: {} };
this.signals_set = false;
this.stat_time = Date.now();
this.ws = null;
this.ws_alive = false;
this.ws_keepalive_timer = null;
@@ -551,9 +552,9 @@ class Mon
const cur_addr = this.pick_next_etcd();
const base = 'ws'+cur_addr.substr(4);
let now = Date.now();
if (tried[base] && now-tried[base] < timeout)
if (tried[base] && now-tried[base] < this.etcd_start_timeout)
{
await new Promise(ok => setTimeout(ok, timeout-(now-tried[base])));
await new Promise(ok => setTimeout(ok, this.etcd_start_timeout-(now-tried[base])));
now = Date.now();
}
tried[base] = now;
@@ -1458,15 +1459,15 @@ class Mon
}
}
derive_osd_stats(st, prev)
derive_osd_stats(st, prev, prev_diff)
{
const zero_stats = { op: { bps: 0n, iops: 0n, lat: 0n }, subop: { iops: 0n, lat: 0n }, recovery: { bps: 0n, iops: 0n } };
const diff = { op_stats: {}, subop_stats: {}, recovery_stats: {} };
if (!st || !st.time || prev && (prev.time || this.stat_time/1000) >= st.time)
const diff = { op_stats: {}, subop_stats: {}, recovery_stats: {}, inode_stats: {} };
if (!st || !st.time || !prev || prev.time >= st.time)
{
return diff;
return prev_diff || diff;
}
const timediff = BigInt(st.time*1000 - (prev && prev.time*1000 || this.stat_time));
const timediff = BigInt(st.time*1000 - prev.time*1000);
for (const op in st.op_stats||{})
{
const pr = prev && prev.op_stats && prev.op_stats[op];
@@ -1498,25 +1499,47 @@ class Mon
if (n > 0)
diff.recovery_stats[op] = { ...c, bps: b*1000n/timediff, iops: n*1000n/timediff };
}
for (const pool_id in st.inode_stats||{})
{
const pool_diff = diff.inode_stats[pool_id] = {};
for (const inode_num in st.inode_stats[pool_id])
{
const inode_diff = diff.inode_stats[pool_id][inode_num] = {};
for (const op of [ 'read', 'write', 'delete' ])
{
const c = st.inode_stats[pool_id][inode_num][op];
const pr = prev && prev.inode_stats && prev.inode_stats[pool_id] &&
prev.inode_stats[pool_id][inode_num] && prev.inode_stats[pool_id][inode_num][op];
const n = BigInt(c.count||0) - BigInt(pr && pr.count||0);
inode_diff[op] = {
bps: (BigInt(c.bytes||0) - BigInt(pr && pr.bytes||0))*1000n/timediff,
iops: n*1000n/timediff,
lat: (BigInt(c.usec||0) - BigInt(pr && pr.usec||0))/(n || 1n),
};
}
}
}
return diff;
}
sum_op_stats(timestamp, prev_stats)
sum_op_stats()
{
const sum_diff = { op_stats: {}, subop_stats: {}, recovery_stats: {} };
if (!prev_stats || prev_stats.timestamp >= timestamp)
for (const osd in this.state.osd.stats)
{
return sum_diff;
const cur = { ...this.state.osd.stats[osd], inode_stats: this.state.osd.inodestats[osd]||{} };
this.prev_stats.osd_diff[osd] = this.derive_osd_stats(
cur, this.prev_stats.osd_stats[osd], this.prev_stats.osd_diff[osd]
);
this.prev_stats.osd_stats[osd] = cur;
}
const tm = BigInt(timestamp - (prev_stats.timestamp || 0));
const sum_diff = { op_stats: {}, subop_stats: {}, recovery_stats: {} };
// Sum derived values instead of deriving summed
for (const osd in this.state.osd.stats)
{
const derived = this.derive_osd_stats(this.state.osd.stats[osd],
this.prev_stats && this.prev_stats.osd_stats && this.prev_stats.osd_stats[osd]);
for (const type in derived)
const derived = this.prev_stats.osd_diff[osd];
for (const type in sum_diff)
{
for (const op in derived[type])
for (const op in derived[type]||{})
{
for (const k in derived[type][op])
{
@@ -1573,14 +1596,14 @@ class Mon
return { object_counts, object_bytes };
}
sum_inode_stats(prev_stats, timestamp, prev_timestamp)
sum_inode_stats()
{
const inode_stats = {};
const inode_stub = () => ({
raw_used: 0n,
read: { count: 0n, usec: 0n, bytes: 0n },
write: { count: 0n, usec: 0n, bytes: 0n },
delete: { count: 0n, usec: 0n, bytes: 0n },
read: { count: 0n, usec: 0n, bytes: 0n, bps: 0n, iops: 0n, lat: 0n },
write: { count: 0n, usec: 0n, bytes: 0n, bps: 0n, iops: 0n, lat: 0n },
delete: { count: 0n, usec: 0n, bytes: 0n, bps: 0n, iops: 0n, lat: 0n },
});
const seen_pools = {};
for (const pool_id in this.state.config.pools)
@@ -1632,11 +1655,25 @@ class Mon
}
}
}
if (prev_stats && prev_timestamp >= timestamp)
for (const osd in this.prev_stats.osd_diff)
{
prev_stats = null;
for (const pool_id in this.prev_stats.osd_diff[osd].inode_stats)
{
for (const inode_num in this.prev_stats.osd_diff[osd].inode_stats[pool_id])
{
inode_stats[pool_id][inode_num] = inode_stats[pool_id][inode_num] || inode_stub();
for (const op of [ 'read', 'write', 'delete' ])
{
const op_diff = this.prev_stats.osd_diff[osd].inode_stats[pool_id][inode_num][op] || {};
const op_st = inode_stats[pool_id][inode_num][op];
op_st.bps += op_diff.bps;
op_st.iops += op_diff.iops;
op_st.lat += op_diff.lat;
op_st.n_osd = (op_st.n_osd || 0) + 1;
}
}
}
}
const tm = prev_stats ? BigInt(timestamp - prev_timestamp) : 0;
for (const pool_id in inode_stats)
{
for (const inode_num in inode_stats[pool_id])
@@ -1645,11 +1682,12 @@ class Mon
for (const op of [ 'read', 'write', 'delete' ])
{
const op_st = inode_stats[pool_id][inode_num][op];
const prev_st = prev_stats && prev_stats[pool_id] && prev_stats[pool_id][inode_num] && prev_stats[pool_id][inode_num][op];
op_st.bps = prev_st ? (op_st.bytes - prev_st.bytes) * 1000n / tm : 0;
op_st.iops = prev_st ? (op_st.count - prev_st.count) * 1000n / tm : 0;
op_st.lat = prev_st ? (op_st.usec - prev_st.usec) / ((op_st.count - prev_st.count) || 1n) : 0;
if (op_st.bps > 0 || op_st.iops > 0 || op_st.lat > 0)
if (op_st.n_osd)
{
op_st.lat /= BigInt(op_st.n_osd);
delete op_st.n_osd;
}
if (op_st.bps > 0 || op_st.iops > 0)
nonzero = true;
}
if (!nonzero && (!this.state.config.inode[pool_id] || !this.state.config.inode[pool_id][inode_num]))
@@ -1682,15 +1720,9 @@ class Mon
async update_total_stats()
{
const txn = [];
const timestamp = Date.now();
const { object_counts, object_bytes } = this.sum_object_counts();
let stats = this.sum_op_stats(timestamp, this.prev_stats);
let { inode_stats, seen_pools } = this.sum_inode_stats(
this.prev_stats ? this.prev_stats.inode_stats : null,
timestamp, this.prev_stats ? this.prev_stats.timestamp : null
);
this.prev_stats = { timestamp, inode_stats, osd_stats: { ...this.state.osd.stats } };
this.stat_time = Date.now();
let stats = this.sum_op_stats();
let { inode_stats, seen_pools } = this.sum_inode_stats();
stats.object_counts = object_counts;
stats.object_bytes = object_bytes;
stats = this.serialize_bigints(stats);
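
To make the arithmetic above easier to follow: the new derive_osd_stats() computes rates from the difference of two cumulative samples, and sum_op_stats()/sum_inode_stats() now sum those per-OSD rates (averaging latency over the number of reporting OSDs) instead of deriving rates from already-summed totals. A standalone numeric illustration of the same per-OSD derivation, written in C++ only for brevity and not part of the change itself:

#include <cstdint>
#include <cstdio>

struct op_counters { uint64_t count, bytes, usec; }; // cumulative totals reported by an OSD

int main()
{
    op_counters prev = { 1000, 4096000, 500000 };   // previous sample
    op_counters cur  = { 1600, 6553600, 800000 };   // current sample
    uint64_t timediff_ms = 5000;                    // st.time*1000 - prev.time*1000
    uint64_t n    = cur.count - prev.count;
    uint64_t bps  = (cur.bytes - prev.bytes) * 1000 / timediff_ms; // bytes per second
    uint64_t iops = n * 1000 / timediff_ms;                        // operations per second
    uint64_t lat  = (cur.usec - prev.usec) / (n ? n : 1);          // average usec per op
    printf("bps=%lu iops=%lu lat=%lu\n", bps, iops, lat);          // bps=491520 iops=120 lat=500
    return 0;
}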


@@ -1,6 +1,6 @@
{
"name": "vitastor-mon",
"version": "1.0.0",
"version": "1.1.0",
"description": "Vitastor SDS monitor service",
"main": "mon-main.js",
"scripts": {


@@ -50,7 +50,7 @@ from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume import volume_utils
VERSION = '1.0.0'
VERSION = '1.1.0'
LOG = logging.getLogger(__name__)


@@ -24,4 +24,4 @@ rm fio
mv fio-copy fio
FIO=`rpm -qi fio | perl -e 'while(<>) { /^Epoch[\s:]+(\S+)/ && print "$1:"; /^Version[\s:]+(\S+)/ && print $1; /^Release[\s:]+(\S+)/ && print "-$1"; }'`
perl -i -pe 's/(Requires:\s*fio)([^\n]+)?/$1 = '$FIO'/' $VITASTOR/rpm/vitastor-el$EL.spec
tar --transform 's#^#vitastor-1.0.0/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-1.0.0$(rpm --eval '%dist').tar.gz *
tar --transform 's#^#vitastor-1.1.0/#' --exclude 'rpm/*.rpm' -czf $VITASTOR/../vitastor-1.1.0$(rpm --eval '%dist').tar.gz *


@@ -35,7 +35,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
cp /root/vitastor-1.0.0.el7.tar.gz ~/rpmbuild/SOURCES; \
cp /root/vitastor-1.1.0.el7.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el7.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \


@@ -1,11 +1,11 @@
Name: vitastor
Version: 1.0.0
Version: 1.1.0
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
Source0: vitastor-1.0.0.el7.tar.gz
Source0: vitastor-1.1.0.el7.tar.gz
BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel


@@ -35,7 +35,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
cp /root/vitastor-1.0.0.el8.tar.gz ~/rpmbuild/SOURCES; \
cp /root/vitastor-1.1.0.el8.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el8.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \


@@ -1,11 +1,11 @@
Name: vitastor
Version: 1.0.0
Version: 1.1.0
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
Source0: vitastor-1.0.0.el8.tar.gz
Source0: vitastor-1.1.0.el8.tar.gz
BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel


@@ -18,7 +18,7 @@ ADD . /root/vitastor
RUN set -e; \
cd /root/vitastor/rpm; \
sh build-tarball.sh; \
cp /root/vitastor-1.0.0.el9.tar.gz ~/rpmbuild/SOURCES; \
cp /root/vitastor-1.1.0.el9.tar.gz ~/rpmbuild/SOURCES; \
cp vitastor-el9.spec ~/rpmbuild/SPECS/vitastor.spec; \
cd ~/rpmbuild/SPECS/; \
rpmbuild -ba vitastor.spec; \


@@ -1,11 +1,11 @@
Name: vitastor
Version: 1.0.0
Version: 1.1.0
Release: 1%{?dist}
Summary: Vitastor, a fast software-defined clustered block storage
License: Vitastor Network Public License 1.1
URL: https://vitastor.io/
Source0: vitastor-1.0.0.el9.tar.gz
Source0: vitastor-1.1.0.el9.tar.gz
BuildRequires: liburing-devel >= 0.6
BuildRequires: gperftools-devel


@@ -16,7 +16,7 @@ if("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/local/?$")
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}")
endif()
add_definitions(-DVERSION="1.0.0")
add_definitions(-DVERSION="1.1.0")
add_definitions(-Wall -Wno-sign-compare -Wno-comment -Wno-parentheses -Wno-pointer-arith -fdiagnostics-color=always -I ${CMAKE_SOURCE_DIR}/src)
if (${WITH_ASAN})
add_definitions(-fsanitize=address -fno-omit-frame-pointer)


@@ -82,8 +82,3 @@ uint32_t blockstore_t::get_bitmap_granularity()
{
return impl->get_bitmap_granularity();
}
bool blockstore_t::wants_fsync()
{
return impl->wants_fsync();
}


@@ -226,7 +226,4 @@ public:
uint64_t get_journal_size();
uint32_t get_bitmap_granularity();
// Returns true if writing can stall due to a lack of fsync
bool wants_fsync();
};


@@ -167,7 +167,7 @@ void blockstore_impl_t::loop()
// wait for all big writes to complete, submit data device fsync
// wait for the data device fsync to complete, then submit journal writes for big writes
// then submit an fsync operation
if (0 && has_writes)
if (has_writes)
{
// Can't submit SYNC before previous writes
continue;
@@ -734,15 +734,3 @@ void blockstore_impl_t::disk_error_abort(const char *op, int retval, int expecte
fprintf(stderr, "Disk %s failed: result is %d, expected %d. Can't continue, sorry :-(\n", op, retval, expected);
exit(1);
}
bool blockstore_impl_t::wants_fsync()
{
if (!unstable_writes.size())
{
return false;
}
uint64_t journal_free_space = journal.next_free < journal.used_start
? (journal.used_start - journal.next_free)
: (journal.len - journal.next_free + journal.used_start - journal.block_size);
return journal_fsync_feedback_limit > 0 && journal.len-journal_free_space >= journal_fsync_feedback_limit;
}


@@ -264,8 +264,6 @@ class blockstore_impl_t
int throttle_threshold_us = 50;
// Maximum writes between automatically added fsync operations
uint64_t autosync_writes = 128;
// Maximum free space in the journal in bytes to start sending fsync feedback to primary OSDs
uint64_t journal_fsync_feedback_limit = 0;
/******* END OF OPTIONS *******/
struct ring_consumer_t ring_consumer;
@@ -435,6 +433,4 @@ public:
inline uint64_t get_free_block_count() { return data_alloc->get_free_count(); }
inline uint32_t get_bitmap_granularity() { return dsk.disk_alignment; }
inline uint64_t get_journal_size() { return dsk.journal_len; }
bool wants_fsync();
};


@@ -4,25 +4,6 @@
#include <sys/file.h>
#include "blockstore_impl.h"
static uint64_t parse_fsync_feedback(blockstore_config_t & config, uint64_t journal_len)
{
uint64_t journal_fsync_feedback_limit = 0;
if (config.find("journal_min_free_bytes") == config.end() &&
config.find("journal_min_free_percent") == config.end())
{
journal_fsync_feedback_limit = 90 * journal_len / 100;
}
else
{
journal_fsync_feedback_limit = strtoull(config["journal_min_free_bytes"].c_str(), NULL, 10);
if (!journal_fsync_feedback_limit)
{
journal_fsync_feedback_limit = strtoull(config["journal_min_free_percent"].c_str(), NULL, 10) * journal_len / 100;
}
}
return journal_fsync_feedback_limit;
}
void blockstore_impl_t::parse_config(blockstore_config_t & config, bool init)
{
// Online-configurable options:
@@ -72,8 +53,6 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config, bool init)
}
if (!init)
{
// has to be parsed after dsk.parse_config(), thus repeated here for online update
journal_fsync_feedback_limit = parse_fsync_feedback(config, journal.len);
return;
}
// Offline-configurable options:
@@ -117,7 +96,6 @@ void blockstore_impl_t::parse_config(blockstore_config_t & config, bool init)
config["journal_no_same_sector_overwrites"] == "1" || config["journal_no_same_sector_overwrites"] == "yes";
journal.inmemory = config["inmemory_journal"] != "false" && config["inmemory_journal"] != "0" &&
config["inmemory_journal"] != "no";
journal_fsync_feedback_limit = parse_fsync_feedback(config, journal.len);
// Validate
if (journal.sector_count < 2)
{


@@ -16,6 +16,7 @@ int blockstore_impl_t::continue_sync(blockstore_op_t *op)
{
if (immediate_commit == IMMEDIATE_ALL)
{
// We can return immediately because sync is only dequeued after all previous writes
op->retval = 0;
FINISH_OP(op);
return 2;


@@ -289,13 +289,18 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
printf("Restoring %lx:%lx version: v%lu -> v%lu\n", op->oid.inode, op->oid.stripe, op->version, PRIV(op)->real_version);
#endif
auto prev_it = dirty_it;
prev_it--;
if (prev_it->first.oid == op->oid && prev_it->first.version >= PRIV(op)->real_version)
if (prev_it != dirty_db.begin())
{
// Original version is still invalid
// All subsequent writes to the same object must be canceled too
cancel_all_writes(op, dirty_it, -EEXIST);
return 2;
prev_it--;
if (prev_it->first.oid == op->oid && prev_it->first.version >= PRIV(op)->real_version)
{
// Original version is still invalid
// All subsequent writes to the same object must be canceled too
printf("Tried to write %lx:%lx v%lu after delete (old version v%lu), but already have v%lu\n",
op->oid.inode, op->oid.stripe, PRIV(op)->real_version, op->version, prev_it->first.version);
cancel_all_writes(op, dirty_it, -EEXIST);
return 2;
}
}
op->version = PRIV(op)->real_version;
PRIV(op)->real_version = 0;
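
The extra check against dirty_db.begin() added above is the actual bug fix: decrementing an iterator that already equals begin() is undefined behaviour in C++, so the previous entry may only be inspected after that check. A minimal standalone sketch of the safe pattern, using a plain std::map instead of dirty_db (illustrative only):

#include <cstdio>
#include <map>

int main()
{
    std::map<int, int> m = { { 5, 0 }, { 10, 1 } };
    auto it = m.find(10);
    if (it != m.begin())
    {
        auto prev = it;
        prev--;                              // safe: an earlier element is guaranteed to exist
        printf("previous key: %d\n", prev->first);
    }
    else
        printf("no previous element\n");     // taken when it already points at the first element
    return 0;
}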


@@ -174,7 +174,7 @@ resume_1:
{ "size", 0 },
{ "readonly", false },
{ "pool_id", (uint64_t)INODE_POOL(inode_num) },
{ "pool_name", pool_it == parent->cli->st_cli.pool_config.end()
{ "pool_name", pool_it != parent->cli->st_cli.pool_config.end()
? (pool_it->second.name == "" ? "<Unnamed>" : pool_it->second.name) : "?" },
{ "inode_num", INODE_NO_POOL(inode_num) },
{ "inode_id", inode_num },


@@ -169,46 +169,52 @@ void cluster_client_t::calc_wait(cluster_op_t *op)
void cluster_client_t::inc_wait(uint64_t opcode, uint64_t flags, cluster_op_t *next, int inc)
{
if (opcode == OSD_OP_WRITE)
if (opcode != OSD_OP_WRITE && opcode != OSD_OP_SYNC)
{
while (next)
{
auto n2 = next->next;
if (next->opcode == OSD_OP_SYNC && (!(flags & OP_IMMEDIATE_COMMIT) || enable_writeback) ||
next->opcode == OSD_OP_WRITE && (flags & OP_FLUSH_BUFFER) && !(next->flags & OP_FLUSH_BUFFER))
{
next->prev_wait += inc;
assert(next->prev_wait >= 0);
if (!next->prev_wait)
{
if (next->opcode == OSD_OP_SYNC)
continue_sync(next);
else
continue_rw(next);
}
}
next = n2;
}
return;
}
else if (opcode == OSD_OP_SYNC)
cluster_op_t *bh_ops_local[32], **bh_ops = bh_ops_local;
int bh_op_count = 0, bh_op_max = 32;
while (next)
{
while (next)
auto n2 = next->next;
if (opcode == OSD_OP_WRITE
? (next->opcode == OSD_OP_SYNC && (!(flags & OP_IMMEDIATE_COMMIT) || enable_writeback) ||
next->opcode == OSD_OP_WRITE && (flags & OP_FLUSH_BUFFER) && !(next->flags & OP_FLUSH_BUFFER))
: (next->opcode == OSD_OP_SYNC || next->opcode == OSD_OP_WRITE))
{
auto n2 = next->next;
if (next->opcode == OSD_OP_SYNC || next->opcode == OSD_OP_WRITE)
next->prev_wait += inc;
assert(next->prev_wait >= 0);
if (!next->prev_wait)
{
next->prev_wait += inc;
assert(next->prev_wait >= 0);
if (!next->prev_wait)
// Kind of std::vector with local "small vector optimisation"
if (bh_op_count >= bh_op_max)
{
if (next->opcode == OSD_OP_SYNC)
continue_sync(next);
else
continue_rw(next);
bh_op_max *= 2;
cluster_op_t **n = (cluster_op_t**)malloc_or_die(sizeof(cluster_op_t*) * bh_op_max);
memcpy(n, bh_ops, sizeof(cluster_op_t*) * bh_op_count);
if (bh_ops != bh_ops_local)
{
free(bh_ops);
}
bh_ops = n;
}
bh_ops[bh_op_count++] = next;
}
next = n2;
}
next = n2;
}
for (int i = 0; i < bh_op_count; i++)
{
cluster_op_t *next = bh_ops[i];
if (next->opcode == OSD_OP_SYNC)
continue_sync(next);
else
continue_rw(next);
}
if (bh_ops != bh_ops_local)
{
free(bh_ops);
}
}
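
The rewritten inc_wait() above first walks the dependency list and only then resumes the collected operations, so continue_rw()/continue_sync() can no longer re-enter and modify the list while it is being traversed. The operations to resume are gathered into a small on-stack array that spills to the heap only on overflow. A simplified, hedged sketch of that pattern with generic names (not the project's types; real code would use malloc_or_die instead of bare malloc):

#include <cstdlib>
#include <cstring>

struct item_t
{
    item_t *next = nullptr;
    bool ready = false;
    void resume() { /* may freely modify the list here */ }
};

static void collect_then_resume(item_t *head)
{
    item_t *local[32], **items = local;      // "small vector optimisation"
    int count = 0, cap = 32;
    for (item_t *cur = head; cur; )
    {
        item_t *next = cur->next;            // read the link before resume() can change it
        if (cur->ready)
        {
            if (count >= cap)
            {
                cap *= 2;
                item_t **n = (item_t**)malloc(sizeof(item_t*) * cap);
                memcpy(n, items, sizeof(item_t*) * count);
                if (items != local)
                    free(items);
                items = n;
            }
            items[count++] = cur;
        }
        cur = next;
    }
    for (int i = 0; i < count; i++)
        items[i]->resume();                  // deferred until traversal is finished
    if (items != local)
        free(items);
}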


@@ -23,19 +23,24 @@ epoll_manager_t::epoll_manager_t(ring_loop_t *ringloop)
tfd = new timerfd_manager_t([this](int fd, bool wr, std::function<void(int, int)> handler) { set_fd_handler(fd, wr, handler); });
consumer.loop = [this]()
if (ringloop)
{
if (pending)
handle_epoll_events();
};
ringloop->register_consumer(&consumer);
handle_epoll_events();
consumer.loop = [this]()
{
if (pending)
handle_uring_event();
};
ringloop->register_consumer(&consumer);
handle_uring_event();
}
}
epoll_manager_t::~epoll_manager_t()
{
ringloop->unregister_consumer(&consumer);
if (ringloop)
{
ringloop->unregister_consumer(&consumer);
}
if (tfd)
{
delete tfd;
@@ -44,6 +49,11 @@ epoll_manager_t::~epoll_manager_t()
close(epoll_fd);
}
int epoll_manager_t::get_fd()
{
return epoll_fd;
}
void epoll_manager_t::set_fd_handler(int fd, bool wr, std::function<void(int, int)> handler)
{
if (handler != NULL)
@@ -75,7 +85,7 @@ void epoll_manager_t::set_fd_handler(int fd, bool wr, std::function<void(int, in
}
}
void epoll_manager_t::handle_epoll_events()
void epoll_manager_t::handle_uring_event()
{
io_uring_sqe *sqe = ringloop->get_sqe();
if (!sqe)
@@ -95,14 +105,20 @@ void epoll_manager_t::handle_epoll_events()
{
throw std::runtime_error(std::string("epoll failed: ") + strerror(-data->res));
}
handle_epoll_events();
handle_uring_event();
};
ringloop->submit();
handle_events(0);
}
void epoll_manager_t::handle_events(int timeout)
{
int nfds;
epoll_event events[MAX_EPOLL_EVENTS];
do
{
nfds = epoll_wait(epoll_fd, events, MAX_EPOLL_EVENTS, 0);
nfds = epoll_wait(epoll_fd, events, MAX_EPOLL_EVENTS, timeout);
timeout = 0;
for (int i = 0; i < nfds; i++)
{
auto cb_it = epoll_handlers.find(events[i].data.fd);


@@ -15,11 +15,14 @@ class epoll_manager_t
ring_consumer_t consumer;
ring_loop_t *ringloop;
std::map<int, std::function<void(int, int)>> epoll_handlers;
void handle_uring_event();
public:
epoll_manager_t(ring_loop_t *ringloop);
~epoll_manager_t();
int get_fd();
void set_fd_handler(int fd, bool wr, std::function<void(int, int)> handler);
void handle_epoll_events();
void handle_events(int timeout);
timerfd_manager_t *tfd;
};
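
With these changes epoll_manager_t can be constructed with a NULL ring loop and driven by calling handle_events() directly, while get_fd() exposes the underlying epoll fd for embedding into an external event loop. A hedged usage sketch; the header name and the 1000 ms timeout are assumptions, chosen to match the epoll fallback used elsewhere in this release:

#include "epoll_manager.h"

int main()
{
    epoll_manager_t *epmgr = new epoll_manager_t(NULL);   // no io_uring ring loop
    // ...register handlers with epmgr->set_fd_handler(fd, false, callback)...
    while (true)
    {
        // Without a ringloop nothing pumps events automatically,
        // so poll explicitly with a timeout in milliseconds
        epmgr->handle_events(1000);
    }
    delete epmgr;                                          // unreachable in this sketch
    return 0;
}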


@@ -32,6 +32,7 @@
struct sec_data
{
vitastor_c *cli = NULL;
bool epoll_based = false;
void *watch = NULL;
bool last_sync = false;
/* The list of completed io_u structs. */
@@ -58,6 +59,7 @@ struct sec_options
int rdma_port_num = 0;
int rdma_gid_index = 0;
int rdma_mtu = 0;
int no_io_uring = 0;
};
static struct fio_option options[] = {
@@ -193,6 +195,16 @@ static struct fio_option options[] = {
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = "no_io_uring",
.lname = "Disable io_uring",
.type = FIO_OPT_BOOL,
.off1 = offsetof(struct sec_options, no_io_uring),
.help = "Use epoll and plain sendmsg/recvmsg instead of io_uring (slower)",
.def = "0",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_FILENAME,
},
{
.name = NULL,
},
@@ -281,7 +293,17 @@ static int sec_setup(struct thread_data *td)
opt_push(options, "log_level", std::to_string(o->cluster_log).c_str());
// allow writeback caching if -direct is not set
opt_push(options, "client_writeback_allowed", td->o.odirect ? "0" : "1");
bsd->cli = vitastor_c_create_uring_json((const char**)options.data(), options.size());
bsd->cli = o->no_io_uring ? NULL : vitastor_c_create_uring_json((const char**)options.data(), options.size());
bsd->epoll_based = false;
if (!bsd->cli)
{
if (o->no_io_uring)
fprintf(stderr, "vitastor: io_uring disabled - I/O will be slower\n");
else
fprintf(stderr, "vitastor: failed to create io_uring: %s - I/O will be slower\n", strerror(errno));
bsd->cli = vitastor_c_create_epoll_json((const char**)options.data(), options.size());
bsd->epoll_based = true;
}
for (auto opt: options)
free(opt);
options.clear();
@@ -289,12 +311,24 @@ static int sec_setup(struct thread_data *td)
{
bsd->watch = NULL;
vitastor_c_watch_inode(bsd->cli, o->image, watch_callback, bsd);
while (true)
if (!bsd->epoll_based)
{
vitastor_c_uring_handle_events(bsd->cli);
if (bsd->watch)
break;
vitastor_c_uring_wait_events(bsd->cli);
while (true)
{
vitastor_c_uring_handle_events(bsd->cli);
if (bsd->watch)
break;
vitastor_c_uring_wait_events(bsd->cli);
}
}
else
{
while (true)
{
if (bsd->watch)
break;
vitastor_c_epoll_handle_events(bsd->cli, 1000);
}
}
td->files[0]->real_file_size = vitastor_c_inode_get_size(bsd->watch);
if (!vitastor_c_inode_get_num(bsd->watch) ||
@@ -437,12 +471,24 @@ static enum fio_q_status sec_queue(struct thread_data *td, struct io_u *io)
static int sec_getevents(struct thread_data *td, unsigned int min, unsigned int max, const struct timespec *t)
{
sec_data *bsd = (sec_data*)td->io_ops_data;
while (true)
if (!bsd->epoll_based)
{
vitastor_c_uring_handle_events(bsd->cli);
if (bsd->completed.size() >= min)
break;
vitastor_c_uring_wait_events(bsd->cli);
while (true)
{
vitastor_c_uring_handle_events(bsd->cli);
if (bsd->completed.size() >= min)
break;
vitastor_c_uring_wait_events(bsd->cli);
}
}
else
{
while (true)
{
if (bsd->completed.size() >= min)
break;
vitastor_c_epoll_handle_events(bsd->cli, 1000);
}
}
return bsd->completed.size();
}


@@ -395,27 +395,24 @@ void osd_messenger_t::check_peer_config(osd_client_t *cl)
},
},
};
json11::Json::object payload;
if (this->osd_num)
{
payload["osd_num"] = this->osd_num;
}
#ifdef WITH_RDMA
if (rdma_context)
{
cl->rdma_conn = msgr_rdma_connection_t::create(rdma_context, rdma_max_send, rdma_max_recv, rdma_max_sge, rdma_max_msg);
if (cl->rdma_conn)
{
payload["connect_rdma"] = cl->rdma_conn->addr.to_string();
payload["rdma_max_msg"] = cl->rdma_conn->max_msg;
json11::Json payload = json11::Json::object {
{ "connect_rdma", cl->rdma_conn->addr.to_string() },
{ "rdma_max_msg", cl->rdma_conn->max_msg },
};
std::string payload_str = payload.dump();
op->req.show_conf.json_len = payload_str.size();
op->buf = malloc_or_die(payload_str.size());
op->iov.push_back(op->buf, payload_str.size());
memcpy(op->buf, payload_str.c_str(), payload_str.size());
}
}
#endif
std::string payload_str = json11::Json(payload).dump();
op->req.show_conf.json_len = payload_str.size();
op->buf = malloc_or_die(payload_str.size());
op->iov.push_back(op->buf, payload_str.size());
memcpy(op->buf, payload_str.c_str(), payload_str.size());
op->callback = [this, cl](osd_op_t *op)
{
std::string json_err;


@@ -119,9 +119,9 @@ void osd_messenger_t::outbox_push(osd_op_t *cur_op)
try_send(cl);
}
}
else if (cl->write_msg.msg_iovlen > 0 || !try_send(cl))
else
{
if (cl->write_state == 0)
if ((cl->write_msg.msg_iovlen > 0 || !try_send(cl)) && (cl->write_state == 0))
{
cl->write_state = CL_WRITE_READY;
write_ready_clients.push_back(cur_op->peer_fd);


@@ -216,6 +216,14 @@ public:
{
nbd_timeout = cfg["nbd_timeout"].uint64_value();
}
if (cfg["client_writeback_allowed"].is_null())
{
// NBD is always aware of fsync, so we allow write-back cache
// by default if it's enabled
auto obj = cfg.object_items();
obj["client_writeback_allowed"] = true;
cfg = obj;
}
// Create client
ringloop = new ring_loop_t(512);
epmgr = new epoll_manager_t(ringloop);


@@ -65,8 +65,9 @@ json11::Json::object nfs_proxy_t::parse_args(int narg, const char *args[])
" --pool <POOL> use <POOL> as default pool for new files (images)\n"
" --foreground 1 stay in foreground, do not daemonize\n"
"\n"
"NFS proxy is stateless if you use immediate_commit=all in your cluster, so\n"
"you can freely use multiple NFS proxies with L3 load balancing in this case.\n"
"NFS proxy is stateless if you use immediate_commit=all in your cluster and if\n"
"you do not use client_enable_writeback=true, so you can freely use multiple\n"
"NFS proxies with L3 load balancing in this case.\n"
"\n"
"Example start and mount commands for a custom NFS port:\n"
" %s --etcd_address 192.168.5.10:2379 --portmap 0 --port 2050 --pool testpool\n"
@@ -114,6 +115,14 @@ void nfs_proxy_t::run(json11::Json cfg)
if (name_prefix.size())
name_prefix += "/";
}
if (cfg["client_writeback_allowed"].is_null())
{
// NFS is always aware of fsync, so we allow write-back cache
// by default if it's enabled
auto obj = cfg.object_items();
obj["client_writeback_allowed"] = true;
cfg = obj;
}
// Create client
ringloop = new ring_loop_t(512);
epmgr = new epoll_manager_t(ringloop);


@@ -160,6 +160,9 @@ void osd_t::parse_config(bool init)
etcd_report_interval = config["etcd_report_interval"].uint64_value();
if (etcd_report_interval <= 0)
etcd_report_interval = 5;
etcd_stats_interval = config["etcd_stats_interval"].uint64_value();
if (etcd_stats_interval <= 0)
etcd_stats_interval = 30;
readonly = json_is_true(config["readonly"]);
run_primary = !json_is_false(config["run_primary"]);
allow_test_ops = json_is_true(config["allow_test_ops"]);
@@ -184,14 +187,6 @@ void osd_t::parse_config(bool init)
// Allow to set it to 0
autosync_writes = config["autosync_writes"].uint64_value();
}
if (!config["fsync_feedback_repeat_interval"].is_null())
{
fsync_feedback_repeat_interval = config["fsync_feedback_repeat_interval"].uint64_value();
}
if (!fsync_feedback_repeat_interval)
{
fsync_feedback_repeat_interval = 500; // ms
}
if (!config["client_queue_depth"].is_null())
{
client_queue_depth = config["client_queue_depth"].uint64_value();


@@ -93,6 +93,7 @@ class osd_t
json11::Json::object cli_config, file_config, etcd_global_config, etcd_osd_config, config;
int etcd_report_interval = 5;
int etcd_stats_interval = 30;
bool readonly = false;
osd_num_t osd_num = 1; // OSD numbers start with 1
@@ -122,7 +123,6 @@ class osd_t
uint32_t scrub_list_limit = 1000;
bool scrub_find_best = true;
uint64_t scrub_ec_max_bruteforce = 100;
uint64_t fsync_feedback_repeat_interval = 500;
// cluster state
@@ -167,8 +167,6 @@ class osd_t
uint64_t unstable_write_count = 0;
std::map<osd_object_id_t, uint64_t> unstable_writes;
std::deque<osd_op_t*> syncs_in_progress;
std::map<int, timespec> unstable_write_osds;
int fsync_feedback_timer_id = -1;
// client & peer I/O
@@ -260,7 +258,6 @@ class osd_t
void exec_show_config(osd_op_t *cur_op);
void exec_secondary(osd_op_t *cur_op);
void secondary_op_callback(osd_op_t *cur_op);
void fsync_feedback();
// primary ops
void autosync();


@@ -429,14 +429,18 @@ void osd_t::acquire_lease()
create_osd_state();
});
printf(
"[OSD %lu] reporting to etcd at %s every %d seconds\n", this->osd_num,
"[OSD %lu] reporting to etcd at %s every %d seconds (statistics every %d seconds)\n", this->osd_num,
(config["etcd_address"].is_string() ? config["etcd_address"].string_value() : config["etcd_address"].dump()).c_str(),
etcd_report_interval
etcd_report_interval, etcd_stats_interval
);
tfd->set_timer(etcd_report_interval*1000, true, [this](int timer_id)
{
renew_lease(false);
});
tfd->set_timer(etcd_stats_interval*1000, true, [this](int timer_id)
{
report_statistics();
});
}
// Report "up" state once, then keep it alive using the lease
@@ -541,7 +545,6 @@ void osd_t::renew_lease(bool reload)
else
{
etcd_failed_attempts = 0;
report_statistics();
// Reload PGs
if (reload && run_primary)
{


@@ -29,23 +29,6 @@ void osd_t::secondary_op_callback(osd_op_t *op)
if (op->bs_op->retval > 0)
op->iov.push_back(op->buf, op->bs_op->retval);
}
else if (op->req.hdr.opcode == OSD_OP_SEC_WRITE ||
op->req.hdr.opcode == OSD_OP_SEC_WRITE_STABLE)
{
#ifndef OSD_STUB
fsync_feedback();
#endif
if (op->req.hdr.opcode == OSD_OP_SEC_WRITE)
{
auto & u = unstable_write_osds[op->peer_fd];
u = u;
}
}
else if (op->req.hdr.opcode == OSD_OP_SEC_SYNC)
{
// FIXME It would be more correct to track STABILIZE ops, not just reset on SYNC
unstable_write_osds.erase(op->peer_fd);
}
else if (op->req.hdr.opcode == OSD_OP_SEC_LIST)
{
// allocated by blockstore
@@ -62,71 +45,6 @@ void osd_t::secondary_op_callback(osd_op_t *op)
finish_op(op, retval);
}
void osd_t::fsync_feedback()
{
if (!unstable_write_osds.size() || !bs->wants_fsync())
{
return;
}
bool postpone = false;
// Broadcast fsync feedback
timespec now;
clock_gettime(CLOCK_REALTIME, &now);
for (auto up_it = unstable_write_osds.begin(); up_it != unstable_write_osds.end(); )
{
auto & peer_fd = up_it->first;
auto & last_feedback = up_it->second;
if (msgr.clients.find(peer_fd) == msgr.clients.end() ||
!msgr.clients.at(peer_fd)->osd_num)
{
unstable_write_osds.erase(up_it++);
continue;
}
auto diff = (now.tv_sec-last_feedback.tv_sec)*1000 + (now.tv_nsec-last_feedback.tv_nsec)/1000000;
if (diff > fsync_feedback_repeat_interval)
{
last_feedback = now;
// Request fsync from the primary OSD
// Note: primary OSD should NOT divide syncs by clients or this logic will break
osd_op_t *fb_op = new osd_op_t();
fb_op->op_type = OSD_OP_OUT;
fb_op->req = (osd_any_op_t){
.sync = {
.header = {
.magic = SECONDARY_OSD_OP_MAGIC,
.id = msgr.next_subop_id++,
.opcode = OSD_OP_SYNC,
},
},
};
fb_op->callback = [this](osd_op_t *op)
{
delete op;
};
fb_op->peer_fd = peer_fd;
msgr.outbox_push(fb_op);
}
else
{
postpone = true;
}
up_it++;
}
if (fsync_feedback_timer_id >= 0)
{
tfd->clear_timer(fsync_feedback_timer_id);
fsync_feedback_timer_id = -1;
}
if (postpone)
{
fsync_feedback_timer_id = tfd->set_timer(fsync_feedback_repeat_interval, false, [this](int timer_id)
{
fsync_feedback_timer_id = -1;
fsync_feedback();
});
}
}
void osd_t::exec_secondary(osd_op_t *cur_op)
{
if (cur_op->req.hdr.opcode == OSD_OP_SEC_READ_BMP)
@@ -240,7 +158,6 @@ void osd_t::exec_show_config(osd_op_t *cur_op)
json11::Json req_json = cur_op->req.show_conf.json_len > 0
? json11::Json::parse(std::string((char *)cur_op->buf), json_err)
: json11::Json();
msgr.clients.at(cur_op->peer_fd)->osd_num = req_json["osd_num"].uint64_value();
// Expose sensitive configuration values so peers can check them
json11::Json::object wire_config = json11::Json::object {
{ "osd_num", osd_num },


@@ -388,6 +388,7 @@ static void vitastor_aio_set_fd_handler(void *vcli, int fd, int unused1, IOHandl
);
}
#if defined VITASTOR_C_API_VERSION && VITASTOR_C_API_VERSION >= 2
typedef struct str_array
{
const char **items;
@@ -424,6 +425,7 @@ static void strarray_free(str_array *a)
a->items = NULL;
a->len = a->alloc = 0;
}
#endif
static int vitastor_file_open(BlockDriverState *bs, QDict *options, int flags, Error **errp)
{
@@ -443,6 +445,7 @@ static int vitastor_file_open(BlockDriverState *bs, QDict *options, int flags, E
client->rdma_gid_index = qdict_get_try_int(options, "rdma-gid-index", 0);
client->rdma_mtu = qdict_get_try_int(options, "rdma-mtu", 0);
client->ctx = bdrv_get_aio_context(bs);
#if defined VITASTOR_C_API_VERSION && VITASTOR_C_API_VERSION >= 2
str_array opt = {};
strarray_push_kv(&opt, "config_path", qdict_get_try_str(options, "config-path"));
strarray_push_kv(&opt, "etcd_address", qdict_get_try_str(options, "etcd-host"));
@@ -472,40 +475,15 @@ static int vitastor_file_open(BlockDriverState *bs, QDict *options, int flags, E
// Writeback cache is unusable without io_uring because the client can't correctly flush on exit
fprintf(stderr, "vitastor: failed to create io_uring: %s - I/O will be slower%s\n",
strerror(errno), (flags & BDRV_O_NOCACHE ? "" : " and writeback cache will be disabled"));
#endif
client->uring_eventfd = -1;
#if defined VITASTOR_C_API_VERSION && VITASTOR_C_API_VERSION >= 2
client->proxy = vitastor_c_create_qemu_uring(
vitastor_aio_set_fd_handler, client, client->config_path, client->etcd_host, client->etcd_prefix,
client->use_rdma, client->rdma_device, client->rdma_port_num, client->rdma_gid_index, client->rdma_mtu, 0
);
if (!client->proxy)
{
fprintf(stderr, "vitastor: failed to create io_uring: %s - I/O will be slower\n", strerror(errno));
client->uring_eventfd = -1;
client->proxy = vitastor_c_create_qemu(
vitastor_aio_set_fd_handler, client, client->config_path, client->etcd_host, client->etcd_prefix,
client->use_rdma, client->rdma_device, client->rdma_port_num, client->rdma_gid_index, client->rdma_mtu, 0
);
}
else
{
client->uring_eventfd = vitastor_c_uring_register_eventfd(client->proxy);
if (client->uring_eventfd < 0)
{
fprintf(stderr, "vitastor: failed to create io_uring eventfd: %s\n", strerror(errno));
error_setg(errp, "failed to create io_uring eventfd");
vitastor_close(bs);
return -1;
}
universal_aio_set_fd_handler(client->ctx, client->uring_eventfd, vitastor_uring_handler, NULL, client);
}
#else
client->proxy = vitastor_c_create_qemu(
vitastor_aio_set_fd_handler, client, client->config_path, client->etcd_host, client->etcd_prefix,
client->use_rdma, client->rdma_device, client->rdma_port_num, client->rdma_gid_index, client->rdma_mtu, 0
);
#endif
#if defined VITASTOR_C_API_VERSION && VITASTOR_C_API_VERSION >= 2
}
#endif
image = client->image = g_strdup(qdict_get_try_str(options, "image"));
client->readonly = (flags & BDRV_O_RDWR) ? 1 : 0;
// Get image metadata (size and readonly flag) or just wait until the client is ready


@@ -6,7 +6,7 @@ includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
Name: Vitastor
Description: Vitastor client library
Version: 1.0.0
Version: 1.1.0
Libs: -L${libdir} -lvitastor_client
Cflags: -I${includedir}


@@ -164,6 +164,15 @@ int vitastor_c_uring_register_eventfd(vitastor_c *client)
vitastor_c *vitastor_c_create_uring_json(const char **options, int options_len)
{
ring_loop_t *ringloop = NULL;
try
{
ringloop = new ring_loop_t(512);
}
catch (std::exception & e)
{
return NULL;
}
json11::Json::object cfg;
for (int i = 0; i < options_len-1; i += 2)
{
@@ -171,18 +180,32 @@ vitastor_c *vitastor_c_create_uring_json(const char **options, int options_len)
}
json11::Json cfg_json(cfg);
vitastor_c *self = new vitastor_c;
self->ringloop = new ring_loop_t(512);
self->ringloop = ringloop;
self->epmgr = new epoll_manager_t(self->ringloop);
self->cli = new cluster_client_t(self->ringloop, self->epmgr->tfd, cfg_json);
return self;
}
vitastor_c *vitastor_c_create_epoll_json(const char **options, int options_len)
{
json11::Json::object cfg;
for (int i = 0; i < options_len-1; i += 2)
{
cfg[options[i]] = std::string(options[i+1]);
}
json11::Json cfg_json(cfg);
vitastor_c *self = new vitastor_c;
self->epmgr = new epoll_manager_t(NULL);
self->cli = new cluster_client_t(NULL, self->epmgr->tfd, cfg_json);
return self;
}
void vitastor_c_destroy(vitastor_c *client)
{
delete client->cli;
if (client->epmgr)
delete client->epmgr;
else
else if (client->tfd)
delete client->tfd;
if (client->ringloop)
delete client->ringloop;
@@ -220,6 +243,16 @@ int vitastor_c_uring_has_work(vitastor_c *client)
return client->ringloop->has_work();
}
int vitastor_c_epoll_get_fd(vitastor_c *client)
{
return !client->ringloop && client->epmgr ? client->epmgr->get_fd() : -1;
}
void vitastor_c_epoll_handle_events(vitastor_c *client, int timeout)
{
return client->epmgr->handle_events(timeout);
}
void vitastor_c_read(vitastor_c *client, uint64_t inode, uint64_t offset, uint64_t len,
struct iovec *iov, int iovcnt, VitastorReadHandler cb, void *opaque)
{


@@ -7,7 +7,7 @@
#define VITASTOR_QEMU_PROXY_H
// C API wrapper version
#define VITASTOR_C_API_VERSION 2
#define VITASTOR_C_API_VERSION 3
#ifndef POOL_ID_BITS
#define POOL_ID_BITS 16
@@ -40,6 +40,7 @@ vitastor_c *vitastor_c_create_qemu_uring(QEMUSetFDHandler *aio_set_fd_handler, v
vitastor_c *vitastor_c_create_uring(const char *config_path, const char *etcd_host, const char *etcd_prefix,
int use_rdma, const char *rdma_device, int rdma_port_num, int rdma_gid_index, int rdma_mtu, int log_level);
vitastor_c *vitastor_c_create_uring_json(const char **options, int options_len);
vitastor_c *vitastor_c_create_epoll_json(const char **options, int options_len);
void vitastor_c_destroy(vitastor_c *client);
int vitastor_c_is_ready(vitastor_c *client);
int vitastor_c_uring_register_eventfd(vitastor_c *client);
@@ -47,6 +48,8 @@ void vitastor_c_uring_wait_ready(vitastor_c *client);
void vitastor_c_uring_handle_events(vitastor_c *client);
void vitastor_c_uring_wait_events(vitastor_c *client);
int vitastor_c_uring_has_work(vitastor_c *client);
int vitastor_c_epoll_get_fd(vitastor_c *client);
void vitastor_c_epoll_handle_events(vitastor_c *client, int timeout);
void vitastor_c_read(vitastor_c *client, uint64_t inode, uint64_t offset, uint64_t len,
struct iovec *iov, int iovcnt, VitastorReadHandler cb, void *opaque);
void vitastor_c_write(vitastor_c *client, uint64_t inode, uint64_t offset, uint64_t len, uint64_t check_version,
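
The new vitastor_c_create_epoll_json(), vitastor_c_epoll_get_fd() and vitastor_c_epoll_handle_events() entry points let library users fall back to plain epoll when io_uring cannot be created, the same way the fio engine above does. A hedged C++ sketch of that fallback; the header name and error handling are assumptions:

#include <stdio.h>
#include "vitastor_c.h"

static vitastor_c *create_client(const char **opts, int opts_len, bool *epoll_based)
{
    vitastor_c *cli = vitastor_c_create_uring_json(opts, opts_len);
    *epoll_based = false;
    if (!cli)
    {
        // io_uring is unavailable (for example, an old kernel) - use plain epoll
        fprintf(stderr, "vitastor: io_uring unavailable, falling back to epoll\n");
        cli = vitastor_c_create_epoll_json(opts, opts_len);
        *epoll_based = true;
    }
    return cli;
}

// The caller then pumps events with vitastor_c_uring_handle_events() /
// vitastor_c_uring_wait_events() in the uring case, or with
// vitastor_c_epoll_handle_events(cli, 1000) in the epoll case.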


@@ -18,10 +18,10 @@ else
fi
if [ "$IMMEDIATE_COMMIT" != "" ]; then
NO_SAME="--journal_no_same_sector_overwrites true --journal_sector_buffer_count 1024 --disable_data_fsync 1 --immediate_commit all --log_level 10"
NO_SAME="--journal_no_same_sector_overwrites true --journal_sector_buffer_count 1024 --disable_data_fsync 1 --immediate_commit all --log_level 10 --etcd_stats_interval 5"
$ETCDCTL put /vitastor/config/global '{"recovery_queue_depth":1,"osd_out_time":1,"immediate_commit":"all","client_enable_writeback":true}'
else
NO_SAME="--journal_sector_buffer_count 1024 --log_level 10"
NO_SAME="--journal_sector_buffer_count 1024 --log_level 10 --etcd_stats_interval 5"
$ETCDCTL put /vitastor/config/global '{"recovery_queue_depth":1,"osd_out_time":1,"client_enable_writeback":true}'
fi


@@ -7,7 +7,7 @@ if [[ "$SCHEME" = "ec" ]]; then
PG_DATA_SIZE=${PG_DATA_SIZE:-2}
PG_MINSIZE=${PG_MINSIZE:-3}
fi
OSD_COUNT=7
OSD_COUNT=${OSD_COUNT:-7}
PG_COUNT=32
. `dirname $0`/run_3osds.sh
check_qemu
@@ -29,7 +29,7 @@ kill_osds()
kill -9 $OSD1_PID
$ETCDCTL del /vitastor/osd/state/1
for i in 2 3 4 5 6 7; do
for i in $(seq 2 $OSD_COUNT); do
sleep 15
echo Killing OSD $i and starting OSD $((i-1))
p=OSD${i}_PID
@@ -40,8 +40,8 @@ kill_osds()
done
sleep 5
echo Starting OSD 7
start_osd 7
echo Starting OSD $OSD_COUNT
start_osd $OSD_COUNT
sleep 5
}


@@ -7,7 +7,7 @@ OSD_COUNT=5
OSD_ARGS="$OSD_ARGS"
for i in $(seq 1 $OSD_COUNT); do
dd if=/dev/zero of=./testdata/test_osd$i.bin bs=1024 count=1 seek=$((OSD_SIZE*1024-1))
build/src/vitastor-osd --osd_num $i --bind_address 127.0.0.1 $OSD_ARGS --etcd_address $ETCD_URL $(build/src/vitastor-disk simple-offsets --format options ./testdata/test_osd$i.bin 2>/dev/null) >>./testdata/osd$i.log 2>&1 &
build/src/vitastor-osd --osd_num $i --bind_address 127.0.0.1 --etcd_stats_interval 5 $OSD_ARGS --etcd_address $ETCD_URL $(build/src/vitastor-disk simple-offsets --format options ./testdata/test_osd$i.bin 2>/dev/null) >>./testdata/osd$i.log 2>&1 &
eval OSD${i}_PID=$!
done