Compare commits

...

9 Commits

Author SHA1 Message Date
Vitaliy Filippov 59d5ba52a9 Plug the new PG combinator into monitor
CI status: all 30 "Test / *" checks (push) failing after ~2m15s
2024-04-07 00:43:41 +03:00
Vitaliy Filippov 29284bef40 Implement new DSL/rule-based PG generation algorithm 2024-04-07 00:36:20 +03:00
Vitaliy Filippov 6a924d6066 Extract PG combinator into a separate module 2024-04-07 00:36:20 +03:00
Vitaliy Filippov 9fe779a691 Do not die on invalid pool configurations
CI status: all 30 "Test / *" checks (push) successful (15s to 7m9s)
2024-04-07 00:36:20 +03:00
Vitaliy Filippov 31c2751b9b Move NBD/VDUSE map/unmap functions to a separate file 2024-04-07 00:36:09 +03:00
Vitaliy Filippov c5195666cd Fix journal sequencing: make each journal write wait for all previous journal writes
CI status: 29 of 30 "Test / *" checks (push) successful (15s to 7m18s); test_scrub failing after 3m19s
2024-04-06 23:53:12 +03:00
Vitaliy Filippov f36d7eb76c Fix monitor thinking that OSD weight is 0 after deleting /osd/config/ key
CI status: all 30 "Test / *" checks (push) successful (14s to 7m7s)
2024-04-05 23:14:46 +03:00
Vitaliy Filippov dd7f651de1 Add --max-request-bytes=104857600 to etcd params in tests 2024-04-05 23:14:46 +03:00
Vitaliy Filippov a2994ecd0d Fix flusher possibly not trimming journal on rollback 2024-04-05 23:14:39 +03:00
28 changed files with 1598 additions and 636 deletions

View File

@ -5,14 +5,9 @@ package vitastor
import (
"context"
"errors"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
@ -81,53 +76,6 @@ func NewNodeServer(driver *Driver) *NodeServer
return ns
}
func checkVduseSupport() bool
{
// Check VDUSE support (vdpa, vduse, virtio-vdpa kernel modules)
vduse := true
for _, mod := range []string{"vdpa", "vduse", "virtio-vdpa"}
{
_, err := os.Stat("/sys/module/"+mod)
if (err != nil)
{
if (!errors.Is(err, os.ErrNotExist))
{
klog.Errorf("failed to check /sys/module/%s: %v", mod, err)
}
c := exec.Command("/sbin/modprobe", mod)
c.Stdout = os.Stderr
c.Stderr = os.Stderr
err := c.Run()
if (err != nil)
{
klog.Errorf("/sbin/modprobe %s failed: %v", mod, err)
vduse = false
break
}
}
}
// Check that vdpa tool functions
if (vduse)
{
c := exec.Command("/sbin/vdpa", "-j", "dev")
c.Stderr = os.Stderr
err := c.Run()
if (err != nil)
{
klog.Errorf("/sbin/vdpa -j dev failed: %v", err)
vduse = false
}
}
if (!vduse)
{
klog.Errorf(
"Your host apparently has no VDUSE support. VDUSE support disabled, NBD will be used to map devices."+
" For VDUSE you need at least Linux 5.15 and the following kernel modules: vdpa, virtio-vdpa, vduse.",
)
}
return vduse
}
// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error)
{
@ -140,242 +88,6 @@ func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
return &csi.NodeUnstageVolumeResponse{}, nil
}
func Contains(list []string, s string) bool
{
for i := 0; i < len(list); i++
{
if (list[i] == s)
{
return true
}
}
return false
}
func (ns *NodeServer) mapNbd(volName string, ctxVars map[string]string, readonly bool) (string, error)
{
// Map NBD device
// FIXME: Check if already mapped
args := []string{
"map", "--image", volName,
}
if (ctxVars["configPath"] != "")
{
args = append(args, "--config_path", ctxVars["configPath"])
}
if (readonly)
{
args = append(args, "--readonly", "1")
}
stdout, stderr, err := system("/usr/bin/vitastor-nbd", args...)
dev := strings.TrimSpace(string(stdout))
if (dev == "")
{
return "", fmt.Errorf("vitastor-nbd did not return the name of NBD device. output: %s", stderr)
}
return dev, err
}
func (ns *NodeServer) unmapNbd(devicePath string)
{
// unmap NBD device
unmapOut, unmapErr := exec.Command("/usr/bin/vitastor-nbd", "unmap", devicePath).CombinedOutput()
if (unmapErr != nil)
{
klog.Errorf("failed to unmap NBD device %s: %s, error: %v", devicePath, unmapOut, unmapErr)
}
}
func findByPidFile(pidFile string) (*os.Process, error)
{
pidBuf, err := os.ReadFile(pidFile)
if (err != nil)
{
return nil, err
}
pid, err := strconv.ParseInt(strings.TrimSpace(string(pidBuf)), 0, 64)
if (err != nil)
{
return nil, err
}
proc, err := os.FindProcess(int(pid))
if (err != nil)
{
return nil, err
}
return proc, nil
}
func killByPidFile(pidFile string) error
{
klog.Infof("killing process with PID from file %s", pidFile)
proc, err := findByPidFile(pidFile)
if (err != nil)
{
return err
}
return proc.Signal(syscall.SIGTERM)
}
func startStorageDaemon(vdpaId, volName, pidFile, configPath string, readonly bool) error
{
// Start qemu-storage-daemon
blockSpec := map[string]interface{}{
"node-name": "disk1",
"driver": "vitastor",
"image": volName,
"cache": map[string]bool{
"direct": true,
"no-flush": false,
},
"discard": "unmap",
}
if (configPath != "")
{
blockSpec["config-path"] = configPath
}
blockSpecJson, _ := json.Marshal(blockSpec)
writable := "true"
if (readonly)
{
writable = "false"
}
_, _, err := system(
"/usr/bin/qemu-storage-daemon", "--daemonize", "--pidfile", pidFile, "--blockdev", string(blockSpecJson),
"--export", "vduse-blk,id="+vdpaId+",node-name=disk1,name="+vdpaId+",num-queues=16,queue-size=128,writable="+writable,
)
return err
}
func (ns *NodeServer) mapVduse(volName string, ctxVars map[string]string, readonly bool) (string, string, error)
{
// Generate state file
stateFd, err := os.CreateTemp(ns.stateDir, "vitastor-vduse-*.json")
if (err != nil)
{
return "", "", err
}
stateFile := stateFd.Name()
stateFd.Close()
vdpaId := filepath.Base(stateFile)
vdpaId = vdpaId[0:len(vdpaId)-5] // remove ".json"
pidFile := ns.stateDir + vdpaId + ".pid"
// Map VDUSE device via qemu-storage-daemon
err = startStorageDaemon(vdpaId, volName, pidFile, ctxVars["configPath"], readonly)
if (err == nil)
{
// Add device to VDPA bus
_, _, err = system("/sbin/vdpa", "-j", "dev", "add", "name", vdpaId, "mgmtdev", "vduse")
if (err == nil)
{
// Find block device name
var matches []string
matches, err = filepath.Glob("/sys/bus/vdpa/devices/"+vdpaId+"/virtio*/block/*")
if (err == nil && len(matches) == 0)
{
err = errors.New("/sys/bus/vdpa/devices/"+vdpaId+"/virtio*/block/* is not found")
}
if (err == nil)
{
blockdev := "/dev/"+filepath.Base(matches[0])
_, err = os.Stat(blockdev)
if (err == nil)
{
// Generate state file
stateJSON, _ := json.Marshal(&DeviceState{
ConfigPath: ctxVars["configPath"],
VdpaId: vdpaId,
Image: volName,
Blockdev: blockdev,
Readonly: readonly,
PidFile: pidFile,
})
err = os.WriteFile(stateFile, stateJSON, 0600)
if (err == nil)
{
return blockdev, vdpaId, nil
}
}
}
}
killErr := killByPidFile(pidFile)
if (killErr != nil)
{
klog.Errorf("Failed to kill started qemu-storage-daemon: %v", killErr)
}
os.Remove(stateFile)
os.Remove(pidFile)
}
return "", "", err
}
func (ns *NodeServer) unmapVduse(devicePath string)
{
if (len(devicePath) < 6 || devicePath[0:6] != "/dev/v")
{
klog.Errorf("%s does not start with /dev/v", devicePath)
return
}
vduseDev, err := os.Readlink("/sys/block/"+devicePath[5:])
if (err != nil)
{
klog.Errorf("%s is not a symbolic link to VDUSE device (../devices/virtual/vduse/xxx): %v", devicePath, err)
return
}
vdpaId := ""
p := strings.Index(vduseDev, "/vduse/")
if (p >= 0)
{
vduseDev = vduseDev[p+7:]
p = strings.Index(vduseDev, "/")
if (p >= 0)
{
vdpaId = vduseDev[0:p]
}
}
if (vdpaId == "")
{
klog.Errorf("%s is not a symbolic link to VDUSE device (../devices/virtual/vduse/xxx), but is %v", devicePath, vduseDev)
return
}
ns.unmapVduseById(vdpaId)
}
func (ns *NodeServer) unmapVduseById(vdpaId string)
{
_, err := os.Stat("/sys/bus/vdpa/devices/"+vdpaId)
if (err != nil)
{
klog.Errorf("failed to stat /sys/bus/vdpa/devices/"+vdpaId+": %v", err)
}
else
{
_, _, _ = system("/sbin/vdpa", "-j", "dev", "del", vdpaId)
}
stateFile := ns.stateDir + vdpaId + ".json"
os.Remove(stateFile)
pidFile := ns.stateDir + vdpaId + ".pid"
_, err = os.Stat(pidFile)
if (os.IsNotExist(err))
{
// ok, already killed
}
else if (err != nil)
{
klog.Errorf("Failed to stat %v: %v", pidFile, err)
return
}
else
{
err = killByPidFile(pidFile)
if (err != nil)
{
klog.Errorf("Failed to kill started qemu-storage-daemon: %v", err)
}
os.Remove(pidFile)
}
}
func (ns *NodeServer) restarter()
{
// Restart dead VDUSE daemons at regular intervals
@ -459,7 +171,7 @@ func (ns *NodeServer) restoreVduseDaemons()
else
{
// Unused, clean it up
ns.unmapVduseById(vdpaId)
unmapVduseById(ns.stateDir, vdpaId)
}
}
}
@ -526,11 +238,11 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
var devicePath, vdpaId string
if (!ns.useVduse)
{
devicePath, err = ns.mapNbd(volName, ctxVars, req.GetReadonly())
devicePath, err = mapNbd(volName, ctxVars, req.GetReadonly())
}
else
{
devicePath, vdpaId, err = ns.mapVduse(volName, ctxVars, req.GetReadonly())
devicePath, vdpaId, err = mapVduse(ns.stateDir, volName, ctxVars, req.GetReadonly())
}
if (err != nil)
{
@ -619,11 +331,11 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
unmap:
if (!ns.useVduse || len(devicePath) >= 8 && devicePath[0:8] == "/dev/nbd")
{
ns.unmapNbd(devicePath)
unmapNbd(devicePath)
}
else
{
ns.unmapVduseById(vdpaId)
unmapVduseById(ns.stateDir, vdpaId)
}
return nil, err
}
@ -660,11 +372,11 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
{
if (!ns.useVduse)
{
ns.unmapNbd(devicePath)
unmapNbd(devicePath)
}
else
{
ns.unmapVduse(devicePath)
unmapVduse(ns.stateDir, devicePath)
}
}
return &csi.NodeUnpublishVolumeResponse{}, nil

301
csi/src/utils.go Normal file
View File

@ -0,0 +1,301 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details)
package vitastor
import (
"errors"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"k8s.io/klog"
)
func Contains(list []string, s string) bool
{
for i := 0; i < len(list); i++
{
if (list[i] == s)
{
return true
}
}
return false
}
func checkVduseSupport() bool
{
// Check VDUSE support (vdpa, vduse, virtio-vdpa kernel modules)
vduse := true
for _, mod := range []string{"vdpa", "vduse", "virtio-vdpa"}
{
_, err := os.Stat("/sys/module/"+mod)
if (err != nil)
{
if (!errors.Is(err, os.ErrNotExist))
{
klog.Errorf("failed to check /sys/module/%s: %v", mod, err)
}
c := exec.Command("/sbin/modprobe", mod)
c.Stdout = os.Stderr
c.Stderr = os.Stderr
err := c.Run()
if (err != nil)
{
klog.Errorf("/sbin/modprobe %s failed: %v", mod, err)
vduse = false
break
}
}
}
// Check that vdpa tool functions
if (vduse)
{
c := exec.Command("/sbin/vdpa", "-j", "dev")
c.Stderr = os.Stderr
err := c.Run()
if (err != nil)
{
klog.Errorf("/sbin/vdpa -j dev failed: %v", err)
vduse = false
}
}
if (!vduse)
{
klog.Errorf(
"Your host apparently has no VDUSE support. VDUSE support disabled, NBD will be used to map devices."+
" For VDUSE you need at least Linux 5.15 and the following kernel modules: vdpa, virtio-vdpa, vduse.",
)
}
return vduse
}
func mapNbd(volName string, ctxVars map[string]string, readonly bool) (string, error)
{
// Map NBD device
// FIXME: Check if already mapped
args := []string{
"map", "--image", volName,
}
if (ctxVars["configPath"] != "")
{
args = append(args, "--config_path", ctxVars["configPath"])
}
if (readonly)
{
args = append(args, "--readonly", "1")
}
stdout, stderr, err := system("/usr/bin/vitastor-nbd", args...)
dev := strings.TrimSpace(string(stdout))
if (dev == "")
{
return "", fmt.Errorf("vitastor-nbd did not return the name of NBD device. output: %s", stderr)
}
return dev, err
}
func unmapNbd(devicePath string)
{
// unmap NBD device
unmapOut, unmapErr := exec.Command("/usr/bin/vitastor-nbd", "unmap", devicePath).CombinedOutput()
if (unmapErr != nil)
{
klog.Errorf("failed to unmap NBD device %s: %s, error: %v", devicePath, unmapOut, unmapErr)
}
}
func findByPidFile(pidFile string) (*os.Process, error)
{
pidBuf, err := os.ReadFile(pidFile)
if (err != nil)
{
return nil, err
}
pid, err := strconv.ParseInt(strings.TrimSpace(string(pidBuf)), 0, 64)
if (err != nil)
{
return nil, err
}
proc, err := os.FindProcess(int(pid))
if (err != nil)
{
return nil, err
}
return proc, nil
}
func killByPidFile(pidFile string) error
{
klog.Infof("killing process with PID from file %s", pidFile)
proc, err := findByPidFile(pidFile)
if (err != nil)
{
return err
}
return proc.Signal(syscall.SIGTERM)
}
func startStorageDaemon(vdpaId, volName, pidFile, configPath string, readonly bool) error
{
// Start qemu-storage-daemon
blockSpec := map[string]interface{}{
"node-name": "disk1",
"driver": "vitastor",
"image": volName,
"cache": map[string]bool{
"direct": true,
"no-flush": false,
},
"discard": "unmap",
}
if (configPath != "")
{
blockSpec["config-path"] = configPath
}
blockSpecJson, _ := json.Marshal(blockSpec)
writable := "true"
if (readonly)
{
writable = "false"
}
_, _, err := system(
"/usr/bin/qemu-storage-daemon", "--daemonize", "--pidfile", pidFile, "--blockdev", string(blockSpecJson),
"--export", "vduse-blk,id="+vdpaId+",node-name=disk1,name="+vdpaId+",num-queues=16,queue-size=128,writable="+writable,
)
return err
}
func mapVduse(stateDir string, volName string, ctxVars map[string]string, readonly bool) (string, string, error)
{
// Generate state file
stateFd, err := os.CreateTemp(stateDir, "vitastor-vduse-*.json")
if (err != nil)
{
return "", "", err
}
stateFile := stateFd.Name()
stateFd.Close()
vdpaId := filepath.Base(stateFile)
vdpaId = vdpaId[0:len(vdpaId)-5] // remove ".json"
pidFile := stateDir + vdpaId + ".pid"
// Map VDUSE device via qemu-storage-daemon
err = startStorageDaemon(vdpaId, volName, pidFile, ctxVars["configPath"], readonly)
if (err == nil)
{
// Add device to VDPA bus
_, _, err = system("/sbin/vdpa", "-j", "dev", "add", "name", vdpaId, "mgmtdev", "vduse")
if (err == nil)
{
// Find block device name
var matches []string
matches, err = filepath.Glob("/sys/bus/vdpa/devices/"+vdpaId+"/virtio*/block/*")
if (err == nil && len(matches) == 0)
{
err = errors.New("/sys/bus/vdpa/devices/"+vdpaId+"/virtio*/block/* is not found")
}
if (err == nil)
{
blockdev := "/dev/"+filepath.Base(matches[0])
_, err = os.Stat(blockdev)
if (err == nil)
{
// Generate state file
stateJSON, _ := json.Marshal(&DeviceState{
ConfigPath: ctxVars["configPath"],
VdpaId: vdpaId,
Image: volName,
Blockdev: blockdev,
Readonly: readonly,
PidFile: pidFile,
})
err = os.WriteFile(stateFile, stateJSON, 0600)
if (err == nil)
{
return blockdev, vdpaId, nil
}
}
}
}
killErr := killByPidFile(pidFile)
if (killErr != nil)
{
klog.Errorf("Failed to kill started qemu-storage-daemon: %v", killErr)
}
os.Remove(stateFile)
os.Remove(pidFile)
}
return "", "", err
}
func unmapVduse(stateDir, devicePath string)
{
if (len(devicePath) < 6 || devicePath[0:6] != "/dev/v")
{
klog.Errorf("%s does not start with /dev/v", devicePath)
return
}
vduseDev, err := os.Readlink("/sys/block/"+devicePath[5:])
if (err != nil)
{
klog.Errorf("%s is not a symbolic link to VDUSE device (../devices/virtual/vduse/xxx): %v", devicePath, err)
return
}
vdpaId := ""
p := strings.Index(vduseDev, "/vduse/")
if (p >= 0)
{
vduseDev = vduseDev[p+7:]
p = strings.Index(vduseDev, "/")
if (p >= 0)
{
vdpaId = vduseDev[0:p]
}
}
if (vdpaId == "")
{
klog.Errorf("%s is not a symbolic link to VDUSE device (../devices/virtual/vduse/xxx), but is %v", devicePath, vduseDev)
return
}
unmapVduseById(stateDir, vdpaId)
}
func unmapVduseById(stateDir, vdpaId string)
{
_, err := os.Stat("/sys/bus/vdpa/devices/"+vdpaId)
if (err != nil)
{
klog.Errorf("failed to stat /sys/bus/vdpa/devices/"+vdpaId+": %v", err)
}
else
{
_, _, _ = system("/sbin/vdpa", "-j", "dev", "del", vdpaId)
}
stateFile := stateDir + vdpaId + ".json"
os.Remove(stateFile)
pidFile := stateDir + vdpaId + ".pid"
_, err = os.Stat(pidFile)
if (os.IsNotExist(err))
{
// ok, already killed
}
else if (err != nil)
{
klog.Errorf("Failed to stat %v: %v", pidFile, err)
return
}
else
{
err = killByPidFile(pidFile)
if (err != nil)
{
klog.Errorf("Failed to kill started qemu-storage-daemon: %v", err)
}
os.Remove(pidFile)
}
}

View File

@ -15,6 +15,7 @@ These parameters only apply to Monitors.
- [mon_stats_timeout](#mon_stats_timeout)
- [osd_out_time](#osd_out_time)
- [placement_levels](#placement_levels)
- [use_old_pg_combinator](#use_old_pg_combinator)
## etcd_mon_ttl
@ -77,3 +78,11 @@ values. Smaller priority means higher level in tree. For example,
levels are always predefined and can't be removed. If one of them is not
present in the configuration, then it is defined with the default priority
(100 for "host", 101 for "osd").
## use_old_pg_combinator
- Type: boolean
- Default: false
Use the old PG combination generator, which doesn't support [level_placement](pool.en.md#level_placement)
and [raw_placement](pool.en.md#raw_placement), for pools that don't use these features.

View File

@ -15,6 +15,7 @@
- [mon_stats_timeout](#mon_stats_timeout)
- [osd_out_time](#osd_out_time)
- [placement_levels](#placement_levels)
- [use_old_pg_combinator](#use_old_pg_combinator)
## etcd_mon_ttl
@ -78,3 +79,11 @@ OSD перед обновлением агрегированной статис
"host" и "osd" являются предопределёнными и не могут быть удалены. Если
один из них отсутствует в конфигурации, он доопределяется с приоритетом по
умолчанию (100 для уровня "host", 101 для "osd").
## use_old_pg_combinator
- Тип: булево (да/нет)
- Значение по умолчанию: false
Использовать старый генератор комбинаций PG, не поддерживающий [level_placement](pool.ru.md#level_placement)
и [raw_placement](pool.ru.md#raw_placement) для пулов, которые не используют данные функции.

View File

@ -32,6 +32,8 @@ Parameters:
- [pg_minsize](#pg_minsize)
- [pg_count](#pg_count)
- [failure_domain](#failure_domain)
- [level_placement](#level_placement)
- [raw_placement](#raw_placement)
- [max_osd_combinations](#max_osd_combinations)
- [block_size](#block_size)
- [bitmap_granularity](#bitmap_granularity)
@ -209,6 +211,69 @@ never put on OSDs in the same failure domain (for example, on the same host).
So the failure domain specifies the unit whose failure you are protecting
yourself from.
## level_placement
- Type: string
Additional failure domain rules, applied in conjunction with failure_domain.
Must be specified in the following form:
`<placement level>=<sequence of characters>, <level2>=<sequence2>, ...`
The sequence must be exactly [pg_size](#pg_size) characters long. Each character
corresponds to one OSD in a PG of this pool. Equal characters mean that the
corresponding PG items should be placed into the same placement tree item at
this level; different characters mean that they should be placed into different
items.
For example, if you want an EC 4+2 pool in which every 2 chunks are stored in
their own datacenter and each chunk is stored on a different host, set
`level_placement` to `dc=112233 host=123456`.
Or you can set `level_placement` to `dc=112233` and leave `failure_domain` empty,
because `host` is the default `failure_domain` and it will be applied anyway.
Without this rule, 3 chunks could end up on OSDs in the same datacenter, and
the data would become inaccessible if that datacenter went down.
Of course, you should group your hosts into datacenters before applying the rule,
by setting [placement_levels](monitor.en.md#placement_levels) to something like
`{"dc":90,"host":100,"osd":110}` and adding DCs to [node_placement](#placement-tree),
like `{"dc1":{"level":"dc"},"host1":{"parent":"dc1"},...}`.
## raw_placement
- Type: string
Raw PG placement rules, specified in the form of a DSL (domain-specific language).
Use only if you really know what you're doing :)
DSL specification:
```
dsl := item | item ("\n" | ",") items
item := "any" | rules
rules := rule | rule rules
rule := level operator arg
level := /\w+/
operator := "!=" | "=" | ">" | "?="
arg := value | "(" values ")"
values := value | value "," values
value := item_ref | constant_id
item_ref := /\d+/
constant_id := /"([^"]+)"/
```
"?=" operator means "preferred". I.e. `dc ?= "meow"` means "prefer datacenter meow
for this chunk, but put into another dc if it's unavailable".
Examples:
- Simple 3 replicas with failure_domain=host: `any, host!=1, host!=(1,2)`
- EC 4+2 in 3 DC: `any, dc=1 host!=1, dc!=1, dc=3 host!=3, dc!=(1,3), dc=5 host!=5`
- 1 replica in fixed DC + 2 in random DCs: `dc?=meow, dc!=1, dc!=(1,2)`
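These examples parse directly into the monitor's internal rule form with `parse_pg_dsl()` from `mon/dsl_pgs.js` (added later in this diff). A minimal sketch, assuming Node.js with `mon/` as the working directory:
```
// Parse the simple 3-replica example into the [ level, operator, value ][][] form
const { parse_pg_dsl } = require('./dsl_pgs.js');

const rules = parse_pg_dsl('any, host!=1, host!=(1,2)');
// [ [], [ [ 'host', '!=', 1 ] ], [ [ 'host', '!=', [ 1, 2 ] ] ] ]
console.log(JSON.stringify(rules));
```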
## max_osd_combinations
- Type: integer

View File

@ -31,6 +31,8 @@
- [pg_minsize](#pg_minsize)
- [pg_count](#pg_count)
- [failure_domain](#failure_domain)
- [level_placement](#level_placement)
- [raw_placement](#raw_placement)
- [max_osd_combinations](#max_osd_combinations)
- [block_size](#block_size)
- [bitmap_granularity](#bitmap_granularity)
@ -161,7 +163,7 @@ OSD, PG деактивируется на чтение и запись. Иным
For example, the difference between pg_minsize 2 and 1 in a replicated pool with
3 copies of data (pg_size=3) manifests as follows:
- If 2 servers go down with pg_minsize=2, the pool becomes inactive and
stays inactive for [osd_out_time](monitor.en.md#osd_out_time)
stays inactive for [osd_out_time](monitor.ru.md#osd_out_time)
(10 minutes), after which the monitor assigns replacement OSDs/servers, the pool
comes up and starts recovering the missing copies of data. Accordingly, if there
are no replacement OSDs - that is, if you have only 3 servers with OSDs and 2 of them
@ -169,7 +171,7 @@ OSD, PG деактивируется на чтение и запись. Иным
or add at least 1 more server (or switch failure_domain to "osd").
- If 2 servers go down with pg_minsize=1, I/O is only paused for a short time,
until the monitor realizes that the OSDs are down
(which takes 5-10 seconds with the default [etcd_report_interval](osd.en.md#etcd_report_interval)).
(which takes 5-10 seconds with the default [etcd_report_interval](osd.ru.md#etcd_report_interval)).
After that I/O resumes, but new data is temporarily written in only 1 copy. Once
osd_out_time passes, the monitor likewise assigns other OSDs to replace the failed
ones and the pool starts recovering the copies of data.
@ -211,6 +213,71 @@ PG в Vitastor эферемерны, то есть вы можете менят
In other words, the failure domain is the unit whose failure you protect yourself
from with redundant storage.
## level_placement
- Type: string
Additional failure domain rules, applied in conjunction with failure_domain.
Must be specified in the following form:
`<level>=<sequence of characters>, <level2>=<sequence2>, ...`
Each `<sequence>` must be exactly [pg_size](#pg_size) characters long.
Each character corresponds to one OSD (one PG item placement) of this pool.
Equal characters mean that the corresponding items are placed into the same
OSD tree node at the given `<level>`; different characters mean that items
are placed into different nodes.
For example, if you want an EC 4+2 pool in which every 2 data chunks are placed
in their own datacenter and each chunk is stored on a different host, set
`level_placement` to `dc=112233 host=123456`.
Or you can simply set `level_placement` to `dc=112233` and leave `failure_domain`
empty, since `host` is its default value and it will also be applied automatically.
Without this rule it may happen that one datacenter ends up with 3 chunks of one
PG, and the data becomes unavailable during a temporary outage of that datacenter.
Naturally, before applying the rule you need to group your hosts into datacenters
by setting [placement_levels](monitor.ru.md#placement_levels) to something like
`{"dc":90,"host":100,"osd":110}` and adding the datacenters to
[node_placement](#дерево-размещения), roughly like this:
`{"dc1":{"level":"dc"},"host1":{"parent":"dc1"},...}`.
## raw_placement
- Type: string
Raw (low-level) PG generation rules in the form of a DSL (domain-specific language).
Use only if you really know what you're doing :)
DSL specification:
```
dsl := item | item ("\n" | ",") items
item := "any" | rules
rules := rule | rule rules
rule := level operator arg
level := /\w+/
operator := "!=" | "=" | ">" | "?="
arg := value | "(" values ")"
values := value | value "," values
value := item_ref | constant_id
item_ref := /\d+/
constant_id := /"([^"]+)"/
```
Оператор "?=" означает "предпочитаемый". Т.е. `dc ?= "meow"` означает "предпочитать
датацентр meow для этой части данных, но разместить её в другом датацентре, если
meow недоступен".
Примеры:
- Простые 3 реплики с failure_domain=host: `any, host!=1, host!=(1,2)`
- EC 4+2 в 3 датацентрах: `any, dc=1 host!=1, dc!=1, dc=3 host!=3, dc!=(1,3), dc=5 host!=5`
- 1 копия в фиксированном ДЦ + 2 в других ДЦ: `dc?=meow, dc!=1, dc!=(1,2)`
## max_osd_combinations
- Тип: целое число

View File

@ -63,3 +63,12 @@
"host" и "osd" являются предопределёнными и не могут быть удалены. Если
один из них отсутствует в конфигурации, он доопределяется с приоритетом по
умолчанию (100 для уровня "host", 101 для "osd").
- name: use_old_pg_combinator
type: bool
default: false
info: |
Use the old PG combination generator, which doesn't support [level_placement](pool.en.md#level_placement)
and [raw_placement](pool.en.md#raw_placement), for pools that don't use these features.
info_ru: |
Use the old PG combination generator, which doesn't support [level_placement](pool.ru.md#level_placement)
and [raw_placement](pool.ru.md#raw_placement), for pools that don't use these features.

View File

@ -269,6 +269,8 @@ Optional parameters:
| `--block_size 128k` | Put pool only on OSDs with this data block size |
| `--bitmap_granularity 4k` | Put pool only on OSDs with this logical sector size |
| `--immediate_commit none` | Put pool only on OSDs with this or larger immediate_commit (none < small < all) |
| `--level_placement <rules>` | Use additional failure domain rules (example: "dc=112233") |
| `--raw_placement <rules>` | Specify raw PG generation rules ([details](../config/pool.en.md#raw_placement)) |
| `--primary_affinity_tags tags` | Prefer to put primary copies on OSDs with all specified tags |
| `--scrub_interval <time>` | Enable regular scrubbing for this pool. Format: number + unit s/m/h/d/M/y |
| `--used_for_fs <name>` | Mark pool as used for VitastorFS with metadata in image <name> |

View File

@ -286,6 +286,8 @@ vitastor-cli snap-create [-p|--pool <id|name>] <image>@<snapshot>
| `--block_size 128k` | ...only OSDs with this data block size |
| `--bitmap_granularity 4k` | ...only OSDs with this logical sector size |
| `--immediate_commit none` | ...only OSDs with this or larger immediate_commit (none < small < all) |
| `--level_placement <rules>` | Set additional failure domain rules (example: "dc=112233") |
| `--raw_placement <rules>` | Set low-level PG generation rules ([details](../config/pool.ru.md#raw_placement)) |
| `--primary_affinity_tags tags` | Prefer OSDs with all the given tags for the primary role |
| `--scrub_interval <time>` | Enable periodic scrubs with the given interval (number + unit s/m/h/d/M/y) |
| `--pg_stripe_size <number>` | Increase the object grouping block size per PG |

408
mon/dsl_pgs.js Normal file
View File

@ -0,0 +1,408 @@
const { select_murmur3 } = require('./murmur3.js');
const NO_OSD = 'Z';
class RuleCombinator
{
constructor(osd_tree, rules, max_combinations, ordered)
{
this.osd_tree = index_tree(Object.values(osd_tree).filter(o => o.id));
this.rules = rules;
this.max_combinations = max_combinations;
this.ordered = ordered;
}
random_combinations()
{
return random_custom_combinations(this.osd_tree, this.rules, this.max_combinations, this.ordered);
}
check_combinations(pgs)
{
return check_custom_combinations(this.osd_tree, this.rules, pgs);
}
}
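// Usage sketch (illustrative, not part of the module): build a combinator for
// a pool and generate candidate PGs:
//   const combinator = new RuleCombinator(osd_tree, parse_pg_dsl('any, host!=1, host!=(1,2)'), 10000, false);
//   const pgs = combinator.random_combinations(); // { 'pg_1_2_3': [ 1, 2, 3 ], ... }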
// Convert alternative "level-index" format to rules
// level_index = { [level: string]: string | string[] }
// level_sequence = optional, levels from upper to lower, i.e. [ 'dc', 'host' ]
// Example: level_index = { dc: "112233", host: "ABCDEF" }
function parse_level_indexes(level_index, level_sequence)
{
const rules = [];
const lvl_first = {};
for (const level in level_index)
{
const idx = level_index[level];
while (rules.length < idx.length)
{
rules.push([]);
}
const seen = {};
for (let i = 0; i < idx.length; i++)
{
if (!seen[idx[i]])
{
const other = Object.values(seen);
if (other.length)
{
rules[i].push([ level, '!=', other ]);
}
seen[idx[i]] = i+1;
}
else
{
rules[i].push([ level, '=', seen[idx[i]] ]);
}
}
lvl_first[level] = seen;
}
if (level_sequence)
{
// Prune useless rules for the sake of prettiness
// For simplicity, call "upper" level DC and "lower" level host
const level_prio = Object.keys(level_sequence).reduce((a, c) => { a[level_sequence[c]] = c; return a; }, {});
for (let upper_i = 0; upper_i < level_sequence.length-1; upper_i++)
{
const upper_level = level_sequence[upper_i];
for (let i = 0; i < rules.length; i++)
{
const noteq = {};
for (let k = 0; k < level_index[upper_level].length; k++)
{
// If upper_level[x] is different from upper_level[y]
// then lower_level[x] is also different from lower_level[y]
if (level_index[upper_level][k] != level_index[upper_level][i])
{
noteq[k+1] = true;
}
}
for (let j = 0; j < rules[i].length; j++)
{
if (level_prio[rules[i][j][0]] != null && level_prio[rules[i][j][0]] > upper_i && rules[i][j][1] == '!=')
{
rules[i][j][2] = rules[i][j][2].filter(other_host => !noteq[other_host]);
if (!rules[i][j][2].length)
{
rules[i].splice(j--, 1);
}
}
}
}
}
}
return rules;
}
// Parse rules in DSL format
// dsl := item | item ("\n" | ",") items
// item := "any" | rules
// rules := rule | rule rules
// rule := level operator arg
// level := /\w+/
// operator := "!=" | "=" | ">" | "?="
// arg := value | "(" values ")"
// values := value | value "," values
// value := item_ref | constant_id
// item_ref := /\d+/
// constant_id := /"([^"]+)"/
//
// Output: [ level, operator, value ][][]
function parse_pg_dsl(text)
{
const tokens = [ ...text.matchAll(/\w+|!=|\?=|[>=\(\),\n]|"([^\"]+)"/g) ].map(t => [ t[0], t.index ]);
let positions = [ [] ];
let rules = positions[0];
for (let i = 0; i < tokens.length; )
{
if (tokens[i][0] === '\n' || tokens[i][0] === ',')
{
rules = [];
positions.push(rules);
i++;
}
else if (!rules.length && tokens[i][0] === 'any' && (i == tokens.length-1 || tokens[i+1][0] === ',' || tokens[i+1][0] === '\n'))
{
i++;
}
else
{
if (!/^\w/.exec(tokens[i][0]))
{
throw new Error('Unexpected '+tokens[i][0]+' at '+tokens[i][1]+' (level name expected)');
}
if (i > tokens.length-3)
{
throw new Error('Unexpected EOF (operator and value expected)');
}
if (/^\w/.exec(tokens[i+1][0]) || tokens[i+1][0] === ',' || tokens[i+1][0] === '\n')
{
throw new Error('Unexpected '+tokens[i+1][0]+' at '+tokens[i+1][1]+' (operator expected)');
}
if (!/^[\w"(]/.exec(tokens[i+2][0])) // "
{
throw new Error('Unexpected '+tokens[i+2][0]+' at '+tokens[i+2][1]+' (id, round brace, number or node ID expected)');
}
let rule = [ tokens[i][0], tokens[i+1][0], tokens[i+2][0] ];
i += 3;
if (rule[2][0] == '"')
{
rule[2] = { id: rule[2].substr(1, rule[2].length-2) };
}
else if (rule[2] === '(')
{
rule[2] = [];
while (true)
{
if (i > tokens.length-1)
{
throw new Error('Unexpected EOF (expected list and a closing round brace)');
}
if (tokens[i][0] === ',')
{
i++;
}
else if (tokens[i][0] === ')')
{
i++;
break;
}
else if (tokens[i][0][0] === '"')
{
rule[2].push({ id: tokens[i][0].substr(1, tokens[i][0].length-2) });
i++;
}
else if (/^\d+$/.exec(tokens[i][0]))
{
const n = 0|tokens[i][0];
if (!n)
{
throw new Error('Level reference cannot be 0 (refs count from 1) at '+tokens[i][1]);
}
else if (n > positions.length)
{
throw new Error('Forward references are forbidden at '+tokens[i][1]);
}
rule[2].push(n);
i++;
}
else if (!/^\w/.exec(tokens[i][0]))
{
throw new Error('Unexpected '+tokens[i][0]+' at '+tokens[i][1]+' (number or node ID expected)');
}
else
{
rule[2].push({ id: tokens[i][0] });
i++;
}
}
}
else if (!/^\d+$/.exec(rule[2]))
{
rule[2] = { id: rule[2] };
}
else
{
rule[2] = 0|rule[2];
if (!rule[2])
{
throw new Error('Level reference cannot be 0 (refs count from 1) at '+tokens[i-1][1]);
}
else if (rule[2] > positions.length)
{
throw new Error('Forward references are forbidden at '+tokens[i-1][1]);
}
}
rules.push(rule);
}
}
return positions;
}
// osd_tree = index_tree() output
// levels = { string: number }
// rules = [ level, operator, value ][][]
// level = string
// operator = '=' | '!=' | '>' | '?='
// value = number|number[] | { id: string|string[] }
// examples:
// 1) simple 3 replicas with failure_domain=host:
// [ [], [ [ 'host', '!=', 1 ] ], [ [ 'host', '!=', [ 1, 2 ] ] ] ]
// in DSL form: any, host!=1, host!=(1,2)
// 2) EC 4+2 in 3 DC:
// [ [], [ [ 'dc', '=', 1 ], [ 'host', '!=', 1 ] ],
// [ 'dc', '!=', 1 ], [ [ 'dc', '=', 3 ], [ 'host', '!=', 3 ] ],
// [ 'dc', '!=', [ 1, 3 ] ], [ [ 'dc', '=', 5 ], [ 'host', '!=', 5 ] ] ]
// in DSL form: any, dc=1 host!=1, dc!=1, dc=3 host!=3, dc!=(1,3), dc=5 host!=5
// 3) 1 replica in fixed DC + 2 in random DCs:
// [ [ [ 'dc', '=', { id: 'meow' } ] ], [ [ 'dc', '!=', 1 ] ], [ [ 'dc', '!=', [ 1, 2 ] ] ] ]
// in DSL form: dc=meow, dc!=1, dc!=(1,2)
// 4) 2 replicas in each DC (almost the same as (2)):
// DSL: any, dc=1 host!=1, dc!=1, dc=3 host!=3
// Alternative simpler way to specify rules would be: [ DC: 112233 HOST: 123456 ]
function random_custom_combinations(osd_tree, rules, count, ordered)
{
const r = {};
const first = filter_tree_by_rules(osd_tree, rules[0], []);
let max_size = 0;
// All combinations for the first item (usually "any") to try to include each OSD at least once
for (const f of first)
{
const selected = [ f ];
for (let i = 1; i < rules.length; i++)
{
const filtered = filter_tree_by_rules(osd_tree, rules[i], selected);
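// select_murmur3() (from murmur3.js) deterministically picks one of the
// filtered candidates based on murmur3 hashes of the keys produced by the
// callback, so the same tree and rules always yield the same PGs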
const idx = select_murmur3(filtered.length, i => 'p:'+f.id+':'+filtered[i].id);
selected.push(idx == null ? { levels: {}, id: null } : filtered[idx]);
}
const size = selected.filter(s => s.id !== null).length;
max_size = max_size < size ? size : max_size;
const pg = selected.map(s => s.id === null ? NO_OSD : (0|s.id));
if (!ordered)
pg.sort();
r['pg_'+pg.join('_')] = pg;
}
// Pseudo-random selection
for (let n = 0; n < count; n++)
{
const selected = [];
for (const item_rules of rules)
{
const filtered = selected.length ? filter_tree_by_rules(osd_tree, item_rules, selected) : first;
const idx = select_murmur3(filtered.length, i => n+':'+filtered[i].id);
selected.push(idx == null ? { levels: {}, id: null } : filtered[idx]);
}
const size = selected.filter(s => s.id !== null).length;
max_size = max_size < size ? size : max_size;
const pg = selected.map(s => s.id === null ? NO_OSD : (0|s.id));
if (!ordered)
pg.sort();
r['pg_'+pg.join('_')] = pg;
}
// Exclude PGs with less successful selections than maximum
for (const k in r)
{
if (r[k].filter(s => s !== NO_OSD).length < max_size)
{
delete r[k];
}
}
return r;
}
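// Filter the indexed tree down to the leaf items that satisfy all rules for
// one PG position, given the items already selected for previous positions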
function filter_tree_by_rules(osd_tree, rules, selected)
{
let cur = [ ...osd_tree[''].children ]; // copy: the descend loops below splice into this list
for (const rule of rules)
{
const val = (rule[2] instanceof Array ? rule[2] : [ rule[2] ])
.map(v => v instanceof Object ? v.id : selected[v-1].levels[rule[0]]);
let preferred = [], other = [];
for (let i = 0; i < cur.length; i++)
{
const item = cur[i];
const level_id = item.levels[rule[0]];
if (level_id)
{
if (rule[1] == '>' && val.filter(v => level_id <= v).length == 0 ||
(rule[1] == '=' || rule[1] == '?=') && val.filter(v => level_id != v).length == 0 ||
rule[1] == '!=' && val.filter(v => level_id == v).length == 0)
{
// Include
preferred.push(item);
}
else if (rule[1] == '?=' && val.filter(v => level_id != v).length > 0)
{
// Non-preferred
other.push(item);
}
}
else if (item.children)
{
// Descend
cur.splice(i+1, 0, ...item.children);
}
}
cur = preferred.length ? preferred : other;
}
// Get leaf items
for (let i = 0; i < cur.length; i++)
{
if (cur[i].children)
{
// Descend
cur.splice(i, 1, ...cur[i].children);
i--;
}
}
return cur;
}
// Convert from
// node_list = { id: string|number, level: string, size?: number, parent?: string|number }[]
// to
// node_tree = { [node_id]: { id, level, size?, parent?, children?: child_node_id[], levels: { [level]: id, ... } } }
function index_tree(node_list)
{
const tree = { '': { children: [], levels: {} } };
for (const node of node_list)
{
tree[node.id] = { ...node, levels: {} };
delete tree[node.id].children;
}
for (const node of node_list)
{
const parent_id = node.parent && tree[node.parent] ? node.parent : '';
tree[parent_id].children = tree[parent_id].children || [];
tree[parent_id].children.push(tree[node.id]);
}
const cur = tree[''].children;
for (let i = 0; i < cur.length; i++)
{
cur[i].levels[cur[i].level] = cur[i].id;
if (cur[i].children)
{
for (const child of cur[i].children)
{
child.levels = { ...cur[i].levels, ...child.levels };
}
cur.splice(i, 1, ...cur[i].children);
i--;
}
}
return tree;
}
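// Example (sketch): index_tree([ { id: 'dc1', level: 'dc' },
//   { id: 'h1', level: 'host', parent: 'dc1' },
//   { id: 1, level: 'osd', size: 1, parent: 'h1' } ])
// yields tree[1].levels == { dc: 'dc1', host: 'h1', osd: 1 }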
// selection = id[]
// osd_tree = index_tree output
// rules = parse_pg_dsl output
function check_custom_combinations(osd_tree, rules, pgs)
{
const res = [];
skip_pg: for (const pg of pgs)
{
let selected = pg.map(id => osd_tree[id] || null);
for (let i = 0; i < rules.length; i++)
{
const filtered = filter_tree_by_rules(osd_tree, rules[i], selected);
if (selected[i] === null && filtered.length ||
!filtered.filter(ok => selected[i].id === ok.id).length)
{
continue skip_pg;
}
}
res.push(pg);
}
return res;
}
module.exports = {
RuleCombinator,
NO_OSD,
index_tree,
parse_level_indexes,
parse_pg_dsl,
random_custom_combinations,
check_custom_combinations,
};

View File

@ -50,15 +50,15 @@ async function lp_solve(text)
return { score, vars };
}
async function optimize_initial({ osd_tree, pg_count, pg_size = 3, pg_minsize = 2, max_combinations = 10000, parity_space = 1, ordered = false })
// osd_weights = { [id]: weight }
async function optimize_initial({ osd_weights, combinator, pg_count, pg_size = 3, pg_minsize = 2, parity_space = 1, ordered = false })
{
if (!pg_count || !osd_tree)
if (!pg_count || !osd_weights)
{
return null;
}
const all_weights = Object.assign({}, ...Object.values(osd_tree));
const total_weight = Object.values(all_weights).reduce((a, c) => Number(a) + Number(c), 0);
const all_pgs = Object.values(random_combinations(osd_tree, pg_size, max_combinations, parity_space > 1));
const total_weight = Object.values(osd_weights).reduce((a, c) => Number(a) + Number(c), 0);
const all_pgs = Object.values(make_cyclic(combinator.random_combinations(), parity_space));
const pg_per_osd = {};
for (const pg of all_pgs)
{
@ -69,15 +69,15 @@ async function optimize_initial({ osd_tree, pg_count, pg_size = 3, pg_minsize =
pg_per_osd[osd].push((i >= pg_minsize ? parity_space+'*' : '')+"pg_"+pg.join("_"));
}
}
const pg_effsize = Math.min(pg_minsize, Object.keys(osd_tree).length)
+ Math.max(0, Math.min(pg_size, Object.keys(osd_tree).length) - pg_minsize) * parity_space;
let pg_effsize = all_pgs.reduce((a, c) => Math.max(a, c.filter(e => e != NO_OSD).length), 0);
pg_effsize = Math.min(pg_minsize, pg_effsize) + Math.max(0, Math.min(pg_size, pg_effsize) - pg_minsize) * parity_space;
let lp = '';
lp += "max: "+all_pgs.map(pg => 'pg_'+pg.join('_')).join(' + ')+";\n";
for (const osd in pg_per_osd)
{
if (osd !== NO_OSD)
{
let osd_pg_count = all_weights[osd]/total_weight*pg_effsize*pg_count;
let osd_pg_count = osd_weights[osd]/total_weight*pg_effsize*pg_count;
lp += pg_per_osd[osd].join(' + ')+' <= '+osd_pg_count+';\n';
}
}
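// The text built above is an lp_solve problem, roughly of the form (sketch):
//   max: pg_1_2_3 + pg_1_4_5 + ...;
//   pg_1_2_3 + pg_1_4_5 <= 25.6;  // one constraint per OSD, proportional to its weight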
@ -93,7 +93,7 @@ async function optimize_initial({ osd_tree, pg_count, pg_size = 3, pg_minsize =
throw new Error('Problem is infeasible or unbounded - is it a bug?');
}
const int_pgs = make_int_pgs(lp_result.vars, pg_count, ordered);
const eff = pg_list_space_efficiency(int_pgs, all_weights, pg_minsize, parity_space);
const eff = pg_list_space_efficiency(int_pgs, osd_weights, pg_minsize, parity_space);
const res = {
score: lp_result.score,
weights: lp_result.vars,
@ -104,6 +104,22 @@ async function optimize_initial({ osd_tree, pg_count, pg_size = 3, pg_minsize =
return res;
}
function make_cyclic(pgs, parity_space)
{
if (parity_space > 1)
{
// Add rotated copies of each PG so the optimizer can also choose which OSDs
// get the parity positions (pgs maps 'pg_...' name => PG array)
for (const pg of Object.values(pgs))
{
for (let i = 1; i < pg.length; i++)
{
const cyclic = [ ...pg.slice(i), ...pg.slice(0, i) ];
pgs['pg_'+cyclic.join('_')] = cyclic;
}
}
}
return pgs;
}
function shuffle(array)
{
for (let i = array.length - 1, j, x; i > 0; i--)
@ -216,47 +232,17 @@ function calc_intersect_weights(old_pg_size, pg_size, pg_count, prev_weights, al
return move_weights;
}
function add_valid_previous(osd_tree, prev_weights, all_pgs)
{
// Add previous combinations that are still valid
const hosts = Object.keys(osd_tree).sort();
const host_per_osd = {};
for (const host in osd_tree)
{
for (const osd in osd_tree[host])
{
host_per_osd[osd] = host;
}
}
skip_pg: for (const pg_name in prev_weights)
{
const seen_hosts = {};
const pg = pg_name.substr(3).split(/_/);
for (const osd of pg)
{
if (!host_per_osd[osd] || seen_hosts[host_per_osd[osd]])
{
continue skip_pg;
}
seen_hosts[host_per_osd[osd]] = true;
}
if (!all_pgs[pg_name])
{
all_pgs[pg_name] = pg;
}
}
}
// Try to minimize data movement
async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3, pg_minsize = 2, max_combinations = 10000, parity_space = 1, ordered = false })
async function optimize_change({ prev_pgs: prev_int_pgs, osd_weights, combinator, pg_size = 3, pg_minsize = 2, parity_space = 1, ordered = false })
{
if (!osd_tree)
if (!osd_weights)
{
return null;
}
// FIXME: use parity_chunks with parity_space instead of pg_minsize
const pg_effsize = Math.min(pg_minsize, Object.keys(osd_tree).length)
+ Math.max(0, Math.min(pg_size, Object.keys(osd_tree).length) - pg_minsize) * parity_space;
let all_pgs = make_cyclic(combinator.random_combinations(), parity_space);
let pg_effsize = Object.values(all_pgs).reduce((a, c) => Math.max(a, c.filter(e => e != NO_OSD).length), 0);
pg_effsize = Math.min(pg_minsize, pg_effsize) + Math.max(0, Math.min(pg_size, pg_effsize) - pg_minsize) * parity_space;
const pg_count = prev_int_pgs.length;
const prev_weights = {};
const prev_pg_per_osd = {};
@ -273,10 +259,13 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3,
}
const old_pg_size = prev_int_pgs[0].length;
// Get all combinations
let all_pgs = random_combinations(osd_tree, pg_size, max_combinations, parity_space > 1);
if (old_pg_size == pg_size)
{
add_valid_previous(osd_tree, prev_weights, all_pgs);
const still_valid = combinator.check_combinations(Object.keys(prev_weights).map(pg_name => pg_name.substr(3).split('_')));
for (const pg of still_valid)
{
all_pgs['pg_'+pg.join('_')] = pg;
}
}
all_pgs = Object.values(all_pgs);
const pg_per_osd = {};
@ -295,8 +284,7 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3,
// Calculate total weight - old PG weights
const all_pg_names = all_pgs.map(pg => 'pg_'+pg.join('_'));
const all_pgs_hash = all_pg_names.reduce((a, c) => { a[c] = true; return a; }, {});
const all_weights = Object.assign({}, ...Object.values(osd_tree));
const total_weight = Object.values(all_weights).reduce((a, c) => Number(a) + Number(c), 0);
const total_weight = Object.values(osd_weights).reduce((a, c) => Number(a) + Number(c), 0);
// Generate the LP problem
let lp = '';
lp += 'max: '+all_pg_names.map(pg_name => (
@ -311,7 +299,7 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3,
)).join(' + ');
const rm_osd_pg_count = (prev_pg_per_osd[osd]||[])
.reduce((a, [ old_pg_name, space ]) => (a + (all_pgs_hash[old_pg_name] ? space : 0)), 0);
const osd_pg_count = all_weights[osd]*pg_effsize/total_weight*pg_count - rm_osd_pg_count;
const osd_pg_count = osd_weights[osd]*pg_effsize/total_weight*pg_count - rm_osd_pg_count;
lp += osd_sum + ' <= ' + osd_pg_count + ';\n';
}
}
@ -421,7 +409,7 @@ async function optimize_change({ prev_pgs: prev_int_pgs, osd_tree, pg_size = 3,
int_pgs: new_pgs,
differs,
osd_differs,
space: pg_effsize * pg_list_space_efficiency(new_pgs, all_weights, pg_minsize, parity_space),
space: pg_effsize * pg_list_space_efficiency(new_pgs, osd_weights, pg_minsize, parity_space),
total_space: total_weight,
};
}
@ -502,198 +490,6 @@ function put_aligned_pgs(aligned_pgs, int_pgs, prev_int_pgs, keygen)
}
}
// Convert multi-level osd_tree = { level: number|string, id?: string, size?: number, children?: osd_tree }[]
// levels = { string: number }
// to a two-level osd_tree suitable for all_combinations()
function flatten_tree(osd_tree, levels, failure_domain_level, osd_level, domains = {}, i = { i: 1 })
{
osd_level = levels[osd_level] || osd_level;
failure_domain_level = levels[failure_domain_level] || failure_domain_level;
for (const node of osd_tree)
{
if ((levels[node.level] || node.level) < failure_domain_level)
{
flatten_tree(node.children||[], levels, failure_domain_level, osd_level, domains, i);
}
else
{
domains['dom'+(i.i++)] = extract_osds([ node ], levels, osd_level);
}
}
return domains;
}
function extract_osds(osd_tree, levels, osd_level, osds = {})
{
for (const node of osd_tree)
{
if ((levels[node.level] || node.level) >= osd_level)
{
osds[node.id] = node.size;
}
else
{
extract_osds(node.children||[], levels, osd_level, osds);
}
}
return osds;
}
// ordered = don't treat (x,y) and (y,x) as equal
function random_combinations(osd_tree, pg_size, count, ordered)
{
let seed = 0x5f020e43;
let rng = () =>
{
seed ^= seed << 13;
seed ^= seed >> 17;
seed ^= seed << 5;
return seed + 2147483648;
};
const osds = Object.keys(osd_tree).reduce((a, c) => { a[c] = Object.keys(osd_tree[c]).sort(); return a; }, {});
const hosts = Object.keys(osd_tree).sort().filter(h => osds[h].length > 0);
const r = {};
// Generate random combinations including each OSD at least once
for (let h = 0; h < hosts.length; h++)
{
for (let o = 0; o < osds[hosts[h]].length; o++)
{
const pg = [ osds[hosts[h]][o] ];
const cur_hosts = [ ...hosts ];
cur_hosts.splice(h, 1);
for (let i = 1; i < pg_size && i < hosts.length; i++)
{
const next_host = rng() % cur_hosts.length;
const next_osd = rng() % osds[cur_hosts[next_host]].length;
pg.push(osds[cur_hosts[next_host]][next_osd]);
cur_hosts.splice(next_host, 1);
}
const cyclic_pgs = [ pg ];
if (ordered)
{
for (let i = 1; i < pg.size; i++)
{
cyclic_pgs.push([ ...pg.slice(i), ...pg.slice(0, i) ]);
}
}
for (const pg of cyclic_pgs)
{
while (pg.length < pg_size)
{
pg.push(NO_OSD);
}
r['pg_'+pg.join('_')] = pg;
}
}
}
// Generate purely random combinations
while (count > 0)
{
let host_idx = [];
const cur_hosts = [ ...hosts.map((h, i) => i) ];
const max_hosts = pg_size < hosts.length ? pg_size : hosts.length;
if (ordered)
{
for (let i = 0; i < max_hosts; i++)
{
const r = rng() % cur_hosts.length;
host_idx[i] = cur_hosts[r];
cur_hosts.splice(r, 1);
}
}
else
{
for (let i = 0; i < max_hosts; i++)
{
const r = rng() % (cur_hosts.length - (max_hosts - i - 1));
host_idx[i] = cur_hosts[r];
cur_hosts.splice(0, r+1);
}
}
let pg = host_idx.map(h => osds[hosts[h]][rng() % osds[hosts[h]].length]);
while (pg.length < pg_size)
{
pg.push(NO_OSD);
}
r['pg_'+pg.join('_')] = pg;
count--;
}
return r;
}
// Super-stupid algorithm. Given the current OSD tree, generate all possible OSD combinations
// osd_tree = { failure_domain1: { osd1: size1, ... }, ... }
// ordered = return combinations without duplicates having different order
function all_combinations(osd_tree, pg_size, ordered, count)
{
const hosts = Object.keys(osd_tree).sort();
const osds = Object.keys(osd_tree).reduce((a, c) => { a[c] = Object.keys(osd_tree[c]).sort(); return a; }, {});
while (hosts.length < pg_size)
{
osds[NO_OSD] = [ NO_OSD ];
hosts.push(NO_OSD);
}
let host_idx = [];
let osd_idx = [];
for (let i = 0; i < pg_size; i++)
{
host_idx.push(i);
osd_idx.push(0);
}
const r = [];
while (!count || count < 0 || r.length < count)
{
r.push(host_idx.map((hi, i) => osds[hosts[hi]][osd_idx[i]]));
let inc = pg_size-1;
while (inc >= 0)
{
osd_idx[inc]++;
if (osd_idx[inc] >= osds[hosts[host_idx[inc]]].length)
{
osd_idx[inc] = 0;
inc--;
}
else
{
break;
}
}
if (inc < 0)
{
// no osds left in the current host combination, select the next one
inc = pg_size-1;
same_again: while (inc >= 0)
{
host_idx[inc]++;
for (let prev_host = 0; prev_host < inc; prev_host++)
{
if (host_idx[prev_host] == host_idx[inc])
{
continue same_again;
}
}
if (host_idx[inc] < (ordered ? hosts.length-(pg_size-1-inc) : hosts.length))
{
while ((++inc) < pg_size)
{
host_idx[inc] = (ordered ? host_idx[inc-1]+1 : 0);
}
break;
}
else
{
inc--;
}
}
if (inc < 0)
{
break;
}
}
}
return r;
}
function pg_weights_space_efficiency(weights, pg_count, osd_sizes)
{
const per_osd = {};
@ -752,11 +548,8 @@ module.exports = {
pg_weights_space_efficiency,
pg_list_space_efficiency,
pg_per_osd_space_efficiency,
flatten_tree,
lp_solve,
make_int_pgs,
align_pgs,
random_combinations,
all_combinations,
};

View File

@ -6,6 +6,8 @@ const http = require('http');
const crypto = require('crypto');
const os = require('os');
const WebSocket = require('ws');
const { RuleCombinator, parse_level_indexes, parse_pg_dsl } = require('./dsl_pgs.js');
const { SimpleCombinator } = require('./simple_pgs.js');
const LPOptimizer = require('./lp-optimizer.js');
const stableStringify = require('./stable-stringify.js');
const PGUtil = require('./PGUtil.js');
@ -63,6 +65,7 @@ const etcd_tree = {
mon_stats_timeout: 1000, // ms. min: 100
osd_out_time: 600, // seconds. min: 0
placement_levels: { datacenter: 1, rack: 2, host: 3, osd: 4, ... },
use_old_pg_combinator: false,
// client and osd
tcp_header_buffer_size: 65536,
use_sync_send_recv: false,
@ -185,7 +188,12 @@ const etcd_tree = {
// number of parity chunks, required for EC
parity_chunks?: 1,
pg_count: 100,
failure_domain: 'host',
// default is failure_domain=host
failure_domain?: 'host',
// additional failure domain rules; failure_domain=x is equivalent to x=123..N
level_placement?: 'dc=112233 host=123456',
raw_placement?: 'any, dc=1 host!=1, dc=1 host!=(1,2)',
old_combinator: false,
max_osd_combinations: 10000,
// block_size, bitmap_granularity, immediate_commit must match all OSDs used in that pool
block_size: 131072,
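For instance, a pool spreading pg_size=6 over three datacenters with two hosts each could be declared like this (an illustrative sketch with invented names; the level_placement value is the one from the comment above):

{
name: 'ecpool',
scheme: 'ec',
pg_size: 6, parity_chunks: 2, pg_count: 256,
level_placement: 'dc=112233 host=123456',
}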
@ -886,7 +894,7 @@ class Mon
{
// Numeric IDs are reserved for OSDs
const osd_cfg = this.state.config.osd[osd_num];
let reweight = osd_cfg && Number(osd_cfg.reweight);
let reweight = osd_cfg == null ? 1 : Number(osd_cfg.reweight);
if (reweight < 0 || isNaN(reweight))
reweight = 1;
if (this.state.osd.state[osd_num] && reweight > 0)
@ -930,7 +938,6 @@ class Mon
// Parent's level must be less than child's; OSDs must be leaves
const parent = parent_level && parent_level < node_level ? node_cfg.parent : '';
tree[parent].children.push(tree[node_id]);
delete node_cfg.parent;
}
return { up_osds, levels, osd_tree: tree };
}
@ -1096,7 +1103,6 @@ class Mon
pool_cfg.pg_minsize = Math.floor(pool_cfg.pg_minsize);
pool_cfg.parity_chunks = Math.floor(pool_cfg.parity_chunks) || undefined;
pool_cfg.pg_count = Math.floor(pool_cfg.pg_count);
pool_cfg.failure_domain = pool_cfg.failure_domain || 'host';
pool_cfg.max_osd_combinations = Math.floor(pool_cfg.max_osd_combinations) || 10000;
if (!/^[1-9]\d*$/.exec(''+pool_id))
{
@ -1176,10 +1182,32 @@ class Mon
console.log('Pool '+pool_id+' has invalid primary_affinity_tags (must be a string or array of strings)');
return false;
}
if (!this.get_pg_rules(pool_id, pool_cfg, true))
{
return false;
}
return true;
}
filter_osds_by_tags(orig_tree, flat_tree, tags)
filter_osds_by_root_node(pool_tree, root_node)
{
if (!root_node)
{
return pool_tree;
}
const cur = [ ...((pool_tree[root_node]||{}).children||[]) ];
const filtered = {};
for (let i = 0; i < cur.length; i++)
{
filtered[cur[i].id] = cur[i];
if (cur[i].children)
{
cur.splice(i+1, 0, ...cur[i].children);
}
}
return filtered;
}
filter_osds_by_tags(orig_tree, tags)
{
if (!tags)
{
@ -1187,30 +1215,22 @@ class Mon
}
for (const tag of (tags instanceof Array ? tags : [ tags ]))
{
for (const host in flat_tree)
for (const osd in orig_tree)
{
let found = 0;
for (const osd in flat_tree[host])
if (orig_tree[osd].level === 'osd' &&
(!orig_tree[osd].tags || !orig_tree[osd].tags[tag]))
{
if (!orig_tree[osd].tags || !orig_tree[osd].tags[tag])
delete flat_tree[host][osd];
else
found++;
}
if (!found)
{
delete flat_tree[host];
delete orig_tree[osd];
}
}
}
}
filter_osds_by_block_layout(flat_tree, block_size, bitmap_granularity, immediate_commit)
filter_osds_by_block_layout(orig_tree, block_size, bitmap_granularity, immediate_commit)
{
for (const host in flat_tree)
for (const osd in orig_tree)
{
let found = 0;
for (const osd in flat_tree[host])
if (orig_tree[osd].level === 'osd')
{
const osd_stat = this.state.osd.stats[osd];
if (osd_stat && (osd_stat.bs_block_size && osd_stat.bs_block_size != block_size ||
@ -1218,16 +1238,8 @@ class Mon
osd_stat.immediate_commit == 'small' && immediate_commit == 'all' ||
osd_stat.immediate_commit == 'none' && immediate_commit != 'none'))
{
delete flat_tree[host][osd];
delete orig_tree[osd];
}
else
{
found++;
}
}
if (!found)
{
delete flat_tree[host];
}
}
}
@ -1237,12 +1249,84 @@ class Mon
let aff_osds = up_osds;
if (pool_cfg.primary_affinity_tags)
{
aff_osds = { ...up_osds };
this.filter_osds_by_tags(osd_tree, { x: aff_osds }, pool_cfg.primary_affinity_tags);
aff_osds = Object.keys(up_osds).reduce((a, c) => { a[c] = osd_tree[c]; return a; }, {});
this.filter_osds_by_tags(aff_osds, pool_cfg.primary_affinity_tags);
for (const osd in aff_osds)
{
aff_osds[osd] = true;
}
}
return aff_osds;
}
get_pg_rules(pool_id, pool_cfg, warn)
{
if (pool_cfg.level_placement)
{
const pg_size = (0|pool_cfg.pg_size);
let rules = pool_cfg.level_placement;
if (typeof rules === 'string')
{
rules = rules.split(/\s+/).map(s => s.split(/=/, 2)).reduce((a, c) => { a[c[0]] = c[1]; return a; }, {});
}
else
{
rules = { ...rules };
}
// Always add failure_domain to prevent rules from being totally incorrect
const all_diff = [];
for (let i = 1; i <= pg_size; i++)
{
all_diff.push(i);
}
rules[pool_cfg.failure_domain || 'host'] = all_diff;
const levels = this.config.placement_levels||{};
levels.host = levels.host || 100;
levels.osd = levels.osd || 101;
for (const k in rules)
{
if (!levels[k] || typeof rules[k] !== 'string' &&
(!(rules[k] instanceof Array) ||
rules[k].filter(s => typeof s !== 'string' && typeof s !== 'number').length > 0))
{
if (warn)
console.log('Pool '+pool_id+' configuration is invalid: level_placement should be { [level]: string | (string|number)[] }');
return null;
}
else if (rules[k].length != pg_size)
{
if (warn)
console.log('Pool '+pool_id+' configuration is invalid: values in level_placement should contain exactly pg_size ('+pg_size+') items');
return null;
}
}
return parse_level_indexes(rules);
}
else if (typeof pool_cfg.raw_placement === 'string')
{
try
{
return parse_pg_dsl(pool_cfg.raw_placement);
}
catch (e)
{
if (warn)
console.log('Pool '+pool_id+' configuration is invalid: invalid raw_placement: '+e.message);
}
}
else
{
let rules = [ [] ];
let prev = [ 1 ];
for (let i = 1; i < pool_cfg.pg_size; i++)
{
rules.push([ [ pool_cfg.failure_domain||'host', '!=', prev ] ]);
prev = [ ...prev, i+1 ];
}
return rules;
}
}
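For reference, a pool with neither level_placement nor raw_placement, pg_size=3 and the default failure_domain=host gets this rule list from the fallback branch (first chunk unconstrained, each next one on a host different from all previous ones):

// get_pg_rules() fallback result for pg_size=3, failure_domain=host:
[ [],
[ [ 'host', '!=', [ 1 ] ] ],
[ [ 'host', '!=', [ 1, 2 ] ] ] ]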
async generate_pool_pgs(pool_id, osd_tree, levels)
{
const pool_cfg = this.state.config.pools[pool_id];
@ -1250,10 +1334,9 @@ class Mon
{
return null;
}
let pool_tree = osd_tree[pool_cfg.root_node || ''];
pool_tree = pool_tree ? pool_tree.children : [];
pool_tree = LPOptimizer.flatten_tree(pool_tree, levels, pool_cfg.failure_domain, 'osd');
this.filter_osds_by_tags(osd_tree, pool_tree, pool_cfg.osd_tags);
let pool_tree = this.filter_osds_by_root_node(osd_tree, pool_cfg.root_node);
this.filter_osds_by_tags(pool_tree, pool_cfg.osd_tags);
this.filter_osds_by_block_layout(
pool_tree,
pool_cfg.block_size || this.config.block_size || 131072,
@ -1276,11 +1359,15 @@ class Mon
}
const old_pg_count = prev_pgs.length;
const optimize_cfg = {
osd_tree: pool_tree,
osd_weights: Object.values(pool_tree).filter(item => item.level === 'osd').reduce((a, c) => { a[c.id] = c.size; return a; }, {}),
combinator: !this.config.use_old_pg_combinator || pool_cfg.level_placement || pool_cfg.raw_placement
// new algorithm:
? new RuleCombinator(pool_tree, this.get_pg_rules(pool_id, pool_cfg), pool_cfg.max_osd_combinations)
// old algorithm:
: new SimpleCombinator(flatten_tree(pool_tree, levels, pool_cfg.failure_domain || 'host', 'osd'), pool_cfg.pg_size, pool_cfg.max_osd_combinations),
pg_count: pool_cfg.pg_count,
pg_size: pool_cfg.pg_size,
pg_minsize: pool_cfg.pg_minsize,
max_combinations: pool_cfg.max_osd_combinations,
ordered: pool_cfg.scheme != 'replicated',
};
let optimize_result;
@ -1349,8 +1436,8 @@ class Mon
// Something has changed
console.log('Pool configuration or OSD tree changed, re-optimizing');
// First re-optimize PGs, but don't look at history yet
const optimize_results = await Promise.all(Object.keys(this.state.config.pools)
.map(pool_id => this.generate_pool_pgs(pool_id, osd_tree, levels)));
const optimize_results = (await Promise.all(Object.keys(this.state.config.pools)
.map(pool_id => this.generate_pool_pgs(pool_id, osd_tree, levels)))).filter(r => r);
// Then apply the modification in the form of an optimistic transaction,
// each time considering new pg/history modifications (OSDs modify it during rebalance)
while (!await this.apply_pool_pgs(optimize_results, up_osds, osd_tree, tree_hash))
@ -1436,6 +1523,10 @@ class Mon
async apply_pool_pgs(results, up_osds, osd_tree, tree_hash)
{
if (!results.length)
{
return true;
}
for (const pool_id in (this.state.config.pgs||{}).items||{})
{
// We should stop all PGs when deleting a pool or changing its PG count

mon/murmur3.js (new file, 38 lines)

@ -0,0 +1,38 @@
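// Pick the index in [0, count) whose cb(i) hashes to the largest value,
// i.e. a deterministic highest-random-weight (rendezvous-style) selection.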
function select_murmur3(count, cb)
{
if (!count)
{
return null;
}
else
{
let i = 0, maxh = -1;
for (let j = 0; j < count; j++)
{
const h = murmur3(cb(j));
if (h > maxh)
{
i = j;
maxh = h;
}
}
return i;
}
}
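// Simplified murmur-style string hash (murmur2-like multiplier); all that
// matters here is a stable pseudo-random value for the same input string.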
function murmur3(s)
{
let hash = 0x12345678;
for (let i = 0; i < s.length; i++)
{
hash ^= s.charCodeAt(i);
hash = (hash*0x5bd1e995) & 0xFFFFFFFF;
hash ^= (hash >> 15);
}
return hash;
}
module.exports = {
murmur3,
select_murmur3,
};
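A small usage sketch (hypothetical key and host list): the choice is stable for identical inputs, and removing a candidate only changes the result for keys that had picked it, the usual rendezvous-hashing property.

const { select_murmur3 } = require('./murmur3.js');
const hosts = [ 'host1', 'host2', 'host3' ];
const picked = hosts[select_murmur3(hosts.length, i => 'pg_17:'+hosts[i])];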

mon/simple_pgs.js (new file, 241 lines)

@ -0,0 +1,241 @@
const { select_murmur3 } = require('./murmur3.js');
const NO_OSD = 'Z';
class SimpleCombinator
{
constructor(flat_tree, pg_size, max_combinations, ordered)
{
this.osd_tree = flat_tree;
this.pg_size = pg_size;
this.max_combinations = max_combinations;
this.ordered = ordered;
}
random_combinations()
{
return random_combinations(this.osd_tree, this.pg_size, this.max_combinations, this.ordered);
}
check_combinations(pgs)
{
return check_combinations(this.osd_tree, pgs);
}
}
// Convert multi-level osd_tree = { level: number|string, id?: string, size?: number, children?: osd_tree }[]
// levels = { string: number }
// to a two-level osd_tree suitable for all_combinations()
function flatten_tree(osd_tree, levels, failure_domain_level, osd_level, domains = {}, i = { i: 1 })
{
osd_level = levels[osd_level] || osd_level;
failure_domain_level = levels[failure_domain_level] || failure_domain_level;
for (const node of osd_tree)
{
if ((levels[node.level] || node.level) < failure_domain_level)
{
flatten_tree(node.children||[], levels, failure_domain_level, osd_level, domains, i);
}
else
{
domains['dom'+(i.i++)] = extract_osds([ node ], levels, osd_level);
}
}
return domains;
}
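A worked example with an invented tree: failure domains at level 1 (rack), OSDs at level 3, so each rack becomes one 'domN' entry with all OSDs under it merged.

flatten_tree([
{ level: 1, id: 'rack1', children: [
{ level: 2, id: 'host1', children: [ { level: 3, id: '1', size: 1 } ] },
{ level: 2, id: 'host2', children: [ { level: 3, id: '2', size: 1 } ] },
] },
], {}, 1, 3);
// -> { dom1: { '1': 1, '2': 1 } }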
function extract_osds(osd_tree, levels, osd_level, osds = {})
{
for (const node of osd_tree)
{
if ((levels[node.level] || node.level) >= osd_level)
{
osds[node.id] = node.size;
}
else
{
extract_osds(node.children||[], levels, osd_level, osds);
}
}
return osds;
}
// ordered = don't treat (x,y) and (y,x) as equal
function random_combinations(osd_tree, pg_size, count, ordered)
{
const osds = Object.keys(osd_tree).reduce((a, c) => { a[c] = Object.keys(osd_tree[c]).sort(); return a; }, {});
const hosts = Object.keys(osd_tree).sort().filter(h => osds[h].length > 0);
const r = {};
// Generate random combinations including each OSD at least once
for (let h = 0; h < hosts.length; h++)
{
for (let o = 0; o < osds[hosts[h]].length; o++)
{
const pg = [ osds[hosts[h]][o] ];
const cur_hosts = [ ...hosts ];
cur_hosts.splice(h, 1);
for (let i = 1; i < pg_size && i < hosts.length; i++)
{
const next_host = select_murmur3(cur_hosts.length, i => pg[0]+':i:'+cur_hosts[i]);
const next_osd = select_murmur3(osds[cur_hosts[next_host]].length, i => pg[0]+':i:'+osds[cur_hosts[next_host]][i]);
pg.push(osds[cur_hosts[next_host]][next_osd]);
cur_hosts.splice(next_host, 1);
}
while (pg.length < pg_size)
{
pg.push(NO_OSD);
}
r['pg_'+pg.join('_')] = pg;
}
}
// Generate purely random combinations
while (count > 0)
{
let host_idx = [];
const cur_hosts = [ ...hosts.map((h, i) => i) ];
const max_hosts = pg_size < hosts.length ? pg_size : hosts.length;
if (ordered)
{
for (let i = 0; i < max_hosts; i++)
{
const r = select_murmur3(cur_hosts.length, i => count+':h:'+cur_hosts[i]);
host_idx[i] = cur_hosts[r];
cur_hosts.splice(r, 1);
}
}
else
{
for (let i = 0; i < max_hosts; i++)
{
const r = select_murmur3(cur_hosts.length - (max_hosts - i - 1), i => count+':h:'+cur_hosts[i]);
host_idx[i] = cur_hosts[r];
cur_hosts.splice(0, r+1);
}
}
let pg = host_idx.map(h => osds[hosts[h]][select_murmur3(osds[hosts[h]].length, i => count+':o:'+osds[hosts[h]][i])]);
while (pg.length < pg_size)
{
pg.push(NO_OSD);
}
r['pg_'+pg.join('_')] = pg;
count--;
}
return r;
}
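Unlike the rng()-based version it replaces, this variant is fully deterministic, so a degenerate case can be written down by hand (hypothetical ids):

random_combinations({ h1: { 1: 1 }, h2: { 2: 1 } }, 2, 1, false);
// -> { pg_1_2: [ '1', '2' ], pg_2_1: [ '2', '1' ] }
// (the seeding pass adds one combination per OSD; the single extra
// combination re-derives pg_1_2, so no new key appears)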
// Super-stupid algorithm. Given the current OSD tree, generate all possible OSD combinations
// osd_tree = { failure_domain1: { osd1: size1, ... }, ... }
// ordered = return each combination only once, skipping duplicates that differ only in order
function all_combinations(osd_tree, pg_size, ordered, count)
{
const hosts = Object.keys(osd_tree).sort();
const osds = Object.keys(osd_tree).reduce((a, c) => { a[c] = Object.keys(osd_tree[c]).sort(); return a; }, {});
while (hosts.length < pg_size)
{
osds[NO_OSD] = [ NO_OSD ];
hosts.push(NO_OSD);
}
let host_idx = [];
let osd_idx = [];
for (let i = 0; i < pg_size; i++)
{
host_idx.push(i);
osd_idx.push(0);
}
const r = [];
while (!count || count < 0 || r.length < count)
{
r.push(host_idx.map((hi, i) => osds[hosts[hi]][osd_idx[i]]));
let inc = pg_size-1;
while (inc >= 0)
{
osd_idx[inc]++;
if (osd_idx[inc] >= osds[hosts[host_idx[inc]]].length)
{
osd_idx[inc] = 0;
inc--;
}
else
{
break;
}
}
if (inc < 0)
{
// no osds left in the current host combination, select the next one
inc = pg_size-1;
same_again: while (inc >= 0)
{
host_idx[inc]++;
for (let prev_host = 0; prev_host < inc; prev_host++)
{
if (host_idx[prev_host] == host_idx[inc])
{
continue same_again;
}
}
if (host_idx[inc] < (ordered ? hosts.length-(pg_size-1-inc) : hosts.length))
{
while ((++inc) < pg_size)
{
host_idx[inc] = (ordered ? host_idx[inc-1]+1 : 0);
}
break;
}
else
{
inc--;
}
}
if (inc < 0)
{
break;
}
}
}
return r;
}
function check_combinations(osd_tree, pgs)
{
const hosts = Object.keys(osd_tree).sort();
const host_per_osd = {};
for (const host in osd_tree)
{
for (const osd in osd_tree[host])
{
host_per_osd[osd] = host;
}
}
const res = [];
skip_pg: for (const pg of pgs)
{
const seen_hosts = {};
for (const osd of pg)
{
if (!host_per_osd[osd] || seen_hosts[host_per_osd[osd]])
{
continue skip_pg;
}
seen_hosts[host_per_osd[osd]] = true;
}
res.push(pg);
}
return res;
}
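Illustration (hypothetical ids): PGs that reuse a failure domain or reference an unknown OSD are dropped.

check_combinations(
{ h1: { 1: 1 }, h2: { 2: 1, 3: 1 } },
[ [ '1', '2' ], [ '2', '3' ], [ '1', '9' ] ]
);
// -> [ [ '1', '2' ] ] ('2' and '3' share h2, and '9' is unknown)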
function compat(params)
{
return {
...params,
osd_weights: Object.assign({}, ...Object.values(params.osd_tree)),
combinator: new SimpleCombinator(params.osd_tree, params.pg_size, params.max_combinations||10000),
};
}
module.exports = {
flatten_tree,
SimpleCombinator,
compat,
NO_OSD,
};


@ -7,6 +7,7 @@
// This leads to really uneven OSD fill ratio in Ceph even when PGs are perfectly balanced.
// But we support this case with the "parity_space" parameter in optimize_initial()/optimize_change().
const { SimpleCombinator } = require('./simple_pgs.js');
const LPOptimizer = require('./lp-optimizer.js');
const osd_tree = {
@ -114,16 +115,17 @@ Fine, let's try to optimize for it.
async function run()
{
const all_weights = Object.assign({}, ...Object.values(osd_tree));
const total_weight = Object.values(all_weights).reduce((a, c) => Number(a) + Number(c), 0);
const eff = LPOptimizer.pg_list_space_efficiency(prev_pgs, all_weights, 2, 2.26);
const osd_weights = Object.assign({}, ...Object.values(osd_tree));
const total_weight = Object.values(osd_weights).reduce((a, c) => Number(a) + Number(c), 0);
const eff = LPOptimizer.pg_list_space_efficiency(prev_pgs, osd_weights, 2, 2.26);
const orig = eff*4.26 / total_weight;
console.log('Original efficiency was: '+Math.round(orig*10000)/100+' %');
let prev = await LPOptimizer.optimize_initial({ osd_tree, pg_size: 3, pg_count: 256, parity_space: 2.26 });
const combinator = new SimpleCombinator(osd_tree, 3, 10000);
let prev = await LPOptimizer.optimize_initial({ osd_weights, combinator, pg_size: 3, pg_count: 256, parity_space: 2.26 });
LPOptimizer.print_change_stats(prev);
let next = await LPOptimizer.optimize_change({ prev_pgs, osd_tree, pg_size: 3, max_combinations: 10000, parity_space: 2.26 });
let next = await LPOptimizer.optimize_change({ prev_pgs, osd_weights, combinator, pg_size: 3, parity_space: 2.26 });
LPOptimizer.print_change_stats(next);
}


@ -1,6 +1,7 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
const { compat } = require('./simple_pgs.js');
const LPOptimizer = require('./lp-optimizer.js');
async function run()
@ -14,26 +15,26 @@ async function run()
let res;
console.log('16 PGs, size=3');
res = await LPOptimizer.optimize_initial({ osd_tree, pg_size: 3, pg_count: 16, ordered: false });
res = await LPOptimizer.optimize_initial(compat({ osd_tree, pg_size: 3, pg_count: 16, ordered: false }));
LPOptimizer.print_change_stats(res, false);
assert(res.space == 3, 'Initial distribution');
console.log('\nChange size to 2');
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2, ordered: false });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2, ordered: false }));
LPOptimizer.print_change_stats(res, false);
assert(res.space >= 3*14/16 && res.osd_differs == 0, 'Redistribution');
console.log('\nRemove OSD 3');
const no3_tree = { ...osd_tree };
delete no3_tree['300'];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: no3_tree, pg_size: 2, ordered: false });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree: no3_tree, pg_size: 2, ordered: false }));
LPOptimizer.print_change_stats(res, false);
assert(res.space == 2, 'Redistribution after OSD removal');
console.log('\n16 PGs, size=3, ordered');
res = await LPOptimizer.optimize_initial({ osd_tree, pg_size: 3, pg_count: 16, ordered: true });
res = await LPOptimizer.optimize_initial(compat({ osd_tree, pg_size: 3, pg_count: 16, ordered: true }));
LPOptimizer.print_change_stats(res, false);
assert(res.space == 3, 'Initial distribution');
console.log('\nChange size to 2, ordered');
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2, ordered: true });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2, ordered: true }));
LPOptimizer.print_change_stats(res, false);
assert(res.space >= 3*14/16 && res.osd_differs < 8, 'Redistribution');
}


@ -1,6 +1,7 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
const { compat, flatten_tree } = require('./simple_pgs.js');
const LPOptimizer = require('./lp-optimizer.js');
const crush_tree = [
@ -36,44 +37,44 @@ const crush_tree = [
] },
];
const osd_tree = LPOptimizer.flatten_tree(crush_tree, {}, 1, 3);
const osd_tree = flatten_tree(crush_tree, {}, 1, 3);
console.log(osd_tree);
async function run()
{
const cur_tree = {};
console.log('Empty tree:');
let res = await LPOptimizer.optimize_initial({ osd_tree: cur_tree, pg_size: 3, pg_count: 256 });
let res = await LPOptimizer.optimize_initial(compat({ osd_tree: cur_tree, pg_size: 3, pg_count: 256 }));
LPOptimizer.print_change_stats(res, false);
assert(res.space == 0);
console.log('\nAdding 1st failure domain:');
cur_tree['dom1'] = osd_tree['dom1'];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 }));
LPOptimizer.print_change_stats(res, false);
assert(res.space == 12 && res.total_space == 12);
console.log('\nAdding 2nd failure domain:');
cur_tree['dom2'] = osd_tree['dom2'];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 }));
LPOptimizer.print_change_stats(res, false);
assert(res.space == 24 && res.total_space == 24);
console.log('\nAdding 3rd failure domain:');
cur_tree['dom3'] = osd_tree['dom3'];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 }));
LPOptimizer.print_change_stats(res, false);
assert(res.space == 36 && res.total_space == 36);
console.log('\nRemoving 3rd failure domain:');
delete cur_tree['dom3'];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 }));
LPOptimizer.print_change_stats(res, false);
assert(res.space == 24 && res.total_space == 24);
console.log('\nRemoving 2nd failure domain:');
delete cur_tree['dom2'];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 }));
LPOptimizer.print_change_stats(res, false);
assert(res.space == 12 && res.total_space == 12);
console.log('\nRemoving 1st failure domain:');
delete cur_tree['dom1'];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree: cur_tree, pg_size: 3 }));
LPOptimizer.print_change_stats(res, false);
assert(res.space == 0);
}


@ -1,6 +1,7 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
const { compat } = require('./simple_pgs.js');
const LPOptimizer = require('./lp-optimizer.js');
const osd_tree = {
@ -20,13 +21,13 @@ async function run()
{
let res;
console.log('256 PGs, 3+3 OSDs, size=2');
res = await LPOptimizer.optimize_initial({ osd_tree, pg_size: 2, pg_count: 256 });
res = await LPOptimizer.optimize_initial(compat({ osd_tree, pg_size: 2, pg_count: 256 }));
LPOptimizer.print_change_stats(res, false);
// Should NOT fail with the "unfeasible or unbounded" exception
console.log('\nRemoving osd.2');
delete osd_tree[100][2];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2 }));
LPOptimizer.print_change_stats(res, false);
}


@ -1,6 +1,7 @@
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
const { compat, flatten_tree } = require('./simple_pgs.js');
const LPOptimizer = require('./lp-optimizer.js');
const osd_tree = {
@ -84,31 +85,31 @@ async function run()
// Space efficiency is ~99% in all cases.
console.log('256 PGs, size=2');
res = await LPOptimizer.optimize_initial({ osd_tree, pg_size: 2, pg_count: 256 });
res = await LPOptimizer.optimize_initial(compat({ osd_tree, pg_size: 2, pg_count: 256 }));
LPOptimizer.print_change_stats(res, false);
console.log('\nAdding osd.8');
osd_tree[500][8] = 3.58589;
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2 }));
LPOptimizer.print_change_stats(res, false);
console.log('\nRemoving osd.8');
delete osd_tree[500][8];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree, pg_size: 2 }));
LPOptimizer.print_change_stats(res, false);
console.log('\n256 PGs, size=3');
res = await LPOptimizer.optimize_initial({ osd_tree, pg_size: 3, pg_count: 256 });
res = await LPOptimizer.optimize_initial(compat({ osd_tree, pg_size: 3, pg_count: 256 }));
LPOptimizer.print_change_stats(res, false);
console.log('\nAdding osd.8');
osd_tree[500][8] = 3.58589;
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 3 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree, pg_size: 3 }));
LPOptimizer.print_change_stats(res, false);
console.log('\nRemoving osd.8');
delete osd_tree[500][8];
res = await LPOptimizer.optimize_change({ prev_pgs: res.int_pgs, osd_tree, pg_size: 3 });
res = await LPOptimizer.optimize_change(compat({ prev_pgs: res.int_pgs, osd_tree, pg_size: 3 }));
LPOptimizer.print_change_stats(res, false);
console.log('\n256 PGs, size=3, failure domain=rack');
res = await LPOptimizer.optimize_initial({ osd_tree: LPOptimizer.flatten_tree(crush_tree, {}, 1, 3), pg_size: 3, pg_count: 256 });
res = await LPOptimizer.optimize_initial(compat({ osd_tree: flatten_tree(crush_tree, {}, 1, 3), pg_size: 3, pg_count: 256 }));
LPOptimizer.print_change_stats(res, false);
}

mon/test-parse-dsl.js (new file, 118 lines)

@ -0,0 +1,118 @@
const { random_custom_combinations, index_tree, parse_level_indexes, parse_pg_dsl } = require('./dsl_pgs.js');
function check(result, expected)
{
console.dir(result, { depth: null });
if (JSON.stringify(result) !== JSON.stringify(expected))
{
process.stderr.write('Unexpected value, expected: ');
console.dir(expected, { depth: null });
process.exit(1);
}
}
check(
parse_pg_dsl("any, dc=1 host!=1, dc!=1, dc=3 host!=3, dc!=(1,3), dc=5 host!=5"),
[
[],
[ [ 'dc', '=', 1 ], [ 'host', '!=', 1 ] ],
[ [ 'dc', '!=', 1 ] ],
[ [ 'dc', '=', 3 ], [ 'host', '!=', 3 ] ],
[ [ 'dc', '!=', [ 1, 3 ] ] ],
[ [ 'dc', '=', 5 ], [ 'host', '!=', 5 ] ],
]
);
check(
parse_pg_dsl("dc=meow, dc!=1, dc>2"),
[
[ [ 'dc', '=', { id: 'meow' } ] ],
[ [ 'dc', '!=', 1 ] ],
[ [ 'dc', '>', 2 ] ],
]
);
check(
parse_level_indexes({ dc: '112233', host: 'ABCDEF' }),
[
[],
[ [ 'dc', '=', 1 ], [ 'host', '!=', [ 1 ] ] ],
[ [ 'dc', '!=', [ 1 ] ], [ 'host', '!=', [ 1, 2 ] ] ],
[ [ 'dc', '=', 3 ], [ 'host', '!=', [ 1, 2, 3 ] ] ],
[ [ 'dc', '!=', [ 1, 3 ] ], [ 'host', '!=', [ 1, 2, 3, 4 ] ] ],
[ [ 'dc', '=', 5 ], [ 'host', '!=', [ 1, 2, 3, 4, 5 ] ] ],
]
);
check(
parse_level_indexes({ dc: '112233', host: 'ABCDEF' }, [ 'dc', 'host' ]),
[
[],
[ [ 'dc', '=', 1 ], [ 'host', '!=', [ 1 ] ] ],
[ [ 'dc', '!=', [ 1 ] ] ],
[ [ 'dc', '=', 3 ], [ 'host', '!=', [ 3 ] ] ],
[ [ 'dc', '!=', [ 1, 3 ] ] ],
[ [ 'dc', '=', 5 ], [ 'host', '!=', [ 5 ] ] ],
]
);
check(
parse_level_indexes({ dc: '112211223333', host: '123456789ABC' }),
[
[],
[ [ 'dc', '=', 1 ], [ 'host', '!=', [ 1 ] ] ],
[ [ 'dc', '!=', [ 1 ] ], [ 'host', '!=', [ 1, 2 ] ] ],
[ [ 'dc', '=', 3 ], [ 'host', '!=', [ 1, 2, 3 ] ] ],
[ [ 'dc', '=', 1 ], [ 'host', '!=', [ 1, 2, 3, 4 ] ] ],
[ [ 'dc', '=', 1 ], [ 'host', '!=', [ 1, 2, 3, 4, 5 ] ] ],
[ [ 'dc', '=', 3 ], [ 'host', '!=', [ 1, 2, 3, 4, 5, 6 ] ] ],
[ [ 'dc', '=', 3 ], [ 'host', '!=', [ 1, 2, 3, 4, 5, 6, 7 ] ] ],
[ [ 'dc', '!=', [ 1, 3 ] ], [ 'host', '!=', [ 1, 2, 3, 4, 5, 6, 7, 8 ] ] ],
[ [ 'dc', '=', 9 ], [ 'host', '!=', [ 1, 2, 3, 4, 5, 6, 7, 8, 9 ] ] ],
[ [ 'dc', '=', 9 ], [ 'host', '!=', [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ] ] ],
[ [ 'dc', '=', 9 ], [ 'host', '!=', [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ] ] ],
]
);
check(
parse_level_indexes({ dc: '112211223333', host: '123456789ABC' }, [ 'dc', 'host' ]),
[
[],
[ [ 'dc', '=', 1 ], [ 'host', '!=', [ 1 ] ] ],
[ [ 'dc', '!=', [ 1 ] ] ],
[ [ 'dc', '=', 3 ], [ 'host', '!=', [ 3 ] ] ],
[ [ 'dc', '=', 1 ], [ 'host', '!=', [ 1, 2 ] ] ],
[ [ 'dc', '=', 1 ], [ 'host', '!=', [ 1, 2, 5 ] ] ],
[ [ 'dc', '=', 3 ], [ 'host', '!=', [ 3, 4 ] ] ],
[ [ 'dc', '=', 3 ], [ 'host', '!=', [ 3, 4, 7 ] ] ],
[ [ 'dc', '!=', [ 1, 3 ] ] ],
[ [ 'dc', '=', 9 ], [ 'host', '!=', [ 9 ] ] ],
[ [ 'dc', '=', 9 ], [ 'host', '!=', [ 9, 10 ] ] ],
[ [ 'dc', '=', 9 ], [ 'host', '!=', [ 9, 10, 11 ] ] ]
]
);
check(
Object.keys(random_custom_combinations(index_tree([
{ id: '1', size: 1, level: 'osd' },
{ id: '2', size: 2, level: 'osd' },
{ id: '3', size: 3, level: 'osd' }
]), parse_level_indexes({ osd: '12' }), 10000)).sort(),
[ 'pg_1_2', 'pg_1_3', 'pg_2_3' ]
);
check(
Object.keys(random_custom_combinations(index_tree([
{ id: 'h1', level: 'host' },
{ id: 'h2', level: 'host' },
{ id: 'h3', level: 'host' },
{ id: '1', size: 1, level: 'osd', parent: 'h1' },
{ id: '2', size: 1, level: 'osd', parent: 'h2' },
{ id: '3', size: 1, level: 'osd', parent: 'h2' },
{ id: '4', size: 1, level: 'osd', parent: 'h3' },
{ id: '5', size: 1, level: 'osd', parent: 'h3' },
]), parse_level_indexes({ host: '1122', osd: '1234' }), 10000)).sort(),
[ 'pg_2_3_4_5' ]
);
console.log('OK');


@ -86,6 +86,8 @@ void journal_flusher_t::loop()
cur_flusher_count--;
}
}
if (trim_wanted)
co[0].try_trim = true;
for (int i = 0; (active_flushers > 0 || dequeuing || trim_wanted > 0) && i < cur_flusher_count; i++)
co[i].loop();
}
@ -364,10 +366,10 @@ resume_0:
!flusher->flush_queue.size() || !flusher->dequeuing)
{
stop_flusher:
if (flusher->trim_wanted > 0 && cur.oid.inode != 0)
if (flusher->trim_wanted > 0 && try_trim)
{
// Attempt forced trim
cur.oid = {};
try_trim = false;
flusher->active_flushers++;
goto trim_journal;
}
@ -375,6 +377,7 @@ stop_flusher:
wait_state = 0;
return true;
}
try_trim = true;
cur.oid = flusher->flush_queue.front();
cur.version = flusher->flush_versions[cur.oid];
flusher->flush_queue.pop_front();


@ -60,6 +60,7 @@ class journal_flusher_co
std::map<object_id, uint64_t>::iterator repeat_it;
std::function<void(ring_data_t*)> simple_callback_r, simple_callback_rj, simple_callback_w;
bool try_trim = false;
bool skip_copy, has_delete, has_writes;
std::vector<copy_buffer_t> v;
std::vector<copy_buffer_t>::iterator it;


@ -177,6 +177,7 @@ void blockstore_impl_t::prepare_journal_sector_write(int cur_sector, blockstore_
ring_data_t *data = ((ring_data_t*)sqe->user_data);
journal.sector_info[cur_sector].written = true;
journal.sector_info[cur_sector].submit_id = ++journal.submit_id;
assert(journal.submit_id != 0); // check overflow
journal.submitting_sectors.push_back(cur_sector);
journal.sector_info[cur_sector].flush_count++;
data->iov = (struct iovec){
@ -192,8 +193,8 @@ void blockstore_impl_t::prepare_journal_sector_write(int cur_sector, blockstore_
}
journal.sector_info[cur_sector].dirty = false;
// But always remember that this operation has to wait until this exact journal write is finished
journal.flushing_ops.insert((pending_journaling_t){
.flush_id = journal.sector_info[cur_sector].submit_id,
journal.flushing_ops.emplace(journal.sector_info[cur_sector].submit_id, (pending_journaling_t){
.pending = 1,
.sector = cur_sector,
.op = op,
});
@ -213,23 +214,43 @@ void blockstore_impl_t::handle_journal_write(ring_data_t *data, uint64_t flush_i
// FIXME: our state becomes corrupted after a write error. maybe do something better than just die
disk_error_abort("journal write", data->res, data->iov.iov_len);
}
auto fl_it = journal.flushing_ops.upper_bound((pending_journaling_t){ .flush_id = flush_id });
if (fl_it != journal.flushing_ops.end() && fl_it->flush_id == flush_id)
auto fl_it = journal.flushing_ops.lower_bound(flush_id);
if (fl_it != journal.flushing_ops.end() && fl_it->first == flush_id && fl_it->second.sector >= 0)
{
journal.sector_info[fl_it->sector].flush_count--;
journal.sector_info[fl_it->second.sector].flush_count--;
}
while (fl_it != journal.flushing_ops.end() && fl_it->flush_id == flush_id)
auto is_first = fl_it == journal.flushing_ops.begin();
while (fl_it != journal.flushing_ops.end())
{
auto priv = PRIV(fl_it->op);
priv->pending_ops--;
assert(priv->pending_ops >= 0);
if (priv->pending_ops == 0)
bool del = false;
if (fl_it->first == flush_id)
{
release_journal_sectors(fl_it->op);
priv->op_state++;
ringloop->wakeup();
fl_it->second.pending = 0;
del = is_first;
}
else
{
del = !fl_it->second.pending;
}
if (del)
{
// Do not complete this operation if previous writes are unfinished
// Otherwise also complete following operations waiting for this one
auto priv = PRIV(fl_it->second.op);
priv->pending_ops--;
assert(priv->pending_ops >= 0);
if (priv->pending_ops == 0)
{
release_journal_sectors(fl_it->second.op);
priv->op_state++;
ringloop->wakeup();
}
journal.flushing_ops.erase(fl_it++);
}
else
{
fl_it++;
}
journal.flushing_ops.erase(fl_it++);
}
}


@ -156,16 +156,11 @@ struct journal_sector_info_t
struct pending_journaling_t
{
uint64_t flush_id;
int pending;
int sector;
blockstore_op_t *op;
};
inline bool operator < (const pending_journaling_t & a, const pending_journaling_t & b)
{
return a.flush_id < b.flush_id || a.flush_id == b.flush_id && a.op < b.op;
}
struct journal_t
{
int fd;
@ -191,7 +186,7 @@ struct journal_t
int cur_sector = 0;
int in_sector_pos = 0;
std::vector<int> submitting_sectors;
std::set<pending_journaling_t> flushing_ops;
std::multimap<uint64_t, pending_journaling_t> flushing_ops;
uint64_t submit_id = 0;
// Used sector map


@ -427,7 +427,6 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
);
write_iodepth++;
// Got SQEs. Prepare previous journal sector write if required
auto cb = [this, op](ring_data_t *data) { handle_write_event(data, op); };
if (immediate_commit == IMMEDIATE_NONE &&
!journal.entry_fits(sizeof(journal_entry_small_write) + dyn_size))
{
@ -503,7 +502,15 @@ int blockstore_impl_t::dequeue_write(blockstore_op_t *op)
}
BS_SUBMIT_GET_SQE(sqe2, data2);
data2->iov = (struct iovec){ op->buf, op->len };
data2->callback = cb;
++journal.submit_id;
assert(journal.submit_id != 0); // check overflow
// Make subsequent journal writes wait for our data write
journal.flushing_ops.emplace(journal.submit_id, (pending_journaling_t){
.pending = 1,
.sector = -1,
.op = op,
});
data2->callback = [this, flush_id = journal.submit_id](ring_data_t *data) { handle_journal_write(data, flush_id); };
my_uring_prep_writev(
sqe2, dsk.journal_fd, &data2->iov, 1, journal.offset + journal.next_free
);


@ -129,6 +129,8 @@ static const char* help_text =
" --block_size 128k Put pool only on OSDs with this data block size\n"
" --bitmap_granularity 4k Put pool only on OSDs with this logical sector size\n"
" --immediate_commit none Put pool only on OSDs with this or larger immediate_commit (none < small < all)\n"
" --level_placement <rules> Use additional failure domain rules (example: \"dc=112233\")\n"
" --raw_placement <rules> Specify raw PG generation rules (see documentation for details)\n"
" --primary_affinity_tags tags Prefer to put primary copies on OSDs with all specified tags\n"
" --scrub_interval <time> Enable regular scrubbing for this pool. Format: number + unit s/m/h/d/M/y\n"
" --used_for_fs <name> Mark pool as used for VitastorFS with metadata in image <name>\n"
@ -145,6 +147,7 @@ static const char* help_text =
" [-s|--pg_size <number>] [--pg_minsize <number>] [-n|--pg_count <count>]\n"
" [--failure_domain <level>] [--root_node <node>] [--osd_tags <tags>] [--used_for_fs <name>]\n"
" [--max_osd_combinations <number>] [--primary_affinity_tags <tags>] [--scrub_interval <time>]\n"
" [--level_placement <rules>] [--raw_placement <rules>]\n"
" Non-modifiable parameters (changing them WILL lead to data loss):\n"
" [--block_size <size>] [--bitmap_granularity <size>]\n"
" [--immediate_commit <all|small|none>] [--pg_stripe_size <size>]\n"
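A hypothetical invocation combining the new flags with the existing ones:

vitastor-cli modify-pool testpool --level_placement "dc=112233 host=123456"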


@ -82,9 +82,38 @@ std::string validate_pool_config(json11::Json::object & new_cfg, json11::Json ol
value = value.uint64_value();
}
else if (key == "name" || key == "scheme" || key == "immediate_commit" ||
key == "failure_domain" || key == "root_node" || key == "scrub_interval" || key == "used_for_fs")
key == "failure_domain" || key == "root_node" || key == "scrub_interval" || key == "used_for_fs" ||
key == "raw_placement")
{
// OK
if (!value.is_string())
{
return key+" must be a string";
}
}
else if (key == "level_placement")
{
// level=rule, level=rule, ...
if (!value.is_object())
{
json11::Json::object obj;
for (auto & item: explode(",", value.string_value(), true))
{
auto pair = explode("=", item, true);
if (pair.size() >= 2)
{
obj[pair[0]] = pair[1];
}
}
if (obj.size())
{
value = obj;
}
else
{
new_cfg.erase(kv_it++);
continue;
}
}
}
else if (key == "osd_tags" || key == "primary_affinity_tags")
{
@ -184,6 +213,38 @@ std::string validate_pool_config(json11::Json::object & new_cfg, json11::Json ol
return "PG size can't be greater than 256";
}
// PG rules
if (!cfg["level_placement"].is_null())
{
for (auto & lr: cfg["level_placement"].object_items())
{
int len = 0;
if (lr.second.is_array())
{
for (auto & lri: lr.second.array_items())
{
if (!lri.is_string() && !lri.is_number())
{
return "--level_placement contains an array with non-scalar value: "+lri.dump();
}
}
len = lr.second.array_items().size();
}
else if (!lr.second.is_string())
{
return "--level_placement contains a non-array and non-string value: "+lr.second.dump();
}
else
{
len = lr.second.string_value().size();
}
if (len != pg_size)
{
return "values in --level_placement should be exactly pg_size ("+std::to_string(pg_size)+") long";
}
}
}
// parity_chunks
uint64_t parity_chunks = 1;
if (scheme == POOL_SCHEME_EC)


@ -48,7 +48,7 @@ start_etcd()
--advertise-client-urls http://$ETCD_IP:$((ETCD_PORT+2*i-2)) --listen-client-urls http://$ETCD_IP:$((ETCD_PORT+2*i-2)) \
--initial-advertise-peer-urls http://$ETCD_IP:$((ETCD_PORT+2*i-1)) --listen-peer-urls http://$ETCD_IP:$((ETCD_PORT+2*i-1)) \
--initial-cluster-token vitastor-tests-etcd --initial-cluster-state new \
--initial-cluster "$ETCD_CLUSTER" \
--initial-cluster "$ETCD_CLUSTER" --max-request-bytes=104857600 \
--max-txn-ops=100000 --auto-compaction-retention=10 --auto-compaction-mode=revision &>./testdata/etcd$i.log &
eval ETCD${i}_PID=$!
}