parent a7bb9da78a
commit ba0e8df3a7
@@ -4,19 +4,56 @@ import (
 	"fmt"
 	"log"
 	"math/rand"
+	"sync"
 	"time"
 )

 //future feature
-func makepreudorandom() {
-	a := make([]int, 0, 4096/4)
-	for i := 0; i < 4096; i += 4 {
-		a = append(a, i)
+func makeoffsets(threads int64, bs int64, objsize int64) [][]int64 {
+	var offsets [][]int64
+	for i := int64(0); i < threads; i++ {
+		s1 := rand.NewSource(i)
+		r1 := rand.New(s1)
+		localoffsets := make([]int64, 0, objsize-bs)
+		for i := int64(0); i < objsize-bs; i += bs {
+			localoffsets = append(localoffsets, i)
+		}
+		r1.Shuffle(len(localoffsets), func(i, j int) {
+			localoffsets[i], localoffsets[j] = localoffsets[j], localoffsets[i]
+		})
+		offsets = append(offsets, localoffsets)
 	}
-	rand.Shuffle(len(a), func(i, j int) {
-		a[i], a[j] = a[j], a[i]
-	})
-	fmt.Println(a)
+	return offsets
+}
+
+func bench(cephconn *Cephconnection, osddevice Device, host string, buffs *[][]byte, offset [][]int64, params *Params,
+	wg *sync.WaitGroup, ready chan bool, result chan string, allready uint64) {
+	var nwg sync.WaitGroup
+	tready := make(chan bool, params.threadsCount)
+	start := uint64(0)
+	defer wg.Done()
+	for i := int64(0); i < params.threadsCount; i++ {
+		nwg.Add(1)
+		go _bench(cephconn, osddevice, host, buffs, offset[i], params, &nwg, i, tready, result, &start)
+		if params.parallel != true {
+			nwg.Wait()
+		}
+	}
+	nwg.Wait()
+}
+
+func _bench(cephconn *Cephconnection, osddevice Device, host string, buffs *[][]byte, offset []int64, params *Params,
+	wg *sync.WaitGroup, i int64, ready chan bool, result chan string, start *uint64) {
+	defer wg.Done()
+	time.Sleep(time.Second * time.Duration(i)) // prepare objects
+	ready <- true
+	for {
+		if *start == 1 {
+			break
+		}
+	}
+	log.Println(host, i, osddevice.Name) //somework
+	result <- fmt.Sprintf("Host: %v\nOsdname: %v", host, osddevice.Name)
 }

 func main() {
@@ -28,7 +65,7 @@ func main() {
 	time.Sleep(time.Millisecond * 100)

 	var buffs [][]byte
-	for i := 0; i < 2*params.threadsCount; i++ {
+	for i := int64(0); i < 2*params.threadsCount; i++ {
 		buffs = append(buffs, make([]byte, params.blocksize))
 	}
 	for num := range buffs {
@@ -37,6 +74,23 @@ func main() {
 			log.Fatalln(err)
 		}
 	}
-	GetOsds(cephconn, params)
+	osddevices := GetOsds(cephconn, params)
+	offsets := makeoffsets(params.threadsCount, params.blocksize, params.objectsize)
+
+	var wg sync.WaitGroup
+	var allready uint64
+	var ready chan bool
+	var result chan string
+	for host, osds := range osddevices {
+		for _, osd := range osds {
+			wg.Add(1)
+			if params.parallel == true {
+				go bench(cephconn, osd, host, &buffs, offsets, &params, &wg, ready, result, allready)
+			} else {
+				bench(cephconn, osd, host, &buffs, offsets, &params, &wg, ready, result, allready)
+			}
+		}
+	}
+	wg.Wait()
 }
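A note on the new offset generation above: the following is a self-contained sketch of the same technique, not code from this commit. Each worker thread gets its own rand source seeded with the thread index, so the shuffled, block-aligned offsets are reproducible across runs while still differing between threads; the package wrapper and the sample sizes are illustrative only.

// Standalone sketch of per-thread deterministic offset shuffling.
package main

import (
	"fmt"
	"math/rand"
)

// makeoffsets builds one shuffled list of block-aligned offsets per thread.
// Seeding each thread's source with its index keeps runs reproducible.
func makeoffsets(threads, bs, objsize int64) [][]int64 {
	var offsets [][]int64
	for t := int64(0); t < threads; t++ {
		r := rand.New(rand.NewSource(t)) // deterministic per-thread source
		local := make([]int64, 0, objsize/bs)
		for off := int64(0); off < objsize-bs; off += bs {
			local = append(local, off)
		}
		r.Shuffle(len(local), func(i, j int) {
			local[i], local[j] = local[j], local[i]
		})
		offsets = append(offsets, local)
	}
	return offsets
}

func main() {
	// 2 threads, 4 KiB blocks inside a 16 KiB object.
	for t, offs := range makeoffsets(2, 4096, 16384) {
		fmt.Println("thread", t, "offsets", offs)
	}
}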
flags.go (23 changed lines)

@@ -17,6 +17,10 @@ func Route() Params {
 		"Block size in format KB = K = KiB = 1024 MB = M = MiB = 1024 * K GB = G = GiB = 1024 * M TB = T = TiB = 1024 * G")
 	gnuflag.StringVar(&params.bs, "s", "4K",
 		"Block size in format KB = K = KiB = 1024 MB = M = MiB = 1024 * K GB = G = GiB = 1024 * M TB = T = TiB = 1024 * G")
+	gnuflag.StringVar(&params.os, "objectsize", "4M",
+		"Object size in format KB = K = KiB = 1024 MB = M = MiB = 1024 * K GB = G = GiB = 1024 * M TB = T = TiB = 1024 * G")
+	gnuflag.StringVar(&params.os, "o", "4M",
+		"Object size in format KB = K = KiB = 1024 MB = M = MiB = 1024 * K GB = G = GiB = 1024 * M TB = T = TiB = 1024 * G")
 	gnuflag.StringVar(&params.user, "user", "admin",
 		"Ceph user (cephx)")
 	gnuflag.StringVar(&params.user, "u", "client.admin",
@@ -39,23 +43,34 @@ func Route() Params {
 		"Ceph pool")
 	gnuflag.StringVar(&params.define, "define", "",
 		"Define specifically osd or host. osd.X or ceph-host-X")
-	gnuflag.IntVar(&params.threadsCount, "threads", 1,
+	gnuflag.Int64Var(&params.threadsCount, "threads", 1,
 		"Threads count")
-	gnuflag.IntVar(&params.threadsCount, "t", 1,
+	gnuflag.Int64Var(&params.threadsCount, "t", 1,
 		"Threads count on each osd")
 	gnuflag.BoolVar(&params.parallel, "parallel", false,
 		"Do test all osd in parallel mode")
 	gnuflag.Parse(true)
-	var err error
 	if params.mode == "osd" && len(params.define) != 0 {
 		if i := strings.HasPrefix(params.define, "osd."); i != true {
 			log.Fatalln("Define correct osd in format osd.X")
 		}
 	}
-	params.blocksize, err = bytefmt.ToBytes(params.bs)
+	blocksize, err := bytefmt.ToBytes(params.bs)
+	params.blocksize = int64(blocksize)
 	if err != nil {
 		log.Println("Can't convert defined block size. 4K block size will be used")
 		params.blocksize = 4096
 	}
+	//uint64(params.objectsize), err = bytefmt.ToBytes(params.os)
+	objsize, err := bytefmt.ToBytes(params.os)
+	params.objectsize = int64(objsize)
+	if err != nil {
+		log.Println("Can't convert defined block size. 4K block size will be used")
+		params.objectsize = 4194304
+	}
+	if params.objectsize/params.blocksize < 2 {
+		log.Fatalf("Current block size: %v\nCurrent object size: %v\nObject size must be at least 2 times bigger than block size", params.blocksize, params.objectsize)
+	}
 	return params
 }
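For reference, a minimal sketch of the size-parsing flow added to Route(), assuming the bytefmt package is code.cloudfoundry.org/bytefmt (ToBytes parses strings like "4K" or "4M" into a uint64 byte count); the parseSizes helper and its messages are illustrative, not part of the commit.

// Sketch of parsing human-readable sizes and validating the block/object ratio.
package main

import (
	"fmt"
	"log"

	"code.cloudfoundry.org/bytefmt" // assumed import path for bytefmt.ToBytes
)

func parseSizes(bsStr, osStr string) (blocksize, objectsize int64) {
	b, err := bytefmt.ToBytes(bsStr)
	if err != nil {
		log.Println("Can't convert defined block size. 4K block size will be used")
		b = 4096
	}
	o, err := bytefmt.ToBytes(osStr)
	if err != nil {
		log.Println("Can't convert defined object size. 4M object size will be used")
		o = 4 * 1024 * 1024
	}
	blocksize, objectsize = int64(b), int64(o)
	// The object must hold at least two blocks, otherwise offset shuffling is pointless.
	if objectsize/blocksize < 2 {
		log.Fatalf("Object size (%v) must be at least 2 times bigger than block size (%v)", objectsize, blocksize)
	}
	return blocksize, objectsize
}

func main() {
	bsize, osize := parseSizes("4K", "4M")
	fmt.Println(bsize, osize) // 4096 4194304
}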
getosd.go (30 changed lines)

@@ -92,7 +92,7 @@ func GetCrushHostBuckets(buckets []Bucket, itemid int64) []Bucket {
 	return rootbuckets
 }

-func GetOsdForLocations(params Params, osdcrushdump OsdCrushDump, osddump OsdDump, poolinfo Poolinfo, osdsmetadata []OsdMetadata) map[string]BenchOsd {
+func GetOsdForLocations(params Params, osdcrushdump OsdCrushDump, osddump OsdDump, poolinfo Poolinfo, osdsmetadata []OsdMetadata) map[string][]Device {
 	var crushrule int64
 	var crushrulename string
 	for _, pool := range osddump.Pools {
@@ -112,8 +112,8 @@ func GetOsdForLocations(params Params, osdcrushdump OsdCrushDump, osddump OsdDum
 		}
 	}

-	osdhosts := make(map[string]BenchOsd)
-	var devices []Device
+	osdhosts := make(map[string][]Device)
+	//var devices []Device
 	bucketitems := GetCrushHostBuckets(osdcrushdump.Buckets, rootid)
 	if params.define != "" {
 		if strings.HasPrefix(params.define, "osd.") {
@@ -124,17 +124,13 @@ func GetOsdForLocations(params Params, osdcrushdump OsdCrushDump, osddump OsdDum
 							for _, osdmetadata := range osdsmetadata {
 								if osdmetadata.ID == device.ID {
 									device.Info = osdmetadata
+									osdhosts[hostbucket.Name] = append(osdhosts[hostbucket.Name], device)
 								}

 							}
-							devices = append(devices, device)
 						}
 					}
 				}
-				if len(devices) != 0 {
-					osdhosts[hostbucket.Name] = BenchOsd{Osds: devices}
-					devices = []Device{}
-				}
 			}
 			if len(osdhosts) == 0 {
 				log.Fatalf("Defined osd not exist in root for rule: %v pool: %v.\nYou should define osd like osd.X",
@@ -149,18 +145,14 @@ func GetOsdForLocations(params Params, osdcrushdump OsdCrushDump, osddump OsdDum
 								for _, osdmetadata := range osdsmetadata {
 									if osdmetadata.ID == device.ID {
 										device.Info = osdmetadata
+										osdhosts[hostbucket.Name] = append(osdhosts[hostbucket.Name], device)
 									}

 								}
-								devices = append(devices, device)
 							}
 						}
 					}
 				}
-				if len(devices) != 0 {
-					osdhosts[hostbucket.Name] = BenchOsd{Osds: devices}
-					devices = []Device{}
-				}
 			}
 			if len(osdhosts) == 0 {
 				log.Fatalf("Defined host not exist in root for rule: %v pool: %v", crushrulename, poolinfo.Pool)
@@ -177,14 +169,10 @@ func GetOsdForLocations(params Params, osdcrushdump OsdCrushDump, osddump OsdDum
 							}

 						}
-						devices = append(devices, device)
+						osdhosts[hostbucket.Name] = append(osdhosts[hostbucket.Name], device)
 					}
 				}
 			}
-			if len(devices) != 0 {
-				osdhosts[hostbucket.Name] = BenchOsd{Osds: devices}
-				devices = []Device{}
-			}
 		}
 		if len(osdhosts) == 0 {
 			log.Fatalf("Osd not exist in root for rule: %v pool: %v", crushrulename, poolinfo.Pool)
@@ -202,7 +190,7 @@ func ContainsPg(pgs []PlacementGroup, i int64) bool {
 	return false
 }

-func GetOsds(cephconn *Cephconnection, params Params) map[string]BenchOsd {
+func GetOsds(cephconn *Cephconnection, params Params) map[string][]Device {
 	poolinfo := GetPoolSize(cephconn, params)
 	if poolinfo.Size != 1 {
 		log.Fatalf("Pool size must be 1. Current size for pool %v is %v. Don't forget that it must be useless pool (not production). Do:\n # ceph osd pool set %v min_size 1\n # ceph osd pool set %v size 1",
@@ -213,8 +201,8 @@ func GetOsds(cephconn *Cephconnection, params Params) map[string]BenchOsd {
 	osddump := GetOsdDump(cephconn)
 	osdsmetadata := GetOsdMetadata(cephconn)
 	osddevices := GetOsdForLocations(params, crushosddump, osddump, poolinfo, osdsmetadata)
-	for _, values := range osddevices {
-		for _, item := range values.Osds {
+	for _, devices := range osddevices {
+		for _, item := range devices {
 			if exist := ContainsPg(placementGroups, item.ID); exist == false {
 				log.Fatalln("Not enough pg for test. Some osd haven't placement group at all. Increase pg_num and pgp_num")
 			}
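The getosd.go change above drops the temporary []Device slice and the BenchOsd wrapper in favour of appending straight into a map keyed by host name. A stripped-down sketch of that grouping pattern follows; the types and sample data are illustrative, not from the repository.

// Sketch of grouping discovered osd devices by host with map[string][]Device.
package main

import "fmt"

// Stripped-down stand-ins for the real types; only the grouping pattern matters.
type Device struct {
	ID   int64
	Name string
}

type hit struct {
	host   string
	device Device
}

func main() {
	// Matching osds as they would be discovered while walking the crush buckets.
	hits := []hit{
		{"ceph-host-1", Device{0, "osd.0"}},
		{"ceph-host-1", Device{1, "osd.1"}},
		{"ceph-host-2", Device{2, "osd.2"}},
	}

	// host -> devices, appended directly; append allocates each per-host slice
	// on first use, so no temporary slice or wrapper struct is needed.
	osdhosts := make(map[string][]Device)
	for _, h := range hits {
		osdhosts[h.host] = append(osdhosts[h.host], h.device)
	}

	fmt.Println(osdhosts["ceph-host-1"]) // [{0 osd.0} {1 osd.1}]
	fmt.Println(osdhosts["ceph-host-2"]) // [{2 osd.2}]
}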
types.go (16 changed lines)

@@ -6,11 +6,11 @@ import (
 )

 type Params struct {
 	duration time.Duration
-	threadsCount int
-	blocksize uint64
+	threadsCount int64
+	blocksize, objectsize int64
 	parallel bool
-	bs, cluster, user, keyring, config, pool, mode, define string
+	bs, os, cluster, user, keyring, config, pool, mode, define string
 }

 type Cephconnection struct {
@@ -373,9 +373,3 @@ type OsdMetadata struct {
 	OsdObjectstore string `json:"osd_objectstore"`
 	Rotational string `json:"rotational"`
 }
-
-type BenchOsd struct {
-	Osds []Device
-	Buffs *[][]byte
-	Offsets []int64
-}