16 objects per thread + generate random offsets for every op
parent
63a10e960f
commit
bc202f2f55
|
@ -13,25 +13,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
//future feature
|
func bench(cephconn *Cephconnection, osddevice Device, buffs *[][]byte, bs int64, objsize int64, params *Params,
|
||||||
// makeoffsets builds one shuffled slice of block-aligned write offsets per
// thread: thread i receives every offset 0, bs, 2*bs, … strictly below
// objsize-bs, in random order, so a benchmark thread visits each bs-sized
// position of its object exactly once.
//
// Each thread gets its own rand.Source seeded with the thread index, so the
// offset order is reproducible across runs and threads never share a
// generator.
func makeoffsets(threads int64, bs int64, objsize int64) [][]int64 {
	offsets := make([][]int64, 0, threads)
	for t := int64(0); t < threads; t++ {
		rng := rand.New(rand.NewSource(t))
		// At most objsize/bs offsets are generated. The original
		// reserved objsize-bs entries, over-allocating the backing
		// array by a factor of bs.
		localoffsets := make([]int64, 0, objsize/bs)
		for off := int64(0); off < objsize-bs; off += bs {
			localoffsets = append(localoffsets, off)
		}
		rng.Shuffle(len(localoffsets), func(i, j int) {
			localoffsets[i], localoffsets[j] = localoffsets[j], localoffsets[i]
		})
		offsets = append(offsets, localoffsets)
	}
	return offsets
}
|
|
||||||
|
|
||||||
func bench(cephconn *Cephconnection, osddevice Device, buffs *[][]byte, offset [][]int64, params *Params,
|
|
||||||
wg *sync.WaitGroup, result chan string) {
|
wg *sync.WaitGroup, result chan string) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
threadresult := make(chan []time.Duration, params.threadsCount)
|
threadresult := make(chan []time.Duration, params.threadsCount)
|
||||||
|
@ -43,14 +25,14 @@ func bench(cephconn *Cephconnection, osddevice Device, buffs *[][]byte, offset [
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
// calculate object for each thread
|
// calculate object for each thread
|
||||||
for suffix := 0; len(objectnames) < int(params.threadsCount); suffix++ {
|
for suffix := 0; len(objectnames) < int(params.threadsCount)*16; suffix++ {
|
||||||
name := "bench_" + strconv.Itoa(suffix)
|
name := "bench_" + strconv.Itoa(suffix)
|
||||||
if osddevice.ID == GetObjActingPrimary(cephconn, *params, name) {
|
if osddevice.ID == GetObjActingPrimary(cephconn, *params, name) {
|
||||||
objectnames = append(objectnames, name)
|
objectnames = append(objectnames, name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for i, j := 0, 0; i < int(params.threadsCount); i, j = i+1, j+2 {
|
for i := 0; i < int(params.threadsCount); i++ {
|
||||||
go bench_thread(cephconn, osddevice, (*buffs)[j:j+2], offset[i], params, threadresult, objectnames[i])
|
go bench_thread(cephconn, osddevice, (*buffs)[i*2:i*2+2], bs, objsize, params, threadresult, objectnames[i*16:i*16+16])
|
||||||
}
|
}
|
||||||
for i := int64(0); i < params.threadsCount; i++ {
|
for i := int64(0); i < params.threadsCount; i++ {
|
||||||
for _, lat := range <-threadresult {
|
for _, lat := range <-threadresult {
|
||||||
|
@ -131,31 +113,27 @@ func bench(cephconn *Cephconnection, osddevice Device, buffs *[][]byte, offset [
|
||||||
result <- buffer.String()
|
result <- buffer.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func bench_thread(cephconn *Cephconnection, osddevice Device, buffs [][]byte, offsets []int64, params *Params,
|
func bench_thread(cephconn *Cephconnection, osddevice Device, buffs [][]byte, bs int64, objsize int64, params *Params,
|
||||||
result chan []time.Duration, objname string) {
|
result chan []time.Duration, objnames []string) {
|
||||||
|
|
||||||
starttime := time.Now()
|
starttime := time.Now()
|
||||||
var latencies []time.Duration
|
var latencies []time.Duration
|
||||||
endtime := starttime.Add(params.duration)
|
endtime := starttime.Add(params.duration)
|
||||||
n := 0
|
n := 0
|
||||||
for {
|
for {
|
||||||
if time.Now().After(endtime) {
|
offset := rand.Int63n(objsize/bs) * bs
|
||||||
|
objname := objnames[rand.Int31n(int32(len(objnames)))]
|
||||||
|
startwritetime := time.Now()
|
||||||
|
if startwritetime.After(endtime) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
for _, offset := range offsets {
|
err := cephconn.ioctx.Write(objname, buffs[n], uint64(offset))
|
||||||
if time.Now().Before(endtime) {
|
endwritetime := time.Now()
|
||||||
startwritetime := time.Now()
|
if err != nil {
|
||||||
err := cephconn.ioctx.Write(objname, buffs[n], uint64(offset))
|
log.Printf("Can't write obj: %v, osd: %v", objname, osddevice.Name)
|
||||||
endwritetime := time.Now()
|
continue
|
||||||
if err != nil {
|
|
||||||
log.Printf("Can't write obj: %v, osd: %v", objname, osddevice.Name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
latencies = append(latencies, endwritetime.Sub(startwritetime))
|
|
||||||
} else {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
latencies = append(latencies, endwritetime.Sub(startwritetime))
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
n++
|
n++
|
||||||
} else {
|
} else {
|
||||||
|
@ -184,16 +162,15 @@ func main() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
osddevices := GetOsds(cephconn, params)
|
osddevices := GetOsds(cephconn, params)
|
||||||
offsets := makeoffsets(params.threadsCount, params.blocksize, params.objectsize)
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
results := make(chan string, len(osddevices)*int(params.threadsCount))
|
results := make(chan string, len(osddevices)*int(params.threadsCount))
|
||||||
for _, osd := range osddevices {
|
for _, osd := range osddevices {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
if params.parallel == true {
|
if params.parallel == true {
|
||||||
go bench(cephconn, osd, &buffs, offsets, ¶ms, &wg, results)
|
go bench(cephconn, osd, &buffs, params.blocksize, params.objectsize, ¶ms, &wg, results)
|
||||||
} else {
|
} else {
|
||||||
bench(cephconn, osd, &buffs, offsets, ¶ms, &wg, results)
|
bench(cephconn, osd, &buffs, params.blocksize, params.objectsize, ¶ms, &wg, results)
|
||||||
log.Println(<-results)
|
log.Println(<-results)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue