package main
import (
	"encoding/json"
	"log"
	"regexp"
	"strings"
)
2019-03-04 12:42:43 +03:00
func makeMonQuery ( cephconn * cephconnection , query map [ string ] string ) [ ] byte {
2019-02-20 11:48:47 +03:00
monjson , err := json . Marshal ( query )
if err != nil {
log . Fatalf ( "Can't marshal json mon query. Error: %v" , err )
}
monrawanswer , _ , err := cephconn . conn . MonCommand ( monjson )
if err != nil {
log . Fatalf ( "Failed exec monCommand. Error: %v" , err )
}
return monrawanswer
}
2019-03-04 12:42:43 +03:00
func getPoolSize ( cephconn * cephconnection , params params ) Poolinfo {
monrawanswer := makeMonQuery ( cephconn , map [ string ] string { "prefix" : "osd pool get" , "pool" : params . pool ,
2019-02-20 11:48:47 +03:00
"format" : "json" , "var" : "size" } )
monanswer := Poolinfo { }
if err := json . Unmarshal ( [ ] byte ( monrawanswer ) , & monanswer ) ; err != nil {
log . Fatalf ( "Can't parse monitor answer. Error: %v" , err )
}
return monanswer
}
2019-03-04 12:42:43 +03:00
func getPgByPool ( cephconn * cephconnection , params params ) [ ] PlacementGroup {
monrawanswer := makeMonQuery ( cephconn , map [ string ] string { "prefix" : "pg ls-by-pool" , "poolstr" : params . pool ,
2019-02-20 11:48:47 +03:00
"format" : "json" } )
var monanswer [ ] PlacementGroup
if err := json . Unmarshal ( [ ] byte ( monrawanswer ) , & monanswer ) ; err != nil {
2019-03-11 16:38:50 +03:00
//try Nautilus
var nmonanswer placementGroupNautilus
if nerr := json . Unmarshal ( [ ] byte ( monrawanswer ) , & nmonanswer ) ; nerr != nil {
log . Fatalf ( "Can't parse monitor answer in getPgByPool. Error: %v" , err )
}
return nmonanswer . PgStats
2019-02-20 11:48:47 +03:00
}
return monanswer
}
2019-03-04 12:42:43 +03:00
func getOsdCrushDump ( cephconn * cephconnection ) OsdCrushDump {
monrawanswer := makeMonQuery ( cephconn , map [ string ] string { "prefix" : "osd crush dump" , "format" : "json" } )
2019-02-20 11:48:47 +03:00
var monanswer OsdCrushDump
if err := json . Unmarshal ( [ ] byte ( monrawanswer ) , & monanswer ) ; err != nil {
log . Fatalf ( "Can't parse monitor answer. Error: %v" , err )
}
return monanswer
}
2019-03-04 12:42:43 +03:00
func getOsdDump ( cephconn * cephconnection ) OsdDump {
monrawanswer := makeMonQuery ( cephconn , map [ string ] string { "prefix" : "osd dump" , "format" : "json" } )
2019-02-20 11:48:47 +03:00
var monanswer OsdDump
if err := json . Unmarshal ( [ ] byte ( monrawanswer ) , & monanswer ) ; err != nil {
log . Fatalf ( "Can't parse monitor answer. Error: %v" , err )
}
return monanswer
}
2019-03-04 12:42:43 +03:00
func getOsdMetadata ( cephconn * cephconnection ) [ ] OsdMetadata {
monrawanswer := makeMonQuery ( cephconn , map [ string ] string { "prefix" : "osd metadata" , "format" : "json" } )
2019-02-20 16:54:39 +03:00
var monanswer [ ] OsdMetadata
if err := json . Unmarshal ( [ ] byte ( monrawanswer ) , & monanswer ) ; err != nil {
log . Fatalf ( "Can't parse monitor answer. Error: %v" , err )
}
return monanswer
}
2019-03-04 12:42:43 +03:00
func getObjActingPrimary ( cephconn * cephconnection , params params , objname string ) int64 {
monrawanswer := makeMonQuery ( cephconn , map [ string ] string { "prefix" : "osd map" , "pool" : params . pool ,
2019-02-26 18:09:59 +03:00
"object" : objname , "format" : "json" } )
var monanswer OsdMap
if err := json . Unmarshal ( [ ] byte ( monrawanswer ) , & monanswer ) ; err != nil {
log . Fatalf ( "Can't parse monitor answer. Error: %v" , err )
}
return monanswer . UpPrimary
}
2019-03-04 12:42:43 +03:00
func getCrushHostBuckets ( buckets [ ] Bucket , itemid int64 ) [ ] Bucket {
2019-02-20 11:48:47 +03:00
var rootbuckets [ ] Bucket
for _ , bucket := range buckets {
if bucket . ID == itemid {
if bucket . TypeName == "host" {
rootbuckets = append ( rootbuckets , bucket )
} else {
for _ , item := range bucket . Items {
2019-03-04 12:42:43 +03:00
result := getCrushHostBuckets ( buckets , item . ID )
2019-02-20 11:48:47 +03:00
for _ , it := range result {
rootbuckets = append ( rootbuckets , it )
}
}
}
}
}
return rootbuckets
}
2019-03-04 12:42:43 +03:00
func getOsdForLocations ( params params , osdcrushdump OsdCrushDump , osddump OsdDump , poolinfo Poolinfo , osdsmetadata [ ] OsdMetadata ) [ ] Device {
2019-02-26 18:09:59 +03:00
var crushrule , rootid int64
2019-02-20 11:48:47 +03:00
var crushrulename string
for _ , pool := range osddump . Pools {
if pool . Pool == poolinfo . PoolId {
crushrule = pool . CrushRule
}
}
for _ , rule := range osdcrushdump . Rules {
if rule . RuleID == crushrule {
crushrulename = rule . RuleName
for _ , step := range rule . Steps {
if step . Op == "take" {
rootid = step . Item
}
}
}
}
2019-02-28 14:19:33 +03:00
osdstats := map [ uint64 ] * Osd { }
2019-02-28 12:48:22 +03:00
for num , stat := range osddump . Osds {
osdstats [ stat . Osd ] = & osddump . Osds [ num ]
}
2019-02-20 11:48:47 +03:00
2019-02-28 14:19:33 +03:00
var osddevices [ ] Device
2019-03-04 12:42:43 +03:00
bucketitems := getCrushHostBuckets ( osdcrushdump . Buckets , rootid )
2019-03-11 16:38:50 +03:00
if params . rdefine != "" { // match regex if exists
validbucket , err := regexp . CompilePOSIX ( params . rdefine )
if err != nil {
log . Fatalf ( "Can't parse regex %v" , params . rdefine )
}
for _ , hostbucket := range bucketitems {
for _ , item := range hostbucket . Items {
for _ , device := range osdcrushdump . Devices {
if device . ID == item . ID && ( validbucket . MatchString ( hostbucket . Name ) || validbucket . MatchString ( device . Name ) ) {
for _ , osdmetadata := range osdsmetadata {
if osdmetadata . ID == device . ID && osdstats [ uint64 ( device . ID ) ] . Up == 1 && osdstats [ uint64 ( device . ID ) ] . In == 1 {
device . Info = osdmetadata
osddevices = append ( osddevices , device )
}
}
}
}
}
}
if len ( osddevices ) == 0 {
log . Fatalf ( "Defined host/osd not exist in root for rule: %v pool: %v" , crushrulename , poolinfo . Pool )
}
} else if params . define != "" { // check defined osd/hosts
if strings . HasPrefix ( params . define , "osd." ) { //check that defined is osd, else host
2019-02-20 11:48:47 +03:00
for _ , hostbucket := range bucketitems {
for _ , item := range hostbucket . Items {
for _ , device := range osdcrushdump . Devices {
if device . ID == item . ID && params . define == device . Name {
2019-02-22 11:34:55 +03:00
for _ , osdmetadata := range osdsmetadata {
2019-02-28 14:19:33 +03:00
if osdmetadata . ID == device . ID && osdstats [ uint64 ( device . ID ) ] . Up == 1 && osdstats [ uint64 ( device . ID ) ] . In == 1 {
2019-02-22 11:34:55 +03:00
device . Info = osdmetadata
2019-02-26 18:09:59 +03:00
osddevices = append ( osddevices , device )
2019-02-22 11:34:55 +03:00
}
}
2019-02-20 11:48:47 +03:00
}
}
}
}
2019-02-26 18:09:59 +03:00
if len ( osddevices ) == 0 {
2019-02-20 11:48:47 +03:00
log . Fatalf ( "Defined osd not exist in root for rule: %v pool: %v.\nYou should define osd like osd.X" ,
crushrulename , poolinfo . Pool )
}
} else {
for _ , hostbucket := range bucketitems {
if strings . Split ( hostbucket . Name , "~" ) [ 0 ] == strings . Split ( params . define , "~" ) [ 0 ] { //purge device class
for _ , item := range hostbucket . Items {
for _ , device := range osdcrushdump . Devices {
if device . ID == item . ID {
2019-02-22 11:34:55 +03:00
for _ , osdmetadata := range osdsmetadata {
2019-02-28 14:19:33 +03:00
if osdmetadata . ID == device . ID && osdstats [ uint64 ( device . ID ) ] . Up == 1 && osdstats [ uint64 ( device . ID ) ] . In == 1 {
2019-02-22 11:34:55 +03:00
device . Info = osdmetadata
2019-02-26 18:09:59 +03:00
osddevices = append ( osddevices , device )
2019-02-22 11:34:55 +03:00
}
}
2019-02-20 11:48:47 +03:00
}
}
}
}
}
2019-02-26 18:09:59 +03:00
if len ( osddevices ) == 0 {
2019-02-20 11:48:47 +03:00
log . Fatalf ( "Defined host not exist in root for rule: %v pool: %v" , crushrulename , poolinfo . Pool )
}
}
} else {
for _ , hostbucket := range bucketitems {
for _ , item := range hostbucket . Items {
for _ , device := range osdcrushdump . Devices {
if device . ID == item . ID {
2019-02-22 11:34:55 +03:00
for _ , osdmetadata := range osdsmetadata {
2019-02-28 14:19:33 +03:00
if osdmetadata . ID == device . ID && osdstats [ uint64 ( device . ID ) ] . Up == 1 && osdstats [ uint64 ( device . ID ) ] . In == 1 {
2019-02-22 11:34:55 +03:00
device . Info = osdmetadata
2019-02-28 12:48:22 +03:00
osddevices = append ( osddevices , device )
2019-02-22 11:34:55 +03:00
}
}
2019-02-20 11:48:47 +03:00
}
}
}
}
2019-02-26 18:09:59 +03:00
if len ( osddevices ) == 0 {
2019-02-20 11:48:47 +03:00
log . Fatalf ( "Osd not exist in root for rule: %v pool: %v" , crushrulename , poolinfo . Pool )
}
}
2019-02-26 18:09:59 +03:00
return osddevices
2019-02-20 11:48:47 +03:00
}
2019-03-04 12:42:43 +03:00
func containsPg ( pgs [ ] PlacementGroup , i int64 ) bool {
2019-02-20 11:48:47 +03:00
for _ , pg := range pgs {
if i == pg . ActingPrimary {
return true
}
}
return false
}
2019-03-04 12:42:43 +03:00
func getOsds ( cephconn * cephconnection , params params ) [ ] Device {
poolinfo := getPoolSize ( cephconn , params )
2019-03-14 18:24:12 +03:00
if params . disablecheck == false {
if poolinfo . Size != 1 {
log . Fatalf ( "Pool size must be 1. Current size for pool %v is %v. Don't forget that it must be useless pool (not production). Do:\n # ceph osd pool set %v min_size 1\n # ceph osd pool set %v size 1" ,
poolinfo . Pool , poolinfo . Size , poolinfo . Pool , poolinfo . Pool )
}
2019-02-20 11:48:47 +03:00
}
2019-03-04 12:42:43 +03:00
placementGroups := getPgByPool ( cephconn , params )
crushosddump := getOsdCrushDump ( cephconn )
osddump := getOsdDump ( cephconn )
osdsmetadata := getOsdMetadata ( cephconn )
osddevices := getOsdForLocations ( params , crushosddump , osddump , poolinfo , osdsmetadata )
2019-02-26 18:09:59 +03:00
for _ , device := range osddevices {
2019-03-04 12:42:43 +03:00
if exist := containsPg ( placementGroups , device . ID ) ; exist == false {
2019-02-26 18:09:59 +03:00
log . Fatalln ( "Not enough pg for test. Some osd haven't placement group at all. Increase pg_num and pgp_num" )
2019-02-20 11:48:47 +03:00
}
}
return osddevices
}