add optimize option
parent
1e6830968d
commit
c18802a64e
|
@ -45,6 +45,10 @@ type Deleter interface {
|
|||
Delete(string, int64) error
|
||||
}
|
||||
|
||||
// Optimizer is implemented by database backends that support an optional
// post-import optimization step (e.g. clustering tables on a spatial index).
type Optimizer interface {
	Optimize() error
}
|
||||
|
||||
// databases maps connection-type names to factory functions that open a DB
// for the given config and mapping. Populated in init(); see below.
var databases map[string]func(Config, *mapping.Mapping) (DB, error)
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -391,6 +391,72 @@ func (pg *PostGIS) generalizeTable(table *GeneralizedTableSpec) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func clusterTable(pg *PostGIS, tableName string, srid int, columns []ColumnSpec) error {
|
||||
for _, col := range columns {
|
||||
if col.Type.Name() == "GEOMETRY" {
|
||||
step := log.StartStep(fmt.Sprintf("Indexing %s on geohash", tableName))
|
||||
sql := fmt.Sprintf(`CREATE INDEX "%s_geom_geohash" ON "%s"."%s" (ST_GeoHash(ST_Transform(ST_SetSRID(Box2D(%s), %d), 4326)))`,
|
||||
tableName, pg.Schema, tableName, col.Name, srid)
|
||||
_, err := pg.Db.Exec(sql)
|
||||
log.StopStep(step)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
step = log.StartStep(fmt.Sprintf("Clustering %s on geohash", tableName))
|
||||
sql = fmt.Sprintf(`CLUSTER "%s_geom_geohash" ON "%s"."%s"`,
|
||||
tableName, pg.Schema, tableName)
|
||||
_, err = pg.Db.Exec(sql)
|
||||
log.StopStep(step)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// Finish creates spatial indices on all tables.
|
||||
func (pg *PostGIS) Optimize() error {
|
||||
defer log.StopStep(log.StartStep(fmt.Sprintf("Clustering on geometry")))
|
||||
|
||||
worker := int(runtime.NumCPU() / 2)
|
||||
if worker < 1 {
|
||||
worker = 1
|
||||
}
|
||||
|
||||
time.Sleep(0 * time.Second)
|
||||
p := newWorkerPool(worker, len(pg.Tables))
|
||||
for tableName, tbl := range pg.Tables {
|
||||
tableName := pg.Prefix + tableName
|
||||
table := tbl
|
||||
p.in <- func() error {
|
||||
return clusterTable(pg, tableName, table.Srid, table.Columns)
|
||||
}
|
||||
}
|
||||
err := p.wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p = newWorkerPool(worker, len(pg.GeneralizedTables))
|
||||
for tableName, tbl := range pg.GeneralizedTables {
|
||||
tableName := pg.Prefix + tableName
|
||||
table := tbl
|
||||
p.in <- func() error {
|
||||
return clusterTable(pg, tableName, table.Source.Srid, table.Source.Columns)
|
||||
}
|
||||
}
|
||||
err = p.wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type PostGIS struct {
|
||||
Db *sql.DB
|
||||
Schema string
|
||||
|
|
|
@ -193,6 +193,14 @@ func (clipper *Clipper) Clip(geom *geos.Geom) ([]*geos.Geom, error) {
|
|||
return mergeGeometries(g, intersections, geomType), nil
|
||||
}
|
||||
|
||||
func (clipper *Clipper) Intersects(g *geos.Geos, geom *geos.Geom) bool {
|
||||
hits := g.IndexQuery(clipper.index, geom)
|
||||
if len(hits) == 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func flattenPolygons(g *geos.Geos, geoms []*geos.Geom) []*geos.Geom {
|
||||
var result []*geos.Geom
|
||||
for _, geom := range geoms {
|
||||
|
|
13
goposm.go
13
goposm.go
|
@ -30,6 +30,7 @@ var (
|
|||
appendcache = flag.Bool("appendcache", false, "append cache")
|
||||
read = flag.String("read", "", "read")
|
||||
write = flag.Bool("write", false, "write")
|
||||
optimize = flag.Bool("optimize", false, "optimize")
|
||||
connection = flag.String("connection", "", "connection parameters")
|
||||
diff = flag.Bool("diff", false, "enable diff support")
|
||||
mappingFile = flag.String("mapping", "", "mapping file")
|
||||
|
@ -137,7 +138,7 @@ func main() {
|
|||
|
||||
srid := 3857 // TODO
|
||||
|
||||
if *write || *deployProduction || *revertDeploy || *removeBackup {
|
||||
if *write || *deployProduction || *revertDeploy || *removeBackup || *optimize {
|
||||
connType := database.ConnectionType(*connection)
|
||||
conf := database.Config{
|
||||
Type: connType,
|
||||
|
@ -267,6 +268,16 @@ func main() {
|
|||
log.StopStep(stepImport)
|
||||
}
|
||||
|
||||
if *optimize {
|
||||
if db, ok := db.(database.Optimizer); ok {
|
||||
if err := db.Optimize(); err != nil {
|
||||
die(err)
|
||||
}
|
||||
} else {
|
||||
die("database not optimizable")
|
||||
}
|
||||
}
|
||||
|
||||
if *deployProduction {
|
||||
if db, ok := db.(database.Deployer); ok {
|
||||
if err := db.Deploy(); err != nil {
|
||||
|
|
Loading…
Reference in New Issue