limit cache to -limitto

master
Oliver Tonnhofer 2013-11-04 08:51:21 +01:00
parent 35e66529a7
commit 2d2dad7b7e
7 changed files with 69 additions and 22 deletions

cache/delta.go vendored (3 changes)

@@ -211,6 +211,9 @@ func (self *DeltaCoordsCache) PutCoords(nodes []element.Node) error {
     start = 0
     totalNodes := len(nodes)
     for i, node := range nodes {
+        if node.Id == SKIP {
+            continue
+        }
         bunchId := self.getBunchId(node.Id)
         if bunchId != currentBunchId {
             if self.linearImport && int64(i) > self.bunchSize && int64(i) < int64(totalNodes)-self.bunchSize {

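All of the cache changes follow one pattern: the reader marks nodes that fall outside the buffered -limitto geometry by setting their Id to the SKIP sentinel (defined as -1 in cache/osm.go below), and the cache writers (PutCoords here, PutNode/PutNodes in cache/nodes.go) simply drop those entries instead of persisting them. Marking in place lets the reader pass its node slices on unchanged instead of filtering and reallocating them. A minimal, self-contained sketch of that mark-and-skip pattern; Node and putCoords are simplified stand-ins, not the real cache code:

package main

import "fmt"

// SKIP marks nodes that must not be written to the cache.
// Real OSM ids are positive, so -1 cannot collide with them.
const SKIP int64 = -1

type Node struct {
    Id   int64
    Long float64
    Lat  float64
}

// putCoords mimics the cache writer: entries marked SKIP are dropped.
func putCoords(nodes []Node) (stored int) {
    for _, nd := range nodes {
        if nd.Id == SKIP {
            continue
        }
        // ... write nd to the cache ...
        stored++
    }
    return stored
}

func main() {
    nds := []Node{{Id: 1}, {Id: 2}, {Id: 3}}
    nds[1].Id = SKIP // marked by the reader as lying outside the limit
    fmt.Println(putCoords(nds)) // 2
}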
cache/nodes.go vendored (6 changes)

@@ -21,6 +21,9 @@ func newNodesCache(path string) (*NodesCache, error) {
 }

 func (p *NodesCache) PutNode(node *element.Node) error {
+    if node.Id == SKIP {
+        return nil
+    }
     if node.Tags == nil {
         return nil
     }
@@ -38,6 +41,9 @@ func (p *NodesCache) PutNodes(nodes []element.Node) (int, error) {
     var n int
     for _, node := range nodes {
+        if node.Id == SKIP {
+            continue
+        }
         if len(node.Tags) == 0 {
             continue
         }

cache/osm.go vendored (2 changes)

@@ -12,6 +12,8 @@ var (
     NotFound = errors.New("not found")
 )

+const SKIP int64 = -1
+
 type OSMCache struct {
     dir string
     Coords *DeltaCoordsCache


@@ -14,7 +14,7 @@ type Config struct {
     Connection string `json:"connection"`
     MappingFile string `json:"mapping"`
     LimitTo string `json:"limitto"`
-    LimitToCacheBuffer float64 `json:"limitto_cache_buffer"`
+    LimitToDiffBuffer float64 `json:"limitto_cache_buffer"`
     Srid int `json:"srid"`
 }
@@ -30,7 +30,7 @@ type _BaseOptions struct {
     MappingFile string
     Srid int
     LimitTo string
-    LimitToCacheBuffer float64
+    LimitToDiffBuffer float64
     ConfigFile string
     Httpprofile string
 }
@@ -68,8 +68,8 @@ func (o *_BaseOptions) updateFromConfig() error {
     if o.LimitTo == "" {
         o.LimitTo = conf.LimitTo
     }
-    if o.LimitToCacheBuffer == 0.0 {
-        o.LimitToCacheBuffer = conf.LimitToCacheBuffer
+    if o.LimitToDiffBuffer == 0.0 {
+        o.LimitToDiffBuffer = conf.LimitToDiffBuffer
     }
     if o.CacheDir == defaultCacheDir {
         o.CacheDir = conf.CacheDir
@@ -110,6 +110,7 @@ func addBaseFlags(flags *flag.FlagSet) {
     flags.StringVar(&BaseOptions.MappingFile, "mapping", "", "mapping file")
     flags.IntVar(&BaseOptions.Srid, "srid", defaultSrid, "srs id")
     flags.StringVar(&BaseOptions.LimitTo, "limitto", "", "limit to geometries")
+    flags.Float64Var(&BaseOptions.LimitToDiffBuffer, "limittodiffbuffer", 0.0, "limit to buffer for cache")
     flags.StringVar(&BaseOptions.ConfigFile, "config", "", "config (json)")
     flags.StringVar(&BaseOptions.Httpprofile, "httpprofile", "", "bind address for profile server")
 }

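The renamed option is threaded through three places: the JSON config (note that the key apparently stays limitto_cache_buffer while the Go field becomes LimitToDiffBuffer), the _BaseOptions struct, and a new -limittodiffbuffer flag that defaults to 0.0. A small sketch of how such a config decodes, using a trimmed-down struct with the same tags and made-up values, not the full imposm3 Config:

package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed-down stand-in for the Config struct shown above.
type Config struct {
    LimitTo           string  `json:"limitto"`
    LimitToDiffBuffer float64 `json:"limitto_cache_buffer"` // JSON key unchanged by the rename
}

func main() {
    // Hypothetical config; file name and buffer value are made up.
    raw := []byte(`{"limitto": "hamburg.geojson", "limitto_cache_buffer": 20000}`)
    var conf Config
    if err := json.Unmarshal(raw, &conf); err != nil {
        panic(err)
    }
    fmt.Printf("limit to %s, buffer %.0f\n", conf.LimitTo, conf.LimitToDiffBuffer)
}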

@@ -36,10 +36,13 @@ func Import() {
     }

     var geometryLimiter *limit.Limiter
-    if config.ImportOptions.Write && config.BaseOptions.LimitTo != "" {
+    if (config.ImportOptions.Write || config.ImportOptions.Read != "") && config.BaseOptions.LimitTo != "" {
         var err error
         step := log.StartStep("Reading limitto geometries")
-        geometryLimiter, err = limit.NewFromGeoJson(config.BaseOptions.LimitTo)
+        geometryLimiter, err = limit.NewFromGeoJsonWithBuffered(
+            config.BaseOptions.LimitTo,
+            config.BaseOptions.LimitToDiffBuffer,
+        )
         if err != nil {
             log.Fatal(err)
         }
@@ -99,7 +102,13 @@ func Import() {
         }
         osmCache.Coords.SetLinearImport(true)
-        reader.ReadPbf(osmCache, progress, tagmapping, pbfFile)
+        readLimiter := geometryLimiter
+        if config.BaseOptions.LimitToDiffBuffer == 0.0 {
+            readLimiter = nil
+        }
+        reader.ReadPbf(osmCache, progress, tagmapping,
+            pbfFile, readLimiter)
         osmCache.Coords.SetLinearImport(false)
         elementCounts = progress.Stop()
         osmCache.Close()
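With -limitto set, the limiter is now built for the read stage as well, buffered by LimitToDiffBuffer. During the PBF read it is only used when a buffer was actually configured: with a buffer of 0.0 a nil limiter is handed to reader.ReadPbf, presumably to keep the previous behaviour of caching everything. A sketch of that selection; Limiter and readPbf are placeholders, not the real limit and reader packages:

package main

import "fmt"

// Limiter is a placeholder for limit.Limiter.
type Limiter struct{ buffered bool }

// readPbf is a placeholder for reader.ReadPbf: a nil limiter means "cache everything".
func readPbf(limiter *Limiter) {
    if limiter == nil {
        fmt.Println("caching all coords and nodes")
        return
    }
    fmt.Println("marking coords and nodes outside the buffered limit with SKIP")
}

func main() {
    geometryLimiter := &Limiter{buffered: true}
    limitToDiffBuffer := 0.0 // value of the -limittodiffbuffer flag

    readLimiter := geometryLimiter
    if limitToDiffBuffer == 0.0 {
        readLimiter = nil // no buffer configured: do not restrict the cache
    }
    readPbf(readLimiter)
}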


@@ -57,7 +57,7 @@ func main() {
         step := log.StartStep("Reading limitto geometries")
         geometryLimiter, err = limit.NewFromGeoJsonWithBuffered(
             config.BaseOptions.LimitTo,
-            config.BaseOptions.LimitToCacheBuffer,
+            config.BaseOptions.LimitToDiffBuffer,
         )
         if err != nil {
             log.Fatal(err)

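The diff/update process already used a buffered limiter; only the renamed option changes here. The reason for buffering the cache limit at all is, presumably, that ways and relations near the border reference coords slightly outside the exact -limitto geometry, so the cache has to cover a margin around it. A toy illustration of that idea, with a plain bounding box standing in for the real GeoJSON polygon and made-up coordinates and buffer:

package main

import "fmt"

// bbox is a toy stand-in for the limit geometry.
type bbox struct{ minLong, minLat, maxLong, maxLat float64 }

// buffered returns the box grown by d on every side, like the
// cache limit grown by -limittodiffbuffer.
func (b bbox) buffered(d float64) bbox {
    return bbox{b.minLong - d, b.minLat - d, b.maxLong + d, b.maxLat + d}
}

func (b bbox) contains(long, lat float64) bool {
    return long >= b.minLong && long <= b.maxLong &&
        lat >= b.minLat && lat <= b.maxLat
}

func main() {
    limit := bbox{9.7, 53.3, 10.4, 53.8} // made-up limit area
    long, lat := 10.45, 53.5             // node just outside of it

    fmt.Println(limit.contains(long, lat))               // false: node would be skipped
    fmt.Println(limit.buffered(0.1).contains(long, lat)) // true: kept in the buffered cache
}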

@@ -1,8 +1,10 @@
 package reader

 import (
-    "imposm3/cache"
+    osmcache "imposm3/cache"
     "imposm3/element"
+    "imposm3/geom/geos"
+    "imposm3/geom/limit"
     "imposm3/logging"
     "imposm3/mapping"
     "imposm3/parser/pbf"
@@ -45,12 +47,20 @@ func init() {
 }

-func ReadPbf(cache *cache.OSMCache, progress *stats.Statistics, tagmapping *mapping.Mapping, pbfFile *pbf.Pbf) {
+func ReadPbf(cache *osmcache.OSMCache, progress *stats.Statistics,
+    tagmapping *mapping.Mapping, pbfFile *pbf.Pbf,
+    limiter *limit.Limiter,
+) {
     nodes := make(chan []element.Node, 4)
     coords := make(chan []element.Node, 4)
     ways := make(chan []element.Way, 4)
     relations := make(chan []element.Relation, 4)

+    withLimiter := false
+    if limiter != nil {
+        withLimiter = true
+    }
+
     if pbfFile.Header.Time.Unix() != 0 {
         log.Printf("reading %s with data till %v", pbfFile.Filename, pbfFile.Header.Time.Local())
     }
@@ -86,6 +96,7 @@ func ReadPbf(cache *cache.OSMCache, progress *stats.Statistics, tagmapping *mapp
                 for i, _ := range ws {
                     m.Filter(&ws[i].Tags)
                 }
+                // TODO check withLimiter
                 cache.Ways.PutWays(ws)
                 progress.AddWays(len(ws))
             }
@@ -105,6 +116,7 @@ func ReadPbf(cache *cache.OSMCache, progress *stats.Statistics, tagmapping *mapp
                         numWithTags += 1
                     }
                 }
+                // TODO check withLimiter
                 cache.Relations.PutRelations(rels)
                 progress.AddRelations(numWithTags)
             }
@@ -115,10 +127,19 @@ func ReadPbf(cache *cache.OSMCache, progress *stats.Statistics, tagmapping *mapp
     for i := 0; int64(i) < nCoords; i++ {
         waitWriter.Add(1)
         go func() {
+            g := geos.NewGeos()
+            defer g.Finish()
             for nds := range coords {
                 if skipCoords {
                     continue
                 }
+                if withLimiter {
+                    for i, _ := range nds {
+                        if !limiter.IntersectsBuffer(g, nds[i].Long, nds[i].Lat) {
+                            nds[i].Id = osmcache.SKIP
+                        }
+                    }
+                }
                 cache.Coords.PutCoords(nds)
                 progress.AddCoords(len(nds))
             }
@@ -129,6 +150,8 @@ func ReadPbf(cache *cache.OSMCache, progress *stats.Statistics, tagmapping *mapp
     for i := 0; int64(i) < nNodes; i++ {
         waitWriter.Add(1)
         go func() {
+            g := geos.NewGeos()
+            defer g.Finish()
             m := tagmapping.NodeTagFilter()
             for nds := range nodes {
                 numWithTags := 0
@@ -137,6 +160,9 @@ func ReadPbf(cache *cache.OSMCache, progress *stats.Statistics, tagmapping *mapp
                     if len(nds[i].Tags) > 0 {
                         numWithTags += 1
                     }
+                    if withLimiter && !limiter.IntersectsBuffer(g, nds[i].Long, nds[i].Lat) {
+                        nds[i].Id = osmcache.SKIP
+                    }
                 }
                 cache.Nodes.PutNodes(nds)
                 progress.AddNodes(numWithTags)
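
In the reader, each writer goroutine now creates its own GEOS handle (geos.NewGeos() with a deferred Finish()), presumably because a handle should not be shared across goroutines, and tests every coordinate against the buffered limit geometry with IntersectsBuffer. Anything outside is marked with osmcache.SKIP before the batch is handed to the cache, which then drops it as shown above; ways and relations are not filtered yet (see the TODO comments). A minimal sketch of that per-goroutine mark-then-store pattern, with a hypothetical circle limiter standing in for limit.Limiter and GEOS:

package main

import (
    "fmt"
    "sync"
)

// SKIP mirrors the sentinel from cache/osm.go.
const SKIP int64 = -1

type Node struct {
    Id        int64
    Long, Lat float64
}

// limiter is a hypothetical stand-in for limit.Limiter plus its GEOS handle.
type limiter interface {
    IntersectsBuffer(long, lat float64) bool
}

// circle is a trivial limiter implementation for this sketch.
type circle struct{ cx, cy, r float64 }

func (c circle) IntersectsBuffer(long, lat float64) bool {
    dx, dy := long-c.cx, lat-c.cy
    return dx*dx+dy*dy <= c.r*c.r
}

// markAndStore mimics one coords writer goroutine: it marks nodes outside
// the limit with SKIP and the (simulated) cache drops the marked ones.
func markAndStore(coords <-chan []Node, lim limiter, wg *sync.WaitGroup) {
    defer wg.Done()
    // The real reader would create the per-goroutine GEOS handle here:
    // g := geos.NewGeos(); defer g.Finish()
    for nds := range coords {
        for i := range nds {
            if !lim.IntersectsBuffer(nds[i].Long, nds[i].Lat) {
                nds[i].Id = SKIP
            }
        }
        for _, nd := range nds {
            if nd.Id == SKIP {
                continue // dropped, as in PutCoords/PutNodes above
            }
            fmt.Println("stored node", nd.Id)
        }
    }
}

func main() {
    coords := make(chan []Node, 1)
    var wg sync.WaitGroup
    wg.Add(1)
    go markAndStore(coords, circle{cx: 10.0, cy: 53.5, r: 1.0}, &wg)
    coords <- []Node{{Id: 1, Long: 10.1, Lat: 53.6}, {Id: 2, Long: 20.0, Lat: 0.0}}
    close(coords)
    wg.Wait()
}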