Merge branch 'subcommands'

master
Oliver Tonnhofer 2013-07-29 09:51:42 +02:00
commit 1b3b7be949
8 changed files with 310 additions and 182 deletions


@ -4,6 +4,7 @@ import (
"encoding/json"
"errors"
"flag"
"log"
"os"
)
@ -18,64 +19,166 @@ type Config struct {
const defaultSrid = 3857
const defaultCacheDir = "/tmp/goposm"
var (
connection = flag.String("connection", "", "connection parameters")
cachedir = flag.String("cachedir", defaultCacheDir, "cache directory")
mappingFile = flag.String("mapping", "", "mapping file")
srid = flag.Int("srid", defaultSrid, "srs id")
limitTo = flag.String("limitto", "", "limit to geometries")
configFile = flag.String("config", "", "config (json)")
)
var ImportFlags = flag.NewFlagSet("import", flag.ExitOnError)
var DiffImportFlags = flag.NewFlagSet("diff", flag.ExitOnError)
func Parse() (*Config, []error) {
config := &Config{
type ImportBaseOptions struct {
Connection string
CacheDir string
MappingFile string
Srid int
LimitTo string
ConfigFile string
}
type _ImportOptions struct {
Base ImportBaseOptions
Cpuprofile string
Httpprofile string
Memprofile string
Overwritecache bool
Appendcache bool
Read string
Write bool
Optimize bool
Diff bool
DeployProduction bool
RevertDeploy bool
RemoveBackup bool
Quiet bool
}
type _DiffImportOptions struct {
Base ImportBaseOptions
}
var ImportOptions = _ImportOptions{}
var DiffImportOptions = _DiffImportOptions{}
func addBaseFlags(flags *flag.FlagSet, baseOptions *ImportBaseOptions) {
flags.StringVar(&baseOptions.Connection, "connection", "", "connection parameters")
flags.StringVar(&baseOptions.CacheDir, "cachedir", defaultCacheDir, "cache directory")
flags.StringVar(&baseOptions.MappingFile, "mapping", "", "mapping file")
flags.IntVar(&baseOptions.Srid, "srid", defaultSrid, "srs id")
flags.StringVar(&baseOptions.LimitTo, "limitto", "", "limit to geometries")
flags.StringVar(&baseOptions.ConfigFile, "config", "", "config (json)")
}
func addImportFlags(flags *flag.FlagSet, options *_ImportOptions) {
flags.StringVar(&options.Cpuprofile, "cpuprofile", "", "filename of cpu profile output")
flags.StringVar(&options.Httpprofile, "httpprofile", "", "bind address for profile server")
flags.StringVar(&options.Memprofile, "memprofile", "", "dir name of mem profile output and interval (fname:interval)")
flags.BoolVar(&options.Overwritecache, "overwritecache", false, "overwritecache")
flags.BoolVar(&options.Appendcache, "appendcache", false, "append cache")
flags.StringVar(&options.Read, "read", "", "read")
flags.BoolVar(&options.Write, "write", false, "write")
flags.BoolVar(&options.Optimize, "optimize", false, "optimize")
flags.BoolVar(&options.Diff, "diff", false, "enable diff support")
flags.BoolVar(&options.DeployProduction, "deployproduction", false, "deploy production")
flags.BoolVar(&options.RevertDeploy, "revertdeploy", false, "revert deploy to production")
flags.BoolVar(&options.RemoveBackup, "removebackup", false, "remove backups from deploy")
flags.BoolVar(&options.Quiet, "quiet", false, "quiet log output")
}
func addDiffImportFlags(flags *flag.FlagSet, options *_DiffImportOptions) {
// no options yet
}
func init() {
addBaseFlags(ImportFlags, &ImportOptions.Base)
addImportFlags(ImportFlags, &ImportOptions)
addBaseFlags(DiffImportFlags, &DiffImportOptions.Base)
addDiffImportFlags(DiffImportFlags, &DiffImportOptions)
}
// var (
// connection = flag.String("connection", "", "connection parameters")
// cachedir = flag.String("cachedir", defaultCacheDir, "cache directory")
// mappingFile = flag.String("mapping", "", "mapping file")
// srid = flag.Int("srid", defaultSrid, "srs id")
// limitTo = flag.String("limitto", "", "limit to geometries")
// configFile = flag.String("config", "", "config (json)")
// )
func ParseImport(args []string) []error {
err := ImportFlags.Parse(args)
if err != nil {
log.Fatal(err)
}
errs := updateBaseOpts(&ImportOptions.Base)
if errs != nil {
return errs
}
errs = checkOptions(&ImportOptions.Base)
return errs
}
func updateBaseOpts(opts *ImportBaseOptions) []error {
conf := &Config{
CacheDir: defaultCacheDir,
Srid: defaultSrid,
}
if *configFile != "" {
f, err := os.Open(*configFile)
if opts.ConfigFile != "" {
f, err := os.Open(opts.ConfigFile)
if err != nil {
return nil, []error{err}
return []error{err}
}
decoder := json.NewDecoder(f)
err = decoder.Decode(&config)
err = decoder.Decode(&conf)
if err != nil {
return nil, []error{err}
return []error{err}
}
}
if *connection != "" {
config.Connection = *connection
if opts.Connection == "" {
opts.Connection = conf.Connection
}
if config.Srid == 0 {
config.Srid = defaultSrid
if conf.Srid == 0 {
conf.Srid = defaultSrid
}
if *srid != defaultSrid {
config.Srid = *srid
if opts.Srid != defaultSrid {
opts.Srid = conf.Srid
}
if *mappingFile != "" {
config.MappingFile = *mappingFile
if opts.MappingFile == "" {
opts.MappingFile = conf.MappingFile
}
if *limitTo != "" {
config.LimitTo = *limitTo
if opts.LimitTo == "" {
opts.LimitTo = conf.LimitTo
}
if *cachedir != defaultCacheDir {
config.CacheDir = *cachedir
if opts.CacheDir == defaultCacheDir {
opts.CacheDir = conf.CacheDir
}
errs := checkConfig(config)
return config, errs
return nil
}
func checkConfig(config *Config) []error {
func ParseDiffImport(args []string) []error {
err := DiffImportFlags.Parse(args)
if err != nil {
log.Fatal(err)
}
errs := updateBaseOpts(&DiffImportOptions.Base)
if errs != nil {
return errs
}
errs = checkOptions(&DiffImportOptions.Base)
return errs
}
func checkOptions(opts *ImportBaseOptions) []error {
errs := []error{}
if config.Srid != 3857 {
if opts.Srid != 3857 {
errs = append(errs, errors.New("srid!=3857 not implemented"))
}
if config.MappingFile == "" {
if opts.MappingFile == "" {
errs = append(errs, errors.New("missing mapping"))
}
if config.Connection == "" {
if opts.Connection == "" {
errs = append(errs, errors.New("missing connection"))
}
return errs
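
The refactor above replaces the single package-level flag set with per-subcommand FlagSets (ImportFlags, DiffImportFlags) and exported option structs. Below is a minimal sketch of how a caller could drive the new parser, assuming the goposm/config import path used elsewhere in this commit; the flag values are hypothetical, and the commit's actual wiring is in the goposm.go changes further down:

package main

import (
	"fmt"
	"os"

	"goposm/config"
)

func main() {
	// Parse import-style flags, e.g.:
	//   example -mapping mapping.json -connection postgis://user@localhost/osm -read planet.pbf -write
	errs := config.ParseImport(os.Args[1:])
	if len(errs) > 0 {
		config.ImportFlags.PrintDefaults()
		for _, err := range errs {
			fmt.Println("option error:", err)
		}
		os.Exit(1)
	}

	// Parsed values are exposed through the package-level ImportOptions struct.
	opts := config.ImportOptions
	fmt.Println("connection:", opts.Base.Connection)
	fmt.Println("cache dir:", opts.Base.CacheDir)
	fmt.Println("write:", opts.Write)
}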


@ -1,6 +1,7 @@
package database
import (
"errors"
"goposm/mapping"
"strings"
)
@ -63,7 +64,7 @@ func Register(name string, f func(Config, *mapping.Mapping) (DB, error)) {
func Open(conf Config, m *mapping.Mapping) (DB, error) {
newFunc, ok := databases[conf.Type]
if !ok {
panic("unsupported database type: " + conf.Type)
return nil, errors.New("unsupported database type: " + conf.Type)
}
db, err := newFunc(conf, m)
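
With this change database.Open reports an unknown connection type as a regular error instead of panicking. A minimal sketch of a caller handling that case (hypothetical values, not part of this commit):

package main

import (
	"fmt"

	"goposm/database"
)

func main() {
	// "sqlite" is not a registered backend here, so Open returns an error
	// where the previous code would have panicked.
	conf := database.Config{
		Type:             "sqlite",
		ConnectionParams: "",
		Srid:             3857,
	}
	// The mapping is never used on this failure path, so nil is fine here.
	if _, err := database.Open(conf, nil); err != nil {
		fmt.Println("open failed:", err)
	}
}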


@ -403,6 +403,7 @@ func clusterTable(pg *PostGIS, tableName string, srid int, columns []ColumnSpec)
type PostGIS struct {
Db *sql.DB
Params string
Schema string
BackupSchema string
Config database.Config
@ -415,11 +416,7 @@ type PostGIS struct {
func (pg *PostGIS) Open() error {
var err error
params, err := pq.ParseURL(pg.Config.ConnectionParams)
if err != nil {
return err
}
pg.Db, err = sql.Open("postgres", params)
pg.Db, err = sql.Open("postgres", pg.Params)
if err != nil {
return err
}
@ -580,6 +577,7 @@ func (pg *PostGIS) NewTableTx(spec *TableSpec, bulkImport bool) *TableTx {
func New(conf database.Config, m *mapping.Mapping) (database.DB, error) {
db := &PostGIS{}
db.Tables = make(map[string]*TableSpec)
db.GeneralizedTables = make(map[string]*GeneralizedTableSpec)
@ -596,6 +594,7 @@ func New(conf database.Config, m *mapping.Mapping) (database.DB, error) {
if err != nil {
return nil, err
}
params = disableDefaultSslOnLocalhost(params)
db.Schema, db.BackupSchema = schemasFromConnectionParams(params)
db.Prefix = prefixFromConnectionParams(params)
@ -607,6 +606,7 @@ func New(conf database.Config, m *mapping.Mapping) (database.DB, error) {
}
db.prepareGeneralizedTableSources()
db.Params = params
err = db.Open()
if err != nil {
return nil, err


@ -3,6 +3,7 @@ package postgis
import (
"database/sql"
"fmt"
"os"
"strings"
"sync"
)
@ -26,6 +27,36 @@ func schemasFromConnectionParams(params string) (string, string) {
return schema, backupSchema
}
// disableDefaultSslOnLocalhost adds sslmode=disable to params
// when host is localhost/127.0.0.1 and neither the sslmode param
// nor the PGSSLMODE environment variable is set.
func disableDefaultSslOnLocalhost(params string) string {
parts := strings.Fields(params)
isLocalHost := false
for _, p := range parts {
if strings.HasPrefix(p, "sslmode=") {
return params
}
if p == "host=localhost" || p == "host=127.0.0.1" {
isLocalHost = true
}
}
if !isLocalHost {
return params
}
for _, v := range os.Environ() {
parts := strings.SplitN(v, "=", 2)
if parts[0] == "PGSSLMODE" {
return params
}
}
// host is localhost and no explicit sslmode is set: disable sslmode
return params + " sslmode=disable"
}
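
A table-driven test sketch for the helper above (hypothetical, not part of this commit); it assumes PGSSLMODE is not set in the environment while the test runs:

package postgis

import "testing"

func TestDisableDefaultSslOnLocalhost(t *testing.T) {
	cases := []struct {
		params string
		want   string
	}{
		// localhost without an explicit sslmode: disable is appended
		{"host=localhost dbname=osm", "host=localhost dbname=osm sslmode=disable"},
		{"host=127.0.0.1 dbname=osm", "host=127.0.0.1 dbname=osm sslmode=disable"},
		// an explicit sslmode is left untouched
		{"host=localhost sslmode=require", "host=localhost sslmode=require"},
		// remote hosts are left untouched
		{"host=db.example.org dbname=osm", "host=db.example.org dbname=osm"},
	}
	for _, c := range cases {
		if got := disableDefaultSslOnLocalhost(c.params); got != c.want {
			t.Errorf("disableDefaultSslOnLocalhost(%q) = %q, want %q", c.params, got, c.want)
		}
	}
}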
func prefixFromConnectionParams(params string) string {
parts := strings.Fields(params)
var prefix string


@ -6,13 +6,10 @@ import (
"goposm/diff/parser"
"goposm/element"
"goposm/expire"
"goposm/logging"
"goposm/mapping"
"goposm/proj"
)
var log = logging.NewLogger("diff")
type Deleter struct {
delDb database.Deleter
osmCache *cache.OSMCache


@ -30,6 +30,9 @@ type diffDownload struct {
func NewDiffDownload(dest string) *diffDownload {
state, err := state.ParseLastState(dest)
if err != nil {
panic(err)
}
return &diffDownload{state.Url, dest, 0}
}


@ -1,14 +1,13 @@
package main
package diff
import (
"flag"
"fmt"
"goposm/cache"
"goposm/config"
"goposm/database"
_ "goposm/database/postgis"
"goposm/diff"
"goposm/diff/parser"
diffstate "goposm/diff/state"
"goposm/element"
"goposm/expire"
"goposm/geom/clipper"
@ -17,40 +16,21 @@ import (
"goposm/stats"
"goposm/writer"
"io"
"os"
)
var log = logging.NewLogger("")
var log = logging.NewLogger("diff")
func main() {
flag.Parse()
conf, errs := config.Parse()
if len(errs) > 0 {
log.Warn("errors in config/options:")
for _, err := range errs {
log.Warnf("\t%s", err)
}
logging.Shutdown()
os.Exit(1)
}
for _, oscFile := range flag.Args() {
update(oscFile, conf, false)
}
logging.Shutdown()
os.Exit(0)
}
func update(oscFile string, conf *config.Config, force bool) {
state, err := diff.ParseStateFromOsc(oscFile)
func Update(oscFile string, force bool) {
state, err := diffstate.ParseFromOsc(oscFile)
if err != nil {
log.Fatal(err)
}
lastState, err := diff.ParseLastState(conf.CacheDir)
lastState, err := diffstate.ParseLastState(config.DiffImportOptions.Base.CacheDir)
if err != nil {
log.Fatal(err)
}
if lastState != nil && lastState.Sequence != 0 && state.Sequence <= lastState.Sequence {
if lastState != nil && lastState.Sequence != 0 && state != nil && state.Sequence <= lastState.Sequence {
if !force {
log.Warn(state, " already imported")
return
@ -61,28 +41,28 @@ func update(oscFile string, conf *config.Config, force bool) {
elems, errc := parser.Parse(oscFile)
osmCache := cache.NewOSMCache(conf.CacheDir)
osmCache := cache.NewOSMCache(config.DiffImportOptions.Base.CacheDir)
err = osmCache.Open()
if err != nil {
log.Fatal("osm cache: ", err)
}
diffCache := cache.NewDiffCache(conf.CacheDir)
diffCache := cache.NewDiffCache(config.DiffImportOptions.Base.CacheDir)
err = diffCache.Open()
if err != nil {
log.Fatal("diff cache: ", err)
}
tagmapping, err := mapping.NewMapping(conf.MappingFile)
tagmapping, err := mapping.NewMapping(config.DiffImportOptions.Base.MappingFile)
if err != nil {
log.Fatal(err)
}
connType := database.ConnectionType(conf.Connection)
connType := database.ConnectionType(config.DiffImportOptions.Base.Connection)
dbConf := database.Config{
Type: connType,
ConnectionParams: conf.Connection,
Srid: conf.Srid,
ConnectionParams: config.DiffImportOptions.Base.Connection,
Srid: config.DiffImportOptions.Base.Srid,
}
db, err := database.Open(dbConf, tagmapping)
if err != nil {
@ -98,7 +78,7 @@ func update(oscFile string, conf *config.Config, force bool) {
if !ok {
log.Fatal("database not deletable")
}
deleter := diff.NewDeleter(
deleter := NewDeleter(
delDb,
osmCache,
diffCache,
@ -126,19 +106,19 @@ func update(oscFile string, conf *config.Config, force bool) {
nodes := make(chan *element.Node)
relWriter := writer.NewRelationWriter(osmCache, diffCache, relations,
db, polygonsTagMatcher, progress, conf.Srid)
db, polygonsTagMatcher, progress, config.DiffImportOptions.Base.Srid)
relWriter.SetClipper(geometryClipper)
relWriter.SetExpireTiles(expiredTiles)
relWriter.Start()
wayWriter := writer.NewWayWriter(osmCache, diffCache, ways, db,
lineStringsTagMatcher, polygonsTagMatcher, progress, conf.Srid)
lineStringsTagMatcher, polygonsTagMatcher, progress, config.DiffImportOptions.Base.Srid)
wayWriter.SetClipper(geometryClipper)
wayWriter.SetExpireTiles(expiredTiles)
wayWriter.Start()
nodeWriter := writer.NewNodeWriter(osmCache, nodes, db,
pointsTagMatcher, progress, conf.Srid)
pointsTagMatcher, progress, config.DiffImportOptions.Base.Srid)
nodeWriter.SetClipper(geometryClipper)
nodeWriter.Start()
@ -296,7 +276,7 @@ For:
log.StopStep(step)
progress.Stop()
err = diff.WriteLastState(conf.CacheDir, state)
err = diffstate.WriteLastState(config.DiffImportOptions.Base.CacheDir, state)
if err != nil {
log.Warn(err) // warn only
}

goposm.go

@ -1,11 +1,12 @@
package main
import (
"flag"
"fmt"
"goposm/cache"
"goposm/config"
"goposm/database"
_ "goposm/database/postgis"
"goposm/diff"
state "goposm/diff/state"
"goposm/geom/clipper"
"goposm/logging"
@ -25,52 +26,64 @@ import (
var log = logging.NewLogger("")
var (
cpuprofile = flag.String("cpuprofile", "", "filename of cpu profile output")
httpprofile = flag.String("httpprofile", "", "bind address for profile server")
memprofile = flag.String("memprofile", "", "dir name of mem profile output and interval (fname:interval)")
overwritecache = flag.Bool("overwritecache", false, "overwritecache")
appendcache = flag.Bool("appendcache", false, "append cache")
read = flag.String("read", "", "read")
write = flag.Bool("write", false, "write")
optimize = flag.Bool("optimize", false, "optimize")
diff = flag.Bool("diff", false, "enable diff support")
deployProduction = flag.Bool("deployproduction", false, "deploy production")
revertDeploy = flag.Bool("revertdeploy", false, "revert deploy to production")
removeBackup = flag.Bool("removebackup", false, "remove backups from deploy")
quiet = flag.Bool("quiet", false, "quiet log output")
)
func die(args ...interface{}) {
log.Fatal(args...)
func reportErrors(errs []error) {
fmt.Println("errors in config/options:")
for _, err := range errs {
fmt.Printf("\t%s\n", err)
}
logging.Shutdown()
os.Exit(1)
}
func dief(msg string, args ...interface{}) {
log.Fatalf(msg, args...)
func printCmds() {
fmt.Println("available commands:")
fmt.Println("\timport")
fmt.Println("\tdiff")
}
func main() {
golog.SetFlags(golog.LstdFlags | golog.Lshortfile)
if os.Getenv("GOMAXPROCS") == "" {
runtime.GOMAXPROCS(runtime.NumCPU())
}
flag.Parse()
conf, errs := config.Parse()
if len(errs) > 0 {
log.Warn("errors in config/options:")
for _, err := range errs {
log.Warnf("\t%s", err)
}
if len(os.Args) <= 1 {
printCmds()
logging.Shutdown()
os.Exit(1)
}
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
switch os.Args[1] {
case "import":
errs := config.ParseImport(os.Args[2:])
if len(errs) > 0 {
config.ImportFlags.PrintDefaults()
reportErrors(errs)
break
}
mainimport()
case "diff":
errs := config.ParseDiffImport(os.Args[2:])
if len(errs) > 0 {
config.DiffImportFlags.PrintDefaults()
reportErrors(errs)
break
}
for _, oscFile := range config.DiffImportFlags.Args() {
diff.Update(oscFile, false)
}
default:
log.Fatal("invalid command")
}
logging.Shutdown()
os.Exit(0)
}
func mainimport() {
if config.ImportOptions.Cpuprofile != "" {
f, err := os.Create(config.ImportOptions.Cpuprofile)
if err != nil {
golog.Fatal(err)
}
@ -78,12 +91,12 @@ func main() {
defer pprof.StopCPUProfile()
}
if *httpprofile != "" {
stats.StartHttpPProf(*httpprofile)
if config.ImportOptions.Httpprofile != "" {
stats.StartHttpPProf(config.ImportOptions.Httpprofile)
}
if *memprofile != "" {
parts := strings.Split(*memprofile, string(os.PathListSeparator))
if config.ImportOptions.Memprofile != "" {
parts := strings.Split(config.ImportOptions.Memprofile, string(os.PathListSeparator))
var interval time.Duration
if len(parts) < 2 {
@ -99,76 +112,76 @@ func main() {
go stats.MemProfiler(parts[0], interval)
}
if *quiet {
if config.ImportOptions.Quiet {
logging.SetQuiet(true)
}
if (*write || *read != "") && (*revertDeploy || *removeBackup) {
die("-revertdeploy and -removebackup not compatible with -read/-write")
if (config.ImportOptions.Write || config.ImportOptions.Read != "") && (config.ImportOptions.RevertDeploy || config.ImportOptions.RemoveBackup) {
log.Fatal("-revertdeploy and -removebackup not compatible with -read/-write")
}
if *revertDeploy && (*removeBackup || *deployProduction) {
die("-revertdeploy not compatible with -deployproduction/-removebackup")
if config.ImportOptions.RevertDeploy && (config.ImportOptions.RemoveBackup || config.ImportOptions.DeployProduction) {
log.Fatal("-revertdeploy not compatible with -deployproduction/-removebackup")
}
var geometryClipper *clipper.Clipper
if *write && conf.LimitTo != "" {
if config.ImportOptions.Write && config.ImportOptions.Base.LimitTo != "" {
var err error
step := log.StartStep("Reading limitto geometries")
geometryClipper, err = clipper.NewFromOgrSource(conf.LimitTo)
geometryClipper, err = clipper.NewFromOgrSource(config.ImportOptions.Base.LimitTo)
if err != nil {
die(err)
log.Fatal(err)
}
log.StopStep(step)
}
osmCache := cache.NewOSMCache(conf.CacheDir)
if *read != "" && osmCache.Exists() {
if *overwritecache {
log.Printf("removing existing cache %s", conf.CacheDir)
err := osmCache.Remove()
if err != nil {
die("unable to remove cache:", err)
}
} else if !*appendcache {
die("cache already exists use -appendcache or -overwritecache")
}
}
progress := stats.StatsReporter()
tagmapping, err := mapping.NewMapping(conf.MappingFile)
tagmapping, err := mapping.NewMapping(config.ImportOptions.Base.MappingFile)
if err != nil {
die("mapping file: ", err)
log.Fatal("mapping file: ", err)
}
var db database.DB
if *write || *deployProduction || *revertDeploy || *removeBackup || *optimize {
connType := database.ConnectionType(conf.Connection)
if config.ImportOptions.Write || config.ImportOptions.DeployProduction || config.ImportOptions.RevertDeploy || config.ImportOptions.RemoveBackup || config.ImportOptions.Optimize {
connType := database.ConnectionType(config.ImportOptions.Base.Connection)
conf := database.Config{
Type: connType,
ConnectionParams: conf.Connection,
Srid: conf.Srid,
ConnectionParams: config.ImportOptions.Base.Connection,
Srid: config.ImportOptions.Base.Srid,
}
db, err = database.Open(conf, tagmapping)
if err != nil {
die(err)
log.Fatal(err)
}
}
osmCache := cache.NewOSMCache(config.ImportOptions.Base.CacheDir)
if config.ImportOptions.Read != "" && osmCache.Exists() {
if config.ImportOptions.Overwritecache {
log.Printf("removing existing cache %s", config.ImportOptions.Base.CacheDir)
err := osmCache.Remove()
if err != nil {
log.Fatal("unable to remove cache:", err)
}
} else if !config.ImportOptions.Appendcache {
log.Fatal("cache already exists use -appendcache or -overwritecache")
}
}
step := log.StartStep("Imposm")
if *read != "" {
if config.ImportOptions.Read != "" {
step := log.StartStep("Reading OSM data")
err = osmCache.Open()
if err != nil {
die(err)
log.Fatal(err)
}
progress.Start()
pbfFile, err := pbf.Open(*read)
pbfFile, err := pbf.Open(config.ImportOptions.Read)
if err != nil {
log.Fatal(err)
}
@ -179,21 +192,21 @@ func main() {
progress.Stop()
osmCache.Close()
log.StopStep(step)
if *diff {
if config.ImportOptions.Diff {
diffstate := state.FromPbf(pbfFile)
if diffstate != nil {
diffstate.WriteToFile(path.Join(conf.CacheDir, "last.state.txt"))
diffstate.WriteToFile(path.Join(config.ImportOptions.Base.CacheDir, "last.state.txt"))
}
}
}
if *write {
if config.ImportOptions.Write {
stepImport := log.StartStep("Importing OSM data")
stepWrite := log.StartStep("Writing OSM data")
progress.Start()
err = db.Init()
if err != nil {
die(err)
log.Fatal(err)
}
bulkDb, ok := db.(database.BulkBeginner)
@ -203,23 +216,23 @@ func main() {
err = db.Begin()
}
if err != nil {
die(err)
log.Fatal(err)
}
var diffCache *cache.DiffCache
if *diff {
diffCache = cache.NewDiffCache(conf.CacheDir)
if config.ImportOptions.Diff {
diffCache = cache.NewDiffCache(config.ImportOptions.Base.CacheDir)
if err = diffCache.Remove(); err != nil {
die(err)
log.Fatal(err)
}
if err = diffCache.Open(); err != nil {
die(err)
log.Fatal(err)
}
}
err = osmCache.Open()
if err != nil {
die(err)
log.Fatal(err)
}
osmCache.Coords.SetReadOnly(true)
pointsTagMatcher := tagmapping.PointMatcher()
@ -228,7 +241,7 @@ func main() {
relations := osmCache.Relations.Iter()
relWriter := writer.NewRelationWriter(osmCache, diffCache, relations,
db, polygonsTagMatcher, progress, conf.Srid)
db, polygonsTagMatcher, progress, config.ImportOptions.Base.Srid)
relWriter.SetClipper(geometryClipper)
relWriter.Start()
@ -238,7 +251,7 @@ func main() {
ways := osmCache.Ways.Iter()
wayWriter := writer.NewWayWriter(osmCache, diffCache, ways, db,
lineStringsTagMatcher, polygonsTagMatcher, progress, conf.Srid)
lineStringsTagMatcher, polygonsTagMatcher, progress, config.ImportOptions.Base.Srid)
wayWriter.SetClipper(geometryClipper)
wayWriter.Start()
@ -248,7 +261,7 @@ func main() {
nodes := osmCache.Nodes.Iter()
nodeWriter := writer.NewNodeWriter(osmCache, nodes, db,
pointsTagMatcher, progress, conf.Srid)
pointsTagMatcher, progress, config.ImportOptions.Base.Srid)
nodeWriter.SetClipper(geometryClipper)
nodeWriter.Start()
@ -258,12 +271,12 @@ func main() {
err = db.End()
if err != nil {
die(err)
log.Fatal(err)
}
progress.Stop()
if *diff {
if config.ImportOptions.Diff {
diffCache.Close()
}
@ -271,59 +284,59 @@ func main() {
if db, ok := db.(database.Generalizer); ok {
if err := db.Generalize(); err != nil {
die(err)
log.Fatal(err)
}
} else {
die("database not generalizeable")
log.Fatal("database not generalizeable")
}
if db, ok := db.(database.Finisher); ok {
if err := db.Finish(); err != nil {
die(err)
log.Fatal(err)
}
} else {
die("database not finishable")
log.Fatal("database not finishable")
}
log.StopStep(stepImport)
}
if *optimize {
if config.ImportOptions.Optimize {
if db, ok := db.(database.Optimizer); ok {
if err := db.Optimize(); err != nil {
die(err)
log.Fatal(err)
}
} else {
die("database not optimizable")
log.Fatal("database not optimizable")
}
}
if *deployProduction {
if config.ImportOptions.DeployProduction {
if db, ok := db.(database.Deployer); ok {
if err := db.Deploy(); err != nil {
die(err)
log.Fatal(err)
}
} else {
die("database not deployable")
log.Fatal("database not deployable")
}
}
if *revertDeploy {
if config.ImportOptions.RevertDeploy {
if db, ok := db.(database.Deployer); ok {
if err := db.RevertDeploy(); err != nil {
die(err)
log.Fatal(err)
}
} else {
die("database not deployable")
log.Fatal("database not deployable")
}
}
if *removeBackup {
if config.ImportOptions.RemoveBackup {
if db, ok := db.(database.Deployer); ok {
if err := db.RemoveBackup(); err != nil {
die(err)
log.Fatal(err)
}
} else {
die("database not deployable")
log.Fatal("database not deployable")
}
}