update lib/pq

master
Oliver Tonnhofer 2017-05-18 11:30:12 +02:00
parent 29d1882bda
commit a96e0caf8a
18 changed files with 2048 additions and 456 deletions

vendor/github.com/lib/pq/README.md generated vendored (11 changed lines)

@ -1,6 +1,6 @@
# pq - A pure Go postgres driver for Go's database/sql package
[![Build Status](https://travis-ci.org/lib/pq.png?branch=master)](https://travis-ci.org/lib/pq)
[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
## Install
@ -20,11 +20,11 @@ variables.
Example:
PGHOST=/var/run/postgresql go test github.com/lib/pq
PGHOST=/run/postgresql go test github.com/lib/pq
Optionally, a benchmark suite can be run as part of the tests:
PGHOST=/var/run/postgresql go test -bench .
PGHOST=/run/postgresql go test -bench .
## Features
@ -38,6 +38,7 @@ Optionally, a benchmark suite can be run as part of the tests:
* Many libpq compatible environment variables
* Unix socket support
* Notifications: `LISTEN`/`NOTIFY`
* pgpass support
## Future / Things you can help with
@ -57,6 +58,7 @@ code still exists in here.
* Brad Fitzpatrick (bradfitz)
* Charlie Melbye (cmelbye)
* Chris Bandy (cbandy)
* Chris Gilling (cgilling)
* Chris Walsh (cwds)
* Dan Sosedoff (sosedoff)
* Daniel Farina (fdr)
@ -66,6 +68,7 @@ code still exists in here.
* Everyone at The Go Team
* Evan Shaw (edsrzf)
* Ewan Chou (coocood)
* Fazal Majid (fazalmajid)
* Federico Romero (federomero)
* Fumin (fumin)
* Gary Burd (garyburd)
@ -82,7 +85,7 @@ code still exists in here.
* Keith Rarick (kr)
* Kir Shatrov (kirs)
* Lann Martin (lann)
* Maciek Sakrejda (deafbybeheading)
* Maciek Sakrejda (uhoh-itsmaciek)
* Marc Brinkmann (mbr)
* Marko Tiikkaja (johto)
* Matt Newberry (MattNewberry)

vendor/github.com/lib/pq/array.go generated vendored Normal file (756 changed lines)

@ -0,0 +1,756 @@
package pq
import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
)
var typeByteSlice = reflect.TypeOf([]byte{})
var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
var typeSqlScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
// Array returns the optimal driver.Valuer and sql.Scanner for an array or
// slice of any dimension.
//
// For example:
// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
//
// var x []sql.NullInt64
// db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x))
//
// Scanning multi-dimensional arrays is not supported. Arrays where the lower
// bound is not one (such as `[0:0]={1}') are not supported.
func Array(a interface{}) interface {
driver.Valuer
sql.Scanner
} {
switch a := a.(type) {
case []bool:
return (*BoolArray)(&a)
case []float64:
return (*Float64Array)(&a)
case []int64:
return (*Int64Array)(&a)
case []string:
return (*StringArray)(&a)
case *[]bool:
return (*BoolArray)(a)
case *[]float64:
return (*Float64Array)(a)
case *[]int64:
return (*Int64Array)(a)
case *[]string:
return (*StringArray)(a)
}
return GenericArray{a}
}
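
The Array helper above picks a typed wrapper for the common slice types and falls back to GenericArray otherwise. A minimal usage sketch, assuming a placeholder DSN and a hypothetical table t(id integer, tags text[]):

```go
// A hypothetical program; the DSN and the table t(id integer, tags text[])
// are placeholders.
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "dbname=test sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pass a Go slice as a PostgreSQL array parameter.
	rows, err := db.Query(`SELECT id, tags FROM t WHERE id = ANY($1)`, pq.Array([]int64{235, 401}))
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var (
			id   int64
			tags []string
		)
		// Scan a text[] column back into a Go slice.
		if err := rows.Scan(&id, pq.Array(&tags)); err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, tags)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```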
// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
// to override the array delimiter used by GenericArray.
type ArrayDelimiter interface {
// ArrayDelimiter returns the delimiter character(s) for this element's type.
ArrayDelimiter() string
}
// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
type BoolArray []bool
// Scan implements the sql.Scanner interface.
func (a *BoolArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
}
func (a *BoolArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(BoolArray, len(elems))
for i, v := range elems {
if len(v) != 1 {
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
switch v[0] {
case 't':
b[i] = true
case 'f':
b[i] = false
default:
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a BoolArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be exactly two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1+2*n)
for i := 0; i < n; i++ {
b[2*i] = ','
if a[i] {
b[1+2*i] = 't'
} else {
b[1+2*i] = 'f'
}
}
b[0] = '{'
b[2*n] = '}'
return string(b), nil
}
return "{}", nil
}
// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
type ByteaArray [][]byte
// Scan implements the sql.Scanner interface.
func (a *ByteaArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
}
func (a *ByteaArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(ByteaArray, len(elems))
for i, v := range elems {
b[i], err = parseBytea(v)
if err != nil {
return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface. It uses the "hex" format which
// is only supported on PostgreSQL 9.0 or newer.
func (a ByteaArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
size := 1 + 6*n
for _, x := range a {
size += hex.EncodedLen(len(x))
}
b := make([]byte, size)
for i, s := 0, b; i < n; i++ {
o := copy(s, `,"\\x`)
o += hex.Encode(s[o:], a[i])
s[o] = '"'
s = s[o+1:]
}
b[0] = '{'
b[size-1] = '}'
return string(b), nil
}
return "{}", nil
}
// Float64Array represents a one-dimensional array of the PostgreSQL double
// precision type.
type Float64Array []float64
// Scan implements the sql.Scanner interface.
func (a *Float64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
}
func (a *Float64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Float64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Float64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
// an array or slice of any dimension.
type GenericArray struct{ A interface{} }
func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
var assign func([]byte, reflect.Value) error
var del = ","
// TODO calculate the assign function for other types
// TODO repeat this section on the element type of arrays or slices (multidimensional)
{
if reflect.PtrTo(rt).Implements(typeSqlScanner) {
// dest is always addressable because it is an element of a slice.
assign = func(src []byte, dest reflect.Value) (err error) {
ss := dest.Addr().Interface().(sql.Scanner)
if src == nil {
err = ss.Scan(nil)
} else {
err = ss.Scan(src)
}
return
}
goto FoundType
}
assign = func([]byte, reflect.Value) error {
return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
}
}
FoundType:
if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
return rt, assign, del
}
// Scan implements the sql.Scanner interface.
func (a GenericArray) Scan(src interface{}) error {
dpv := reflect.ValueOf(a.A)
switch {
case dpv.Kind() != reflect.Ptr:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
case dpv.IsNil():
return fmt.Errorf("pq: destination %T is nil", a.A)
}
dv := dpv.Elem()
switch dv.Kind() {
case reflect.Slice:
case reflect.Array:
default:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
}
switch src := src.(type) {
case []byte:
return a.scanBytes(src, dv)
case string:
return a.scanBytes([]byte(src), dv)
case nil:
if dv.Kind() == reflect.Slice {
dv.Set(reflect.Zero(dv.Type()))
return nil
}
}
return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
}
func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
dims, elems, err := parseArray(src, []byte(del))
if err != nil {
return err
}
// TODO allow multidimensional
if len(dims) > 1 {
return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
strings.Replace(fmt.Sprint(dims), " ", "][", -1))
}
// Treat a zero-dimensional array like an array with a single dimension of zero.
if len(dims) == 0 {
dims = append(dims, 0)
}
for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
switch rt.Kind() {
case reflect.Slice:
case reflect.Array:
if rt.Len() != dims[i] {
return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
}
default:
// TODO handle multidimensional
}
}
values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
for i, e := range elems {
if err := assign(e, values.Index(i)); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
// TODO handle multidimensional
switch dv.Kind() {
case reflect.Slice:
dv.Set(values.Slice(0, dims[0]))
case reflect.Array:
for i := 0; i < dims[0]; i++ {
dv.Index(i).Set(values.Index(i))
}
}
return nil
}
// Value implements the driver.Valuer interface.
func (a GenericArray) Value() (driver.Value, error) {
if a.A == nil {
return nil, nil
}
rv := reflect.ValueOf(a.A)
switch rv.Kind() {
case reflect.Slice:
if rv.IsNil() {
return nil, nil
}
case reflect.Array:
default:
return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
}
if n := rv.Len(); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 0, 1+2*n)
b, _, err := appendArray(b, rv, n)
return string(b), err
}
return "{}", nil
}
// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
type Int64Array []int64
// Scan implements the sql.Scanner interface.
func (a *Int64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
}
func (a *Int64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Int64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a Int64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'
b = strconv.AppendInt(b, a[0], 10)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendInt(b, a[i], 10)
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// StringArray represents a one-dimensional array of the PostgreSQL character types.
type StringArray []string
// Scan implements the sql.Scanner interface.
func (a *StringArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}
return fmt.Errorf("pq: cannot convert %T to StringArray", src)
}
func (a *StringArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "StringArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(StringArray, len(elems))
for i, v := range elems {
if b[i] = string(v); v == nil {
return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
}
}
*a = b
}
return nil
}
// Value implements the driver.Valuer interface.
func (a StringArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}
if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+3*n)
b[0] = '{'
b = appendArrayQuotedBytes(b, []byte(a[0]))
for i := 1; i < n; i++ {
b = append(b, ',')
b = appendArrayQuotedBytes(b, []byte(a[i]))
}
return string(append(b, '}')), nil
}
return "{}", nil
}
// appendArray appends rv to the buffer, returning the extended buffer and
// the delimiter used between elements.
//
// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
var del string
var err error
b = append(b, '{')
if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
return b, del, err
}
for i := 1; i < n; i++ {
b = append(b, del...)
if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
return b, del, err
}
}
return append(b, '}'), del, nil
}
// appendArrayElement appends rv to the buffer, returning the extended buffer
// and the delimiter to use before the next element.
//
// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
// using driver.DefaultParameterConverter and the resulting []byte or string
// is double-quoted.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
if n := rv.Len(); n > 0 {
return appendArray(b, rv, n)
}
return b, "", nil
}
}
var del string = ","
var err error
var iv interface{} = rv.Interface()
if ad, ok := iv.(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}
if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
return b, del, err
}
switch v := iv.(type) {
case nil:
return append(b, "NULL"...), del, nil
case []byte:
return appendArrayQuotedBytes(b, v), del, nil
case string:
return appendArrayQuotedBytes(b, []byte(v)), del, nil
}
b, err = appendValue(b, iv)
return b, del, err
}
func appendArrayQuotedBytes(b, v []byte) []byte {
b = append(b, '"')
for {
i := bytes.IndexAny(v, `"\`)
if i < 0 {
b = append(b, v...)
break
}
if i > 0 {
b = append(b, v[:i]...)
}
b = append(b, '\\', v[i])
v = v[i+1:]
}
return append(b, '"')
}
func appendValue(b []byte, v driver.Value) ([]byte, error) {
return append(b, encode(nil, v, 0)...), nil
}
// parseArray extracts the dimensions and elements of an array represented in
// text format. Only representations emitted by the backend are supported.
// Notably, whitespace around brackets and delimiters is significant, and NULL
// is case-sensitive.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
var depth, i int
if len(src) < 1 || src[0] != '{' {
return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
}
Open:
for i < len(src) {
switch src[i] {
case '{':
depth++
i++
case '}':
elems = make([][]byte, 0)
goto Close
default:
break Open
}
}
dims = make([]int, i)
Element:
for i < len(src) {
switch src[i] {
case '{':
if depth == len(dims) {
break Element
}
depth++
dims[depth-1] = 0
i++
case '"':
var elem = []byte{}
var escape bool
for i++; i < len(src); i++ {
if escape {
elem = append(elem, src[i])
escape = false
} else {
switch src[i] {
default:
elem = append(elem, src[i])
case '\\':
escape = true
case '"':
elems = append(elems, elem)
i++
break Element
}
}
}
default:
for start := i; i < len(src); i++ {
if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
elem := src[start:i]
if len(elem) == 0 {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
if bytes.Equal(elem, []byte("NULL")) {
elem = nil
}
elems = append(elems, elem)
break Element
}
}
}
}
for i < len(src) {
if bytes.HasPrefix(src[i:], del) && depth > 0 {
dims[depth-1]++
i += len(del)
goto Element
} else if src[i] == '}' && depth > 0 {
dims[depth-1]++
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
Close:
for i < len(src) {
if src[i] == '}' && depth > 0 {
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
if depth > 0 {
err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
}
if err == nil {
for _, d := range dims {
if (len(elems) % d) != 0 {
err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
}
}
}
return
}
func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
dims, elems, err := parseArray(src, del)
if err != nil {
return nil, err
}
if len(dims) > 1 {
return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
}
return elems, err
}

vendor/github.com/lib/pq/buf.go generated vendored (29 changed lines)

@ -21,6 +21,7 @@ func (b *readBuf) oid() (n oid.Oid) {
return
}
// N.B: this is actually an unsigned 16-bit integer, unlike int32
func (b *readBuf) int16() (n int) {
n = int(binary.BigEndian.Uint16(*b))
*b = (*b)[2:]
@ -47,28 +48,44 @@ func (b *readBuf) byte() byte {
return b.next(1)[0]
}
type writeBuf []byte
type writeBuf struct {
buf []byte
pos int
}
func (b *writeBuf) int32(n int) {
x := make([]byte, 4)
binary.BigEndian.PutUint32(x, uint32(n))
*b = append(*b, x...)
b.buf = append(b.buf, x...)
}
func (b *writeBuf) int16(n int) {
x := make([]byte, 2)
binary.BigEndian.PutUint16(x, uint16(n))
*b = append(*b, x...)
b.buf = append(b.buf, x...)
}
func (b *writeBuf) string(s string) {
*b = append(*b, (s + "\000")...)
b.buf = append(b.buf, (s + "\000")...)
}
func (b *writeBuf) byte(c byte) {
*b = append(*b, c)
b.buf = append(b.buf, c)
}
func (b *writeBuf) bytes(v []byte) {
*b = append(*b, v...)
b.buf = append(b.buf, v...)
}
func (b *writeBuf) wrap() []byte {
p := b.buf[b.pos:]
binary.BigEndian.PutUint32(p, uint32(len(p)))
return b.buf
}
func (b *writeBuf) next(c byte) {
p := b.buf[b.pos:]
binary.BigEndian.PutUint32(p, uint32(len(p)))
b.pos = len(b.buf) + 1
b.buf = append(b.buf, c, 0, 0, 0, 0)
}

vendor/github.com/lib/pq/conn.go generated vendored (1045 changed lines)

File diff suppressed because it is too large.

vendor/github.com/lib/pq/conn_go18.go generated vendored Normal file (107 changed lines)

@ -0,0 +1,107 @@
// +build go1.8
package pq
import (
"context"
"database/sql/driver"
"errors"
"io"
"io/ioutil"
)
// Implement the "QueryerContext" interface
func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
finish := cn.watchCancel(ctx)
r, err := cn.query(query, list)
if err != nil {
return nil, err
}
r.finish = finish
return r, nil
}
// Implement the "ExecerContext" interface
func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
if finish := cn.watchCancel(ctx); finish != nil {
defer finish()
}
return cn.Exec(query, list)
}
// Implement the "ConnBeginTx" interface
func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
if opts.Isolation != 0 {
return nil, errors.New("isolation levels not supported")
}
if opts.ReadOnly {
return nil, errors.New("read-only transactions not supported")
}
tx, err := cn.Begin()
if err != nil {
return nil, err
}
cn.txnFinish = cn.watchCancel(ctx)
return tx, nil
}
func (cn *conn) watchCancel(ctx context.Context) func() {
if done := ctx.Done(); done != nil {
finished := make(chan struct{})
go func() {
select {
case <-done:
_ = cn.cancel()
finished <- struct{}{}
case <-finished:
}
}()
return func() {
select {
case <-finished:
case finished <- struct{}{}:
}
}
}
return nil
}
func (cn *conn) cancel() error {
c, err := dial(cn.dialer, cn.opts)
if err != nil {
return err
}
defer c.Close()
{
can := conn{
c: c,
}
can.ssl(cn.opts)
w := can.writeBuf(0)
w.int32(80877102) // cancel request code
w.int32(cn.processID)
w.int32(cn.secretKey)
if err := can.sendStartupPacket(w); err != nil {
return err
}
}
// Read until EOF to ensure that the server received the cancel.
{
_, err := io.Copy(ioutil.Discard, c)
return err
}
}
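
Together, QueryContext/ExecContext/BeginTx and watchCancel/cancel let a context deadline abort a running statement by dialing a second connection and sending a cancel request (code 80877102) with the stored process ID and secret key. A hedged sketch of how this surfaces through database/sql on Go 1.8+, with a placeholder DSN:

```go
// A hypothetical program; the DSN is a placeholder.
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "dbname=test sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// pg_sleep(5) outlives the one-second deadline, so watchCancel fires,
	// a cancel request is sent on a second connection, and the call returns
	// with an error instead of blocking for five seconds.
	_, err = db.ExecContext(ctx, `SELECT pg_sleep(5)`)
	fmt.Println("query ended with:", err)
}
```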

vendor/github.com/lib/pq/copy.go generated vendored (38 changed lines)

@ -13,6 +13,7 @@ var (
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
errCopyInProgress = errors.New("pq: COPY in progress")
)
// CopyIn creates a COPY FROM statement which can be prepared with
@ -96,13 +97,13 @@ awaitCopyInResponse:
err = parseError(r)
case 'Z':
if err == nil {
cn.bad = true
ci.setBad()
errorf("unexpected ReadyForQuery in response to COPY")
}
cn.processReadyForQuery(r)
return nil, err
default:
cn.bad = true
ci.setBad()
errorf("unknown response for copy query: %q", t)
}
}
@ -121,7 +122,7 @@ awaitCopyInResponse:
cn.processReadyForQuery(r)
return nil, err
default:
cn.bad = true
ci.setBad()
errorf("unknown response for CopyFail: %q", t)
}
}
@ -142,7 +143,7 @@ func (ci *copyin) resploop() {
var r readBuf
t, err := ci.cn.recvMessage(&r)
if err != nil {
ci.cn.bad = true
ci.setBad()
ci.setError(err)
ci.done <- true
return
@ -160,7 +161,7 @@ func (ci *copyin) resploop() {
err := parseError(&r)
ci.setError(err)
default:
ci.cn.bad = true
ci.setBad()
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
ci.done <- true
return
@ -168,6 +169,19 @@ func (ci *copyin) resploop() {
}
}
func (ci *copyin) setBad() {
ci.Lock()
ci.cn.bad = true
ci.Unlock()
}
func (ci *copyin) isBad() bool {
ci.Lock()
b := ci.cn.bad
ci.Unlock()
return b
}
func (ci *copyin) isErrorSet() bool {
ci.Lock()
isSet := (ci.err != nil)
@ -205,7 +219,7 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
return nil, errCopyInClosed
}
if ci.cn.bad {
if ci.isBad() {
return nil, driver.ErrBadConn
}
defer ci.cn.errRecover(&err)
@ -215,9 +229,7 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
}
if len(v) == 0 {
err = ci.Close()
ci.closed = true
return nil, err
return nil, ci.Close()
}
numValues := len(v)
@ -240,11 +252,12 @@ func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
}
func (ci *copyin) Close() (err error) {
if ci.closed {
return errCopyInClosed
if ci.closed { // Don't do anything, we're already closed
return nil
}
ci.closed = true
if ci.cn.bad {
if ci.isBad() {
return driver.ErrBadConn
}
defer ci.cn.errRecover(&err)
@ -259,6 +272,7 @@ func (ci *copyin) Close() (err error) {
}
<-ci.done
ci.cn.inCopy = false
if ci.isErrorSet() {
err = ci.err
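
For reference, the COPY path above is driven through a prepared statement built by pq.CopyIn; a sketch of the intended call sequence, assuming a placeholder DSN and a hypothetical table users(name text, age integer):

```go
// A hypothetical bulk load; the DSN and the table users(name text, age integer)
// are placeholders.
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "dbname=test sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// COPY is only allowed inside a transaction.
	txn, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}

	stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
	if err != nil {
		log.Fatal(err)
	}

	for _, r := range []struct {
		name string
		age  int64
	}{{"alice", 30}, {"bob", 41}} {
		// Each Exec with arguments buffers one row of COPY data.
		if _, err := stmt.Exec(r.name, r.age); err != nil {
			log.Fatal(err)
		}
	}

	// An Exec with no arguments flushes the buffered rows.
	if _, err := stmt.Exec(); err != nil {
		log.Fatal(err)
	}
	if err := stmt.Close(); err != nil {
		log.Fatal(err)
	}
	if err := txn.Commit(); err != nil {
		log.Fatal(err)
	}
}
```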

vendor/github.com/lib/pq/doc.go generated vendored (25 changed lines)

@ -86,9 +86,13 @@ variables not supported by pq are set, pq will panic during connection
establishment. Environment variables have a lower precedence than explicitly
provided connection parameters.
The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
is supported, but on Windows PGPASSFILE must be specified explicitly.
Queries
database/sql does not dictate any specific format for parameter
markers in query strings, and pq uses the Postgres-native ordinal markers,
as shown above. The same marker can be reused for the same parameter:
@ -112,8 +116,29 @@ For more details on RETURNING, see the Postgres documentation:
For additional instructions on querying see the documentation for the database/sql package.
Data Types
Parameters pass through driver.DefaultParameterConverter before they are handled
by this package. When the binary_parameters connection option is enabled,
[]byte values are sent directly to the backend as data in binary format.
This package returns the following types for values from the PostgreSQL backend:
- integer types smallint, integer, and bigint are returned as int64
- floating-point types real and double precision are returned as float64
- character types char, varchar, and text are returned as string
- temporal types date, time, timetz, timestamp, and timestamptz are returned as time.Time
- the boolean type is returned as bool
- the bytea type is returned as []byte
All other types are returned directly from the backend as []byte values in text format.
Errors
pq may return errors of type *pq.Error which can be interrogated for error details:
if err, ok := err.(*pq.Error); ok {
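
A hedged sketch of that error-inspection pattern in full, assuming a placeholder DSN (Code is the five-character SQLSTATE and Name() its symbolic form):

```go
// A hypothetical program; the DSN is a placeholder.
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "dbname=test sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	_, err = db.Exec(`SELECT 1/0`) // forces a division_by_zero error
	if pqErr, ok := err.(*pq.Error); ok {
		// Inspect the SQLSTATE code, its symbolic name, and the server message.
		fmt.Println(pqErr.Code, pqErr.Code.Name(), pqErr.Message)
	} else if err != nil {
		log.Fatal(err)
	}
}
```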

vendor/github.com/lib/pq/encode.go generated vendored (218 changed lines)

@ -3,7 +3,9 @@ package pq
import (
"bytes"
"database/sql/driver"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"strconv"
@ -14,6 +16,15 @@ import (
"github.com/lib/pq/oid"
)
func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
switch v := x.(type) {
case []byte:
return v
default:
return encode(parameterStatus, x, oid.T_unknown)
}
}
func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
switch v := x.(type) {
case int64:
@ -44,10 +55,51 @@ func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) [
panic("not reached")
}
func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
switch f {
case formatBinary:
return binaryDecode(parameterStatus, s, typ)
case formatText:
return textDecode(parameterStatus, s, typ)
default:
panic("not reached")
}
}
func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_bytea:
return parseBytea(s)
return s
case oid.T_int8:
return int64(binary.BigEndian.Uint64(s))
case oid.T_int4:
return int64(int32(binary.BigEndian.Uint32(s)))
case oid.T_int2:
return int64(int16(binary.BigEndian.Uint16(s)))
case oid.T_uuid:
b, err := decodeUUIDBinary(s)
if err != nil {
panic(err)
}
return b
default:
errorf("don't know how to decode binary parameter of type %d", uint32(typ))
}
panic("not reached")
}
func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_char, oid.T_varchar, oid.T_text:
return string(s)
case oid.T_bytea:
b, err := parseBytea(s)
if err != nil {
errorf("%s", err)
}
return b
case oid.T_timestamptz:
return parseTs(parameterStatus.currentLocation, string(s))
case oid.T_timestamp, oid.T_date:
@ -58,7 +110,7 @@ func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{}
return mustParse("15:04:05-07", typ, s)
case oid.T_bool:
return s[0] == 't'
case oid.T_int8, oid.T_int2, oid.T_int4:
case oid.T_int8, oid.T_int4, oid.T_int2:
i, err := strconv.ParseInt(string(s), 10, 64)
if err != nil {
errorf("%s", err)
@ -158,16 +210,39 @@ func mustParse(f string, typ oid.Oid, s []byte) time.Time {
return t
}
func expect(str, char string, pos int) {
if c := str[pos : pos+1]; c != char {
errorf("expected '%v' at position %v; got '%v'", char, pos, c)
var errInvalidTimestamp = errors.New("invalid timestamp")
type timestampParser struct {
err error
}
func (p *timestampParser) expect(str string, char byte, pos int) {
if p.err != nil {
return
}
if pos+1 > len(str) {
p.err = errInvalidTimestamp
return
}
if c := str[pos]; c != char && p.err == nil {
p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c)
}
}
func mustAtoi(str string) int {
result, err := strconv.Atoi(str)
func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
if p.err != nil {
return 0
}
if begin < 0 || end < 0 || begin > end || end > len(str) {
p.err = errInvalidTimestamp
return 0
}
result, err := strconv.Atoi(str[begin:end])
if err != nil {
errorf("expected number; got '%v'", str)
if p.err == nil {
p.err = fmt.Errorf("expected number; got '%v'", str)
}
return 0
}
return result
}
@ -182,7 +257,7 @@ type locationCache struct {
// about 5% speed could be gained by putting the cache in the connection and
// losing the mutex, at the cost of a small amount of memory and a somewhat
// significant increase in code complexity.
var globalLocationCache *locationCache = newLocationCache()
var globalLocationCache = newLocationCache()
func newLocationCache() *locationCache {
return &locationCache{cache: make(map[int]*time.Location)}
@ -212,26 +287,26 @@ const (
infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
)
/*
* If EnableInfinityTs is not called, "-infinity" and "infinity" will return
* []byte("-infinity") and []byte("infinity") respectively, and potentially
* cause error "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time",
* when scanning into a time.Time value.
*
* Once EnableInfinityTs has been called, all connections created using this
* driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
* "timestamp with time zone" and "date" types to the predefined minimum and
* maximum times, respectively. When encoding time.Time values, any time which
* equals or preceeds the predefined minimum time will be encoded to
* "-infinity". Any values at or past the maximum time will similarly be
* encoded to "infinity".
*
*
* If EnableInfinityTs is called with negative >= positive, it will panic.
* Calling EnableInfinityTs after a connection has been established results in
* undefined behavior. If EnableInfinityTs is called more than once, it will
* panic.
*/
// EnableInfinityTs controls the handling of Postgres' "-infinity" and
// "infinity" "timestamp"s.
//
// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
// []byte("-infinity") and []byte("infinity") respectively, and potentially
// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
//
// Once EnableInfinityTs has been called, all connections created using this
// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
// "timestamp with time zone" and "date" types to the predefined minimum and
// maximum times, respectively. When encoding time.Time values, any time which
// equals or precedes the predefined minimum time will be encoded to
// "-infinity". Any values at or past the maximum time will similarly be
// encoded to "infinity".
//
// If EnableInfinityTs is called with negative >= positive, it will panic.
// Calling EnableInfinityTs after a connection has been established results in
// undefined behavior. If EnableInfinityTs is called more than once, it will
// panic.
func EnableInfinityTs(negative time.Time, positive time.Time) {
if infinityTsEnabled {
panic(infinityTsEnabledAlready)
@ -268,28 +343,41 @@ func parseTs(currentLocation *time.Location, str string) interface{} {
}
return []byte(str)
}
t, err := ParseTimestamp(currentLocation, str)
if err != nil {
panic(err)
}
return t
}
// ParseTimestamp parses Postgres' text format. It returns a time.Time in
// currentLocation iff that time's offset agrees with the offset sent from the
// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
// fixed offset provided by the Postgres server.
func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
p := timestampParser{}
monSep := strings.IndexRune(str, '-')
// this is Gregorian year, not ISO Year
// In Gregorian system, the year 1 BC is followed by AD 1
year := mustAtoi(str[:monSep])
year := p.mustAtoi(str, 0, monSep)
daySep := monSep + 3
month := mustAtoi(str[monSep+1 : daySep])
expect(str, "-", daySep)
month := p.mustAtoi(str, monSep+1, daySep)
p.expect(str, '-', daySep)
timeSep := daySep + 3
day := mustAtoi(str[daySep+1 : timeSep])
day := p.mustAtoi(str, daySep+1, timeSep)
var hour, minute, second int
if len(str) > monSep+len("01-01")+1 {
expect(str, " ", timeSep)
p.expect(str, ' ', timeSep)
minSep := timeSep + 3
expect(str, ":", minSep)
hour = mustAtoi(str[timeSep+1 : minSep])
p.expect(str, ':', minSep)
hour = p.mustAtoi(str, timeSep+1, minSep)
secSep := minSep + 3
expect(str, ":", secSep)
minute = mustAtoi(str[minSep+1 : secSep])
p.expect(str, ':', secSep)
minute = p.mustAtoi(str, minSep+1, secSep)
secEnd := secSep + 3
second = mustAtoi(str[secSep+1 : secEnd])
second = p.mustAtoi(str, secSep+1, secEnd)
}
remainderIdx := monSep + len("01-01 00:00:00") + 1
// Three optional (but ordered) sections follow: the
@ -300,49 +388,50 @@ func parseTs(currentLocation *time.Location, str string) interface{} {
nanoSec := 0
tzOff := 0
if remainderIdx < len(str) && str[remainderIdx:remainderIdx+1] == "." {
if remainderIdx < len(str) && str[remainderIdx] == '.' {
fracStart := remainderIdx + 1
fracOff := strings.IndexAny(str[fracStart:], "-+ ")
if fracOff < 0 {
fracOff = len(str) - fracStart
}
fracSec := mustAtoi(str[fracStart : fracStart+fracOff])
fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
remainderIdx += fracOff + 1
}
if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart:tzStart+1] == "-" || str[tzStart:tzStart+1] == "+") {
if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
// time zone separator is always '-' or '+' (UTC is +00)
var tzSign int
if c := str[tzStart : tzStart+1]; c == "-" {
switch c := str[tzStart]; c {
case '-':
tzSign = -1
} else if c == "+" {
case '+':
tzSign = +1
} else {
errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
default:
return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
}
tzHours := mustAtoi(str[tzStart+1 : tzStart+3])
tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
remainderIdx += 3
var tzMin, tzSec int
if tzStart+3 < len(str) && str[tzStart+3:tzStart+4] == ":" {
tzMin = mustAtoi(str[tzStart+4 : tzStart+6])
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
if tzStart+6 < len(str) && str[tzStart+6:tzStart+7] == ":" {
tzSec = mustAtoi(str[tzStart+7 : tzStart+9])
if remainderIdx < len(str) && str[remainderIdx] == ':' {
tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
remainderIdx += 3
}
tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
}
var isoYear int
if remainderIdx < len(str) && str[remainderIdx:remainderIdx+3] == " BC" {
if remainderIdx+3 <= len(str) && str[remainderIdx:remainderIdx+3] == " BC" {
isoYear = 1 - year
remainderIdx += 3
} else {
isoYear = year
}
if remainderIdx < len(str) {
errorf("expected end of input, got %v", str[remainderIdx:])
return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
}
t := time.Date(isoYear, time.Month(month), day,
hour, minute, second, nanoSec,
@ -359,11 +448,11 @@ func parseTs(currentLocation *time.Location, str string) interface{} {
}
}
return t
return t, p.err
}
// formatTs formats t into a format postgres understands.
func formatTs(t time.Time) (b []byte) {
func formatTs(t time.Time) []byte {
if infinityTsEnabled {
// t <= -infinity : ! (t > -infinity)
if !t.After(infinityTsNegative) {
@ -374,6 +463,11 @@ func formatTs(t time.Time) (b []byte) {
return []byte("infinity")
}
}
return FormatTimestamp(t)
}
// FormatTimestamp formats t into Postgres' text format for timestamps.
func FormatTimestamp(t time.Time) []byte {
// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
// minus sign preferred by Go.
// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
@ -383,7 +477,7 @@ func formatTs(t time.Time) (b []byte) {
t = t.AddDate((-t.Year())*2+1, 0, 0)
bc = true
}
b = []byte(t.Format(time.RFC3339Nano))
b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00"))
_, offset := t.Zone()
offset = offset % 60
@ -408,14 +502,14 @@ func formatTs(t time.Time) (b []byte) {
// Parse a bytea value received from the server. Both "hex" and the legacy
// "escape" format are supported.
func parseBytea(s []byte) (result []byte) {
func parseBytea(s []byte) (result []byte, err error) {
if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
// bytea_output = hex
s = s[2:] // trim off leading "\\x"
result = make([]byte, hex.DecodedLen(len(s)))
_, err := hex.Decode(result, s)
if err != nil {
errorf("%s", err)
return nil, err
}
} else {
// bytea_output = escape
@ -430,11 +524,11 @@ func parseBytea(s []byte) (result []byte) {
// '\\' followed by an octal number
if len(s) < 4 {
errorf("invalid bytea sequence %v", s)
return nil, fmt.Errorf("invalid bytea sequence %v", s)
}
r, err := strconv.ParseInt(string(s[1:4]), 8, 9)
if err != nil {
errorf("could not parse bytea value: %s", err.Error())
return nil, fmt.Errorf("could not parse bytea value: %s", err.Error())
}
result = append(result, byte(r))
s = s[4:]
@ -452,7 +546,7 @@ func parseBytea(s []byte) (result []byte) {
}
}
return result
return result, nil
}
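
The newly exported ParseTimestamp and FormatTimestamp expose the text-format conversion used internally; a small round-trip sketch:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	// Format a time.Time into Postgres' text format (prints something like
	// "2017-05-18 11:30:00Z").
	fmt.Println(string(pq.FormatTimestamp(time.Date(2017, 5, 18, 11, 30, 0, 0, time.UTC))))

	// Parse it back; passing nil keeps the fixed offset sent by the server.
	t, err := pq.ParseTimestamp(nil, "2001-02-03 04:05:06.000007+02")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t)
}
```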
func encodeBytea(serverVersion int, v []byte) (result []byte) {

vendor/github.com/lib/pq/notify.go generated vendored (28 changed lines)

@ -62,14 +62,18 @@ type ListenerConn struct {
// Creates a new ListenerConn. Use NewListener instead.
func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
cn, err := Open(name)
return newDialListenerConn(defaultDialer{}, name, notificationChan)
}
func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
cn, err := DialOpen(d, name)
if err != nil {
return nil, err
}
l := &ListenerConn{
cn: cn.(*conn),
notificationChan: notificationChan,
notificationChan: c,
connState: connStateIdle,
replyChan: make(chan message, 2),
}
@ -253,8 +257,10 @@ func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
// Can't use l.cn.writeBuf here because it uses the scratch buffer which
// might get overwritten by listenerConnLoop.
data := writeBuf([]byte("Q\x00\x00\x00\x00"))
b := &data
b := &writeBuf{
buf: []byte("Q\x00\x00\x00\x00"),
pos: 1,
}
b.string(q)
l.cn.send(b)
@ -389,6 +395,7 @@ type Listener struct {
name string
minReconnectInterval time.Duration
maxReconnectInterval time.Duration
dialer Dialer
eventCallback EventCallbackType
lock sync.Mutex
@ -419,10 +426,21 @@ func NewListener(name string,
minReconnectInterval time.Duration,
maxReconnectInterval time.Duration,
eventCallback EventCallbackType) *Listener {
return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
}
// NewDialListener is like NewListener but it takes a Dialer.
func NewDialListener(d Dialer,
name string,
minReconnectInterval time.Duration,
maxReconnectInterval time.Duration,
eventCallback EventCallbackType) *Listener {
l := &Listener{
name: name,
minReconnectInterval: minReconnectInterval,
maxReconnectInterval: maxReconnectInterval,
dialer: d,
eventCallback: eventCallback,
channels: make(map[string]struct{}),
@ -658,7 +676,7 @@ func (l *Listener) closed() bool {
func (l *Listener) connect() error {
notificationChan := make(chan *Notification, 32)
cn, err := NewListenerConn(l.name, notificationChan)
cn, err := newDialListenerConn(l.dialer, l.name, notificationChan)
if err != nil {
return err
}
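
The Dialer-aware constructors above feed the same reconnect loop as NewListener; a hedged LISTEN/NOTIFY sketch using the default dialer, with a placeholder DSN and a hypothetical channel name "jobs":

```go
// A hypothetical listener; the DSN and the channel name "jobs" are placeholders.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	dsn := "dbname=test sslmode=disable"

	// The callback receives connection lifecycle events from the reconnect loop.
	report := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			log.Println("listener event:", ev, err)
		}
	}

	l := pq.NewListener(dsn, 10*time.Second, time.Minute, report)
	if err := l.Listen("jobs"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case n := <-l.Notify:
			if n != nil { // nil is delivered after a reconnect
				fmt.Println("NOTIFY on", n.Channel, "payload:", n.Extra)
			}
		case <-time.After(90 * time.Second):
			go l.Ping() // check that the connection is still alive
		}
	}
}
```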

vendor/github.com/lib/pq/ssl.go generated vendored Normal file (158 changed lines)

@ -0,0 +1,158 @@
package pq
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net"
"os"
"os/user"
"path/filepath"
)
// ssl generates a function to upgrade a net.Conn based on the "sslmode" and
// related settings. The function is nil when no upgrade should take place.
func ssl(o values) func(net.Conn) net.Conn {
verifyCaOnly := false
tlsConf := tls.Config{}
switch mode := o["sslmode"]; mode {
// "require" is the default.
case "", "require":
// We must skip TLS's own verification since it requires full
// verification since Go 1.3.
tlsConf.InsecureSkipVerify = true
// From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
//
// Note: For backwards compatibility with earlier versions of
// PostgreSQL, if a root CA file exists, the behavior of
// sslmode=require will be the same as that of verify-ca, meaning the
// server certificate is validated against the CA. Relying on this
// behavior is discouraged, and applications that need certificate
// validation should always use verify-ca or verify-full.
if sslrootcert, ok := o["sslrootcert"]; ok {
if _, err := os.Stat(sslrootcert); err == nil {
verifyCaOnly = true
} else {
delete(o, "sslrootcert")
}
}
case "verify-ca":
// We must skip TLS's own verification since it requires full
// verification since Go 1.3.
tlsConf.InsecureSkipVerify = true
verifyCaOnly = true
case "verify-full":
tlsConf.ServerName = o["host"]
case "disable":
return nil
default:
errorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
}
sslClientCertificates(&tlsConf, o)
sslCertificateAuthority(&tlsConf, o)
sslRenegotiation(&tlsConf)
return func(conn net.Conn) net.Conn {
client := tls.Client(conn, &tlsConf)
if verifyCaOnly {
sslVerifyCertificateAuthority(client, &tlsConf)
}
return client
}
}
// sslClientCertificates adds the certificate specified in the "sslcert" and
// "sslkey" settings, or if they aren't set, from the .postgresql directory
// in the user's home directory. The configured files must exist and have
// the correct permissions.
func sslClientCertificates(tlsConf *tls.Config, o values) {
// user.Current() might fail when cross-compiling. We have to ignore the
// error and continue without home directory defaults, since we wouldn't
// know from where to load them.
user, _ := user.Current()
// In libpq, the client certificate is only loaded if the setting is not blank.
//
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037
sslcert := o["sslcert"]
if len(sslcert) == 0 && user != nil {
sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
}
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
if len(sslcert) == 0 {
return
}
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
if _, err := os.Stat(sslcert); os.IsNotExist(err) {
return
} else if err != nil {
panic(err)
}
// In libpq, the ssl key is only loaded if the setting is not blank.
//
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222
sslkey := o["sslkey"]
if len(sslkey) == 0 && user != nil {
sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
}
if len(sslkey) > 0 {
if err := sslKeyPermissions(sslkey); err != nil {
panic(err)
}
}
cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
if err != nil {
panic(err)
}
tlsConf.Certificates = []tls.Certificate{cert}
}
// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
func sslCertificateAuthority(tlsConf *tls.Config, o values) {
// In libpq, the root certificate is only loaded if the setting is not blank.
//
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 {
tlsConf.RootCAs = x509.NewCertPool()
cert, err := ioutil.ReadFile(sslrootcert)
if err != nil {
panic(err)
}
if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
errorf("couldn't parse pem in sslrootcert")
}
}
}
// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
// verifies the presented certificate against the CA, i.e. the one specified in
// sslrootcert or the system CA if sslrootcert was not specified.
func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) {
err := client.Handshake()
if err != nil {
panic(err)
}
certs := client.ConnectionState().PeerCertificates
opts := x509.VerifyOptions{
DNSName: client.ConnectionState().ServerName,
Intermediates: x509.NewCertPool(),
Roots: tlsConf.RootCAs,
}
for i, cert := range certs {
if i == 0 {
continue
}
opts.Intermediates.AddCert(cert)
}
_, err = certs[0].Verify(opts)
if err != nil {
panic(err)
}
}
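
The sslmode handling above maps connection-string settings onto a tls.Config; a sketch of a connection string exercising verify-full with client certificates, using hypothetical host names and file paths:

```go
// Hypothetical host and certificate paths; verify-full validates the server
// certificate against sslrootcert and checks that it matches "host".
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	dsn := "host=db.example.com user=app dbname=app " +
		"sslmode=verify-full " +
		"sslrootcert=/etc/ssl/pg/root.crt " +
		"sslcert=/etc/ssl/pg/client.crt sslkey=/etc/ssl/pg/client.key"

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("TLS connection established")
}
```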

vendor/github.com/lib/pq/ssl_go1.7.go generated vendored Normal file (14 changed lines)

@ -0,0 +1,14 @@
// +build go1.7
package pq
import "crypto/tls"
// Accept renegotiation requests initiated by the backend.
//
// Renegotiation was deprecated then removed from PostgreSQL 9.5, but
// the default configuration of older versions has it enabled. Redshift
// also initiates renegotiations and cannot be reconfigured.
func sslRenegotiation(conf *tls.Config) {
conf.Renegotiation = tls.RenegotiateFreelyAsClient
}

vendor/github.com/lib/pq/ssl_permissions.go generated vendored Normal file (20 changed lines)

@ -0,0 +1,20 @@
// +build !windows
package pq
import "os"
// sslKeyPermissions checks the permissions on user-supplied ssl key files.
// The key file should have very little access.
//
// libpq does not check key file permissions on Windows.
func sslKeyPermissions(sslkey string) error {
info, err := os.Stat(sslkey)
if err != nil {
return err
}
if info.Mode().Perm()&0077 != 0 {
return ErrSSLKeyHasWorldPermissions
}
return nil
}

vendor/github.com/lib/pq/ssl_renegotiation.go generated vendored Normal file (8 changed lines)

@ -0,0 +1,8 @@
// +build !go1.7
package pq
import "crypto/tls"
// Renegotiation is not supported by crypto/tls until Go 1.7.
func sslRenegotiation(*tls.Config) {}

vendor/github.com/lib/pq/ssl_windows.go generated vendored Normal file (9 changed lines)

@ -0,0 +1,9 @@
// +build windows
package pq
// sslKeyPermissions checks the permissions on user-supplied ssl key files.
// The key file should have very little access.
//
// libpq does not check key file permissions on Windows.
func sslKeyPermissions(string) error { return nil }

vendor/github.com/lib/pq/url.go generated vendored (8 changed lines)

@ -2,6 +2,7 @@ package pq
import (
"fmt"
"net"
nurl "net/url"
"sort"
"strings"
@ -54,12 +55,11 @@ func ParseURL(url string) (string, error) {
accrue("password", v)
}
i := strings.Index(u.Host, ":")
if i < 0 {
if host, port, err := net.SplitHostPort(u.Host); err != nil {
accrue("host", u.Host)
} else {
accrue("host", u.Host[:i])
accrue("port", u.Host[i+1:])
accrue("host", host)
accrue("port", port)
}
if u.Path != "" {

vendor/github.com/lib/pq/user_posix.go generated vendored

@ -1,6 +1,6 @@
// Package pq is a pure Go Postgres driver for the database/sql package.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun
package pq

vendor/github.com/lib/pq/uuid.go generated vendored Normal file (23 changed lines)

@ -0,0 +1,23 @@
package pq
import (
"encoding/hex"
"fmt"
)
// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format.
func decodeUUIDBinary(src []byte) ([]byte, error) {
if len(src) != 16 {
return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src))
}
dst := make([]byte, 36)
dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
hex.Encode(dst[0:], src[0:4])
hex.Encode(dst[9:], src[4:6])
hex.Encode(dst[14:], src[6:8])
hex.Encode(dst[19:], src[8:10])
hex.Encode(dst[24:], src[10:16])
return dst, nil
}

vendor/vendor.json vendored (5 changed lines)

@ -13,9 +13,10 @@
"revisionTime": "2015-02-15T19:43:19-08:00"
},
{
"checksumSHA1": "uTUsjF7bymOuKvXbW2BpkK/w4Vg=",
"path": "github.com/lib/pq",
"revision": "93e9980741c9e593411b94e07d5bad8cfb4809db",
"revisionTime": "2015-05-02T14:36:36+03:00"
"revision": "2704adc878c21e1329f46f6e56a1c387d788ff94",
"revisionTime": "2017-03-24T20:46:54Z"
},
{
"path": "github.com/lib/pq/hstore",