Merge pull request #2882 from barakmich/security_client_new

*: Add security/authorization to etcd/client and etcdctl
release-2.1
Barak Michener 2015-06-11 13:40:32 -04:00
commit 7bf0479e66
62 changed files with 45661 additions and 39 deletions
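For context, a minimal usage sketch of what the new auth support looks like from the Go client side. This is an illustration, not code from this diff: the `Username`/`Password` fields on `client.Config` and the endpoint address are assumptions about how the credentials are supplied.

```go
// Hypothetical sketch: connect to etcd with basic-auth credentials.
// Field names on client.Config are assumed, not confirmed by this diff.
package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/client"
)

func main() {
	cfg := client.Config{
		Endpoints: []string{"http://127.0.0.1:4001"},
		Username:  "root",   // assumed field added by the auth support
		Password:  "secret", // assumed field added by the auth support
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)
	if _, err := kapi.Get(context.Background(), "/foo", nil); err != nil {
		log.Fatal(err)
	}
}
```

Per the commit message, etcdctl gains equivalent support for passing credentials; the exact flags are defined elsewhere in this PR.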

Godeps/Godeps.json (generated)
View File

@@ -1,6 +1,6 @@
{
"ImportPath": "github.com/coreos/etcd",
"GoVersion": "go1.4.1",
"GoVersion": "go1.4.2",
"Packages": [
"./..."
],
@@ -14,6 +14,10 @@
"ImportPath": "github.com/beorn7/perks/quantile",
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
},
{
"ImportPath": "github.com/bgentry/speakeasy",
"Rev": "5dfe43257d1f86b96484e760f2f0c4e2559089c7"
},
{
"ImportPath": "github.com/boltdb/bolt",
"Comment": "v1.0-71-g71f28ea",
@@ -26,8 +30,8 @@
},
{
"ImportPath": "github.com/coreos/go-etcd/etcd",
"Comment": "v2.0.0-7-g73a8ef7",
"Rev": "73a8ef737e8ea002281a28b4cb92a1de121ad4c6"
"Comment": "v2.0.0-13-g4cceaf7",
"Rev": "4cceaf7283b76f27c4a732b20730dcdb61053bf5"
},
{
"ImportPath": "github.com/coreos/go-semver/semver",
@@ -85,6 +89,10 @@
"ImportPath": "github.com/stretchr/testify/assert",
"Rev": "9cc77fa25329013ce07362c7742952ff887361f2"
},
{
"ImportPath": "github.com/ugorji/go/codec",
"Rev": "821cda7e48749cacf7cad2c6ed01e96457ca7e9d"
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "1351f936d976c60a0a48d728281922cf63eafb8d"

View File

@@ -0,0 +1,2 @@
example/example
example/example.exe

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [2013] [the CloudFoundry Authors]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,30 @@
# Speakeasy
This package provides cross-platform Go (#golang) helpers for taking user input
from the terminal while not echoing the input back (similar to `getpasswd`). The
package uses syscalls to avoid any dependence on cgo, and is therefore
compatible with cross-compiling.
[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc]
## Unicode
Multi-byte unicode characters work successfully on Mac OS X. On Windows,
however, this may be problematic (as is UTF in general on Windows). Other
platforms have not been tested.
## License
The code herein was not written by me, but was compiled from two separate open
source packages. Unix portions were imported from [gopass][gopass], while
Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s
[Windows terminal helpers][cf-ui-windows].
The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly
from the source (though I attempted to fill in the correct owner in the
boilerplate copyright notice).
[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI"
[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers"
[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org"
[gopass]: https://code.google.com/p/gopass "gopass"

View File

@@ -0,0 +1,18 @@
package main
import (
"fmt"
"os"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bgentry/speakeasy"
)
func main() {
password, err := speakeasy.Ask("Please enter a password: ")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Password result: %q\n", password)
fmt.Printf("Password len: %d\n", len(password))
}

View File

@@ -0,0 +1,47 @@
package speakeasy
import (
"fmt"
"io"
"os"
"strings"
)
// Ask the user to enter a password with input hidden. prompt is a string to
// display before the user's input. Returns the provided password, or an error
// if the command failed.
func Ask(prompt string) (password string, err error) {
return FAsk(os.Stdout, prompt)
}
// Same as the Ask function, except it is possible to specify the file to write
// the prompt to.
func FAsk(file *os.File, prompt string) (password string, err error) {
if prompt != "" {
fmt.Fprint(file, prompt) // Display the prompt.
}
password, err = getPassword()
// Carriage return after the user input.
fmt.Fprintln(file, "")
return
}
func readline() (value string, err error) {
var valb []byte
var n int
b := make([]byte, 1)
for {
// read one byte at a time so we don't accidentally read extra bytes
n, err = os.Stdin.Read(b)
if err != nil && err != io.EOF {
return "", err
}
if n == 0 || b[0] == '\n' {
break
}
valb = append(valb, b[0])
}
return strings.TrimSuffix(string(valb), "\r"), nil
}

View File

@@ -0,0 +1,93 @@
// based on https://code.google.com/p/gopass
// Author: johnsiilver@gmail.com (John Doak)
//
// Original code is based on code by RogerV in the golang-nuts thread:
// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247
// +build darwin freebsd linux netbsd openbsd
package speakeasy
import (
"fmt"
"os"
"os/signal"
"strings"
"syscall"
)
const sttyArg0 = "/bin/stty"
var (
sttyArgvEOff []string = []string{"stty", "-echo"}
sttyArgvEOn []string = []string{"stty", "echo"}
ws syscall.WaitStatus = 0
)
// getPassword gets input hidden from the terminal from a user. This is
// accomplished by turning off terminal echo, reading input from the user and
// finally turning on terminal echo.
func getPassword() (password string, err error) {
sig := make(chan os.Signal, 10)
brk := make(chan bool)
// File descriptors for stdin, stdout, and stderr.
fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()}
// Set up notification of termination signals on channel sig, and start a
// goroutine to watch for them so we can turn echo back on if need be.
signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT,
syscall.SIGTERM)
go catchSignal(fd, sig, brk)
// Turn off the terminal echo.
pid, err := echoOff(fd)
if err != nil {
return "", err
}
// Turn on the terminal echo and stop listening for signals.
defer close(brk)
defer echoOn(fd)
syscall.Wait4(pid, &ws, 0, nil)
line, err := readline()
if err == nil {
password = strings.TrimSpace(line)
} else {
err = fmt.Errorf("failed during password entry: %s", err)
}
return password, err
}
// echoOff turns off the terminal echo.
func echoOff(fd []uintptr) (int, error) {
pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd})
if err != nil {
return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err)
}
return pid, nil
}
// echoOn turns back on the terminal echo.
func echoOn(fd []uintptr) {
// Turn on the terminal echo.
pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd})
if e == nil {
syscall.Wait4(pid, &ws, 0, nil)
}
}
// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn
// terminal echo back on before the program ends. Otherwise the user is left
// with echo off on their terminal.
func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) {
select {
case <-sig:
echoOn(fd)
os.Exit(-1)
case <-brk:
}
}

View File

@@ -0,0 +1,43 @@
// +build windows
package speakeasy
import (
"os"
"syscall"
)
// The SetConsoleMode function can be used to change the value of ENABLE_ECHO_INPUT:
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
const ENABLE_ECHO_INPUT = 0x0004
func getPassword() (password string, err error) {
hStdin := syscall.Handle(os.Stdin.Fd())
var oldMode uint32
err = syscall.GetConsoleMode(hStdin, &oldMode)
if err != nil {
return
}
var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT)
err = setConsoleMode(hStdin, newMode)
defer setConsoleMode(hStdin, oldMode)
if err != nil {
return
}
return readline()
}
func setConsoleMode(console syscall.Handle, mode uint32) (err error) {
dll := syscall.MustLoadDLL("kernel32")
proc := dll.MustFindProc("SetConsoleMode")
r, _, err := proc.Call(uintptr(console), uintptr(mode))
if r == 0 {
return err
}
return nil
}

View File

@@ -306,12 +306,16 @@ func (c *Client) GetCluster() []string {
}
// SyncCluster updates the cluster information using the internal machine list.
// If no members are found, the internal machine list is left untouched.
func (c *Client) SyncCluster() bool {
return c.internalSyncCluster(c.cluster.Machines)
}
// internalSyncCluster syncs cluster information using the given machine list.
func (c *Client) internalSyncCluster(machines []string) bool {
// comma-separated list of machines in the cluster.
members := ""
for _, machine := range machines {
httpPath := c.createHttpPath(machine, path.Join(version, "members"))
resp, err := c.httpClient.Get(httpPath)
@@ -333,8 +337,7 @@ func (c *Client) internalSyncCluster(machines []string) bool {
// try another machine in the cluster
continue
}
// update Machines List
c.cluster.updateFromStr(string(b))
members = string(b)
} else {
b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
@@ -354,10 +357,16 @@ func (c *Client) internalSyncCluster(machines []string) bool {
urls = append(urls, m.ClientURLs...)
}
// update Machines List
c.cluster.updateFromStr(strings.Join(urls, ","))
members = strings.Join(urls, ",")
}
// We should never do an empty cluster update.
if members == "" {
continue
}
// update Machines List
c.cluster.updateFromStr(members)
logger.Debug("sync.machines ", c.cluster.Machines)
c.saveConfig()
return true

View File

@@ -0,0 +1 @@
{"config":{"certFile":"","keyFile":"","caCertFiles":null,"timeout":1000000000,"consistency":"STRONG"},"cluster":{"leader":"http://127.0.0.1:4001","machines":["http://127.0.0.1:4001"]}}

File diff suppressed because it is too large.

View File

@@ -1,10 +1,13 @@
package etcd
//go:generate codecgen -o response.generated.go response.go
import (
"encoding/json"
"net/http"
"strconv"
"time"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec"
)
const (
@@ -28,6 +31,7 @@ var (
http.StatusNotFound: true,
http.StatusPreconditionFailed: true,
http.StatusForbidden: true,
http.StatusUnauthorized: true,
}
)
@@ -39,7 +43,7 @@ func (rr *RawResponse) Unmarshal() (*Response, error) {
resp := new(Response)
err := json.Unmarshal(rr.Body, resp)
err := codec.NewDecoderBytes(rr.Body, new(codec.JsonHandle)).Decode(resp)
if err != nil {
return nil, err
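As an aside, the decode swap above (encoding/json replaced by the ugorji codec with a JsonHandle) looks like the following minimal standalone sketch; the `Response` struct here is a stand-in for illustration, not the go-etcd type.

```go
// Minimal sketch of the codec-based JSON decode used above.
// "Response" is a stand-in struct, not the go-etcd Response type.
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type Response struct {
	Action string `json:"action"`
}

func main() {
	body := []byte(`{"action":"get"}`)
	resp := new(Response)
	if err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(resp); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(resp.Action) // "get"
}
```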

View File

@@ -0,0 +1,75 @@
package etcd
import (
"net/http"
"reflect"
"strings"
"testing"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec"
)
func createTestNode(size int) *Node {
return &Node{
Key: strings.Repeat("a", 30),
Value: strings.Repeat("a", size),
TTL: 123456789,
ModifiedIndex: 123456,
CreatedIndex: 123456,
}
}
func createTestNodeWithChildren(children, size int) *Node {
node := createTestNode(size)
for i := 0; i < children; i++ {
node.Nodes = append(node.Nodes, createTestNode(size))
}
return node
}
func createTestResponse(children, size int) *Response {
return &Response{
Action: "aaaaa",
Node: createTestNodeWithChildren(children, size),
PrevNode: nil,
EtcdIndex: 123456,
RaftIndex: 123456,
RaftTerm: 123456,
}
}
func benchmarkResponseUnmarshalling(b *testing.B, children, size int) {
response := createTestResponse(children, size)
rr := RawResponse{http.StatusOK, make([]byte, 0), http.Header{}}
codec.NewEncoderBytes(&rr.Body, new(codec.JsonHandle)).Encode(response)
b.ResetTimer()
newResponse := new(Response)
var err error
for i := 0; i < b.N; i++ {
if newResponse, err = rr.Unmarshal(); err != nil {
b.Errorf("Error: %v", err)
}
}
if !reflect.DeepEqual(response.Node, newResponse.Node) {
b.Errorf("Unexpected difference in a parsed response: \n%+v\n%+v", response, newResponse)
}
}
func BenchmarkSmallResponseUnmarshal(b *testing.B) {
benchmarkResponseUnmarshalling(b, 30, 20)
}
func BenchmarkManySmallResponseUnmarshal(b *testing.B) {
benchmarkResponseUnmarshalling(b, 3000, 20)
}
func BenchmarkMediumResponseUnmarshal(b *testing.B) {
benchmarkResponseUnmarshalling(b, 300, 200)
}
func BenchmarkLargeResponseUnmarshal(b *testing.B) {
benchmarkResponseUnmarshalling(b, 3000, 2000)
}

View File

@@ -5,7 +5,7 @@ import (
oldlog "log"
"os"
"github.com/coreos/pkg/capnslog"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/pkg/capnslog"
)
var logLevel = capnslog.INFO

View File

@@ -0,0 +1,150 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
/*
High Performance, Feature-Rich Idiomatic Go codec/encoding library for
binc, msgpack, cbor, json.
Supported Serialization formats are:
- msgpack: https://github.com/msgpack/msgpack
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
- simple:
To install:
go get github.com/ugorji/go/codec
This package understands the 'unsafe' tag, to allow using unsafe semantics:
- When decoding into a struct, you need to read the field name as a string
so you can find the struct field it is mapped to.
Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion.
To install using unsafe, pass the 'unsafe' tag:
go get -tags=unsafe github.com/ugorji/go/codec
For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
The idiomatic Go support is as seen in other encoding packages in
the standard library (ie json, xml, gob, etc).
Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API
- Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Multiple conversions:
Package coerces types where appropriate
e.g. decode an int in the stream into a float, etc.
- Corner Cases:
Overflows, nil maps/slices, nil values in streams are handled correctly
- Standard field renaming via tags
- Support for omitting empty fields during an encoding
- Encoding from any value and decoding into pointer to any value
(struct, slice, map, primitives, pointers, interface{}, etc)
- Extensions to support efficient encoding/decoding of any named types
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
- Decoding without a schema (into an interface{}).
Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{}
- Encode a struct as an array, and decode struct from an array in the data stream
- Comprehensive support for anonymous fields
- Fast (no-reflection) encoding/decoding of common maps and slices
- Code-generation for faster performance.
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
- Support indefinite-length formats to enable true streaming
(for formats which support it e.g. json, cbor)
- Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
This mostly applies to maps, where iteration order is non-deterministic.
- NIL in data stream decoded as zero value
- Never silently skip data when decoding.
User decides whether to return an error or silently skip data when keys or indexes
in the data stream do not map to fields in the struct.
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
Extension Support
Users can register a function to handle the encoding or decoding of
their custom types.
There are no restrictions on what the custom type can be. Some examples:
type BisSet []int
type BitSet64 uint64
type UUID string
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
type GifImage struct { ... }
As an illustration, MyStructWithUnexportedFields would normally be
encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.
RPC
RPC Client and Server Codecs are implemented, so the codecs can be used
with the standard net/rpc package.
Usage
Typical usage model:
// create and configure Handle
var (
bh codec.BincHandle
mh codec.MsgpackHandle
ch codec.CborHandle
)
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
// configure extensions
// e.g. for msgpack, define functions and enable Time support for tag 1
// mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
// create and use decoder/encoder
var (
r io.Reader
w io.Writer
b []byte
h = &bh // or mh to use msgpack
)
dec = codec.NewDecoder(r, h)
dec = codec.NewDecoderBytes(b, h)
err = dec.Decode(&v)
enc = codec.NewEncoder(w, h)
enc = codec.NewEncoderBytes(&b, h)
err = enc.Encode(v)
//RPC Server
go func() {
for {
conn, err := listener.Accept()
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
rpc.ServeCodec(rpcCodec)
}
}()
//RPC Communication (client side)
conn, err = net.Dial("tcp", "localhost:5555")
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec)
*/
package codec

View File

@@ -0,0 +1,148 @@
# Codec
High Performance, Feature-Rich Idiomatic Go codec/encoding library for
binc, msgpack, cbor, json.
Supported Serialization formats are:
- msgpack: https://github.com/msgpack/msgpack
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
- simple:
To install:
go get github.com/ugorji/go/codec
This package understands the `unsafe` tag, to allow using unsafe semantics:
- When decoding into a struct, you need to read the field name as a string
so you can find the struct field it is mapped to.
Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion.
To use it, you must pass the `unsafe` tag during install:
```
go install -tags=unsafe github.com/ugorji/go/codec
```
Online documentation: http://godoc.org/github.com/ugorji/go/codec
Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer
The idiomatic Go support is as seen in other encoding packages in
the standard library (ie json, xml, gob, etc).
Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API
- Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Multiple conversions:
Package coerces types where appropriate
e.g. decode an int in the stream into a float, etc.
- Corner Cases:
Overflows, nil maps/slices, nil values in streams are handled correctly
- Standard field renaming via tags
- Support for omitting empty fields during an encoding
- Encoding from any value and decoding into pointer to any value
(struct, slice, map, primitives, pointers, interface{}, etc)
- Extensions to support efficient encoding/decoding of any named types
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
- Decoding without a schema (into an interface{}).
Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{}
- Encode a struct as an array, and decode struct from an array in the data stream
- Comprehensive support for anonymous fields
- Fast (no-reflection) encoding/decoding of common maps and slices
- Code-generation for faster performance.
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
- Support indefinite-length formats to enable true streaming
(for formats which support it e.g. json, cbor)
- Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
This mostly applies to maps, where iteration order is non-deterministic.
- NIL in data stream decoded as zero value
- Never silently skip data when decoding.
User decides whether to return an error or silently skip data when keys or indexes
in the data stream do not map to fields in the struct.
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
## Extension Support
Users can register a function to handle the encoding or decoding of
their custom types.
There are no restrictions on what the custom type can be. Some examples:
type BisSet []int
type BitSet64 uint64
type UUID string
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
type GifImage struct { ... }
As an illustration, MyStructWithUnexportedFields would normally be
encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.
## RPC
RPC Client and Server Codecs are implemented, so the codecs can be used
with the standard net/rpc package.
## Usage
Typical usage model:
// create and configure Handle
var (
bh codec.BincHandle
mh codec.MsgpackHandle
ch codec.CborHandle
)
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
// configure extensions
// e.g. for msgpack, define functions and enable Time support for tag 1
// mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
// create and use decoder/encoder
var (
r io.Reader
w io.Writer
b []byte
h = &bh // or mh to use msgpack
)
dec = codec.NewDecoder(r, h)
dec = codec.NewDecoderBytes(b, h)
err = dec.Decode(&v)
enc = codec.NewEncoder(w, h)
enc = codec.NewEncoderBytes(&b, h)
err = enc.Encode(v)
//RPC Server
go func() {
for {
conn, err := listener.Accept()
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
rpc.ServeCodec(rpcCodec)
}
}()
//RPC Communication (client side)
conn, err = net.Dial("tcp", "localhost:5555")
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec)
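Editorial note on the "Extension Support" section above: a rough sketch of what registering such an extension can look like. It relies only on calls that appear in this vendored code (`SetExt`, `NewEncoderBytes`/`NewDecoderBytes`, and the Ext methods invoked in binc.go/cbor.go); the `UUID` type, the tag value, and the handling of value-vs-pointer arguments are illustrative assumptions.

```go
// Rough sketch (assumptions, not vendored code): a custom extension for a
// user-defined type on a BincHandle. The Ext method set mirrors the calls
// visible above: WriteExt/ReadExt for binary formats, ConvertExt/UpdateExt
// for value-based formats.
package main

import (
	"encoding/binary"
	"fmt"
	"reflect"

	"github.com/ugorji/go/codec"
)

// UUID is the custom type we want to encode via an extension tag.
type UUID uint64

// uuidExt implements the codec Ext methods for UUID values.
type uuidExt struct{}

// WriteExt serializes a UUID (value or pointer) to 8 big-endian bytes.
func (uuidExt) WriteExt(v interface{}) []byte {
	var u UUID
	switch x := v.(type) {
	case UUID:
		u = x
	case *UUID:
		u = *x
	}
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(u))
	return b
}

// ReadExt deserializes the bytes back into the *UUID destination.
func (uuidExt) ReadExt(dst interface{}, src []byte) {
	*(dst.(*UUID)) = UUID(binary.BigEndian.Uint64(src))
}

// ConvertExt exposes a plain value form, used by value-based formats.
func (uuidExt) ConvertExt(v interface{}) interface{} {
	switch x := v.(type) {
	case UUID:
		return uint64(x)
	case *UUID:
		return uint64(*x)
	}
	return nil
}

// UpdateExt writes a decoded value form back into the *UUID destination.
func (uuidExt) UpdateExt(dst interface{}, src interface{}) {
	*(dst.(*UUID)) = UUID(src.(uint64))
}

func main() {
	var bh codec.BincHandle
	bh.SetExt(reflect.TypeOf(UUID(0)), 1, uuidExt{}) // tag 1, as in the docs above

	id := UUID(42)
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &bh).Encode(&id); err != nil {
		panic(err)
	}

	var out UUID
	if err := codec.NewDecoderBytes(buf, &bh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // 42
}
```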

View File

@@ -0,0 +1,901 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import (
"math"
"time"
)
const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning.
// vd as low 4 bits (there are 16 slots)
const (
bincVdSpecial byte = iota
bincVdPosInt
bincVdNegInt
bincVdFloat
bincVdString
bincVdByteArray
bincVdArray
bincVdMap
bincVdTimestamp
bincVdSmallInt
bincVdUnicodeOther
bincVdSymbol
bincVdDecimal
_ // open slot
_ // open slot
bincVdCustomExt = 0x0f
)
const (
bincSpNil byte = iota
bincSpFalse
bincSpTrue
bincSpNan
bincSpPosInf
bincSpNegInf
bincSpZeroFloat
bincSpZero
bincSpNegOne
)
const (
bincFlBin16 byte = iota
bincFlBin32
_ // bincFlBin32e
bincFlBin64
_ // bincFlBin64e
// others not currently supported
)
type bincEncDriver struct {
e *Encoder
w encWriter
m map[string]uint16 // symbols
s uint16 // symbols sequencer
b [scratchByteArrayLen]byte
encNoSeparator
}
func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool {
return rt == timeTypId
}
func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {
if rt == timeTypId {
bs := encodeTime(v.(time.Time))
e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
e.w.writeb(bs)
}
}
func (e *bincEncDriver) EncodeNil() {
e.w.writen1(bincVdSpecial<<4 | bincSpNil)
}
func (e *bincEncDriver) EncodeBool(b bool) {
if b {
e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
} else {
e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
}
}
func (e *bincEncDriver) EncodeFloat32(f float32) {
if f == 0 {
e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
return
}
e.w.writen1(bincVdFloat<<4 | bincFlBin32)
bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
}
func (e *bincEncDriver) EncodeFloat64(f float64) {
if f == 0 {
e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
return
}
bigen.PutUint64(e.b[:8], math.Float64bits(f))
if bincDoPrune {
i := 7
for ; i >= 0 && (e.b[i] == 0); i-- {
}
i++
if i <= 6 {
e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
e.w.writen1(byte(i))
e.w.writeb(e.b[:i])
return
}
}
e.w.writen1(bincVdFloat<<4 | bincFlBin64)
e.w.writeb(e.b[:8])
}
func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
if lim == 4 {
bigen.PutUint32(e.b[:lim], uint32(v))
} else {
bigen.PutUint64(e.b[:lim], v)
}
if bincDoPrune {
i := pruneSignExt(e.b[:lim], pos)
e.w.writen1(bd | lim - 1 - byte(i))
e.w.writeb(e.b[i:lim])
} else {
e.w.writen1(bd | lim - 1)
e.w.writeb(e.b[:lim])
}
}
func (e *bincEncDriver) EncodeInt(v int64) {
const nbd byte = bincVdNegInt << 4
if v >= 0 {
e.encUint(bincVdPosInt<<4, true, uint64(v))
} else if v == -1 {
e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
} else {
e.encUint(bincVdNegInt<<4, false, uint64(-v))
}
}
func (e *bincEncDriver) EncodeUint(v uint64) {
e.encUint(bincVdPosInt<<4, true, v)
}
func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
if v == 0 {
e.w.writen1(bincVdSpecial<<4 | bincSpZero)
} else if pos && v >= 1 && v <= 16 {
e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
} else if v <= math.MaxUint8 {
e.w.writen2(bd|0x0, byte(v))
} else if v <= math.MaxUint16 {
e.w.writen1(bd | 0x01)
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
} else if v <= math.MaxUint32 {
e.encIntegerPrune(bd, pos, v, 4)
} else {
e.encIntegerPrune(bd, pos, v, 8)
}
}
func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
bs := ext.WriteExt(rv)
if bs == nil {
e.EncodeNil()
return
}
e.encodeExtPreamble(uint8(xtag), len(bs))
e.w.writeb(bs)
}
func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
e.w.writeb(re.Data)
}
func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
e.encLen(bincVdCustomExt<<4, uint64(length))
e.w.writen1(xtag)
}
func (e *bincEncDriver) EncodeArrayStart(length int) {
e.encLen(bincVdArray<<4, uint64(length))
}
func (e *bincEncDriver) EncodeMapStart(length int) {
e.encLen(bincVdMap<<4, uint64(length))
}
func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
l := uint64(len(v))
e.encBytesLen(c, l)
if l > 0 {
e.w.writestr(v)
}
}
func (e *bincEncDriver) EncodeSymbol(v string) {
// if WriteSymbolsNoRefs {
// e.encodeString(c_UTF8, v)
// return
// }
//symbols only offer benefit when string length > 1.
//This is because strings with length 1 take only 2 bytes to store
//(bd with embedded length, and single byte for string val).
l := len(v)
if l == 0 {
e.encBytesLen(c_UTF8, 0)
return
} else if l == 1 {
e.encBytesLen(c_UTF8, 1)
e.w.writen1(v[0])
return
}
if e.m == nil {
e.m = make(map[string]uint16, 16)
}
ui, ok := e.m[v]
if ok {
if ui <= math.MaxUint8 {
e.w.writen2(bincVdSymbol<<4, byte(ui))
} else {
e.w.writen1(bincVdSymbol<<4 | 0x8)
bigenHelper{e.b[:2], e.w}.writeUint16(ui)
}
} else {
e.s++
ui = e.s
//ui = uint16(atomic.AddUint32(&e.s, 1))
e.m[v] = ui
var lenprec uint8
if l <= math.MaxUint8 {
// lenprec = 0
} else if l <= math.MaxUint16 {
lenprec = 1
} else if int64(l) <= math.MaxUint32 {
lenprec = 2
} else {
lenprec = 3
}
if ui <= math.MaxUint8 {
e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
} else {
e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
bigenHelper{e.b[:2], e.w}.writeUint16(ui)
}
if lenprec == 0 {
e.w.writen1(byte(l))
} else if lenprec == 1 {
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l))
} else if lenprec == 2 {
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l))
} else {
bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l))
}
e.w.writestr(v)
}
}
func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
l := uint64(len(v))
e.encBytesLen(c, l)
if l > 0 {
e.w.writeb(v)
}
}
func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
//TODO: support bincUnicodeOther (for now, just use string or bytearray)
if c == c_RAW {
e.encLen(bincVdByteArray<<4, length)
} else {
e.encLen(bincVdString<<4, length)
}
}
func (e *bincEncDriver) encLen(bd byte, l uint64) {
if l < 12 {
e.w.writen1(bd | uint8(l+4))
} else {
e.encLenNumber(bd, l)
}
}
func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
if v <= math.MaxUint8 {
e.w.writen2(bd, byte(v))
} else if v <= math.MaxUint16 {
e.w.writen1(bd | 0x01)
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
} else if v <= math.MaxUint32 {
e.w.writen1(bd | 0x02)
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
} else {
e.w.writen1(bd | 0x03)
bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v))
}
}
//------------------------------------
type bincDecSymbol struct {
i uint16
s string
b []byte
}
type bincDecDriver struct {
d *Decoder
h *BincHandle
r decReader
br bool // bytes reader
bdRead bool
bdType valueType
bd byte
vd byte
vs byte
noStreamingCodec
decNoSeparator
b [scratchByteArrayLen]byte
// linear searching on this slice is ok,
// because we typically expect < 32 symbols in each stream.
s []bincDecSymbol
}
func (d *bincDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.vd = d.bd >> 4
d.vs = d.bd & 0x0f
d.bdRead = true
d.bdType = valueTypeUnset
}
func (d *bincDecDriver) IsContainerType(vt valueType) (b bool) {
switch vt {
case valueTypeNil:
return d.vd == bincVdSpecial && d.vs == bincSpNil
case valueTypeBytes:
return d.vd == bincVdByteArray
case valueTypeString:
return d.vd == bincVdString
case valueTypeArray:
return d.vd == bincVdArray
case valueTypeMap:
return d.vd == bincVdMap
}
d.d.errorf("isContainerType: unsupported parameter: %v", vt)
return // "unreachable"
}
func (d *bincDecDriver) TryDecodeAsNil() bool {
if !d.bdRead {
d.readNextBd()
}
if d.bd == bincVdSpecial<<4|bincSpNil {
d.bdRead = false
return true
}
return false
}
func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool {
return rt == timeTypId
}
func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {
if !d.bdRead {
d.readNextBd()
}
if rt == timeTypId {
if d.vd != bincVdTimestamp {
d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
return
}
tt, err := decodeTime(d.r.readx(int(d.vs)))
if err != nil {
panic(err)
}
var vt *time.Time = v.(*time.Time)
*vt = tt
d.bdRead = false
}
}
func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
if vs&0x8 == 0 {
d.r.readb(d.b[0:defaultLen])
} else {
l := d.r.readn1()
if l > 8 {
d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l)
return
}
for i := l; i < 8; i++ {
d.b[i] = 0
}
d.r.readb(d.b[0:l])
}
}
func (d *bincDecDriver) decFloat() (f float64) {
//if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; }
if x := d.vs & 0x7; x == bincFlBin32 {
d.decFloatPre(d.vs, 4)
f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
} else if x == bincFlBin64 {
d.decFloatPre(d.vs, 8)
f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
} else {
d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs)
return
}
return
}
func (d *bincDecDriver) decUint() (v uint64) {
// need to inline the code (interface conversion and type assertion expensive)
switch d.vs {
case 0:
v = uint64(d.r.readn1())
case 1:
d.r.readb(d.b[6:8])
v = uint64(bigen.Uint16(d.b[6:8]))
case 2:
d.b[4] = 0
d.r.readb(d.b[5:8])
v = uint64(bigen.Uint32(d.b[4:8]))
case 3:
d.r.readb(d.b[4:8])
v = uint64(bigen.Uint32(d.b[4:8]))
case 4, 5, 6:
lim := int(7 - d.vs)
d.r.readb(d.b[lim:8])
for i := 0; i < lim; i++ {
d.b[i] = 0
}
v = uint64(bigen.Uint64(d.b[:8]))
case 7:
d.r.readb(d.b[:8])
v = uint64(bigen.Uint64(d.b[:8]))
default:
d.d.errorf("unsigned integers with greater than 64 bits of precision not supported")
return
}
return
}
func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
if !d.bdRead {
d.readNextBd()
}
vd, vs := d.vd, d.vs
if vd == bincVdPosInt {
ui = d.decUint()
} else if vd == bincVdNegInt {
ui = d.decUint()
neg = true
} else if vd == bincVdSmallInt {
ui = uint64(d.vs) + 1
} else if vd == bincVdSpecial {
if vs == bincSpZero {
//i = 0
} else if vs == bincSpNegOne {
neg = true
ui = 1
} else {
d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs)
return
}
} else {
d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
return
}
return
}
func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) {
ui, neg := d.decCheckInteger()
i, overflow := chkOvf.SignedInt(ui)
if overflow {
d.d.errorf("simple: overflow converting %v to signed integer", ui)
return
}
if neg {
i = -i
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("binc: overflow integer: %v", i)
return
}
d.bdRead = false
return
}
func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
ui, neg := d.decCheckInteger()
if neg {
d.d.errorf("Assigning negative signed value to unsigned type")
return
}
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("binc: overflow integer: %v", ui)
return
}
d.bdRead = false
return
}
func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead {
d.readNextBd()
}
vd, vs := d.vd, d.vs
if vd == bincVdSpecial {
d.bdRead = false
if vs == bincSpNan {
return math.NaN()
} else if vs == bincSpPosInf {
return math.Inf(1)
} else if vs == bincSpZeroFloat || vs == bincSpZero {
return
} else if vs == bincSpNegInf {
return math.Inf(-1)
} else {
d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
return
}
} else if vd == bincVdFloat {
f = d.decFloat()
} else {
f = float64(d.DecodeInt(64))
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("binc: float32 overflow: %v", f)
return
}
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *bincDecDriver) DecodeBool() (b bool) {
if !d.bdRead {
d.readNextBd()
}
if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) {
// b = false
} else if bd == (bincVdSpecial | bincSpTrue) {
b = true
} else {
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
}
func (d *bincDecDriver) ReadMapStart() (length int) {
if d.vd != bincVdMap {
d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
return
}
length = d.decLen()
d.bdRead = false
return
}
func (d *bincDecDriver) ReadArrayStart() (length int) {
if d.vd != bincVdArray {
d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
return
}
length = d.decLen()
d.bdRead = false
return
}
func (d *bincDecDriver) decLen() int {
if d.vs > 3 {
return int(d.vs - 4)
}
return int(d.decLenNumber())
}
func (d *bincDecDriver) decLenNumber() (v uint64) {
if x := d.vs; x == 0 {
v = uint64(d.r.readn1())
} else if x == 1 {
d.r.readb(d.b[6:8])
v = uint64(bigen.Uint16(d.b[6:8]))
} else if x == 2 {
d.r.readb(d.b[4:8])
v = uint64(bigen.Uint32(d.b[4:8]))
} else {
d.r.readb(d.b[:8])
v = bigen.Uint64(d.b[:8])
}
return
}
func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == bincVdSpecial<<4|bincSpNil {
d.bdRead = false
return
}
var slen int = -1
// var ok bool
switch d.vd {
case bincVdString, bincVdByteArray:
slen = d.decLen()
if zerocopy {
if d.br {
bs2 = d.r.readx(slen)
} else if len(bs) == 0 {
bs2 = decByteSlice(d.r, slen, d.b[:])
} else {
bs2 = decByteSlice(d.r, slen, bs)
}
} else {
bs2 = decByteSlice(d.r, slen, bs)
}
if withString {
s = string(bs2)
}
case bincVdSymbol:
// zerocopy doesn't apply for symbols,
// as the values must be stored in a table for later use.
//
//from vs: extract numSymbolBytes, containsStringVal, strLenPrecision,
//extract symbol
//if containsStringVal, read it and put in map
//else look in map for string value
var symbol uint16
vs := d.vs
if vs&0x8 == 0 {
symbol = uint16(d.r.readn1())
} else {
symbol = uint16(bigen.Uint16(d.r.readx(2)))
}
if d.s == nil {
d.s = make([]bincDecSymbol, 0, 16)
}
if vs&0x4 == 0 {
for i := range d.s {
j := &d.s[i]
if j.i == symbol {
bs2 = j.b
if withString {
if j.s == "" && bs2 != nil {
j.s = string(bs2)
}
s = j.s
}
break
}
}
} else {
switch vs & 0x3 {
case 0:
slen = int(d.r.readn1())
case 1:
slen = int(bigen.Uint16(d.r.readx(2)))
case 2:
slen = int(bigen.Uint32(d.r.readx(4)))
case 3:
slen = int(bigen.Uint64(d.r.readx(8)))
}
// since using symbols, do not store any part of
// the parameter bs in the map, as it might be a shared buffer.
// bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, nil)
if withString {
s = string(bs2)
}
d.s = append(d.s, bincDecSymbol{symbol, s, bs2})
}
default:
d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
return
}
d.bdRead = false
return
}
func (d *bincDecDriver) DecodeString() (s string) {
// DecodeBytes does not accommodate symbols, whose impl stores string version in map.
// Use decStringAndBytes directly.
// return string(d.DecodeBytes(d.b[:], true, true))
_, s = d.decStringAndBytes(d.b[:], true, true)
return
}
func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
if isstring {
bsOut, _ = d.decStringAndBytes(bs, false, zerocopy)
return
}
if !d.bdRead {
d.readNextBd()
}
if d.bd == bincVdSpecial<<4|bincSpNil {
d.bdRead = false
return nil
}
var clen int
if d.vd == bincVdString || d.vd == bincVdByteArray {
clen = d.decLen()
} else {
d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x",
bincVdString, bincVdByteArray, d.vd)
return
}
d.bdRead = false
if zerocopy {
if d.br {
return d.r.readx(clen)
} else if len(bs) == 0 {
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
}
func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if xtag > 0xff {
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
return
}
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
realxtag = uint64(realxtag1)
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
} else {
ext.ReadExt(rv, xbs)
}
return
}
func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
if !d.bdRead {
d.readNextBd()
}
if d.vd == bincVdCustomExt {
l := d.decLen()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
return
}
xbs = d.r.readx(l)
} else if d.vd == bincVdByteArray {
xbs = d.DecodeBytes(nil, false, true)
} else {
d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
return
}
d.bdRead = false
return
}
func (d *bincDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
if !d.bdRead {
d.readNextBd()
}
switch d.vd {
case bincVdSpecial:
switch d.vs {
case bincSpNil:
vt = valueTypeNil
case bincSpFalse:
vt = valueTypeBool
v = false
case bincSpTrue:
vt = valueTypeBool
v = true
case bincSpNan:
vt = valueTypeFloat
v = math.NaN()
case bincSpPosInf:
vt = valueTypeFloat
v = math.Inf(1)
case bincSpNegInf:
vt = valueTypeFloat
v = math.Inf(-1)
case bincSpZeroFloat:
vt = valueTypeFloat
v = float64(0)
case bincSpZero:
vt = valueTypeUint
v = uint64(0) // int8(0)
case bincSpNegOne:
vt = valueTypeInt
v = int64(-1) // int8(-1)
default:
d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs)
return
}
case bincVdSmallInt:
vt = valueTypeUint
v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
case bincVdPosInt:
vt = valueTypeUint
v = d.decUint()
case bincVdNegInt:
vt = valueTypeInt
v = -(int64(d.decUint()))
case bincVdFloat:
vt = valueTypeFloat
v = d.decFloat()
case bincVdSymbol:
vt = valueTypeSymbol
v = d.DecodeString()
case bincVdString:
vt = valueTypeString
v = d.DecodeString()
case bincVdByteArray:
vt = valueTypeBytes
v = d.DecodeBytes(nil, false, false)
case bincVdTimestamp:
vt = valueTypeTimestamp
tt, err := decodeTime(d.r.readx(int(d.vs)))
if err != nil {
panic(err)
}
v = tt
case bincVdCustomExt:
vt = valueTypeExt
l := d.decLen()
var re RawExt
re.Tag = uint64(d.r.readn1())
re.Data = d.r.readx(l)
v = &re
vt = valueTypeExt
case bincVdArray:
vt = valueTypeArray
decodeFurther = true
case bincVdMap:
vt = valueTypeMap
decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
return
}
if !decodeFurther {
d.bdRead = false
}
if vt == valueTypeUint && d.h.SignedInteger {
d.bdType = valueTypeInt
v = int64(v.(uint64))
}
return
}
//------------------------------------
//BincHandle is a Handle for the Binc Schema-Free Encoding Format
//defined at https://github.com/ugorji/binc .
//
//BincHandle currently supports all Binc features with the following EXCEPTIONS:
// - only integers up to 64 bits of precision are supported.
// big integers are unsupported.
// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
// extended precision and decimal IEEE 754 floats are unsupported.
// - Only UTF-8 strings supported.
// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
//
//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
type BincHandle struct {
BasicHandle
binaryEncodingType
}
func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
return &bincEncDriver{e: e, w: e.w}
}
func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
var _ decDriver = (*bincDecDriver)(nil)
var _ encDriver = (*bincEncDriver)(nil)

View File

@@ -0,0 +1,566 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import "math"
const (
cborMajorUint byte = iota
cborMajorNegInt
cborMajorBytes
cborMajorText
cborMajorArray
cborMajorMap
cborMajorTag
cborMajorOther
)
const (
cborBdFalse byte = 0xf4 + iota
cborBdTrue
cborBdNil
cborBdUndefined
cborBdExt
cborBdFloat16
cborBdFloat32
cborBdFloat64
)
const (
cborBdIndefiniteBytes byte = 0x5f
cborBdIndefiniteString = 0x7f
cborBdIndefiniteArray = 0x9f
cborBdIndefiniteMap = 0xbf
cborBdBreak = 0xff
)
const (
CborStreamBytes byte = 0x5f
CborStreamString = 0x7f
CborStreamArray = 0x9f
CborStreamMap = 0xbf
CborStreamBreak = 0xff
)
const (
cborBaseUint byte = 0x00
cborBaseNegInt = 0x20
cborBaseBytes = 0x40
cborBaseString = 0x60
cborBaseArray = 0x80
cborBaseMap = 0xa0
cborBaseTag = 0xc0
cborBaseSimple = 0xe0
)
// -------------------
type cborEncDriver struct {
e *Encoder
w encWriter
h *CborHandle
noBuiltInTypes
encNoSeparator
x [8]byte
}
func (e *cborEncDriver) EncodeNil() {
e.w.writen1(cborBdNil)
}
func (e *cborEncDriver) EncodeBool(b bool) {
if b {
e.w.writen1(cborBdTrue)
} else {
e.w.writen1(cborBdFalse)
}
}
func (e *cborEncDriver) EncodeFloat32(f float32) {
e.w.writen1(cborBdFloat32)
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
}
func (e *cborEncDriver) EncodeFloat64(f float64) {
e.w.writen1(cborBdFloat64)
bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
}
func (e *cborEncDriver) encUint(v uint64, bd byte) {
if v <= 0x17 {
e.w.writen1(byte(v) + bd)
} else if v <= math.MaxUint8 {
e.w.writen2(bd+0x18, uint8(v))
} else if v <= math.MaxUint16 {
e.w.writen1(bd + 0x19)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
} else if v <= math.MaxUint32 {
e.w.writen1(bd + 0x1a)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
} else if v <= math.MaxUint64 {
e.w.writen1(bd + 0x1b)
bigenHelper{e.x[:8], e.w}.writeUint64(v)
}
}
func (e *cborEncDriver) EncodeInt(v int64) {
if v < 0 {
e.encUint(uint64(-1-v), cborBaseNegInt)
} else {
e.encUint(uint64(v), cborBaseUint)
}
}
func (e *cborEncDriver) EncodeUint(v uint64) {
e.encUint(v, cborBaseUint)
}
func (e *cborEncDriver) encLen(bd byte, length int) {
e.encUint(uint64(length), bd)
}
func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
e.encUint(uint64(xtag), cborBaseTag)
if v := ext.ConvertExt(rv); v == nil {
e.EncodeNil()
} else {
en.encode(v)
}
}
func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
e.encUint(uint64(re.Tag), cborBaseTag)
if re.Data != nil {
en.encode(re.Data)
} else if re.Value == nil {
e.EncodeNil()
} else {
en.encode(re.Value)
}
}
func (e *cborEncDriver) EncodeArrayStart(length int) {
e.encLen(cborBaseArray, length)
}
func (e *cborEncDriver) EncodeMapStart(length int) {
e.encLen(cborBaseMap, length)
}
func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
e.encLen(cborBaseString, len(v))
e.w.writestr(v)
}
func (e *cborEncDriver) EncodeSymbol(v string) {
e.EncodeString(c_UTF8, v)
}
func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
e.encLen(cborBaseBytes, len(v))
e.w.writeb(v)
}
// ----------------------
type cborDecDriver struct {
d *Decoder
h *CborHandle
r decReader
br bool // bytes reader
bdRead bool
bdType valueType
bd byte
b [scratchByteArrayLen]byte
noBuiltInTypes
decNoSeparator
}
func (d *cborDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
d.bdType = valueTypeUnset
}
func (d *cborDecDriver) IsContainerType(vt valueType) (bv bool) {
switch vt {
case valueTypeNil:
return d.bd == cborBdNil
case valueTypeBytes:
return d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString)
case valueTypeString:
return d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray)
case valueTypeArray:
return d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap)
case valueTypeMap:
return d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag)
}
d.d.errorf("isContainerType: unsupported parameter: %v", vt)
return // "unreachable"
}
func (d *cborDecDriver) TryDecodeAsNil() bool {
if !d.bdRead {
d.readNextBd()
}
// treat Nil and Undefined as nil values
if d.bd == cborBdNil || d.bd == cborBdUndefined {
d.bdRead = false
return true
}
return false
}
func (d *cborDecDriver) CheckBreak() bool {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdBreak {
d.bdRead = false
return true
}
return false
}
func (d *cborDecDriver) decUint() (ui uint64) {
v := d.bd & 0x1f
if v <= 0x17 {
ui = uint64(v)
} else {
if v == 0x18 {
ui = uint64(d.r.readn1())
} else if v == 0x19 {
ui = uint64(bigen.Uint16(d.r.readx(2)))
} else if v == 0x1a {
ui = uint64(bigen.Uint32(d.r.readx(4)))
} else if v == 0x1b {
ui = uint64(bigen.Uint64(d.r.readx(8)))
} else {
d.d.errorf("decUint: Invalid descriptor: %v", d.bd)
return
}
}
return
}
func (d *cborDecDriver) decCheckInteger() (neg bool) {
if !d.bdRead {
d.readNextBd()
}
major := d.bd >> 5
if major == cborMajorUint {
} else if major == cborMajorNegInt {
neg = true
} else {
d.d.errorf("invalid major: %v (bd: %v)", major, d.bd)
return
}
return
}
func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) {
neg := d.decCheckInteger()
ui := d.decUint()
// check if this number can be converted to an int without overflow
var overflow bool
if neg {
if i, overflow = chkOvf.SignedInt(ui + 1); overflow {
d.d.errorf("cbor: overflow converting %v to signed integer", ui+1)
return
}
i = -i
} else {
if i, overflow = chkOvf.SignedInt(ui); overflow {
d.d.errorf("cbor: overflow converting %v to signed integer", ui)
return
}
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("cbor: overflow integer: %v", i)
return
}
d.bdRead = false
return
}
func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
if d.decCheckInteger() {
d.d.errorf("Assigning negative signed value to unsigned type")
return
}
ui = d.decUint()
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("cbor: overflow integer: %v", ui)
return
}
d.bdRead = false
return
}
func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead {
d.readNextBd()
}
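// CBOR floats come in half (16-bit), single (32-bit) or double (64-bit) precision;
// half-precision values are widened to float32 via halfFloatToFloatBits before use.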
if bd := d.bd; bd == cborBdFloat16 {
f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
} else if bd == cborBdFloat32 {
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
} else if bd == cborBdFloat64 {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else if bd >= cborBaseUint && bd < cborBaseBytes {
f = float64(d.DecodeInt(64))
} else {
d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd)
return
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("cbor: float32 overflow: %v", f)
return
}
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *cborDecDriver) DecodeBool() (b bool) {
if !d.bdRead {
d.readNextBd()
}
if bd := d.bd; bd == cborBdTrue {
b = true
} else if bd == cborBdFalse {
} else {
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
}
func (d *cborDecDriver) ReadMapStart() (length int) {
d.bdRead = false
if d.bd == cborBdIndefiniteMap {
return -1
}
return d.decLen()
}
func (d *cborDecDriver) ReadArrayStart() (length int) {
d.bdRead = false
if d.bd == cborBdIndefiniteArray {
return -1
}
return d.decLen()
}
func (d *cborDecDriver) decLen() int {
return int(d.decUint())
}
func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
d.bdRead = false
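// An indefinite-length byte/text string is a sequence of definite-length chunks
// terminated by the 'break' stop code; accumulate each chunk into bs.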
for {
if d.CheckBreak() {
break
}
if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd)
return nil
}
n := d.decLen()
oldLen := len(bs)
newLen := oldLen + n
if newLen > cap(bs) {
bs2 := make([]byte, newLen, 2*cap(bs)+n)
copy(bs2, bs)
bs = bs2
} else {
bs = bs[:newLen]
}
d.r.readb(bs[oldLen:newLen])
// bs = append(bs, d.r.readn()...)
d.bdRead = false
}
d.bdRead = false
return bs
}
func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdNil || d.bd == cborBdUndefined {
d.bdRead = false
return nil
}
if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
if bs == nil {
return d.decAppendIndefiniteBytes(nil)
}
return d.decAppendIndefiniteBytes(bs[:0])
}
clen := d.decLen()
d.bdRead = false
if zerocopy {
if d.br {
return d.r.readx(clen)
} else if len(bs) == 0 {
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
}
func (d *cborDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.b[:], true, true))
}
func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if !d.bdRead {
d.readNextBd()
}
u := d.decUint()
d.bdRead = false
realxtag = u
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
d.d.decode(&re.Value)
} else if xtag != realxtag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
return
} else {
var v interface{}
d.d.decode(&v)
ext.UpdateExt(rv, v)
}
d.bdRead = false
return
}
func (d *cborDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case cborBdNil:
vt = valueTypeNil
case cborBdFalse:
vt = valueTypeBool
v = false
case cborBdTrue:
vt = valueTypeBool
v = true
case cborBdFloat16, cborBdFloat32:
vt = valueTypeFloat
v = d.DecodeFloat(true)
case cborBdFloat64:
vt = valueTypeFloat
v = d.DecodeFloat(false)
case cborBdIndefiniteBytes:
vt = valueTypeBytes
v = d.DecodeBytes(nil, false, false)
case cborBdIndefiniteString:
vt = valueTypeString
v = d.DecodeString()
case cborBdIndefiniteArray:
vt = valueTypeArray
decodeFurther = true
case cborBdIndefiniteMap:
vt = valueTypeMap
decodeFurther = true
default:
switch {
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
if d.h.SignedInteger {
vt = valueTypeInt
v = d.DecodeInt(64)
} else {
vt = valueTypeUint
v = d.DecodeUint(64)
}
case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
vt = valueTypeInt
v = d.DecodeInt(64)
case d.bd >= cborBaseBytes && d.bd < cborBaseString:
vt = valueTypeBytes
v = d.DecodeBytes(nil, false, false)
case d.bd >= cborBaseString && d.bd < cborBaseArray:
vt = valueTypeString
v = d.DecodeString()
case d.bd >= cborBaseArray && d.bd < cborBaseMap:
vt = valueTypeArray
decodeFurther = true
case d.bd >= cborBaseMap && d.bd < cborBaseTag:
vt = valueTypeMap
decodeFurther = true
case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
vt = valueTypeExt
var re RawExt
ui := d.decUint()
d.bdRead = false
re.Tag = ui
d.d.decode(&re.Value)
v = &re
// decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
return
}
}
if !decodeFurther {
d.bdRead = false
}
return
}
// -------------------------
// CborHandle is a Handle for the CBOR encoding format,
// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
//
// CBOR is comprehensively supported, including support for:
// - indefinite-length arrays/maps/bytes/strings
// - (extension) tags in range 0..0xffff (0 .. 65535)
// - half, single and double-precision floats
// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
// - nil, true, false, ...
// - arrays and maps, bytes and text strings
//
// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
// Users can implement them as needed (using SetExt), including spec-documented ones:
// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
//
// To encode with indefinite lengths (streaming), users will use
// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants.
//
// For example, to encode "one-byte" as an indefinite length string:
// var buf bytes.Buffer
// e := NewEncoder(&buf, new(CborHandle))
// buf.WriteByte(CborStreamString)
// e.MustEncode("one-")
// e.MustEncode("byte")
// buf.WriteByte(CborStreamBreak)
// encodedBytes := buf.Bytes()
// var vv interface{}
// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv)
// // Now, vv contains the same string "one-byte"
//
type CborHandle struct {
BasicHandle
binaryEncodingType
}
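// For ordinary (definite-length) values, a minimal round-trip sketch looks like:
//   var b []byte
//   _ = NewEncoderBytes(&b, new(CborHandle)).Encode(map[string]int{"a": 1})
//   var m map[string]int
//   _ = NewDecoderBytes(b, new(CborHandle)).Decode(&m)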
func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
return &cborEncDriver{e: e, w: e.w, h: h}
}
func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
var _ decDriver = (*cborDecDriver)(nil)
var _ encDriver = (*cborEncDriver)(nil)

View File

@ -0,0 +1,205 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import (
"bufio"
"bytes"
"encoding/hex"
"math"
"os"
"regexp"
"strings"
"testing"
)
func TestCborIndefiniteLength(t *testing.T) {
oldMapType := testCborH.MapType
defer func() {
testCborH.MapType = oldMapType
}()
testCborH.MapType = testMapStrIntfTyp
// var (
// M1 map[string][]byte
// M2 map[uint64]bool
// L1 []interface{}
// S1 []string
// B1 []byte
// )
var v, vv interface{}
// define it (v), encode it using indefinite lengths, decode it (vv), compare v to vv
v = map[string]interface{}{
"one-byte-key": []byte{1, 2, 3, 4, 5, 6},
"two-string-key": "two-value",
"three-list-key": []interface{}{true, false, uint64(1), int64(-1)},
}
var buf bytes.Buffer
// buf.Reset()
e := NewEncoder(&buf, testCborH)
buf.WriteByte(cborBdIndefiniteMap)
//----
buf.WriteByte(cborBdIndefiniteString)
e.MustEncode("one-")
e.MustEncode("byte-")
e.MustEncode("key")
buf.WriteByte(cborBdBreak)
buf.WriteByte(cborBdIndefiniteBytes)
e.MustEncode([]byte{1, 2, 3})
e.MustEncode([]byte{4, 5, 6})
buf.WriteByte(cborBdBreak)
//----
buf.WriteByte(cborBdIndefiniteString)
e.MustEncode("two-")
e.MustEncode("string-")
e.MustEncode("key")
buf.WriteByte(cborBdBreak)
buf.WriteByte(cborBdIndefiniteString)
e.MustEncode([]byte("two-")) // encode as bytes, to check robustness of code
e.MustEncode([]byte("value"))
buf.WriteByte(cborBdBreak)
//----
buf.WriteByte(cborBdIndefiniteString)
e.MustEncode("three-")
e.MustEncode("list-")
e.MustEncode("key")
buf.WriteByte(cborBdBreak)
buf.WriteByte(cborBdIndefiniteArray)
e.MustEncode(true)
e.MustEncode(false)
e.MustEncode(uint64(1))
e.MustEncode(int64(-1))
buf.WriteByte(cborBdBreak)
buf.WriteByte(cborBdBreak) // close map
NewDecoderBytes(buf.Bytes(), testCborH).MustDecode(&vv)
if err := deepEqual(v, vv); err != nil {
logT(t, "-------- Before and After marshal do not match: Error: %v", err)
logT(t, " ....... GOLDEN: (%T) %#v", v, v)
logT(t, " ....... DECODED: (%T) %#v", vv, vv)
failT(t)
}
}
type testCborGolden struct {
Base64 string `codec:"cbor"`
Hex string `codec:"hex"`
Roundtrip bool `codec:"roundtrip"`
Decoded interface{} `codec:"decoded"`
Diagnostic string `codec:"diagnostic"`
Skip bool `codec:"skip"`
}
// Some tests are skipped because they include numbers outside the range of int64/uint64
func doTestCborGoldens(t *testing.T) {
oldMapType := testCborH.MapType
defer func() {
testCborH.MapType = oldMapType
}()
testCborH.MapType = testMapStrIntfTyp
// decode test-cbor-goldens.json into a list of []*testCborGolden
// for each one,
// - decode hex into []byte bs
// - decode bs into interface{} v
// - compare both using deepequal
// - for any miss, record it
var gs []*testCborGolden
f, err := os.Open("test-cbor-goldens.json")
if err != nil {
logT(t, "error opening test-cbor-goldens.json: %v", err)
failT(t)
}
defer f.Close()
jh := new(JsonHandle)
jh.MapType = testMapStrIntfTyp
// d := NewDecoder(f, jh)
d := NewDecoder(bufio.NewReader(f), jh)
// err = d.Decode(&gs)
d.MustDecode(&gs)
if err != nil {
logT(t, "error json decoding test-cbor-goldens.json: %v", err)
failT(t)
}
tagregex := regexp.MustCompile(`[\d]+\(.+?\)`)
hexregex := regexp.MustCompile(`h'([0-9a-fA-F]*)'`)
for i, g := range gs {
// fmt.Printf("%v, skip: %v, isTag: %v, %s\n", i, g.Skip, tagregex.MatchString(g.Diagnostic), g.Diagnostic)
// skip tags or simple or those with prefix, as we can't verify them.
if g.Skip || strings.HasPrefix(g.Diagnostic, "simple(") || tagregex.MatchString(g.Diagnostic) {
// fmt.Printf("%v: skipped\n", i)
logT(t, "[%v] skipping because skip=true OR unsupported simple value or Tag Value", i)
continue
}
// println("++++++++++++", i, "g.Diagnostic", g.Diagnostic)
if hexregex.MatchString(g.Diagnostic) {
// println(i, "g.Diagnostic matched hex")
if s2 := g.Diagnostic[2 : len(g.Diagnostic)-1]; s2 == "" {
g.Decoded = zeroByteSlice
} else if bs2, err2 := hex.DecodeString(s2); err2 == nil {
g.Decoded = bs2
}
// fmt.Printf("%v: hex: %v\n", i, g.Decoded)
}
bs, err := hex.DecodeString(g.Hex)
if err != nil {
logT(t, "[%v] error hex decoding %s [%v]: %v", i, g.Hex, err)
failT(t)
}
var v interface{}
NewDecoderBytes(bs, testCborH).MustDecode(&v)
if _, ok := v.(RawExt); ok {
continue
}
// check the diagnostics to compare
switch g.Diagnostic {
case "Infinity":
b := math.IsInf(v.(float64), 1)
testCborError(t, i, math.Inf(1), v, nil, &b)
case "-Infinity":
b := math.IsInf(v.(float64), -1)
testCborError(t, i, math.Inf(-1), v, nil, &b)
case "NaN":
// println(i, "checking NaN")
b := math.IsNaN(v.(float64))
testCborError(t, i, math.NaN(), v, nil, &b)
case "undefined":
b := v == nil
testCborError(t, i, nil, v, nil, &b)
default:
v0 := g.Decoded
// testCborCoerceJsonNumber(reflect.ValueOf(&v0))
testCborError(t, i, v0, v, deepEqual(v0, v), nil)
}
}
}
func testCborError(t *testing.T, i int, v0, v1 interface{}, err error, equal *bool) {
if err == nil && equal == nil {
// fmt.Printf("%v testCborError passed (err and equal nil)\n", i)
return
}
if err != nil {
logT(t, "[%v] deepEqual error: %v", i, err)
logT(t, " ....... GOLDEN: (%T) %#v", v0, v0)
logT(t, " ....... DECODED: (%T) %#v", v1, v1)
failT(t)
}
if equal != nil && !*equal {
logT(t, "[%v] values not equal", i)
logT(t, " ....... GOLDEN: (%T) %#v", v0, v0)
logT(t, " ....... DECODED: (%T) %#v", v1, v1)
failT(t)
}
// fmt.Printf("%v testCborError passed (checks passed)\n", i)
}
func TestCborGoldens(t *testing.T) {
doTestCborGoldens(t)
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,36 @@
# codecgen tool
Given a list of `*.go` files to parse and an output file, codecgen creates that
output file containing `codec.Selfer` implementations for the named types found
in the parsed files.
Using codecgen is very straightforward.
**Download and install the tool**
`go get -u github.com/ugorji/go/codec/codecgen`
**Run the tool on your files**
The command line format is:
`codecgen [options] (-o outfile) (infile ...)`
```sh
% codecgen -?
Usage of codecgen:
-c="github.com/ugorji/go/codec": codec path
-o="": out file
-r=".*": regex for type name to match
-rt="": tags for go run
-t="": build tag to put in file
-u=false: Use unsafe, e.g. to avoid unnecessary allocation on []byte->string
-x=false: keep temp file
% codecgen -o values_codecgen.go values.go values2.go moretypedefs.go
```
Please see the [blog article](http://ugorji.net/blog/go-codecgen)
for more information on how to use the tool.
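As an illustrative sketch (the type `MyType` and file names below are hypothetical,
not part of this repository), a type whose `Selfer` methods have been generated is
encoded and decoded through the normal `Encoder`/`Decoder` API; the generated
methods are picked up automatically:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

// MyType is a hypothetical type; codecgen would have written
// CodecEncodeSelf/CodecDecodeSelf methods for it into the -o output file.
type MyType struct {
	Name  string
	Count int
}

func main() {
	var buf bytes.Buffer
	h := new(codec.CborHandle)
	if err := codec.NewEncoder(&buf, h).Encode(MyType{Name: "a", Count: 1}); err != nil {
		panic(err)
	}
	var out MyType
	if err := codec.NewDecoderBytes(buf.Bytes(), h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}
```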

View File

@ -0,0 +1,271 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
// codecgen generates codec.Selfer implementations for a set of types.
package main
import (
"bufio"
"bytes"
"errors"
"flag"
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/token"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"text/template"
"time"
)
const genFrunMainTmpl = `//+build ignore
package main
{{ if .Types }}import "{{ .ImportPath }}"{{ end }}
func main() {
{{ $.PackageName }}.CodecGenTempWrite{{ .RandString }}()
}
`
// const genFrunPkgTmpl = `//+build codecgen
const genFrunPkgTmpl = `
package {{ $.PackageName }}
import (
{{ if not .CodecPkgFiles }}{{ .CodecPkgName }} "{{ .CodecImportPath }}"{{ end }}
{{/*
{{ if .Types }}"{{ .ImportPath }}"{{ end }}
"io"
*/}}
"os"
"reflect"
"bytes"
"go/format"
)
{{/* This is not used anymore. Remove it.
func write(w io.Writer, s string) {
if _, err := io.WriteString(w, s); err != nil {
panic(err)
}
}
*/}}
func CodecGenTempWrite{{ .RandString }}() {
fout, err := os.Create("{{ .OutFile }}")
if err != nil {
panic(err)
}
defer fout.Close()
var out bytes.Buffer
var typs []reflect.Type
{{ range $index, $element := .Types }}
var t{{ $index }} {{ . }}
typs = append(typs, reflect.TypeOf(t{{ $index }}))
{{ end }}
{{ if not .CodecPkgFiles }}{{ .CodecPkgName }}.{{ end }}Gen(&out, "{{ .BuildTag }}", "{{ .PackageName }}", {{ .UseUnsafe }}, typs...)
bout, err := format.Source(out.Bytes())
if err != nil {
fout.Write(out.Bytes())
panic(err)
}
fout.Write(bout)
}
`
// Generate is given a list of *.go files to parse, and an output file (fout).
//
// It finds all types T in the files, and it creates 2 tmp files (frun).
// - main package file passed to 'go run'
// - package level file which calls *genRunner.Selfer to write Selfer impls for each T.
// We use a package level file so that it can reference unexported types in the package being worked on.
// Tool then executes: "go run __frun__" which creates fout.
// fout contains Codec(En|De)codeSelf implementations for every type T.
//
func Generate(outfile, buildTag, codecPkgPath string, useUnsafe bool, goRunTag string,
regexName *regexp.Regexp, deleteTempFile bool, infiles ...string) (err error) {
// For each file, grab AST, find each type, and write a call to it.
if len(infiles) == 0 {
return
}
if outfile == "" || codecPkgPath == "" {
err = errors.New("outfile and codec package path cannot be blank")
return
}
// We have to parse dir for package, before opening the temp file for writing (else ImportDir fails).
// Also, ImportDir(...) must take an absolute path.
lastdir := filepath.Dir(outfile)
absdir, err := filepath.Abs(lastdir)
if err != nil {
return
}
pkg, err := build.Default.ImportDir(absdir, build.AllowBinary)
if err != nil {
return
}
type tmplT struct {
CodecPkgName string
CodecImportPath string
ImportPath string
OutFile string
PackageName string
RandString string
BuildTag string
Types []string
CodecPkgFiles bool
UseUnsafe bool
}
tv := tmplT{
CodecPkgName: "codec1978",
OutFile: outfile,
CodecImportPath: codecPkgPath,
BuildTag: buildTag,
UseUnsafe: useUnsafe,
RandString: strconv.FormatInt(time.Now().UnixNano(), 10),
}
tv.ImportPath = pkg.ImportPath
if tv.ImportPath == tv.CodecImportPath {
tv.CodecPkgFiles = true
tv.CodecPkgName = "codec"
}
astfiles := make([]*ast.File, len(infiles))
for i, infile := range infiles {
if filepath.Dir(infile) != lastdir {
err = errors.New("in files must all be in same directory as outfile")
return
}
fset := token.NewFileSet()
astfiles[i], err = parser.ParseFile(fset, infile, nil, 0)
if err != nil {
return
}
if i == 0 {
tv.PackageName = astfiles[i].Name.Name
if tv.PackageName == "main" {
// codecgen cannot be run on types in the 'main' package.
// A temporary 'main' package must be created, and should reference the fully built
// package containing the types.
// Also, the temporary main package will conflict with the main package which already has a main method.
err = errors.New("codecgen cannot be run on types in the 'main' package")
return
}
}
}
for _, f := range astfiles {
for _, d := range f.Decls {
if gd, ok := d.(*ast.GenDecl); ok {
for _, dd := range gd.Specs {
if td, ok := dd.(*ast.TypeSpec); ok {
// if len(td.Name.Name) == 0 || td.Name.Name[0] > 'Z' || td.Name.Name[0] < 'A' {
if len(td.Name.Name) == 0 {
continue
}
// only generate for:
// struct: StructType
// primitives (numbers, bool, string): Ident
// map: MapType
// slice, array: ArrayType
// chan: ChanType
// do not generate:
// FuncType, InterfaceType, StarExpr (ptr), etc
switch td.Type.(type) {
case *ast.StructType, *ast.Ident, *ast.MapType, *ast.ArrayType, *ast.ChanType:
if regexName.FindStringIndex(td.Name.Name) != nil {
tv.Types = append(tv.Types, td.Name.Name)
}
}
}
}
}
}
}
if len(tv.Types) == 0 {
return
}
// we cannot use ioutil.TempFile, because we cannot guarantee the file suffix (.go).
// Also, we cannot create file in temp directory,
// because go run will not work (as it needs to see the types here).
// Consequently, create the temp file in the current directory, and remove when done.
// frun, err = ioutil.TempFile("", "codecgen-")
// frunName := filepath.Join(os.TempDir(), "codecgen-"+strconv.FormatInt(time.Now().UnixNano(), 10)+".go")
frunMainName := "codecgen-main-" + tv.RandString + ".generated.go"
frunPkgName := "codecgen-pkg-" + tv.RandString + ".generated.go"
if deleteTempFile {
defer os.Remove(frunMainName)
defer os.Remove(frunPkgName)
}
// var frunMain, frunPkg *os.File
if _, err = gen1(frunMainName, genFrunMainTmpl, &tv); err != nil {
return
}
if _, err = gen1(frunPkgName, genFrunPkgTmpl, &tv); err != nil {
return
}
// remove outfile, so "go run ..." will not think that types in outfile already exist.
os.Remove(outfile)
// execute go run frun
cmd := exec.Command("go", "run", "-tags="+goRunTag, frunMainName) //, frunPkg.Name())
var buf bytes.Buffer
cmd.Stdout = &buf
cmd.Stderr = &buf
if err = cmd.Run(); err != nil {
err = fmt.Errorf("error running 'go run %s': %v, console: %s",
frunMainName, err, buf.Bytes())
return
}
os.Stdout.Write(buf.Bytes())
return
}
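// For illustration only (file names hypothetical): generating Selfer implementations
// for every matching named type in values.go into values_codecgen.go would look like:
//	err := Generate("values_codecgen.go", "", "github.com/ugorji/go/codec",
//		false, "", regexp.MustCompile(".*"), true, "values.go")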
func gen1(frunName, tmplStr string, tv interface{}) (frun *os.File, err error) {
os.Remove(frunName)
if frun, err = os.Create(frunName); err != nil {
return
}
defer frun.Close()
t := template.New("")
if t, err = t.Parse(tmplStr); err != nil {
return
}
bw := bufio.NewWriter(frun)
if err = t.Execute(bw, tv); err != nil {
return
}
if err = bw.Flush(); err != nil {
return
}
return
}
func main() {
o := flag.String("o", "", "out file")
c := flag.String("c", genCodecPath, "codec path")
t := flag.String("t", "", "build tag to put in file")
r := flag.String("r", ".*", "regex for type name to match")
rt := flag.String("rt", "", "tags for go run")
x := flag.Bool("x", false, "keep temp file")
u := flag.Bool("u", false, "Use unsafe, e.g. to avoid unnecessary allocation on []byte->string")
flag.Parse()
if err := Generate(*o, *t, *c, *u, *rt,
regexp.MustCompile(*r), !*x, flag.Args()...); err != nil {
fmt.Fprintf(os.Stderr, "codecgen error: %v\n", err)
os.Exit(1)
}
}

View File

@ -0,0 +1,3 @@
package main
const genCodecPath = "github.com/ugorji/go/codec"

View File

@ -0,0 +1,22 @@
//+build x,codecgen
package codec
import (
"fmt"
"testing"
)
func TestCodecgenJson1(t *testing.T) {
const callCodecgenDirect bool = true
v := newTestStruc(2, false, !testSkipIntf, false)
var bs []byte
e := NewEncoderBytes(&bs, testJsonH)
if callCodecgenDirect {
v.CodecEncodeSelf(e)
e.w.atEndOfEncode()
} else {
e.MustEncode(v)
}
fmt.Printf("%s\n", bs)
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,442 @@
// //+build ignore
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from fast-path.go.tmpl
// ************************************************************
package codec
// Fast path functions try to create a fast path encode or decode implementation
// for common maps and slices.
//
// We define the functions and register them in this single file
// so as not to pollute the encode.go and decode.go, and create a dependency in there.
// This file can be omitted without causing a build failure.
//
// The advantage of fast paths is:
// - Many calls bypass reflection altogether
//
// Currently supported:
// - slice of all builtin types,
// - map of all builtin types to string or interface value
// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.
// For example:
// m2 := map[string]int{}
// p2 := []interface{}{m2}
// // decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
//
import (
"reflect"
"sort"
)
const fastpathCheckNilFalse = false // for reflect
const fastpathCheckNilTrue = true // for type switch
type fastpathT struct {}
var fastpathTV fastpathT
type fastpathE struct {
rtid uintptr
rt reflect.Type
encfn func(encFnInfo, reflect.Value)
decfn func(decFnInfo, reflect.Value)
}
type fastpathA [{{ .FastpathLen }}]fastpathE
func (x *fastpathA) index(rtid uintptr) int {
// use binary search to grab the index (adapted from sort/search.go)
h, i, j := 0, 0, {{ .FastpathLen }} // len(x)
for i < j {
h = i + (j-i)/2
if x[h].rtid < rtid {
i = h + 1
} else {
j = h
}
}
if i < {{ .FastpathLen }} && x[i].rtid == rtid {
return i
}
return -1
}
type fastpathAslice []fastpathE
func (x fastpathAslice) Len() int { return len(x) }
func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid }
func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
var fastpathAV fastpathA
// due to possible initialization loop error, make fastpath in an init()
func init() {
if !fastpathEnabled {
return
}
i := 0
fn := func(v interface{}, fe func(encFnInfo, reflect.Value), fd func(decFnInfo, reflect.Value)) (f fastpathE) {
xrt := reflect.TypeOf(v)
xptr := reflect.ValueOf(xrt).Pointer()
fastpathAV[i] = fastpathE{xptr, xrt, fe, fd}
i++
return
}
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
fn([]{{ .Elem }}(nil), (encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if not .Slice }}
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}}
sort.Sort(fastpathAslice(fastpathAV[:]))
}
// -- encode
// -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
case []{{ .Elem }}:{{else}}
case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if .Slice }}
case *[]{{ .Elem }}:{{else}}
case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
{{end}}{{end}}
default:
return false
}
return true
}
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
case []{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
case *[]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
{{end}}{{end}}{{end}}
default:
return false
}
return true
}
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .Slice }}
case map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e)
case *map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e)
{{end}}{{end}}{{end}}
default:
return false
}
return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
func (f encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e)
}
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) {
ee := e.e
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeArrayStart(len(v))
if e.be {
for _, v2 := range v {
{{ encmd .Elem "v2"}}
}
} else {
for j, v2 := range v {
if j > 0 {
ee.EncodeArrayEntrySeparator()
}
{{ encmd .Elem "v2"}}
}
ee.EncodeArrayEnd()
}
}
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if not .Slice }}
func (f encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e)
}
func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) {
ee := e.e
if checkNil && v == nil {
ee.EncodeNil()
return
}
ee.EncodeMapStart(len(v))
{{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0{{end}}
if e.be {
for k2, v2 := range v {
{{if eq .MapKey "string"}}if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}{{else}}{{ encmd .MapKey "k2"}}{{end}}
{{ encmd .Elem "v2"}}
}
} else {
j := 0
for k2, v2 := range v {
if j > 0 {
ee.EncodeMapEntrySeparator()
}
{{if eq .MapKey "string"}}if asSymbols {
ee.EncodeSymbol(k2)
} else {
ee.EncodeString(c_UTF8, k2)
}{{else}}{{ encmd .MapKey "k2"}}{{end}}
ee.EncodeMapKVSeparator()
{{ encmd .Elem "v2"}}
j++
}
ee.EncodeMapEnd()
}
}
{{end}}{{end}}{{end}}
// -- decode
// -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
case []{{ .Elem }}:{{else}}
case map[{{ .MapKey }}]{{ .Elem }}:{{end}}
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if .Slice }}
case *[]{{ .Elem }}:{{else}}
case *map[{{ .MapKey }}]{{ .Elem }}:{{end}}
v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d)
if changed2 {
*v = v2
}
{{end}}{{end}}
default:
return false
}
return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if .Slice }}
{{/*
Slices can change if they
- did not come from an array
- are addressable (from a ptr)
- are settable (e.g. contained in an interface{})
*/}}
func (f decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
array := f.seq == seqTypeArray
if !array && rv.CanAddr() { // CanSet => CanAddr + Exported
vp := rv.Addr().Interface().(*[]{{ .Elem }})
v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d)
if changed {
*vp = v
}
} else {
v := rv.Interface().([]{{ .Elem }})
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d)
}
}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) {
v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d)
if changed {
*vp = v
}
}
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool,
d *Decoder) (_ []{{ .Elem }}, changed bool) {
dd := d.d
// if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil()
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
changed = true
}
return nil, changed
}
slh, containerLenS := d.decSliceHelperStart()
if canChange && v == nil {
if containerLenS <= 0 {
v = []{{ .Elem }}{}
} else {
v = make([]{{ .Elem }}, containerLenS, containerLenS)
}
changed = true
}
if containerLenS == 0 {
if canChange && len(v) != 0 {
v = v[:0]
changed = true
}{{/*
// slh.End() // dd.ReadArrayEnd()
*/}}
return v, changed
}
// for j := 0; j < containerLenS; j++ {
if containerLenS > 0 {
decLen := containerLenS
if containerLenS > cap(v) {
if canChange {
s := make([]{{ .Elem }}, containerLenS, containerLenS)
// copy(s, v[:cap(v)])
v = s
changed = true
} else {
d.arrayCannotExpand(len(v), containerLenS)
decLen = len(v)
}
} else if containerLenS != len(v) {
v = v[:containerLenS]
changed = true
}
// all checks done. cannot go past len.
j := 0
for ; j < decLen; j++ {
{{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }}
}
if !canChange {
for ; j < containerLenS; j++ {
d.swallow()
}
}
} else {
j := 0
for ; !dd.CheckBreak(); j++ {
if j >= len(v) {
if canChange {
v = append(v, {{ zerocmd .Elem }})
changed = true
} else {
d.arrayCannotExpand(len(v), j+1)
}
}
if j > 0 {
slh.Sep(j)
}
if j < len(v) { // all checks done. cannot go past len.
{{ if eq .Elem "interface{}" }}d.decode(&v[j])
{{ else }}v[j] = {{ decmd .Elem }}{{ end }}
} else {
d.swallow()
}
}
slh.End()
}
return v, changed
}
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if not .Slice }}
{{/*
Maps can change if they are
- addressable (from a ptr)
- settable (e.g. contained in an interface{})
*/}}
func (f decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) {
if rv.CanAddr() {
vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }})
v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d)
if changed {
*vp = v
}
} else {
v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }})
fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d)
}
}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) {
v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d)
if changed {
*vp = v
}
}
func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool,
d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) {
dd := d.d
// if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil()
if checkNil && dd.TryDecodeAsNil() {
if v != nil {
changed = true
}
return nil, changed
}
containerLen := dd.ReadMapStart()
if canChange && v == nil {
if containerLen > 0 {
v = make(map[{{ .MapKey }}]{{ .Elem }}, containerLen)
} else {
v = make(map[{{ .MapKey }}]{{ .Elem }}) // supports indefinite-length, etc
}
changed = true
}
if containerLen > 0 {
for j := 0; j < containerLen; j++ {
{{ if eq .MapKey "interface{}" }}var mk interface{}
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = string(bv) // maps cannot have []byte as key. switch to string.
}{{ else }}mk := {{ decmd .MapKey }}{{ end }}
mv := v[mk]
{{ if eq .Elem "interface{}" }}d.decode(&mv)
{{ else }}mv = {{ decmd .Elem }}{{ end }}
if v != nil {
v[mk] = mv
}
}
} else if containerLen < 0 {
for j := 0; !dd.CheckBreak(); j++ {
if j > 0 {
dd.ReadMapEntrySeparator()
}
{{ if eq .MapKey "interface{}" }}var mk interface{}
d.decode(&mk)
if bv, bok := mk.([]byte); bok {
mk = string(bv) // maps cannot have []byte as key. switch to string.
}{{ else }}mk := {{ decmd .MapKey }}{{ end }}
dd.ReadMapKVSeparator()
mv := v[mk]
{{ if eq .Elem "interface{}" }}d.decode(&mv)
{{ else }}mv = {{ decmd .Elem }}{{ end }}
if v != nil {
v[mk] = mv
}
}
dd.ReadMapEnd()
}
return v, changed
}
{{end}}{{end}}{{end}}

View File

@ -0,0 +1,77 @@
{{var "v"}} := *{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart()
var {{var "c"}} bool
{{ if not isArray }}if {{var "v"}} == nil {
if {{var "l"}} <= 0 {
{{var "v"}} = make({{ .CTyp }}, 0)
} else {
{{var "v"}} = make({{ .CTyp }}, {{var "l"}})
}
{{var "c"}} = true
}
{{ end }}
if {{var "l"}} == 0 { {{ if isSlice }}
if len({{var "v"}}) != 0 {
{{var "v"}} = {{var "v"}}[:0]
{{var "c"}} = true
} {{ end }}
} else if {{var "l"}} > 0 {
{{ if isChan }}
for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
{{ else }}
{{var "n"}} := {{var "l"}}
if {{var "l"}} > cap({{var "v"}}) {
{{ if isArray }}r.ReadArrayCannotExpand(len({{var "v"}}), {{var "l"}})
{{var "n"}} = len({{var "v"}})
{{ else }}{{ if .Immutable }}
{{var "v2"}} := {{var "v"}}
{{var "v"}} = make([]{{ .Typ }}, {{var "l"}}, {{var "l"}})
if len({{var "v"}}) > 0 {
copy({{var "v"}}, {{var "v2"}}[:cap({{var "v2"}})])
}
{{ else }}{{var "v"}} = make([]{{ .Typ }}, {{var "l"}}, {{var "l"}})
{{ end }}{{var "c"}} = true
{{ end }}
} else if {{var "l"}} != len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
{{var "c"}} = true
}
{{var "j"}} := 0
for ; {{var "j"}} < {{var "n"}} ; {{var "j"}}++ {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} {{ if isArray }}
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
z.DecSwallow()
}{{ end }}
{{ end }}{{/* closing if not chan */}}
} else {
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
if {{var "j"}} >= len({{var "v"}}) {
{{ if isArray }}r.ReadArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
{{ else if isSlice}}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
{{var "c"}} = true {{ end }}
}
if {{var "j"}} > 0 {
{{var "h"}}.Sep({{var "j"}})
}
{{ if isChan}}
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
{{ else }}
if {{var "j"}} < len({{var "v"}}) {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} else {
z.DecSwallow()
}
{{ end }}
}
{{var "h"}}.End()
}
if {{var "c"}} {
*{{ .Varname }} = {{var "v"}}
}

View File

@ -0,0 +1,46 @@
{{var "v"}} := *{{ .Varname }}
{{var "l"}} := r.ReadMapStart()
if {{var "v"}} == nil {
if {{var "l"}} > 0 {
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "l"}})
} else {
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}) // supports indefinite-length, etc
}
*{{ .Varname }} = {{var "v"}}
}
if {{var "l"}} > 0 {
for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
var {{var "mk"}} {{ .KTyp }}
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}// special case if a byte array.
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
}
{{ end }}
{{var "mv"}} := {{var "v"}}[{{var "mk"}}]
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
if {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} else if {{var "l"}} < 0 {
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
if {{var "j"}} > 0 {
r.ReadMapEntrySeparator()
}
var {{var "mk"}} {{ .KTyp }}
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}// special case if a byte array.
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
}
{{ end }}
r.ReadMapKVSeparator()
{{var "mv"}} := {{var "v"}}[{{var "mk"}}]
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
if {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
r.ReadMapEnd()
} // else len==0: TODO: Should we clear map entries?

View File

@ -0,0 +1,102 @@
// //+build ignore
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
// ************************************************************
package codec
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice.
//
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).
//
// When static codecs are created for types, they will use this value
// to perform encoding or decoding of primitives or known slice or map types.
// GenHelperEncoder is exported so that it can be used externally by codecgen.
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
return genHelperEncoder{e: e}, e.e
}
// GenHelperDecoder is exported so that it can be used externally by codecgen.
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
return genHelperDecoder{d: d}, d.d
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
e *Encoder
F fastpathT
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct {
d *Decoder
F fastpathT
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
return f.e.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// println(">>>>>>>>> EncFallback")
f.e.encodeI(iv, false, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
return f.d.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinary() bool {
return f.d.be // f.d.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() {
f.d.swallow()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchBuffer() []byte {
return f.d.b[:]
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
// println(">>>>>>>>> DecFallback")
f.d.decodeI(iv, chkPtr, false, false, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
return f.d.decSliceHelperStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
f.d.structFieldNotFound(index, name)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
f.d.arrayCannotExpand(sliceLen, streamLen)
}

View File

@ -0,0 +1,250 @@
// //+build ignore
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
// ************************************************************
package codec
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice.
//
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).
//
// When static codecs are created for types, they will use this value
// to perform encoding or decoding of primitives or known slice or map types.
// GenHelperEncoder is exported so that it can be used externally by codecgen.
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
return genHelperEncoder{e:e}, e.e
}
// GenHelperDecoder is exported so that it can be used externally by codecgen.
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
return genHelperDecoder{d:d}, d.d
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
e *Encoder
F fastpathT
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct {
d *Decoder
F fastpathT
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
return f.e.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// println(">>>>>>>>> EncFallback")
f.e.encodeI(iv, false, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
return f.d.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinary() bool {
return f.d.be // f.d.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() {
f.d.swallow()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchBuffer() []byte {
return f.d.b[:]
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
// println(">>>>>>>>> DecFallback")
f.d.decodeI(iv, chkPtr, false, false, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
return f.d.decSliceHelperStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
f.d.structFieldNotFound(index, name)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
f.d.arrayCannotExpand(sliceLen, streamLen)
}
{{/*
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncDriver() encDriver {
return f.e.e
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecDriver() decDriver {
return f.d.d
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncNil() {
f.e.e.EncodeNil()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBytes(v []byte) {
f.e.e.EncodeStringBytes(c_RAW, v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayStart(length int) {
f.e.e.EncodeArrayStart(length)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayEnd() {
f.e.e.EncodeArrayEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayEntrySeparator() {
f.e.e.EncodeArrayEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapStart(length int) {
f.e.e.EncodeMapStart(length)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapEnd() {
f.e.e.EncodeMapEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapEntrySeparator() {
f.e.e.EncodeMapEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapKVSeparator() {
f.e.e.EncodeMapKVSeparator()
}
// ---------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBytes(v *[]byte) {
*v = f.d.d.DecodeBytes(*v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTryNil() bool {
return f.d.d.TryDecodeAsNil()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsNil() (b bool) {
return f.d.d.IsContainerType(valueTypeNil)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsMap() (b bool) {
return f.d.d.IsContainerType(valueTypeMap)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsArray() (b bool) {
return f.d.d.IsContainerType(valueTypeArray)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecCheckBreak() bool {
return f.d.d.CheckBreak()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapStart() int {
return f.d.d.ReadMapStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayStart() int {
return f.d.d.ReadArrayStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapEnd() {
f.d.d.ReadMapEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayEnd() {
f.d.d.ReadArrayEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayEntrySeparator() {
f.d.d.ReadArrayEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapEntrySeparator() {
f.d.d.ReadMapEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapKVSeparator() {
f.d.d.ReadMapKVSeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte {
return f.d.d.DecodeStringAsBytes(bs)
}
// -- encode calls (primitives)
{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) {
ee := f.e.e
{{ encmd .Primitive "v" }}
}
{{ end }}{{ end }}{{ end }}
// -- decode calls (primitives)
{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) {
dd := f.d.d
*vp = {{ decmd .Primitive }}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) {
dd := f.d.d
v = {{ decmd .Primitive }}
return
}
{{ end }}{{ end }}{{ end }}
// -- encode calls (slices/maps)
{{range .Values}}{{if not .Primitive }}{{if .Slice }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e)
}
{{ end }}{{ end }}
// -- decode calls (slices/maps)
{{range .Values}}{{if not .Primitive }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) {
{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d)
if changed {
*vp = v
}
}
{{ end }}{{ end }}
*/}}

View File

@ -0,0 +1,136 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
const genDecMapTmpl = `
{{var "v"}} := *{{ .Varname }}
{{var "l"}} := r.ReadMapStart()
if {{var "v"}} == nil {
if {{var "l"}} > 0 {
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "l"}})
} else {
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}) // supports indefinite-length, etc
}
*{{ .Varname }} = {{var "v"}}
}
if {{var "l"}} > 0 {
for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
var {{var "mk"}} {{ .KTyp }}
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}// special case if a byte array.
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
}
{{ end }}
{{var "mv"}} := {{var "v"}}[{{var "mk"}}]
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
if {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} else if {{var "l"}} < 0 {
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
if {{var "j"}} > 0 {
r.ReadMapEntrySeparator()
}
var {{var "mk"}} {{ .KTyp }}
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}// special case if a byte array.
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
}
{{ end }}
r.ReadMapKVSeparator()
{{var "mv"}} := {{var "v"}}[{{var "mk"}}]
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
if {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
r.ReadMapEnd()
} // else len==0: TODO: Should we clear map entries?
`
const genDecListTmpl = `
{{var "v"}} := *{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart()
var {{var "c"}} bool
{{ if not isArray }}if {{var "v"}} == nil {
if {{var "l"}} <= 0 {
{{var "v"}} = make({{ .CTyp }}, 0)
} else {
{{var "v"}} = make({{ .CTyp }}, {{var "l"}})
}
{{var "c"}} = true
}
{{ end }}
if {{var "l"}} == 0 { {{ if isSlice }}
if len({{var "v"}}) != 0 {
{{var "v"}} = {{var "v"}}[:0]
{{var "c"}} = true
} {{ end }}
} else if {{var "l"}} > 0 {
{{ if isChan }}
for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
{{ else }}
{{var "n"}} := {{var "l"}}
if {{var "l"}} > cap({{var "v"}}) {
{{ if isArray }}r.ReadArrayCannotExpand(len({{var "v"}}), {{var "l"}})
{{var "n"}} = len({{var "v"}})
{{ else }}{{ if .Immutable }}
{{var "v2"}} := {{var "v"}}
{{var "v"}} = make([]{{ .Typ }}, {{var "l"}}, {{var "l"}})
if len({{var "v"}}) > 0 {
copy({{var "v"}}, {{var "v2"}}[:cap({{var "v2"}})])
}
{{ else }}{{var "v"}} = make([]{{ .Typ }}, {{var "l"}}, {{var "l"}})
{{ end }}{{var "c"}} = true
{{ end }}
} else if {{var "l"}} != len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
{{var "c"}} = true
}
{{var "j"}} := 0
for ; {{var "j"}} < {{var "n"}} ; {{var "j"}}++ {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} {{ if isArray }}
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
z.DecSwallow()
}{{ end }}
{{ end }}{{/* closing if not chan */}}
} else {
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
if {{var "j"}} >= len({{var "v"}}) {
{{ if isArray }}r.ReadArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
{{ else if isSlice}}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
{{var "c"}} = true {{ end }}
}
if {{var "j"}} > 0 {
{{var "h"}}.Sep({{var "j"}})
}
{{ if isChan}}
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
{{ else }}
if {{var "j"}} < len({{var "v"}}) {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} else {
z.DecSwallow()
}
{{ end }}
}
{{var "h"}}.End()
}
if {{var "c"}} {
*{{ .Varname }} = {{var "v"}}
}
`

1709
Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,846 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// Contains code shared by both encode and decode.
// Some shared ideas around encoding/decoding
// ------------------------------------------
//
// If an interface{} is passed, we first do a type assertion to see if it is
// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
//
// If we start with a reflect.Value, we are already in reflect.Value land and
// will try to grab the function for the underlying Type and directly call that function.
// This is more performant than calling reflect.Value.Interface().
//
// This still helps us bypass many layers of reflection, and gives the best performance.
//
// Containers
// ------------
// Containers in the stream are either associative arrays (key-value pairs) or
// regular arrays (indexed by incrementing integers).
//
// Some streams support indefinite-length containers, and use a breaking
// byte-sequence to denote that the container has come to an end.
//
// Some streams also are text-based, and use explicit separators to denote the
// end/beginning of different values.
//
// During encode, we use a high-level condition to determine how to iterate through
// the container. That decision is based on whether the container is text-based (with
// separators) or binary (without separators). If binary, we do not even call the
// encoding of separators.
//
// During decode, we use a different high-level condition to determine how to iterate
// through the containers. That decision is based on whether the stream contained
// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
// it has to be binary, and we do not even try to read separators.
//
// The only codec that may suffer (slightly) is cbor, and only when decoding indefinite-length.
// It may suffer because we treat it like a text-based codec, and read separators.
// However, this read is a no-op and the cost is insignificant.
//
// Philosophy
// ------------
// On decode, this codec will update containers appropriately:
// - If struct, update fields from stream into fields of struct.
// If field in stream not found in struct, handle appropriately (based on option).
// If a struct field has no corresponding value in the stream, leave it AS IS.
// If nil in stream, set value to nil/zero value.
// - If map, update map from stream.
// If the stream value is NIL, set the map to nil.
// - If slice, try to update up to the length of the array in the stream.
// If the container length is less than the stream array length,
// and the container cannot be expanded, handle appropriately (based on option).
// This means you can decode a 4-element stream array into a 1-element array.
//
// ------------------------------------
// On encode, the user can specify omitEmpty. This means that the value will be omitted
// if it is the zero value. A problem may occur during decode, where omitted values do not affect
// the value being decoded into. This means that if decoding into a struct with an
// int field with current value=5, and the field is omitted in the stream, then after
// decoding, the value will still be 5 (not 0).
// omitEmpty only works if you guarantee that you always decode into zero-values.
//
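// A minimal sketch of that caveat, assuming the `codec` struct tag handling
// described later in this file (the names T and n are only for illustration):
//
//	type T struct {
//		N int `codec:"n,omitempty"`
//	}
//	t := T{N: 5}
//	// decoding a stream that omits "n" into &t leaves t.N == 5, not 0
//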
// ------------------------------------
// We could have truncated a map to remove keys not available in the stream,
// or set values in the struct which are not in the stream to their zero values.
// We decided against it because there is no efficient way to do it.
// We may introduce it as an option later.
// However, that will require enabling it for both runtime and code generation modes.
//
// To support truncate, we need to do 2 passes over the container:
// map
// - first collect all keys (e.g. in k1)
// - for each key in stream, mark k1 that the key should not be removed
// - after updating map, do second pass and call delete for all keys in k1 which are not marked
// struct:
// - for each field, track the *typeInfo s1
// - iterate through all s1, and for each one not marked, set value to zero
// - this involves checking the possible anonymous fields which are nil ptrs.
// too much work.
//
// ------------------------------------------
// Error Handling is done within the library using panic.
//
// This way, the code doesn't have to keep checking if an error has happened,
// and we don't have to keep sending the error value along with each call
// or storing it in the En|Decoder and checking it constantly along the way.
//
// The disadvantage is that small functions which use panics cannot be inlined.
// The code accounts for that by only using panics behind an interface;
// since interface calls cannot be inlined, this is irrelevant.
//
// We considered storing the error in the En|Decoder.
// - once it has its err field set, it cannot be used again.
// - panicking will be optional, controlled by const flag.
// - code should always check error first and return early.
// We eventually decided against it as it makes the code clumsier to always
// check for these error conditions.
import (
"encoding"
"encoding/binary"
"errors"
"fmt"
"math"
"reflect"
"sort"
"strings"
"sync"
"time"
"unicode"
"unicode/utf8"
)
const (
scratchByteArrayLen = 32
// Support encoding.(Binary|Text)(Unm|M)arshaler.
// This constant flag will enable or disable it.
supportMarshalInterfaces = true
// Each Encoder or Decoder uses a cache of functions based on conditionals,
// so that the conditionals are not run every time.
//
// Either a map or a slice is used to keep track of the functions.
// The map is more natural, but has a higher cost than a slice/array.
// This flag (useMapForCodecCache) controls which is used.
//
// From benchmarks, slices with linear search perform better with < 32 entries.
// We have typically seen a high threshold of about 24 entries.
useMapForCodecCache = false
// for debugging, set this to false, to catch panic traces.
// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
recoverPanicToErr = true
// Fast path functions try to create a fast path encode or decode implementation
// for common maps and slices, by by-passing reflection altogether.
fastpathEnabled = true
// if checkStructForEmptyValue, check struct fields to see if they hold an empty value.
// This could be an expensive call, so possibly disable it.
checkStructForEmptyValue = false
// if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue
derefForIsEmptyValue = false
)
var oneByteArr = [1]byte{0}
var zeroByteSlice = oneByteArr[:0:0]
type charEncoding uint8
const (
c_RAW charEncoding = iota
c_UTF8
c_UTF16LE
c_UTF16BE
c_UTF32LE
c_UTF32BE
)
// valueType is the stream type
type valueType uint8
const (
valueTypeUnset valueType = iota
valueTypeNil
valueTypeInt
valueTypeUint
valueTypeFloat
valueTypeBool
valueTypeString
valueTypeSymbol
valueTypeBytes
valueTypeMap
valueTypeArray
valueTypeTimestamp
valueTypeExt
// valueTypeInvalid = 0xff
)
type seqType uint8
const (
_ seqType = iota
seqTypeArray
seqTypeSlice
seqTypeChan
)
var (
bigen = binary.BigEndian
structInfoFieldName = "_struct"
cachedTypeInfo = make(map[uintptr]*typeInfo, 64)
cachedTypeInfoMutex sync.RWMutex
// mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
intfSliceTyp = reflect.TypeOf([]interface{}(nil))
intfTyp = intfSliceTyp.Elem()
stringTyp = reflect.TypeOf("")
timeTyp = reflect.TypeOf(time.Time{})
rawExtTyp = reflect.TypeOf(RawExt{})
uint8SliceTyp = reflect.TypeOf([]uint8(nil))
mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
intfTypId = reflect.ValueOf(intfTyp).Pointer()
timeTypId = reflect.ValueOf(timeTyp).Pointer()
stringTypId = reflect.ValueOf(stringTyp).Pointer()
// mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer()
intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits())
uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits())
bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
chkOvf checkOverflow
noFieldNameToStructFieldInfoErr = errors.New("no field name passed to parseStructFieldInfo")
)
// Selfer defines methods by which a value can encode or decode itself.
//
// Any type which implements Selfer will be able to encode or decode itself.
// Consequently, during (en|de)code, this takes precedence over
// (text|binary)(M|Unm)arshal or extension support.
type Selfer interface {
CodecEncodeSelf(*Encoder)
CodecDecodeSelf(*Decoder)
}
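// selferSketch is a minimal sketch of a Selfer implementation, assuming the
// unexported encode/decode helpers on Encoder and Decoder used elsewhere in this
// package; real implementations are typically generated by codecgen.
type selferSketch struct{ N int }

func (s *selferSketch) CodecEncodeSelf(e *Encoder) { e.encode(s.N) }
func (s *selferSketch) CodecDecodeSelf(d *Decoder) { d.decode(&s.N) }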
// MapBySlice represents a slice which should be encoded as a map in the stream.
// The slice contains a sequence of key-value pairs.
// This affords storing a map in a specific sequence in the stream.
//
// The support of MapBySlice affords the following:
// - A slice type which implements MapBySlice will be encoded as a map
// - A slice can be decoded from a map in the stream
type MapBySlice interface {
MapBySlice()
}
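// mapBySliceSketch is a minimal sketch of the contract above: a named slice type
// holding alternating keys and values, e.g. mapBySliceSketch{"k1", "v1", "k2", "v2"},
// is written to the stream as a map.
type mapBySliceSketch []string

func (mapBySliceSketch) MapBySlice() {}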
// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
//
// BasicHandle encapsulates the common options and extension functions.
type BasicHandle struct {
extHandle
EncodeOptions
DecodeOptions
}
func (x *BasicHandle) getBasicHandle() *BasicHandle {
return x
}
// Handle is the interface for a specific encoding format.
//
// Typically, a Handle is pre-configured before first time use,
// and not modified while in use. Such a pre-configured Handle
// is safe for concurrent access.
type Handle interface {
getBasicHandle() *BasicHandle
newEncDriver(w *Encoder) encDriver
newDecDriver(r *Decoder) decDriver
isBinary() bool
}
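// A pattern implied by the comment above (a sketch, not a requirement): configure a
// Handle once at package level and share it, since a pre-configured Handle is safe
// for concurrent access.
var sharedJSONHandleSketch = &JsonHandle{}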
// RawExt represents raw unprocessed extension data.
// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag.
//
// Only one of Data or Value is nil. If Data is nil, then the content of the RawExt is in the Value.
type RawExt struct {
Tag uint64
// Data is the []byte which represents the raw ext. If Data is nil, ext is exposed in Value.
// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types
Data []byte
// Value represents the extension, if Data is nil.
// Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
Value interface{}
}
// Ext handles custom (de)serialization of custom types / extensions.
type Ext interface {
// WriteExt converts a value to a []byte.
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
WriteExt(v interface{}) []byte
// ReadExt updates a value from a []byte.
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
ReadExt(dst interface{}, src []byte)
// ConvertExt converts a value into a simpler interface for easy encoding e.g. convert time.Time to int64.
// It is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
ConvertExt(v interface{}) interface{}
// UpdateExt updates a value from a simpler interface for easy decoding e.g. convert int64 to time.Time.
// It is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
UpdateExt(dst interface{}, src interface{})
}
// bytesExt is a wrapper implementation to support former AddExt exported method.
type bytesExt struct {
encFn func(reflect.Value) ([]byte, error)
decFn func(reflect.Value, []byte) error
}
func (x bytesExt) WriteExt(v interface{}) []byte {
// fmt.Printf(">>>>>>>>>> WriteExt: %T, %v\n", v, v)
bs, err := x.encFn(reflect.ValueOf(v))
if err != nil {
panic(err)
}
return bs
}
func (x bytesExt) ReadExt(v interface{}, bs []byte) {
// fmt.Printf(">>>>>>>>>> ReadExt: %T, %v\n", v, v)
if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
panic(err)
}
}
func (x bytesExt) ConvertExt(v interface{}) interface{} {
return x.WriteExt(v)
}
func (x bytesExt) UpdateExt(dest interface{}, v interface{}) {
x.ReadExt(dest, v.([]byte))
}
// type errorString string
// func (x errorString) Error() string { return string(x) }
type binaryEncodingType struct{}
func (_ binaryEncodingType) isBinary() bool { return true }
type textEncodingType struct{}
func (_ textEncodingType) isBinary() bool { return false }
// noBuiltInTypes is embedded into many types which do not support builtins
// e.g. msgpack, simple, cbor.
type noBuiltInTypes struct{}
func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool { return false }
func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
type noStreamingCodec struct{}
func (_ noStreamingCodec) CheckBreak() bool { return false }
// bigenHelper.
// Users must already slice the x completely, because we will not reslice.
type bigenHelper struct {
x []byte // must be correctly sliced to appropriate len. slicing is a cost.
w encWriter
}
func (z bigenHelper) writeUint16(v uint16) {
bigen.PutUint16(z.x, v)
z.w.writeb(z.x)
}
func (z bigenHelper) writeUint32(v uint32) {
bigen.PutUint32(z.x, v)
z.w.writeb(z.x)
}
func (z bigenHelper) writeUint64(v uint64) {
bigen.PutUint64(z.x, v)
z.w.writeb(z.x)
}
type extTypeTagFn struct {
rtid uintptr
rt reflect.Type
tag uint64
ext Ext
}
type extHandle []*extTypeTagFn
// DEPRECATED: AddExt is deprecated in favor of SetExt. It exists for compatibility only.
//
// AddExt registers an encode and decode function for a reflect.Type.
// AddExt internally calls SetExt.
// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
func (o *extHandle) AddExt(
rt reflect.Type, tag byte,
encfn func(reflect.Value) ([]byte, error), decfn func(reflect.Value, []byte) error,
) (err error) {
if encfn == nil || decfn == nil {
return o.SetExt(rt, uint64(tag), nil)
}
return o.SetExt(rt, uint64(tag), bytesExt{encfn, decfn})
}
// SetExt registers a tag and Ext for a reflect.Type.
//
// Note that the type must be a named type, and specifically not
// a pointer or interface. An error is returned if that is not honored.
//
// To Deregister an ext, call SetExt with nil Ext
func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
// o is a pointer, because we may need to initialize it
if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T",
reflect.Zero(rt).Interface())
return
}
rtid := reflect.ValueOf(rt).Pointer()
for _, v := range *o {
if v.rtid == rtid {
v.tag, v.ext = tag, ext
return
}
}
*o = append(*o, &extTypeTagFn{rtid, rt, tag, ext})
return
}
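// A minimal sketch of the call shape, assuming a named non-pointer type MyType and
// a value myExt implementing Ext (both hypothetical, introduced only for this sketch):
//
//	var mh MsgpackHandle
//	if err := mh.SetExt(reflect.TypeOf(MyType{}), 78, myExt{}); err != nil {
//		// a pointer or interface type ends up here
//	}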
func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
for _, v := range o {
if v.rtid == rtid {
return v
}
}
return nil
}
func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn {
for _, v := range o {
if v.tag == tag {
return v
}
}
return nil
}
type structFieldInfo struct {
encName string // encode name
// only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.
is []int // (recursive/embedded) field index in struct
i int16 // field index in struct
omitEmpty bool
toArray bool // if field is _struct, is the toArray set?
}
// field returns the field of the struct identified by si.
// If an intermediate anonymous pointer is nil and update is false, it returns an invalid reflect.Value.
func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) {
if si.i != -1 {
v = v.Field(int(si.i))
return v
}
// replicate FieldByIndex
for _, x := range si.is {
for v.Kind() == reflect.Ptr {
if v.IsNil() {
if !update {
return
}
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
v = v.Field(x)
}
return v
}
func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
if si.i != -1 {
v = v.Field(int(si.i))
v.Set(reflect.Zero(v.Type()))
// v.Set(reflect.New(v.Type()).Elem())
// v.Set(reflect.New(v.Type()))
} else {
// replicate FieldByIndex
for _, x := range si.is {
for v.Kind() == reflect.Ptr {
if v.IsNil() {
return
}
v = v.Elem()
}
v = v.Field(x)
}
v.Set(reflect.Zero(v.Type()))
}
}
func parseStructFieldInfo(fname string, stag string) *structFieldInfo {
if fname == "" {
panic(noFieldNameToStructFieldInfoErr)
}
si := structFieldInfo{
encName: fname,
}
if stag != "" {
for i, s := range strings.Split(stag, ",") {
if i == 0 {
if s != "" {
si.encName = s
}
} else {
if s == "omitempty" {
si.omitEmpty = true
} else if s == "toarray" {
si.toArray = true
}
}
}
}
// si.encNameBs = []byte(si.encName)
return &si
}
type sfiSortedByEncName []*structFieldInfo
func (p sfiSortedByEncName) Len() int {
return len(p)
}
func (p sfiSortedByEncName) Less(i, j int) bool {
return p[i].encName < p[j].encName
}
func (p sfiSortedByEncName) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
// typeInfo keeps information about each type referenced in the encode/decode sequence.
//
// During an encode/decode sequence, we work as below:
// - If base is a built in type, en/decode base value
// - If base is registered as an extension, en/decode base value
// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
// - Else decode appropriately based on the reflect.Kind
type typeInfo struct {
sfi []*structFieldInfo // sorted. Used when enc/dec struct to map.
sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array.
rt reflect.Type
rtid uintptr
// baseId gives the pointer to the base reflect.Type, after dereferencing
// the pointers. E.g. base type of ***time.Time is time.Time.
base reflect.Type
baseId uintptr
baseIndir int8 // number of indirections to get to base
mbs bool // base type (T or *T) is a MapBySlice
bm bool // base type (T or *T) is a binaryMarshaler
bunm bool // base type (T or *T) is a binaryUnmarshaler
bmIndir int8 // number of indirections to get to binaryMarshaler type
bunmIndir int8 // number of indirections to get to binaryUnmarshaler type
tm bool // base type (T or *T) is a textMarshaler
tunm bool // base type (T or *T) is a textUnmarshaler
tmIndir int8 // number of indirections to get to textMarshaler type
tunmIndir int8 // number of indirections to get to textUnmarshaler type
cs bool // base type (T or *T) is a Selfer
csIndir int8 // number of indirections to get to Selfer type
toArray bool // whether this (struct) type should be encoded as an array
}
func (ti *typeInfo) indexForEncName(name string) int {
//tisfi := ti.sfi
const binarySearchThreshold = 16
if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
// linear search. faster than binary search in my testing up to 16-field structs.
for i, si := range ti.sfi {
if si.encName == name {
return i
}
}
} else {
// binary search. adapted from sort/search.go.
h, i, j := 0, 0, sfilen
for i < j {
h = i + (j-i)/2
if ti.sfi[h].encName < name {
i = h + 1
} else {
j = h
}
}
if i < sfilen && ti.sfi[i].encName == name {
return i
}
}
return -1
}
func getStructTag(t reflect.StructTag) (s string) {
// check for tags: codec, json, in that order.
// this allows seamless support for many configured structs.
s = t.Get("codec")
if s == "" {
s = t.Get("json")
}
return
}
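// tagSketch is a minimal sketch of the tag handling above: "codec" takes priority
// over "json", "-" skips a field, and ",omitempty"/",toarray" set the corresponding
// structFieldInfo options.
type tagSketch struct {
	A string `codec:"a,omitempty" json:"ignored"`
	B int    `json:"b"`
	C bool   `codec:"-"`
}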
func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
var ok bool
cachedTypeInfoMutex.RLock()
pti, ok = cachedTypeInfo[rtid]
cachedTypeInfoMutex.RUnlock()
if ok {
return
}
cachedTypeInfoMutex.Lock()
defer cachedTypeInfoMutex.Unlock()
if pti, ok = cachedTypeInfo[rtid]; ok {
return
}
ti := typeInfo{rt: rt, rtid: rtid}
pti = &ti
var indir int8
if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
ti.bm, ti.bmIndir = true, indir
}
if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok {
ti.bunm, ti.bunmIndir = true, indir
}
if ok, indir = implementsIntf(rt, textMarshalerTyp); ok {
ti.tm, ti.tmIndir = true, indir
}
if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok {
ti.tunm, ti.tunmIndir = true, indir
}
if ok, indir = implementsIntf(rt, selferTyp); ok {
ti.cs, ti.csIndir = true, indir
}
if ok, _ = implementsIntf(rt, mapBySliceTyp); ok {
ti.mbs = true
}
pt := rt
var ptIndir int8
// for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { }
for pt.Kind() == reflect.Ptr {
pt = pt.Elem()
ptIndir++
}
if ptIndir == 0 {
ti.base = rt
ti.baseId = rtid
} else {
ti.base = pt
ti.baseId = reflect.ValueOf(pt).Pointer()
ti.baseIndir = ptIndir
}
if rt.Kind() == reflect.Struct {
var siInfo *structFieldInfo
if f, ok := rt.FieldByName(structInfoFieldName); ok {
siInfo = parseStructFieldInfo(structInfoFieldName, getStructTag(f.Tag))
ti.toArray = siInfo.toArray
}
sfip := make([]*structFieldInfo, 0, rt.NumField())
rgetTypeInfo(rt, nil, make(map[string]bool, 16), &sfip, siInfo)
ti.sfip = make([]*structFieldInfo, len(sfip))
ti.sfi = make([]*structFieldInfo, len(sfip))
copy(ti.sfip, sfip)
sort.Sort(sfiSortedByEncName(sfip))
copy(ti.sfi, sfip)
}
// sfi = sfip
cachedTypeInfo[rtid] = pti
return
}
func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool,
sfi *[]*structFieldInfo, siInfo *structFieldInfo,
) {
for j := 0; j < rt.NumField(); j++ {
f := rt.Field(j)
// func types are skipped.
if tk := f.Type.Kind(); tk == reflect.Func {
continue
}
stag := getStructTag(f.Tag)
if stag == "-" {
continue
}
if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
continue
}
// if anonymous and there is no struct tag and it's a struct (or pointer to struct), inline it.
if f.Anonymous && stag == "" {
ft := f.Type
for ft.Kind() == reflect.Ptr {
ft = ft.Elem()
}
if ft.Kind() == reflect.Struct {
indexstack2 := make([]int, len(indexstack)+1, len(indexstack)+4)
copy(indexstack2, indexstack)
indexstack2[len(indexstack)] = j
// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo)
continue
}
}
// do not let fields with the same name in embedded structs override a field at a higher level.
// this must be done after the anonymous check, so that anonymous fields
// can still include their child fields
if _, ok := fnameToHastag[f.Name]; ok {
continue
}
si := parseStructFieldInfo(f.Name, stag)
// si.ikind = int(f.Type.Kind())
if len(indexstack) == 0 {
si.i = int16(j)
} else {
si.i = -1
si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
}
if siInfo != nil {
if siInfo.omitEmpty {
si.omitEmpty = true
}
}
*sfi = append(*sfi, si)
fnameToHastag[f.Name] = stag != ""
}
}
func panicToErr(err *error) {
if recoverPanicToErr {
if x := recover(); x != nil {
//debug.PrintStack()
panicValToErr(x, err)
}
}
}
// func doPanic(tag string, format string, params ...interface{}) {
// params2 := make([]interface{}, len(params)+1)
// params2[0] = tag
// copy(params2[1:], params)
// panic(fmt.Errorf("%s: "+format, params2...))
// }
func isMutableKind(k reflect.Kind) (v bool) {
return k == reflect.Int ||
k == reflect.Int8 ||
k == reflect.Int16 ||
k == reflect.Int32 ||
k == reflect.Int64 ||
k == reflect.Uint ||
k == reflect.Uint8 ||
k == reflect.Uint16 ||
k == reflect.Uint32 ||
k == reflect.Uint64 ||
k == reflect.Float32 ||
k == reflect.Float64 ||
k == reflect.Bool ||
k == reflect.String
}
// these functions must be inlinable, and not call anybody
type checkOverflow struct{}
func (_ checkOverflow) Float32(f float64) (overflow bool) {
if f < 0 {
f = -f
}
return math.MaxFloat32 < f && f <= math.MaxFloat64
}
func (_ checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
if bitsize == 0 || bitsize >= 64 || v == 0 {
return
}
if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
overflow = true
}
return
}
func (_ checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
if bitsize == 0 || bitsize >= 64 || v == 0 {
return
}
if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
overflow = true
}
return
}
func (_ checkOverflow) SignedInt(v uint64) (i int64, overflow bool) {
//e.g. -127 to 128 for int8
pos := (v >> 63) == 0
ui2 := v & 0x7fffffffffffffff
if pos {
if ui2 > math.MaxInt64 {
overflow = true
return
}
} else {
if ui2 > math.MaxInt64-1 {
overflow = true
return
}
}
i = int64(v)
return
}

View File

@ -0,0 +1,151 @@
// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// All non-std package dependencies live in this file,
// so porting to different environment is easy (just update functions).
import (
"errors"
"fmt"
"math"
"reflect"
)
func panicValToErr(panicVal interface{}, err *error) {
if panicVal == nil {
return
}
// case nil
switch xerr := panicVal.(type) {
case error:
*err = xerr
case string:
*err = errors.New(xerr)
default:
*err = fmt.Errorf("%v", panicVal)
}
return
}
func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
switch v.Kind() {
case reflect.Invalid:
return true
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
if deref {
if v.IsNil() {
return true
}
return hIsEmptyValue(v.Elem(), deref, checkStruct)
} else {
return v.IsNil()
}
case reflect.Struct:
if !checkStruct {
return false
}
// return true if all fields are empty. else return false.
// we cannot use equality check, because some fields may be maps/slices/etc
// and consequently the structs are not comparable.
// return v.Interface() == reflect.Zero(v.Type()).Interface()
for i, n := 0, v.NumField(); i < n; i++ {
if !hIsEmptyValue(v.Field(i), deref, checkStruct) {
return false
}
}
return true
}
return false
}
func isEmptyValue(v reflect.Value) bool {
return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue)
}
func pruneSignExt(v []byte, pos bool) (n int) {
if len(v) < 2 {
} else if pos && v[0] == 0 {
for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
}
} else if !pos && v[0] == 0xff {
for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
}
}
return
}
func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
if typ == nil {
return
}
rt := typ
// The type might be a pointer and we need to keep
// dereferencing to the base type until we find an implementation.
for {
if rt.Implements(iTyp) {
return true, indir
}
if p := rt; p.Kind() == reflect.Ptr {
indir++
if indir >= math.MaxInt8 { // insane number of indirections
return false, 0
}
rt = p.Elem()
continue
}
break
}
// No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
if typ.Kind() != reflect.Ptr {
// Not a pointer, but does the pointer work?
if reflect.PtrTo(typ).Implements(iTyp) {
return true, -1
}
}
return false, 0
}
// validate that this function is correct ...
// culled from OGRE (Object-Oriented Graphics Rendering Engine)
// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
func halfFloatToFloatBits(yy uint16) (d uint32) {
y := uint32(yy)
s := (y >> 15) & 0x01
e := (y >> 10) & 0x1f
m := y & 0x03ff
if e == 0 {
if m == 0 { // plus or minus 0
return s << 31
} else { // Denormalized number -- renormalize it
for (m & 0x00000400) == 0 {
m <<= 1
e -= 1
}
e += 1
const zz uint32 = 0x0400
m &= ^zz
}
} else if e == 31 {
if m == 0 { // Inf
return (s << 31) | 0x7f800000
} else { // NaN
return (s << 31) | 0x7f800000 | (m << 13)
}
}
e = e + (127 - 15)
m = m << 13
return (s << 31) | (e << 23) | m
}
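// halfFloatSketch is a minimal sketch of the conversion above: 0x3C00 is the IEEE 754
// half-precision encoding of 1.0, and the returned bits are the float32 encoding of
// the same value.
func halfFloatSketch() float32 {
	return math.Float32frombits(halfFloatToFloatBits(0x3c00)) // 1.0
}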

View File

@ -0,0 +1,20 @@
//+build !unsafe
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func stringView(v []byte) string {
return string(v)
}
// bytesView returns a view of the string as a []byte.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func bytesView(v string) []byte {
return []byte(v)
}

View File

@ -0,0 +1,155 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// All non-std package dependencies related to testing live in this file,
// so porting to different environment is easy (just update functions).
//
// This file sets up the variables used, including testInitFns.
// Each file should add initialization that should be performed
// after flags are parsed.
//
// init is a multi-step process:
// - setup vars (handled by init functions in each file)
// - parse flags
// - setup derived vars (handled by pre-init registered functions - registered in init function)
// - post init (handled by post-init registered functions - registered in init function)
// This way, no one has to carefully control the initialization order
// using file names, etc.
//
// Tests which require external dependencies need the -tag=x parameter.
// They should be run as:
// go test -tags=x -run=. <other parameters ...>
// Benchmarks should also take this parameter, to include the sereal, xdr, etc. benchmarks.
// To run against codecgen, etc, make sure you pass extra parameters.
// Example usage:
// go test "-tags=x codecgen unsafe" -bench=. <other parameters ...>
//
// To fully test everything:
// go test -tags=x -benchtime=100ms -tv -bg -bi -brw -bu -v -run=. -bench=.
import (
"errors"
"flag"
"fmt"
"reflect"
"sync"
"testing"
)
const (
testLogToT = true
failNowOnFail = true
)
var (
testNoopH = NoopHandle(8)
testMsgpackH = &MsgpackHandle{}
testBincH = &BincHandle{}
testBincHNoSym = &BincHandle{}
testBincHSym = &BincHandle{}
testSimpleH = &SimpleHandle{}
testCborH = &CborHandle{}
testJsonH = &JsonHandle{}
testPreInitFns []func()
testPostInitFns []func()
testOnce sync.Once
)
func init() {
testBincHSym.AsSymbols = AsSymbolAll
testBincHNoSym.AsSymbols = AsSymbolNone
}
func testInitAll() {
flag.Parse()
for _, f := range testPreInitFns {
f()
}
for _, f := range testPostInitFns {
f()
}
}
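// registerTestInitSketch is a minimal sketch of the registration pattern described
// in the file comment: each file appends to testPreInitFns/testPostInitFns from its
// own init function, and testInitAll runs them once flags have been parsed.
func registerTestInitSketch() {
	testPreInitFns = append(testPreInitFns, func() { /* derive vars from flags */ })
	testPostInitFns = append(testPostInitFns, func() { /* finish setup */ })
}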
func logT(x interface{}, format string, args ...interface{}) {
if t, ok := x.(*testing.T); ok && t != nil && testLogToT {
if testVerbose {
t.Logf(format, args...)
}
} else if b, ok := x.(*testing.B); ok && b != nil && testLogToT {
b.Logf(format, args...)
} else {
if len(format) == 0 || format[len(format)-1] != '\n' {
format = format + "\n"
}
fmt.Printf(format, args...)
}
}
func approxDataSize(rv reflect.Value) (sum int) {
switch rk := rv.Kind(); rk {
case reflect.Invalid:
case reflect.Ptr, reflect.Interface:
sum += int(rv.Type().Size())
sum += approxDataSize(rv.Elem())
case reflect.Slice:
sum += int(rv.Type().Size())
for j := 0; j < rv.Len(); j++ {
sum += approxDataSize(rv.Index(j))
}
case reflect.String:
sum += int(rv.Type().Size())
sum += rv.Len()
case reflect.Map:
sum += int(rv.Type().Size())
for _, mk := range rv.MapKeys() {
sum += approxDataSize(mk)
sum += approxDataSize(rv.MapIndex(mk))
}
case reflect.Struct:
//struct size already includes the full data size.
//sum += int(rv.Type().Size())
for j := 0; j < rv.NumField(); j++ {
sum += approxDataSize(rv.Field(j))
}
default:
//pure value types
sum += int(rv.Type().Size())
}
return
}
// ----- functions below are used only by tests (not benchmarks)
func checkErrT(t *testing.T, err error) {
if err != nil {
logT(t, err.Error())
failT(t)
}
}
func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}, desc string) (err error) {
if err = deepEqual(v1, v2); err != nil {
logT(t, "Not Equal: %s: %v. v1: %v, v2: %v", desc, err, v1, v2)
failT(t)
}
return
}
func failT(t *testing.T) {
if failNowOnFail {
t.FailNow()
} else {
t.Fail()
}
}
func deepEqual(v1, v2 interface{}) (err error) {
if !reflect.DeepEqual(v1, v2) {
err = errors.New("Not Match")
}
return
}

View File

@ -0,0 +1,39 @@
//+build unsafe
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import (
"unsafe"
)
// This file has unsafe variants of some helper methods.
type unsafeString struct {
Data uintptr
Len int
}
type unsafeBytes struct {
Data uintptr
Len int
Cap int
}
// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func stringView(v []byte) string {
x := unsafeString{uintptr(unsafe.Pointer(&v[0])), len(v)}
return *(*string)(unsafe.Pointer(&x))
}
// bytesView returns a view of the string as a []byte.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func bytesView(v string) []byte {
x := unsafeBytes{uintptr(unsafe.Pointer(&v)), len(v), len(v)}
return *(*[]byte)(unsafe.Pointer(&x))
}

View File

@ -0,0 +1,924 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// This json support uses base64 encoding for bytes, because you cannot
// store and read any arbitrary string in json (only unicode).
//
// This library specifically supports UTF-8 for encoding and decoding only.
//
// Note that the library will happily encode/decode things which are not valid
// json e.g. a map[int64]string. We do it for consistency. With valid json,
// we will encode and decode appropriately.
// Users can specify their map type if necessary to force it.
//
// Note:
// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
// We implement it here.
// - Also, strconv.ParseXXX for floats and integers
// - only works on strings resulting in unnecessary allocation and []byte-string conversion.
// - it does a lot of redundant checks, because json numbers are simpler than what it supports.
// - We parse numbers (floats and integers) directly here.
// We only delegate parsing floats if it is a hairy float which could cause a loss of precision.
// In that case, we delegate to strconv.ParseFloat.
//
// Note:
// - encode does not beautify. There is no whitespace when encoding.
// - rpc calls which take single integer arguments or write single numeric arguments will need care.
import (
"bytes"
"encoding/base64"
"fmt"
"strconv"
"unicode/utf16"
"unicode/utf8"
)
//--------------------------------
var jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'}
var jsonFloat64Pow10 = [...]float64{
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22,
}
var jsonUint64Pow10 = [...]uint64{
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
}
const (
// if jsonTrackSkipWhitespace, we track Whitespace and reduce the number of redundant checks.
// Make it a const flag, so that it can be elided during linking if false.
//
// It is not a clear win, because we continually set a flag behind a pointer
// and then check it each time, as opposed to just 4 conditionals on a stack variable.
jsonTrackSkipWhitespace = true
// If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
// - If we see first character of null, false or true,
// do not validate subsequent characters.
// - e.g. if we see a n, assume null and skip next 3 characters,
// and do not validate they are ull.
// P.S. Do not expect a significant decoding boost from this.
jsonValidateSymbols = true
// if jsonTruncateMantissa, truncate the mantissa if it has trailing 0's.
// This is important because it could allow some floats to be decoded without
// deferring to strconv.ParseFloat.
jsonTruncateMantissa = true
// if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow
jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
// if mantissa >= jsonNumUintMaxVal, this is an overflow
jsonNumUintMaxVal = 1<<uint64(64) - 1
// jsonNumDigitsUint64Largest = 19
)
type jsonEncDriver struct {
e *Encoder
w encWriter
h *JsonHandle
b [64]byte // scratch
bs []byte // scratch
noBuiltInTypes
}
func (e *jsonEncDriver) EncodeNil() {
e.w.writeb(jsonLiterals[9:13]) // null
}
func (e *jsonEncDriver) EncodeBool(b bool) {
if b {
e.w.writeb(jsonLiterals[0:4]) // true
} else {
e.w.writeb(jsonLiterals[4:9]) // false
}
}
func (e *jsonEncDriver) EncodeFloat32(f float32) {
e.w.writeb(strconv.AppendFloat(e.b[:0], float64(f), 'E', -1, 32))
}
func (e *jsonEncDriver) EncodeFloat64(f float64) {
// e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
e.w.writeb(strconv.AppendFloat(e.b[:0], f, 'E', -1, 64))
}
func (e *jsonEncDriver) EncodeInt(v int64) {
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
}
func (e *jsonEncDriver) EncodeUint(v uint64) {
e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
}
func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
if v := ext.ConvertExt(rv); v == nil {
e.EncodeNil()
} else {
en.encode(v)
}
}
func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
// only encodes re.Value (never re.Data)
if re.Value == nil {
e.EncodeNil()
} else {
en.encode(re.Value)
}
}
func (e *jsonEncDriver) EncodeArrayStart(length int) {
e.w.writen1('[')
}
func (e *jsonEncDriver) EncodeArrayEntrySeparator() {
e.w.writen1(',')
}
func (e *jsonEncDriver) EncodeArrayEnd() {
e.w.writen1(']')
}
func (e *jsonEncDriver) EncodeMapStart(length int) {
e.w.writen1('{')
}
func (e *jsonEncDriver) EncodeMapEntrySeparator() {
e.w.writen1(',')
}
func (e *jsonEncDriver) EncodeMapKVSeparator() {
e.w.writen1(':')
}
func (e *jsonEncDriver) EncodeMapEnd() {
e.w.writen1('}')
}
func (e *jsonEncDriver) EncodeString(c charEncoding, v string) {
// e.w.writestr(strconv.Quote(v))
e.quoteStr(v)
}
func (e *jsonEncDriver) EncodeSymbol(v string) {
// e.EncodeString(c_UTF8, v)
e.quoteStr(v)
}
func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
if c == c_RAW {
slen := base64.StdEncoding.EncodedLen(len(v))
if e.bs == nil {
e.bs = e.b[:]
}
if cap(e.bs) >= slen {
e.bs = e.bs[:slen]
} else {
e.bs = make([]byte, slen)
}
base64.StdEncoding.Encode(e.bs, v)
e.w.writen1('"')
e.w.writeb(e.bs)
e.w.writen1('"')
} else {
// e.EncodeString(c, string(v))
e.quoteStr(stringView(v))
}
}
func (e *jsonEncDriver) quoteStr(s string) {
// adapted from std pkg encoding/json
const hex = "0123456789abcdef"
w := e.w
w.writen1('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
i++
continue
}
if start < i {
w.writestr(s[start:i])
}
switch b {
case '\\', '"':
w.writen2('\\', b)
case '\n':
w.writen2('\\', 'n')
case '\r':
w.writen2('\\', 'r')
case '\b':
w.writen2('\\', 'b')
case '\f':
w.writen2('\\', 'f')
case '\t':
w.writen2('\\', 't')
default:
// encode all bytes < 0x20 (except \r, \n).
// also encode < > & to prevent security holes when served to some browsers.
w.writestr(`\u00`)
w.writen2(hex[b>>4], hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
w.writestr(s[start:i])
}
w.writestr(`\ufffd`)
i += size
start = i
continue
}
// U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
// Both technically valid JSON, but bomb on JSONP, so fix here.
if c == '\u2028' || c == '\u2029' {
if start < i {
w.writestr(s[start:i])
}
w.writestr(`\u202`)
w.writen1(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
w.writestr(s[start:])
}
w.writen1('"')
}
//--------------------------------
type jsonNum struct {
bytes []byte // may have [+-.eE0-9]
mantissa uint64 // where mantissa ends, and maybe dot begins.
exponent int16 // exponent value.
manOverflow bool
neg bool // started with -. No initial sign in the bytes above.
dot bool // has dot
explicitExponent bool // explicit exponent
}
func (x *jsonNum) reset() {
x.bytes = x.bytes[:0]
x.manOverflow = false
x.neg = false
x.dot = false
x.explicitExponent = false
x.mantissa = 0
x.exponent = 0
}
// uintExp is called only if exponent > 0.
func (x *jsonNum) uintExp() (n uint64, overflow bool) {
n = x.mantissa
e := x.exponent
if e >= int16(len(jsonUint64Pow10)) {
overflow = true
return
}
n *= jsonUint64Pow10[e]
if n < x.mantissa || n > jsonNumUintMaxVal {
overflow = true
return
}
return
// for i := int16(0); i < e; i++ {
// if n >= jsonNumUintCutoff {
// overflow = true
// return
// }
// n *= 10
// }
// return
}
func (x *jsonNum) floatVal() (f float64) {
// We do not want to lose precision.
// Consequently, we will delegate to strconv.ParseFloat if any of the following happen:
// - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits)
// We expect up to 99.... (19 digits)
// - The mantissa cannot fit into 52 bits of a uint64
// - The exponent is beyond our scope, i.e. beyond 22.
const uint64MantissaBits = 52
const maxExponent = int16(len(jsonFloat64Pow10)) - 1
parseUsingStrConv := x.manOverflow ||
x.exponent > maxExponent ||
(x.exponent < 0 && -(x.exponent) > maxExponent) ||
x.mantissa>>uint64MantissaBits != 0
if parseUsingStrConv {
var err error
if f, err = strconv.ParseFloat(stringView(x.bytes), 64); err != nil {
panic(fmt.Errorf("parse float: %s, %v", x.bytes, err))
return
}
if x.neg {
f = -f
}
return
}
// all good. so handle parse here.
f = float64(x.mantissa)
// fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f)
if x.neg {
f = -f
}
if x.exponent > 0 {
f *= jsonFloat64Pow10[x.exponent]
} else if x.exponent < 0 {
f /= jsonFloat64Pow10[-x.exponent]
}
return
}
type jsonDecDriver struct {
d *Decoder
h *JsonHandle
r decReader // *bytesDecReader decReader
ct valueType // container type. one of unset, array or map.
bstr [8]byte // scratch used for string \UXXX parsing
b [64]byte // scratch
wsSkipped bool // whitespace skipped
n jsonNum
noBuiltInTypes
}
// This will skip whitespace characters and return the next byte to read.
// The next byte determines what kind of value follows.
func (d *jsonDecDriver) skipWhitespace(unread bool) (b byte) {
// as initReadNext is not called all the time, we set ct to valueTypeUnset whenever
// we skip whitespace, as this is the signal that something new is about to be read.
d.ct = valueTypeUnset
b = d.r.readn1()
if !jsonTrackSkipWhitespace || !d.wsSkipped {
for ; b == ' ' || b == '\t' || b == '\r' || b == '\n'; b = d.r.readn1() {
}
if jsonTrackSkipWhitespace {
d.wsSkipped = true
}
}
if unread {
d.r.unreadn1()
}
return b
}
func (d *jsonDecDriver) CheckBreak() bool {
b := d.skipWhitespace(true)
return b == '}' || b == ']'
}
func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) {
bs := d.r.readx(int(toIdx - fromIdx))
if jsonValidateSymbols {
if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) {
d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs)
return
}
}
if jsonTrackSkipWhitespace {
d.wsSkipped = false
}
}
func (d *jsonDecDriver) TryDecodeAsNil() bool {
b := d.skipWhitespace(true)
if b == 'n' {
d.readStrIdx(9, 13) // null
d.ct = valueTypeNil
return true
}
return false
}
func (d *jsonDecDriver) DecodeBool() bool {
b := d.skipWhitespace(false)
if b == 'f' {
d.readStrIdx(5, 9) // alse
return false
}
if b == 't' {
d.readStrIdx(1, 4) // rue
return true
}
d.d.errorf("json: decode bool: got first char %c", b)
return false // "unreachable"
}
func (d *jsonDecDriver) ReadMapStart() int {
d.expectChar('{')
d.ct = valueTypeMap
return -1
}
func (d *jsonDecDriver) ReadArrayStart() int {
d.expectChar('[')
d.ct = valueTypeArray
return -1
}
func (d *jsonDecDriver) ReadMapEnd() {
d.expectChar('}')
}
func (d *jsonDecDriver) ReadArrayEnd() {
d.expectChar(']')
}
func (d *jsonDecDriver) ReadArrayEntrySeparator() {
d.expectChar(',')
}
func (d *jsonDecDriver) ReadMapEntrySeparator() {
d.expectChar(',')
}
func (d *jsonDecDriver) ReadMapKVSeparator() {
d.expectChar(':')
}
func (d *jsonDecDriver) expectChar(c uint8) {
b := d.skipWhitespace(false)
if b != c {
d.d.errorf("json: expect char %c but got char %c", c, b)
return
}
if jsonTrackSkipWhitespace {
d.wsSkipped = false
}
}
func (d *jsonDecDriver) IsContainerType(vt valueType) bool {
// check container type by checking the first char
if d.ct == valueTypeUnset {
b := d.skipWhitespace(true)
if b == '{' {
d.ct = valueTypeMap
} else if b == '[' {
d.ct = valueTypeArray
} else if b == 'n' {
d.ct = valueTypeNil
} else if b == '"' {
d.ct = valueTypeString
}
}
if vt == valueTypeNil || vt == valueTypeBytes || vt == valueTypeString ||
vt == valueTypeArray || vt == valueTypeMap {
return d.ct == vt
}
// ugorji: made switch into conditionals, so that IsContainerType can be inlined.
// switch vt {
// case valueTypeNil, valueTypeBytes, valueTypeString, valueTypeArray, valueTypeMap:
// return d.ct == vt
// }
d.d.errorf("isContainerType: unsupported parameter: %v", vt)
return false // "unreachable"
}
func (d *jsonDecDriver) decNum(storeBytes bool) {
// storeBytes = true // TODO: remove.
// If it has a . or an e|E, decode as a float; else decode as an int.
b := d.skipWhitespace(false)
if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
d.d.errorf("json: decNum: got first char '%c'", b)
return
}
const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
const jsonNumUintMaxVal = 1<<uint64(64) - 1
// var n jsonNum // create stack-copy jsonNum, and set to pointer at end.
// n.bytes = d.n.bytes[:0]
n := &d.n
n.reset()
// The format of a number is as below:
// parsing: sign? digit* dot? digit* e? sign? digit*
// states: 0 1* 2 3* 4 5* 6 7
// We honor this state so we can break correctly.
var state uint8 = 0
var eNeg bool
var e int16
var eof bool
LOOP:
for !eof {
// fmt.Printf("LOOP: b: %q\n", b)
switch b {
case '+':
switch state {
case 0:
state = 2
// do not add sign to the slice ...
b, eof = d.r.readn1eof()
continue
case 6: // typ = jsonNumFloat
state = 7
default:
break LOOP
}
case '-':
switch state {
case 0:
state = 2
n.neg = true
// do not add sign to the slice ...
b, eof = d.r.readn1eof()
continue
case 6: // typ = jsonNumFloat
eNeg = true
state = 7
default:
break LOOP
}
case '.':
switch state {
case 0, 2: // typ = jsonNumFloat
state = 4
n.dot = true
default:
break LOOP
}
case 'e', 'E':
switch state {
case 0, 2, 4: // typ = jsonNumFloat
state = 6
// n.mantissaEndIndex = int16(len(n.bytes))
n.explicitExponent = true
default:
break LOOP
}
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
switch state {
case 0:
state = 2
fallthrough
case 2:
fallthrough
case 4:
if n.dot {
n.exponent--
}
if n.mantissa >= jsonNumUintCutoff {
n.manOverflow = true
break
}
v := uint64(b - '0')
n.mantissa *= 10
if v != 0 {
n1 := n.mantissa + v
if n1 < n.mantissa || n1 > jsonNumUintMaxVal {
n.manOverflow = true // n+v overflows
break
}
n.mantissa = n1
}
case 6:
state = 7
fallthrough
case 7:
if !(b == '0' && e == 0) {
e = e*10 + int16(b-'0')
}
default:
break LOOP
}
default:
break LOOP
}
if storeBytes {
n.bytes = append(n.bytes, b)
}
b, eof = d.r.readn1eof()
}
if jsonTruncateMantissa && n.mantissa != 0 {
for n.mantissa%10 == 0 {
n.mantissa /= 10
n.exponent++
}
}
if e != 0 {
if eNeg {
n.exponent -= e
} else {
n.exponent += e
}
}
// d.n = n
if !eof {
d.r.unreadn1()
}
if jsonTrackSkipWhitespace {
d.wsSkipped = false
}
// fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n",
// n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex)
return
}
func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) {
d.decNum(false)
n := &d.n
if n.manOverflow {
d.d.errorf("json: overflow integer after: %v", n.mantissa)
return
}
var u uint64
if n.exponent == 0 {
u = n.mantissa
} else if n.exponent < 0 {
d.d.errorf("json: fractional integer")
return
} else if n.exponent > 0 {
var overflow bool
if u, overflow = n.uintExp(); overflow {
d.d.errorf("json: overflow integer")
return
}
}
i = int64(u)
if n.neg {
i = -i
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("json: overflow %v bits: %s", bitsize, n.bytes)
return
}
// fmt.Printf("DecodeInt: %v\n", i)
return
}
func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) {
d.decNum(false)
n := &d.n
if n.neg {
d.d.errorf("json: unsigned integer cannot be negative")
return
}
if n.manOverflow {
d.d.errorf("json: overflow integer after: %v", n.mantissa)
return
}
if n.exponent == 0 {
u = n.mantissa
} else if n.exponent < 0 {
d.d.errorf("json: fractional integer")
return
} else if n.exponent > 0 {
var overflow bool
if u, overflow = n.uintExp(); overflow {
d.d.errorf("json: overflow integer")
return
}
}
if chkOvf.Uint(u, bitsize) {
d.d.errorf("json: overflow %v bits: %s", bitsize, n.bytes)
return
}
// fmt.Printf("DecodeUint: %v\n", u)
return
}
func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
d.decNum(true)
n := &d.n
f = n.floatVal()
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("json: overflow float32: %v, %s", f, n.bytes)
return
}
return
}
func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if ext == nil {
re := rv.(*RawExt)
re.Tag = xtag
d.d.decode(&re.Value)
} else {
var v interface{}
d.d.decode(&v)
ext.UpdateExt(rv, v)
}
return
}
func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
// zerocopy doesn't matter for json, as the bytes must be parsed.
bs0 := d.appendStringAsBytes(d.b[:0])
if isstring {
return bs0
}
slen := base64.StdEncoding.DecodedLen(len(bs0))
if cap(bs) >= slen {
bsOut = bs[:slen]
} else {
bsOut = make([]byte, slen)
}
slen2, err := base64.StdEncoding.Decode(bsOut, bs0)
if err != nil {
d.d.errorf("json: error decoding base64 binary '%s': %v", bs0, err)
return nil
}
if slen != slen2 {
bsOut = bsOut[:slen2]
}
return
}
func (d *jsonDecDriver) DecodeString() (s string) {
return string(d.appendStringAsBytes(d.b[:0]))
}
func (d *jsonDecDriver) appendStringAsBytes(v []byte) []byte {
d.expectChar('"')
for {
c := d.r.readn1()
if c == '"' {
break
} else if c == '\\' {
c = d.r.readn1()
switch c {
case '"', '\\', '/', '\'':
v = append(v, c)
case 'b':
v = append(v, '\b')
case 'f':
v = append(v, '\f')
case 'n':
v = append(v, '\n')
case 'r':
v = append(v, '\r')
case 't':
v = append(v, '\t')
case 'u':
rr := d.jsonU4(false)
// fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr))
if utf16.IsSurrogate(rr) {
rr = utf16.DecodeRune(rr, d.jsonU4(true))
}
w2 := utf8.EncodeRune(d.bstr[:], rr)
v = append(v, d.bstr[:w2]...)
default:
d.d.errorf("json: unsupported escaped value: %c", c)
return nil
}
} else {
v = append(v, c)
}
}
if jsonTrackSkipWhitespace {
d.wsSkipped = false
}
return v
}
func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune {
if checkSlashU && !(d.r.readn1() == '\\' && d.r.readn1() == 'u') {
d.d.errorf(`json: unquoteStr: invalid unicode sequence. Expecting \u`)
return 0
}
// u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64)
var u uint32
for i := 0; i < 4; i++ {
v := d.r.readn1()
if '0' <= v && v <= '9' {
v = v - '0'
} else if 'a' <= v && v <= 'f' {
v = v - 'a' + 10
} else if 'A' <= v && v <= 'F' {
v = v - 'A' + 10
} else {
d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v)
return 0
}
u = u*16 + uint32(v)
}
return rune(u)
}
func (d *jsonDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
n := d.skipWhitespace(true)
switch n {
case 'n':
d.readStrIdx(9, 13) // null
vt = valueTypeNil
case 'f':
d.readStrIdx(4, 9) // false
vt = valueTypeBool
v = false
case 't':
d.readStrIdx(0, 4) // true
vt = valueTypeBool
v = true
case '{':
vt = valueTypeMap
decodeFurther = true
case '[':
vt = valueTypeArray
decodeFurther = true
case '"':
vt = valueTypeString
v = d.DecodeString()
default: // number
d.decNum(true)
n := &d.n
// if the string had any of [.eE], then decode as a float.
switch {
case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow:
vt = valueTypeFloat
v = n.floatVal()
case n.exponent == 0:
u := n.mantissa
switch {
case n.neg:
vt = valueTypeInt
v = -int64(u)
case d.h.SignedInteger:
vt = valueTypeInt
v = int64(u)
default:
vt = valueTypeUint
v = u
}
default:
u, overflow := n.uintExp()
switch {
case overflow:
vt = valueTypeFloat
v = n.floatVal()
case n.neg:
vt = valueTypeInt
v = -int64(u)
case d.h.SignedInteger:
vt = valueTypeInt
v = int64(u)
default:
vt = valueTypeUint
v = u
}
}
// fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v)
}
return
}
//----------------------
// JsonHandle is a handle for JSON encoding format.
//
// Json is comprehensively supported:
// - decodes numbers into interface{} as int, uint or float64
// - encodes and decodes []byte using base64 Std Encoding
// - UTF-8 support for encoding and decoding
//
// It has better performance than the json library in the standard library,
// by leveraging the performance improvements of the codec library and
// minimizing allocations.
//
// In addition, it doesn't read more bytes than necessary during a decode, which allows
// reading multiple values from a stream containing json and non-json content.
// For example, a user can read a json value, then a cbor value, then a msgpack value,
// all from the same stream in sequence.
type JsonHandle struct {
BasicHandle
textEncodingType
}
func (h *JsonHandle) newEncDriver(e *Encoder) encDriver {
return &jsonEncDriver{e: e, w: e.w, h: h}
}
func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
// d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
hd := jsonDecDriver{d: d, r: d.r, h: h}
hd.n.bytes = d.b[:]
return &hd
}
var jsonEncodeTerminate = []byte{' '}
func (h *JsonHandle) rpcEncodeTerminate() []byte {
return jsonEncodeTerminate
}
var _ decDriver = (*jsonDecDriver)(nil)
var _ encDriver = (*jsonEncDriver)(nil)
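// jsonRoundTripSketch is a minimal sketch of the intended use of JsonHandle, assuming
// the NewEncoderBytes/NewDecoderBytes constructors defined elsewhere in this package:
// pre-configure the handle once, then encode and decode through it.
func jsonRoundTripSketch() (out map[string]int, err error) {
	var jh JsonHandle
	var b []byte
	if err = NewEncoderBytes(&b, &jh).Encode(map[string]int{"n": 1}); err != nil {
		return
	}
	err = NewDecoderBytes(b, &jh).Decode(&out)
	return
}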

View File

@ -0,0 +1,843 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
/*
MSGPACK
Msgpack-c implementation powers the c, c++, python, ruby, etc libraries.
We need to maintain compatibility with it and how it encodes integer values
without caring about the type.
For compatibility with behaviour of msgpack-c reference implementation:
- Go intX (>0) and uintX
IS ENCODED AS
msgpack +ve fixnum, unsigned
- Go intX (<0)
IS ENCODED AS
msgpack -ve fixnum, signed
*/
package codec
import (
"fmt"
"io"
"math"
"net/rpc"
)
const (
mpPosFixNumMin byte = 0x00
mpPosFixNumMax = 0x7f
mpFixMapMin = 0x80
mpFixMapMax = 0x8f
mpFixArrayMin = 0x90
mpFixArrayMax = 0x9f
mpFixStrMin = 0xa0
mpFixStrMax = 0xbf
mpNil = 0xc0
_ = 0xc1
mpFalse = 0xc2
mpTrue = 0xc3
mpFloat = 0xca
mpDouble = 0xcb
mpUint8 = 0xcc
mpUint16 = 0xcd
mpUint32 = 0xce
mpUint64 = 0xcf
mpInt8 = 0xd0
mpInt16 = 0xd1
mpInt32 = 0xd2
mpInt64 = 0xd3
// extensions below
mpBin8 = 0xc4
mpBin16 = 0xc5
mpBin32 = 0xc6
mpExt8 = 0xc7
mpExt16 = 0xc8
mpExt32 = 0xc9
mpFixExt1 = 0xd4
mpFixExt2 = 0xd5
mpFixExt4 = 0xd6
mpFixExt8 = 0xd7
mpFixExt16 = 0xd8
mpStr8 = 0xd9 // new
mpStr16 = 0xda
mpStr32 = 0xdb
mpArray16 = 0xdc
mpArray32 = 0xdd
mpMap16 = 0xde
mpMap32 = 0xdf
mpNegFixNumMin = 0xe0
mpNegFixNumMax = 0xff
)
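// fixnumSketch is a minimal sketch of the compatibility rule in the file comment,
// assuming the NewEncoderBytes constructor defined elsewhere in this package: a small
// non-negative Go int is written as a single positive-fixnum byte.
func fixnumSketch() (byte, error) {
	var mh MsgpackHandle
	var b []byte
	if err := NewEncoderBytes(&b, &mh).Encode(7); err != nil {
		return 0, err
	}
	return b[0], nil // 0x07, within [mpPosFixNumMin, mpPosFixNumMax]
}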
// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
// that the backend RPC service takes multiple arguments, which have been arranged
// in sequence in the slice.
//
// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
// array of 1 element).
type MsgpackSpecRpcMultiArgs []interface{}
// A MsgpackContainer type specifies the different types of msgpackContainers.
type msgpackContainerType struct {
fixCutoff int
bFixMin, b8, b16, b32 byte
hasFixMin, has8, has8Always bool
}
var (
msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false}
msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true}
msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false}
msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false}
)
//---------------------------------------------
type msgpackEncDriver struct {
e *Encoder
w encWriter
h *MsgpackHandle
noBuiltInTypes
encNoSeparator
x [8]byte
}
func (e *msgpackEncDriver) EncodeNil() {
e.w.writen1(mpNil)
}
func (e *msgpackEncDriver) EncodeInt(i int64) {
if i >= 0 {
e.EncodeUint(uint64(i))
} else if i >= -32 {
e.w.writen1(byte(i))
} else if i >= math.MinInt8 {
e.w.writen2(mpInt8, byte(i))
} else if i >= math.MinInt16 {
e.w.writen1(mpInt16)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
} else if i >= math.MinInt32 {
e.w.writen1(mpInt32)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
} else {
e.w.writen1(mpInt64)
bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
}
}
func (e *msgpackEncDriver) EncodeUint(i uint64) {
if i <= math.MaxInt8 {
e.w.writen1(byte(i))
} else if i <= math.MaxUint8 {
e.w.writen2(mpUint8, byte(i))
} else if i <= math.MaxUint16 {
e.w.writen1(mpUint16)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
} else if i <= math.MaxUint32 {
e.w.writen1(mpUint32)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
} else {
e.w.writen1(mpUint64)
bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
}
}
func (e *msgpackEncDriver) EncodeBool(b bool) {
if b {
e.w.writen1(mpTrue)
} else {
e.w.writen1(mpFalse)
}
}
func (e *msgpackEncDriver) EncodeFloat32(f float32) {
e.w.writen1(mpFloat)
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
}
func (e *msgpackEncDriver) EncodeFloat64(f float64) {
e.w.writen1(mpDouble)
bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
}
func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) {
bs := ext.WriteExt(v)
if bs == nil {
e.EncodeNil()
return
}
if e.h.WriteExt {
e.encodeExtPreamble(uint8(xtag), len(bs))
e.w.writeb(bs)
} else {
e.EncodeStringBytes(c_RAW, bs)
}
}
func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
e.w.writeb(re.Data)
}
func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
if l == 1 {
e.w.writen2(mpFixExt1, xtag)
} else if l == 2 {
e.w.writen2(mpFixExt2, xtag)
} else if l == 4 {
e.w.writen2(mpFixExt4, xtag)
} else if l == 8 {
e.w.writen2(mpFixExt8, xtag)
} else if l == 16 {
e.w.writen2(mpFixExt16, xtag)
} else if l < 256 {
e.w.writen2(mpExt8, byte(l))
e.w.writen1(xtag)
} else if l < 65536 {
e.w.writen1(mpExt16)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
e.w.writen1(xtag)
} else {
e.w.writen1(mpExt32)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
e.w.writen1(xtag)
}
}
func (e *msgpackEncDriver) EncodeArrayStart(length int) {
e.writeContainerLen(msgpackContainerList, length)
}
func (e *msgpackEncDriver) EncodeMapStart(length int) {
e.writeContainerLen(msgpackContainerMap, length)
}
func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
if c == c_RAW && e.h.WriteExt {
e.writeContainerLen(msgpackContainerBin, len(s))
} else {
e.writeContainerLen(msgpackContainerStr, len(s))
}
if len(s) > 0 {
e.w.writestr(s)
}
}
func (e *msgpackEncDriver) EncodeSymbol(v string) {
e.EncodeString(c_UTF8, v)
}
func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) {
if c == c_RAW && e.h.WriteExt {
e.writeContainerLen(msgpackContainerBin, len(bs))
} else {
e.writeContainerLen(msgpackContainerStr, len(bs))
}
if len(bs) > 0 {
e.w.writeb(bs)
}
}
func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
if ct.hasFixMin && l < ct.fixCutoff {
e.w.writen1(ct.bFixMin | byte(l))
} else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) {
e.w.writen2(ct.b8, uint8(l))
} else if l < 65536 {
e.w.writen1(ct.b16)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
} else {
e.w.writen1(ct.b32)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
}
}
//---------------------------------------------
type msgpackDecDriver struct {
d *Decoder
r decReader // *Decoder decReader decReaderT
h *MsgpackHandle
b [scratchByteArrayLen]byte
bd byte
bdRead bool
br bool // bytes reader
bdType valueType
noBuiltInTypes
noStreamingCodec
decNoSeparator
}
// Note: This returns either a primitive (int, bool, etc) for non-containers,
// or a containerType, or a specific type denoting nil or extension.
// It is called when a nil interface{} is passed, leaving it up to the DecDriver
// to introspect the stream and decide how best to decode.
// It deciphers the value by looking at the stream first.
func (d *msgpackDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
if !d.bdRead {
d.readNextBd()
}
bd := d.bd
switch bd {
case mpNil:
vt = valueTypeNil
d.bdRead = false
case mpFalse:
vt = valueTypeBool
v = false
case mpTrue:
vt = valueTypeBool
v = true
case mpFloat:
vt = valueTypeFloat
v = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
case mpDouble:
vt = valueTypeFloat
v = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
case mpUint8:
vt = valueTypeUint
v = uint64(d.r.readn1())
case mpUint16:
vt = valueTypeUint
v = uint64(bigen.Uint16(d.r.readx(2)))
case mpUint32:
vt = valueTypeUint
v = uint64(bigen.Uint32(d.r.readx(4)))
case mpUint64:
vt = valueTypeUint
v = uint64(bigen.Uint64(d.r.readx(8)))
case mpInt8:
vt = valueTypeInt
v = int64(int8(d.r.readn1()))
case mpInt16:
vt = valueTypeInt
v = int64(int16(bigen.Uint16(d.r.readx(2))))
case mpInt32:
vt = valueTypeInt
v = int64(int32(bigen.Uint32(d.r.readx(4))))
case mpInt64:
vt = valueTypeInt
v = int64(bigen.Uint64(d.r.readx(8)))
default:
switch {
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
// positive fixnum (always signed)
vt = valueTypeInt
v = int64(int8(bd))
case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
// negative fixnum
vt = valueTypeInt
v = int64(int8(bd))
case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
if d.h.RawToString {
var rvm string
vt = valueTypeString
v = &rvm
} else {
var rvm = zeroByteSlice
vt = valueTypeBytes
v = &rvm
}
decodeFurther = true
case bd == mpBin8, bd == mpBin16, bd == mpBin32:
var rvm = zeroByteSlice
vt = valueTypeBytes
v = &rvm
decodeFurther = true
case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
vt = valueTypeArray
decodeFurther = true
case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
vt = valueTypeMap
decodeFurther = true
case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
clen := d.readExtLen()
var re RawExt
re.Tag = uint64(d.r.readn1())
re.Data = d.r.readx(clen)
v = &re
vt = valueTypeExt
default:
d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
return
}
}
if !decodeFurther {
d.bdRead = false
}
if vt == valueTypeUint && d.h.SignedInteger {
d.bdType = valueTypeInt
v = int64(v.(uint64))
}
return
}
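// Usage sketch: DecodeNaked is reached when a caller decodes into a nil
// interface{} through the package's exported API (NewDecoderBytes is assumed
// here; data is a hypothetical msgpack-encoded []byte):
//
//	var h MsgpackHandle
//	h.RawToString = true // raw/str data comes back as string rather than []byte
//	var v interface{}
//	if err := NewDecoderBytes(data, &h).Decode(&v); err != nil {
//		// handle decode error
//	}
//	// v now holds a bool, int64/uint64, float64, string or []byte, slice, or map,
//	// chosen by DecodeNaked from the first descriptor byte of the stream.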
// int can be decoded from msgpack type: intXXX or uintXXX
func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case mpUint8:
i = int64(uint64(d.r.readn1()))
case mpUint16:
i = int64(uint64(bigen.Uint16(d.r.readx(2))))
case mpUint32:
i = int64(uint64(bigen.Uint32(d.r.readx(4))))
case mpUint64:
i = int64(bigen.Uint64(d.r.readx(8)))
case mpInt8:
i = int64(int8(d.r.readn1()))
case mpInt16:
i = int64(int16(bigen.Uint16(d.r.readx(2))))
case mpInt32:
i = int64(int32(bigen.Uint32(d.r.readx(4))))
case mpInt64:
i = int64(bigen.Uint64(d.r.readx(8)))
default:
switch {
case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
i = int64(int8(d.bd))
case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
i = int64(int8(d.bd))
default:
d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
return
}
}
// check overflow (logic adapted from std pkg reflect/value.go OverflowInt())
if bitsize > 0 {
if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
d.d.errorf("Overflow int value: %v", i)
return
}
}
d.bdRead = false
return
}
// uint can be decoded from msgpack type: intXXX or uintXXX
func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case mpUint8:
ui = uint64(d.r.readn1())
case mpUint16:
ui = uint64(bigen.Uint16(d.r.readx(2)))
case mpUint32:
ui = uint64(bigen.Uint32(d.r.readx(4)))
case mpUint64:
ui = bigen.Uint64(d.r.readx(8))
case mpInt8:
if i := int64(int8(d.r.readn1())); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
return
}
case mpInt16:
if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
return
}
case mpInt32:
if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
return
}
case mpInt64:
if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
return
}
default:
switch {
case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
ui = uint64(d.bd)
case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd))
return
default:
d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
return
}
}
// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
if bitsize > 0 {
if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
d.d.errorf("Overflow uint value: %v", ui)
return
}
}
d.bdRead = false
return
}
// float can either be decoded from msgpack type: float, double or intX
func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == mpFloat {
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
} else if d.bd == mpDouble {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else {
f = float64(d.DecodeInt(0))
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("msgpack: float32 overflow: %v", f)
return
}
d.bdRead = false
return
}
// bool can be decoded from bool, fixnum 0 or 1.
func (d *msgpackDecDriver) DecodeBool() (b bool) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == mpFalse || d.bd == 0 {
// b = false
} else if d.bd == mpTrue || d.bd == 1 {
b = true
} else {
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
}
func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
var clen int
if isstring {
clen = d.readContainerLen(msgpackContainerStr)
} else {
// bytes can be decoded from msgpackContainerStr or msgpackContainerBin
if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
clen = d.readContainerLen(msgpackContainerBin)
} else {
clen = d.readContainerLen(msgpackContainerStr)
}
}
// println("DecodeBytes: clen: ", clen)
d.bdRead = false
// bytes may be nil, so handle it. if nil, clen=-1.
if clen < 0 {
return nil
}
if zerocopy {
if d.br {
return d.r.readx(clen)
} else if len(bs) == 0 {
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
}
func (d *msgpackDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.b[:], true, true))
}
func (d *msgpackDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
d.bdType = valueTypeUnset
}
func (d *msgpackDecDriver) IsContainerType(vt valueType) bool {
bd := d.bd
switch vt {
case valueTypeNil:
return bd == mpNil
case valueTypeBytes:
return bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
(!d.h.RawToString &&
(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)))
case valueTypeString:
return d.h.RawToString &&
(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))
case valueTypeArray:
return bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax)
case valueTypeMap:
return bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax)
}
d.d.errorf("isContainerType: unsupported parameter: %v", vt)
return false // "unreachable"
}
func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == mpNil {
d.bdRead = false
v = true
}
return
}
func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
bd := d.bd
if bd == mpNil {
clen = -1 // to represent nil
} else if bd == ct.b8 {
clen = int(d.r.readn1())
} else if bd == ct.b16 {
clen = int(bigen.Uint16(d.r.readx(2)))
} else if bd == ct.b32 {
clen = int(bigen.Uint32(d.r.readx(4)))
} else if (ct.bFixMin & bd) == ct.bFixMin {
clen = int(ct.bFixMin ^ bd)
} else {
d.d.errorf("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
return
}
d.bdRead = false
return
}
func (d *msgpackDecDriver) ReadMapStart() int {
return d.readContainerLen(msgpackContainerMap)
}
func (d *msgpackDecDriver) ReadArrayStart() int {
return d.readContainerLen(msgpackContainerList)
}
func (d *msgpackDecDriver) readExtLen() (clen int) {
switch d.bd {
case mpNil:
clen = -1 // to represent nil
case mpFixExt1:
clen = 1
case mpFixExt2:
clen = 2
case mpFixExt4:
clen = 4
case mpFixExt8:
clen = 8
case mpFixExt16:
clen = 16
case mpExt8:
clen = int(d.r.readn1())
case mpExt16:
clen = int(bigen.Uint16(d.r.readx(2)))
case mpExt32:
clen = int(bigen.Uint32(d.r.readx(4)))
default:
d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd)
return
}
return
}
func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if xtag > 0xff {
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
return
}
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
realxtag = uint64(realxtag1)
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
} else {
ext.ReadExt(rv, xbs)
}
return
}
func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
if !d.bdRead {
d.readNextBd()
}
xbd := d.bd
if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 {
xbs = d.DecodeBytes(nil, false, true)
} else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 ||
(xbd >= mpFixStrMin && xbd <= mpFixStrMax) {
xbs = d.DecodeBytes(nil, true, true)
} else {
clen := d.readExtLen()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
return
}
xbs = d.r.readx(clen)
}
d.bdRead = false
return
}
//--------------------------------------------------
//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
type MsgpackHandle struct {
BasicHandle
binaryEncodingType
// RawToString controls how raw bytes are decoded into a nil interface{}.
RawToString bool
// WriteExt flag supports encoding configured extensions with extension tags.
// It also controls whether other elements of the new spec are encoded (ie Str8).
//
// With WriteExt=false, configured extensions are serialized as raw bytes
// and Str8 is not encoded.
//
// A stream can still be decoded into a typed value, provided an appropriate value
// is provided, but the type cannot be inferred from the stream. If no appropriate
// type is provided (e.g. decoding into a nil interface{}), you get back
// a []byte or string based on the setting of RawToString.
WriteExt bool
}
func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
return &msgpackEncDriver{e: e, w: e.w, h: h}
}
func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
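// Usage sketch for MsgpackHandle, assuming the package's exported
// NewEncoderBytes/NewDecoderBytes helpers:
//
//	h := &MsgpackHandle{WriteExt: true} // emit bin/str8 and extension tags per the newer msgpack spec
//	var buf []byte
//	if err := NewEncoderBytes(&buf, h).Encode(map[string]int{"a": 1}); err != nil {
//		// handle encode error
//	}
//	var out map[string]int
//	if err := NewDecoderBytes(buf, h).Decode(&out); err != nil {
//		// handle decode error
//	}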
//--------------------------------------------------
type msgpackSpecRpcCodec struct {
rpcCodec
}
// /////////////// Spec RPC Codec ///////////////////
func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
// WriteRequest can write to both a Go service, and other services that do
// not abide by the 1 argument rule of a Go service.
// We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
var bodyArr []interface{}
if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
bodyArr = ([]interface{})(m)
} else {
bodyArr = []interface{}{body}
}
r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
return c.write(r2, nil, false, true)
}
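// Usage sketch: when the remote msgpack-rpc method takes several positional
// parameters, wrap them in MsgpackSpecRpcMultiArgs so they are sent as one
// argument array instead of a single-element body (client setup as in the
// MsgpackSpecRpc sketch further below; the method name and arguments here are hypothetical):
//
//	var reply string
//	err := client.Call("Echo123", MsgpackSpecRpcMultiArgs{"A1", "B2", "C3"}, &reply)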
func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
var moe interface{}
if r.Error != "" {
moe = r.Error
}
if moe != nil && body != nil {
body = nil
}
r2 := []interface{}{1, uint32(r.Seq), moe, body}
return c.write(r2, nil, false, true)
}
func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
return c.parseCustomHeader(1, &r.Seq, &r.Error)
}
func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
}
func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
if body == nil { // read and discard
return c.read(nil)
}
bodyArr := []interface{}{body}
return c.read(&bodyArr)
}
func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
if c.cls {
return io.EOF
}
// We read the response header by hand
// so that the body can be decoded on its own from the stream at a later time.
const fia byte = 0x94 //four item array descriptor value
// Not sure why the panic of EOF is swallowed above.
// if bs1 := c.dec.r.readn1(); bs1 != fia {
// err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
// return
// }
var b byte
b, err = c.br.ReadByte()
if err != nil {
return
}
if b != fia {
err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
return
}
if err = c.read(&b); err != nil {
return
}
if b != expectTypeByte {
err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b)
return
}
if err = c.read(msgid); err != nil {
return
}
if err = c.read(methodOrError); err != nil {
return
}
return
}
//--------------------------------------------------
// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol
// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
type msgpackSpecRpc struct{}
// MsgpackSpecRpc implements Rpc using the communication protocol defined in
// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
var MsgpackSpecRpc msgpackSpecRpc
func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}
func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}
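// Usage sketch, assuming the standard library net and net/rpc packages
// (the address is hypothetical):
//
//	conn, err := net.Dial("tcp", "127.0.0.1:5555")
//	if err != nil {
//		// handle dial error
//	}
//	h := &MsgpackHandle{WriteExt: true}
//	client := rpc.NewClientWithCodec(MsgpackSpecRpc.ClientCodec(conn, h))
//	defer client.Close()
//	// client.Call now speaks the msgpack-rpc wire protocol referenced above.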
var _ decDriver = (*msgpackDecDriver)(nil)
var _ encDriver = (*msgpackEncDriver)(nil)


@ -0,0 +1,164 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import (
"math/rand"
"time"
)
// NoopHandle returns a no-op handle. It basically does nothing.
// It is only useful for benchmarking, as it gives an idea of the
// overhead from the codec framework.
// LIBRARY USERS: *** DO NOT USE ***
func NoopHandle(slen int) *noopHandle {
h := noopHandle{}
h.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
h.B = make([][]byte, slen)
h.S = make([]string, slen)
for i := 0; i < len(h.S); i++ {
b := make([]byte, i+1)
for j := 0; j < len(b); j++ {
b[j] = 'a' + byte(i)
}
h.B[i] = b
h.S[i] = string(b)
}
return &h
}
// noopHandle does nothing.
// It is used to simulate the overhead of the codec framework.
type noopHandle struct {
BasicHandle
binaryEncodingType
noopDrv // noopDrv is unexported here, so we can get a copy of it when needed.
}
type noopDrv struct {
i int
S []string
B [][]byte
mk bool // are we about to read a map key?
ct valueType // last request for IsContainerType.
cb bool // last response for IsContainerType.
rand *rand.Rand
}
func (h *noopDrv) r(v int) int { return h.rand.Intn(v) }
func (h *noopDrv) m(v int) int { h.i++; return h.i % v }
func (h *noopDrv) newEncDriver(_ *Encoder) encDriver { return h }
func (h *noopDrv) newDecDriver(_ *Decoder) decDriver { return h }
// --- encDriver
func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {}
func (h *noopDrv) EncodeNil() {}
func (h *noopDrv) EncodeInt(i int64) {}
func (h *noopDrv) EncodeUint(i uint64) {}
func (h *noopDrv) EncodeBool(b bool) {}
func (h *noopDrv) EncodeFloat32(f float32) {}
func (h *noopDrv) EncodeFloat64(f float64) {}
func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {}
func (h *noopDrv) EncodeArrayStart(length int) {}
func (h *noopDrv) EncodeArrayEnd() {}
func (h *noopDrv) EncodeArrayEntrySeparator() {}
func (h *noopDrv) EncodeMapStart(length int) {}
func (h *noopDrv) EncodeMapEnd() {}
func (h *noopDrv) EncodeMapEntrySeparator() {}
func (h *noopDrv) EncodeMapKVSeparator() {}
func (h *noopDrv) EncodeString(c charEncoding, v string) {}
func (h *noopDrv) EncodeSymbol(v string) {}
func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {}
func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {}
// ---- decDriver
func (h *noopDrv) initReadNext() {}
func (h *noopDrv) CheckBreak() bool { return false }
func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false }
func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {}
func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) }
func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) }
func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) }
func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 }
func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] }
// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) }
func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }
func (h *noopDrv) ReadMapEnd() { h.mk = false }
func (h *noopDrv) ReadArrayEnd() {}
func (h *noopDrv) ReadArrayEntrySeparator() {}
func (h *noopDrv) ReadMapEntrySeparator() { h.mk = true }
func (h *noopDrv) ReadMapKVSeparator() { h.mk = false }
// toggle map/slice
func (h *noopDrv) ReadMapStart() int { h.mk = true; return h.m(10) }
func (h *noopDrv) ReadArrayStart() int { return h.m(10) }
func (h *noopDrv) IsContainerType(vt valueType) bool {
// return h.m(2) == 0
// handle kStruct
if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
h.cb = !h.cb
h.ct = vt
return h.cb
}
// go in a loop and check it.
h.ct = vt
h.cb = h.m(7) == 0
return h.cb
}
func (h *noopDrv) TryDecodeAsNil() bool {
if h.mk {
return false
} else {
return h.m(8) == 0
}
}
func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 {
return 0
}
func (h *noopDrv) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
// use h.r (random) not h.m() because h.m() could cause the same value to be given.
var sk int
if h.mk {
// if mapkey, do not support values of nil OR bytes, array, map or rawext
sk = h.r(7) + 1
} else {
sk = h.r(12)
}
switch sk {
case 0:
vt = valueTypeNil
case 1:
vt, v = valueTypeBool, false
case 2:
vt, v = valueTypeBool, true
case 3:
vt, v = valueTypeInt, h.DecodeInt(64)
case 4:
vt, v = valueTypeUint, h.DecodeUint(64)
case 5:
vt, v = valueTypeFloat, h.DecodeFloat(true)
case 6:
vt, v = valueTypeFloat, h.DecodeFloat(false)
case 7:
vt, v = valueTypeString, h.DecodeString()
case 8:
vt, v = valueTypeBytes, h.B[h.m(len(h.B))]
case 9:
vt, decodeFurther = valueTypeArray, true
case 10:
vt, decodeFurther = valueTypeMap, true
default:
vt, v = valueTypeExt, &RawExt{Tag: h.DecodeUint(64), Data: h.B[h.m(len(h.B))]}
}
h.ct = vt
return
}


@ -0,0 +1,3 @@
package codec
//go:generate bash prebuild.sh


@ -0,0 +1,191 @@
#!/bin/bash
# _needgen is a helper function to tell if we need to generate files for msgp, codecgen.
_needgen() {
local a="$1"
zneedgen=0
if [[ ! -e "$a" ]]
then
zneedgen=1
echo 1
return 0
fi
for i in `ls -1 *.go.tmpl gen.go values_test.go`
do
if [[ "$a" -ot "$i" ]]
then
zneedgen=1
echo 1
return 0
fi
done
echo 0
}
# _build generates fast-path.go and gen-helper.go.
#
# It is needed because there is some dependency between the generated code
# and the other source files. Consequently, we have to totally remove the
# generated files and put stubs in place, before calling "go run" again
# to recreate them.
_build() {
if ! [[ "${zforce}" == "1" ||
"1" == $( _needgen "fast-path.generated.go" ) ||
"1" == $( _needgen "gen-helper.generated.go" ) ||
"1" == $( _needgen "gen.generated.go" ) ||
1 == 0 ]]
then
return 0
fi
# echo "Running prebuild"
if [ "${zbak}" == "1" ]
then
# echo "Backing up old generated files"
_zts=`date '+%m%d%Y_%H%M%S'`
_gg=".generated.go"
[ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak
[ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak
# [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak
# [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak
else
rm -f fast-path.generated.go gen.generated.go gen-helper.generated.go *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
fi
cat > gen.generated.go <<EOF
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
const genDecMapTmpl = \`
EOF
cat >> gen.generated.go < gen-dec-map.go.tmpl
cat >> gen.generated.go <<EOF
\`
const genDecListTmpl = \`
EOF
cat >> gen.generated.go < gen-dec-array.go.tmpl
cat >> gen.generated.go <<EOF
\`
EOF
# All functions, variables which must exist are put in this file.
# This way, build works before we generate the right things.
cat > fast-path.generated.go <<EOF
package codec
import "reflect"
// func GenBytesToStringRO(b []byte) string { return string(b) }
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
type fastpathE struct {
rtid uintptr
rt reflect.Type
encfn func(encFnInfo, reflect.Value)
decfn func(decFnInfo, reflect.Value)
}
type fastpathA [0]fastpathE
func (x fastpathA) index(rtid uintptr) int { return -1 }
var fastpathAV fastpathA
EOF
cat > gen-from-tmpl.generated.go <<EOF
//+build ignore
package main
//import "flag"
import "ugorji.net/codec"
import "os"
func run(fnameIn, fnameOut string, safe bool) {
fin, err := os.Open(fnameIn)
if err != nil { panic(err) }
defer fin.Close()
fout, err := os.Create(fnameOut)
if err != nil { panic(err) }
defer fout.Close()
err = codec.GenInternalGoFile(fin, fout, safe)
if err != nil { panic(err) }
}
func main() {
// do not make safe/unsafe variants.
// Instead, depend on escape analysis, and place string creation and usage appropriately.
// run("unsafe.go.tmpl", "safe.generated.go", true)
// run("unsafe.go.tmpl", "unsafe.generated.go", false)
run("fast-path.go.tmpl", "fast-path.generated.go", false)
run("gen-helper.go.tmpl", "gen-helper.generated.go", false)
}
EOF
go run gen-from-tmpl.generated.go && \
rm -f gen-from-tmpl.generated.go
}
_codegenerators() {
if [[ $zforce == "1" ||
"1" == $( _needgen "values_msgp${zsfx}" )
|| "1" == $( _needgen "values_codecgen${zsfx}" ) ]]
then
true && \
echo "msgp ... " && \
msgp -tests=false -pkg=codec -o=values_msgp${zsfx} -file=$zfin && \
echo "codecgen - !unsafe ... " && \
codecgen -rt codecgen -t 'x,codecgen,!unsafe' -o values_codecgen${zsfx} $zfin && \
echo "codecgen - unsafe ... " && \
codecgen -u -rt codecgen -t 'x,codecgen,unsafe' -o values_codecgen_unsafe${zsfx} $zfin && \
echo "ffjson ... " && \
ffjson -w values_ffjson${zsfx} $zfin && \
# remove (M|Unm)arshalJSON implementations, so they don't conflict with encoding/json bench \
sed -i 's+ MarshalJSON(+ _MarshalJSON(+g' values_ffjson${zsfx} && \
sed -i 's+ UnmarshalJSON(+ _UnmarshalJSON(+g' values_ffjson${zsfx} && \
echo "generators done!" && \
true
fi
}
# _init reads the arguments and sets up the flags
_init() {
OPTIND=1
while getopts "fb" flag
do
case "x$flag" in
'xf') zforce=1;;
'xb') zbak=1;;
*) echo "prebuild.sh accepts [-fb] only"; return 1;;
esac
done
shift $((OPTIND-1))
OPTIND=1
}
# main script.
# First ensure that this is being run from the basedir (i.e. dirname of script is .)
if [ "." = `dirname $0` ]
then
zmydir=`pwd`
zfin="test_values.generated.go"
zsfx="_generated_test.go"
# rm -f *_generated_test.go
rm -f codecgen-*.go && \
_init "$@" && \
_build && \
cp $zmydir/values_test.go $zmydir/$zfin && \
_codegenerators && \
echo prebuild done successfully
rm -f $zmydir/$zfin
else
echo "Script must be run from the directory it resides in"
fi


@ -0,0 +1,29 @@
//+build x
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// These tests are used to verify msgpack and cbor implementations against their python libraries.
// If you have the python libraries installed, you can enable these tests by building
// with the 'x' tag (e.g. go test -tags x), per the //+build x line above.
import (
"testing"
)
func TestMsgpackPythonGenStreams(t *testing.T) {
doTestPythonGenStreams(t, "msgpack", testMsgpackH)
}
func TestCborPythonGenStreams(t *testing.T) {
doTestPythonGenStreams(t, "cbor", testCborH)
}
func TestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) {
doTestMsgpackRpcSpecGoClientToPythonSvc(t)
}
func TestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) {
doTestMsgpackRpcSpecPythonClientToGoSvc(t)
}

169
Godeps/_workspace/src/github.com/ugorji/go/codec/rpc.go generated vendored Normal file

@ -0,0 +1,169 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import (
"bufio"
"io"
"net/rpc"
"sync"
)
// rpcEncodeTerminator allows a handler specify a []byte terminator to send after each Encode.
//
// Some codecs like json need to put a space after each encoded value, to serve as a
// delimiter for things like numbers (else json codec will continue reading till EOF).
type rpcEncodeTerminator interface {
rpcEncodeTerminate() []byte
}
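// A minimal sketch of the contract (the handle type here is hypothetical; in
// this package a json-style handle would be the natural implementer):
//
//	func (h *hypotheticalJsonLikeHandle) rpcEncodeTerminate() []byte {
//		return []byte{' '} // a single space keeps number tokens delimited
//	}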
// Rpc provides a rpc Server or Client Codec for rpc communication.
type Rpc interface {
ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
}
// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
// used by the rpc connection. It accommodates use-cases where the connection
// should be used by rpc and non-rpc functions, e.g. streaming a file after
// sending an rpc response.
type RpcCodecBuffered interface {
BufferedReader() *bufio.Reader
BufferedWriter() *bufio.Writer
}
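// Usage sketch: codecs produced by GoRpc (below) and MsgpackSpecRpc satisfy
// RpcCodecBuffered, so the buffered reader/writer can carry non-rpc traffic on
// the same connection (srvCodec is a hypothetical rpc.ServerCodec obtained
// from GoRpc.ServerCodec):
//
//	if bc, ok := srvCodec.(RpcCodecBuffered); ok {
//		bw := bc.BufferedWriter()
//		bw.WriteString("raw payload streamed after the rpc response\n") // hypothetical payload
//		bw.Flush()
//	}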
// -------------------------------------
// rpcCodec defines the struct members and common methods.
type rpcCodec struct {
rwc io.ReadWriteCloser
dec *Decoder
enc *Encoder
bw *bufio.Writer
br *bufio.Reader
mu sync.Mutex
cls bool
h Handle
}
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
bw := bufio.NewWriter(conn)
br := bufio.NewReader(conn)
return rpcCodec{
rwc: conn,
bw: bw,
br: br,
enc: NewEncoder(bw, h),
dec: NewDecoder(br, h),
h: h,
}
}
func (c *rpcCodec) BufferedReader() *bufio.Reader {
return c.br
}
func (c *rpcCodec) BufferedWriter() *bufio.Writer {
return c.bw
}
func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
if c.cls {
return io.EOF
}
if err = c.enc.Encode(obj1); err != nil {
return
}
t, tOk := c.h.(rpcEncodeTerminator)
if tOk {
c.bw.Write(t.rpcEncodeTerminate())
}
if writeObj2 {
if err = c.enc.Encode(obj2); err != nil {
return
}
if tOk {
c.bw.Write(t.rpcEncodeTerminate())
}
}
if doFlush {
return c.bw.Flush()
}
return
}
func (c *rpcCodec) read(obj interface{}) (err error) {
if c.cls {
return io.EOF
}
//If nil is passed in, we should still attempt to read content to nowhere.
if obj == nil {
var obj2 interface{}
return c.dec.Decode(&obj2)
}
return c.dec.Decode(obj)
}
func (c *rpcCodec) Close() error {
if c.cls {
return io.EOF
}
c.cls = true
return c.rwc.Close()
}
func (c *rpcCodec) ReadResponseBody(body interface{}) error {
return c.read(body)
}
// -------------------------------------
type goRpcCodec struct {
rpcCodec
}
func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
// Must protect for concurrent access as per API
c.mu.Lock()
defer c.mu.Unlock()
return c.write(r, body, true, true)
}
func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
c.mu.Lock()
defer c.mu.Unlock()
return c.write(r, body, true, true)
}
func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
return c.read(r)
}
func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
return c.read(r)
}
func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
return c.read(body)
}
// -------------------------------------
// goRpc is the implementation of Rpc that uses the communication protocol
// as defined in net/rpc package.
type goRpc struct{}
// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
var GoRpc goRpc
func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
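// Usage sketch for GoRpc, assuming the standard library net/rpc package
// (conn and conn2 are hypothetical, already-established connections):
//
//	h := &MsgpackHandle{}
//	// server side: serve one connection with a codec-backed ServerCodec
//	go rpc.ServeCodec(GoRpc.ServerCodec(conn, h))
//	// client side: the usual net/rpc client, but speaking this codec
//	client := rpc.NewClientWithCodec(GoRpc.ClientCodec(conn2, h))
//	defer client.Close()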
var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered


@ -0,0 +1,505 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import "math"
const (
_ uint8 = iota
simpleVdNil = 1
simpleVdFalse = 2
simpleVdTrue = 3
simpleVdFloat32 = 4
simpleVdFloat64 = 5
// each lasts for 4 (ie n, n+1, n+2, n+3)
simpleVdPosInt = 8
simpleVdNegInt = 12
// containers: each lasts for 4 (ie n, n+1, n+2, ... n+7)
simpleVdString = 216
simpleVdByteArray = 224
simpleVdArray = 232
simpleVdMap = 240
simpleVdExt = 248
)
type simpleEncDriver struct {
e *Encoder
h *SimpleHandle
w encWriter
noBuiltInTypes
b [8]byte
encNoSeparator
}
func (e *simpleEncDriver) EncodeNil() {
e.w.writen1(simpleVdNil)
}
func (e *simpleEncDriver) EncodeBool(b bool) {
if b {
e.w.writen1(simpleVdTrue)
} else {
e.w.writen1(simpleVdFalse)
}
}
func (e *simpleEncDriver) EncodeFloat32(f float32) {
e.w.writen1(simpleVdFloat32)
bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
}
func (e *simpleEncDriver) EncodeFloat64(f float64) {
e.w.writen1(simpleVdFloat64)
bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f))
}
func (e *simpleEncDriver) EncodeInt(v int64) {
if v < 0 {
e.encUint(uint64(-v), simpleVdNegInt)
} else {
e.encUint(uint64(v), simpleVdPosInt)
}
}
func (e *simpleEncDriver) EncodeUint(v uint64) {
e.encUint(v, simpleVdPosInt)
}
func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
if v <= math.MaxUint8 {
e.w.writen2(bd, uint8(v))
} else if v <= math.MaxUint16 {
e.w.writen1(bd + 1)
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
} else if v <= math.MaxUint32 {
e.w.writen1(bd + 2)
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
} else {
e.w.writen1(bd + 3)
bigenHelper{e.b[:8], e.w}.writeUint64(v)
}
}
func (e *simpleEncDriver) encLen(bd byte, length int) {
if length == 0 {
e.w.writen1(bd)
} else if length <= math.MaxUint8 {
e.w.writen1(bd + 1)
e.w.writen1(uint8(length))
} else if length <= math.MaxUint16 {
e.w.writen1(bd + 2)
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length))
} else if int64(length) <= math.MaxUint32 {
e.w.writen1(bd + 3)
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length))
} else {
e.w.writen1(bd + 4)
bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length))
}
}
func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
bs := ext.WriteExt(rv)
if bs == nil {
e.EncodeNil()
return
}
e.encodeExtPreamble(uint8(xtag), len(bs))
e.w.writeb(bs)
}
func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
e.w.writeb(re.Data)
}
func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
e.encLen(simpleVdExt, length)
e.w.writen1(xtag)
}
func (e *simpleEncDriver) EncodeArrayStart(length int) {
e.encLen(simpleVdArray, length)
}
func (e *simpleEncDriver) EncodeMapStart(length int) {
e.encLen(simpleVdMap, length)
}
func (e *simpleEncDriver) EncodeString(c charEncoding, v string) {
e.encLen(simpleVdString, len(v))
e.w.writestr(v)
}
func (e *simpleEncDriver) EncodeSymbol(v string) {
e.EncodeString(c_UTF8, v)
}
func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
e.encLen(simpleVdByteArray, len(v))
e.w.writeb(v)
}
//------------------------------------
type simpleDecDriver struct {
d *Decoder
h *SimpleHandle
r decReader
bdRead bool
bdType valueType
bd byte
br bool // bytes reader
noBuiltInTypes
noStreamingCodec
decNoSeparator
b [scratchByteArrayLen]byte
}
func (d *simpleDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
d.bdType = valueTypeUnset
}
func (d *simpleDecDriver) IsContainerType(vt valueType) bool {
switch vt {
case valueTypeNil:
return d.bd == simpleVdNil
case valueTypeBytes:
const x uint8 = simpleVdByteArray
return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
case valueTypeString:
const x uint8 = simpleVdString
return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
case valueTypeArray:
const x uint8 = simpleVdArray
return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
case valueTypeMap:
const x uint8 = simpleVdMap
return d.bd == x || d.bd == x+1 || d.bd == x+2 || d.bd == x+3 || d.bd == x+4
}
d.d.errorf("isContainerType: unsupported parameter: %v", vt)
return false // "unreachable"
}
func (d *simpleDecDriver) TryDecodeAsNil() bool {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdNil {
d.bdRead = false
return true
}
return false
}
func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case simpleVdPosInt:
ui = uint64(d.r.readn1())
case simpleVdPosInt + 1:
ui = uint64(bigen.Uint16(d.r.readx(2)))
case simpleVdPosInt + 2:
ui = uint64(bigen.Uint32(d.r.readx(4)))
case simpleVdPosInt + 3:
ui = uint64(bigen.Uint64(d.r.readx(8)))
case simpleVdNegInt:
ui = uint64(d.r.readn1())
neg = true
case simpleVdNegInt + 1:
ui = uint64(bigen.Uint16(d.r.readx(2)))
neg = true
case simpleVdNegInt + 2:
ui = uint64(bigen.Uint32(d.r.readx(4)))
neg = true
case simpleVdNegInt + 3:
ui = uint64(bigen.Uint64(d.r.readx(8)))
neg = true
default:
d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
return
}
// don't do this check, because callers may only want the unsigned value.
// if ui > math.MaxInt64 {
// d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
// return
// }
return
}
func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) {
ui, neg := d.decCheckInteger()
i, overflow := chkOvf.SignedInt(ui)
if overflow {
d.d.errorf("simple: overflow converting %v to signed integer", ui)
return
}
if neg {
i = -i
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("simple: overflow integer: %v", i)
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
ui, neg := d.decCheckInteger()
if neg {
d.d.errorf("Assigning negative signed value to unsigned type")
return
}
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("simple: overflow integer: %v", ui)
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdFloat32 {
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
} else if d.bd == simpleVdFloat64 {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else {
if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
f = float64(d.DecodeInt(64))
} else {
d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
return
}
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("msgpack: float32 overflow: %v", f)
return
}
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *simpleDecDriver) DecodeBool() (b bool) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdTrue {
b = true
} else if d.bd == simpleVdFalse {
} else {
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) ReadMapStart() (length int) {
d.bdRead = false
return d.decLen()
}
func (d *simpleDecDriver) ReadArrayStart() (length int) {
d.bdRead = false
return d.decLen()
}
func (d *simpleDecDriver) decLen() int {
switch d.bd % 8 {
case 0:
return 0
case 1:
return int(d.r.readn1())
case 2:
return int(bigen.Uint16(d.r.readx(2)))
case 3:
ui := uint64(bigen.Uint32(d.r.readx(4)))
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("simple: overflow integer: %v", ui)
return 0
}
return int(ui)
case 4:
ui := bigen.Uint64(d.r.readx(8))
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("simple: overflow integer: %v", ui)
return 0
}
return int(ui)
}
d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8)
return -1
}
func (d *simpleDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.b[:], true, true))
}
func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdNil {
d.bdRead = false
return
}
clen := d.decLen()
d.bdRead = false
if zerocopy {
if d.br {
return d.r.readx(clen)
} else if len(bs) == 0 {
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
}
func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if xtag > 0xff {
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
return
}
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
realxtag = uint64(realxtag1)
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
} else {
ext.ReadExt(rv, xbs)
}
return
}
func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
l := d.decLen()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
return
}
xbs = d.r.readx(l)
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
xbs = d.DecodeBytes(nil, false, true)
default:
d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd)
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case simpleVdNil:
vt = valueTypeNil
case simpleVdFalse:
vt = valueTypeBool
v = false
case simpleVdTrue:
vt = valueTypeBool
v = true
case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
if d.h.SignedInteger {
vt = valueTypeInt
v = d.DecodeInt(64)
} else {
vt = valueTypeUint
v = d.DecodeUint(64)
}
case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
vt = valueTypeInt
v = d.DecodeInt(64)
case simpleVdFloat32:
vt = valueTypeFloat
v = d.DecodeFloat(true)
case simpleVdFloat64:
vt = valueTypeFloat
v = d.DecodeFloat(false)
case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
vt = valueTypeString
v = d.DecodeString()
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
vt = valueTypeBytes
v = d.DecodeBytes(nil, false, false)
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
vt = valueTypeExt
l := d.decLen()
var re RawExt
re.Tag = uint64(d.r.readn1())
re.Data = d.r.readx(l)
v = &re
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
vt = valueTypeArray
decodeFurther = true
case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
vt = valueTypeMap
decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
return
}
if !decodeFurther {
d.bdRead = false
}
return
}
//------------------------------------
// SimpleHandle is a Handle for a very simple encoding format.
//
// simple is a simplistic codec similar to binc, but not as compact.
// - Encoding of a value is always preceded by the descriptor byte (bd)
// - True, false, nil are encoded fully in 1 byte (the descriptor)
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
// - Length of containers (strings, bytes, array, map, extensions)
// are encoded in 0, 1, 2, 4 or 8 bytes.
// Zero-length containers have no length encoded.
// For others, the number of length bytes is given by pow(2, (bd%8)-1)
// - maps are encoded as [bd] [length] [[key][value]]...
// - arrays are encoded as [bd] [length] [value]...
// - extensions are encoded as [bd] [length] [tag] [byte]...
// - strings/bytearrays are encoded as [bd] [length] [byte]...
//
// The full spec will be published soon.
type SimpleHandle struct {
BasicHandle
binaryEncodingType
}
func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
return &simpleEncDriver{e: e, w: e.w, h: h}
}
func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
var _ decDriver = (*simpleDecDriver)(nil)
var _ encDriver = (*simpleEncDriver)(nil)
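// Usage sketch and a small worked example of the format described above,
// assuming the package's exported NewEncoderBytes helper:
//
//	var buf []byte
//	if err := NewEncoderBytes(&buf, &SimpleHandle{}).Encode("abc"); err != nil {
//		// handle encode error
//	}
//	// buf is now [0xd9 0x03 0x61 0x62 0x63]:
//	//   0xd9 = simpleVdString+1 (length follows in 1 byte),
//	//   0x03 = the length, then the three string bytes 'a' 'b' 'c'.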


@ -0,0 +1,639 @@
[
{
"cbor": "AA==",
"hex": "00",
"roundtrip": true,
"decoded": 0
},
{
"cbor": "AQ==",
"hex": "01",
"roundtrip": true,
"decoded": 1
},
{
"cbor": "Cg==",
"hex": "0a",
"roundtrip": true,
"decoded": 10
},
{
"cbor": "Fw==",
"hex": "17",
"roundtrip": true,
"decoded": 23
},
{
"cbor": "GBg=",
"hex": "1818",
"roundtrip": true,
"decoded": 24
},
{
"cbor": "GBk=",
"hex": "1819",
"roundtrip": true,
"decoded": 25
},
{
"cbor": "GGQ=",
"hex": "1864",
"roundtrip": true,
"decoded": 100
},
{
"cbor": "GQPo",
"hex": "1903e8",
"roundtrip": true,
"decoded": 1000
},
{
"cbor": "GgAPQkA=",
"hex": "1a000f4240",
"roundtrip": true,
"decoded": 1000000
},
{
"cbor": "GwAAAOjUpRAA",
"hex": "1b000000e8d4a51000",
"roundtrip": true,
"decoded": 1000000000000
},
{
"cbor": "G///////////",
"hex": "1bffffffffffffffff",
"roundtrip": true,
"decoded": 18446744073709551615
},
{
"cbor": "wkkBAAAAAAAAAAA=",
"hex": "c249010000000000000000",
"roundtrip": true,
"decoded": 18446744073709551616
},
{
"cbor": "O///////////",
"hex": "3bffffffffffffffff",
"roundtrip": true,
"decoded": -18446744073709551616,
"skip": true
},
{
"cbor": "w0kBAAAAAAAAAAA=",
"hex": "c349010000000000000000",
"roundtrip": true,
"decoded": -18446744073709551617
},
{
"cbor": "IA==",
"hex": "20",
"roundtrip": true,
"decoded": -1
},
{
"cbor": "KQ==",
"hex": "29",
"roundtrip": true,
"decoded": -10
},
{
"cbor": "OGM=",
"hex": "3863",
"roundtrip": true,
"decoded": -100
},
{
"cbor": "OQPn",
"hex": "3903e7",
"roundtrip": true,
"decoded": -1000
},
{
"cbor": "+QAA",
"hex": "f90000",
"roundtrip": true,
"decoded": 0.0
},
{
"cbor": "+YAA",
"hex": "f98000",
"roundtrip": true,
"decoded": -0.0
},
{
"cbor": "+TwA",
"hex": "f93c00",
"roundtrip": true,
"decoded": 1.0
},
{
"cbor": "+z/xmZmZmZma",
"hex": "fb3ff199999999999a",
"roundtrip": true,
"decoded": 1.1
},
{
"cbor": "+T4A",
"hex": "f93e00",
"roundtrip": true,
"decoded": 1.5
},
{
"cbor": "+Xv/",
"hex": "f97bff",
"roundtrip": true,
"decoded": 65504.0
},
{
"cbor": "+kfDUAA=",
"hex": "fa47c35000",
"roundtrip": true,
"decoded": 100000.0
},
{
"cbor": "+n9///8=",
"hex": "fa7f7fffff",
"roundtrip": true,
"decoded": 3.4028234663852886e+38
},
{
"cbor": "+3435DyIAHWc",
"hex": "fb7e37e43c8800759c",
"roundtrip": true,
"decoded": 1.0e+300
},
{
"cbor": "+QAB",
"hex": "f90001",
"roundtrip": true,
"decoded": 5.960464477539063e-08
},
{
"cbor": "+QQA",
"hex": "f90400",
"roundtrip": true,
"decoded": 6.103515625e-05
},
{
"cbor": "+cQA",
"hex": "f9c400",
"roundtrip": true,
"decoded": -4.0
},
{
"cbor": "+8AQZmZmZmZm",
"hex": "fbc010666666666666",
"roundtrip": true,
"decoded": -4.1
},
{
"cbor": "+XwA",
"hex": "f97c00",
"roundtrip": true,
"diagnostic": "Infinity"
},
{
"cbor": "+X4A",
"hex": "f97e00",
"roundtrip": true,
"diagnostic": "NaN"
},
{
"cbor": "+fwA",
"hex": "f9fc00",
"roundtrip": true,
"diagnostic": "-Infinity"
},
{
"cbor": "+n+AAAA=",
"hex": "fa7f800000",
"roundtrip": false,
"diagnostic": "Infinity"
},
{
"cbor": "+n/AAAA=",
"hex": "fa7fc00000",
"roundtrip": false,
"diagnostic": "NaN"
},
{
"cbor": "+v+AAAA=",
"hex": "faff800000",
"roundtrip": false,
"diagnostic": "-Infinity"
},
{
"cbor": "+3/wAAAAAAAA",
"hex": "fb7ff0000000000000",
"roundtrip": false,
"diagnostic": "Infinity"
},
{
"cbor": "+3/4AAAAAAAA",
"hex": "fb7ff8000000000000",
"roundtrip": false,
"diagnostic": "NaN"
},
{
"cbor": "+//wAAAAAAAA",
"hex": "fbfff0000000000000",
"roundtrip": false,
"diagnostic": "-Infinity"
},
{
"cbor": "9A==",
"hex": "f4",
"roundtrip": true,
"decoded": false
},
{
"cbor": "9Q==",
"hex": "f5",
"roundtrip": true,
"decoded": true
},
{
"cbor": "9g==",
"hex": "f6",
"roundtrip": true,
"decoded": null
},
{
"cbor": "9w==",
"hex": "f7",
"roundtrip": true,
"diagnostic": "undefined"
},
{
"cbor": "8A==",
"hex": "f0",
"roundtrip": true,
"diagnostic": "simple(16)"
},
{
"cbor": "+Bg=",
"hex": "f818",
"roundtrip": true,
"diagnostic": "simple(24)"
},
{
"cbor": "+P8=",
"hex": "f8ff",
"roundtrip": true,
"diagnostic": "simple(255)"
},
{
"cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==",
"hex": "c074323031332d30332d32315432303a30343a30305a",
"roundtrip": true,
"diagnostic": "0(\"2013-03-21T20:04:00Z\")"
},
{
"cbor": "wRpRS2ew",
"hex": "c11a514b67b0",
"roundtrip": true,
"diagnostic": "1(1363896240)"
},
{
"cbor": "wftB1FLZ7CAAAA==",
"hex": "c1fb41d452d9ec200000",
"roundtrip": true,
"diagnostic": "1(1363896240.5)"
},
{
"cbor": "10QBAgME",
"hex": "d74401020304",
"roundtrip": true,
"diagnostic": "23(h'01020304')"
},
{
"cbor": "2BhFZElFVEY=",
"hex": "d818456449455446",
"roundtrip": true,
"diagnostic": "24(h'6449455446')"
},
{
"cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==",
"hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d",
"roundtrip": true,
"diagnostic": "32(\"http://www.example.com\")"
},
{
"cbor": "QA==",
"hex": "40",
"roundtrip": true,
"diagnostic": "h''"
},
{
"cbor": "RAECAwQ=",
"hex": "4401020304",
"roundtrip": true,
"diagnostic": "h'01020304'"
},
{
"cbor": "YA==",
"hex": "60",
"roundtrip": true,
"decoded": ""
},
{
"cbor": "YWE=",
"hex": "6161",
"roundtrip": true,
"decoded": "a"
},
{
"cbor": "ZElFVEY=",
"hex": "6449455446",
"roundtrip": true,
"decoded": "IETF"
},
{
"cbor": "YiJc",
"hex": "62225c",
"roundtrip": true,
"decoded": "\"\\"
},
{
"cbor": "YsO8",
"hex": "62c3bc",
"roundtrip": true,
"decoded": "ü"
},
{
"cbor": "Y+awtA==",
"hex": "63e6b0b4",
"roundtrip": true,
"decoded": "水"
},
{
"cbor": "ZPCQhZE=",
"hex": "64f0908591",
"roundtrip": true,
"decoded": "𐅑"
},
{
"cbor": "gA==",
"hex": "80",
"roundtrip": true,
"decoded": [
]
},
{
"cbor": "gwECAw==",
"hex": "83010203",
"roundtrip": true,
"decoded": [
1,
2,
3
]
},
{
"cbor": "gwGCAgOCBAU=",
"hex": "8301820203820405",
"roundtrip": true,
"decoded": [
1,
[
2,
3
],
[
4,
5
]
]
},
{
"cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=",
"hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819",
"roundtrip": true,
"decoded": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25
]
},
{
"cbor": "oA==",
"hex": "a0",
"roundtrip": true,
"decoded": {
}
},
{
"cbor": "ogECAwQ=",
"hex": "a201020304",
"roundtrip": true,
"skip": true,
"diagnostic": "{1: 2, 3: 4}"
},
{
"cbor": "omFhAWFiggID",
"hex": "a26161016162820203",
"roundtrip": true,
"decoded": {
"a": 1,
"b": [
2,
3
]
}
},
{
"cbor": "gmFhoWFiYWM=",
"hex": "826161a161626163",
"roundtrip": true,
"decoded": [
"a",
{
"b": "c"
}
]
},
{
"cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF",
"hex": "a56161614161626142616361436164614461656145",
"roundtrip": true,
"decoded": {
"a": "A",
"b": "B",
"c": "C",
"d": "D",
"e": "E"
}
},
{
"cbor": "X0IBAkMDBAX/",
"hex": "5f42010243030405ff",
"roundtrip": false,
"skip": true,
"diagnostic": "(_ h'0102', h'030405')"
},
{
"cbor": "f2VzdHJlYWRtaW5n/w==",
"hex": "7f657374726561646d696e67ff",
"roundtrip": false,
"decoded": "streaming"
},
{
"cbor": "n/8=",
"hex": "9fff",
"roundtrip": false,
"decoded": [
]
},
{
"cbor": "nwGCAgOfBAX//w==",
"hex": "9f018202039f0405ffff",
"roundtrip": false,
"decoded": [
1,
[
2,
3
],
[
4,
5
]
]
},
{
"cbor": "nwGCAgOCBAX/",
"hex": "9f01820203820405ff",
"roundtrip": false,
"decoded": [
1,
[
2,
3
],
[
4,
5
]
]
},
{
"cbor": "gwGCAgOfBAX/",
"hex": "83018202039f0405ff",
"roundtrip": false,
"decoded": [
1,
[
2,
3
],
[
4,
5
]
]
},
{
"cbor": "gwGfAgP/ggQF",
"hex": "83019f0203ff820405",
"roundtrip": false,
"decoded": [
1,
[
2,
3
],
[
4,
5
]
]
},
{
"cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=",
"hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff",
"roundtrip": false,
"decoded": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25
]
},
{
"cbor": "v2FhAWFinwID//8=",
"hex": "bf61610161629f0203ffff",
"roundtrip": false,
"decoded": {
"a": 1,
"b": [
2,
3
]
}
},
{
"cbor": "gmFhv2FiYWP/",
"hex": "826161bf61626163ff",
"roundtrip": false,
"decoded": [
"a",
{
"b": "c"
}
]
},
{
"cbor": "v2NGdW71Y0FtdCH/",
"hex": "bf6346756ef563416d7421ff",
"roundtrip": false,
"decoded": {
"Fun": true,
"Amt": -2
}
}
]


@ -0,0 +1,119 @@
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# pip install --user msgpack-python
# pip install --user cbor
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464646464.0,
False,
True,
None,
u"someday",
u"",
u"bytestring",
1328176922000002000,
-2206187877999998000,
270,
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": "True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": "1234567890" },
{ True: "true", 8: False, "false": 0 }
]
l = []
l.extend(l0)
l.append(l0)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])


@ -0,0 +1,193 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import (
"time"
)
var (
timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
)
// EncodeTime encodes a time.Time as a []byte, including
// information on the instant in time and UTC offset.
//
// Format Description
//
// A timestamp is composed of 3 components:
//
// - secs: signed integer representing seconds since unix epoch
// - nsecs: unsigned integer representing fractional seconds as a
// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
// - tz: signed integer representing timezone offset in minutes east of UTC,
// and a dst (daylight savings time) flag
//
// When encoding a timestamp, the first byte is the descriptor, which
// defines which components are encoded and how many bytes are used to
// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
// is not encoded in the byte array explicitly*.
//
// Descriptor 8 bits are of the form `A B C DDD EE`:
// A: Is secs component encoded? 1 = true
// B: Is nsecs component encoded? 1 = true
// C: Is tz component encoded? 1 = true
// DDD: Number of extra bytes for secs (range 0-7).
// If A = 1, secs encoded in DDD+1 bytes.
// If A = 0, secs is not encoded, and is assumed to be 0.
// If A = 1, then we need at least 1 byte to encode secs.
// DDD says the number of extra bytes beyond that 1.
// E.g. if DDD=0, then secs is represented in 1 byte.
// if DDD=2, then secs is represented in 3 bytes.
// EE: Number of extra bytes for nsecs (range 0-3).
// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
//
// Following the descriptor bytes, subsequent bytes are:
//
// secs component encoded in `DDD + 1` bytes (if A == 1)
// nsecs component encoded in `EE + 1` bytes (if B == 1)
// tz component encoded in 2 bytes (if C == 1)
//
// secs and nsecs components are integers encoded in a BigEndian
// two's-complement encoding format.
//
// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
// Least significant bit 0 are described below:
//
// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
// Bit 15 = have_dst: set to 1 if we set the dst flag.
// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
//
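// Worked example (matching the decode path below): the stream
//	0x84 0x0e 0x10
// has descriptor 0x84 = 1000 0100b, i.e. A=1 (secs present), B=0, C=0,
// DDD=001 (secs encoded in 1+1 = 2 bytes), EE=00. The following bytes
// 0x0e 0x10 are the big-endian secs value 3600, so the timestamp decodes
// to time.Unix(3600, 0).UTC().
//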
func encodeTime(t time.Time) []byte {
//t := rv.Interface().(time.Time)
tsecs, tnsecs := t.Unix(), t.Nanosecond()
var (
bd byte
btmp [8]byte
bs [16]byte
i int = 1
)
l := t.Location()
if l == time.UTC {
l = nil
}
if tsecs != 0 {
bd = bd | 0x80
bigen.PutUint64(btmp[:], uint64(tsecs))
f := pruneSignExt(btmp[:], tsecs >= 0)
bd = bd | (byte(7-f) << 2)
copy(bs[i:], btmp[f:])
i = i + (8 - f)
}
if tnsecs != 0 {
bd = bd | 0x40
bigen.PutUint32(btmp[:4], uint32(tnsecs))
f := pruneSignExt(btmp[:4], true)
bd = bd | byte(3-f)
copy(bs[i:], btmp[f:4])
i = i + (4 - f)
}
if l != nil {
bd = bd | 0x20
// Note that Go Libs do not give access to dst flag.
_, zoneOffset := t.Zone()
//zoneName, zoneOffset := t.Zone()
zoneOffset /= 60
z := uint16(zoneOffset)
bigen.PutUint16(btmp[:2], z)
// clear dst flags
bs[i] = btmp[0] & 0x3f
bs[i+1] = btmp[1]
i = i + 2
}
bs[0] = bd
return bs[0:i]
}
// DecodeTime decodes a []byte into a time.Time.
func decodeTime(bs []byte) (tt time.Time, err error) {
bd := bs[0]
var (
tsec int64
tnsec uint32
tz uint16
i byte = 1
i2 byte
n byte
)
if bd&(1<<7) != 0 {
var btmp [8]byte
n = ((bd >> 2) & 0x7) + 1
i2 = i + n
copy(btmp[8-n:], bs[i:i2])
//if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
if bs[i]&(1<<7) != 0 {
copy(btmp[0:8-n], bsAll0xff)
//for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
}
i = i2
tsec = int64(bigen.Uint64(btmp[:]))
}
if bd&(1<<6) != 0 {
var btmp [4]byte
n = (bd & 0x3) + 1
i2 = i + n
copy(btmp[4-n:], bs[i:i2])
i = i2
tnsec = bigen.Uint32(btmp[:])
}
if bd&(1<<5) == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
return
}
// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as the zone name.
// However, we need a name here, so it can be shown when the time is printed.
// The zone name is of the form: UTC-08:00.
// Note that Go's standard library does not expose the dst flag, so we ignore the dst bits.
i2 = i + 2
tz = bigen.Uint16(bs[i:i2])
i = i2
// sign extend sign bit into top 2 MSB (which were dst bits):
if tz&(1<<13) == 0 { // positive
tz = tz & 0x3fff //clear 2 MSBs: dst bits
} else { // negative
tz = tz | 0xc000 //set 2 MSBs: dst bits
//tzname[3] = '-' (TODO: verify. this works here)
}
tzint := int16(tz)
if tzint == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
} else {
// For Go Time, do not use a descriptive timezone.
// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
// The offset alone is sufficient when the zone is not UTC and the zone name is unknown.
// var zoneName = timeLocUTCName(tzint)
tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
}
return
}
func timeLocUTCName(tzint int16) string {
if tzint == 0 {
return "UTC"
}
var tzname = []byte("UTC+00:00")
//tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
//tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
var tzhr, tzmin int16
if tzint < 0 {
tzname[3] = '-' // (TODO: verify. this works here)
tzhr, tzmin = -tzint/60, (-tzint)%60
} else {
tzhr, tzmin = tzint/60, tzint%60
}
tzname[4] = timeDigits[tzhr/10]
tzname[5] = timeDigits[tzhr%10]
tzname[7] = timeDigits[tzmin/10]
tzname[8] = timeDigits[tzmin%10]
return string(tzname)
//return time.FixedZone(string(tzname), int(tzint)*60)
}


@ -0,0 +1,198 @@
// // +build testing
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// This file contains values used by tests and benchmarks.
// JSON/BSON do not like maps with keys that are not strings,
// so we only use maps with string keys here.
import (
"math"
"time"
)
var testStrucTime = time.Date(2012, 2, 2, 2, 2, 2, 2000, time.UTC).UTC()
type AnonInTestStruc struct {
AS string
AI64 int64
AI16 int16
AUi64 uint64
ASslice []string
AI64slice []int64
AF64slice []float64
// AMI32U32 map[int32]uint32
// AMU32F64 map[uint32]float64 // json/bson do not like it
AMSU16 map[string]uint16
}
type AnonInTestStrucIntf struct {
Islice []interface{}
Ms map[string]interface{}
Nintf interface{} //don't set this, so we can test for nil
T time.Time
}
type TestStruc struct {
_struct struct{} `codec:",omitempty"` //set omitempty for every field
S string
I64 int64
I16 int16
Ui64 uint64
Ui8 uint8
B bool
By uint8 // byte: msgp doesn't like byte
Sslice []string
I64slice []int64
I16slice []int16
Ui64slice []uint64
Ui8slice []uint8
Bslice []bool
Byslice []byte
Iptrslice []*int64
AnonInTestStruc
//M map[interface{}]interface{} `json:"-",bson:"-"`
Msi64 map[string]int64
// make this a ptr, so that it could be set or not.
// for comparison (e.g. with msgp), give it a struct tag (so it is not inlined),
// make this one omitempty (so it is omitted if nil).
*AnonInTestStrucIntf `codec:",omitempty"`
Nmap map[string]bool //don't set this, so we can test for nil
Nslice []byte //don't set this, so we can test for nil
Nint64 *int64 //don't set this, so we can test for nil
Mtsptr map[string]*TestStruc
Mts map[string]TestStruc
Its []*TestStruc
Nteststruc *TestStruc
}
// small struct for testing that codecgen works for unexported types
type tLowerFirstLetter struct {
I int
u uint64
S string
b []byte
}
func newTestStruc(depth int, bench bool, useInterface, useStringKeyOnly bool) (ts *TestStruc) {
var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464
ts = &TestStruc{
S: "some string",
I64: math.MaxInt64 * 2 / 3, // 64,
I16: 1616,
Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it
Ui8: 160,
B: true,
By: 5,
Sslice: []string{"one", "two", "three"},
I64slice: []int64{1111, 2222, 3333},
I16slice: []int16{44, 55, 66},
Ui64slice: []uint64{12121212, 34343434, 56565656},
Ui8slice: []uint8{210, 211, 212},
Bslice: []bool{true, false, true, false},
Byslice: []byte{13, 14, 15},
Msi64: map[string]int64{
"one": 1,
"two": 2,
},
AnonInTestStruc: AnonInTestStruc{
// There's more leeway in altering this.
AS: "A-String",
AI64: -64646464,
AI16: 1616,
AUi64: 64646464,
// (U+1D11E)G-clef character may be represented in json as "\uD834\uDD1E".
// single reverse solidus character may be represented in json as "\u005C".
// include these in ASslice below.
ASslice: []string{"Aone", "Atwo", "Athree",
"Afour.reverse_solidus.\u005c", "Afive.Gclef.\U0001d11E"},
AI64slice: []int64{1, -22, 333, -4444, 55555, -666666},
AMSU16: map[string]uint16{"1": 1, "22": 2, "333": 3, "4444": 4},
AF64slice: []float64{11.11e-11, 22.22E+22, 33.33E-33, 44.44e+44, 555.55E-6, 666.66E6},
},
}
if useInterface {
ts.AnonInTestStrucIntf = &AnonInTestStrucIntf{
Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)},
Ms: map[string]interface{}{
"true": "true",
"int64(9)": false,
},
T: testStrucTime,
}
}
//For benchmarks, some things will not work.
if !bench {
//json and bson require string keys in maps
//ts.M = map[interface{}]interface{}{
// true: "true",
// int8(9): false,
//}
//gob cannot encode nil in element in array (encodeArray: nil element)
ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil}
// ts.Iptrslice = nil
}
if !useStringKeyOnly {
// ts.AnonInTestStruc.AMU32F64 = map[uint32]float64{1: 1, 2: 2, 3: 3} // Json/Bson barf
}
if depth > 0 {
depth--
if ts.Mtsptr == nil {
ts.Mtsptr = make(map[string]*TestStruc)
}
if ts.Mts == nil {
ts.Mts = make(map[string]TestStruc)
}
ts.Mtsptr["0"] = newTestStruc(depth, bench, useInterface, useStringKeyOnly)
ts.Mts["0"] = *(ts.Mtsptr["0"])
ts.Its = append(ts.Its, ts.Mtsptr["0"])
}
return
}
// Some other types
type Sstring string
type Bbool bool
type Sstructsmall struct {
A int
}
type Sstructbig struct {
A int
B bool
c string
// Sval Sstruct
Ssmallptr *Sstructsmall
Ssmall *Sstructsmall
Sptr *Sstructbig
}
type SstructbigMapBySlice struct {
_struct struct{} `codec:",toarray"`
A int
B bool
c string
// Sval Sstruct
Ssmallptr *Sstructsmall
Ssmall *Sstructsmall
Sptr *Sstructbig
}
type Sinterface interface {
Noop()
}


@ -76,6 +76,13 @@ type Config struct {
// If CheckRedirect is nil, the Client uses its default policy,
// which is to stop after 10 consecutive requests.
CheckRedirect CheckRedirectFunc
// Username specifies the user credential to add as an authorization header to the request.
Username string
// Password is the password for the specified user to add as an authorization header
// to the request.
Password string
}
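//
// A minimal usage sketch for these fields; the endpoint, username, and
// password are hypothetical placeholders, and a real caller would typically
// also set Transport:
//
//	cfg := client.Config{
//		Endpoints: []string{"http://127.0.0.1:4001"},
//		Username:  "root",
//		Password:  "secret",
//	}
//	c, err := client.New(cfg)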
func (cfg *Config) transport() CancelableTransport {
@ -122,7 +129,15 @@ type Client interface {
}
func New(cfg Config) (Client, error) {
c := &httpClusterClient{clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect())}
c := &httpClusterClient{
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect()),
}
if cfg.Username != "" {
c.credentials = &credentials{
username: cfg.Username,
password: cfg.Password,
}
}
if err := c.reset(cfg.Endpoints); err != nil {
return nil, err
}
@ -145,6 +160,11 @@ func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc) httpClie
}
}
type credentials struct {
username string
password string
}
type httpClientFactory func(url.URL) httpClient
type httpAction interface {
@ -154,6 +174,7 @@ type httpAction interface {
type httpClusterClient struct {
clientFactory httpClientFactory
endpoints []url.URL
credentials *credentials
sync.RWMutex
}
@ -177,10 +198,18 @@ func (c *httpClusterClient) reset(eps []string) error {
}
func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
action := act
c.RLock()
leps := len(c.endpoints)
eps := make([]url.URL, leps)
n := copy(eps, c.endpoints)
if c.credentials != nil {
action = &authedAction{
act: act,
credentials: *c.credentials,
}
}
c.RUnlock()
if leps == 0 {
@ -197,7 +226,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
for _, ep := range eps {
hc := c.clientFactory(ep)
resp, body, err = hc.Do(ctx, act)
resp, body, err = hc.Do(ctx, action)
if err != nil {
if err == context.DeadlineExceeded || err == context.Canceled {
return nil, nil, err
@ -313,6 +342,17 @@ func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Respon
return resp, body, err
}
type authedAction struct {
act httpAction
credentials credentials
}
func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
r := a.act.HTTPRequest(url)
r.SetBasicAuth(a.credentials.username, a.credentials.password)
return r
}
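// With hypothetical credentials ("root", "secret"), SetBasicAuth above adds
// the header `Authorization: Basic cm9vdDpzZWNyZXQ=` (base64 of
// "root:secret") to every request issued through this action.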
type redirectFollowingHTTPClient struct {
client httpClient
checkRedirect CheckRedirectFunc

235
client/security_role.go Normal file

@ -0,0 +1,235 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
)
type Role struct {
Role string `json:"role"`
Permissions Permissions `json:"permissions"`
Grant *Permissions `json:"grant,omitempty"`
Revoke *Permissions `json:"revoke,omitempty"`
}
type Permissions struct {
KV rwPermission `json:"kv"`
}
type rwPermission struct {
Read []string `json:"read"`
Write []string `json:"write"`
}
type PermissionType int
const (
ReadPermission PermissionType = iota
WritePermission
ReadWritePermission
)
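// As an illustration (hypothetical role and prefix), a grant request built by
// GrantRoleKV below for role "fleet", with read-write access to /fleet/*,
// marshals to JSON of the form:
//
//	{"role":"fleet",
//	 "permissions":{"kv":{"read":null,"write":null}},
//	 "grant":{"kv":{"read":["/fleet/*"],"write":["/fleet/*"]}}}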
// NewSecurityRoleAPI constructs a new SecurityRoleAPI that uses HTTP to
// interact with etcd's role creation and modification features.
func NewSecurityRoleAPI(c Client) SecurityRoleAPI {
return &httpSecurityRoleAPI{
client: c,
}
}
type SecurityRoleAPI interface {
// Add a role.
AddRole(ctx context.Context, role string) error
// Remove a role.
RemoveRole(ctx context.Context, role string) error
// Get role details.
GetRole(ctx context.Context, role string) (*Role, error)
// Grant a role some permission prefixes for the KV store.
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// Revoke some permission prefixes for a role on the KV store.
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// List roles.
ListRoles(ctx context.Context) ([]string, error)
}
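// exampleGrantReadWrite is a minimal usage sketch for SecurityRoleAPI,
// assuming a Client c built via New and a caller-supplied ctx; the role name
// "fleet" and the key prefix "/fleet/*" are hypothetical placeholders.
func exampleGrantReadWrite(ctx context.Context, c Client) (*Role, error) {
    roles := NewSecurityRoleAPI(c)
    // Create the role, then grant it read-write access under the prefix.
    if err := roles.AddRole(ctx, "fleet"); err != nil {
        return nil, err
    }
    return roles.GrantRoleKV(ctx, "fleet", []string{"/fleet/*"}, ReadWritePermission)
}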
type httpSecurityRoleAPI struct {
client httpClient
}
type securityRoleAPIAction struct {
verb string
name string
role *Role
}
type securityRoleAPIList struct{}
func (list *securityRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2SecurityURL(ep, "roles", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *securityRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2SecurityURL(ep, "roles", l.name)
if l.role == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.role)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (r *httpSecurityRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
resp, body, err := r.client.Do(ctx, &securityRoleAPIList{})
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var roleList struct {
Roles []string `json:"roles"`
}
err = json.Unmarshal(body, &roleList)
if err != nil {
return nil, err
}
return roleList.Roles, nil
}
func (r *httpSecurityRoleAPI) AddRole(ctx context.Context, rolename string) error {
role := &Role{
Role: rolename,
}
return r.addRemoveRole(ctx, &securityRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpSecurityRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
return r.addRemoveRole(ctx, &securityRoleAPIAction{
verb: "DELETE",
name: rolename,
})
}
func (r *httpSecurityRoleAPI) addRemoveRole(ctx context.Context, req *securityRoleAPIAction) error {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec securityError
err := json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (r *httpSecurityRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
return r.modRole(ctx, &securityRoleAPIAction{
verb: "GET",
name: rolename,
})
}
func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
var out rwPermission
switch permType {
case ReadPermission:
out.Read = prefixes
case WritePermission:
out.Write = prefixes
case ReadWritePermission:
out.Read = prefixes
out.Write = prefixes
}
return out
}
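// For example, buildRWPermission([]string{"/foo/*"}, ReadWritePermission)
// (with a hypothetical prefix) yields
// rwPermission{Read: []string{"/foo/*"}, Write: []string{"/foo/*"}}.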
func (r *httpSecurityRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Grant: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &securityRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpSecurityRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Revoke: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &securityRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpSecurityRoleAPI) modRole(ctx context.Context, req *securityRoleAPIAction) (*Role, error) {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec securityError
err := json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var role Role
err = json.Unmarshal(body, &role)
if err != nil {
return nil, err
}
return &role, nil
}

297
client/security_user.go Normal file

@ -0,0 +1,297 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"path"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
)
var (
defaultV2SecurityPrefix = "/v2/security"
)
type User struct {
User string `json:"user"`
Password string `json:"password,omitempty"`
Roles []string `json:"roles"`
Grant []string `json:"grant,omitempty"`
Revoke []string `json:"revoke,omitempty"`
}
func v2SecurityURL(ep url.URL, action string, name string) *url.URL {
if name != "" {
ep.Path = path.Join(ep.Path, defaultV2SecurityPrefix, action, name)
return &ep
}
ep.Path = path.Join(ep.Path, defaultV2SecurityPrefix, action)
return &ep
}
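// For a hypothetical endpoint http://127.0.0.1:4001, v2SecurityURL(ep, "users", "alice")
// yields a URL with path /v2/security/users/alice, and v2SecurityURL(ep, "enable", "")
// yields /v2/security/enable.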
// NewSecurityAPI constructs a new SecurityAPI that uses HTTP to
// interact with etcd's general security features.
func NewSecurityAPI(c Client) SecurityAPI {
return &httpSecurityAPI{
client: c,
}
}
type SecurityAPI interface {
// Enable security.
Enable(ctx context.Context) error
// Disable security.
Disable(ctx context.Context) error
}
type httpSecurityAPI struct {
client httpClient
}
func (s *httpSecurityAPI) Enable(ctx context.Context) error {
return s.enableDisable(ctx, &securityAPIAction{"PUT"})
}
func (s *httpSecurityAPI) Disable(ctx context.Context) error {
return s.enableDisable(ctx, &securityAPIAction{"DELETE"})
}
func (s *httpSecurityAPI) enableDisable(ctx context.Context, req httpAction) error {
resp, body, err := s.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec securityError
err := json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
type securityAPIAction struct {
verb string
}
func (l *securityAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2SecurityURL(ep, "enable", "")
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
type securityError struct {
Message string `json:"message"`
Code int `json:"-"`
}
func (e securityError) Error() string {
return e.Message
}
// NewSecurityUserAPI constructs a new SecurityUserAPI that uses HTTP to
// interact with etcd's user creation and modification features.
func NewSecurityUserAPI(c Client) SecurityUserAPI {
return &httpSecurityUserAPI{
client: c,
}
}
type SecurityUserAPI interface {
// Add a user.
AddUser(ctx context.Context, username string, password string) error
// Remove a user.
RemoveUser(ctx context.Context, username string) error
// Get user details.
GetUser(ctx context.Context, username string) (*User, error)
// Grant a user some permission roles.
GrantUser(ctx context.Context, username string, roles []string) (*User, error)
// Revoke some permission roles from a user.
RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
// Change the user's password.
ChangePassword(ctx context.Context, username string, password string) (*User, error)
// List users.
ListUsers(ctx context.Context) ([]string, error)
}
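// exampleAddAndGrantUser is a minimal usage sketch for SecurityUserAPI,
// assuming a Client c built via New and a caller-supplied ctx; the username,
// password, and role name are hypothetical placeholders.
func exampleAddAndGrantUser(ctx context.Context, c Client) (*User, error) {
    users := NewSecurityUserAPI(c)
    if err := users.AddUser(ctx, "alice", "s3cret"); err != nil {
        return nil, err
    }
    // Grant an existing role (created via SecurityRoleAPI) to the new user.
    return users.GrantUser(ctx, "alice", []string{"fleet"})
}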
type httpSecurityUserAPI struct {
client httpClient
}
type securityUserAPIAction struct {
verb string
username string
user *User
}
type securityUserAPIList struct{}
func (list *securityUserAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2SecurityURL(ep, "users", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *securityUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2SecurityURL(ep, "users", l.username)
if l.user == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.user)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (u *httpSecurityUserAPI) ListUsers(ctx context.Context) ([]string, error) {
resp, body, err := u.client.Do(ctx, &securityUserAPIList{})
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec securityError
err := json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var userList struct {
Users []string `json:"users"`
}
err = json.Unmarshal(body, &userList)
if err != nil {
return nil, err
}
return userList.Users, nil
}
func (u *httpSecurityUserAPI) AddUser(ctx context.Context, username string, password string) error {
user := &User{
User: username,
Password: password,
}
return u.addRemoveUser(ctx, &securityUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpSecurityUserAPI) RemoveUser(ctx context.Context, username string) error {
return u.addRemoveUser(ctx, &securityUserAPIAction{
verb: "DELETE",
username: username,
})
}
func (u *httpSecurityUserAPI) addRemoveUser(ctx context.Context, req *securityUserAPIAction) error {
resp, body, err := u.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec securityError
err := json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (u *httpSecurityUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
return u.modUser(ctx, &securityUserAPIAction{
verb: "GET",
username: username,
})
}
func (u *httpSecurityUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
user := &User{
User: username,
Grant: roles,
}
return u.modUser(ctx, &securityUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpSecurityUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
user := &User{
User: username,
Revoke: roles,
}
return u.modUser(ctx, &securityUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpSecurityUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
user := &User{
User: username,
Password: password,
}
return u.modUser(ctx, &securityUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpSecurityUserAPI) modUser(ctx context.Context, req *securityUserAPIAction) (*User, error) {
resp, body, err := u.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec securityError
err := json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var user User
err = json.Unmarshal(body, &user)
if err != nil {
return nil, err
}
return &user, nil
}


@ -20,6 +20,7 @@ import (
"os"
"strings"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bgentry/speakeasy"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codegangsta/cli"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd"
)
@ -36,6 +37,31 @@ func dumpCURL(client *etcd.Client) {
}
}
func getUsernamePasswordFromFlag(usernameFlag string) (username string, password string, err error) {
colon := strings.Index(usernameFlag, ":")
if colon == -1 {
username = usernameFlag
// Prompt for the password.
password, err = speakeasy.Ask("Password: ")
if err != nil {
return "", "", err
}
} else {
username = usernameFlag[:colon]
password = usernameFlag[colon+1:]
}
return username, password, nil
}
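// For example, a --username value of "root:secret" (hypothetical credentials)
// yields ("root", "secret") without prompting, while a bare "root" triggers an
// interactive password prompt via speakeasy.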
func prepAuth(client *etcd.Client, usernameFlag string) error {
username, password, err := getUsernamePasswordFromFlag(usernameFlag)
if err != nil {
return err
}
client.SetCredentials(username, password)
return nil
}
// rawhandle wraps the command function handlers and sets up the
// environment but performs no output formatting.
func rawhandle(c *cli.Context, fn handlerFunc) (*etcd.Response, error) {
@ -52,6 +78,14 @@ func rawhandle(c *cli.Context, fn handlerFunc) (*etcd.Response, error) {
client := etcd.NewClient(endpoints)
client.SetTransport(tr)
username := c.GlobalString("username")
if username != "" {
err := prepAuth(client, username)
if err != nil {
return nil, err
}
}
if c.GlobalBool("debug") {
go dumpCURL(client)
}


@ -49,28 +49,8 @@ func NewMemberCommand() cli.Command {
}
func mustNewMembersAPI(c *cli.Context) client.MembersAPI {
eps, err := getEndpoints(c)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
tr, err := getTransport(c)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
cfg := client.Config{
Transport: tr,
Endpoints: eps,
}
hc, err := client.New(cfg)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
hc := mustNewClient(c)
if !c.GlobalBool("no-sync") {
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)


@ -0,0 +1,241 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"fmt"
"os"
"reflect"
"strings"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codegangsta/cli"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/etcd/client"
)
func NewRoleCommands() cli.Command {
return cli.Command{
Name: "role",
Usage: "role add, grant and revoke subcommands",
Subcommands: []cli.Command{
cli.Command{
Name: "add",
Usage: "add a new role for the etcd cluster",
Action: actionRoleAdd,
},
cli.Command{
Name: "get",
Usage: "get details for a role",
Action: actionRoleGet,
},
cli.Command{
Name: "list",
Usage: "list all roles",
Action: actionRoleList,
},
cli.Command{
Name: "remove",
Usage: "remove a role from the etcd cluster",
Action: actionRoleRemove,
},
cli.Command{
Name: "grant",
Usage: "grant path matches to an etcd role",
Flags: []cli.Flag{
cli.StringFlag{Name: "path", Value: "", Usage: "Path granted for the role to access"},
cli.BoolFlag{Name: "read", Usage: "Grant read-only access"},
cli.BoolFlag{Name: "write", Usage: "Grant write-only access"},
cli.BoolFlag{Name: "readwrite", Usage: "Grant read-write access"},
},
Action: actionRoleGrant,
},
cli.Command{
Name: "revoke",
Usage: "revoke path matches for an etcd role",
Flags: []cli.Flag{
cli.StringFlag{Name: "path", Value: "", Usage: "Path revoked for the role to access"},
cli.BoolFlag{Name: "read", Usage: "Revoke read access"},
cli.BoolFlag{Name: "write", Usage: "Revoke write access"},
cli.BoolFlag{Name: "readwrite", Usage: "Revoke read-write access"},
},
Action: actionRoleRevoke,
},
},
}
}
func mustNewSecurityRoleAPI(c *cli.Context) client.SecurityRoleAPI {
hc := mustNewClient(c)
if c.GlobalBool("debug") {
fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
}
return client.NewSecurityRoleAPI(hc)
}
func actionRoleList(c *cli.Context) {
if len(c.Args()) != 0 {
fmt.Fprintln(os.Stderr, "No arguments accepted")
os.Exit(1)
}
r := mustNewSecurityRoleAPI(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
roles, err := r.ListRoles(ctx)
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
for _, role := range roles {
fmt.Printf("%s\n", role)
}
}
func actionRoleAdd(c *cli.Context) {
api, role := mustRoleAPIAndName(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
currentRole, err := api.GetRole(ctx, role)
cancel()
if currentRole != nil {
fmt.Fprintf(os.Stderr, "Role %s already exists\n", role)
os.Exit(1)
}
ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
err = api.AddRole(ctx, role)
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
fmt.Printf("Role %s created\n", role)
}
func actionRoleRemove(c *cli.Context) {
api, role := mustRoleAPIAndName(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
err := api.RemoveRole(ctx, role)
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
fmt.Printf("Role %s removed\n", role)
}
func actionRoleGrant(c *cli.Context) {
roleGrantRevoke(c, true)
}
func actionRoleRevoke(c *cli.Context) {
roleGrantRevoke(c, false)
}
func roleGrantRevoke(c *cli.Context, grant bool) {
path := c.String("path")
if path == "" {
fmt.Fprintln(os.Stderr, "No path specified; please use `-path`")
os.Exit(1)
}
read := c.Bool("read")
write := c.Bool("write")
rw := c.Bool("readwrite")
permcount := 0
for _, v := range []bool{read, write, rw} {
if v {
permcount++
}
}
if permcount != 1 {
fmt.Fprintln(os.Stderr, "Please specify exactly one of -read, -write or -readwrite")
os.Exit(1)
}
var permType client.PermissionType
switch {
case read:
permType = client.ReadPermission
case write:
permType = client.WritePermission
case rw:
permType = client.ReadWritePermission
}
api, role := mustRoleAPIAndName(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
currentRole, err := api.GetRole(ctx, role)
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
var newRole *client.Role
if grant {
newRole, err = api.GrantRoleKV(ctx, role, []string{path}, permType)
} else {
newRole, err = api.RevokeRoleKV(ctx, role, []string{path}, permType)
}
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
if reflect.DeepEqual(newRole, currentRole) {
if grant {
fmt.Printf("Role unchanged; already granted")
} else {
fmt.Printf("Role unchanged; already revoked")
}
}
fmt.Printf("Role %s updated\n", role)
}
func actionRoleGet(c *cli.Context) {
api, rolename := mustRoleAPIAndName(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
role, err := api.GetRole(ctx, rolename)
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
fmt.Printf("Role: %s\n", role.Role)
fmt.Printf("KV Read:\n")
for _, v := range role.Permissions.KV.Read {
fmt.Printf("\t%s\n", v)
}
fmt.Printf("KV Write:\n")
for _, v := range role.Permissions.KV.Write {
fmt.Printf("\t%s\n", v)
}
}
func mustRoleAPIAndName(c *cli.Context) (client.SecurityRoleAPI, string) {
args := c.Args()
if len(args) != 1 {
fmt.Fprintln(os.Stderr, "Please provide a role name")
os.Exit(1)
}
name := args[0]
api := mustNewSecurityRoleAPI(c)
return api, name
}


@ -0,0 +1,87 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"fmt"
"os"
"strings"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codegangsta/cli"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/etcd/client"
)
func NewSecurityCommands() cli.Command {
return cli.Command{
Name: "security",
Usage: "overall security controls",
Subcommands: []cli.Command{
cli.Command{
Name: "enable",
Usage: "enable security access controls",
Action: actionSecurityEnable,
},
cli.Command{
Name: "disable",
Usage: "disable security access controls",
Action: actionSecurityDisable,
},
},
}
}
func actionSecurityEnable(c *cli.Context) {
securityEnableDisable(c, true)
}
func actionSecurityDisable(c *cli.Context) {
securityEnableDisable(c, false)
}
func mustNewSecurityAPI(c *cli.Context) client.SecurityAPI {
hc := mustNewClient(c)
if c.GlobalBool("debug") {
fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
}
return client.NewSecurityAPI(hc)
}
func securityEnableDisable(c *cli.Context, enable bool) {
if len(c.Args()) != 0 {
fmt.Fprintln(os.Stderr, "No arguments accepted")
os.Exit(1)
}
s := mustNewSecurityAPI(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
var err error
if enable {
err = s.Enable(ctx)
} else {
err = s.Disable(ctx)
}
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
if enable {
fmt.Println("Security Enabled")
} else {
fmt.Println("Security Disabled")
}
}


@ -0,0 +1,242 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"fmt"
"os"
"reflect"
"sort"
"strings"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/bgentry/speakeasy"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codegangsta/cli"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/etcd/client"
)
func NewUserCommands() cli.Command {
return cli.Command{
Name: "user",
Usage: "user add, grant and revoke subcommands",
Subcommands: []cli.Command{
cli.Command{
Name: "add",
Usage: "add a new user for the etcd cluster",
Action: actionUserAdd,
},
cli.Command{
Name: "get",
Usage: "get details for a user",
Action: actionUserGet,
},
cli.Command{
Name: "list",
Usage: "list all current users",
Action: actionUserList,
},
cli.Command{
Name: "remove",
Usage: "remove a user for the etcd cluster",
Action: actionUserRemove,
},
cli.Command{
Name: "grant",
Usage: "grant roles to an etcd user",
Flags: []cli.Flag{cli.StringSliceFlag{Name: "roles", Value: new(cli.StringSlice), Usage: "List of roles to grant or revoke"}},
Action: actionUserGrant,
},
cli.Command{
Name: "revoke",
Usage: "revoke roles for an etcd user",
Flags: []cli.Flag{cli.StringSliceFlag{Name: "roles", Value: new(cli.StringSlice), Usage: "List of roles to grant or revoke"}},
Action: actionUserRevoke,
},
cli.Command{
Name: "passwd",
Usage: "change password for a user",
Action: actionUserPasswd,
},
},
}
}
func mustNewSecurityUserAPI(c *cli.Context) client.SecurityUserAPI {
hc := mustNewClient(c)
if c.GlobalBool("debug") {
fmt.Fprintf(os.Stderr, "Cluster-Endpoints: %s\n", strings.Join(hc.Endpoints(), ", "))
}
return client.NewSecurityUserAPI(hc)
}
func actionUserList(c *cli.Context) {
if len(c.Args()) != 0 {
fmt.Fprintln(os.Stderr, "No arguments accepted")
os.Exit(1)
}
u := mustNewSecurityUserAPI(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
users, err := u.ListUsers(ctx)
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
for _, user := range users {
fmt.Printf("%s\n", user)
}
}
func actionUserAdd(c *cli.Context) {
api, user := mustUserAPIAndName(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
currentUser, err := api.GetUser(ctx, user)
cancel()
if currentUser != nil {
fmt.Fprintf(os.Stderr, "User %s already exists\n", user)
os.Exit(1)
}
pass, err := speakeasy.Ask("New password: ")
if err != nil {
fmt.Fprintln(os.Stderr, "Error reading password:", err)
os.Exit(1)
}
ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
err = api.AddUser(ctx, user, pass)
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
fmt.Printf("User %s created\n", user)
}
func actionUserRemove(c *cli.Context) {
api, user := mustUserAPIAndName(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
err := api.RemoveUser(ctx, user)
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
fmt.Printf("User %s removed\n", user)
}
func actionUserPasswd(c *cli.Context) {
api, user := mustUserAPIAndName(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
currentUser, err := api.GetUser(ctx, user)
cancel()
if currentUser == nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
pass, err := speakeasy.Ask("New password: ")
if err != nil {
fmt.Fprintln(os.Stderr, "Error reading password:", err)
os.Exit(1)
}
ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
_, err = api.ChangePassword(ctx, user, pass)
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
fmt.Printf("Password updated\n")
}
func actionUserGrant(c *cli.Context) {
userGrantRevoke(c, true)
}
func actionUserRevoke(c *cli.Context) {
userGrantRevoke(c, false)
}
func userGrantRevoke(c *cli.Context, grant bool) {
roles := c.StringSlice("roles")
if len(roles) == 0 {
fmt.Fprintln(os.Stderr, "No roles specified; please use `-roles`")
os.Exit(1)
}
api, user := mustUserAPIAndName(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
currentUser, err := api.GetUser(ctx, user)
cancel()
if currentUser == nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
var newUser *client.User
if grant {
newUser, err = api.GrantUser(ctx, user, roles)
} else {
newUser, err = api.RevokeUser(ctx, user, roles)
}
cancel()
// Check the error before touching newUser; on failure newUser is nil.
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
sort.Strings(newUser.Roles)
sort.Strings(currentUser.Roles)
if reflect.DeepEqual(newUser.Roles, currentUser.Roles) {
if grant {
fmt.Printf("User unchanged; roles already granted\n")
} else {
fmt.Printf("User unchanged; roles already revoked\n")
}
}
fmt.Printf("User %s updated\n", user)
}
func actionUserGet(c *cli.Context) {
api, username := mustUserAPIAndName(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
user, err := api.GetUser(ctx, username)
cancel()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
fmt.Printf("User: %s\n", user.User)
fmt.Printf("Roles: %s\n", strings.Join(user.Roles, " "))
}
func mustUserAPIAndName(c *cli.Context) (client.SecurityUserAPI, string) {
args := c.Args()
if len(args) != 1 {
fmt.Fprintln(os.Stderr, "Please provide a username")
os.Exit(1)
}
api := mustNewSecurityUserAPI(c)
username := args[0]
return api, username
}


@ -16,6 +16,7 @@ package command
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
@ -24,6 +25,7 @@ import (
"strings"
"github.com/coreos/etcd/Godeps/_workspace/src/github.com/codegangsta/cli"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/pkg/transport"
)
@ -112,3 +114,40 @@ func getTransport(c *cli.Context) (*http.Transport, error) {
}
return transport.NewTransport(tls)
}
func mustNewClient(c *cli.Context) client.Client {
eps, err := getEndpoints(c)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
tr, err := getTransport(c)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
cfg := client.Config{
Transport: tr,
Endpoints: eps,
}
uFlag := c.GlobalString("username")
if uFlag != "" {
username, password, err := getUsernamePasswordFromFlag(uFlag)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
cfg.Username = username
cfg.Password = password
}
hc, err := client.New(cfg)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
return hc
}


@ -35,6 +35,7 @@ func main() {
cli.StringFlag{Name: "cert-file", Value: "", Usage: "identify HTTPS client using this SSL certificate file"},
cli.StringFlag{Name: "key-file", Value: "", Usage: "identify HTTPS client using this SSL key file"},
cli.StringFlag{Name: "ca-file", Value: "", Usage: "verify certificates of HTTPS-enabled servers using this CA bundle"},
cli.StringFlag{Name: "username, u", Value: "", Usage: "provide username[:password] and prompt if password is not supplied."},
}
app.Commands = []cli.Command{
command.NewBackupCommand(),
@ -53,6 +54,9 @@ func main() {
command.NewExecWatchCommand(),
command.NewMemberCommand(),
command.NewImportSnapCommand(),
command.NewUserCommands(),
command.NewRoleCommands(),
command.NewSecurityCommands(),
}
app.Run(os.Args)
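// Illustrative invocations of the new flag and subcommands (all values are
// hypothetical; `user add` and a bare `--username root` prompt for a password):
//
//	etcdctl --username root:secret security enable
//	etcdctl --username root:secret user add alice
//	etcdctl --username root:secret role grant --path '/foo/*' --readwrite fleet
//	etcdctl --username root:secret user grant --roles fleet alice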


@ -104,7 +104,7 @@ type Error struct {
func (se Error) Error() string { return se.errmsg }
func mergeErr(s string, v ...interface{}) Error {
return Error{fmt.Sprintf("security-merging: "+s, v...)}
return Error{fmt.Sprintf("security: "+s, v...)}
}
func securityErr(s string, v ...interface{}) Error {
@ -155,7 +155,16 @@ func (s *Store) GetUser(name string) (User, error) {
}
// Require that root always has a root role.
if u.User == "root" {
u.Roles = append(u.Roles, RootRoleName)
inRoles := false
for _, r := range u.Roles {
if r == RootRoleName {
inRoles = true
break
}
}
if !inRoles {
u.Roles = append(u.Roles, RootRoleName)
}
}
return u, nil
@ -208,10 +217,16 @@ func (s *Store) DeleteUser(name string) error {
return securityErr("Cannot delete root user while security is enabled.")
}
_, err := s.deleteResource("/users/" + name)
if err == nil {
log.Printf("security: deleted user %s", name)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {
if e.ErrorCode == etcderr.EcodeKeyNotFound {
return securityErr("User %s does not exist", name)
}
}
return err
}
return err
log.Printf("security: deleted user %s", name)
return nil
}
func (s *Store) UpdateUser(user User) (User, error) {
@ -242,7 +257,7 @@ func (s *Store) UpdateUser(user User) (User, error) {
}
func (s *Store) AllRoles() ([]string, error) {
nodes := []string{GuestRoleName, RootRoleName}
nodes := []string{RootRoleName}
resp, err := s.requestResource("/roles/", false)
if err != nil {
if e, ok := err.(*etcderr.Error); ok {