Merge pull request #54 from MichaelMure/editablecomment

Michael Muré created

Core support for editable comments
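
Below is a minimal sketch in Go of how the new API fits together. It is not code from this PR; it only uses identifiers introduced or moved by this changeset (bug.Create, bug.AddComment, bug.EditComment, (*Bug).Compile, TimelineItem.Hash) and wraps them in an illustrative main package:

package main

import (
	"fmt"
	"time"

	"github.com/MichaelMure/git-bug/bug"
)

func main() {
	author := bug.Person{Name: "René Descartes", Email: "rene@descartes.fr"}
	now := time.Now().Unix()

	// Create a bug with a first message, then add a regular comment.
	b, err := bug.Create(author, now, "title", "first message")
	if err != nil {
		panic(err)
	}
	if err := bug.AddComment(b, author, now, "a comment"); err != nil {
		panic(err)
	}

	// Operations are addressed by their hash; an edit targets that hash.
	snap := b.Compile()
	target, err := snap.Timeline[1].Hash()
	if err != nil {
		panic(err)
	}
	if err := bug.EditComment(b, author, now, target, "a comment (edited)"); err != nil {
		panic(err)
	}

	// Recompile to see the edit folded into the snapshot.
	snap = b.Compile()
	fmt.Println(snap.Comments[1].Message) // "a comment (edited)"
}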

Change summary

Gopkg.lock                                                          |   62 
bug/bug.go                                                          |    6 
bug/bug_actions_test.go                                             |  388 
bug/bug_test.go                                                     |   94 
bug/comment.go                                                      |    9 
bug/op_add_comment.go                                               |   88 
bug/op_create.go                                                    |  107 
bug/op_create_test.go                                               |   47 
bug/op_edit_comment.go                                              |  125 
bug/op_edit_comment_test.go                                         |   53 
bug/op_label_change.go                                              |   46 
bug/op_set_status.go                                                |   66 
bug/op_set_title.go                                                 |   46 
bug/operation.go                                                    |   77 
bug/operation_iterator_test.go                                      |   20 
bug/operation_pack.go                                               |   58 
bug/operation_pack_test.go                                          |   19 
bug/operation_test.go                                               |   32 
bug/snapshot.go                                                     |   30 
bug/time.go                                                         |    9 
bug/timeline.go                                                     |   87 
bug/with_snapshot.go                                                |    6 
cache/bug_cache.go                                                  |   15 
cache/repo_cache.go                                                 |    3 
graphql/connections/connections.go                                  |    1 
graphql/connections/gen_timeline.go                                 |  111 
graphql/gqlgen.yml                                                  |   18 
graphql/graph/gen_graph.go                                          |  709 
graphql/models/edges.go                                             |    5 
graphql/models/gen_models.go                                        |   12 
graphql/resolvers/bug.go                                            |   27 
graphql/resolvers/operations.go                                     |   23 
graphql/resolvers/root.go                                           |   16 
graphql/resolvers/timeline.go                                       |   36 
graphql/schema.graphql                                              |   64 
misc/random_bugs/create_random_bugs.go                              |   17 
operations/add_comment.go                                           |   75 
operations/create.go                                                |   90 
operations/create_test.go                                           |   36 
operations/operations.go                                            |   17 
operations/set_status.go                                            |   60 
termui/show_bug.go                                                  |   44 
tests/bug_actions_test.go                                           |  365 
tests/bug_test.go                                                   |   81 
tests/graphql_test.go                                               |    2 
tests/read_bugs_test.go                                             |   26 
vendor/github.com/davecgh/go-spew/LICENSE                           |   15 
vendor/github.com/davecgh/go-spew/spew/bypass.go                    |  145 
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go                |   38 
vendor/github.com/davecgh/go-spew/spew/common.go                    |  341 
vendor/github.com/davecgh/go-spew/spew/config.go                    |  306 
vendor/github.com/davecgh/go-spew/spew/doc.go                       |  211 
vendor/github.com/davecgh/go-spew/spew/dump.go                      |  509 
vendor/github.com/davecgh/go-spew/spew/format.go                    |  419 
vendor/github.com/davecgh/go-spew/spew/spew.go                      |  148 
vendor/github.com/go-test/deep/.gitignore                           |    2 
vendor/github.com/go-test/deep/.travis.yml                          |   13 
vendor/github.com/go-test/deep/CHANGES.md                           |    9 
vendor/github.com/go-test/deep/LICENSE                              |   21 
vendor/github.com/go-test/deep/README.md                            |   51 
vendor/github.com/go-test/deep/deep.go                              |  352 
vendor/github.com/google/go-cmp/LICENSE                             |   27 
vendor/github.com/google/go-cmp/cmp/compare.go                      |  553 
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go  |   17 
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go   |  122 
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go           |  363 
vendor/github.com/google/go-cmp/cmp/internal/function/func.go       |   49 
vendor/github.com/google/go-cmp/cmp/internal/value/format.go        |  277 
vendor/github.com/google/go-cmp/cmp/internal/value/sort.go          |  111 
vendor/github.com/google/go-cmp/cmp/options.go                      |  453 
vendor/github.com/google/go-cmp/cmp/path.go                         |  309 
vendor/github.com/google/go-cmp/cmp/reporter.go                     |   53 
vendor/github.com/google/go-cmp/cmp/unsafe_panic.go                 |   15 
vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go               |   23 
vendor/github.com/pmezard/go-difflib/LICENSE                        |   27 
vendor/github.com/pmezard/go-difflib/difflib/difflib.go             |  772 
vendor/github.com/stretchr/testify/LICENSE                          |   22 
vendor/github.com/stretchr/testify/assert/assertion_format.go       |  484 
vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl  |    5 
vendor/github.com/stretchr/testify/assert/assertion_forward.go      |  956 
vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl |    5 
vendor/github.com/stretchr/testify/assert/assertions.go             | 1394 
vendor/github.com/stretchr/testify/assert/doc.go                    |   45 
vendor/github.com/stretchr/testify/assert/errors.go                 |   10 
vendor/github.com/stretchr/testify/assert/forward_assertions.go     |   16 
vendor/github.com/stretchr/testify/assert/http_assertions.go        |  143 
vendor/gotest.tools/LICENSE                                         |  202 
vendor/gotest.tools/assert/assert.go                                |  311 
vendor/gotest.tools/assert/cmp/compare.go                           |  312 
vendor/gotest.tools/assert/cmp/result.go                            |   94 
vendor/gotest.tools/assert/result.go                                |  107 
vendor/gotest.tools/internal/difflib/LICENSE                        |   27 
vendor/gotest.tools/internal/difflib/difflib.go                     |  420 
vendor/gotest.tools/internal/format/diff.go                         |  161 
vendor/gotest.tools/internal/format/format.go                       |   27 
vendor/gotest.tools/internal/source/source.go                       |  163 
96 files changed, 12,984 insertions(+), 999 deletions(-)

Detailed changes

Gopkg.lock

@@ -49,6 +49,14 @@
   revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
   version = "v1.0.8"
 
+[[projects]]
+  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  pruneopts = "UT"
+  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+  version = "v1.1.1"
+
 [[projects]]
   branch = "master"
   digest = "1:6f9339c912bbdda81302633ad7e99a28dfa5a639c864061f1929510a9a64aa74"
@@ -65,6 +73,14 @@
   revision = "5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4"
   version = "v1.7.0"
 
+[[projects]]
+  digest = "1:7f89e0c888fb99c61055c646f5678aae645b0b0a1443d9b2dcd9964d850827ce"
+  name = "github.com/go-test/deep"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "6592d9cc0a499ad2d5f574fde80a2b5c5cc3b4f5"
+  version = "v1.0.1"
+
 [[projects]]
   digest = "1:97df918963298c287643883209a2c3f642e6593379f97ab400c2a2e219ab647d"
   name = "github.com/golang/protobuf"
@@ -73,6 +89,19 @@
   revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
   version = "v1.2.0"
 
+[[projects]]
+  digest = "1:2e3c336fc7fde5c984d2841455a658a6d626450b1754a854b3b32e7a8f49a07a"
+  name = "github.com/google/go-cmp"
+  packages = [
+    "cmp",
+    "cmp/internal/diff",
+    "cmp/internal/function",
+    "cmp/internal/value",
+  ]
+  pruneopts = "UT"
+  revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
+  version = "v0.2.0"
+
 [[projects]]
   digest = "1:c79fb010be38a59d657c48c6ba1d003a8aa651fa56b579d959d74573b7dff8e1"
   name = "github.com/gorilla/context"
@@ -188,6 +217,14 @@
   revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
   version = "v0.8.0"
 
+[[projects]]
+  digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
+  name = "github.com/pmezard/go-difflib"
+  packages = ["difflib"]
+  pruneopts = "UT"
+  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+  version = "v1.0.0"
+
 [[projects]]
   digest = "1:8bc629776d035c003c7814d4369521afe67fdb8efc4b5f66540d29343b98cf23"
   name = "github.com/russross/blackfriday"
@@ -270,6 +307,14 @@
   revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
   version = "v1.0.1"
 
+[[projects]]
+  digest = "1:18752d0b95816a1b777505a97f71c7467a8445b8ffb55631a7bf779f6ba4fa83"
+  name = "github.com/stretchr/testify"
+  packages = ["assert"]
+  pruneopts = "UT"
+  revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
+  version = "v1.2.2"
+
 [[projects]]
   digest = "1:895fa0f564003de2498fbaf00be596fa814655ab445ce7cc215f7724bb6831ae"
   name = "github.com/vektah/gqlgen"
@@ -374,6 +419,20 @@
   revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
   version = "v2.2.1"
 
+[[projects]]
+  digest = "1:225f565dc88a02cebe329d3a49d0ca125789091af952a5cc4fde6312c34ce44d"
+  name = "gotest.tools"
+  packages = [
+    "assert",
+    "assert/cmp",
+    "internal/difflib",
+    "internal/format",
+    "internal/source",
+  ]
+  pruneopts = "UT"
+  revision = "b6e20af1ed078cd01a6413b734051a292450b4cb"
+  version = "v2.1.0"
+
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
@@ -385,6 +444,7 @@
     "github.com/cheekybits/genny/generic",
     "github.com/dustin/go-humanize",
     "github.com/fatih/color",
+    "github.com/go-test/deep",
     "github.com/gorilla/mux",
     "github.com/icrowley/fake",
     "github.com/jroimartin/gocui",
@@ -396,11 +456,13 @@
     "github.com/skratchdot/open-golang/open",
     "github.com/spf13/cobra",
     "github.com/spf13/cobra/doc",
+    "github.com/stretchr/testify/assert",
     "github.com/vektah/gqlgen/client",
     "github.com/vektah/gqlparser",
     "github.com/vektah/gqlparser/ast",
     "golang.org/x/crypto/ssh/terminal",
     "golang.org/x/oauth2",
+    "gotest.tools/assert",
   ]
   solver-name = "gps-cdcl"
   solver-version = 1

bug/bug.go

@@ -300,7 +300,7 @@ func (bug *Bug) Validate() error {
 
 	// The very first Op should be a CreateOp
 	firstOp := bug.FirstOp()
-	if firstOp == nil || firstOp.OpType() != CreateOp {
+	if firstOp == nil || firstOp.base().OperationType != CreateOp {
 		return fmt.Errorf("first operation should be a Create op")
 	}
 
@@ -308,7 +308,7 @@ func (bug *Bug) Validate() error {
 	it := NewOperationIterator(bug)
 	createCount := 0
 	for it.Next() {
-		if it.Value().OpType() == CreateOp {
+		if it.Value().base().OperationType == CreateOp {
 			createCount++
 		}
 	}
@@ -641,7 +641,7 @@ func (bug *Bug) Compile() Snapshot {
 
 	for it.Next() {
 		op := it.Value()
-		snap = op.Apply(snap)
+		op.Apply(&snap)
 		snap.Operations = append(snap.Operations, op)
 	}
 

bug/bug_actions_test.go

@@ -0,0 +1,388 @@
+package bug
+
+import (
+	"github.com/MichaelMure/git-bug/repository"
+	"github.com/stretchr/testify/assert"
+
+	"io/ioutil"
+	"log"
+	"os"
+	"testing"
+)
+
+func createRepo(bare bool) *repository.GitRepo {
+	dir, err := ioutil.TempDir("", "")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// fmt.Println("Creating repo:", dir)
+
+	var creator func(string) (*repository.GitRepo, error)
+
+	if bare {
+		creator = repository.InitBareGitRepo
+	} else {
+		creator = repository.InitGitRepo
+	}
+
+	repo, err := creator(dir)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return repo
+}
+
+func cleanupRepo(repo repository.Repo) error {
+	path := repo.GetPath()
+	// fmt.Println("Cleaning repo:", path)
+	return os.RemoveAll(path)
+}
+
+func setupRepos(t testing.TB) (repoA, repoB, remote *repository.GitRepo) {
+	repoA = createRepo(false)
+	repoB = createRepo(false)
+	remote = createRepo(true)
+
+	remoteAddr := "file://" + remote.GetPath()
+
+	err := repoA.AddRemote("origin", remoteAddr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = repoB.AddRemote("origin", remoteAddr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return repoA, repoB, remote
+}
+
+func cleanupRepos(repoA, repoB, remote *repository.GitRepo) {
+	cleanupRepo(repoA)
+	cleanupRepo(repoB)
+	cleanupRepo(remote)
+}
+
+func TestPushPull(t *testing.T) {
+	repoA, repoB, remote := setupRepos(t)
+	defer cleanupRepos(repoA, repoB, remote)
+
+	bug1, err := Create(rene, unix, "bug1", "message")
+	assert.Nil(t, err)
+	err = bug1.Commit(repoA)
+	assert.Nil(t, err)
+
+	// A --> remote --> B
+	_, err = Push(repoA, "origin")
+	assert.Nil(t, err)
+
+	err = Pull(repoB, "origin")
+	assert.Nil(t, err)
+
+	bugs := allBugs(t, ReadAllLocalBugs(repoB))
+
+	if len(bugs) != 1 {
+		t.Fatal("Unexpected number of bugs")
+	}
+
+	// B --> remote --> A
+	bug2, err := Create(rene, unix, "bug2", "message")
+	assert.Nil(t, err)
+	err = bug2.Commit(repoB)
+	assert.Nil(t, err)
+
+	_, err = Push(repoB, "origin")
+	assert.Nil(t, err)
+
+	err = Pull(repoA, "origin")
+	assert.Nil(t, err)
+
+	bugs = allBugs(t, ReadAllLocalBugs(repoA))
+
+	if len(bugs) != 2 {
+		t.Fatal("Unexpected number of bugs")
+	}
+}
+
+func allBugs(t testing.TB, bugs <-chan StreamedBug) []*Bug {
+	var result []*Bug
+	for streamed := range bugs {
+		if streamed.Err != nil {
+			t.Fatal(streamed.Err)
+		}
+		result = append(result, streamed.Bug)
+	}
+	return result
+}
+
+func TestRebaseTheirs(t *testing.T) {
+	_RebaseTheirs(t)
+}
+
+func BenchmarkRebaseTheirs(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		_RebaseTheirs(b)
+	}
+}
+
+func _RebaseTheirs(t testing.TB) {
+	repoA, repoB, remote := setupRepos(t)
+	defer cleanupRepos(repoA, repoB, remote)
+
+	bug1, err := Create(rene, unix, "bug1", "message")
+	assert.Nil(t, err)
+	err = bug1.Commit(repoA)
+	assert.Nil(t, err)
+
+	// A --> remote
+	_, err = Push(repoA, "origin")
+	assert.Nil(t, err)
+
+	// remote --> B
+	err = Pull(repoB, "origin")
+	assert.Nil(t, err)
+
+	bug2, err := ReadLocalBug(repoB, bug1.Id())
+	assert.Nil(t, err)
+
+	err = AddComment(bug2, rene, unix, "message2")
+	assert.Nil(t, err)
+	err = AddComment(bug2, rene, unix, "message3")
+	assert.Nil(t, err)
+	err = AddComment(bug2, rene, unix, "message4")
+	assert.Nil(t, err)
+	err = bug2.Commit(repoB)
+	assert.Nil(t, err)
+
+	// B --> remote
+	_, err = Push(repoB, "origin")
+	assert.Nil(t, err)
+
+	// remote --> A
+	err = Pull(repoA, "origin")
+	assert.Nil(t, err)
+
+	bugs := allBugs(t, ReadAllLocalBugs(repoB))
+
+	if len(bugs) != 1 {
+		t.Fatal("Unexpected number of bugs")
+	}
+
+	bug3, err := ReadLocalBug(repoA, bug1.Id())
+	assert.Nil(t, err)
+
+	if nbOps(bug3) != 4 {
+		t.Fatal("Unexpected number of operations")
+	}
+}
+
+func TestRebaseOurs(t *testing.T) {
+	_RebaseOurs(t)
+}
+
+func BenchmarkRebaseOurs(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		_RebaseOurs(b)
+	}
+}
+
+func _RebaseOurs(t testing.TB) {
+	repoA, repoB, remote := setupRepos(t)
+	defer cleanupRepos(repoA, repoB, remote)
+
+	bug1, err := Create(rene, unix, "bug1", "message")
+	assert.Nil(t, err)
+	err = bug1.Commit(repoA)
+	assert.Nil(t, err)
+
+	// A --> remote
+	_, err = Push(repoA, "origin")
+	assert.Nil(t, err)
+
+	// remote --> B
+	err = Pull(repoB, "origin")
+	assert.Nil(t, err)
+
+	err = AddComment(bug1, rene, unix, "message2")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message3")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message4")
+	assert.Nil(t, err)
+	err = bug1.Commit(repoA)
+	assert.Nil(t, err)
+
+	err = AddComment(bug1, rene, unix, "message5")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message6")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message7")
+	assert.Nil(t, err)
+	err = bug1.Commit(repoA)
+	assert.Nil(t, err)
+
+	err = AddComment(bug1, rene, unix, "message8")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message9")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message10")
+	assert.Nil(t, err)
+	err = bug1.Commit(repoA)
+	assert.Nil(t, err)
+
+	// remote --> A
+	err = Pull(repoA, "origin")
+	assert.Nil(t, err)
+
+	bugs := allBugs(t, ReadAllLocalBugs(repoA))
+
+	if len(bugs) != 1 {
+		t.Fatal("Unexpected number of bugs")
+	}
+
+	bug2, err := ReadLocalBug(repoA, bug1.Id())
+	assert.Nil(t, err)
+
+	if nbOps(bug2) != 10 {
+		t.Fatal("Unexpected number of operations")
+	}
+}
+
+func nbOps(b *Bug) int {
+	it := NewOperationIterator(b)
+	counter := 0
+	for it.Next() {
+		counter++
+	}
+	return counter
+}
+
+func TestRebaseConflict(t *testing.T) {
+	_RebaseConflict(t)
+}
+
+func BenchmarkRebaseConflict(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		_RebaseConflict(b)
+	}
+}
+
+func _RebaseConflict(t testing.TB) {
+	repoA, repoB, remote := setupRepos(t)
+	defer cleanupRepos(repoA, repoB, remote)
+
+	bug1, err := Create(rene, unix, "bug1", "message")
+	assert.Nil(t, err)
+	err = bug1.Commit(repoA)
+	assert.Nil(t, err)
+
+	// A --> remote
+	_, err = Push(repoA, "origin")
+	assert.Nil(t, err)
+
+	// remote --> B
+	err = Pull(repoB, "origin")
+	assert.Nil(t, err)
+
+	err = AddComment(bug1, rene, unix, "message2")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message3")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message4")
+	assert.Nil(t, err)
+	err = bug1.Commit(repoA)
+	assert.Nil(t, err)
+
+	err = AddComment(bug1, rene, unix, "message5")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message6")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message7")
+	assert.Nil(t, err)
+	err = bug1.Commit(repoA)
+	assert.Nil(t, err)
+
+	err = AddComment(bug1, rene, unix, "message8")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message9")
+	assert.Nil(t, err)
+	err = AddComment(bug1, rene, unix, "message10")
+	assert.Nil(t, err)
+	err = bug1.Commit(repoA)
+	assert.Nil(t, err)
+
+	bug2, err := ReadLocalBug(repoB, bug1.Id())
+	assert.Nil(t, err)
+
+	err = AddComment(bug2, rene, unix, "message11")
+	assert.Nil(t, err)
+	err = AddComment(bug2, rene, unix, "message12")
+	assert.Nil(t, err)
+	err = AddComment(bug2, rene, unix, "message13")
+	assert.Nil(t, err)
+	err = bug2.Commit(repoB)
+	assert.Nil(t, err)
+
+	err = AddComment(bug2, rene, unix, "message14")
+	assert.Nil(t, err)
+	err = AddComment(bug2, rene, unix, "message15")
+	assert.Nil(t, err)
+	err = AddComment(bug2, rene, unix, "message16")
+	assert.Nil(t, err)
+	err = bug2.Commit(repoB)
+	assert.Nil(t, err)
+
+	err = AddComment(bug2, rene, unix, "message17")
+	assert.Nil(t, err)
+	err = AddComment(bug2, rene, unix, "message18")
+	assert.Nil(t, err)
+	err = AddComment(bug2, rene, unix, "message19")
+	assert.Nil(t, err)
+	err = bug2.Commit(repoB)
+	assert.Nil(t, err)
+
+	// A --> remote
+	_, err = Push(repoA, "origin")
+	assert.Nil(t, err)
+
+	// remote --> B
+	err = Pull(repoB, "origin")
+	assert.Nil(t, err)
+
+	bugs := allBugs(t, ReadAllLocalBugs(repoB))
+
+	if len(bugs) != 1 {
+		t.Fatal("Unexpected number of bugs")
+	}
+
+	bug3, err := ReadLocalBug(repoB, bug1.Id())
+	assert.Nil(t, err)
+
+	if nbOps(bug3) != 19 {
+		t.Fatal("Unexpected number of operations")
+	}
+
+	// B --> remote
+	_, err = Push(repoB, "origin")
+	assert.Nil(t, err)
+
+	// remote --> A
+	err = Pull(repoA, "origin")
+	assert.Nil(t, err)
+
+	bugs = allBugs(t, ReadAllLocalBugs(repoA))
+
+	if len(bugs) != 1 {
+		t.Fatal("Unexpected number of bugs")
+	}
+
+	bug4, err := ReadLocalBug(repoA, bug1.Id())
+	assert.Nil(t, err)
+
+	if nbOps(bug4) != 19 {
+		t.Fatal("Unexpected number of operations")
+	}
+}

bug/bug_test.go

@@ -0,0 +1,94 @@
+package bug
+
+import (
+	"github.com/MichaelMure/git-bug/repository"
+	"github.com/go-test/deep"
+	"github.com/stretchr/testify/assert"
+
+	"testing"
+)
+
+func TestBugId(t *testing.T) {
+	mockRepo := repository.NewMockRepoForTest()
+
+	bug1 := NewBug()
+
+	bug1.Append(createOp)
+
+	err := bug1.Commit(mockRepo)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bug1.Id()
+}
+
+func TestBugValidity(t *testing.T) {
+	mockRepo := repository.NewMockRepoForTest()
+
+	bug1 := NewBug()
+
+	if bug1.Validate() == nil {
+		t.Fatal("Empty bug should be invalid")
+	}
+
+	bug1.Append(createOp)
+
+	if bug1.Validate() != nil {
+		t.Fatal("Bug with just a CreateOp should be valid")
+	}
+
+	err := bug1.Commit(mockRepo)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bug1.Append(createOp)
+
+	if bug1.Validate() == nil {
+		t.Fatal("Bug with multiple CreateOp should be invalid")
+	}
+
+	err = bug1.Commit(mockRepo)
+	if err == nil {
+		t.Fatal("Invalid bug should not commit")
+	}
+}
+
+func TestBugSerialisation(t *testing.T) {
+	bug1 := NewBug()
+
+	bug1.Append(createOp)
+	bug1.Append(setTitleOp)
+	bug1.Append(setTitleOp)
+	bug1.Append(addCommentOp)
+
+	repo := repository.NewMockRepoForTest()
+
+	err := bug1.Commit(repo)
+	assert.Nil(t, err)
+
+	bug2, err := ReadLocalBug(repo, bug1.Id())
+	if err != nil {
+		t.Error(err)
+	}
+
+	// ignore some fields
+	bug2.packs[0].commitHash = bug1.packs[0].commitHash
+	for i := range bug1.packs[0].Operations {
+		bug2.packs[0].Operations[i].base().hash = bug1.packs[0].Operations[i].base().hash
+	}
+
+	// check hashes
+	for i := range bug1.packs[0].Operations {
+		if !bug2.packs[0].Operations[i].base().hash.IsValid() {
+			t.Fatal("invalid hash")
+		}
+	}
+
+	deep.CompareUnexportedFields = true
+	if diff := deep.Equal(bug1, bug2); diff != nil {
+		t.Fatal(diff)
+	}
+}

bug/comment.go

@@ -3,7 +3,6 @@ package bug
 import (
 	"github.com/MichaelMure/git-bug/util/git"
 	"github.com/dustin/go-humanize"
-	"time"
 )
 
 // Comment represent a comment in a Bug
@@ -14,16 +13,14 @@ type Comment struct {
 
 	// Creation time of the comment.
 	// Should be used only for human display, never for ordering as we can't rely on it in a distributed system.
-	UnixTime int64
+	UnixTime Timestamp
 }
 
 // FormatTimeRel format the UnixTime of the comment for human consumption
 func (c Comment) FormatTimeRel() string {
-	t := time.Unix(c.UnixTime, 0)
-	return humanize.Time(t)
+	return humanize.Time(c.UnixTime.Time())
 }
 
 func (c Comment) FormatTime() string {
-	t := time.Unix(c.UnixTime, 0)
-	return t.Format("Mon Jan 2 15:04:05 2006 +0200")
+	return c.UnixTime.Time().Format("Mon Jan 2 15:04:05 2006 +0200")
 }

bug/op_add_comment.go

@@ -0,0 +1,88 @@
+package bug
+
+import (
+	"fmt"
+
+	"github.com/MichaelMure/git-bug/util/git"
+	"github.com/MichaelMure/git-bug/util/text"
+)
+
+var _ Operation = &AddCommentOperation{}
+
+// AddCommentOperation will add a new comment in the bug
+type AddCommentOperation struct {
+	*OpBase
+	Message string `json:"message"`
+	// TODO: change for a map[string]util.hash to store the filename ?
+	Files []git.Hash `json:"files"`
+}
+
+func (op *AddCommentOperation) base() *OpBase {
+	return op.OpBase
+}
+
+func (op *AddCommentOperation) Hash() (git.Hash, error) {
+	return hashOperation(op)
+}
+
+func (op *AddCommentOperation) Apply(snapshot *Snapshot) {
+	comment := Comment{
+		Message:  op.Message,
+		Author:   op.Author,
+		Files:    op.Files,
+		UnixTime: Timestamp(op.UnixTime),
+	}
+
+	snapshot.Comments = append(snapshot.Comments, comment)
+
+	hash, err := op.Hash()
+	if err != nil {
+		// Should never error unless a programming error happened
+		// (covered in OpBase.Validate())
+		panic(err)
+	}
+
+	snapshot.Timeline = append(snapshot.Timeline, NewCommentTimelineItem(hash, comment))
+}
+
+func (op *AddCommentOperation) GetFiles() []git.Hash {
+	return op.Files
+}
+
+func (op *AddCommentOperation) Validate() error {
+	if err := opBaseValidate(op, AddCommentOp); err != nil {
+		return err
+	}
+
+	if text.Empty(op.Message) {
+		return fmt.Errorf("message is empty")
+	}
+
+	if !text.Safe(op.Message) {
+		return fmt.Errorf("message is not fully printable")
+	}
+
+	return nil
+}
+
+func NewAddCommentOp(author Person, unixTime int64, message string, files []git.Hash) *AddCommentOperation {
+	return &AddCommentOperation{
+		OpBase:  newOpBase(AddCommentOp, author, unixTime),
+		Message: message,
+		Files:   files,
+	}
+}
+
+// Convenience function to apply the operation
+func AddComment(b Interface, author Person, unixTime int64, message string) error {
+	return AddCommentWithFiles(b, author, unixTime, message, nil)
+}
+
+func AddCommentWithFiles(b Interface, author Person, unixTime int64, message string, files []git.Hash) error {
+	addCommentOp := NewAddCommentOp(author, unixTime, message, files)
+	if err := addCommentOp.Validate(); err != nil {
+		return err
+	}
+	b.Append(addCommentOp)
+	return nil
+}

bug/op_create.go

@@ -0,0 +1,107 @@
+package bug
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/MichaelMure/git-bug/util/git"
+	"github.com/MichaelMure/git-bug/util/text"
+)
+
+var _ Operation = &CreateOperation{}
+
+// CreateOperation define the initial creation of a bug
+type CreateOperation struct {
+	*OpBase
+	Title   string     `json:"title"`
+	Message string     `json:"message"`
+	Files   []git.Hash `json:"files"`
+}
+
+func (op *CreateOperation) base() *OpBase {
+	return op.OpBase
+}
+
+func (op *CreateOperation) Hash() (git.Hash, error) {
+	return hashOperation(op)
+}
+
+func (op *CreateOperation) Apply(snapshot *Snapshot) {
+	snapshot.Title = op.Title
+
+	comment := Comment{
+		Message:  op.Message,
+		Author:   op.Author,
+		UnixTime: Timestamp(op.UnixTime),
+	}
+
+	snapshot.Comments = []Comment{comment}
+	snapshot.Author = op.Author
+	snapshot.CreatedAt = op.Time()
+
+	hash, err := op.Hash()
+	if err != nil {
+		// Should never error unless a programming error happened
+		// (covered in OpBase.Validate())
+		panic(err)
+	}
+
+	snapshot.Timeline = []TimelineItem{
+		NewCreateTimelineItem(hash, comment),
+	}
+}
+
+func (op *CreateOperation) GetFiles() []git.Hash {
+	return op.Files
+}
+
+func (op *CreateOperation) Validate() error {
+	if err := opBaseValidate(op, CreateOp); err != nil {
+		return err
+	}
+
+	if text.Empty(op.Title) {
+		return fmt.Errorf("title is empty")
+	}
+
+	if strings.Contains(op.Title, "\n") {
+		return fmt.Errorf("title should be a single line")
+	}
+
+	if !text.Safe(op.Title) {
+		return fmt.Errorf("title is not fully printable")
+	}
+
+	if !text.Safe(op.Message) {
+		return fmt.Errorf("message is not fully printable")
+	}
+
+	return nil
+}
+
+func NewCreateOp(author Person, unixTime int64, title, message string, files []git.Hash) *CreateOperation {
+	return &CreateOperation{
+		OpBase:  newOpBase(CreateOp, author, unixTime),
+		Title:   title,
+		Message: message,
+		Files:   files,
+	}
+}
+
+// Convenience function to apply the operation
+func Create(author Person, unixTime int64, title, message string) (*Bug, error) {
+	return CreateWithFiles(author, unixTime, title, message, nil)
+}
+
+func CreateWithFiles(author Person, unixTime int64, title, message string, files []git.Hash) (*Bug, error) {
+	newBug := NewBug()
+	createOp := NewCreateOp(author, unixTime, title, message, files)
+
+	if err := createOp.Validate(); err != nil {
+		return nil, err
+	}
+
+	newBug.Append(createOp)
+
+	return newBug, nil
+}

bug/op_create_test.go

@@ -0,0 +1,47 @@
+package bug
+
+import (
+	"testing"
+	"time"
+
+	"github.com/go-test/deep"
+)
+
+func TestCreate(t *testing.T) {
+	snapshot := Snapshot{}
+
+	var rene = Person{
+		Name:  "René Descartes",
+		Email: "rene@descartes.fr",
+	}
+
+	unix := time.Now().Unix()
+
+	create := NewCreateOp(rene, unix, "title", "message", nil)
+
+	create.Apply(&snapshot)
+
+	hash, err := create.Hash()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	comment := Comment{Author: rene, Message: "message", UnixTime: Timestamp(create.UnixTime)}
+
+	expected := Snapshot{
+		Title: "title",
+		Comments: []Comment{
+			comment,
+		},
+		Author:    rene,
+		CreatedAt: create.Time(),
+		Timeline: []TimelineItem{
+			NewCreateTimelineItem(hash, comment),
+		},
+	}
+
+	deep.CompareUnexportedFields = true
+	if diff := deep.Equal(snapshot, expected); diff != nil {
+		t.Fatal(diff)
+	}
+}

bug/op_edit_comment.go

@@ -0,0 +1,125 @@
+package bug
+
+import (
+	"fmt"
+
+	"github.com/MichaelMure/git-bug/util/git"
+	"github.com/MichaelMure/git-bug/util/text"
+)
+
+var _ Operation = &EditCommentOperation{}
+
+// EditCommentOperation will change a comment in the bug
+type EditCommentOperation struct {
+	*OpBase
+	Target  git.Hash   `json:"target"`
+	Message string     `json:"message"`
+	Files   []git.Hash `json:"files"`
+}
+
+func (op *EditCommentOperation) base() *OpBase {
+	return op.OpBase
+}
+
+func (op *EditCommentOperation) Hash() (git.Hash, error) {
+	return hashOperation(op)
+}
+
+func (op *EditCommentOperation) Apply(snapshot *Snapshot) {
+	// Todo: currently any message can be edited, even by a different author
+	// crypto signature are needed.
+
+	var target TimelineItem
+	var commentIndex int
+
+	for i, item := range snapshot.Timeline {
+		h, err := item.Hash()
+
+		if err != nil {
+			// Should never happen, we control what goes into the timeline
+			panic(err)
+		}
+
+		if h == op.Target {
+			target = snapshot.Timeline[i]
+			break
+		}
+
+		// Track the index in the []Comment
+		switch item.(type) {
+		case *CreateTimelineItem, *CommentTimelineItem:
+			commentIndex++
+		}
+	}
+
+	if target == nil {
+		// Target not found, edit is a no-op
+		return
+	}
+
+	comment := Comment{
+		Message:  op.Message,
+		Files:    op.Files,
+		UnixTime: Timestamp(op.UnixTime),
+	}
+
+	switch target.(type) {
+	case *CreateTimelineItem:
+		item := target.(*CreateTimelineItem)
+		item.Append(comment)
+
+	case *CommentTimelineItem:
+		item := target.(*CommentTimelineItem)
+		item.Append(comment)
+	}
+
+	snapshot.Comments[commentIndex].Message = op.Message
+	snapshot.Comments[commentIndex].Files = op.Files
+}
+
+func (op *EditCommentOperation) GetFiles() []git.Hash {
+	return op.Files
+}
+
+func (op *EditCommentOperation) Validate() error {
+	if err := opBaseValidate(op, EditCommentOp); err != nil {
+		return err
+	}
+
+	if !op.Target.IsValid() {
+		return fmt.Errorf("target hash is invalid")
+	}
+
+	if text.Empty(op.Message) {
+		return fmt.Errorf("message is empty")
+	}
+
+	if !text.Safe(op.Message) {
+		return fmt.Errorf("message is not fully printable")
+	}
+
+	return nil
+}
+
+func NewEditCommentOp(author Person, unixTime int64, target git.Hash, message string, files []git.Hash) *EditCommentOperation {
+	return &EditCommentOperation{
+		OpBase:  newOpBase(EditCommentOp, author, unixTime),
+		Target:  target,
+		Message: message,
+		Files:   files,
+	}
+}
+
+// Convenience function to apply the operation
+func EditComment(b Interface, author Person, unixTime int64, target git.Hash, message string) error {
+	return EditCommentWithFiles(b, author, unixTime, target, message, nil)
+}
+
+func EditCommentWithFiles(b Interface, author Person, unixTime int64, target git.Hash, message string, files []git.Hash) error {
+	editCommentOp := NewEditCommentOp(author, unixTime, target, message, files)
+	if err := editCommentOp.Validate(); err != nil {
+		return err
+	}
+	b.Append(editCommentOp)
+	return nil
+}

bug/op_edit_comment_test.go

@@ -0,0 +1,53 @@
+package bug
+
+import (
+	"testing"
+	"time"
+
+	"gotest.tools/assert"
+)
+
+func TestEdit(t *testing.T) {
+	snapshot := Snapshot{}
+
+	var rene = Person{
+		Name:  "René Descartes",
+		Email: "rene@descartes.fr",
+	}
+
+	unix := time.Now().Unix()
+
+	create := NewCreateOp(rene, unix, "title", "create", nil)
+	create.Apply(&snapshot)
+
+	hash1, err := create.Hash()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	comment := NewAddCommentOp(rene, unix, "comment", nil)
+	comment.Apply(&snapshot)
+
+	hash2, err := comment.Hash()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	edit := NewEditCommentOp(rene, unix, hash1, "create edited", nil)
+	edit.Apply(&snapshot)
+
+	assert.Equal(t, len(snapshot.Timeline), 2)
+	assert.Equal(t, len(snapshot.Timeline[0].(*CreateTimelineItem).History), 2)
+	assert.Equal(t, len(snapshot.Timeline[1].(*CommentTimelineItem).History), 1)
+	assert.Equal(t, snapshot.Comments[0].Message, "create edited")
+	assert.Equal(t, snapshot.Comments[1].Message, "comment")
+
+	edit2 := NewEditCommentOp(rene, unix, hash2, "comment edited", nil)
+	edit2.Apply(&snapshot)
+
+	assert.Equal(t, len(snapshot.Timeline), 2)
+	assert.Equal(t, len(snapshot.Timeline[0].(*CreateTimelineItem).History), 2)
+	assert.Equal(t, len(snapshot.Timeline[1].(*CommentTimelineItem).History), 2)
+	assert.Equal(t, snapshot.Comments[0].Message, "create edited")
+	assert.Equal(t, snapshot.Comments[1].Message, "comment edited")
+}

operations/label_change.go → bug/op_label_change.go

@@ -1,24 +1,32 @@
-package operations
+package bug
 
 import (
 	"fmt"
 	"sort"
 
-	"github.com/MichaelMure/git-bug/bug"
+	"github.com/MichaelMure/git-bug/util/git"
 	"github.com/pkg/errors"
 )
 
-var _ bug.Operation = LabelChangeOperation{}
+var _ Operation = &LabelChangeOperation{}
 
 // LabelChangeOperation define a Bug operation to add or remove labels
 type LabelChangeOperation struct {
-	*bug.OpBase
-	Added   []bug.Label `json:"added"`
-	Removed []bug.Label `json:"removed"`
+	*OpBase
+	Added   []Label `json:"added"`
+	Removed []Label `json:"removed"`
+}
+
+func (op *LabelChangeOperation) base() *OpBase {
+	return op.OpBase
+}
+
+func (op *LabelChangeOperation) Hash() (git.Hash, error) {
+	return hashOperation(op)
 }
 
 // Apply apply the operation
-func (op LabelChangeOperation) Apply(snapshot bug.Snapshot) bug.Snapshot {
+func (op *LabelChangeOperation) Apply(snapshot *Snapshot) {
 	// Add in the set
 AddLoop:
 	for _, added := range op.Added {
@@ -47,11 +55,11 @@ AddLoop:
 		return string(snapshot.Labels[i]) < string(snapshot.Labels[j])
 	})
 
-	return snapshot
+	snapshot.Timeline = append(snapshot.Timeline, op)
 }
 
-func (op LabelChangeOperation) Validate() error {
-	if err := bug.OpBaseValidate(op, bug.LabelChangeOp); err != nil {
+func (op *LabelChangeOperation) Validate() error {
+	if err := opBaseValidate(op, LabelChangeOp); err != nil {
 		return err
 	}
 
@@ -74,23 +82,23 @@ func (op LabelChangeOperation) Validate() error {
 	return nil
 }
 
-func NewLabelChangeOperation(author bug.Person, unixTime int64, added, removed []bug.Label) LabelChangeOperation {
-	return LabelChangeOperation{
-		OpBase:  bug.NewOpBase(bug.LabelChangeOp, author, unixTime),
+func NewLabelChangeOperation(author Person, unixTime int64, added, removed []Label) *LabelChangeOperation {
+	return &LabelChangeOperation{
+		OpBase:  newOpBase(LabelChangeOp, author, unixTime),
 		Added:   added,
 		Removed: removed,
 	}
 }
 
 // ChangeLabels is a convenience function to apply the operation
-func ChangeLabels(b bug.Interface, author bug.Person, unixTime int64, add, remove []string) ([]LabelChangeResult, error) {
-	var added, removed []bug.Label
+func ChangeLabels(b Interface, author Person, unixTime int64, add, remove []string) ([]LabelChangeResult, error) {
+	var added, removed []Label
 	var results []LabelChangeResult
 
 	snap := b.Compile()
 
 	for _, str := range add {
-		label := bug.Label(str)
+		label := Label(str)
 
 		// check for duplicate
 		if labelExist(added, label) {
@@ -109,7 +117,7 @@ func ChangeLabels(b bug.Interface, author bug.Person, unixTime int64, add, remov
 	}
 
 	for _, str := range remove {
-		label := bug.Label(str)
+		label := Label(str)
 
 		// check for duplicate
 		if labelExist(removed, label) {
@@ -142,7 +150,7 @@ func ChangeLabels(b bug.Interface, author bug.Person, unixTime int64, add, remov
 	return results, nil
 }
 
-func labelExist(labels []bug.Label, label bug.Label) bool {
+func labelExist(labels []Label, label Label) bool {
 	for _, l := range labels {
 		if l == label {
 			return true
@@ -164,7 +172,7 @@ const (
 )
 
 type LabelChangeResult struct {
-	Label  bug.Label
+	Label  Label
 	Status LabelChangeStatus
 }
 

bug/op_set_status.go

@@ -0,0 +1,66 @@
+package bug
+
+import (
+	"github.com/MichaelMure/git-bug/util/git"
+	"github.com/pkg/errors"
+)
+
+var _ Operation = &SetStatusOperation{}
+
+// SetStatusOperation will change the status of a bug
+type SetStatusOperation struct {
+	*OpBase
+	Status Status `json:"status"`
+}
+
+func (op *SetStatusOperation) base() *OpBase {
+	return op.OpBase
+}
+
+func (op *SetStatusOperation) Hash() (git.Hash, error) {
+	return hashOperation(op)
+}
+
+func (op *SetStatusOperation) Apply(snapshot *Snapshot) {
+	snapshot.Status = op.Status
+	snapshot.Timeline = append(snapshot.Timeline, op)
+}
+
+func (op *SetStatusOperation) Validate() error {
+	if err := opBaseValidate(op, SetStatusOp); err != nil {
+		return err
+	}
+
+	if err := op.Status.Validate(); err != nil {
+		return errors.Wrap(err, "status")
+	}
+
+	return nil
+}
+
+func NewSetStatusOp(author Person, unixTime int64, status Status) *SetStatusOperation {
+	return &SetStatusOperation{
+		OpBase: newOpBase(SetStatusOp, author, unixTime),
+		Status: status,
+	}
+}
+
+// Convenience function to apply the operation
+func Open(b Interface, author Person, unixTime int64) error {
+	op := NewSetStatusOp(author, unixTime, OpenStatus)
+	if err := op.Validate(); err != nil {
+		return err
+	}
+	b.Append(op)
+	return nil
+}
+
+// Convenience function to apply the operation
+func Close(b Interface, author Person, unixTime int64) error {
+	op := NewSetStatusOp(author, unixTime, ClosedStatus)
+	if err := op.Validate(); err != nil {
+		return err
+	}
+	b.Append(op)
+	return nil
+}

operations/set_title.go → bug/op_set_title.go

@@ -1,31 +1,37 @@
-package operations
+package bug
 
 import (
 	"fmt"
 	"strings"
 
-	"github.com/MichaelMure/git-bug/bug"
+	"github.com/MichaelMure/git-bug/util/git"
 	"github.com/MichaelMure/git-bug/util/text"
 )
 
-// SetTitleOperation will change the title of a bug
-
-var _ bug.Operation = SetTitleOperation{}
+var _ Operation = &SetTitleOperation{}
 
+// SetTitleOperation will change the title of a bug
 type SetTitleOperation struct {
-	*bug.OpBase
+	*OpBase
 	Title string `json:"title"`
 	Was   string `json:"was"`
 }
 
-func (op SetTitleOperation) Apply(snapshot bug.Snapshot) bug.Snapshot {
-	snapshot.Title = op.Title
+func (op *SetTitleOperation) base() *OpBase {
+	return op.OpBase
+}
+
+func (op *SetTitleOperation) Hash() (git.Hash, error) {
+	return hashOperation(op)
+}
 
-	return snapshot
+func (op *SetTitleOperation) Apply(snapshot *Snapshot) {
+	snapshot.Title = op.Title
+	snapshot.Timeline = append(snapshot.Timeline, op)
 }
 
-func (op SetTitleOperation) Validate() error {
-	if err := bug.OpBaseValidate(op, bug.SetTitleOp); err != nil {
+func (op *SetTitleOperation) Validate() error {
+	if err := opBaseValidate(op, SetTitleOp); err != nil {
 		return err
 	}
 
@@ -52,31 +58,31 @@ func (op SetTitleOperation) Validate() error {
 	return nil
 }
 
-func NewSetTitleOp(author bug.Person, unixTime int64, title string, was string) SetTitleOperation {
-	return SetTitleOperation{
-		OpBase: bug.NewOpBase(bug.SetTitleOp, author, unixTime),
+func NewSetTitleOp(author Person, unixTime int64, title string, was string) *SetTitleOperation {
+	return &SetTitleOperation{
+		OpBase: newOpBase(SetTitleOp, author, unixTime),
 		Title:  title,
 		Was:    was,
 	}
 }
 
 // Convenience function to apply the operation
-func SetTitle(b bug.Interface, author bug.Person, unixTime int64, title string) error {
-	it := bug.NewOperationIterator(b)
+func SetTitle(b Interface, author Person, unixTime int64, title string) error {
+	it := NewOperationIterator(b)
 
-	var lastTitleOp bug.Operation
+	var lastTitleOp Operation
 	for it.Next() {
 		op := it.Value()
-		if op.OpType() == bug.SetTitleOp {
+		if op.base().OperationType == SetTitleOp {
 			lastTitleOp = op
 		}
 	}
 
 	var was string
 	if lastTitleOp != nil {
-		was = lastTitleOp.(SetTitleOperation).Title
+		was = lastTitleOp.(*SetTitleOperation).Title
 	} else {
-		was = b.FirstOp().(CreateOperation).Title
+		was = b.FirstOp().(*CreateOperation).Title
 	}
 
 	setTitleOp := NewSetTitleOp(author, unixTime, title, was)

bug/operation.go

@@ -1,11 +1,13 @@
 package bug
 
 import (
-	"github.com/MichaelMure/git-bug/util/git"
-	"github.com/pkg/errors"
-
+	"crypto/sha256"
+	"encoding/json"
 	"fmt"
 	"time"
+
+	"github.com/MichaelMure/git-bug/util/git"
+	"github.com/pkg/errors"
 )
 
 // OperationType is an operation type identifier
@@ -18,22 +20,23 @@ const (
 	AddCommentOp
 	SetStatusOp
 	LabelChangeOp
+	EditCommentOp
 )
 
 // Operation define the interface to fulfill for an edit operation of a Bug
 type Operation interface {
-	// OpType return the type of operation
-	OpType() OperationType
+	// base return the OpBase of the Operation, for package internal use
+	base() *OpBase
+	// Hash return the hash of the operation, to be used for back references
+	Hash() (git.Hash, error)
 	// Time return the time when the operation was added
 	Time() time.Time
 	// GetUnixTime return the unix timestamp when the operation was added
 	GetUnixTime() int64
-	// GetAuthor return the author of the operation
-	GetAuthor() Person
 	// GetFiles return the files needed by this operation
 	GetFiles() []git.Hash
 	// Apply the operation to a Snapshot to create the final state
-	Apply(snapshot Snapshot) Snapshot
+	Apply(snapshot *Snapshot)
 	// Validate check if the operation is valid (ex: a title is a single line)
 	Validate() error
 	// SetMetadata store arbitrary metadata about the operation
@@ -42,16 +45,42 @@ type Operation interface {
 	GetMetadata(key string) (string, bool)
 }
 
+func hashRaw(data []byte) git.Hash {
+	hasher := sha256.New()
+	// Write can't fail
+	_, _ = hasher.Write(data)
+	return git.Hash(fmt.Sprintf("%x", hasher.Sum(nil)))
+}
+
+// hash compute the hash of the serialized operation
+func hashOperation(op Operation) (git.Hash, error) {
+	base := op.base()
+
+	if base.hash != "" {
+		return base.hash, nil
+	}
+
+	data, err := json.Marshal(op)
+	if err != nil {
+		return "", err
+	}
+
+	base.hash = hashRaw(data)
+
+	return base.hash, nil
+}
+
 // OpBase implement the common code for all operations
 type OpBase struct {
-	OperationType OperationType     `json:"type"`
-	Author        Person            `json:"author"`
-	UnixTime      int64             `json:"timestamp"`
+	OperationType OperationType `json:"type"`
+	Author        Person        `json:"author"`
+	UnixTime      int64         `json:"timestamp"`
+	hash          git.Hash
 	Metadata      map[string]string `json:"metadata,omitempty"`
 }
 
-// NewOpBase is the constructor for an OpBase
-func NewOpBase(opType OperationType, author Person, unixTime int64) *OpBase {
+// newOpBase is the constructor for an OpBase
+func newOpBase(opType OperationType, author Person, unixTime int64) *OpBase {
 	return &OpBase{
 		OperationType: opType,
 		Author:        author,
@@ -59,11 +88,6 @@ func NewOpBase(opType OperationType, author Person, unixTime int64) *OpBase {
 	}
 }
 
-// OpType return the type of operation
-func (op *OpBase) OpType() OperationType {
-	return op.OperationType
-}
-
 // Time return the time when the operation was added
 func (op *OpBase) Time() time.Time {
 	return time.Unix(op.UnixTime, 0)
@@ -74,27 +98,26 @@ func (op *OpBase) GetUnixTime() int64 {
 	return op.UnixTime
 }
 
-// GetAuthor return the author of the operation
-func (op *OpBase) GetAuthor() Person {
-	return op.Author
-}
-
 // GetFiles return the files needed by this operation
 func (op *OpBase) GetFiles() []git.Hash {
 	return nil
 }
 
 // Validate check the OpBase for errors
-func OpBaseValidate(op Operation, opType OperationType) error {
-	if op.OpType() != opType {
-		return fmt.Errorf("incorrect operation type (expected: %v, actual: %v)", opType, op.OpType())
+func opBaseValidate(op Operation, opType OperationType) error {
+	if op.base().OperationType != opType {
+		return fmt.Errorf("incorrect operation type (expected: %v, actual: %v)", opType, op.base().OperationType)
+	}
+
+	if _, err := op.Hash(); err != nil {
+		return errors.Wrap(err, "op is not serializable")
 	}
 
 	if op.GetUnixTime() == 0 {
 		return fmt.Errorf("time not set")
 	}
 
-	if err := op.GetAuthor().Validate(); err != nil {
+	if err := op.base().Author.Validate(); err != nil {
 		return errors.Wrap(err, "author")
 	}
 

tests/operation_iterator_test.go → bug/operation_iterator_test.go

@@ -1,32 +1,30 @@
-package tests
+package bug
 
 import (
-	"github.com/MichaelMure/git-bug/bug"
-	"github.com/MichaelMure/git-bug/operations"
 	"github.com/MichaelMure/git-bug/repository"
 	"testing"
 	"time"
 )
 
 var (
-	rene = bug.Person{
+	rene = Person{
 		Name:  "René Descartes",
 		Email: "rene@descartes.fr",
 	}
 
 	unix = time.Now().Unix()
 
-	createOp      = operations.NewCreateOp(rene, unix, "title", "message", nil)
-	setTitleOp    = operations.NewSetTitleOp(rene, unix, "title2", "title1")
-	addCommentOp  = operations.NewAddCommentOp(rene, unix, "message2", nil)
-	setStatusOp   = operations.NewSetStatusOp(rene, unix, bug.ClosedStatus)
-	labelChangeOp = operations.NewLabelChangeOperation(rene, unix, []bug.Label{"added"}, []bug.Label{"removed"})
+	createOp      = NewCreateOp(rene, unix, "title", "message", nil)
+	setTitleOp    = NewSetTitleOp(rene, unix, "title2", "title1")
+	addCommentOp  = NewAddCommentOp(rene, unix, "message2", nil)
+	setStatusOp   = NewSetStatusOp(rene, unix, ClosedStatus)
+	labelChangeOp = NewLabelChangeOperation(rene, unix, []Label{"added"}, []Label{"removed"})
 )
 
 func TestOpIterator(t *testing.T) {
 	mockRepo := repository.NewMockRepoForTest()
 
-	bug1 := bug.NewBug()
+	bug1 := NewBug()
 
 	// first pack
 	bug1.Append(createOp)
@@ -47,7 +45,7 @@ func TestOpIterator(t *testing.T) {
 	bug1.Append(setTitleOp)
 	bug1.Append(setTitleOp)
 
-	it := bug.NewOperationIterator(bug1)
+	it := NewOperationIterator(bug1)
 
 	counter := 0
 	for it.Next() {

bug/operation_pack.go

@@ -3,7 +3,6 @@ package bug
 import (
 	"encoding/json"
 	"fmt"
-	"reflect"
 
 	"github.com/MichaelMure/git-bug/repository"
 	"github.com/MichaelMure/git-bug/util/git"
@@ -25,17 +24,6 @@ type OperationPack struct {
 	commitHash git.Hash
 }
 
-// hold the different operation type to instantiate to parse JSON
-var operations map[OperationType]reflect.Type
-
-// Register will register a new type of Operation to be able to parse the corresponding JSON
-func Register(t OperationType, op interface{}) {
-	if operations == nil {
-		operations = make(map[OperationType]reflect.Type)
-	}
-	operations[t] = reflect.TypeOf(op)
-}
-
 func (opp *OperationPack) MarshalJSON() ([]byte, error) {
 	return json.Marshal(struct {
 		Version    uint        `json:"version"`
@@ -69,25 +57,51 @@ func (opp *OperationPack) UnmarshalJSON(data []byte) error {
 			return err
 		}
 
-		opType, ok := operations[t.OperationType]
-		if !ok {
-			return fmt.Errorf("unknown operation type %v", t.OperationType)
-		}
-
-		op := reflect.New(opType).Interface()
-
-		if err := json.Unmarshal(raw, op); err != nil {
+		op, err := opp.unmarshalOp(raw, t.OperationType)
+		if err != nil {
 			return err
 		}
 
-		deref := reflect.ValueOf(op).Elem().Interface()
+		// Compute the hash of the operation
+		op.base().hash = hashRaw(raw)
 
-		opp.Operations = append(opp.Operations, deref.(Operation))
+		opp.Operations = append(opp.Operations, op)
 	}
 
 	return nil
 }
 
+func (opp *OperationPack) unmarshalOp(raw []byte, _type OperationType) (Operation, error) {
+	switch _type {
+	case CreateOp:
+		op := &CreateOperation{}
+		err := json.Unmarshal(raw, &op)
+		return op, err
+	case SetTitleOp:
+		op := &SetTitleOperation{}
+		err := json.Unmarshal(raw, &op)
+		return op, err
+	case AddCommentOp:
+		op := &AddCommentOperation{}
+		err := json.Unmarshal(raw, &op)
+		return op, err
+	case SetStatusOp:
+		op := &SetStatusOperation{}
+		err := json.Unmarshal(raw, &op)
+		return op, err
+	case LabelChangeOp:
+		op := &LabelChangeOperation{}
+		err := json.Unmarshal(raw, &op)
+		return op, err
+	case EditCommentOp:
+		op := &EditCommentOperation{}
+		err := json.Unmarshal(raw, &op)
+		return op, err
+	default:
+		return nil, fmt.Errorf("unknown operation type %v", _type)
+	}
+}
+
 // Append a new operation to the pack
 func (opp *OperationPack) Append(op Operation) {
 	opp.Operations = append(opp.Operations, op)

tests/operation_pack_test.go → bug/operation_pack_test.go

@@ -1,17 +1,15 @@
-package tests
+package bug
 
 import (
 	"encoding/json"
-	"reflect"
 	"testing"
 
-	"github.com/MichaelMure/git-bug/bug"
-	"github.com/MichaelMure/git-bug/operations"
 	"github.com/MichaelMure/git-bug/util/git"
+	"github.com/go-test/deep"
 )
 
 func TestOperationPackSerialize(t *testing.T) {
-	opp := &bug.OperationPack{}
+	opp := &OperationPack{}
 
 	opp.Append(createOp)
 	opp.Append(setTitleOp)
@@ -19,7 +17,7 @@ func TestOperationPackSerialize(t *testing.T) {
 	opp.Append(setStatusOp)
 	opp.Append(labelChangeOp)
 
-	opMeta := operations.NewCreateOp(rene, unix, "title", "message", nil)
+	opMeta := NewCreateOp(rene, unix, "title", "message", nil)
 	opMeta.SetMetadata("key", "value")
 	opp.Append(opMeta)
 
@@ -27,7 +25,7 @@ func TestOperationPackSerialize(t *testing.T) {
 		t.Fatal()
 	}
 
-	opFile := operations.NewCreateOp(rene, unix, "title", "message", []git.Hash{
+	opFile := NewCreateOp(rene, unix, "title", "message", []git.Hash{
 		"abcdef",
 		"ghijkl",
 	})
@@ -42,13 +40,14 @@ func TestOperationPackSerialize(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	var opp2 *bug.OperationPack
+	var opp2 *OperationPack
 	err = json.Unmarshal(data, &opp2)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if !reflect.DeepEqual(opp, opp2) {
-		t.Fatalf("%v and %v are different", opp, opp2)
+	deep.CompareUnexportedFields = false
+	if diff := deep.Equal(opp, opp2); diff != nil {
+		t.Fatal(diff)
 	}
 }

operations/operations_test.go → bug/operation_test.go

@@ -1,27 +1,26 @@
-package operations
+package bug
 
 import (
 	"testing"
 	"time"
 
-	"github.com/MichaelMure/git-bug/bug"
 	"github.com/MichaelMure/git-bug/util/git"
 )
 
 func TestValidate(t *testing.T) {
-	rene := bug.Person{
+	rene := Person{
 		Name:  "René Descartes",
 		Email: "rene@descartes.fr",
 	}
 
 	unix := time.Now().Unix()
 
-	good := []bug.Operation{
+	good := []Operation{
 		NewCreateOp(rene, unix, "title", "message", nil),
 		NewSetTitleOp(rene, unix, "title2", "title1"),
 		NewAddCommentOp(rene, unix, "message2", nil),
-		NewSetStatusOp(rene, unix, bug.ClosedStatus),
-		NewLabelChangeOperation(rene, unix, []bug.Label{"added"}, []bug.Label{"removed"}),
+		NewSetStatusOp(rene, unix, ClosedStatus),
+		NewLabelChangeOperation(rene, unix, []Label{"added"}, []Label{"removed"}),
 	}
 
 	for _, op := range good {
@@ -30,17 +29,17 @@ func TestValidate(t *testing.T) {
 		}
 	}
 
-	bad := []bug.Operation{
+	bad := []Operation{
 		// opbase
-		NewSetStatusOp(bug.Person{Name: "", Email: "rene@descartes.fr"}, unix, bug.ClosedStatus),
-		NewSetStatusOp(bug.Person{Name: "René Descartes\u001b", Email: "rene@descartes.fr"}, unix, bug.ClosedStatus),
-		NewSetStatusOp(bug.Person{Name: "René Descartes", Email: "rene@descartes.fr\u001b"}, unix, bug.ClosedStatus),
-		NewSetStatusOp(bug.Person{Name: "René \nDescartes", Email: "rene@descartes.fr"}, unix, bug.ClosedStatus),
-		NewSetStatusOp(bug.Person{Name: "René Descartes", Email: "rene@\ndescartes.fr"}, unix, bug.ClosedStatus),
-		CreateOperation{OpBase: &bug.OpBase{
+		NewSetStatusOp(Person{Name: "", Email: "rene@descartes.fr"}, unix, ClosedStatus),
+		NewSetStatusOp(Person{Name: "René Descartes\u001b", Email: "rene@descartes.fr"}, unix, ClosedStatus),
+		NewSetStatusOp(Person{Name: "René Descartes", Email: "rene@descartes.fr\u001b"}, unix, ClosedStatus),
+		NewSetStatusOp(Person{Name: "René \nDescartes", Email: "rene@descartes.fr"}, unix, ClosedStatus),
+		NewSetStatusOp(Person{Name: "René Descartes", Email: "rene@\ndescartes.fr"}, unix, ClosedStatus),
+		&CreateOperation{OpBase: &OpBase{
 			Author:        rene,
 			UnixTime:      0,
-			OperationType: bug.CreateOp,
+			OperationType: CreateOp,
 		},
 			Title:   "title",
 			Message: "message",
@@ -59,8 +58,8 @@ func TestValidate(t *testing.T) {
 		NewAddCommentOp(rene, unix, "message", []git.Hash{git.Hash("invalid")}),
 		NewSetStatusOp(rene, unix, 1000),
 		NewSetStatusOp(rene, unix, 0),
-		NewLabelChangeOperation(rene, unix, []bug.Label{}, []bug.Label{}),
-		NewLabelChangeOperation(rene, unix, []bug.Label{"multi\nline"}, []bug.Label{}),
+		NewLabelChangeOperation(rene, unix, []Label{}, []Label{}),
+		NewLabelChangeOperation(rene, unix, []Label{"multi\nline"}, []Label{}),
 	}
 
 	for i, op := range bad {
@@ -68,5 +67,4 @@ func TestValidate(t *testing.T) {
 			t.Fatal("validation should have failed", i, op)
 		}
 	}
-
 }

bug/snapshot.go

@@ -3,6 +3,8 @@ package bug
 import (
 	"fmt"
 	"time"
+
+	"github.com/MichaelMure/git-bug/util/git"
 )
 
 // Snapshot is a compiled form of the Bug data structure used for storage and merge
@@ -16,20 +18,22 @@ type Snapshot struct {
 	Author    Person
 	CreatedAt time.Time
 
+	Timeline []TimelineItem
+
 	Operations []Operation
 }
 
 // Return the Bug identifier
-func (snap Snapshot) Id() string {
+func (snap *Snapshot) Id() string {
 	return snap.id
 }
 
 // Return the Bug identifier truncated for human consumption
-func (snap Snapshot) HumanId() string {
+func (snap *Snapshot) HumanId() string {
 	return fmt.Sprintf("%.8s", snap.id)
 }
 
-func (snap Snapshot) Summary() string {
+func (snap *Snapshot) Summary() string {
 	return fmt.Sprintf("C:%d L:%d",
 		len(snap.Comments)-1,
 		len(snap.Labels),
@@ -37,7 +41,7 @@ func (snap Snapshot) Summary() string {
 }
 
 // Return the last time a bug was modified
-func (snap Snapshot) LastEditTime() time.Time {
+func (snap *Snapshot) LastEditTime() time.Time {
 	if len(snap.Operations) == 0 {
 		return time.Unix(0, 0)
 	}
@@ -46,10 +50,26 @@ func (snap Snapshot) LastEditTime() time.Time {
 }
 
 // Return the last timestamp a bug was modified
-func (snap Snapshot) LastEditUnix() int64 {
+func (snap *Snapshot) LastEditUnix() int64 {
 	if len(snap.Operations) == 0 {
 		return 0
 	}
 
 	return snap.Operations[len(snap.Operations)-1].GetUnixTime()
 }
+
+// SearchTimelineItem will search in the timeline for an item matching the given hash
+func (snap *Snapshot) SearchTimelineItem(hash git.Hash) (TimelineItem, error) {
+	for i := range snap.Timeline {
+		h, err := snap.Timeline[i].Hash()
+		if err != nil {
+			return nil, err
+		}
+
+		if h == hash {
+			return snap.Timeline[i], nil
+		}
+	}
+
+	return nil, fmt.Errorf("timeline item not found")
+}
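
A hedged side note on intended usage: the operation hash is the stable identifier shared between operations and timeline items, so an edit target (EditCommentOperation.Target) can be resolved back to the item that carries the edit history. A minimal sketch, where the package and helper name timelineItemFor are hypothetical and not part of the PR:

package example

import "github.com/MichaelMure/git-bug/bug"

// timelineItemFor resolves an operation to the timeline item representing it
// in a compiled snapshot, using the operation hash as the shared identifier.
func timelineItemFor(snap *bug.Snapshot, op bug.Operation) (bug.TimelineItem, error) {
	h, err := op.Hash() // hash of the operation's serialized form
	if err != nil {
		return nil, err
	}
	return snap.SearchTimelineItem(h)
}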

bug/time.go

@@ -0,0 +1,9 @@
+package bug
+
+import "time"
+
+type Timestamp int64
+
+func (t Timestamp) Time() time.Time {
+	return time.Unix(int64(t), 0)
+}

bug/timeline.go

@@ -0,0 +1,87 @@
+package bug
+
+import (
+	"github.com/MichaelMure/git-bug/util/git"
+)
+
+type TimelineItem interface {
+	// Hash return the hash of the item
+	Hash() (git.Hash, error)
+}
+
+type CommentHistoryStep struct {
+	Message  string
+	UnixTime Timestamp
+}
+
+// CreateTimelineItem replace a Create operation in the Timeline and hold its edition history
+type CreateTimelineItem struct {
+	CommentTimelineItem
+}
+
+func NewCreateTimelineItem(hash git.Hash, comment Comment) *CreateTimelineItem {
+	return &CreateTimelineItem{
+		CommentTimelineItem: CommentTimelineItem{
+			hash:      hash,
+			Author:    comment.Author,
+			Message:   comment.Message,
+			Files:     comment.Files,
+			CreatedAt: comment.UnixTime,
+			LastEdit:  comment.UnixTime,
+			History: []CommentHistoryStep{
+				{
+					Message:  comment.Message,
+					UnixTime: comment.UnixTime,
+				},
+			},
+		},
+	}
+}
+
+// CommentTimelineItem replace a Comment in the Timeline and hold its edition history
+type CommentTimelineItem struct {
+	hash      git.Hash
+	Author    Person
+	Message   string
+	Files     []git.Hash
+	CreatedAt Timestamp
+	LastEdit  Timestamp
+	History   []CommentHistoryStep
+}
+
+func NewCommentTimelineItem(hash git.Hash, comment Comment) *CommentTimelineItem {
+	return &CommentTimelineItem{
+		hash:      hash,
+		Author:    comment.Author,
+		Message:   comment.Message,
+		Files:     comment.Files,
+		CreatedAt: comment.UnixTime,
+		LastEdit:  comment.UnixTime,
+		History: []CommentHistoryStep{
+			{
+				Message:  comment.Message,
+				UnixTime: comment.UnixTime,
+			},
+		},
+	}
+}
+
+func (c *CommentTimelineItem) Hash() (git.Hash, error) {
+	return c.hash, nil
+}
+
+// Append will append a new comment in the history and update the other values
+func (c *CommentTimelineItem) Append(comment Comment) {
+	c.Message = comment.Message
+	c.Files = comment.Files
+	c.LastEdit = comment.UnixTime
+	c.History = append(c.History, CommentHistoryStep{
+		Message:  comment.Message,
+		UnixTime: comment.UnixTime,
+	})
+}
+
+// Edited says whether the comment was edited
+func (c *CommentTimelineItem) Edited() bool {
+	return len(c.History) > 1
+}
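
Not part of the diff — a short sketch of the intended lifecycle of a comment's timeline entry; the hash and the two comments are hypothetical inputs:

package example

import (
	"github.com/MichaelMure/git-bug/bug"
	"github.com/MichaelMure/git-bug/util/git"
)

// foldEdit builds the timeline entry for a comment, then folds a later edit into it.
func foldEdit(h git.Hash, original, edit bug.Comment) *bug.CommentTimelineItem {
	item := bug.NewCommentTimelineItem(h, original)

	// Append records a CommentHistoryStep and refreshes Message, Files and LastEdit.
	item.Append(edit)

	// item.Edited() is now true: History holds the original step plus the edit.
	return item
}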

bug/with_snapshot.go 🔗

@@ -27,10 +27,8 @@ func (b *WithSnapshot) Append(op Operation) {
 		return
 	}
 
-	snap := op.Apply(*b.snap)
-	snap.Operations = append(snap.Operations, op)
-
-	b.snap = &snap
+	op.Apply(b.snap)
+	b.snap.Operations = append(b.snap.Operations, op)
 }
 
 // Commit intercept Bug.Commit() to update the snapshot efficiently
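
The hunk above follows the change of Operation.Apply to in-place mutation of the snapshot; a hedged, hypothetical operation is sketched below to show the shape of the new contract (it is not one of the real operations):

package example

import "github.com/MichaelMure/git-bug/bug"

// ExampleOperation is hypothetical; it only illustrates the in-place Apply
// contract relied on by WithSnapshot.Append above.
type ExampleOperation struct {
	Title string
}

// Apply mutates the snapshot directly instead of returning an updated copy.
func (op ExampleOperation) Apply(snapshot *bug.Snapshot) {
	snapshot.Title = op.Title
}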

cache/bug_cache.go 🔗

@@ -4,7 +4,6 @@ import (
 	"time"
 
 	"github.com/MichaelMure/git-bug/bug"
-	"github.com/MichaelMure/git-bug/operations"
 	"github.com/MichaelMure/git-bug/util/git"
 )
 
@@ -50,7 +49,7 @@ func (c *BugCache) AddCommentWithFiles(message string, files []git.Hash) error {
 }
 
 func (c *BugCache) AddCommentRaw(author bug.Person, unixTime int64, message string, files []git.Hash, metadata map[string]string) error {
-	err := operations.CommentWithFiles(c.bug, author, unixTime, message, files)
+	err := bug.AddCommentWithFiles(c.bug, author, unixTime, message, files)
 	if err != nil {
 		return err
 	}
@@ -58,7 +57,7 @@ func (c *BugCache) AddCommentRaw(author bug.Person, unixTime int64, message stri
 	return c.notifyUpdated()
 }
 
-func (c *BugCache) ChangeLabels(added []string, removed []string) ([]operations.LabelChangeResult, error) {
+func (c *BugCache) ChangeLabels(added []string, removed []string) ([]bug.LabelChangeResult, error) {
 	author, err := bug.GetUser(c.repoCache.repo)
 	if err != nil {
 		return nil, err
@@ -67,8 +66,8 @@ func (c *BugCache) ChangeLabels(added []string, removed []string) ([]operations.
 	return c.ChangeLabelsRaw(author, time.Now().Unix(), added, removed)
 }
 
-func (c *BugCache) ChangeLabelsRaw(author bug.Person, unixTime int64, added []string, removed []string) ([]operations.LabelChangeResult, error) {
-	changes, err := operations.ChangeLabels(c.bug, author, unixTime, added, removed)
+func (c *BugCache) ChangeLabelsRaw(author bug.Person, unixTime int64, added []string, removed []string) ([]bug.LabelChangeResult, error) {
+	changes, err := bug.ChangeLabels(c.bug, author, unixTime, added, removed)
 	if err != nil {
 		return changes, err
 	}
@@ -91,7 +90,7 @@ func (c *BugCache) Open() error {
 }
 
 func (c *BugCache) OpenRaw(author bug.Person, unixTime int64) error {
-	err := operations.Open(c.bug, author, unixTime)
+	err := bug.Open(c.bug, author, unixTime)
 	if err != nil {
 		return err
 	}
@@ -109,7 +108,7 @@ func (c *BugCache) Close() error {
 }
 
 func (c *BugCache) CloseRaw(author bug.Person, unixTime int64) error {
-	err := operations.Close(c.bug, author, unixTime)
+	err := bug.Close(c.bug, author, unixTime)
 	if err != nil {
 		return err
 	}
@@ -127,7 +126,7 @@ func (c *BugCache) SetTitle(title string) error {
 }
 
 func (c *BugCache) SetTitleRaw(author bug.Person, unixTime int64, title string) error {
-	err := operations.SetTitle(c.bug, author, unixTime, title)
+	err := bug.SetTitle(c.bug, author, unixTime, title)
 	if err != nil {
 		return err
 	}
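
Not part of the diff — the helpers formerly under operations/ are now plain functions of the bug package; a minimal sketch of the relocated entry points, with hypothetical inputs and messages:

package example

import (
	"time"

	"github.com/MichaelMure/git-bug/bug"
)

// touchBug exercises a few of the relocated helpers on any bug.Interface value.
func touchBug(b bug.Interface, author bug.Person) error {
	now := time.Now().Unix()

	if err := bug.AddComment(b, author, now, "reproduced on master"); err != nil {
		return err
	}
	if err := bug.SetTitle(b, author, now, "panic when pushing to a bare repo"); err != nil {
		return err
	}
	// ChangeLabels also returns per-label results, ignored here for brevity.
	if _, err := bug.ChangeLabels(b, author, now, []string{"bug"}, nil); err != nil {
		return err
	}
	return bug.Close(b, author, now)
}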

cache/repo_cache.go 🔗

@@ -14,7 +14,6 @@ import (
 	"time"
 
 	"github.com/MichaelMure/git-bug/bug"
-	"github.com/MichaelMure/git-bug/operations"
 	"github.com/MichaelMure/git-bug/repository"
 	"github.com/MichaelMure/git-bug/util/git"
 	"github.com/MichaelMure/git-bug/util/process"
@@ -362,7 +361,7 @@ func (c *RepoCache) NewBugWithFiles(title string, message string, files []git.Ha
 // well as metadata for the Create operation.
 // The new bug is written in the repository (commit)
 func (c *RepoCache) NewBugRaw(author bug.Person, unixTime int64, title string, message string, files []git.Hash, metadata map[string]string) (*BugCache, error) {
-	b, err := operations.CreateWithFiles(author, unixTime, title, message, files)
+	b, err := bug.CreateWithFiles(author, unixTime, title, message, files)
 	if err != nil {
 		return nil, err
 	}

graphql/connections/connections.go 🔗

@@ -1,6 +1,7 @@
 //go:generate genny -in=connection_template.go -out=gen_bug.go gen "NodeType=string EdgeType=LazyBugEdge ConnectionType=models.BugConnection"
 //go:generate genny -in=connection_template.go -out=gen_operation.go gen "NodeType=bug.Operation EdgeType=models.OperationEdge ConnectionType=models.OperationConnection"
 //go:generate genny -in=connection_template.go -out=gen_comment.go gen "NodeType=bug.Comment EdgeType=models.CommentEdge ConnectionType=models.CommentConnection"
+//go:generate genny -in=connection_template.go -out=gen_timeline.go gen "NodeType=bug.TimelineItem EdgeType=models.TimelineItemEdge ConnectionType=models.TimelineItemConnection"
 
 // Package connections implement a generic GraphQL relay connection
 package connections

graphql/connections/gen_timeline.go 🔗

@@ -0,0 +1,111 @@
+// This file was automatically generated by genny.
+// Any changes will be lost if this file is regenerated.
+// see https://github.com/cheekybits/genny
+
+package connections
+
+import (
+	"fmt"
+
+	"github.com/MichaelMure/git-bug/bug"
+	"github.com/MichaelMure/git-bug/graphql/models"
+)
+
+// BugTimelineItemEdgeMaker define a function that take a bug.TimelineItem and an offset and
+// create an Edge.
+type BugTimelineItemEdgeMaker func(value bug.TimelineItem, offset int) Edge
+
+// BugTimelineItemConMaker define a function that create a models.TimelineItemConnection
+type BugTimelineItemConMaker func(
+	edges []models.TimelineItemEdge,
+	nodes []bug.TimelineItem,
+	info models.PageInfo,
+	totalCount int) (models.TimelineItemConnection, error)
+
+// BugTimelineItemCon will paginate a source according to the input of a relay connection
+func BugTimelineItemCon(source []bug.TimelineItem, edgeMaker BugTimelineItemEdgeMaker, conMaker BugTimelineItemConMaker, input models.ConnectionInput) (models.TimelineItemConnection, error) {
+	var nodes []bug.TimelineItem
+	var edges []models.TimelineItemEdge
+	var cursors []string
+	var pageInfo models.PageInfo
+	var totalCount = len(source)
+
+	emptyCon, _ := conMaker(edges, nodes, pageInfo, 0)
+
+	offset := 0
+
+	if input.After != nil {
+		for i, value := range source {
+			edge := edgeMaker(value, i)
+			if edge.GetCursor() == *input.After {
+				// remove all previous element including the "after" one
+				source = source[i+1:]
+				offset = i + 1
+				pageInfo.HasPreviousPage = true
+				break
+			}
+		}
+	}
+
+	if input.Before != nil {
+		for i, value := range source {
+			edge := edgeMaker(value, i+offset)
+
+			if edge.GetCursor() == *input.Before {
+				// remove all after element including the "before" one
+				pageInfo.HasNextPage = true
+				break
+			}
+
+			edges = append(edges, edge.(models.TimelineItemEdge))
+			cursors = append(cursors, edge.GetCursor())
+			nodes = append(nodes, value)
+		}
+	} else {
+		edges = make([]models.TimelineItemEdge, len(source))
+		cursors = make([]string, len(source))
+		nodes = source
+
+		for i, value := range source {
+			edge := edgeMaker(value, i+offset)
+			edges[i] = edge.(models.TimelineItemEdge)
+			cursors[i] = edge.GetCursor()
+		}
+	}
+
+	if input.First != nil {
+		if *input.First < 0 {
+			return emptyCon, fmt.Errorf("first less than zero")
+		}
+
+		if len(edges) > *input.First {
+			// Slice result to be of length first by removing edges from the end
+			edges = edges[:*input.First]
+			cursors = cursors[:*input.First]
+			nodes = nodes[:*input.First]
+			pageInfo.HasNextPage = true
+		}
+	}
+
+	if input.Last != nil {
+		if *input.Last < 0 {
+			return emptyCon, fmt.Errorf("last less than zero")
+		}
+
+		if len(edges) > *input.Last {
+			// Slice result to be of length last by removing edges from the start
+			edges = edges[len(edges)-*input.Last:]
+			cursors = cursors[len(cursors)-*input.Last:]
+			nodes = nodes[len(nodes)-*input.Last:]
+			pageInfo.HasPreviousPage = true
+		}
+	}
+
+	// Fill up pageInfo cursors
+	if len(cursors) > 0 {
+		pageInfo.StartCursor = cursors[0]
+		pageInfo.EndCursor = cursors[len(cursors)-1]
+	}
+
+	return conMaker(edges, nodes, pageInfo, totalCount)
+}

graphql/gqlgen.yml 🔗

@@ -22,12 +22,20 @@ models:
   Operation:
     model: github.com/MichaelMure/git-bug/bug.Operation
   CreateOperation:
-    model: github.com/MichaelMure/git-bug/operations.CreateOperation
+    model: github.com/MichaelMure/git-bug/bug.CreateOperation
   SetTitleOperation:
-    model: github.com/MichaelMure/git-bug/operations.SetTitleOperation
+    model: github.com/MichaelMure/git-bug/bug.SetTitleOperation
   AddCommentOperation:
-    model: github.com/MichaelMure/git-bug/operations.AddCommentOperation
+    model: github.com/MichaelMure/git-bug/bug.AddCommentOperation
   SetStatusOperation:
-    model: github.com/MichaelMure/git-bug/operations.SetStatusOperation
+    model: github.com/MichaelMure/git-bug/bug.SetStatusOperation
   LabelChangeOperation:
-    model: github.com/MichaelMure/git-bug/operations.LabelChangeOperation
+    model: github.com/MichaelMure/git-bug/bug.LabelChangeOperation
+  TimelineItem:
+    model: github.com/MichaelMure/git-bug/bug.TimelineItem
+  CommentHistoryStep:
+    model: github.com/MichaelMure/git-bug/bug.CommentHistoryStep
+  CreateTimelineItem:
+    model: github.com/MichaelMure/git-bug/bug.CreateTimelineItem
+  CommentTimelineItem:
+    model: github.com/MichaelMure/git-bug/bug.CommentTimelineItem

graphql/graph/gen_graph.go 🔗

@@ -14,7 +14,6 @@ import (
 	introspection "github.com/99designs/gqlgen/graphql/introspection"
 	bug "github.com/MichaelMure/git-bug/bug"
 	models "github.com/MichaelMure/git-bug/graphql/models"
-	operations "github.com/MichaelMure/git-bug/operations"
 	git "github.com/MichaelMure/git-bug/util/git"
 	gqlparser "github.com/vektah/gqlparser"
 	ast "github.com/vektah/gqlparser/ast"
@@ -38,7 +37,10 @@ type Config struct {
 type ResolverRoot interface {
 	AddCommentOperation() AddCommentOperationResolver
 	Bug() BugResolver
+	CommentHistoryStep() CommentHistoryStepResolver
+	CommentTimelineItem() CommentTimelineItemResolver
 	CreateOperation() CreateOperationResolver
+	CreateTimelineItem() CreateTimelineItemResolver
 	LabelChangeOperation() LabelChangeOperationResolver
 	Mutation() MutationResolver
 	Query() QueryResolver
@@ -68,6 +70,7 @@ type ComplexityRoot struct {
 		CreatedAt  func(childComplexity int) int
 		LastEdit   func(childComplexity int) int
 		Comments   func(childComplexity int, after *string, before *string, first *int, last *int) int
+		Timeline   func(childComplexity int, after *string, before *string, first *int, last *int) int
 		Operations func(childComplexity int, after *string, before *string, first *int, last *int) int
 	}
 
@@ -101,6 +104,22 @@ type ComplexityRoot struct {
 		Node   func(childComplexity int) int
 	}
 
+	CommentHistoryStep struct {
+		Message func(childComplexity int) int
+		Date    func(childComplexity int) int
+	}
+
+	CommentTimelineItem struct {
+		Hash      func(childComplexity int) int
+		Author    func(childComplexity int) int
+		Message   func(childComplexity int) int
+		Files     func(childComplexity int) int
+		CreatedAt func(childComplexity int) int
+		LastEdit  func(childComplexity int) int
+		Edited    func(childComplexity int) int
+		History   func(childComplexity int) int
+	}
+
 	CreateOperation struct {
 		Author  func(childComplexity int) int
 		Date    func(childComplexity int) int
@@ -109,7 +128,19 @@ type ComplexityRoot struct {
 		Files   func(childComplexity int) int
 	}
 
+	CreateTimelineItem struct {
+		Hash      func(childComplexity int) int
+		Author    func(childComplexity int) int
+		Message   func(childComplexity int) int
+		Files     func(childComplexity int) int
+		CreatedAt func(childComplexity int) int
+		LastEdit  func(childComplexity int) int
+		Edited    func(childComplexity int) int
+		History   func(childComplexity int) int
+	}
+
 	LabelChangeOperation struct {
+		Hash    func(childComplexity int) int
 		Author  func(childComplexity int) int
 		Date    func(childComplexity int) int
 		Added   func(childComplexity int) int
@@ -162,37 +193,63 @@ type ComplexityRoot struct {
 	}
 
 	SetStatusOperation struct {
+		Hash   func(childComplexity int) int
 		Author func(childComplexity int) int
 		Date   func(childComplexity int) int
 		Status func(childComplexity int) int
 	}
 
 	SetTitleOperation struct {
+		Hash   func(childComplexity int) int
 		Author func(childComplexity int) int
 		Date   func(childComplexity int) int
 		Title  func(childComplexity int) int
 		Was    func(childComplexity int) int
 	}
+
+	TimelineItemConnection struct {
+		Edges      func(childComplexity int) int
+		Nodes      func(childComplexity int) int
+		PageInfo   func(childComplexity int) int
+		TotalCount func(childComplexity int) int
+	}
+
+	TimelineItemEdge struct {
+		Cursor func(childComplexity int) int
+		Node   func(childComplexity int) int
+	}
 }
 
 type AddCommentOperationResolver interface {
-	Author(ctx context.Context, obj *operations.AddCommentOperation) (bug.Person, error)
-	Date(ctx context.Context, obj *operations.AddCommentOperation) (time.Time, error)
+	Author(ctx context.Context, obj *bug.AddCommentOperation) (bug.Person, error)
+	Date(ctx context.Context, obj *bug.AddCommentOperation) (time.Time, error)
 }
 type BugResolver interface {
 	Status(ctx context.Context, obj *bug.Snapshot) (models.Status, error)
 
 	LastEdit(ctx context.Context, obj *bug.Snapshot) (time.Time, error)
 	Comments(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int) (models.CommentConnection, error)
+	Timeline(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int) (models.TimelineItemConnection, error)
 	Operations(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int) (models.OperationConnection, error)
 }
+type CommentHistoryStepResolver interface {
+	Date(ctx context.Context, obj *bug.CommentHistoryStep) (time.Time, error)
+}
+type CommentTimelineItemResolver interface {
+	CreatedAt(ctx context.Context, obj *bug.CommentTimelineItem) (time.Time, error)
+	LastEdit(ctx context.Context, obj *bug.CommentTimelineItem) (time.Time, error)
+}
 type CreateOperationResolver interface {
-	Author(ctx context.Context, obj *operations.CreateOperation) (bug.Person, error)
-	Date(ctx context.Context, obj *operations.CreateOperation) (time.Time, error)
+	Author(ctx context.Context, obj *bug.CreateOperation) (bug.Person, error)
+	Date(ctx context.Context, obj *bug.CreateOperation) (time.Time, error)
+}
+type CreateTimelineItemResolver interface {
+	CreatedAt(ctx context.Context, obj *bug.CreateTimelineItem) (time.Time, error)
+	LastEdit(ctx context.Context, obj *bug.CreateTimelineItem) (time.Time, error)
 }
 type LabelChangeOperationResolver interface {
-	Author(ctx context.Context, obj *operations.LabelChangeOperation) (bug.Person, error)
-	Date(ctx context.Context, obj *operations.LabelChangeOperation) (time.Time, error)
+	Author(ctx context.Context, obj *bug.LabelChangeOperation) (bug.Person, error)
+	Date(ctx context.Context, obj *bug.LabelChangeOperation) (time.Time, error)
 }
 type MutationResolver interface {
 	NewBug(ctx context.Context, repoRef *string, title string, message string, files []git.Hash) (bug.Snapshot, error)
@@ -212,13 +269,13 @@ type RepositoryResolver interface {
 	Bug(ctx context.Context, obj *models.Repository, prefix string) (*bug.Snapshot, error)
 }
 type SetStatusOperationResolver interface {
-	Author(ctx context.Context, obj *operations.SetStatusOperation) (bug.Person, error)
-	Date(ctx context.Context, obj *operations.SetStatusOperation) (time.Time, error)
-	Status(ctx context.Context, obj *operations.SetStatusOperation) (models.Status, error)
+	Author(ctx context.Context, obj *bug.SetStatusOperation) (bug.Person, error)
+	Date(ctx context.Context, obj *bug.SetStatusOperation) (time.Time, error)
+	Status(ctx context.Context, obj *bug.SetStatusOperation) (models.Status, error)
 }
 type SetTitleOperationResolver interface {
-	Author(ctx context.Context, obj *operations.SetTitleOperation) (bug.Person, error)
-	Date(ctx context.Context, obj *operations.SetTitleOperation) (time.Time, error)
+	Author(ctx context.Context, obj *bug.SetTitleOperation) (bug.Person, error)
+	Date(ctx context.Context, obj *bug.SetTitleOperation) (time.Time, error)
 }
 
 func field_Bug_comments_args(rawArgs map[string]interface{}) (map[string]interface{}, error) {
@@ -283,6 +340,68 @@ func field_Bug_comments_args(rawArgs map[string]interface{}) (map[string]interfa
 
 }
 
+func field_Bug_timeline_args(rawArgs map[string]interface{}) (map[string]interface{}, error) {
+	args := map[string]interface{}{}
+	var arg0 *string
+	if tmp, ok := rawArgs["after"]; ok {
+		var err error
+		var ptr1 string
+		if tmp != nil {
+			ptr1, err = graphql.UnmarshalString(tmp)
+			arg0 = &ptr1
+		}
+
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["after"] = arg0
+	var arg1 *string
+	if tmp, ok := rawArgs["before"]; ok {
+		var err error
+		var ptr1 string
+		if tmp != nil {
+			ptr1, err = graphql.UnmarshalString(tmp)
+			arg1 = &ptr1
+		}
+
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["before"] = arg1
+	var arg2 *int
+	if tmp, ok := rawArgs["first"]; ok {
+		var err error
+		var ptr1 int
+		if tmp != nil {
+			ptr1, err = graphql.UnmarshalInt(tmp)
+			arg2 = &ptr1
+		}
+
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["first"] = arg2
+	var arg3 *int
+	if tmp, ok := rawArgs["last"]; ok {
+		var err error
+		var ptr1 int
+		if tmp != nil {
+			ptr1, err = graphql.UnmarshalInt(tmp)
+			arg3 = &ptr1
+		}
+
+		if err != nil {
+			return nil, err
+		}
+	}
+	args["last"] = arg3
+	return args, nil
+
+}
+
 func field_Bug_operations_args(rawArgs map[string]interface{}) (map[string]interface{}, error) {
 	args := map[string]interface{}{}
 	var arg0 *string
@@ -915,6 +1034,18 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.Bug.Comments(childComplexity, args["after"].(*string), args["before"].(*string), args["first"].(*int), args["last"].(*int)), true
 
+	case "Bug.timeline":
+		if e.complexity.Bug.Timeline == nil {
+			break
+		}
+
+		args, err := field_Bug_timeline_args(rawArgs)
+		if err != nil {
+			return 0, false
+		}
+
+		return e.complexity.Bug.Timeline(childComplexity, args["after"].(*string), args["before"].(*string), args["first"].(*int), args["last"].(*int)), true
+
 	case "Bug.operations":
 		if e.complexity.Bug.Operations == nil {
 			break
@@ -1032,6 +1163,76 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.CommentEdge.Node(childComplexity), true
 
+	case "CommentHistoryStep.message":
+		if e.complexity.CommentHistoryStep.Message == nil {
+			break
+		}
+
+		return e.complexity.CommentHistoryStep.Message(childComplexity), true
+
+	case "CommentHistoryStep.date":
+		if e.complexity.CommentHistoryStep.Date == nil {
+			break
+		}
+
+		return e.complexity.CommentHistoryStep.Date(childComplexity), true
+
+	case "CommentTimelineItem.hash":
+		if e.complexity.CommentTimelineItem.Hash == nil {
+			break
+		}
+
+		return e.complexity.CommentTimelineItem.Hash(childComplexity), true
+
+	case "CommentTimelineItem.author":
+		if e.complexity.CommentTimelineItem.Author == nil {
+			break
+		}
+
+		return e.complexity.CommentTimelineItem.Author(childComplexity), true
+
+	case "CommentTimelineItem.message":
+		if e.complexity.CommentTimelineItem.Message == nil {
+			break
+		}
+
+		return e.complexity.CommentTimelineItem.Message(childComplexity), true
+
+	case "CommentTimelineItem.files":
+		if e.complexity.CommentTimelineItem.Files == nil {
+			break
+		}
+
+		return e.complexity.CommentTimelineItem.Files(childComplexity), true
+
+	case "CommentTimelineItem.createdAt":
+		if e.complexity.CommentTimelineItem.CreatedAt == nil {
+			break
+		}
+
+		return e.complexity.CommentTimelineItem.CreatedAt(childComplexity), true
+
+	case "CommentTimelineItem.lastEdit":
+		if e.complexity.CommentTimelineItem.LastEdit == nil {
+			break
+		}
+
+		return e.complexity.CommentTimelineItem.LastEdit(childComplexity), true
+
+	case "CommentTimelineItem.edited":
+		if e.complexity.CommentTimelineItem.Edited == nil {
+			break
+		}
+
+		return e.complexity.CommentTimelineItem.Edited(childComplexity), true
+
+	case "CommentTimelineItem.history":
+		if e.complexity.CommentTimelineItem.History == nil {
+			break
+		}
+
+		return e.complexity.CommentTimelineItem.History(childComplexity), true
+
 	case "CreateOperation.author":
 		if e.complexity.CreateOperation.Author == nil {
 			break
@@ -1067,6 +1268,69 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.CreateOperation.Files(childComplexity), true
 
+	case "CreateTimelineItem.hash":
+		if e.complexity.CreateTimelineItem.Hash == nil {
+			break
+		}
+
+		return e.complexity.CreateTimelineItem.Hash(childComplexity), true
+
+	case "CreateTimelineItem.author":
+		if e.complexity.CreateTimelineItem.Author == nil {
+			break
+		}
+
+		return e.complexity.CreateTimelineItem.Author(childComplexity), true
+
+	case "CreateTimelineItem.message":
+		if e.complexity.CreateTimelineItem.Message == nil {
+			break
+		}
+
+		return e.complexity.CreateTimelineItem.Message(childComplexity), true
+
+	case "CreateTimelineItem.files":
+		if e.complexity.CreateTimelineItem.Files == nil {
+			break
+		}
+
+		return e.complexity.CreateTimelineItem.Files(childComplexity), true
+
+	case "CreateTimelineItem.createdAt":
+		if e.complexity.CreateTimelineItem.CreatedAt == nil {
+			break
+		}
+
+		return e.complexity.CreateTimelineItem.CreatedAt(childComplexity), true
+
+	case "CreateTimelineItem.lastEdit":
+		if e.complexity.CreateTimelineItem.LastEdit == nil {
+			break
+		}
+
+		return e.complexity.CreateTimelineItem.LastEdit(childComplexity), true
+
+	case "CreateTimelineItem.edited":
+		if e.complexity.CreateTimelineItem.Edited == nil {
+			break
+		}
+
+		return e.complexity.CreateTimelineItem.Edited(childComplexity), true
+
+	case "CreateTimelineItem.history":
+		if e.complexity.CreateTimelineItem.History == nil {
+			break
+		}
+
+		return e.complexity.CreateTimelineItem.History(childComplexity), true
+
+	case "LabelChangeOperation.hash":
+		if e.complexity.LabelChangeOperation.Hash == nil {
+			break
+		}
+
+		return e.complexity.LabelChangeOperation.Hash(childComplexity), true
+
 	case "LabelChangeOperation.author":
 		if e.complexity.LabelChangeOperation.Author == nil {
 			break
@@ -1313,6 +1577,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.Repository.Bug(childComplexity, args["prefix"].(string)), true
 
+	case "SetStatusOperation.hash":
+		if e.complexity.SetStatusOperation.Hash == nil {
+			break
+		}
+
+		return e.complexity.SetStatusOperation.Hash(childComplexity), true
+
 	case "SetStatusOperation.author":
 		if e.complexity.SetStatusOperation.Author == nil {
 			break
@@ -1334,6 +1605,13 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.SetStatusOperation.Status(childComplexity), true
 
+	case "SetTitleOperation.hash":
+		if e.complexity.SetTitleOperation.Hash == nil {
+			break
+		}
+
+		return e.complexity.SetTitleOperation.Hash(childComplexity), true
+
 	case "SetTitleOperation.author":
 		if e.complexity.SetTitleOperation.Author == nil {
 			break
@@ -1362,6 +1640,48 @@ func (e *executableSchema) Complexity(typeName, field string, childComplexity in
 
 		return e.complexity.SetTitleOperation.Was(childComplexity), true
 
+	case "TimelineItemConnection.edges":
+		if e.complexity.TimelineItemConnection.Edges == nil {
+			break
+		}
+
+		return e.complexity.TimelineItemConnection.Edges(childComplexity), true
+
+	case "TimelineItemConnection.nodes":
+		if e.complexity.TimelineItemConnection.Nodes == nil {
+			break
+		}
+
+		return e.complexity.TimelineItemConnection.Nodes(childComplexity), true
+
+	case "TimelineItemConnection.pageInfo":
+		if e.complexity.TimelineItemConnection.PageInfo == nil {
+			break
+		}
+
+		return e.complexity.TimelineItemConnection.PageInfo(childComplexity), true
+
+	case "TimelineItemConnection.totalCount":
+		if e.complexity.TimelineItemConnection.TotalCount == nil {
+			break
+		}
+
+		return e.complexity.TimelineItemConnection.TotalCount(childComplexity), true
+
+	case "TimelineItemEdge.cursor":
+		if e.complexity.TimelineItemEdge.Cursor == nil {
+			break
+		}
+
+		return e.complexity.TimelineItemEdge.Cursor(childComplexity), true
+
+	case "TimelineItemEdge.node":
+		if e.complexity.TimelineItemEdge.Node == nil {
+			break
+		}
+
+		return e.complexity.TimelineItemEdge.Node(childComplexity), true
+
 	}
 	return 0, false
 }
@@ -1410,7 +1730,7 @@ type executionContext struct {
 var addCommentOperationImplementors = []string{"AddCommentOperation", "Operation", "Authored"}
 
 // nolint: gocyclo, errcheck, gas, goconst
-func (ec *executionContext) _AddCommentOperation(ctx context.Context, sel ast.SelectionSet, obj *operations.AddCommentOperation) graphql.Marshaler {
+func (ec *executionContext) _AddCommentOperation(ctx context.Context, sel ast.SelectionSet, obj *bug.AddCommentOperation) graphql.Marshaler {
 	fields := graphql.CollectFields(ctx, sel, addCommentOperationImplementors)
 
 	var wg sync.WaitGroup
@@ -1462,7 +1782,7 @@ func (ec *executionContext) _AddCommentOperation(ctx context.Context, sel ast.Se
 }
 
 // nolint: vetshadow
-func (ec *executionContext) _AddCommentOperation_author(ctx context.Context, field graphql.CollectedField, obj *operations.AddCommentOperation) graphql.Marshaler {
+func (ec *executionContext) _AddCommentOperation_author(ctx context.Context, field graphql.CollectedField, obj *bug.AddCommentOperation) graphql.Marshaler {
 	rctx := &graphql.ResolverContext{
 		Object: "AddCommentOperation",
 		Args:   nil,
@@ -1485,7 +1805,7 @@ func (ec *executionContext) _AddCommentOperation_author(ctx context.Context, fie
 }
 
 // nolint: vetshadow
-func (ec *executionContext) _AddCommentOperation_date(ctx context.Context, field graphql.CollectedField, obj *operations.AddCommentOperation) graphql.Marshaler {
+func (ec *executionContext) _AddCommentOperation_date(ctx context.Context, field graphql.CollectedField, obj *bug.AddCommentOperation) graphql.Marshaler {
 	rctx := &graphql.ResolverContext{
 		Object: "AddCommentOperation",
 		Args:   nil,
@@ -1507,7 +1827,7 @@ func (ec *executionContext) _AddCommentOperation_date(ctx context.Context, field
 }
 
 // nolint: vetshadow
-func (ec *executionContext) _AddCommentOperation_message(ctx context.Context, field graphql.CollectedField, obj *operations.AddCommentOperation) graphql.Marshaler {
+func (ec *executionContext) _AddCommentOperation_message(ctx context.Context, field graphql.CollectedField, obj *bug.AddCommentOperation) graphql.Marshaler {
 	rctx := &graphql.ResolverContext{
 		Object: "AddCommentOperation",
 		Args:   nil,
@@ -1529,7 +1849,7 @@ func (ec *executionContext) _AddCommentOperation_message(ctx context.Context, fi
 }
 
 // nolint: vetshadow
-func (ec *executionContext) _AddCommentOperation_files(ctx context.Context, field graphql.CollectedField, obj *operations.AddCommentOperation) graphql.Marshaler {
+func (ec *executionContext) _AddCommentOperation_files(ctx context.Context, field graphql.CollectedField, obj *bug.AddCommentOperation) graphql.Marshaler {
 	rctx := &graphql.ResolverContext{
 		Object: "AddCommentOperation",
 		Args:   nil,
@@ -1631,6 +1951,15 @@ func (ec *executionContext) _Bug(ctx context.Context, sel ast.SelectionSet, obj
 				}
 				wg.Done()
 			}(i, field)
+		case "timeline":
+			wg.Add(1)
+			go func(i int, field graphql.CollectedField) {
+				out.Values[i] = ec._Bug_timeline(ctx, field, obj)
+				if out.Values[i] == graphql.Null {
+					invalid = true
+				}
+				wg.Done()
+			}(i, field)
 		case "operations":
 			wg.Add(1)
 			go func(i int, field graphql.CollectedField) {
@@ -1866,6 +2195,35 @@ func (ec *executionContext) _Bug_comments(ctx context.Context, field graphql.Col
 	return ec._CommentConnection(ctx, field.Selections, &res)
 }
 
+// nolint: vetshadow
+func (ec *executionContext) _Bug_timeline(ctx context.Context, field graphql.CollectedField, obj *bug.Snapshot) graphql.Marshaler {
+	rawArgs := field.ArgumentMap(ec.Variables)
+	args, err := field_Bug_timeline_args(rawArgs)
+	if err != nil {
+		ec.Error(ctx, err)
+		return graphql.Null
+	}
+	rctx := &graphql.ResolverContext{
+		Object: "Bug",
+		Args:   args,
+		Field:  field,
+	}
+	ctx = graphql.WithResolverContext(ctx, rctx)
+	resTmp := ec.FieldMiddleware(ctx, obj, func(ctx context.Context) (interface{}, error) {
+		return ec.resolvers.Bug().Timeline(ctx, obj, args["after"].(*string), args["before"].(*string), args["first"].(*int), args["last"].(*int))
+	})
+	if resTmp == nil {
+		if !ec.HasError(rctx) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(models.TimelineItemConnection)
+	rctx.Result = res
+
+	return ec._TimelineItemConnection(ctx, field.Selections, &res)
+}
+
 // nolint: vetshadow
 func (ec *executionContext) _Bug_operations(ctx context.Context, field graphql.CollectedField, obj *bug.Snapshot) graphql.Marshaler {
 	rawArgs := field.ArgumentMap(ec.Variables)
@@ -2571,11 +2929,11 @@ func (ec *executionContext) _CommentEdge_node(ctx context.Context, field graphql
 	return ec._Comment(ctx, field.Selections, &res)
 }
 
-var createOperationImplementors = []string{"CreateOperation", "Operation", "Authored"}
+var commentHistoryStepImplementors = []string{"CommentHistoryStep"}
 
 // nolint: gocyclo, errcheck, gas, goconst
-func (ec *executionContext) _CreateOperation(ctx context.Context, sel ast.SelectionSet, obj *operations.CreateOperation) graphql.Marshaler {
-	fields := graphql.CollectFields(ctx, sel, createOperationImplementors)
+func (ec *executionContext) _CommentHistoryStep(ctx context.Context, sel ast.SelectionSet, obj *bug.CommentHistoryStep) graphql.Marshaler {
+	fields := graphql.CollectFields(ctx, sel, commentHistoryStepImplementors)
 
 	var wg sync.WaitGroup
 	out := graphql.NewOrderedMap(len(fields))
@@ -2585,40 +2943,21 @@ func (ec *executionContext) _CreateOperation(ctx context.Context, sel ast.Select
 
 		switch field.Name {
 		case "__typename":
-			out.Values[i] = graphql.MarshalString("CreateOperation")
-		case "author":
-			wg.Add(1)
-			go func(i int, field graphql.CollectedField) {
-				out.Values[i] = ec._CreateOperation_author(ctx, field, obj)
-				if out.Values[i] == graphql.Null {
-					invalid = true
-				}
-				wg.Done()
-			}(i, field)
+			out.Values[i] = graphql.MarshalString("CommentHistoryStep")
+		case "message":
+			out.Values[i] = ec._CommentHistoryStep_message(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				invalid = true
+			}
 		case "date":
 			wg.Add(1)
 			go func(i int, field graphql.CollectedField) {
-				out.Values[i] = ec._CreateOperation_date(ctx, field, obj)
+				out.Values[i] = ec._CommentHistoryStep_date(ctx, field, obj)
 				if out.Values[i] == graphql.Null {
 					invalid = true
 				}
 				wg.Done()
 			}(i, field)
-		case "title":
-			out.Values[i] = ec._CreateOperation_title(ctx, field, obj)
-			if out.Values[i] == graphql.Null {
-				invalid = true
-			}
-		case "message":
-			out.Values[i] = ec._CreateOperation_message(ctx, field, obj)
-			if out.Values[i] == graphql.Null {
-				invalid = true
-			}
-		case "files":
-			out.Values[i] = ec._CreateOperation_files(ctx, field, obj)
-			if out.Values[i] == graphql.Null {
-				invalid = true
-			}
 		default:
 			panic("unknown field " + strconv.Quote(field.Name))
 		}
@@ -2631,15 +2970,15 @@ func (ec *executionContext) _CreateOperation(ctx context.Context, sel ast.Select
 }
 
 // nolint: vetshadow
-func (ec *executionContext) _CreateOperation_author(ctx context.Context, field graphql.CollectedField, obj *operations.CreateOperation) graphql.Marshaler {
+func (ec *executionContext) _CommentHistoryStep_message(ctx context.Context, field graphql.CollectedField, obj *bug.CommentHistoryStep) graphql.Marshaler {
 	rctx := &graphql.ResolverContext{
-		Object: "CreateOperation",
+		Object: "CommentHistoryStep",
 		Args:   nil,
 		Field:  field,
 	}
 	ctx = graphql.WithResolverContext(ctx, rctx)
 	resTmp := ec.FieldMiddleware(ctx, obj, func(ctx context.Context) (interface{}, error) {
-		return ec.resolvers.CreateOperation().Author(ctx, obj)
+		return obj.Message, nil
 	})
 	if resTmp == nil {
 		if !ec.HasError(rctx) {
@@ -2647,22 +2986,21 @@ func (ec *executionContext) _CreateOperation_author(ctx context.Context, field g
 		}
 		return graphql.Null
 	}
-	res := resTmp.(bug.Person)
+	res := resTmp.(string)
 	rctx.Result = res
-
-	return ec._Person(ctx, field.Selections, &res)
+	return graphql.MarshalString(res)
 }
 
 // nolint: vetshadow
-func (ec *executionContext) _CreateOperation_date(ctx context.Context, field graphql.CollectedField, obj *operations.CreateOperation) graphql.Marshaler {
+func (ec *executionContext) _CommentHistoryStep_date(ctx context.Context, field graphql.CollectedField, obj *bug.CommentHistoryStep) graphql.Marshaler {
 	rctx := &graphql.ResolverContext{
-		Object: "CreateOperation",
+		Object: "CommentHistoryStep",
 		Args:   nil,
 		Field:  field,
 	}
 	ctx = graphql.WithResolverContext(ctx, rctx)
 	resTmp := ec.FieldMiddleware(ctx, obj, func(ctx context.Context) (interface{}, error) {
-		return ec.resolvers.CreateOperation().Date(ctx, obj)
+		return ec.resolvers.CommentHistoryStep().Date(ctx, obj)
 	})
 	if resTmp == nil {
 		if !ec.HasError(rctx) {
@@ -2675,16 +3013,90 @@ func (ec *executionContext) _CreateOperation_date(ctx context.Context, field gra
 	return graphql.MarshalTime(res)
 }
 
+var commentTimelineItemImplementors = []string{"CommentTimelineItem", "TimelineItem"}
+
+// nolint: gocyclo, errcheck, gas, goconst
+func (ec *executionContext) _CommentTimelineItem(ctx context.Context, sel ast.SelectionSet, obj *bug.CommentTimelineItem) graphql.Marshaler {
+	fields := graphql.CollectFields(ctx, sel, commentTimelineItemImplementors)
+
+	var wg sync.WaitGroup
+	out := graphql.NewOrderedMap(len(fields))
+	invalid := false
+	for i, field := range fields {
+		out.Keys[i] = field.Alias
+
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("CommentTimelineItem")
+		case "hash":
+			out.Values[i] = ec._CommentTimelineItem_hash(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				invalid = true
+			}
+		case "author":
+			out.Values[i] = ec._CommentTimelineItem_author(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				invalid = true
+			}
+		case "message":
+			out.Values[i] = ec._CommentTimelineItem_message(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				invalid = true
+			}
+		case "files":
+			out.Values[i] = ec._CommentTimelineItem_files(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				invalid = true
+			}
+		case "createdAt":
+			wg.Add(1)
+			go func(i int, field graphql.CollectedField) {
+				out.Values[i] = ec._CommentTimelineItem_createdAt(ctx, field, obj)
+				if out.Values[i] == graphql.Null {
+					invalid = true
+				}
+				wg.Done()
+			}(i, field)
+		case "lastEdit":
+			wg.Add(1)
+			go func(i int, field graphql.CollectedField) {
+				out.Values[i] = ec._CommentTimelineItem_lastEdit(ctx, field, obj)
+				if out.Values[i] == graphql.Null {
+					invalid = true
+				}
+				wg.Done()
+			}(i, field)
+		case "edited":
+			out.Values[i] = ec._CommentTimelineItem_edited(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				invalid = true
+			}
+		case "history":
+			out.Values[i] = ec._CommentTimelineItem_history(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				invalid = true
+			}
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+	wg.Wait()
+	if invalid {
+		return graphql.Null
+	}
+	return out
+}
+
 // nolint: vetshadow
-func (ec *executionContext) _CreateOperation_title(ctx context.Context, field graphql.CollectedField, obj *operations.CreateOperation) graphql.Marshaler {
+func (ec *executionContext) _CommentTimelineItem_hash(ctx context.Context, field graphql.CollectedField, obj *bug.CommentTimelineItem) graphql.Marshaler {
 	rctx := &graphql.ResolverContext{
-		Object: "CreateOperation",
+		Object: "CommentTimelineItem",
 		Args:   nil,
 		Field:  field,
 	}
 	ctx = graphql.WithResolverContext(ctx, rctx)
 	resTmp := ec.FieldMiddleware(ctx, obj, func(ctx context.Context) (interface{}, error) {
-		return obj.Title, nil
+		return obj.Hash()
 	})
 	if resTmp == nil {
 		if !ec.HasError(rctx) {
@@ -2692,15 +3104,38 @@ func (ec *executionContext) _CreateOperation_title(ctx context.Context, field gr
 		}
 		return graphql.Null
 	}
-	res := resTmp.(string)
+	res := resTmp.(git.Hash)
 	rctx.Result = res
-	return graphql.MarshalString(res)
+	return res
 }
 
 // nolint: vetshadow
-func (ec *executionContext) _CreateOperation_message(ctx context.Context, field graphql.CollectedField, obj *operations.CreateOperation) graphql.Marshaler {
+func (ec *executionContext) _CommentTimelineItem_author(ctx context.Context, field graphql.CollectedField, obj *bug.CommentTimelineItem) graphql.Marshaler {
 	rctx := &graphql.ResolverContext{
-		Object: "CreateOperation",
+		Object: "CommentTimelineItem",
+		Args:   nil,
+		Field:  field,
+	}
+	ctx = graphql.WithResolverContext(ctx, rctx)
+	resTmp := ec.FieldMiddleware(ctx, obj, func(ctx context.Context) (interface{}, error) {
+		return obj.Author, nil
+	})
+	if resTmp == nil {
+		if !ec.HasError(rctx) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(bug.Person)
+	rctx.Result = res
+
+	return ec._Person(ctx, field.Selections, &res)
+}
+
+// nolint: vetshadow
+func (ec *executionContext) _CommentTimelineItem_message(ctx context.Context, field graphql.CollectedField, obj *bug.CommentTimelineItem) graphql.Marshaler {
+	rctx := &graphql.ResolverContext{
+		Object: "CommentTimelineItem",
 		Args:   nil,
 		Field:  field,
 	}
@@ -2720,9 +3155,9 @@ func (ec *executionContext) _CreateOperation_message(ctx context.Context, field
 }
 
 // nolint: vetshadow
-func (ec *executionContext) _CreateOperation_files(ctx context.Context, field graphql.CollectedField, obj *operations.CreateOperation) graphql.Marshaler {
+func (ec *executionContext) _CommentTimelineItem_files(ctx context.Context, field graphql.CollectedField, obj *bug.CommentTimelineItem) graphql.Marshaler {
 	rctx := &graphql.ResolverContext{
-		Object: "CreateOperation",
+		Object: "CommentTimelineItem",
 		Args:   nil,
 		Field:  field,
 	}
@@ -2750,11 +3185,132 @@ func (ec *executionContext) _CreateOperation_files(ctx context.Context, field gr
 	return arr1
 }
 
-var labelChangeOperationImplementors = []string{"LabelChangeOperation", "Operation", "Authored"}
+// nolint: vetshadow
+func (ec *executionContext) _CommentTimelineItem_createdAt(ctx context.Context, field graphql.CollectedField, obj *bug.CommentTimelineItem) graphql.Marshaler {
+	rctx := &graphql.ResolverContext{
+		Object: "CommentTimelineItem",
+		Args:   nil,
+		Field:  field,
+	}
+	ctx = graphql.WithResolverContext(ctx, rctx)
+	resTmp := ec.FieldMiddleware(ctx, obj, func(ctx context.Context) (interface{}, error) {
+		return ec.resolvers.CommentTimelineItem().CreatedAt(ctx, obj)
+	})
+	if resTmp == nil {
+		if !ec.HasError(rctx) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(time.Time)
+	rctx.Result = res
+	return graphql.MarshalTime(res)
+}
+
+// nolint: vetshadow
+func (ec *executionContext) _CommentTimelineItem_lastEdit(ctx context.Context, field graphql.CollectedField, obj *bug.CommentTimelineItem) graphql.Marshaler {
+	rctx := &graphql.ResolverContext{
+		Object: "CommentTimelineItem",
+		Args:   nil,
+		Field:  field,
+	}
+	ctx = graphql.WithResolverContext(ctx, rctx)
+	resTmp := ec.FieldMiddleware(ctx, obj, func(ctx context.Context) (interface{}, error) {
+		return ec.resolvers.CommentTimelineItem().LastEdit(ctx, obj)
+	})
+	if resTmp == nil {
+		if !ec.HasError(rctx) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(time.Time)
+	rctx.Result = res
+	return graphql.MarshalTime(res)
+}
+
+// nolint: vetshadow
+func (ec *executionContext) _CommentTimelineItem_edited(ctx context.Context, field graphql.CollectedField, obj *bug.CommentTimelineItem) graphql.Marshaler {
+	rctx := &graphql.ResolverContext{
+		Object: "CommentTimelineItem",
+		Args:   nil,
+		Field:  field,
+	}
+	ctx = graphql.WithResolverContext(ctx, rctx)
+	resTmp := ec.FieldMiddleware(ctx, obj, func(ctx context.Context) (interface{}, error) {
+		return obj.Edited(), nil
+	})
+	if resTmp == nil {
+		if !ec.HasError(rctx) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.(bool)
+	rctx.Result = res
+	return graphql.MarshalBoolean(res)
+}
+
+// nolint: vetshadow
+func (ec *executionContext) _CommentTimelineItem_history(ctx context.Context, field graphql.CollectedField, obj *bug.CommentTimelineItem) graphql.Marshaler {
+	rctx := &graphql.ResolverContext{
+		Object: "CommentTimelineItem",
+		Args:   nil,
+		Field:  field,
+	}
+	ctx = graphql.WithResolverContext(ctx, rctx)
+	resTmp := ec.FieldMiddleware(ctx, obj, func(ctx context.Context) (interface{}, error) {
+		return obj.History, nil
+	})
+	if resTmp == nil {
+		if !ec.HasError(rctx) {
+			ec.Errorf(ctx, "must not be null")
+		}
+		return graphql.Null
+	}
+	res := resTmp.([]bug.CommentHistoryStep)
+	rctx.Result = res
+
+	arr1 := make(graphql.Array, len(res))
+	var wg sync.WaitGroup
+
+	isLen1 := len(res) == 1
+	if !isLen1 {
+		wg.Add(len(res))
+	}
+
+	for idx1 := range res {
+		idx1 := idx1
+		rctx := &graphql.ResolverContext{
+			Index:  &idx1,
+			Result: &res[idx1],
+		}
+		ctx := graphql.WithResolverContext(ctx, rctx)
+		f := func(idx1 int) {
+			if !isLen1 {
+				defer wg.Done()
+			}
+			arr1[idx1] = func() graphql.Marshaler {
+
+				return ec._CommentHistoryStep(ctx, field.Selections, &res[idx1])
+			}()
+		}
+		if isLen1 {
+			f(idx1)
+		} else {
+			go f(idx1)
+		}
+
+	}
+	wg.Wait()
+	return arr1
+}
+
+var createOperationImplementors = []string{"CreateOperation", "Operation", "Authored"}
 
 // nolint: gocyclo, errcheck, gas, goconst
-func (ec *executionContext) _LabelChangeOperation(ctx context.Context, sel ast.SelectionSet, obj *operations.LabelChangeOperation) graphql.Marshaler {
-	fields := graphql.CollectFields(ctx, sel, labelChangeOperationImplementors)
+func (ec *executionContext) _CreateOperation(ctx context.Context, sel ast.SelectionSet, obj *bug.CreateOperation) graphql.Marshaler {
+	fields := graphql.CollectFields(ctx, sel, createOperationImplementors)
 
 	var wg sync.WaitGroup
 	out := graphql.NewOrderedMap(len(fields))
@@ -2764,11 +3320,11 @@ func (ec *executionContext) _LabelChangeOperation(ctx context.Context, sel ast.S
 
 		switch field.Name {
 		case "__typename":
-			out.Values[i] = graphql.MarshalString("LabelChangeOperation")
+			out.Values[i] = graphql.MarshalString("CreateOperation")
 		case "author":
 			wg.Add(1)
 			go func(i int, field graphql.CollectedField) {
-				out.Values[i] = ec._LabelChangeOperation_author(ctx, field, obj)
+				out.Values[i] = ec._CreateOperation_author(ctx, field, obj)
 				if out.Values[i] == graphql.Null {
 					invalid = true
 				}
@@ -2777,19 +3333,24 @@ func (ec *executionContext) _LabelChangeOperation(ctx context.Context, sel ast.S
 		case "date":
 			wg.Add(1)
 			go func(i int, field graphql.CollectedField) {
-				out.Values[i] = ec._LabelChangeOperation_date(ctx, field, obj)
+				out.Values[i] = ec._CreateOperation_date(ctx, field, obj)
 				if out.Values[i] == graphql.Null {
 					invalid = true
 				}
 				wg.Done()
 			}(i, field)
-		case "added":
-			out.Values[i] = ec._LabelChangeOperation_added(ctx, field, obj)
+		case "title":
+			out.Values[i] = ec._CreateOperation_title(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
 				invalid = true
 			}
-		case "removed":
-			out.Values[i] = ec._LabelChangeOperation_removed(ctx, field, obj)
+		case "message":
+			out.Values[i] = ec._CreateOperation_message(ctx, field, obj)
+			if out.Values[i] == graphql.Null {
+				invalid = true
+			}
+		case "files":
+			out.Values[i] = ec._CreateOperation_files(ctx, field, obj)
 			if out.Values[i] == graphql.Null {
 				invalid = true
 			}

graphql/models/edges.go 🔗

@@ -14,3 +14,8 @@ func (e BugEdge) GetCursor() string {
 func (e CommentEdge) GetCursor() string {
 	return e.Cursor
 }
+
+// GetCursor return the cursor entry of an edge
+func (e TimelineItemEdge) GetCursor() string {
+	return e.Cursor
+}

graphql/models/gen_models.go 🔗

@@ -59,6 +59,18 @@ type PageInfo struct {
 	EndCursor       string `json:"endCursor"`
 }
 
+type TimelineItemConnection struct {
+	Edges      []TimelineItemEdge `json:"edges"`
+	Nodes      []bug.TimelineItem `json:"nodes"`
+	PageInfo   PageInfo           `json:"pageInfo"`
+	TotalCount int                `json:"totalCount"`
+}
+
+type TimelineItemEdge struct {
+	Cursor string           `json:"cursor"`
+	Node   bug.TimelineItem `json:"node"`
+}
+
 type Status string
 
 const (

graphql/resolvers/bug.go 🔗

@@ -69,6 +69,33 @@ func (bugResolver) Operations(ctx context.Context, obj *bug.Snapshot, after *str
 	return connections.BugOperationCon(obj.Operations, edger, conMaker, input)
 }
 
+func (bugResolver) Timeline(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int) (models.TimelineItemConnection, error) {
+	input := models.ConnectionInput{
+		Before: before,
+		After:  after,
+		First:  first,
+		Last:   last,
+	}
+
+	edger := func(op bug.TimelineItem, offset int) connections.Edge {
+		return models.TimelineItemEdge{
+			Node:   op,
+			Cursor: connections.OffsetToCursor(offset),
+		}
+	}
+
+	conMaker := func(edges []models.TimelineItemEdge, nodes []bug.TimelineItem, info models.PageInfo, totalCount int) (models.TimelineItemConnection, error) {
+		return models.TimelineItemConnection{
+			Edges:      edges,
+			Nodes:      nodes,
+			PageInfo:   info,
+			TotalCount: totalCount,
+		}, nil
+	}
+
+	return connections.BugTimelineItemCon(obj.Timeline, edger, conMaker, input)
+}
+
 func (bugResolver) LastEdit(ctx context.Context, obj *bug.Snapshot) (time.Time, error) {
 	return obj.LastEditTime(), nil
 }

graphql/resolvers/operations.go 🔗

@@ -7,60 +7,59 @@ import (
 
 	"github.com/MichaelMure/git-bug/bug"
 	"github.com/MichaelMure/git-bug/graphql/models"
-	"github.com/MichaelMure/git-bug/operations"
 )
 
 type addCommentOperationResolver struct{}
 
-func (addCommentOperationResolver) Author(ctx context.Context, obj *operations.AddCommentOperation) (bug.Person, error) {
+func (addCommentOperationResolver) Author(ctx context.Context, obj *bug.AddCommentOperation) (bug.Person, error) {
 	return obj.Author, nil
 }
 
-func (addCommentOperationResolver) Date(ctx context.Context, obj *operations.AddCommentOperation) (time.Time, error) {
+func (addCommentOperationResolver) Date(ctx context.Context, obj *bug.AddCommentOperation) (time.Time, error) {
 	return obj.Time(), nil
 }
 
 type createOperationResolver struct{}
 
-func (createOperationResolver) Author(ctx context.Context, obj *operations.CreateOperation) (bug.Person, error) {
+func (createOperationResolver) Author(ctx context.Context, obj *bug.CreateOperation) (bug.Person, error) {
 	return obj.Author, nil
 }
 
-func (createOperationResolver) Date(ctx context.Context, obj *operations.CreateOperation) (time.Time, error) {
+func (createOperationResolver) Date(ctx context.Context, obj *bug.CreateOperation) (time.Time, error) {
 	return obj.Time(), nil
 }
 
 type labelChangeOperation struct{}
 
-func (labelChangeOperation) Author(ctx context.Context, obj *operations.LabelChangeOperation) (bug.Person, error) {
+func (labelChangeOperation) Author(ctx context.Context, obj *bug.LabelChangeOperation) (bug.Person, error) {
 	return obj.Author, nil
 }
 
-func (labelChangeOperation) Date(ctx context.Context, obj *operations.LabelChangeOperation) (time.Time, error) {
+func (labelChangeOperation) Date(ctx context.Context, obj *bug.LabelChangeOperation) (time.Time, error) {
 	return obj.Time(), nil
 }
 
 type setStatusOperationResolver struct{}
 
-func (setStatusOperationResolver) Author(ctx context.Context, obj *operations.SetStatusOperation) (bug.Person, error) {
+func (setStatusOperationResolver) Author(ctx context.Context, obj *bug.SetStatusOperation) (bug.Person, error) {
 	return obj.Author, nil
 }
 
-func (setStatusOperationResolver) Date(ctx context.Context, obj *operations.SetStatusOperation) (time.Time, error) {
+func (setStatusOperationResolver) Date(ctx context.Context, obj *bug.SetStatusOperation) (time.Time, error) {
 	return obj.Time(), nil
 }
 
-func (setStatusOperationResolver) Status(ctx context.Context, obj *operations.SetStatusOperation) (models.Status, error) {
+func (setStatusOperationResolver) Status(ctx context.Context, obj *bug.SetStatusOperation) (models.Status, error) {
 	return convertStatus(obj.Status)
 }
 
 type setTitleOperationResolver struct{}
 
-func (setTitleOperationResolver) Author(ctx context.Context, obj *operations.SetTitleOperation) (bug.Person, error) {
+func (setTitleOperationResolver) Author(ctx context.Context, obj *bug.SetTitleOperation) (bug.Person, error) {
 	return obj.Author, nil
 }
 
-func (setTitleOperationResolver) Date(ctx context.Context, obj *operations.SetTitleOperation) (time.Time, error) {
+func (setTitleOperationResolver) Date(ctx context.Context, obj *bug.SetTitleOperation) (time.Time, error) {
 	return obj.Time(), nil
 }
 

graphql/resolvers/root.go 🔗

@@ -32,10 +32,22 @@ func (RootResolver) AddCommentOperation() graph.AddCommentOperationResolver {
 	return &addCommentOperationResolver{}
 }
 
-func (r RootResolver) Bug() graph.BugResolver {
+func (RootResolver) Bug() graph.BugResolver {
 	return &bugResolver{}
 }
 
+func (RootResolver) CommentHistoryStep() graph.CommentHistoryStepResolver {
+	return &commentHistoryStepResolver{}
+}
+
+func (RootResolver) CommentTimelineItem() graph.CommentTimelineItemResolver {
+	return &commentTimelineItemResolver{}
+}
+
+func (RootResolver) CreateTimelineItem() graph.CreateTimelineItemResolver {
+	return &createTimelineItemResolver{}
+}
+
 func (RootResolver) CreateOperation() graph.CreateOperationResolver {
 	return &createOperationResolver{}
 }
@@ -44,7 +56,7 @@ func (RootResolver) LabelChangeOperation() graph.LabelChangeOperationResolver {
 	return &labelChangeOperation{}
 }
 
-func (r RootResolver) Repository() graph.RepositoryResolver {
+func (RootResolver) Repository() graph.RepositoryResolver {
 	return &repoResolver{}
 }
 

graphql/resolvers/timeline.go 🔗

@@ -0,0 +1,36 @@
+package resolvers
+
+import (
+	"context"
+	"time"
+
+	"github.com/MichaelMure/git-bug/bug"
+)
+
+type commentHistoryStepResolver struct{}
+
+func (commentHistoryStepResolver) Date(ctx context.Context, obj *bug.CommentHistoryStep) (time.Time, error) {
+	return obj.UnixTime.Time(), nil
+}
+
+type commentTimelineItemResolver struct{}
+
+func (commentTimelineItemResolver) CreatedAt(ctx context.Context, obj *bug.CommentTimelineItem) (time.Time, error) {
+	return obj.CreatedAt.Time(), nil
+}
+
+func (commentTimelineItemResolver) LastEdit(ctx context.Context, obj *bug.CommentTimelineItem) (time.Time, error) {
+	return obj.LastEdit.Time(), nil
+}
+
+type createTimelineItemResolver struct{}
+
+func (createTimelineItemResolver) CreatedAt(ctx context.Context, obj *bug.CreateTimelineItem) (time.Time, error) {
+	return obj.CreatedAt.Time(), nil
+
+}
+
+func (createTimelineItemResolver) LastEdit(ctx context.Context, obj *bug.CreateTimelineItem) (time.Time, error) {
+	return obj.LastEdit.Time(), nil
+
+}

graphql/schema.graphql 🔗

@@ -73,6 +73,11 @@ type OperationEdge {
   node: Operation!
 }
 
+"""An item in the timeline of events"""
+interface TimelineItem {
+  hash: Hash!
+}
+
 """An operation applied to a bug."""
 interface Operation {
   """The operations author."""
@@ -90,7 +95,8 @@ type CreateOperation implements Operation & Authored {
   files: [Hash!]!
 }
 
-type SetTitleOperation implements Operation & Authored {
+type SetTitleOperation implements Operation & Authored & TimelineItem {
+  hash: Hash!
   author: Person!
   date: Time!
 
@@ -106,14 +112,16 @@ type AddCommentOperation implements Operation & Authored {
   files: [Hash!]!
 }
 
-type SetStatusOperation implements Operation & Authored {
+type SetStatusOperation implements Operation & Authored & TimelineItem {
+  hash: Hash!
   author: Person!
   date: Time!
 
   status: Status!
 }
 
-type LabelChangeOperation implements Operation & Authored {
+type LabelChangeOperation implements Operation & Authored & TimelineItem {
+  hash: Hash!
   author: Person!
   date: Time!
 
@@ -121,6 +129,45 @@ type LabelChangeOperation implements Operation & Authored {
   removed: [Label!]!
 }
 
+type TimelineItemConnection {
+  edges: [TimelineItemEdge!]!
+  nodes: [TimelineItem!]!
+  pageInfo: PageInfo!
+  totalCount: Int!
+}
+
+type TimelineItemEdge {
+  cursor: String!
+  node: TimelineItem!
+}
+
+type CommentHistoryStep {
+  message: String!
+  date: Time!
+}
+
+type CreateTimelineItem implements TimelineItem {
+  hash: Hash!
+  author: Person!
+  message: String!
+  files: [Hash!]!
+  createdAt: Time!
+  lastEdit: Time!
+  edited: Boolean!
+  history: [CommentHistoryStep!]!
+}
+
+type CommentTimelineItem implements TimelineItem {
+  hash: Hash!
+  author: Person!
+  message: String!
+  files: [Hash!]!
+  createdAt: Time!
+  lastEdit: Time!
+  edited: Boolean!
+  history: [CommentHistoryStep!]!
+}
+
 """The connection type for Bug."""
 type BugConnection {
   """A list of edges."""
@@ -161,6 +208,17 @@ type Bug {
     last: Int
   ): CommentConnection!
 
+  timeline(
+    """Returns the elements in the list that come after the specified cursor."""
+    after: String
+    """Returns the elements in the list that come before the specified cursor."""
+    before: String
+    """Returns the first _n_ elements from the list."""
+    first: Int
+    """Returns the last _n_ elements from the list."""
+    last: Int
+  ): TimelineItemConnection!
+
   operations(
     """Returns the elements in the list that come after the specified cursor."""
     after: String
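
Not part of the diff — a hedged example of a query the new timeline connection is meant to serve, written as a fragment on Bug (how the Bug itself is reached through the query root is outside this excerpt); shown as a Go constant for consistency with the other sketches:

package example

// timelineQuery is illustrative only; it selects the new timeline connection
// and narrows the nodes down to comment items and their edit history.
const timelineQuery = `
fragment timeline on Bug {
  timeline(first: 10) {
    totalCount
    nodes {
      ... on CommentTimelineItem {
        hash
        message
        edited
        history { message date }
      }
    }
  }
}
`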

misc/random_bugs/create_random_bugs.go 🔗

@@ -6,7 +6,6 @@ import (
 	"time"
 
 	"github.com/MichaelMure/git-bug/bug"
-	"github.com/MichaelMure/git-bug/operations"
 	"github.com/MichaelMure/git-bug/repository"
 	"github.com/icrowley/fake"
 )
@@ -66,7 +65,7 @@ func GenerateRandomBugsWithSeed(opts Options, seed int64) []*bug.Bug {
 	for i := 0; i < opts.BugNumber; i++ {
 		addedLabels = []string{}
 
-		b, err := operations.Create(
+		b, err := bug.Create(
 			randomPerson(opts.PersonNumber),
 			time.Now().Unix(),
 			fake.Sentence(),
@@ -111,7 +110,7 @@ func GenerateRandomOperationPacksWithSeed(packNumber int, opNumber int, seed int
 
 		var op bug.Operation
 
-		op = operations.NewCreateOp(
+		op = bug.NewCreateOp(
 			randomPerson(5),
 			time.Now().Unix(),
 			fake.Sentence(),
@@ -122,7 +121,7 @@ func GenerateRandomOperationPacksWithSeed(packNumber int, opNumber int, seed int
 		opp.Append(op)
 
 		for j := 0; j < opNumber-1; j++ {
-			op = operations.NewAddCommentOp(
+			op = bug.NewAddCommentOp(
 				randomPerson(5),
 				time.Now().Unix(),
 				paragraphs(),
@@ -164,19 +163,19 @@ func paragraphs() string {
 }
 
 func comment(b bug.Interface, p bug.Person) {
-	_ = operations.Comment(b, p, time.Now().Unix(), paragraphs())
+	_ = bug.AddComment(b, p, time.Now().Unix(), paragraphs())
 }
 
 func title(b bug.Interface, p bug.Person) {
-	_ = operations.SetTitle(b, p, time.Now().Unix(), fake.Sentence())
+	_ = bug.SetTitle(b, p, time.Now().Unix(), fake.Sentence())
 }
 
 func open(b bug.Interface, p bug.Person) {
-	_ = operations.Open(b, p, time.Now().Unix())
+	_ = bug.Open(b, p, time.Now().Unix())
 }
 
 func close(b bug.Interface, p bug.Person) {
-	_ = operations.Close(b, p, time.Now().Unix())
+	_ = bug.Close(b, p, time.Now().Unix())
 }
 
 var addedLabels []string
@@ -203,5 +202,5 @@ func labels(b bug.Interface, p bug.Person) {
 	// ignore error
 	// if the randomisation produce no changes, no op
 	// is added to the bug
-	_, _ = operations.ChangeLabels(b, p, time.Now().Unix(), added, removed)
+	_, _ = bug.ChangeLabels(b, p, time.Now().Unix(), added, removed)
 }

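With the operations package gone, the same convenience helpers are reachable from the bug package directly, as the generator changes above show. A minimal end-to-end sketch, with a placeholder path, author and messages:

package main

import (
	"log"
	"time"

	"github.com/MichaelMure/git-bug/bug"
	"github.com/MichaelMure/git-bug/repository"
)

func main() {
	// Placeholder repository path, for illustration only.
	repo, err := repository.InitGitRepo("/tmp/example-repo")
	if err != nil {
		log.Fatal(err)
	}

	author := bug.Person{Name: "Jane Doe", Email: "jane@example.com"}
	now := time.Now().Unix()

	// Create a bug, add a comment, then persist both operations.
	b, err := bug.Create(author, now, "example title", "example message")
	if err != nil {
		log.Fatal(err)
	}
	if err := bug.AddComment(b, author, now, "a follow-up comment"); err != nil {
		log.Fatal(err)
	}
	if err := b.Commit(repo); err != nil {
		log.Fatal(err)
	}
}
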
operations/add_comment.go

@@ -1,75 +0,0 @@
-package operations
-
-import (
-	"fmt"
-
-	"github.com/MichaelMure/git-bug/bug"
-	"github.com/MichaelMure/git-bug/util/git"
-	"github.com/MichaelMure/git-bug/util/text"
-)
-
-// AddCommentOperation will add a new comment in the bug
-
-var _ bug.Operation = AddCommentOperation{}
-
-type AddCommentOperation struct {
-	*bug.OpBase
-	Message string `json:"message"`
-	// TODO: change for a map[string]util.hash to store the filename ?
-	Files []git.Hash `json:"files"`
-}
-
-func (op AddCommentOperation) Apply(snapshot bug.Snapshot) bug.Snapshot {
-	comment := bug.Comment{
-		Message:  op.Message,
-		Author:   op.Author,
-		Files:    op.Files,
-		UnixTime: op.UnixTime,
-	}
-
-	snapshot.Comments = append(snapshot.Comments, comment)
-
-	return snapshot
-}
-
-func (op AddCommentOperation) GetFiles() []git.Hash {
-	return op.Files
-}
-
-func (op AddCommentOperation) Validate() error {
-	if err := bug.OpBaseValidate(op, bug.AddCommentOp); err != nil {
-		return err
-	}
-
-	if text.Empty(op.Message) {
-		return fmt.Errorf("message is empty")
-	}
-
-	if !text.Safe(op.Message) {
-		return fmt.Errorf("message is not fully printable")
-	}
-
-	return nil
-}
-
-func NewAddCommentOp(author bug.Person, unixTime int64, message string, files []git.Hash) AddCommentOperation {
-	return AddCommentOperation{
-		OpBase:  bug.NewOpBase(bug.AddCommentOp, author, unixTime),
-		Message: message,
-		Files:   files,
-	}
-}
-
-// Convenience function to apply the operation
-func Comment(b bug.Interface, author bug.Person, unixTime int64, message string) error {
-	return CommentWithFiles(b, author, unixTime, message, nil)
-}
-
-func CommentWithFiles(b bug.Interface, author bug.Person, unixTime int64, message string, files []git.Hash) error {
-	addCommentOp := NewAddCommentOp(author, unixTime, message, files)
-	if err := addCommentOp.Validate(); err != nil {
-		return err
-	}
-	b.Append(addCommentOp)
-	return nil
-}

operations/create.go

@@ -1,90 +0,0 @@
-package operations
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/MichaelMure/git-bug/bug"
-	"github.com/MichaelMure/git-bug/util/git"
-	"github.com/MichaelMure/git-bug/util/text"
-)
-
-// CreateOperation define the initial creation of a bug
-
-var _ bug.Operation = CreateOperation{}
-
-type CreateOperation struct {
-	*bug.OpBase
-	Title   string     `json:"title"`
-	Message string     `json:"message"`
-	Files   []git.Hash `json:"files"`
-}
-
-func (op CreateOperation) Apply(snapshot bug.Snapshot) bug.Snapshot {
-	snapshot.Title = op.Title
-	snapshot.Comments = []bug.Comment{
-		{
-			Message:  op.Message,
-			Author:   op.Author,
-			UnixTime: op.UnixTime,
-		},
-	}
-	snapshot.Author = op.Author
-	snapshot.CreatedAt = op.Time()
-	return snapshot
-}
-
-func (op CreateOperation) GetFiles() []git.Hash {
-	return op.Files
-}
-
-func (op CreateOperation) Validate() error {
-	if err := bug.OpBaseValidate(op, bug.CreateOp); err != nil {
-		return err
-	}
-
-	if text.Empty(op.Title) {
-		return fmt.Errorf("title is empty")
-	}
-
-	if strings.Contains(op.Title, "\n") {
-		return fmt.Errorf("title should be a single line")
-	}
-
-	if !text.Safe(op.Title) {
-		return fmt.Errorf("title is not fully printable")
-	}
-
-	if !text.Safe(op.Message) {
-		return fmt.Errorf("message is not fully printable")
-	}
-
-	return nil
-}
-
-func NewCreateOp(author bug.Person, unixTime int64, title, message string, files []git.Hash) CreateOperation {
-	return CreateOperation{
-		OpBase:  bug.NewOpBase(bug.CreateOp, author, unixTime),
-		Title:   title,
-		Message: message,
-		Files:   files,
-	}
-}
-
-// Convenience function to apply the operation
-func Create(author bug.Person, unixTime int64, title, message string) (*bug.Bug, error) {
-	return CreateWithFiles(author, unixTime, title, message, nil)
-}
-
-func CreateWithFiles(author bug.Person, unixTime int64, title, message string, files []git.Hash) (*bug.Bug, error) {
-	newBug := bug.NewBug()
-	createOp := NewCreateOp(author, unixTime, title, message, files)
-
-	if err := createOp.Validate(); err != nil {
-		return nil, err
-	}
-
-	newBug.Append(createOp)
-
-	return newBug, nil
-}

operations/create_test.go

@@ -1,36 +0,0 @@
-package operations
-
-import (
-	"github.com/MichaelMure/git-bug/bug"
-	"reflect"
-	"testing"
-	"time"
-)
-
-func TestCreate(t *testing.T) {
-	snapshot := bug.Snapshot{}
-
-	var rene = bug.Person{
-		Name:  "René Descartes",
-		Email: "rene@descartes.fr",
-	}
-
-	unix := time.Now().Unix()
-
-	create := NewCreateOp(rene, unix, "title", "message", nil)
-
-	snapshot = create.Apply(snapshot)
-
-	expected := bug.Snapshot{
-		Title: "title",
-		Comments: []bug.Comment{
-			{Author: rene, Message: "message", UnixTime: create.UnixTime},
-		},
-		Author:    rene,
-		CreatedAt: create.Time(),
-	}
-
-	if !reflect.DeepEqual(snapshot, expected) {
-		t.Fatalf("%v different than %v", snapshot, expected)
-	}
-}

operations/operations.go

@@ -1,17 +0,0 @@
-// Package operations contains the various bug operations. A bug operation is
-// an atomic edit operation of a bug state. These operations are applied
-// sequentially to compile the current state of the bug.
-package operations
-
-import (
-	"github.com/MichaelMure/git-bug/bug"
-)
-
-// Package initialisation used to register operation's type for (de)serialization
-func init() {
-	bug.Register(bug.CreateOp, CreateOperation{})
-	bug.Register(bug.SetTitleOp, SetTitleOperation{})
-	bug.Register(bug.AddCommentOp, AddCommentOperation{})
-	bug.Register(bug.SetStatusOp, SetStatusOperation{})
-	bug.Register(bug.LabelChangeOp, LabelChangeOperation{})
-}

operations/set_status.go

@@ -1,60 +0,0 @@
-package operations
-
-import (
-	"github.com/MichaelMure/git-bug/bug"
-	"github.com/pkg/errors"
-)
-
-// SetStatusOperation will change the status of a bug
-
-var _ bug.Operation = SetStatusOperation{}
-
-type SetStatusOperation struct {
-	*bug.OpBase
-	Status bug.Status `json:"status"`
-}
-
-func (op SetStatusOperation) Apply(snapshot bug.Snapshot) bug.Snapshot {
-	snapshot.Status = op.Status
-
-	return snapshot
-}
-
-func (op SetStatusOperation) Validate() error {
-	if err := bug.OpBaseValidate(op, bug.SetStatusOp); err != nil {
-		return err
-	}
-
-	if err := op.Status.Validate(); err != nil {
-		return errors.Wrap(err, "status")
-	}
-
-	return nil
-}
-
-func NewSetStatusOp(author bug.Person, unixTime int64, status bug.Status) SetStatusOperation {
-	return SetStatusOperation{
-		OpBase: bug.NewOpBase(bug.SetStatusOp, author, unixTime),
-		Status: status,
-	}
-}
-
-// Convenience function to apply the operation
-func Open(b bug.Interface, author bug.Person, unixTime int64) error {
-	op := NewSetStatusOp(author, unixTime, bug.OpenStatus)
-	if err := op.Validate(); err != nil {
-		return err
-	}
-	b.Append(op)
-	return nil
-}
-
-// Convenience function to apply the operation
-func Close(b bug.Interface, author bug.Person, unixTime int64) error {
-	op := NewSetStatusOp(author, unixTime, bug.ClosedStatus)
-	if err := op.Validate(); err != nil {
-		return err
-	}
-	b.Append(op)
-	return nil
-}

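The status helper and its siblings also have bug-package counterparts (bug.Open, bug.Close, bug.SetTitle, bug.ChangeLabels, as used in the generator diff above). A small illustrative sketch, assuming an existing bug and author, and treating label arguments as plain strings like the random bug generator does:

package example

import (
	"time"

	"github.com/MichaelMure/git-bug/bug"
)

// closeAndRelabel demonstrates the relocated helpers; b and p are assumed to
// exist already, for instance from bug.Create.
func closeAndRelabel(b bug.Interface, p bug.Person) error {
	now := time.Now().Unix()

	if err := bug.SetTitle(b, p, now, "a clearer title"); err != nil {
		return err
	}
	if _, err := bug.ChangeLabels(b, p, now, []string{"triaged"}, nil); err != nil {
		return err
	}
	// bug.Open would reopen the bug the same way.
	return bug.Close(b, p, now)
}
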
termui/show_bug.go

@@ -5,8 +5,8 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/MichaelMure/git-bug/bug"
 	"github.com/MichaelMure/git-bug/cache"
-	"github.com/MichaelMure/git-bug/operations"
 	"github.com/MichaelMure/git-bug/util/colors"
 	"github.com/MichaelMure/git-bug/util/text"
 	"github.com/jroimartin/gocui"
@@ -209,12 +209,20 @@ func (sb *showBug) renderMain(g *gocui.Gui, mainView *gocui.View) error {
 
 	sb.mainSelectableView = nil
 
-	bugHeader := fmt.Sprintf("[%s] %s\n\n[%s] %s opened this bug on %s",
+	createTimelineItem := snap.Timeline[0].(*bug.CreateTimelineItem)
+
+	edited := ""
+	if createTimelineItem.Edited() {
+		edited = " (edited)"
+	}
+
+	bugHeader := fmt.Sprintf("[%s] %s\n\n[%s] %s opened this bug on %s%s",
 		colors.Cyan(snap.HumanId()),
 		colors.Bold(snap.Title),
 		colors.Yellow(snap.Status),
 		colors.Magenta(snap.Author.Name),
 		snap.CreatedAt.Format(timeLayout),
+		edited,
 	)
 	bugHeader, lines := text.Wrap(bugHeader, maxX)
 
@@ -226,7 +234,7 @@ func (sb *showBug) renderMain(g *gocui.Gui, mainView *gocui.View) error {
 	fmt.Fprint(v, bugHeader)
 	y0 += lines + 1
 
-	for i, op := range snap.Operations {
+	for i, op := range snap.Timeline {
 		viewName := fmt.Sprintf("op%d", i)
 
 		// TODO: we might skip the rendering of blocks that are outside of the view
@@ -234,8 +242,8 @@ func (sb *showBug) renderMain(g *gocui.Gui, mainView *gocui.View) error {
 
 		switch op.(type) {
 
-		case operations.CreateOperation:
-			create := op.(operations.CreateOperation)
+		case *bug.CreateTimelineItem:
+			create := op.(*bug.CreateTimelineItem)
 			content, lines := text.WrapLeftPadded(create.Message, maxX, 4)
 
 			v, err := sb.createOpView(g, viewName, x0, y0, maxX+1, lines, true)
@@ -245,13 +253,19 @@ func (sb *showBug) renderMain(g *gocui.Gui, mainView *gocui.View) error {
 			fmt.Fprint(v, content)
 			y0 += lines + 2
 
-		case operations.AddCommentOperation:
-			comment := op.(operations.AddCommentOperation)
+		case *bug.CommentTimelineItem:
+			comment := op.(*bug.CommentTimelineItem)
+
+			edited := ""
+			if comment.Edited() {
+				edited = " (edited)"
+			}
 
 			message, _ := text.WrapLeftPadded(comment.Message, maxX, 4)
-			content := fmt.Sprintf("%s commented on %s\n\n%s",
+			content := fmt.Sprintf("%s commented on %s%s\n\n%s",
 				colors.Magenta(comment.Author.Name),
-				comment.Time().Format(timeLayout),
+				comment.CreatedAt.Time().Format(timeLayout),
+				edited,
 				message,
 			)
 			content, lines = text.Wrap(content, maxX)
@@ -263,8 +277,8 @@ func (sb *showBug) renderMain(g *gocui.Gui, mainView *gocui.View) error {
 			fmt.Fprint(v, content)
 			y0 += lines + 2
 
-		case operations.SetTitleOperation:
-			setTitle := op.(operations.SetTitleOperation)
+		case *bug.SetTitleOperation:
+			setTitle := op.(*bug.SetTitleOperation)
 
 			content := fmt.Sprintf("%s changed the title to %s on %s",
 				colors.Magenta(setTitle.Author.Name),
@@ -280,8 +294,8 @@ func (sb *showBug) renderMain(g *gocui.Gui, mainView *gocui.View) error {
 			fmt.Fprint(v, content)
 			y0 += lines + 2
 
-		case operations.SetStatusOperation:
-			setStatus := op.(operations.SetStatusOperation)
+		case *bug.SetStatusOperation:
+			setStatus := op.(*bug.SetStatusOperation)
 
 			content := fmt.Sprintf("%s %s the bug on %s",
 				colors.Magenta(setStatus.Author.Name),
@@ -297,8 +311,8 @@ func (sb *showBug) renderMain(g *gocui.Gui, mainView *gocui.View) error {
 			fmt.Fprint(v, content)
 			y0 += lines + 2
 
-		case operations.LabelChangeOperation:
-			labelChange := op.(operations.LabelChangeOperation)
+		case *bug.LabelChangeOperation:
+			labelChange := op.(*bug.LabelChangeOperation)
 
 			var added []string
 			for _, label := range labelChange.Added {

tests/bug_actions_test.go

@@ -1,365 +0,0 @@
-package tests
-
-import (
-	"io/ioutil"
-	"log"
-	"os"
-	"testing"
-
-	"github.com/MichaelMure/git-bug/bug"
-	"github.com/MichaelMure/git-bug/operations"
-	"github.com/MichaelMure/git-bug/repository"
-)
-
-func createRepo(bare bool) *repository.GitRepo {
-	dir, err := ioutil.TempDir("", "")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// fmt.Println("Creating repo:", dir)
-
-	var creator func(string) (*repository.GitRepo, error)
-
-	if bare {
-		creator = repository.InitBareGitRepo
-	} else {
-		creator = repository.InitGitRepo
-	}
-
-	repo, err := creator(dir)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return repo
-}
-
-func cleanupRepo(repo repository.Repo) error {
-	path := repo.GetPath()
-	// fmt.Println("Cleaning repo:", path)
-	return os.RemoveAll(path)
-}
-
-func setupRepos(t testing.TB) (repoA, repoB, remote *repository.GitRepo) {
-	repoA = createRepo(false)
-	repoB = createRepo(false)
-	remote = createRepo(true)
-
-	remoteAddr := "file://" + remote.GetPath()
-
-	err := repoA.AddRemote("origin", remoteAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = repoB.AddRemote("origin", remoteAddr)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	return repoA, repoB, remote
-}
-
-func cleanupRepos(repoA, repoB, remote *repository.GitRepo) {
-	cleanupRepo(repoA)
-	cleanupRepo(repoB)
-	cleanupRepo(remote)
-}
-
-func TestPushPull(t *testing.T) {
-	repoA, repoB, remote := setupRepos(t)
-	defer cleanupRepos(repoA, repoB, remote)
-
-	bug1, err := operations.Create(rene, unix, "bug1", "message")
-	checkErr(t, err)
-	err = bug1.Commit(repoA)
-	checkErr(t, err)
-
-	// A --> remote --> B
-	_, err = bug.Push(repoA, "origin")
-	checkErr(t, err)
-
-	err = bug.Pull(repoB, "origin")
-	checkErr(t, err)
-
-	bugs := allBugs(t, bug.ReadAllLocalBugs(repoB))
-
-	if len(bugs) != 1 {
-		t.Fatal("Unexpected number of bugs")
-	}
-
-	// B --> remote --> A
-	bug2, err := operations.Create(rene, unix, "bug2", "message")
-	checkErr(t, err)
-	err = bug2.Commit(repoB)
-	checkErr(t, err)
-
-	_, err = bug.Push(repoB, "origin")
-	checkErr(t, err)
-
-	err = bug.Pull(repoA, "origin")
-	checkErr(t, err)
-
-	bugs = allBugs(t, bug.ReadAllLocalBugs(repoA))
-
-	if len(bugs) != 2 {
-		t.Fatal("Unexpected number of bugs")
-	}
-}
-
-func checkErr(t testing.TB, err error) {
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func allBugs(t testing.TB, bugs <-chan bug.StreamedBug) []*bug.Bug {
-	var result []*bug.Bug
-	for streamed := range bugs {
-		if streamed.Err != nil {
-			t.Fatal(streamed.Err)
-		}
-		result = append(result, streamed.Bug)
-	}
-	return result
-}
-
-func TestRebaseTheirs(t *testing.T) {
-	_RebaseTheirs(t)
-}
-
-func BenchmarkRebaseTheirs(b *testing.B) {
-	for n := 0; n < b.N; n++ {
-		_RebaseTheirs(b)
-	}
-}
-
-func _RebaseTheirs(t testing.TB) {
-	repoA, repoB, remote := setupRepos(t)
-	defer cleanupRepos(repoA, repoB, remote)
-
-	bug1, err := operations.Create(rene, unix, "bug1", "message")
-	checkErr(t, err)
-	err = bug1.Commit(repoA)
-	checkErr(t, err)
-
-	// A --> remote
-	_, err = bug.Push(repoA, "origin")
-	checkErr(t, err)
-
-	// remote --> B
-	err = bug.Pull(repoB, "origin")
-	checkErr(t, err)
-
-	bug2, err := bug.ReadLocalBug(repoB, bug1.Id())
-	checkErr(t, err)
-
-	operations.Comment(bug2, rene, unix, "message2")
-	operations.Comment(bug2, rene, unix, "message3")
-	operations.Comment(bug2, rene, unix, "message4")
-	err = bug2.Commit(repoB)
-	checkErr(t, err)
-
-	// B --> remote
-	_, err = bug.Push(repoB, "origin")
-	checkErr(t, err)
-
-	// remote --> A
-	err = bug.Pull(repoA, "origin")
-	checkErr(t, err)
-
-	bugs := allBugs(t, bug.ReadAllLocalBugs(repoB))
-
-	if len(bugs) != 1 {
-		t.Fatal("Unexpected number of bugs")
-	}
-
-	bug3, err := bug.ReadLocalBug(repoA, bug1.Id())
-	checkErr(t, err)
-
-	if nbOps(bug3) != 4 {
-		t.Fatal("Unexpected number of operations")
-	}
-}
-
-func TestRebaseOurs(t *testing.T) {
-	_RebaseOurs(t)
-}
-
-func BenchmarkRebaseOurs(b *testing.B) {
-	for n := 0; n < b.N; n++ {
-		_RebaseOurs(b)
-	}
-}
-
-func _RebaseOurs(t testing.TB) {
-	repoA, repoB, remote := setupRepos(t)
-	defer cleanupRepos(repoA, repoB, remote)
-
-	bug1, err := operations.Create(rene, unix, "bug1", "message")
-	checkErr(t, err)
-	err = bug1.Commit(repoA)
-	checkErr(t, err)
-
-	// A --> remote
-	_, err = bug.Push(repoA, "origin")
-	checkErr(t, err)
-
-	// remote --> B
-	err = bug.Pull(repoB, "origin")
-	checkErr(t, err)
-
-	operations.Comment(bug1, rene, unix, "message2")
-	operations.Comment(bug1, rene, unix, "message3")
-	operations.Comment(bug1, rene, unix, "message4")
-	err = bug1.Commit(repoA)
-	checkErr(t, err)
-
-	operations.Comment(bug1, rene, unix, "message5")
-	operations.Comment(bug1, rene, unix, "message6")
-	operations.Comment(bug1, rene, unix, "message7")
-	err = bug1.Commit(repoA)
-	checkErr(t, err)
-
-	operations.Comment(bug1, rene, unix, "message8")
-	operations.Comment(bug1, rene, unix, "message9")
-	operations.Comment(bug1, rene, unix, "message10")
-	err = bug1.Commit(repoA)
-	checkErr(t, err)
-
-	// remote --> A
-	err = bug.Pull(repoA, "origin")
-	checkErr(t, err)
-
-	bugs := allBugs(t, bug.ReadAllLocalBugs(repoA))
-
-	if len(bugs) != 1 {
-		t.Fatal("Unexpected number of bugs")
-	}
-
-	bug2, err := bug.ReadLocalBug(repoA, bug1.Id())
-	checkErr(t, err)
-
-	if nbOps(bug2) != 10 {
-		t.Fatal("Unexpected number of operations")
-	}
-}
-
-func nbOps(b *bug.Bug) int {
-	it := bug.NewOperationIterator(b)
-	counter := 0
-	for it.Next() {
-		counter++
-	}
-	return counter
-}
-
-func TestRebaseConflict(t *testing.T) {
-	_RebaseConflict(t)
-}
-
-func BenchmarkRebaseConflict(b *testing.B) {
-	for n := 0; n < b.N; n++ {
-		_RebaseConflict(b)
-	}
-}
-
-func _RebaseConflict(t testing.TB) {
-	repoA, repoB, remote := setupRepos(t)
-	defer cleanupRepos(repoA, repoB, remote)
-
-	bug1, err := operations.Create(rene, unix, "bug1", "message")
-	checkErr(t, err)
-	err = bug1.Commit(repoA)
-	checkErr(t, err)
-
-	// A --> remote
-	_, err = bug.Push(repoA, "origin")
-	checkErr(t, err)
-
-	// remote --> B
-	err = bug.Pull(repoB, "origin")
-	checkErr(t, err)
-
-	operations.Comment(bug1, rene, unix, "message2")
-	operations.Comment(bug1, rene, unix, "message3")
-	operations.Comment(bug1, rene, unix, "message4")
-	err = bug1.Commit(repoA)
-	checkErr(t, err)
-
-	operations.Comment(bug1, rene, unix, "message5")
-	operations.Comment(bug1, rene, unix, "message6")
-	operations.Comment(bug1, rene, unix, "message7")
-	err = bug1.Commit(repoA)
-	checkErr(t, err)
-
-	operations.Comment(bug1, rene, unix, "message8")
-	operations.Comment(bug1, rene, unix, "message9")
-	operations.Comment(bug1, rene, unix, "message10")
-	err = bug1.Commit(repoA)
-	checkErr(t, err)
-
-	bug2, err := bug.ReadLocalBug(repoB, bug1.Id())
-	checkErr(t, err)
-
-	operations.Comment(bug2, rene, unix, "message11")
-	operations.Comment(bug2, rene, unix, "message12")
-	operations.Comment(bug2, rene, unix, "message13")
-	err = bug2.Commit(repoB)
-	checkErr(t, err)
-
-	operations.Comment(bug2, rene, unix, "message14")
-	operations.Comment(bug2, rene, unix, "message15")
-	operations.Comment(bug2, rene, unix, "message16")
-	err = bug2.Commit(repoB)
-	checkErr(t, err)
-
-	operations.Comment(bug2, rene, unix, "message17")
-	operations.Comment(bug2, rene, unix, "message18")
-	operations.Comment(bug2, rene, unix, "message19")
-	err = bug2.Commit(repoB)
-	checkErr(t, err)
-
-	// A --> remote
-	_, err = bug.Push(repoA, "origin")
-	checkErr(t, err)
-
-	// remote --> B
-	err = bug.Pull(repoB, "origin")
-	checkErr(t, err)
-
-	bugs := allBugs(t, bug.ReadAllLocalBugs(repoB))
-
-	if len(bugs) != 1 {
-		t.Fatal("Unexpected number of bugs")
-	}
-
-	bug3, err := bug.ReadLocalBug(repoB, bug1.Id())
-	checkErr(t, err)
-
-	if nbOps(bug3) != 19 {
-		t.Fatal("Unexpected number of operations")
-	}
-
-	// B --> remote
-	_, err = bug.Push(repoB, "origin")
-	checkErr(t, err)
-
-	// remote --> A
-	err = bug.Pull(repoA, "origin")
-	checkErr(t, err)
-
-	bugs = allBugs(t, bug.ReadAllLocalBugs(repoA))
-
-	if len(bugs) != 1 {
-		t.Fatal("Unexpected number of bugs")
-	}
-
-	bug4, err := bug.ReadLocalBug(repoA, bug1.Id())
-	checkErr(t, err)
-
-	if nbOps(bug4) != 19 {
-		t.Fatal("Unexpected number of operations")
-	}
-}

tests/bug_test.go

@@ -1,81 +0,0 @@
-package tests
-
-import (
-	"github.com/MichaelMure/git-bug/bug"
-	"github.com/MichaelMure/git-bug/repository"
-
-	"testing"
-)
-
-func TestBugId(t *testing.T) {
-	mockRepo := repository.NewMockRepoForTest()
-
-	bug1 := bug.NewBug()
-
-	bug1.Append(createOp)
-
-	err := bug1.Commit(mockRepo)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	bug1.Id()
-}
-
-func TestBugValidity(t *testing.T) {
-	mockRepo := repository.NewMockRepoForTest()
-
-	bug1 := bug.NewBug()
-
-	if bug1.Validate() == nil {
-		t.Fatal("Empty bug should be invalid")
-	}
-
-	bug1.Append(createOp)
-
-	if bug1.Validate() != nil {
-		t.Fatal("Bug with just a CreateOp should be valid")
-	}
-
-	err := bug1.Commit(mockRepo)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	bug1.Append(createOp)
-
-	if bug1.Validate() == nil {
-		t.Fatal("Bug with multiple CreateOp should be invalid")
-	}
-
-	err = bug1.Commit(mockRepo)
-	if err == nil {
-		t.Fatal("Invalid bug should not commit")
-	}
-}
-
-//func TestBugSerialisation(t *testing.T) {
-//	bug1, err := bug.NewBug()
-//	if err != nil {
-//		t.Error(err)
-//	}
-//
-//	bug1.Append(createOp)
-//	bug1.Append(setTitleOp)
-//	bug1.Append(setTitleOp)
-//	bug1.Append(addCommentOp)
-//
-//	repo := repository.NewMockRepoForTest()
-//
-//	bug1.Commit(repo)
-//
-//	bug2, err := bug.ReadBug(repo, bug.BugsRefPattern+bug1.Id())
-//	if err != nil {
-//		t.Error(err)
-//	}
-//
-//	if !reflect.DeepEqual(bug1, bug2) {
-//		t.Fatalf("%v different than %v", bug1, bug2)
-//	}
-//}

tests/graphql_test.go

@@ -111,7 +111,7 @@ func TestQueries(t *testing.T) {
 				Nodes    []struct {
 					Author    Person
 					CreatedAt string `json:"createdAt"`
-					HumandId  string `json:"humanId"`
+					HumanId   string `json:"humanId"`
 					Id        string
 					LastEdit  string `json:"lastEdit"`
 					Status    string

tests/read_bugs_test.go

@@ -1,6 +1,8 @@
 package tests
 
 import (
+	"io/ioutil"
+	"log"
 	"testing"
 
 	"github.com/MichaelMure/git-bug/bug"
@@ -8,6 +10,30 @@ import (
 	"github.com/MichaelMure/git-bug/repository"
 )
 
+func createRepo(bare bool) *repository.GitRepo {
+	dir, err := ioutil.TempDir("", "")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// fmt.Println("Creating repo:", dir)
+
+	var creator func(string) (*repository.GitRepo, error)
+
+	if bare {
+		creator = repository.InitBareGitRepo
+	} else {
+		creator = repository.InitGitRepo
+	}
+
+	repo, err := creator(dir)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return repo
+}
+
 func createFilledRepo(bugNumber int) repository.ClockedRepo {
 	repo := createRepo(false)
 

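Since the repository helper now lives next to the read tests, a throwaway repository can be created inline. A rough test sketch (the test name is made up, and cleanup of the temporary directory is omitted for brevity):

package tests

import (
	"testing"

	"github.com/MichaelMure/git-bug/bug"
)

// TestReadEmptyRepo is an illustrative use of the local createRepo helper:
// a freshly initialised repository should stream back no bugs and no errors.
func TestReadEmptyRepo(t *testing.T) {
	repo := createRepo(false)

	for streamed := range bug.ReadAllLocalBugs(repo) {
		if streamed.Err != nil {
			t.Fatal(streamed.Err)
		}
		t.Fatalf("unexpected bug %s in empty repo", streamed.Bug.Id())
	}
}
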
vendor/github.com/davecgh/go-spew/LICENSE

@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

vendor/github.com/davecgh/go-spew/spew/bypass.go

@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+	// flagRO indicates whether the value field of a reflect.Value
+	// is read-only.
+	flagRO flag
+
+	// flagAddr indicates whether the address of the reflect.Value's
+	// value may be taken.
+	flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+	ro, addr flag
+}{{
+	// From Go 1.4 to 1.5
+	ro:   1 << 5,
+	addr: 1 << 7,
+}, {
+	// Up to Go tip.
+	ro:   1<<5 | 1<<6,
+	addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data.  It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+		return v
+	}
+	flagFieldPtr := flagField(&v)
+	*flagFieldPtr &^= flagRO
+	*flagFieldPtr |= flagAddr
+	return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+		panic("reflect.Value flag field has changed kind")
+	}
+	type t0 int
+	var t struct {
+		A t0
+		// t0 will have flagEmbedRO set.
+		t0
+		// a will have flagStickyRO set
+		a t0
+	}
+	vA := reflect.ValueOf(t).FieldByName("A")
+	va := reflect.ValueOf(t).FieldByName("a")
+	vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+	// Infer flagRO from the difference between the flags
+	// for the (otherwise identical) fields in t.
+	flagPublic := *flagField(&vA)
+	flagWithRO := *flagField(&va) | *flagField(&vt0)
+	flagRO = flagPublic ^ flagWithRO
+
+	// Infer flagAddr from the difference between a value
+	// taken from a pointer and not.
+	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+	flagNoPtr := *flagField(&vA)
+	flagPtr := *flagField(&vPtrA)
+	flagAddr = flagNoPtr ^ flagPtr
+
+	// Check that the inferred flags tally with one of the known versions.
+	for _, f := range okFlags {
+		if flagRO == f.ro && flagAddr == f.addr {
+			return
+		}
+	}
+	panic("reflect.Value read-only flag has changed semantics")
+}

vendor/github.com/davecgh/go-spew/spew/bypasssafe.go

@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into a one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data.  However, doing this relies on access to
+// the unsafe package.  This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}

vendor/github.com/davecgh/go-spew/spew/common.go

@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead.  This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputes the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface.  However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules.  We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisify an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len and values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted.  It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+	vs := &valuesSorter{values: values, cs: cs}
+	if canSortSimply(vs.values[0].Kind()) {
+		return vs
+	}
+	if !cs.DisableMethods {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			b := bytes.Buffer{}
+			if !handleMethods(cs, &b, vs.values[i]) {
+				vs.strings = nil
+				break
+			}
+			vs.strings[i] = b.String()
+		}
+	}
+	if vs.strings == nil && cs.SpewKeys {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+		}
+	}
+	return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+	// This switch parallels valueSortLess, except for the default case.
+	switch kind {
+	case reflect.Bool:
+		return true
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return true
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Uintptr:
+		return true
+	case reflect.Array:
+		return true
+	}
+	return false
+}
+
+// Len returns the number of values in the slice.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+	return len(s.values)
+}
+
+// Swap swaps the values at the passed indices.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+	s.values[i], s.values[j] = s.values[j], s.values[i]
+	if s.strings != nil {
+		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+	}
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value.  It is used by valueSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return a.Int() < b.Int()
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return a.Uint() < b.Uint()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.String:
+		return a.String() < b.String()
+	case reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Array:
+		// Compare the contents of both arrays.
+		l := a.Len()
+		for i := 0; i < l; i++ {
+			av := a.Index(i)
+			bv := b.Index(i)
+			if av.Interface() == bv.Interface() {
+				continue
+			}
+			return valueSortLess(av, bv)
+		}
+	}
+	return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j.  It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+	if s.strings == nil {
+		return valueSortLess(s.values[i], s.values[j])
+	}
+	return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer.  Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}

vendor/github.com/davecgh/go-spew/spew/config.go

@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values.  There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality.  Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation.  You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings.  See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level.  The
+	// global config instance that all top-level functions use set this to a
+	// single space by default.  If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures.  The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisify an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods.  As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked.  The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output.  Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings.  This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the formatted string as a value that satisfies error.  See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// passed with a Formatter interface returned by c.NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Printf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// 	Indent: " "
+// 	MaxDepth: 0
+// 	DisableMethods: false
+// 	DisablePointerMethods: false
+// 	ContinueOnMethod: false
+// 	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}

vendor/github.com/davecgh/go-spew/spew/doc.go

@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew.  See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type.  For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions.  This allows concurrent configuration
+options.  See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default.  A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output.  Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability.  Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings.  This is only
+		considered if SortKeys is true.
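+
+As a brief sketch (this assumes the package's NewDefaultConfig constructor for
+ConfigState; myVar is a hypothetical variable), a dedicated ConfigState can be
+used instead of the global spew.Config:
+	scs := spew.NewDefaultConfig()
+	scs.Indent = "\t"
+	scs.MaxDepth = 2
+	scs.Dump(myVar)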
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer.  For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.  The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output.  Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew

vendor/github.com/davecgh/go-spew/spew/dump.go

@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8.  It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char.  It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+	if d.ignoreNextIndent {
+		d.ignoreNextIndent = false
+		return
+	}
+	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range d.pointers {
+		if depth >= d.depth {
+			delete(d.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		d.pointers[addr] = d.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type information.
+	d.w.Write(openParenBytes)
+	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+	d.w.Write([]byte(ve.Type().String()))
+	d.w.Write(closeParenBytes)
+
+	// Display pointer information.
+	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+		d.w.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				d.w.Write(pointerChainBytes)
+			}
+			printHexPtr(d.w, addr)
+		}
+		d.w.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	d.w.Write(openParenBytes)
+	switch {
+	case nilFound:
+		d.w.Write(nilAngleBytes)
+
+	case cycleFound:
+		d.w.Write(circularBytes)
+
+	default:
+		d.ignoreNextType = true
+		d.dump(ve)
+	}
+	d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+	// Determine whether this type should be hex dumped or not.  Also,
+	// for types which should be hexdumped, try to use the underlying data
+	// first, then fall back to trying to convert them to a uint8 slice.
+	var buf []uint8
+	doConvert := false
+	doHexDump := false
+	numEntries := v.Len()
+	if numEntries > 0 {
+		vt := v.Index(0).Type()
+		vts := vt.String()
+		switch {
+		// C types that need to be converted.
+		case cCharRE.MatchString(vts):
+			fallthrough
+		case cUnsignedCharRE.MatchString(vts):
+			fallthrough
+		case cUint8tCharRE.MatchString(vts):
+			doConvert = true
+
+		// Try to use existing uint8 slices and fall back to converting
+		// and copying if that fails.
+		case vt.Kind() == reflect.Uint8:
+			// We need an addressable interface to convert the type
+			// to a byte slice.  However, the reflect package won't
+			// give us an interface on certain things like
+			// unexported struct fields in order to enforce
+			// visibility rules.  We use unsafe, when available, to
+			// bypass these restrictions since this package does not
+			// mutate the values.
+			vs := v
+			if !vs.CanInterface() || !vs.CanAddr() {
+				vs = unsafeReflectValue(vs)
+			}
+			if !UnsafeDisabled {
+				vs = vs.Slice(0, numEntries)
+
+				// Use the existing uint8 slice if it can be
+				// type asserted.
+				iface := vs.Interface()
+				if slice, ok := iface.([]uint8); ok {
+					buf = slice
+					doHexDump = true
+					break
+				}
+			}
+
+			// The underlying data needs to be converted if it can't
+			// be type asserted to a uint8 slice.
+			doConvert = true
+		}
+
+		// Copy and convert the underlying type if needed.
+		if doConvert && vt.ConvertibleTo(uint8Type) {
+			// Convert and copy each element into a uint8 byte
+			// slice.
+			buf = make([]uint8, numEntries)
+			for i := 0; i < numEntries; i++ {
+				vv := v.Index(i)
+				buf[i] = uint8(vv.Convert(uint8Type).Uint())
+			}
+			doHexDump = true
+		}
+	}
+
+	// Hexdump the entire slice as needed.
+	if doHexDump {
+		indent := strings.Repeat(d.cs.Indent, d.depth)
+		str := indent + hex.Dump(buf)
+		str = strings.Replace(str, "\n", "\n"+indent, -1)
+		str = strings.TrimRight(str, d.cs.Indent)
+		d.w.Write([]byte(str))
+		return
+	}
+
+	// Recursively call dump for each item.
+	for i := 0; i < numEntries; i++ {
+		d.dump(d.unpackValue(v.Index(i)))
+		if i < (numEntries - 1) {
+			d.w.Write(commaNewlineBytes)
+		} else {
+			d.w.Write(newlineBytes)
+		}
+	}
+}
+
+// dump is the main workhorse for dumping a value.  It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately.  It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		d.w.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		d.indent()
+		d.dumpPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !d.ignoreNextType {
+		d.indent()
+		d.w.Write(openParenBytes)
+		d.w.Write([]byte(v.Type().String()))
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+	d.ignoreNextType = false
+
+	// Display length and capacity if the built-in len and cap functions
+	// work with the value's kind and the len/cap itself is non-zero.
+	valueLen, valueCap := 0, 0
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Chan:
+		valueLen, valueCap = v.Len(), v.Cap()
+	case reflect.Map, reflect.String:
+		valueLen = v.Len()
+	}
+	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+		d.w.Write(openParenBytes)
+		if valueLen != 0 {
+			d.w.Write(lenEqualsBytes)
+			printInt(d.w, int64(valueLen), 10)
+		}
+		if !d.cs.DisableCapacities && valueCap != 0 {
+			if valueLen != 0 {
+				d.w.Write(spaceBytes)
+			}
+			d.w.Write(capEqualsBytes)
+			printInt(d.w, int64(valueCap), 10)
+		}
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+
+	// Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled
+	if !d.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(d.cs, d.w, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(d.w, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(d.w, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(d.w, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(d.w, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(d.w, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(d.w, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(d.w, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			d.dumpSlice(v)
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.String:
+		d.w.Write([]byte(strconv.Quote(v.String())))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			numEntries := v.Len()
+			keys := v.MapKeys()
+			if d.cs.SortKeys {
+				sortValues(keys, d.cs)
+			}
+			for i, key := range keys {
+				d.dump(d.unpackValue(key))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.MapIndex(key)))
+				if i < (numEntries - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Struct:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			vt := v.Type()
+			numFields := v.NumField()
+			for i := 0; i < numFields; i++ {
+				d.indent()
+				vtf := vt.Field(i)
+				d.w.Write([]byte(vtf.Name))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.Field(i)))
+				if i < (numFields - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(d.w, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(d.w, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it in case any new
+	// types are added.
+	default:
+		if v.CanInterface() {
+			fmt.Fprintf(d.w, "%v", v.Interface())
+		} else {
+			fmt.Fprintf(d.w, "%v", v.String())
+		}
+	}
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+	for _, arg := range a {
+		if arg == nil {
+			w.Write(interfaceBytes)
+			w.Write(spaceBytes)
+			w.Write(nilAngleBytes)
+			w.Write(newlineBytes)
+			continue
+		}
+
+		d := dumpState{w: w, cs: cs}
+		d.pointers = make(map[uintptr]int)
+		d.dump(reflect.ValueOf(arg))
+		d.w.Write(newlineBytes)
+	}
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+	fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(&Config, &buf, a...)
+	return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+	fdump(&Config, os.Stdout, a...)
+}

vendor/github.com/davecgh/go-spew/spew/format.go

@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation.  The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+	value          interface{}
+	fs             fmt.State
+	depth          int
+	pointers       map[uintptr]int
+	ignoreNextType bool
+	cs             *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type.  Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package.  This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
+	if f.fs.Flag('+') && (len(pointerChain) > 0) {
+		f.fs.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				f.fs.Write(pointerChainBytes)
+			}
+			printHexPtr(f.fs, addr)
+		}
+		f.fs.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	switch {
+	case nilFound:
+		f.fs.Write(nilAngleBytes)
+
+	case cycleFound:
+		f.fs.Write(circularShortBytes)
+
+	default:
+		f.ignoreNextType = true
+		f.format(ve)
+	}
+}
+
+// format is the main workhorse for providing the Formatter interface.  It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately.  It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		f.fs.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		f.formatPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !f.ignoreNextType && f.fs.Flag('#') {
+		f.fs.Write(openParenBytes)
+		f.fs.Write([]byte(v.Type().String()))
+		f.fs.Write(closeParenBytes)
+	}
+	f.ignoreNextType = false
+
+	// Call Stringer/error interfaces if they exist and the handle methods
+	// flag is enabled.
+	if !f.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(f.cs, f.fs, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(f.fs, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(f.fs, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(f.fs, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(f.fs, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(f.fs, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(f.fs, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(f.fs, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		f.fs.Write(openBracketBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			numEntries := v.Len()
+			for i := 0; i < numEntries; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.Index(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBracketBytes)
+
+	case reflect.String:
+		f.fs.Write([]byte(v.String()))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
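+
+As a rough sketch of direct use (myVar is a hypothetical variable):
+	fmt.Printf("myVar: %+v\n", spew.NewFormatter(myVar))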
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}

vendor/github.com/davecgh/go-spew/spew/spew.go

@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the formatted string as a value that satisfies error.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+	return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = NewFormatter(arg)
+	}
+	return formatters
+}

vendor/github.com/go-test/deep/.travis.yml

@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - 1.7
+  - 1.8
+  - 1.9
+
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cover
+
+script:
+  - $HOME/gopath/bin/goveralls -service=travis-ci

vendor/github.com/go-test/deep/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright 2015-2017 Daniel Nichter
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

vendor/github.com/go-test/deep/README.md

@@ -0,0 +1,51 @@
+# Deep Variable Equality for Humans
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-test/deep)](https://goreportcard.com/report/github.com/go-test/deep) [![Build Status](https://travis-ci.org/go-test/deep.svg?branch=master)](https://travis-ci.org/go-test/deep) [![Coverage Status](https://coveralls.io/repos/github/go-test/deep/badge.svg?branch=master)](https://coveralls.io/github/go-test/deep?branch=master) [![GoDoc](https://godoc.org/github.com/go-test/deep?status.svg)](https://godoc.org/github.com/go-test/deep)
+
+This package provides a single function: `deep.Equal`. It's like [reflect.DeepEqual](http://golang.org/pkg/reflect/#DeepEqual) but much friendlier to humans (or any sentient being) for two reasons:
+
+* `deep.Equal` returns a list of differences
+* `deep.Equal` does not compare unexported fields (by default)
+
+`reflect.DeepEqual` is good (like all things Golang!), but it's a game of [Hunt the Wumpus](https://en.wikipedia.org/wiki/Hunt_the_Wumpus). For large maps, slices, and structs, finding the difference is difficult.
+
+`deep.Equal` doesn't play games with you; it lists the differences:
+
+```go
+package main_test
+
+import (
+	"testing"
+	"github.com/go-test/deep"
+)
+
+type T struct {
+	Name    string
+	Numbers []float64
+}
+
+func TestDeepEqual(t *testing.T) {
+	// Can you spot the difference?
+	t1 := T{
+		Name:    "Isabella",
+		Numbers: []float64{1.13459, 2.29343, 3.010100010},
+	}
+	t2 := T{
+		Name:    "Isabella",
+		Numbers: []float64{1.13459, 2.29843, 3.010100010},
+	}
+
+	if diff := deep.Equal(t1, t2); diff != nil {
+		t.Error(diff)
+	}
+}
+```
+
+
+```
+$ go test
+--- FAIL: TestDeepEqual (0.00s)
+        main_test.go:25: [Numbers.slice[1]: 2.29343 != 2.29843]
+```
+
+The difference is in `Numbers.slice[1]`: the two values aren't equal using Go `==`.
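+
+Unexported fields are skipped by default. As a minimal sketch, the package-level
+`CompareUnexportedFields` switch (defined in `deep.go`, shown below) can be
+flipped to include them, reusing `t1` and `t2` from the example above:
+
+```go
+deep.CompareUnexportedFields = true // package-level switch; reset it when done
+if diff := deep.Equal(t1, t2); diff != nil {
+	t.Error(diff)
+}
+```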

vendor/github.com/go-test/deep/deep.go

@@ -0,0 +1,352 @@
+// Package deep provides function deep.Equal which is like reflect.DeepEqual but
+// returns a list of differences. This is helpful when comparing complex types
+// like structures and maps.
+package deep
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+var (
+	// FloatPrecision is the number of decimal places to round float values
+	// to when comparing.
+	FloatPrecision = 10
+
+	// MaxDiff specifies the maximum number of differences to return.
+	MaxDiff = 10
+
+	// MaxDepth specifies the maximum levels of a struct to recurse into.
+	MaxDepth = 10
+
+	// LogErrors causes errors to be logged to STDERR when true.
+	LogErrors = false
+
+	// CompareUnexportedFields causes unexported struct fields, like s in
+	// T{s int}, to be compared when true.
+	CompareUnexportedFields = false
+)
+
+var (
+	// ErrMaxRecursion is logged when MaxDepth is reached.
+	ErrMaxRecursion = errors.New("recursed to MaxDepth")
+
+	// ErrTypeMismatch is logged when Equal is passed two different types of values.
+	ErrTypeMismatch = errors.New("variables are different reflect.Type")
+
+	// ErrNotHandled is logged when a primitive Go kind is not handled.
+	ErrNotHandled = errors.New("cannot compare the reflect.Kind")
+)
+
+type cmp struct {
+	diff        []string
+	buff        []string
+	floatFormat string
+}
+
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+// Equal compares variables a and b, recursing into their structure up to
+// MaxDepth levels deep, and returns a list of differences, or nil if there are
+// none. Some differences may not be found if an error is also returned.
+//
+// If a type has an Equal method, like time.Equal, it is called to check for
+// equality.
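+//
+// A minimal usage sketch (got and want are hypothetical values):
+//
+//	if diff := deep.Equal(got, want); diff != nil {
+//		fmt.Println(diff)
+//	}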
+func Equal(a, b interface{}) []string {
+	aVal := reflect.ValueOf(a)
+	bVal := reflect.ValueOf(b)
+	c := &cmp{
+		diff:        []string{},
+		buff:        []string{},
+		floatFormat: fmt.Sprintf("%%.%df", FloatPrecision),
+	}
+	if a == nil && b == nil {
+		return nil
+	} else if a == nil && b != nil {
+		c.saveDiff(b, "<nil pointer>")
+	} else if a != nil && b == nil {
+		c.saveDiff(a, "<nil pointer>")
+	}
+	if len(c.diff) > 0 {
+		return c.diff
+	}
+
+	c.equals(aVal, bVal, 0)
+	if len(c.diff) > 0 {
+		return c.diff // diffs
+	}
+	return nil // no diffs
+}
+
+func (c *cmp) equals(a, b reflect.Value, level int) {
+	if level > MaxDepth {
+		logError(ErrMaxRecursion)
+		return
+	}
+
+	// Check if one value is nil, e.g. T{x: *X} and T.x is nil
+	if !a.IsValid() || !b.IsValid() {
+		if a.IsValid() && !b.IsValid() {
+			c.saveDiff(a.Type(), "<nil pointer>")
+		} else if !a.IsValid() && b.IsValid() {
+			c.saveDiff("<nil pointer>", b.Type())
+		}
+		return
+	}
+
+	// If different types, they can't be equal
+	aType := a.Type()
+	bType := b.Type()
+	if aType != bType {
+		c.saveDiff(aType, bType)
+		logError(ErrTypeMismatch)
+		return
+	}
+
+	// Primitive https://golang.org/pkg/reflect/#Kind
+	aKind := a.Kind()
+	bKind := b.Kind()
+
+	// If both types implement the error interface, compare the error strings.
+	// This must be done before dereferencing because the interface is on a
+	// pointer receiver.
+	if aType.Implements(errorType) && bType.Implements(errorType) {
+		if a.Elem().IsValid() && b.Elem().IsValid() { // both err != nil
+			aString := a.MethodByName("Error").Call(nil)[0].String()
+			bString := b.MethodByName("Error").Call(nil)[0].String()
+			if aString != bString {
+				c.saveDiff(aString, bString)
+			}
+			return
+		}
+	}
+
+	// Dereference pointers and interface{}
+	if aElem, bElem := (aKind == reflect.Ptr || aKind == reflect.Interface),
+		(bKind == reflect.Ptr || bKind == reflect.Interface); aElem || bElem {
+
+		if aElem {
+			a = a.Elem()
+		}
+
+		if bElem {
+			b = b.Elem()
+		}
+
+		c.equals(a, b, level+1)
+		return
+	}
+
+	// Types with an Equal(), like time.Time.
+	eqFunc := a.MethodByName("Equal")
+	if eqFunc.IsValid() {
+		retVals := eqFunc.Call([]reflect.Value{b})
+		if !retVals[0].Bool() {
+			c.saveDiff(a, b)
+		}
+		return
+	}
+
+	switch aKind {
+
+	/////////////////////////////////////////////////////////////////////
+	// Iterable kinds
+	/////////////////////////////////////////////////////////////////////
+
+	case reflect.Struct:
+		/*
+			The variables are structs like:
+				type T struct {
+					FirstName string
+					LastName  string
+				}
+			Type = <pkg>.T, Kind = reflect.Struct
+
+			Iterate through the fields (FirstName, LastName), recurse into their values.
+		*/
+		for i := 0; i < a.NumField(); i++ {
+			if aType.Field(i).PkgPath != "" && !CompareUnexportedFields {
+				continue // skip unexported field, e.g. s in type T struct {s string}
+			}
+
+			c.push(aType.Field(i).Name) // push field name to buff
+
+			// Get the Value for each field, e.g. FirstName has Type = string,
+			// Kind = reflect.String.
+			af := a.Field(i)
+			bf := b.Field(i)
+
+			// Recurse to compare the field values
+			c.equals(af, bf, level+1)
+
+			c.pop() // pop field name from buff
+
+			if len(c.diff) >= MaxDiff {
+				break
+			}
+		}
+	case reflect.Map:
+		/*
+			The variables are maps like:
+				map[string]int{
+					"foo": 1,
+					"bar": 2,
+				}
+			Type = map[string]int, Kind = reflect.Map
+
+			Or:
+				type T map[string]int{}
+			Type = <pkg>.T, Kind = reflect.Map
+
+			Iterate through the map keys (foo, bar), recurse into their values.
+		*/
+
+		if a.IsNil() || b.IsNil() {
+			if a.IsNil() && !b.IsNil() {
+				c.saveDiff("<nil map>", b)
+			} else if !a.IsNil() && b.IsNil() {
+				c.saveDiff(a, "<nil map>")
+			}
+			return
+		}
+
+		if a.Pointer() == b.Pointer() {
+			return
+		}
+
+		for _, key := range a.MapKeys() {
+			c.push(fmt.Sprintf("map[%s]", key))
+
+			aVal := a.MapIndex(key)
+			bVal := b.MapIndex(key)
+			if bVal.IsValid() {
+				c.equals(aVal, bVal, level+1)
+			} else {
+				c.saveDiff(aVal, "<does not have key>")
+			}
+
+			c.pop()
+
+			if len(c.diff) >= MaxDiff {
+				return
+			}
+		}
+
+		for _, key := range b.MapKeys() {
+			if aVal := a.MapIndex(key); aVal.IsValid() {
+				continue
+			}
+
+			c.push(fmt.Sprintf("map[%s]", key))
+			c.saveDiff("<does not have key>", b.MapIndex(key))
+			c.pop()
+			if len(c.diff) >= MaxDiff {
+				return
+			}
+		}
+	case reflect.Array:
+		n := a.Len()
+		for i := 0; i < n; i++ {
+			c.push(fmt.Sprintf("array[%d]", i))
+			c.equals(a.Index(i), b.Index(i), level+1)
+			c.pop()
+			if len(c.diff) >= MaxDiff {
+				break
+			}
+		}
+	case reflect.Slice:
+		if a.IsNil() || b.IsNil() {
+			if a.IsNil() && !b.IsNil() {
+				c.saveDiff("<nil slice>", b)
+			} else if !a.IsNil() && b.IsNil() {
+				c.saveDiff(a, "<nil slice>")
+			}
+			return
+		}
+
+		if a.Pointer() == b.Pointer() {
+			return
+		}
+
+		aLen := a.Len()
+		bLen := b.Len()
+		n := aLen
+		if bLen > aLen {
+			n = bLen
+		}
+		for i := 0; i < n; i++ {
+			c.push(fmt.Sprintf("slice[%d]", i))
+			if i < aLen && i < bLen {
+				c.equals(a.Index(i), b.Index(i), level+1)
+			} else if i < aLen {
+				c.saveDiff(a.Index(i), "<no value>")
+			} else {
+				c.saveDiff("<no value>", b.Index(i))
+			}
+			c.pop()
+			if len(c.diff) >= MaxDiff {
+				break
+			}
+		}
+
+	/////////////////////////////////////////////////////////////////////
+	// Primitive kinds
+	/////////////////////////////////////////////////////////////////////
+
+	case reflect.Float32, reflect.Float64:
+		// Round to FloatPrecision decimal places to avoid spurious
+		// diffs such as 0.04147685731961082 != 0.041476857319611.
+		aval := fmt.Sprintf(c.floatFormat, a.Float())
+		bval := fmt.Sprintf(c.floatFormat, b.Float())
+		if aval != bval {
+			c.saveDiff(a.Float(), b.Float())
+		}
+	case reflect.Bool:
+		if a.Bool() != b.Bool() {
+			c.saveDiff(a.Bool(), b.Bool())
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if a.Int() != b.Int() {
+			c.saveDiff(a.Int(), b.Int())
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		if a.Uint() != b.Uint() {
+			c.saveDiff(a.Uint(), b.Uint())
+		}
+	case reflect.String:
+		if a.String() != b.String() {
+			c.saveDiff(a.String(), b.String())
+		}
+
+	default:
+		logError(ErrNotHandled)
+	}
+}
+
+func (c *cmp) push(name string) {
+	c.buff = append(c.buff, name)
+}
+
+func (c *cmp) pop() {
+	if len(c.buff) > 0 {
+		c.buff = c.buff[0 : len(c.buff)-1]
+	}
+}
+
+func (c *cmp) saveDiff(aval, bval interface{}) {
+	if len(c.buff) > 0 {
+		varName := strings.Join(c.buff, ".")
+		c.diff = append(c.diff, fmt.Sprintf("%s: %v != %v", varName, aval, bval))
+	} else {
+		c.diff = append(c.diff, fmt.Sprintf("%v != %v", aval, bval))
+	}
+}
+
+func logError(err error) {
+	if LogErrors {
+		log.Println(err)
+	}
+}

vendor/github.com/google/go-cmp/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/google/go-cmp/cmp/compare.go

@@ -0,0 +1,553 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package cmp determines equality of values.
+//
+// This package is intended to be a more powerful and safer alternative to
+// reflect.DeepEqual for comparing whether two values are semantically equal.
+//
+// The primary features of cmp are:
+//
+// • When the default behavior of equality does not suit the needs of the test,
+// custom equality functions can override the equality operation.
+// For example, an equality function may report floats as equal so long as they
+// are within some tolerance of each other.
+//
+// • Types that have an Equal method may use that method to determine equality.
+// This allows package authors to determine the equality operation for the types
+// that they define.
+//
+// • If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on both
+// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
+// fields are not compared by default; they result in panics unless suppressed
+// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared
+// using the AllowUnexported option.
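+//
+// A minimal usage sketch (want and got are hypothetical values, t is a
+// hypothetical *testing.T):
+//
+//	if diff := cmp.Diff(want, got); diff != "" {
+//		t.Errorf("mismatch (-want +got):\n%s", diff)
+//	}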
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/google/go-cmp/cmp/internal/diff"
+	"github.com/google/go-cmp/cmp/internal/function"
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to
+// the reflection package's inability to retrieve such entries. Equal will panic
+// anytime it comes across a NaN key, but this behavior may change.
+//
+// See https://golang.org/issue/11104 for more details.
+
+var nothing = reflect.Value{}
+
+// Equal reports whether x and y are equal by recursively applying the
+// following rules in the given order to x and y and all of their sub-values:
+//
+// • If two values are not of the same type, then they are never equal
+// and the overall result is false.
+//
+// • Let S be the set of all Ignore, Transformer, and Comparer options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one Ignore exists in S, then the comparison is ignored.
+// If the number of Transformer and Comparer options in S is greater than one,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single Transformer, then use that to transform the current
+// values and recursively call Equal on the output values.
+// If S contains a single Comparer, then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
+//
+// • If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil.
+// Otherwise, no such method exists and evaluation proceeds to the next rule.
+//
+// • Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings, and
+// channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
+// Pointers are equal if the underlying values they point to are also equal.
+// Interfaces are equal if their underlying concrete values are also equal.
+//
+// Structs are equal if all of their fields are equal. If a struct contains
+// unexported fields, Equal panics unless the AllowUnexported option is used or
+// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field.
+//
+// Arrays, slices, and maps are equal if they are both nil or both non-nil
+// with the same length and the elements at each index or key are equal.
+// Note that a non-nil empty slice and a nil slice are not equal.
+// To equate empty slices and maps, consider using cmpopts.EquateEmpty.
+// Map keys are equal according to the == operator.
+// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
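+//
+// As a brief sketch, a Comparer option can relax float equality (x and y are
+// hypothetical values; math.Abs is from the standard library):
+//
+//	approxFloat := cmp.Comparer(func(a, b float64) bool {
+//		return math.Abs(a-b) < 1e-9
+//	})
+//	equal := cmp.Equal(x, y, approxFloat)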
+func Equal(x, y interface{}, opts ...Option) bool {
+	s := newState(opts)
+	s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y))
+	return s.result.Equal()
+}
+
+// Diff returns a human-readable report of the differences between two values.
+// It returns an empty string if and only if Equal returns true for the same
+// input values and options. The output string will use the "-" symbol to
+// indicate elements removed from x, and the "+" symbol to indicate elements
+// added to y.
+//
+// Do not depend on this output being stable.
+func Diff(x, y interface{}, opts ...Option) string {
+	r := new(defaultReporter)
+	opts = Options{Options(opts), r}
+	eq := Equal(x, y, opts...)
+	d := r.String()
+	if (d == "") != eq {
+		panic("inconsistent difference and equality results")
+	}
+	return d
+}
+
+type state struct {
+	// These fields represent the "comparison state".
+	// Calling statelessCompare must not result in observable changes to these.
+	result   diff.Result // The current result of comparison
+	curPath  Path        // The current path in the value tree
+	reporter reporter    // Optional reporter used for difference formatting
+
+	// dynChecker triggers pseudo-random checks for option correctness.
+	// It is safe for statelessCompare to mutate this value.
+	dynChecker dynChecker
+
+	// These fields, once set by processOption, will not change.
+	exporters map[reflect.Type]bool // Set of structs with unexported field visibility
+	opts      Options               // List of all fundamental and filter options
+}
+
+func newState(opts []Option) *state {
+	s := new(state)
+	for _, opt := range opts {
+		s.processOption(opt)
+	}
+	return s
+}
+
+func (s *state) processOption(opt Option) {
+	switch opt := opt.(type) {
+	case nil:
+	case Options:
+		for _, o := range opt {
+			s.processOption(o)
+		}
+	case coreOption:
+		type filtered interface {
+			isFiltered() bool
+		}
+		if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
+			panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
+		}
+		s.opts = append(s.opts, opt)
+	case visibleStructs:
+		if s.exporters == nil {
+			s.exporters = make(map[reflect.Type]bool)
+		}
+		for t := range opt {
+			s.exporters[t] = true
+		}
+	case reporter:
+		if s.reporter != nil {
+			panic("difference reporter already registered")
+		}
+		s.reporter = opt
+	default:
+		panic(fmt.Sprintf("unknown option %T", opt))
+	}
+}
+
+// statelessCompare compares two values and returns the result.
+// This function is stateless in that it does not alter the current result,
+// or output to any registered reporters.
+func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result {
+	// We do not save and restore the curPath because all of the compareX
+	// methods should properly push and pop from the path.
+	// It is an implementation bug if the contents of curPath differs from
+	// when calling this function to when returning from it.
+
+	oldResult, oldReporter := s.result, s.reporter
+	s.result = diff.Result{} // Reset result
+	s.reporter = nil         // Remove reporter to avoid spurious printouts
+	s.compareAny(vx, vy)
+	res := s.result
+	s.result, s.reporter = oldResult, oldReporter
+	return res
+}
+
+func (s *state) compareAny(vx, vy reflect.Value) {
+	// TODO: Support cyclic data structures.
+
+	// Rule 0: Differing types are never equal.
+	if !vx.IsValid() || !vy.IsValid() {
+		s.report(vx.IsValid() == vy.IsValid(), vx, vy)
+		return
+	}
+	if vx.Type() != vy.Type() {
+		s.report(false, vx, vy) // Possible for path to be empty
+		return
+	}
+	t := vx.Type()
+	if len(s.curPath) == 0 {
+		s.curPath.push(&pathStep{typ: t})
+		defer s.curPath.pop()
+	}
+	vx, vy = s.tryExporting(vx, vy)
+
+	// Rule 1: Check whether an option applies on this node in the value tree.
+	if s.tryOptions(vx, vy, t) {
+		return
+	}
+
+	// Rule 2: Check whether the type has a valid Equal method.
+	if s.tryMethod(vx, vy, t) {
+		return
+	}
+
+	// Rule 3: Recursively descend into each value's underlying kind.
+	switch t.Kind() {
+	case reflect.Bool:
+		s.report(vx.Bool() == vy.Bool(), vx, vy)
+		return
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		s.report(vx.Int() == vy.Int(), vx, vy)
+		return
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		s.report(vx.Uint() == vy.Uint(), vx, vy)
+		return
+	case reflect.Float32, reflect.Float64:
+		s.report(vx.Float() == vy.Float(), vx, vy)
+		return
+	case reflect.Complex64, reflect.Complex128:
+		s.report(vx.Complex() == vy.Complex(), vx, vy)
+		return
+	case reflect.String:
+		s.report(vx.String() == vy.String(), vx, vy)
+		return
+	case reflect.Chan, reflect.UnsafePointer:
+		s.report(vx.Pointer() == vy.Pointer(), vx, vy)
+		return
+	case reflect.Func:
+		s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+		return
+	case reflect.Ptr:
+		if vx.IsNil() || vy.IsNil() {
+			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+			return
+		}
+		s.curPath.push(&indirect{pathStep{t.Elem()}})
+		defer s.curPath.pop()
+		s.compareAny(vx.Elem(), vy.Elem())
+		return
+	case reflect.Interface:
+		if vx.IsNil() || vy.IsNil() {
+			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+			return
+		}
+		if vx.Elem().Type() != vy.Elem().Type() {
+			s.report(false, vx.Elem(), vy.Elem())
+			return
+		}
+		s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}})
+		defer s.curPath.pop()
+		s.compareAny(vx.Elem(), vy.Elem())
+		return
+	case reflect.Slice:
+		if vx.IsNil() || vy.IsNil() {
+			s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+			return
+		}
+		fallthrough
+	case reflect.Array:
+		s.compareArray(vx, vy, t)
+		return
+	case reflect.Map:
+		s.compareMap(vx, vy, t)
+		return
+	case reflect.Struct:
+		s.compareStruct(vx, vy, t)
+		return
+	default:
+		panic(fmt.Sprintf("%v kind not handled", t.Kind()))
+	}
+}
+
+func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) {
+	if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported {
+		if sf.force {
+			// Use unsafe pointer arithmetic to get read-write access to an
+			// unexported field in the struct.
+			vx = unsafeRetrieveField(sf.pvx, sf.field)
+			vy = unsafeRetrieveField(sf.pvy, sf.field)
+		} else {
+			// We are not allowed to export the value, so invalidate them
+			// so that tryOptions can panic later if not explicitly ignored.
+			vx = nothing
+			vy = nothing
+		}
+	}
+	return vx, vy
+}
+
+func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool {
+	// If there are no FilterValues, we will not detect invalid inputs,
+	// so manually check for them and append invalid if necessary.
+	// We still evaluate the options since an ignore can override invalid.
+	opts := s.opts
+	if !vx.IsValid() || !vy.IsValid() {
+		opts = Options{opts, invalid{}}
+	}
+
+	// Evaluate all filters and apply the remaining options.
+	if opt := opts.filter(s, vx, vy, t); opt != nil {
+		opt.apply(s, vx, vy)
+		return true
+	}
+	return false
+}
+
+func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
+	// Check if this type even has an Equal method.
+	m, ok := t.MethodByName("Equal")
+	if !ok || !function.IsType(m.Type, function.EqualAssignable) {
+		return false
+	}
+
+	eq := s.callTTBFunc(m.Func, vx, vy)
+	s.report(eq, vx, vy)
+	return true
+}
+
+func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
+	v = sanitizeValue(v, f.Type().In(0))
+	if !s.dynChecker.Next() {
+		return f.Call([]reflect.Value{v})[0]
+	}
+
+	// Run the function twice and ensure that we get the same results back.
+	// We run in goroutines so that the race detector (if enabled) can detect
+	// unsafe mutations to the input.
+	c := make(chan reflect.Value)
+	go detectRaces(c, f, v)
+	want := f.Call([]reflect.Value{v})[0]
+	if got := <-c; !s.statelessCompare(got, want).Equal() {
+		// To avoid false-positives with non-reflexive equality operations,
+		// we sanity check whether a value is equal to itself.
+		if !s.statelessCompare(want, want).Equal() {
+			return want
+		}
+		fn := getFuncName(f.Pointer())
+		panic(fmt.Sprintf("non-deterministic function detected: %s", fn))
+	}
+	return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
+	x = sanitizeValue(x, f.Type().In(0))
+	y = sanitizeValue(y, f.Type().In(1))
+	if !s.dynChecker.Next() {
+		return f.Call([]reflect.Value{x, y})[0].Bool()
+	}
+
+	// Swapping the input arguments is sufficient to check that
+	// f is symmetric and deterministic.
+	// We run in goroutines so that the race detector (if enabled) can detect
+	// unsafe mutations to the input.
+	c := make(chan reflect.Value)
+	go detectRaces(c, f, y, x)
+	want := f.Call([]reflect.Value{x, y})[0].Bool()
+	if got := <-c; !got.IsValid() || got.Bool() != want {
+		fn := getFuncName(f.Pointer())
+		panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn))
+	}
+	return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
+	var ret reflect.Value
+	defer func() {
+		recover() // Ignore panics, let the other call to f panic instead
+		c <- ret
+	}()
+	ret = f.Call(vs)[0]
+}
+
+// sanitizeValue converts nil interfaces of type T to those of type R,
+// assuming that T is assignable to R.
+// Otherwise, it returns the input value as is.
+func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
+	// TODO(dsnet): Remove this hacky workaround.
+	// See https://golang.org/issue/22143
+	if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
+		return reflect.New(t).Elem()
+	}
+	return v
+}
+
+func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) {
+	step := &sliceIndex{pathStep{t.Elem()}, 0, 0}
+	s.curPath.push(step)
+
+	// Compute an edit-script for slices vx and vy.
+	es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
+		step.xkey, step.ykey = ix, iy
+		return s.statelessCompare(vx.Index(ix), vy.Index(iy))
+	})
+
+	// Report the entire slice as is if the arrays are of primitive kind,
+	// and the arrays are different enough.
+	isPrimitive := false
+	switch t.Elem().Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+		reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+		isPrimitive = true
+	}
+	if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 {
+		s.curPath.pop() // Pop first since we are reporting the whole slice
+		s.report(false, vx, vy)
+		return
+	}
+
+	// Replay the edit-script.
+	var ix, iy int
+	for _, e := range es {
+		switch e {
+		case diff.UniqueX:
+			step.xkey, step.ykey = ix, -1
+			s.report(false, vx.Index(ix), nothing)
+			ix++
+		case diff.UniqueY:
+			step.xkey, step.ykey = -1, iy
+			s.report(false, nothing, vy.Index(iy))
+			iy++
+		default:
+			step.xkey, step.ykey = ix, iy
+			if e == diff.Identity {
+				s.report(true, vx.Index(ix), vy.Index(iy))
+			} else {
+				s.compareAny(vx.Index(ix), vy.Index(iy))
+			}
+			ix++
+			iy++
+		}
+	}
+	s.curPath.pop()
+	return
+}
+
+func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+		return
+	}
+
+	// We combine and sort the two map keys so that we can perform the
+	// comparisons in a deterministic order.
+	step := &mapIndex{pathStep: pathStep{t.Elem()}}
+	s.curPath.push(step)
+	defer s.curPath.pop()
+	for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+		step.key = k
+		vvx := vx.MapIndex(k)
+		vvy := vy.MapIndex(k)
+		switch {
+		case vvx.IsValid() && vvy.IsValid():
+			s.compareAny(vvx, vvy)
+		case vvx.IsValid() && !vvy.IsValid():
+			s.report(false, vvx, nothing)
+		case !vvx.IsValid() && vvy.IsValid():
+			s.report(false, nothing, vvy)
+		default:
+			// It is possible for both vvx and vvy to be invalid if the
+			// key contained a NaN value in it. There is no way in
+			// reflection to be able to retrieve these values.
+			// See https://golang.org/issue/11104
+			panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath))
+		}
+	}
+}
+
+func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) {
+	var vax, vay reflect.Value // Addressable versions of vx and vy
+
+	step := &structField{}
+	s.curPath.push(step)
+	defer s.curPath.pop()
+	for i := 0; i < t.NumField(); i++ {
+		vvx := vx.Field(i)
+		vvy := vy.Field(i)
+		step.typ = t.Field(i).Type
+		step.name = t.Field(i).Name
+		step.idx = i
+		step.unexported = !isExported(step.name)
+		if step.unexported {
+			// Defer checking of unexported fields until later to give an
+			// Ignore a chance to ignore the field.
+			if !vax.IsValid() || !vay.IsValid() {
+				// For unsafeRetrieveField to work, the parent struct must
+				// be addressable. Create a new copy of the values if
+				// necessary to make them addressable.
+				vax = makeAddressable(vx)
+				vay = makeAddressable(vy)
+			}
+			step.force = s.exporters[t]
+			step.pvx = vax
+			step.pvy = vay
+			step.field = t.Field(i)
+		}
+		s.compareAny(vvx, vvy)
+	}
+}
+
+// report records the result of a single comparison.
+// It also calls Report if any reporter is registered.
+func (s *state) report(eq bool, vx, vy reflect.Value) {
+	if eq {
+		s.result.NSame++
+	} else {
+		s.result.NDiff++
+	}
+	if s.reporter != nil {
+		s.reporter.Report(vx, vy, eq, s.curPath)
+	}
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//	0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
+func (dc *dynChecker) Next() bool {
+	ok := dc.curr == dc.next
+	if ok {
+		dc.curr = 0
+		dc.next++
+	}
+	dc.curr++
+	return ok
+}
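
An in-package sketch (hypothetical test code) of which calls actually trigger a check; the gaps between checked calls grow by one each time, which is what keeps the checking cost low:

	var dc dynChecker
	var checked []int
	for i := 1; i <= 20; i++ {
		if dc.Next() {
			checked = append(checked, i)
		}
	}
	// checked is now [1 2 4 7 11 16]: each gap is one larger than the previous.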
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+	if v.CanAddr() {
+		return v
+	}
+	vc := reflect.New(v.Type()).Elem()
+	vc.Set(v)
+	return vc
+}

vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go πŸ”—

@@ -0,0 +1,17 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !debug
+
+package diff
+
+var debug debugger
+
+type debugger struct{}
+
+func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
+	return f
+}
+func (debugger) Update() {}
+func (debugger) Finish() {}

vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go πŸ”—

@@ -0,0 +1,122 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build debug
+
+package diff
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+)
+
+// The algorithm can be seen running in real-time by enabling debugging:
+//	go test -tags=debug -v
+//
+// Example output:
+//	=== RUN   TestDifference/#34
+//	β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+//	β”‚ \ Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· # Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· \ Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· Β· \ Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· Β· Β· X # Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· Β· Β· # \ Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· # # Β· Β· Β· Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· # \ Β· Β· Β· Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· Β· Β· \ Β· Β· Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· Β· Β· Β· \ Β· Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· Β· Β· Β· Β· \ Β· Β· Β· Β· Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· \ Β· Β· # Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· \ # # Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· # # # Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· # # # # Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· Β· Β· Β· Β· # # # # # Β· β”‚
+//	β”‚ Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· Β· \ β”‚
+//	β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+//	[.Y..M.XY......YXYXY.|]
+//
+// The grid represents the edit-graph where the horizontal axis represents
+// list X and the vertical axis represents list Y. The start of the two lists
+// is the top-left, while the ends are the bottom-right. The 'Β·' represents
+// an unexplored node in the graph. The '\' indicates that the two symbols
+// from list X and Y are equal. The 'X' indicates that two symbols are similar
+// (but not exactly equal) to each other. The '#' indicates that the two symbols
+// are different (and not similar). The algorithm traverses this graph trying to
+// make the paths starting in the top-left and the bottom-right connect.
+//
+// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
+// the currently established path from the forward and reverse searches,
+// separated by a '|' character.
+
+const (
+	updateDelay  = 100 * time.Millisecond
+	finishDelay  = 500 * time.Millisecond
+	ansiTerminal = true // ANSI escape codes used to move terminal cursor
+)
+
+var debug debugger
+
+type debugger struct {
+	sync.Mutex
+	p1, p2           EditScript
+	fwdPath, revPath *EditScript
+	grid             []byte
+	lines            int
+}
+
+func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
+	dbg.Lock()
+	dbg.fwdPath, dbg.revPath = p1, p2
+	top := "β”Œβ”€" + strings.Repeat("──", nx) + "┐\n"
+	row := "β”‚ " + strings.Repeat("Β· ", nx) + "β”‚\n"
+	btm := "└─" + strings.Repeat("──", nx) + "β”˜\n"
+	dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
+	dbg.lines = strings.Count(dbg.String(), "\n")
+	fmt.Print(dbg)
+
+	// Wrap the EqualFunc so that we can intercept each result.
+	return func(ix, iy int) (r Result) {
+		cell := dbg.grid[len(top)+iy*len(row):][len("β”‚ ")+len("Β· ")*ix:][:len("Β·")]
+		for i := range cell {
+			cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
+		}
+		switch r = f(ix, iy); {
+		case r.Equal():
+			cell[0] = '\\'
+		case r.Similar():
+			cell[0] = 'X'
+		default:
+			cell[0] = '#'
+		}
+		return
+	}
+}
+
+func (dbg *debugger) Update() {
+	dbg.print(updateDelay)
+}
+
+func (dbg *debugger) Finish() {
+	dbg.print(finishDelay)
+	dbg.Unlock()
+}
+
+func (dbg *debugger) String() string {
+	dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
+	for i := len(*dbg.revPath) - 1; i >= 0; i-- {
+		dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
+	}
+	return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
+}
+
+func (dbg *debugger) print(d time.Duration) {
+	if ansiTerminal {
+		fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
+	}
+	fmt.Print(dbg)
+	time.Sleep(d)
+}

vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go πŸ”—

@@ -0,0 +1,363 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package diff implements an algorithm for producing edit-scripts.
+// The edit-script is a sequence of operations needed to transform one list
+// of symbols into another (or vice-versa). The edits allowed are insertions,
+// deletions, and modifications. The total number of edits is the
+// Levenshtein distance, a measure that is well-known in computer science.
+//
+// This package prioritizes performance over accuracy. That is, the run time
+// is more important than obtaining a minimal Levenshtein distance.
+package diff
+
+// EditType represents a single operation within an edit-script.
+type EditType uint8
+
+const (
+	// Identity indicates that a symbol pair is identical in both list X and Y.
+	Identity EditType = iota
+	// UniqueX indicates that a symbol only exists in X and not Y.
+	UniqueX
+	// UniqueY indicates that a symbol only exists in Y and not X.
+	UniqueY
+	// Modified indicates that a symbol pair is a modification of each other.
+	Modified
+)
+
+// EditScript represents the series of differences between two lists.
+type EditScript []EditType
+
+// String returns a human-readable string representing the edit-script where
+// Identity, UniqueX, UniqueY, and Modified are represented by the
+// '.', 'X', 'Y', and 'M' characters, respectively.
+func (es EditScript) String() string {
+	b := make([]byte, len(es))
+	for i, e := range es {
+		switch e {
+		case Identity:
+			b[i] = '.'
+		case UniqueX:
+			b[i] = 'X'
+		case UniqueY:
+			b[i] = 'Y'
+		case Modified:
+			b[i] = 'M'
+		default:
+			panic("invalid edit-type")
+		}
+	}
+	return string(b)
+}
+
+// stats returns a histogram of the number of each type of edit operation.
+func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
+	for _, e := range es {
+		switch e {
+		case Identity:
+			s.NI++
+		case UniqueX:
+			s.NX++
+		case UniqueY:
+			s.NY++
+		case Modified:
+			s.NM++
+		default:
+			panic("invalid edit-type")
+		}
+	}
+	return
+}
+
+// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
+// lists X and Y are equal.
+func (es EditScript) Dist() int { return len(es) - es.stats().NI }
+
+// LenX is the length of the X list.
+func (es EditScript) LenX() int { return len(es) - es.stats().NY }
+
+// LenY is the length of the Y list.
+func (es EditScript) LenY() int { return len(es) - es.stats().NX }
+
+// EqualFunc reports whether the symbols at indexes ix and iy are equal.
+// When called by Difference, ix is guaranteed to be within [0, nx) and
+// iy within [0, ny).
+type EqualFunc func(ix int, iy int) Result
+
+// Result is the result of comparison.
+// NSame is the number of sub-elements that are equal.
+// NDiff is the number of sub-elements that are not equal.
+type Result struct{ NSame, NDiff int }
+
+// Equal indicates whether the symbols are equal. Two symbols are equal
+// if and only if NDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NDiff == 0 }
+
+// Similar indicates whether two symbols are similar and may be represented
+// by using the Modified type. As a special case, we consider binary comparisons
+// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
+//
+// The exact ratio of NSame to NDiff to determine similarity may change.
+func (r Result) Similar() bool {
+	// Use NSame+1 to offset NSame so that binary comparisons are similar.
+	return r.NSame+1 >= r.NDiff
+}
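
A few concrete values illustrating these predicates (editor's sketch using the Result type above):

	Result{NSame: 3, NDiff: 0}.Equal()   // true (and therefore also Similar)
	Result{NSame: 0, NDiff: 1}.Similar() // true: the binary-comparison special case
	Result{NSame: 1, NDiff: 3}.Similar() // false: 1+1 >= 3 does not hold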
+
+// Difference reports whether two lists of lengths nx and ny are equal
+// given the definition of equality provided as f.
+//
+// This function returns an edit-script, which is a sequence of operations
+// needed to convert one list into the other. The following invariants for
+// the edit-script are maintained:
+//	β€’ eq == (es.Dist()==0)
+//	β€’ nx == es.LenX()
+//	β€’ ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (es EditScript) {
+	// This algorithm is based on traversing what is known as an "edit-graph".
+	// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+	// by Eugene W. Myers. Since D can be as large as N itself, this is
+	// effectively O(N^2). Unlike the algorithm from that paper, we are not
+	// interested in the optimal path, only in some "decent" path.
+	//
+	// For example, let X and Y be lists of symbols:
+	//	X = [A B C A B B A]
+	//	Y = [C B A B A C]
+	//
+	// The edit-graph can be drawn as the following:
+	//	   A B C A B B A
+	//	  β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+	//	C β”‚_|_|\|_|_|_|_β”‚ 0
+	//	B β”‚_|\|_|_|\|\|_β”‚ 1
+	//	A β”‚\|_|_|\|_|_|\β”‚ 2
+	//	B β”‚_|\|_|_|\|\|_β”‚ 3
+	//	A β”‚\|_|_|\|_|_|\β”‚ 4
+	//	C β”‚ | |\| | | | β”‚ 5
+	//	  β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ 6
+	//	   0 1 2 3 4 5 6 7
+	//
+	// List X is written along the horizontal axis, while list Y is written
+	// along the vertical axis. At any point on this grid, if the symbol in
+	// list X matches the corresponding symbol in list Y, then a '\' is drawn.
+	// The goal of any minimal edit-script algorithm is to find a path from the
+	// top-left corner to the bottom-right corner, while traveling through the
+	// fewest horizontal or vertical edges.
+	// A horizontal edge is equivalent to inserting a symbol from list X.
+	// A vertical edge is equivalent to inserting a symbol from list Y.
+	// A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+	// Invariants:
+	//	β€’ 0 ≀ fwdPath.X ≀ (fwdFrontier.X, revFrontier.X) ≀ revPath.X ≀ nx
+	//	β€’ 0 ≀ fwdPath.Y ≀ (fwdFrontier.Y, revFrontier.Y) ≀ revPath.Y ≀ ny
+	//
+	// In general:
+	//	β€’ fwdFrontier.X < revFrontier.X
+	//	β€’ fwdFrontier.Y < revFrontier.Y
+	// unless it is time for the algorithm to terminate.
+	fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+	revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+	fwdFrontier := fwdPath.point // Forward search frontier
+	revFrontier := revPath.point // Reverse search frontier
+
+	// Search budget bounds the cost of searching for better paths.
+	// The longest sequence of non-matching symbols that can be tolerated is
+	// approximately the square-root of the search budget.
+	searchBudget := 4 * (nx + ny) // O(n)
+
+	// The algorithm below is a greedy, meet-in-the-middle algorithm for
+	// computing sub-optimal edit-scripts between two lists.
+	//
+	// The algorithm is approximately as follows:
+	//	β€’ Searching for differences switches back-and-forth between
+	//	a search that starts at the beginning (the top-left corner), and
+	//	a search that starts at the end (the bottom-right corner). The goal of
+	//	the search is to connect with the search from the opposite corner.
+	//	β€’ As we search, we build a path in a greedy manner, where the first
+	//	match seen is added to the path (this is sub-optimal, but provides a
+	//	decent result in practice). When matches are found, we try the next pair
+	//	of symbols in the lists and follow all matches as far as possible.
+	//	β€’ When searching for matches, we search along a diagonal going through
+	//	the "frontier" point. If no matches are found, we advance the
+	//	frontier towards the opposite corner.
+	//	β€’ This algorithm terminates when either the X coordinates or the
+	//	Y coordinates of the forward and reverse frontier points ever intersect.
+	//
+	// This algorithm is correct even if searching only in the forward direction
+	// or in the reverse direction. We do both because it is commonly observed
+	// that two lists differ because elements were added to the front
+	// or the end of one of them.
+	//
+	// Running the tests with the "debug" build tag prints a visualization of
+	// the algorithm running in real-time. This is educational for understanding
+	// how the algorithm works. See debug_enable.go.
+	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+	for {
+		// Forward search from the beginning.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			break
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+			switch {
+			case p.X >= revPath.X || p.Y < fwdPath.Y:
+				stop1 = true // Hit top-right corner
+			case p.Y >= revPath.Y || p.X < fwdPath.X:
+				stop2 = true // Hit bottom-left corner
+			case f(p.X, p.Y).Equal():
+				// Match found, so connect the path to this point.
+				fwdPath.connect(p, f)
+				fwdPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(fwdPath.X, fwdPath.Y).Equal() {
+						break
+					}
+					fwdPath.append(Identity)
+				}
+				fwdFrontier = fwdPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards reverse point.
+		if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+			fwdFrontier.X++
+		} else {
+			fwdFrontier.Y++
+		}
+
+		// Reverse search from the end.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			break
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{revFrontier.X - z, revFrontier.Y + z}
+			switch {
+			case fwdPath.X >= p.X || revPath.Y < p.Y:
+				stop1 = true // Hit bottom-left corner
+			case fwdPath.Y >= p.Y || revPath.X < p.X:
+				stop2 = true // Hit top-right corner
+			case f(p.X-1, p.Y-1).Equal():
+				// Match found, so connect the path to this point.
+				revPath.connect(p, f)
+				revPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(revPath.X-1, revPath.Y-1).Equal() {
+						break
+					}
+					revPath.append(Identity)
+				}
+				revFrontier = revPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards forward point.
+		if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
+			revFrontier.X--
+		} else {
+			revFrontier.Y--
+		}
+	}
+
+	// Join the forward and reverse paths and then append the reverse path.
+	fwdPath.connect(revPath.point, f)
+	for i := len(revPath.es) - 1; i >= 0; i-- {
+		t := revPath.es[i]
+		revPath.es = revPath.es[:i]
+		fwdPath.append(t)
+	}
+	debug.Finish()
+	return fwdPath.es
+}
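
An in-package sketch (hypothetical values) of driving Difference with a custom EqualFunc over two integer slices:

	x := []int{1, 2, 3, 4}
	y := []int{1, 3, 4, 5}
	es := Difference(len(x), len(y), func(ix, iy int) Result {
		if x[ix] == y[iy] {
			return Result{NSame: 1}
		}
		return Result{NDiff: 1}
	})
	// The invariants documented above hold: es.LenX() == 4, es.LenY() == 4,
	// and es.Dist() > 0 since the lists differ. The exact es.String() output
	// (e.g., something like ".X..Y") depends on the path the search finds.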
+
+type path struct {
+	dir   int // +1 if forward, -1 if reverse
+	point     // Leading point of the EditScript path
+	es    EditScript
+}
+
+// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
+// to the edit-script to connect p.point to dst.
+func (p *path) connect(dst point, f EqualFunc) {
+	if p.dir > 0 {
+		// Connect in forward direction.
+		for dst.X > p.X && dst.Y > p.Y {
+			switch r := f(p.X, p.Y); {
+			case r.Equal():
+				p.append(Identity)
+			case r.Similar():
+				p.append(Modified)
+			case dst.X-p.X >= dst.Y-p.Y:
+				p.append(UniqueX)
+			default:
+				p.append(UniqueY)
+			}
+		}
+		for dst.X > p.X {
+			p.append(UniqueX)
+		}
+		for dst.Y > p.Y {
+			p.append(UniqueY)
+		}
+	} else {
+		// Connect in reverse direction.
+		for p.X > dst.X && p.Y > dst.Y {
+			switch r := f(p.X-1, p.Y-1); {
+			case r.Equal():
+				p.append(Identity)
+			case r.Similar():
+				p.append(Modified)
+			case p.Y-dst.Y >= p.X-dst.X:
+				p.append(UniqueY)
+			default:
+				p.append(UniqueX)
+			}
+		}
+		for p.X > dst.X {
+			p.append(UniqueX)
+		}
+		for p.Y > dst.Y {
+			p.append(UniqueY)
+		}
+	}
+}
+
+func (p *path) append(t EditType) {
+	p.es = append(p.es, t)
+	switch t {
+	case Identity, Modified:
+		p.add(p.dir, p.dir)
+	case UniqueX:
+		p.add(p.dir, 0)
+	case UniqueY:
+		p.add(0, p.dir)
+	}
+	debug.Update()
+}
+
+type point struct{ X, Y int }
+
+func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
+
+// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//	[0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
+func zigzag(x int) int {
+	if x&1 != 0 {
+		x = ^x
+	}
+	return x >> 1
+}

vendor/github.com/google/go-cmp/cmp/internal/function/func.go πŸ”—

@@ -0,0 +1,49 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package function identifies function types.
+package function
+
+import "reflect"
+
+type funcType int
+
+const (
+	_ funcType = iota
+
+	ttbFunc // func(T, T) bool
+	tibFunc // func(T, I) bool
+	trFunc  // func(T) R
+
+	Equal           = ttbFunc // func(T, T) bool
+	EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+	Transformer     = trFunc  // func(T) R
+	ValueFilter     = ttbFunc // func(T, T) bool
+	Less            = ttbFunc // func(T, T) bool
+)
+
+var boolType = reflect.TypeOf(true)
+
+// IsType reports whether the reflect.Type is of the specified function type.
+func IsType(t reflect.Type, ft funcType) bool {
+	if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
+		return false
+	}
+	ni, no := t.NumIn(), t.NumOut()
+	switch ft {
+	case ttbFunc: // func(T, T) bool
+		if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
+			return true
+		}
+	case tibFunc: // func(T, I) bool
+		if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
+			return true
+		}
+	case trFunc: // func(T) R
+		if ni == 1 && no == 1 {
+			return true
+		}
+	}
+	return false
+}
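
A sketch of how a caller might use IsType to vet user-supplied functions before accepting them as options (reflect and time assumed imported; the functions are hypothetical):

	eqFn := func(x, y time.Time) bool { return x.Equal(y) }
	trFn := func(s string) []rune { return []rune(s) }
	IsType(reflect.TypeOf(eqFn), Equal)       // true:  func(T, T) bool
	IsType(reflect.TypeOf(trFn), Transformer) // true:  func(T) R
	IsType(reflect.TypeOf(0), Equal)          // false: not a function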

vendor/github.com/google/go-cmp/cmp/internal/value/format.go πŸ”—

@@ -0,0 +1,277 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package value provides functionality for reflect.Value types.
+package value
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+// Format formats the value v as a string.
+//
+// This is similar to fmt.Sprintf("%+v", v) except this:
+//	* Prints the type unless it can be elided
+//	* Avoids printing struct fields that are zero
+//	* Prints a nil-slice as being nil, not empty
+//	* Prints map entries in deterministic order
+func Format(v reflect.Value, conf FormatConfig) string {
+	conf.printType = true
+	conf.followPointers = true
+	conf.realPointers = true
+	return formatAny(v, conf, nil)
+}
+
+type FormatConfig struct {
+	UseStringer        bool // Should the String method be used if available?
+	printType          bool // Should we print the type before the value?
+	PrintPrimitiveType bool // Should we print the type of primitives?
+	followPointers     bool // Should we recursively follow pointers?
+	realPointers       bool // Should we print the real address of pointers?
+}
+
+func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string {
+	// TODO: Should this be a multi-line printout in certain situations?
+
+	if !v.IsValid() {
+		return "<non-existent>"
+	}
+	if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() {
+		if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() {
+			return "<nil>"
+		}
+
+		const stringerPrefix = "s" // Indicates that the String method was used
+		s := v.Interface().(fmt.Stringer).String()
+		return stringerPrefix + formatString(s)
+	}
+
+	switch v.Kind() {
+	case reflect.Bool:
+		return formatPrimitive(v.Type(), v.Bool(), conf)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return formatPrimitive(v.Type(), v.Int(), conf)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr {
+			// Unnamed uints are usually bytes or words, so use hexadecimal.
+			return formatPrimitive(v.Type(), formatHex(v.Uint()), conf)
+		}
+		return formatPrimitive(v.Type(), v.Uint(), conf)
+	case reflect.Float32, reflect.Float64:
+		return formatPrimitive(v.Type(), v.Float(), conf)
+	case reflect.Complex64, reflect.Complex128:
+		return formatPrimitive(v.Type(), v.Complex(), conf)
+	case reflect.String:
+		return formatPrimitive(v.Type(), formatString(v.String()), conf)
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		return formatPointer(v, conf)
+	case reflect.Ptr:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("(%v)(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		if visited[v.Pointer()] || !conf.followPointers {
+			return formatPointer(v, conf)
+		}
+		visited = insertPointer(visited, v.Pointer())
+		return "&" + formatAny(v.Elem(), conf, visited)
+	case reflect.Interface:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("%v(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		return formatAny(v.Elem(), conf, visited)
+	case reflect.Slice:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("%v(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		if visited[v.Pointer()] {
+			return formatPointer(v, conf)
+		}
+		visited = insertPointer(visited, v.Pointer())
+		fallthrough
+	case reflect.Array:
+		var ss []string
+		subConf := conf
+		subConf.printType = v.Type().Elem().Kind() == reflect.Interface
+		for i := 0; i < v.Len(); i++ {
+			s := formatAny(v.Index(i), subConf, visited)
+			ss = append(ss, s)
+		}
+		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+		if conf.printType {
+			return v.Type().String() + s
+		}
+		return s
+	case reflect.Map:
+		if v.IsNil() {
+			if conf.printType {
+				return fmt.Sprintf("%v(nil)", v.Type())
+			}
+			return "<nil>"
+		}
+		if visited[v.Pointer()] {
+			return formatPointer(v, conf)
+		}
+		visited = insertPointer(visited, v.Pointer())
+
+		var ss []string
+		keyConf, valConf := conf, conf
+		keyConf.printType = v.Type().Key().Kind() == reflect.Interface
+		keyConf.followPointers = false
+		valConf.printType = v.Type().Elem().Kind() == reflect.Interface
+		for _, k := range SortKeys(v.MapKeys()) {
+			sk := formatAny(k, keyConf, visited)
+			sv := formatAny(v.MapIndex(k), valConf, visited)
+			ss = append(ss, fmt.Sprintf("%s: %s", sk, sv))
+		}
+		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+		if conf.printType {
+			return v.Type().String() + s
+		}
+		return s
+	case reflect.Struct:
+		var ss []string
+		subConf := conf
+		subConf.printType = true
+		for i := 0; i < v.NumField(); i++ {
+			vv := v.Field(i)
+			if isZero(vv) {
+				continue // Elide zero value fields
+			}
+			name := v.Type().Field(i).Name
+			subConf.UseStringer = conf.UseStringer
+			s := formatAny(vv, subConf, visited)
+			ss = append(ss, fmt.Sprintf("%s: %s", name, s))
+		}
+		s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
+		if conf.printType {
+			return v.Type().String() + s
+		}
+		return s
+	default:
+		panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+	}
+}
+
+func formatString(s string) string {
+	// Use the quoted string if it is the same length as a raw string literal.
+	// Otherwise, attempt to use the raw string form.
+	qs := strconv.Quote(s)
+	if len(qs) == 1+len(s)+1 {
+		return qs
+	}
+
+	// Disallow newlines to ensure output is a single line.
+	// Only allow printable runes for readability purposes.
+	rawInvalid := func(r rune) bool {
+		return r == '`' || r == '\n' || !unicode.IsPrint(r)
+	}
+	if strings.IndexFunc(s, rawInvalid) < 0 {
+		return "`" + s + "`"
+	}
+	return qs
+}
+
+func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string {
+	if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") {
+		return fmt.Sprintf("%v(%v)", t, v)
+	}
+	return fmt.Sprintf("%v", v)
+}
+
+func formatPointer(v reflect.Value, conf FormatConfig) string {
+	p := v.Pointer()
+	if !conf.realPointers {
+		p = 0 // For deterministic printing purposes
+	}
+	s := formatHex(uint64(p))
+	if conf.printType {
+		return fmt.Sprintf("(%v)(%s)", v.Type(), s)
+	}
+	return s
+}
+
+func formatHex(u uint64) string {
+	var f string
+	switch {
+	case u <= 0xff:
+		f = "0x%02x"
+	case u <= 0xffff:
+		f = "0x%04x"
+	case u <= 0xffffff:
+		f = "0x%06x"
+	case u <= 0xffffffff:
+		f = "0x%08x"
+	case u <= 0xffffffffff:
+		f = "0x%010x"
+	case u <= 0xffffffffffff:
+		f = "0x%012x"
+	case u <= 0xffffffffffffff:
+		f = "0x%014x"
+	case u <= 0xffffffffffffffff:
+		f = "0x%016x"
+	}
+	return fmt.Sprintf(f, u)
+}
+
+// insertPointer inserts p into m, allocating m if necessary.
+func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool {
+	if m == nil {
+		m = make(map[uintptr]bool)
+	}
+	m[p] = true
+	return m
+}
+
+// isZero reports whether v is the zero value.
+// This does not rely on Interface and so can be used on unexported fields.
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return v.Bool() == false
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.String:
+		return v.String() == ""
+	case reflect.UnsafePointer:
+		return v.Pointer() == 0
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+		return v.IsNil()
+	case reflect.Array:
+		for i := 0; i < v.Len(); i++ {
+			if !isZero(v.Index(i)) {
+				return false
+			}
+		}
+		return true
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}

vendor/github.com/google/go-cmp/cmp/internal/value/sort.go πŸ”—

@@ -0,0 +1,111 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package value
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+)
+
+// SortKeys sorts a list of map keys, deduplicating keys if necessary.
+// The type of each value must be comparable.
+func SortKeys(vs []reflect.Value) []reflect.Value {
+	if len(vs) == 0 {
+		return vs
+	}
+
+	// Sort the map keys.
+	sort.Sort(valueSorter(vs))
+
+	// Deduplicate keys (fails for NaNs).
+	vs2 := vs[:1]
+	for _, v := range vs[1:] {
+		if isLess(vs2[len(vs2)-1], v) {
+			vs2 = append(vs2, v)
+		}
+	}
+	return vs2
+}
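
An in-package sketch (hypothetical map) showing how SortKeys yields a deterministic iteration order:

	m := map[string]int{"b": 1, "a": 2, "c": 3}
	keys := SortKeys(reflect.ValueOf(m).MapKeys())
	// keys now hold "a", "b", "c" in that order, regardless of Go's
	// randomized map iteration order.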
+
+// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above.
+type valueSorter []reflect.Value
+
+func (vs valueSorter) Len() int           { return len(vs) }
+func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) }
+func (vs valueSorter) Swap(i, j int)      { vs[i], vs[j] = vs[j], vs[i] }
+
+// isLess is a generic function for sorting arbitrary map keys.
+// The inputs must be of the same type and must be comparable.
+func isLess(x, y reflect.Value) bool {
+	switch x.Type().Kind() {
+	case reflect.Bool:
+		return !x.Bool() && y.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return x.Int() < y.Int()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return x.Uint() < y.Uint()
+	case reflect.Float32, reflect.Float64:
+		fx, fy := x.Float(), y.Float()
+		return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
+	case reflect.Complex64, reflect.Complex128:
+		cx, cy := x.Complex(), y.Complex()
+		rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
+		if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
+			return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
+		}
+		return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
+	case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
+		return x.Pointer() < y.Pointer()
+	case reflect.String:
+		return x.String() < y.String()
+	case reflect.Array:
+		for i := 0; i < x.Len(); i++ {
+			if isLess(x.Index(i), y.Index(i)) {
+				return true
+			}
+			if isLess(y.Index(i), x.Index(i)) {
+				return false
+			}
+		}
+		return false
+	case reflect.Struct:
+		for i := 0; i < x.NumField(); i++ {
+			if isLess(x.Field(i), y.Field(i)) {
+				return true
+			}
+			if isLess(y.Field(i), x.Field(i)) {
+				return false
+			}
+		}
+		return false
+	case reflect.Interface:
+		vx, vy := x.Elem(), y.Elem()
+		if !vx.IsValid() || !vy.IsValid() {
+			return !vx.IsValid() && vy.IsValid()
+		}
+		tx, ty := vx.Type(), vy.Type()
+		if tx == ty {
+			return isLess(x.Elem(), y.Elem())
+		}
+		if tx.Kind() != ty.Kind() {
+			return vx.Kind() < vy.Kind()
+		}
+		if tx.String() != ty.String() {
+			return tx.String() < ty.String()
+		}
+		if tx.PkgPath() != ty.PkgPath() {
+			return tx.PkgPath() < ty.PkgPath()
+		}
+		// This can happen in rare situations, so we fall back to just comparing
+		// the unique pointer for a reflect.Type. This guarantees deterministic
+		// ordering within a program, but it is obviously not stable.
+		return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
+	default:
+		// Must be Func, Map, or Slice; which are not comparable.
+		panic(fmt.Sprintf("%T is not comparable", x.Type()))
+	}
+}

vendor/github.com/google/go-cmp/cmp/options.go πŸ”—

@@ -0,0 +1,453 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+	"strings"
+
+	"github.com/google/go-cmp/cmp/internal/function"
+)
+
+// Option configures for specific behavior of Equal and Diff. In particular,
+// the fundamental Option functions (Ignore, Transformer, and Comparer),
+// configure how equality is determined.
+//
+// The fundamental options may be composed with filters (FilterPath and
+// FilterValues) to control the scope over which they are applied.
+//
+// The cmp/cmpopts package provides helper functions for creating options that
+// may be used with Equal and Diff.
+type Option interface {
+	// filter applies all filters and returns the option that remains.
+	// Each option may only read s.curPath and call s.callTTBFunc.
+	//
+	// An Options is returned only if multiple comparers or transformers
+	// can apply simultaneously and will only contain values of those types
+	// or sub-Options containing values of those types.
+	filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption
+}
+
+// applicableOption represents the following types:
+//	Fundamental: ignore | invalid | *comparer | *transformer
+//	Grouping:    Options
+type applicableOption interface {
+	Option
+
+	// apply executes the option, which may mutate s or panic.
+	apply(s *state, vx, vy reflect.Value)
+}
+
+// coreOption represents the following types:
+//	Fundamental: ignore | invalid | *comparer | *transformer
+//	Filters:     *pathFilter | *valuesFilter
+type coreOption interface {
+	Option
+	isCore()
+}
+
+type core struct{}
+
+func (core) isCore() {}
+
+// Options is a list of Option values that also satisfies the Option interface.
+// Helper comparison packages may return an Options value when packing multiple
+// Option values into a single Option. When this package processes an Options,
+// it will be implicitly expanded into a flat list.
+//
+// Applying a filter on an Options is equivalent to applying that same filter
+// on all individual options held within.
+type Options []Option
+
+func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) {
+	for _, opt := range opts {
+		switch opt := opt.filter(s, vx, vy, t); opt.(type) {
+		case ignore:
+			return ignore{} // Only ignore can short-circuit evaluation
+		case invalid:
+			out = invalid{} // Takes precedence over comparer or transformer
+		case *comparer, *transformer, Options:
+			switch out.(type) {
+			case nil:
+				out = opt
+			case invalid:
+				// Keep invalid
+			case *comparer, *transformer, Options:
+				out = Options{out, opt} // Conflicting comparers or transformers
+			}
+		}
+	}
+	return out
+}
+
+func (opts Options) apply(s *state, _, _ reflect.Value) {
+	const warning = "ambiguous set of applicable options"
+	const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
+	var ss []string
+	for _, opt := range flattenOptions(nil, opts) {
+		ss = append(ss, fmt.Sprint(opt))
+	}
+	set := strings.Join(ss, "\n\t")
+	panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
+}
+
+func (opts Options) String() string {
+	var ss []string
+	for _, opt := range opts {
+		ss = append(ss, fmt.Sprint(opt))
+	}
+	return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
+}
+
+// FilterPath returns a new Option where opt is only evaluated if filter f
+// returns true for the current Path in the value tree.
+//
+// The option passed in may be an Ignore, Transformer, Comparer, Options, or
+// a previously filtered Option.
+func FilterPath(f func(Path) bool, opt Option) Option {
+	if f == nil {
+		panic("invalid path filter function")
+	}
+	if opt := normalizeOption(opt); opt != nil {
+		return &pathFilter{fnc: f, opt: opt}
+	}
+	return nil
+}
+
+type pathFilter struct {
+	core
+	fnc func(Path) bool
+	opt Option
+}
+
+func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
+	if f.fnc(s.curPath) {
+		return f.opt.filter(s, vx, vy, t)
+	}
+	return nil
+}
+
+func (f pathFilter) String() string {
+	fn := getFuncName(reflect.ValueOf(f.fnc).Pointer())
+	return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt)
+}
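
A hedged usage sketch of FilterPath with the public API: ignore any struct field named "Metadata" anywhere in the value tree (the field name is hypothetical; x and y stand in for the caller's values):

	ignoreMetadata := cmp.FilterPath(func(p cmp.Path) bool {
		return p.Last().String() == ".Metadata"
	}, cmp.Ignore())
	_ = cmp.Equal(x, y, ignoreMetadata)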
+
+// FilterValues returns a new Option where opt is only evaluated if filter f,
+// which is a function of the form "func(T, T) bool", returns true for the
+// current pair of values being compared. If the type of the values is not
+// assignable to T, then this filter implicitly returns false.
+//
+// The filter function must be
+// symmetric (i.e., agnostic to the order of the inputs) and
+// deterministic (i.e., produces the same result when given the same inputs).
+// If T is an interface, it is possible that f is called with two values with
+// different concrete types that both implement T.
+//
+// The option passed in may be an Ignore, Transformer, Comparer, Options, or
+// a previously filtered Option.
+func FilterValues(f interface{}, opt Option) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
+		panic(fmt.Sprintf("invalid values filter function: %T", f))
+	}
+	if opt := normalizeOption(opt); opt != nil {
+		vf := &valuesFilter{fnc: v, opt: opt}
+		if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+			vf.typ = ti
+		}
+		return vf
+	}
+	return nil
+}
+
+type valuesFilter struct {
+	core
+	typ reflect.Type  // T
+	fnc reflect.Value // func(T, T) bool
+	opt Option
+}
+
+func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
+	if !vx.IsValid() || !vy.IsValid() {
+		return invalid{}
+	}
+	if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
+		return f.opt.filter(s, vx, vy, t)
+	}
+	return nil
+}
+
+func (f valuesFilter) String() string {
+	fn := getFuncName(f.fnc.Pointer())
+	return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt)
+}
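
A hedged sketch of FilterValues: treat two NaN float64 values as equal by filtering to pairs that are both NaN and short-circuiting the comparison (roughly the idea behind cmpopts.EquateNaNs; math import assumed):

	equateNaNs := cmp.FilterValues(func(x, y float64) bool {
		return math.IsNaN(x) && math.IsNaN(y)
	}, cmp.Comparer(func(_, _ float64) bool { return true }))
	_ = cmp.Equal(math.NaN(), math.NaN(), equateNaNs) // true under this option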
+
+// Ignore is an Option that causes all comparisons to be ignored.
+// This value is intended to be combined with FilterPath or FilterValues.
+// It is an error to pass an unfiltered Ignore option to Equal.
+func Ignore() Option { return ignore{} }
+
+type ignore struct{ core }
+
+func (ignore) isFiltered() bool                                                     { return false }
+func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} }
+func (ignore) apply(_ *state, _, _ reflect.Value)                                   { return }
+func (ignore) String() string                                                       { return "Ignore()" }
+
+// invalid is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields.
+type invalid struct{ core }
+
+func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} }
+func (invalid) apply(s *state, _, _ reflect.Value) {
+	const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported"
+	panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
+}
+
+// Transformer returns an Option that applies a transformation function that
+// converts values of a certain type into that of another.
+//
+// The transformer f must be a function "func(T) R" that converts values of
+// type T to those of type R and is implicitly filtered to input values
+// assignable to T. The transformer must not mutate T in any way.
+//
+// To help prevent some cases of infinite recursive cycles applying the
+// same transform to the output of itself (e.g., in the case where the
+// input and output types are the same), an implicit filter is added such that
+// a transformer is applicable only if that exact transformer is not already
+// in the tail of the Path since the last non-Transform step.
+//
+// The name is a user-provided label that is used as the Transform.Name in the
+// transformation PathStep. If empty, an arbitrary name is used.
+func Transformer(name string, f interface{}) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
+		panic(fmt.Sprintf("invalid transformer function: %T", f))
+	}
+	if name == "" {
+		name = "Ξ»" // Lambda-symbol as place-holder for anonymous transformer
+	}
+	if !isValid(name) {
+		panic(fmt.Sprintf("invalid name: %q", name))
+	}
+	tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
+	if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+		tr.typ = ti
+	}
+	return tr
+}
+
+type transformer struct {
+	core
+	name string
+	typ  reflect.Type  // T
+	fnc  reflect.Value // func(T) R
+}
+
+func (tr *transformer) isFiltered() bool { return tr.typ != nil }
+
+func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption {
+	for i := len(s.curPath) - 1; i >= 0; i-- {
+		if t, ok := s.curPath[i].(*transform); !ok {
+			break // Hit most recent non-Transform step
+		} else if tr == t.trans {
+			return nil // Cannot directly use same Transform
+		}
+	}
+	if tr.typ == nil || t.AssignableTo(tr.typ) {
+		return tr
+	}
+	return nil
+}
+
+func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
+	// Update path before calling the Transformer so that dynamic checks
+	// will use the updated path.
+	s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr})
+	defer s.curPath.pop()
+
+	vx = s.callTRFunc(tr.fnc, vx)
+	vy = s.callTRFunc(tr.fnc, vy)
+	s.compareAny(vx, vy)
+}
+
+func (tr transformer) String() string {
+	return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer()))
+}
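
A hedged usage sketch: compare strings case-insensitively by transforming both sides to lower case before the comparison (strings import assumed):

	toLower := cmp.Transformer("ToLower", strings.ToLower)
	_ = cmp.Equal("Hello", "hELLO", toLower) // true under this transformer
	// The implicit filter described above prevents the transformer from being
	// re-applied to its own string output, so the recursion terminates.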
+
+// Comparer returns an Option that determines whether two values are equal
+// to each other.
+//
+// The comparer f must be a function "func(T, T) bool" and is implicitly
+// filtered to input values assignable to T. If T is an interface, it is
+// possible that f is called with two values of different concrete types that
+// both implement T.
+//
+// The equality function must be:
+//	β€’ Symmetric: equal(x, y) == equal(y, x)
+//	β€’ Deterministic: equal(x, y) == equal(x, y)
+//	β€’ Pure: equal(x, y) does not modify x or y
+func Comparer(f interface{}) Option {
+	v := reflect.ValueOf(f)
+	if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
+		panic(fmt.Sprintf("invalid comparer function: %T", f))
+	}
+	cm := &comparer{fnc: v}
+	if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+		cm.typ = ti
+	}
+	return cm
+}
+
+type comparer struct {
+	core
+	typ reflect.Type  // T
+	fnc reflect.Value // func(T, T) bool
+}
+
+func (cm *comparer) isFiltered() bool { return cm.typ != nil }
+
+func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption {
+	if cm.typ == nil || t.AssignableTo(cm.typ) {
+		return cm
+	}
+	return nil
+}
+
+func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
+	eq := s.callTTBFunc(cm.fnc, vx, vy)
+	s.report(eq, vx, vy)
+}
+
+func (cm comparer) String() string {
+	return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer()))
+}
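
A hedged usage sketch: approximate floating-point equality via a Comparer (math import assumed; cmpopts.EquateApprox offers a more complete version of this idea):

	approx := cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-9
	})
	_ = cmp.Equal(0.1+0.2, 0.3, approx) // true under this comparer
	// Note the function is symmetric, deterministic, and pure, as required above.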
+
+// AllowUnexported returns an Option that forcibly allows operations on
+// unexported fields in certain structs, which are specified by passing in a
+// value of each struct type.
+//
+// Users of this option must understand that comparing on unexported fields
+// from external packages is not safe since changes in the internal
+// implementation of some external package may cause the result of Equal
+// to unexpectedly change. However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// For some cases, a custom Comparer should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the reflect.Type documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *regexp.Regexp types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using Comparers:
+//
+//	Comparer(func(x, y reflect.Type) bool { return x == y })
+//	Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
+// all unexported fields on specified struct types.
+func AllowUnexported(types ...interface{}) Option {
+	if !supportAllowUnexported {
+		panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS")
+	}
+	m := make(map[reflect.Type]bool)
+	for _, typ := range types {
+		t := reflect.TypeOf(typ)
+		if t.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("invalid struct type: %T", typ))
+		}
+		m[t] = true
+	}
+	return visibleStructs(m)
+}
+
+type visibleStructs map[reflect.Type]bool
+
+func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption {
+	panic("not implemented")
+}
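
A hedged usage sketch with a hypothetical locally defined type that has an unexported field:

	type account struct{ id int }
	// cmp.Equal(account{1}, account{1}) would panic: unexported field id.
	_ = cmp.Equal(account{1}, account{1}, cmp.AllowUnexported(account{})) // true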
+
+// reporter is an Option that configures how differences are reported.
+type reporter interface {
+	// TODO: Not exported yet.
+	//
+	// Perhaps add PushStep and PopStep and change Report to only accept
+	// a PathStep instead of the full-path? Adding a PushStep and PopStep makes
+	// it clear that we are traversing the value tree in a depth-first-search
+	// manner, which has an effect on how values are printed.
+
+	Option
+
+	// Report is called for every comparison made and will be provided with
+	// the two values being compared, the equality result, and the
+	// current path in the value tree. It is possible for x or y to be an
+	// invalid reflect.Value if one of the values is non-existent;
+	// which is possible with maps and slices.
+	Report(x, y reflect.Value, eq bool, p Path)
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+	switch opts := flattenOptions(nil, Options{src}); len(opts) {
+	case 0:
+		return nil
+	case 1:
+		return opts[0]
+	default:
+		return opts
+	}
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+	for _, opt := range src {
+		switch opt := opt.(type) {
+		case nil:
+			continue
+		case Options:
+			dst = flattenOptions(dst, opt)
+		case coreOption:
+			dst = append(dst, opt)
+		default:
+			panic(fmt.Sprintf("invalid option type: %T", opt))
+		}
+	}
+	return dst
+}
+
+// getFuncName returns a short function name from the pointer.
+// The string parsing logic works up until Go1.9.
+func getFuncName(p uintptr) string {
+	fnc := runtime.FuncForPC(p)
+	if fnc == nil {
+		return "<unknown>"
+	}
+	name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
+	if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
+		// Strip the package name from method name.
+		name = strings.TrimSuffix(name, ")-fm")
+		name = strings.TrimSuffix(name, ")·fm")
+		if i := strings.LastIndexByte(name, '('); i >= 0 {
+			methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
+			if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
+				methodName = methodName[j+1:] // E.g., "myfunc"
+			}
+			name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
+		}
+	}
+	if i := strings.LastIndexByte(name, '/'); i >= 0 {
+		// Strip the package name.
+		name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
+	}
+	return name
+}

vendor/github.com/google/go-cmp/cmp/path.go πŸ”—

@@ -0,0 +1,309 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+type (
+	// Path is a list of PathSteps describing the sequence of operations to get
+	// from some root type to the current position in the value tree.
+	// The first Path element is always an operation-less PathStep that exists
+	// simply to identify the initial type.
+	//
+	// When traversing structs with embedded structs, the embedded struct will
+	// always be accessed as a field before traversing the fields of the
+	// embedded struct themselves. That is, an exported field from the
+	// embedded struct will never be accessed directly from the parent struct.
+	Path []PathStep
+
+	// PathStep is a union-type for specific operations to traverse
+	// a value's tree structure. Users of this package never need to implement
+	// these types as values of this type will be returned by this package.
+	PathStep interface {
+		String() string
+		Type() reflect.Type // Resulting type after performing the path step
+		isPathStep()
+	}
+
+	// SliceIndex is an index operation on a slice or array at some index Key.
+	SliceIndex interface {
+		PathStep
+		Key() int // May return -1 if in a split state
+
+		// SplitKeys returns the indexes for indexing into slices in the
+		// x and y values, respectively. These indexes may differ due to the
+		// insertion or removal of an element in one of the slices, causing
+		// all of the indexes to be shifted. If an index is -1, then that
+		// indicates that the element does not exist in the associated slice.
+		//
+		// Key is guaranteed to return -1 if and only if the indexes returned
+		// by SplitKeys are not the same. SplitKeys will never return -1 for
+		// both indexes.
+		SplitKeys() (x int, y int)
+
+		isSliceIndex()
+	}
+	// MapIndex is an index operation on a map at some index Key.
+	MapIndex interface {
+		PathStep
+		Key() reflect.Value
+		isMapIndex()
+	}
+	// TypeAssertion represents a type assertion on an interface.
+	TypeAssertion interface {
+		PathStep
+		isTypeAssertion()
+	}
+	// StructField represents a struct field access on a field called Name.
+	StructField interface {
+		PathStep
+		Name() string
+		Index() int
+		isStructField()
+	}
+	// Indirect represents pointer indirection on the parent type.
+	Indirect interface {
+		PathStep
+		isIndirect()
+	}
+	// Transform is a transformation from the parent type to the current type.
+	Transform interface {
+		PathStep
+		Name() string
+		Func() reflect.Value
+
+		// Option returns the originally constructed Transformer option.
+		// The == operator can be used to detect the exact option used.
+		Option() Option
+
+		isTransform()
+	}
+)
+
+func (pa *Path) push(s PathStep) {
+	*pa = append(*pa, s)
+}
+
+func (pa *Path) pop() {
+	*pa = (*pa)[:len(*pa)-1]
+}
+
+// Last returns the last PathStep in the Path.
+// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
+func (pa Path) Last() PathStep {
+	return pa.Index(-1)
+}
+
+// Index returns the ith step in the Path and supports negative indexing.
+// A negative index starts counting from the tail of the Path such that -1
+// refers to the last step, -2 refers to the second-to-last step, and so on.
+// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
+func (pa Path) Index(i int) PathStep {
+	if i < 0 {
+		i = len(pa) + i
+	}
+	if i < 0 || i >= len(pa) {
+		return pathStep{}
+	}
+	return pa[i]
+}
+
+// String returns the simplified path to a node.
+// The simplified path only contains struct field accesses.
+//
+// For example:
+//	MyMap.MySlices.MyField
+func (pa Path) String() string {
+	var ss []string
+	for _, s := range pa {
+		if _, ok := s.(*structField); ok {
+			ss = append(ss, s.String())
+		}
+	}
+	return strings.TrimPrefix(strings.Join(ss, ""), ".")
+}
+
+// GoString returns the path to a specific node using Go syntax.
+//
+// For example:
+//	(*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
+func (pa Path) GoString() string {
+	var ssPre, ssPost []string
+	var numIndirect int
+	for i, s := range pa {
+		var nextStep PathStep
+		if i+1 < len(pa) {
+			nextStep = pa[i+1]
+		}
+		switch s := s.(type) {
+		case *indirect:
+			numIndirect++
+			pPre, pPost := "(", ")"
+			switch nextStep.(type) {
+			case *indirect:
+				continue // Next step is indirection, so let them batch up
+			case *structField:
+				numIndirect-- // Automatic indirection on struct fields
+			case nil:
+				pPre, pPost = "", "" // Last step; no need for parenthesis
+			}
+			if numIndirect > 0 {
+				ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
+				ssPost = append(ssPost, pPost)
+			}
+			numIndirect = 0
+			continue
+		case *transform:
+			ssPre = append(ssPre, s.trans.name+"(")
+			ssPost = append(ssPost, ")")
+			continue
+		case *typeAssertion:
+			// As a special-case, elide type assertions on anonymous types
+			// since they are typically generated dynamically and can be very
+			// verbose. For example, some transforms return interface{} because
+			// of Go's lack of generics, but typically take in and return the
+			// exact same concrete type.
+			if s.Type().PkgPath() == "" {
+				continue
+			}
+		}
+		ssPost = append(ssPost, s.String())
+	}
+	for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
+		ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
+	}
+	return strings.Join(ssPre, "") + strings.Join(ssPost, "")
+}
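
Paths are mostly consumed through path-filtering options. As a sketch (illustrative only; it assumes the package's FilterPath and Ignore options, defined elsewhere in options.go, and a hypothetical config type), a field can be excluded from comparison by matching its simplified path:

	package main

	import (
		"fmt"

		"github.com/google/go-cmp/cmp"
	)

	// config is a hypothetical type whose Updated field should not affect equality.
	type config struct {
		Name    string
		Updated int64
	}

	func main() {
		ignoreUpdated := cmp.FilterPath(func(p cmp.Path) bool {
			return p.String() == "Updated" // simplified path, see Path.String above
		}, cmp.Ignore())

		x := config{Name: "a", Updated: 1}
		y := config{Name: "a", Updated: 2}
		fmt.Println(cmp.Equal(x, y, ignoreUpdated)) // true
	}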
+
+type (
+	pathStep struct {
+		typ reflect.Type
+	}
+
+	sliceIndex struct {
+		pathStep
+		xkey, ykey int
+	}
+	mapIndex struct {
+		pathStep
+		key reflect.Value
+	}
+	typeAssertion struct {
+		pathStep
+	}
+	structField struct {
+		pathStep
+		name string
+		idx  int
+
+		// These fields are used for forcibly accessing an unexported field.
+		// pvx, pvy, and field are only valid if unexported is true.
+		unexported bool
+		force      bool                // Forcibly allow visibility
+		pvx, pvy   reflect.Value       // Parent values
+		field      reflect.StructField // Field information
+	}
+	indirect struct {
+		pathStep
+	}
+	transform struct {
+		pathStep
+		trans *transformer
+	}
+)
+
+func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) String() string {
+	if ps.typ == nil {
+		return "<nil>"
+	}
+	s := ps.typ.String()
+	if s == "" || strings.ContainsAny(s, "{}\n") {
+		return "root" // Type too simple or complex to print
+	}
+	return fmt.Sprintf("{%s}", s)
+}
+
+func (si sliceIndex) String() string {
+	switch {
+	case si.xkey == si.ykey:
+		return fmt.Sprintf("[%d]", si.xkey)
+	case si.ykey == -1:
+		// [5->?] means "I don't know where X[5] went"
+		return fmt.Sprintf("[%d->?]", si.xkey)
+	case si.xkey == -1:
+		// [?->3] means "I don't know where Y[3] came from"
+		return fmt.Sprintf("[?->%d]", si.ykey)
+	default:
+		// [5->3] means "X[5] moved to Y[3]"
+		return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
+	}
+}
+func (mi mapIndex) String() string      { return fmt.Sprintf("[%#v]", mi.key) }
+func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
+func (sf structField) String() string   { return fmt.Sprintf(".%s", sf.name) }
+func (in indirect) String() string      { return "*" }
+func (tf transform) String() string     { return fmt.Sprintf("%s()", tf.trans.name) }
+
+func (si sliceIndex) Key() int {
+	if si.xkey != si.ykey {
+		return -1
+	}
+	return si.xkey
+}
+func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey }
+func (mi mapIndex) Key() reflect.Value      { return mi.key }
+func (sf structField) Name() string         { return sf.name }
+func (sf structField) Index() int           { return sf.idx }
+func (tf transform) Name() string           { return tf.trans.name }
+func (tf transform) Func() reflect.Value    { return tf.trans.fnc }
+func (tf transform) Option() Option         { return tf.trans }
+
+func (pathStep) isPathStep()           {}
+func (sliceIndex) isSliceIndex()       {}
+func (mapIndex) isMapIndex()           {}
+func (typeAssertion) isTypeAssertion() {}
+func (structField) isStructField()     {}
+func (indirect) isIndirect()           {}
+func (transform) isTransform()         {}
+
+var (
+	_ SliceIndex    = sliceIndex{}
+	_ MapIndex      = mapIndex{}
+	_ TypeAssertion = typeAssertion{}
+	_ StructField   = structField{}
+	_ Indirect      = indirect{}
+	_ Transform     = transform{}
+
+	_ PathStep = sliceIndex{}
+	_ PathStep = mapIndex{}
+	_ PathStep = typeAssertion{}
+	_ PathStep = structField{}
+	_ PathStep = indirect{}
+	_ PathStep = transform{}
+)
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+	r, _ := utf8.DecodeRuneInString(id)
+	return unicode.IsUpper(r)
+}
+
+// isValid reports whether the identifier is valid.
+// Empty and underscore-only strings are not valid.
+func isValid(id string) bool {
+	ok := id != "" && id != "_"
+	for j, c := range id {
+		ok = ok && (j > 0 || !unicode.IsDigit(c))
+		ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c))
+	}
+	return ok
+}

vendor/github.com/google/go-cmp/cmp/reporter.go πŸ”—

@@ -0,0 +1,53 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+type defaultReporter struct {
+	Option
+	diffs  []string // List of differences, possibly truncated
+	ndiffs int      // Total number of differences
+	nbytes int      // Number of bytes in diffs
+	nlines int      // Number of lines in diffs
+}
+
+var _ reporter = (*defaultReporter)(nil)
+
+func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) {
+	if eq {
+		return // Ignore equal results
+	}
+	const maxBytes = 4096
+	const maxLines = 256
+	r.ndiffs++
+	if r.nbytes < maxBytes && r.nlines < maxLines {
+		sx := value.Format(x, value.FormatConfig{UseStringer: true})
+		sy := value.Format(y, value.FormatConfig{UseStringer: true})
+		if sx == sy {
+			// Unhelpful output, so use more exact formatting.
+			sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true})
+			sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true})
+		}
+		s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy)
+		r.diffs = append(r.diffs, s)
+		r.nbytes += len(s)
+		r.nlines += strings.Count(s, "\n")
+	}
+}
+
+func (r *defaultReporter) String() string {
+	s := strings.Join(r.diffs, "")
+	if r.ndiffs == len(r.diffs) {
+		return s
+	}
+	return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs))
+}

vendor/github.com/google/go-cmp/cmp/unsafe_panic.go πŸ”—

@@ -0,0 +1,15 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build purego appengine js
+
+package cmp
+
+import "reflect"
+
+const supportAllowUnexported = false
+
+func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value {
+	panic("unsafeRetrieveField is not implemented")
+}

vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go πŸ”—

@@ -0,0 +1,23 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !purego,!appengine,!js
+
+package cmp
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const supportAllowUnexported = true
+
+// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct
+// such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve.
+func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value {
+	return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
+}

vendor/github.com/pmezard/go-difflib/LICENSE πŸ”—

@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+    The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/pmezard/go-difflib/difflib/difflib.go πŸ”—

@@ -0,0 +1,772 @@
+// Package difflib is a partial port of Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable for producing text differences in a human-friendly way;
+// there is no guarantee that generated diffs are consumable by patch(1).
+package difflib
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+)
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func calculateRatio(matches, length int) float64 {
+	if length > 0 {
+		return 2.0 * float64(matches) / float64(length)
+	}
+	return 1.0
+}
+
+type Match struct {
+	A    int
+	B    int
+	Size int
+}
+
+type OpCode struct {
+	Tag byte
+	I1  int
+	I2  int
+	J1  int
+	J2  int
+}
+
+// SequenceMatcher compares sequences of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching".  The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk).  The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence.  This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence.  That's what
+// catches peoples' eyes.  The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff.  This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files).  That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing:  Basic R-O is cubic time worst case and quadratic time expected
+// case.  SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+	a              []string
+	b              []string
+	b2j            map[string][]int
+	IsJunk         func(string) bool
+	autoJunk       bool
+	bJunk          map[string]struct{}
+	matchingBlocks []Match
+	fullBCount     map[string]int
+	bPopular       map[string]struct{}
+	opCodes        []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+	m := SequenceMatcher{autoJunk: true}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+	isJunk func(string) bool) *SequenceMatcher {
+
+	m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+	m.SetSeq1(a)
+	m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+	if &a == &m.a {
+		return
+	}
+	m.a = a
+	m.matchingBlocks = nil
+	m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+	if &b == &m.b {
+		return
+	}
+	m.b = b
+	m.matchingBlocks = nil
+	m.opCodes = nil
+	m.fullBCount = nil
+	m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+	// Populate line -> index mapping
+	b2j := map[string][]int{}
+	for i, s := range m.b {
+		indices := b2j[s]
+		indices = append(indices, i)
+		b2j[s] = indices
+	}
+
+	// Purge junk elements
+	m.bJunk = map[string]struct{}{}
+	if m.IsJunk != nil {
+		junk := m.bJunk
+		for s, _ := range b2j {
+			if m.IsJunk(s) {
+				junk[s] = struct{}{}
+			}
+		}
+		for s, _ := range junk {
+			delete(b2j, s)
+		}
+	}
+
+	// Purge remaining popular elements
+	popular := map[string]struct{}{}
+	n := len(m.b)
+	if m.autoJunk && n >= 200 {
+		ntest := n/100 + 1
+		for s, indices := range b2j {
+			if len(indices) > ntest {
+				popular[s] = struct{}{}
+			}
+		}
+		for s, _ := range popular {
+			delete(b2j, s)
+		}
+	}
+	m.bPopular = popular
+	m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+	_, ok := m.bJunk[s]
+	return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//     alo <= i <= i+k <= ahi
+//     blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+//     k >= k'
+//     i <= i'
+//     and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block.  Then that block is extended as
+// far as possible by matching (only) junk elements on both sides.  So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+	// CAUTION:  stripping common prefix or suffix would be incorrect.
+	// E.g.,
+	//    ab
+	//    acab
+	// Longest matching block is "ab", but if common prefix is
+	// stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
+	// strip, so ends up claiming that ab is changed to acab by
+	// inserting "ca" in the middle.  That's minimal but unintuitive:
+	// "it's obvious" that someone inserted "ac" at the front.
+	// Windiff ends up at the same place as diff, but by pairing up
+	// the unique 'b's and then matching the first two 'a's.
+	besti, bestj, bestsize := alo, blo, 0
+
+	// find longest junk-free match
+	// during an iteration of the loop, j2len[j] = length of longest
+	// junk-free match ending with a[i-1] and b[j]
+	j2len := map[int]int{}
+	for i := alo; i != ahi; i++ {
+		// look at all instances of a[i] in b; note that because
+		// b2j has no junk keys, the loop is skipped if a[i] is junk
+		newj2len := map[int]int{}
+		for _, j := range m.b2j[m.a[i]] {
+			// a[i] matches b[j]
+			if j < blo {
+				continue
+			}
+			if j >= bhi {
+				break
+			}
+			k := j2len[j-1] + 1
+			newj2len[j] = k
+			if k > bestsize {
+				besti, bestj, bestsize = i-k+1, j-k+1, k
+			}
+		}
+		j2len = newj2len
+	}
+
+	// Extend the best by non-junk elements on each end.  In particular,
+	// "popular" non-junk elements aren't in b2j, which greatly speeds
+	// the inner loop above, but also means "the best" match so far
+	// doesn't contain any junk *or* popular non-junk elements.
+	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		!m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize += 1
+	}
+
+	// Now that we have a wholly interesting match (albeit possibly
+	// empty!), we may as well suck up the matching junk on each
+	// side of it too.  Can't think of a good reason not to, and it
+	// saves post-processing the (possibly considerable) expense of
+	// figuring out what to do with it.  In the case of an empty
+	// interesting match, this is clearly the right thing to do,
+	// because no other kind of match is possible in the regions.
+	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize += 1
+	}
+
+	return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+	if m.matchingBlocks != nil {
+		return m.matchingBlocks
+	}
+
+	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+		match := m.findLongestMatch(alo, ahi, blo, bhi)
+		i, j, k := match.A, match.B, match.Size
+		if match.Size > 0 {
+			if alo < i && blo < j {
+				matched = matchBlocks(alo, i, blo, j, matched)
+			}
+			matched = append(matched, match)
+			if i+k < ahi && j+k < bhi {
+				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+			}
+		}
+		return matched
+	}
+	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+	// It's possible that we have adjacent equal blocks in the
+	// matching_blocks list now.
+	nonAdjacent := []Match{}
+	i1, j1, k1 := 0, 0, 0
+	for _, b := range matched {
+		// Is this block adjacent to i1, j1, k1?
+		i2, j2, k2 := b.A, b.B, b.Size
+		if i1+k1 == i2 && j1+k1 == j2 {
+			// Yes, so collapse them -- this just increases the length of
+			// the first block by the length of the second, and the first
+			// block so lengthened remains the block to compare against.
+			k1 += k2
+		} else {
+			// Not adjacent.  Remember the first block (k1==0 means it's
+			// the dummy we started with), and make the second block the
+			// new block to compare against.
+			if k1 > 0 {
+				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+			}
+			i1, j1, k1 = i2, j2, k2
+		}
+	}
+	if k1 > 0 {
+		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+	}
+
+	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+	m.matchingBlocks = nonAdjacent
+	return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace):  a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete):   a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert):   b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal):    a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+	if m.opCodes != nil {
+		return m.opCodes
+	}
+	i, j := 0, 0
+	matching := m.GetMatchingBlocks()
+	opCodes := make([]OpCode, 0, len(matching))
+	for _, m := range matching {
+		//  invariant:  we've pumped out correct diffs to change
+		//  a[:i] into b[:j], and the next matching block is
+		//  a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+		//  out a diff to change a[i:ai] into b[j:bj], pump out
+		//  the matching block, and move (i,j) beyond the match
+		ai, bj, size := m.A, m.B, m.Size
+		tag := byte(0)
+		if i < ai && j < bj {
+			tag = 'r'
+		} else if i < ai {
+			tag = 'd'
+		} else if j < bj {
+			tag = 'i'
+		}
+		if tag > 0 {
+			opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+		}
+		i, j = ai+size, bj+size
+		// the list of matching blocks is terminated by a
+		// sentinel with size 0
+		if size > 0 {
+			opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+		}
+	}
+	m.opCodes = opCodes
+	return m.opCodes
+}
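
A short sketch of the opcodes in practice (illustrative only, using the exported API of this package):

	package main

	import (
		"fmt"

		"github.com/pmezard/go-difflib/difflib"
	)

	func main() {
		a := []string{"one", "two", "three", "four"}
		b := []string{"zero", "one", "two", "four"}

		m := difflib.NewMatcher(a, b)
		for _, op := range m.GetOpCodes() {
			// Tag is one of 'r', 'd', 'i', 'e' as documented above.
			fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
		}
		fmt.Println(m.Ratio()) // 0.75 here: 2*M/T with M=3 matching elements, T=8 total
	}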
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a list of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+	if n < 0 {
+		n = 3
+	}
+	codes := m.GetOpCodes()
+	if len(codes) == 0 {
+		codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
+	}
+	// Fixup leading and trailing groups if they show no changes.
+	if codes[0].Tag == 'e' {
+		c := codes[0]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+	}
+	if codes[len(codes)-1].Tag == 'e' {
+		c := codes[len(codes)-1]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+	}
+	nn := n + n
+	groups := [][]OpCode{}
+	group := []OpCode{}
+	for _, c := range codes {
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		// End the current group and start a new one whenever
+		// there is a large range with no changes.
+		if c.Tag == 'e' && i2-i1 > nn {
+			group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+				j1, min(j2, j1+n)})
+			groups = append(groups, group)
+			group = []OpCode{}
+			i1, j1 = max(i1, i2-n), max(j1, j2-n)
+		}
+		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+	}
+	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+		groups = append(groups, group)
+	}
+	return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+	matches := 0
+	for _, m := range m.GetMatchingBlocks() {
+		matches += m.Size
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+	// viewing a and b as multisets, set matches to the cardinality
+	// of their intersection; this counts the number of matches
+	// without regard to order, so is clearly an upper bound
+	if m.fullBCount == nil {
+		m.fullBCount = map[string]int{}
+		for _, s := range m.b {
+			m.fullBCount[s] = m.fullBCount[s] + 1
+		}
+	}
+
+	// avail[x] is the number of times x appears in 'b' less the
+	// number of times we've seen it in 'a' so far ... kinda
+	avail := map[string]int{}
+	matches := 0
+	for _, s := range m.a {
+		n, ok := avail[s]
+		if !ok {
+			n = m.fullBCount[s]
+		}
+		avail[s] = n - 1
+		if n > 0 {
+			matches += 1
+		}
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+	la, lb := len(m.a), len(m.b)
+	return calculateRatio(min(la, lb), la+lb)
+}
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 1 {
+		return fmt.Sprintf("%d", beginning)
+	}
+	if length == 0 {
+		beginning -= 1 // empty ranges begin at line just before the range
+	}
+	return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+	A        []string // First sequence lines
+	FromFile string   // First file name
+	FromDate string   // First file time
+	B        []string // Second sequence lines
+	ToFile   string   // Second file name
+	ToDate   string   // Second file time
+	Eol      string   // Headers end of line, defaults to LF
+	Context  int      // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context.  The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline.  This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// field to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times.  Any or all of these may be specified using strings for
+// diff.FromFile, diff.ToFile, diff.FromDate, and diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return string(w.Bytes()), err
+}
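
A minimal end-to-end sketch (illustrative only) that prepares line slices with SplitLines and renders a unified diff:

	package main

	import (
		"fmt"

		"github.com/pmezard/go-difflib/difflib"
	)

	func main() {
		diff := difflib.UnifiedDiff{
			A:        difflib.SplitLines("foo\nbar\nbaz\n"),
			B:        difflib.SplitLines("foo\nbaz\nqux\n"),
			FromFile: "a.txt",
			ToFile:   "b.txt",
			Context:  3,
		}
		text, err := difflib.GetUnifiedDiffString(diff)
		if err != nil {
			panic(err)
		}
		fmt.Print(text)
	}

GetContextDiffString below works the same way, taking a ContextDiff value instead.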
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 0 {
+		beginning -= 1 // empty ranges begin at line just before the range
+	}
+	if length <= 1 {
+		return fmt.Sprintf("%d", beginning)
+	}
+	return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context
+// which defaults to three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times.  Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	var diffErr error
+	wf := func(format string, args ...interface{}) {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
+	ws := func(s string) {
+		_, err := buf.WriteString(s)
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	prefix := map[byte]string{
+		'i': "+ ",
+		'd': "- ",
+		'r': "! ",
+		'e': "  ",
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+			}
+		}
+
+		first, last := g[0], g[len(g)-1]
+		ws("***************" + diff.Eol)
+
+		range1 := formatRangeContext(first.I1, last.I2)
+		wf("*** %s ****%s", range1, diff.Eol)
+		for _, c := range g {
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, cc := range g {
+					if cc.Tag == 'i' {
+						continue
+					}
+					for _, line := range diff.A[cc.I1:cc.I2] {
+						ws(prefix[cc.Tag] + line)
+					}
+				}
+				break
+			}
+		}
+
+		range2 := formatRangeContext(first.J1, last.J2)
+		wf("--- %s ----%s", range2, diff.Eol)
+		for _, c := range g {
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, cc := range g {
+					if cc.Tag == 'd' {
+						continue
+					}
+					for _, line := range diff.B[cc.J1:cc.J2] {
+						ws(prefix[cc.Tag] + line)
+					}
+				}
+				break
+			}
+		}
+	}
+	return diffErr
+}
+
+// Like WriteContextDiff but returns the diff as a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteContextDiff(w, diff)
+	return string(w.Bytes()), err
+}
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}

vendor/github.com/stretchr/testify/LICENSE πŸ”—

@@ -0,0 +1,22 @@
+Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
+
+Please consider promoting this project if you find it useful.
+
+Permission is hereby granted, free of charge, to any person 
+obtaining a copy of this software and associated documentation 
+files (the "Software"), to deal in the Software without restriction, 
+including without limitation the rights to use, copy, modify, merge, 
+publish, distribute, sublicense, and/or sell copies of the Software, 
+and to permit persons to whom the Software is furnished to do so, 
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, 
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT 
+OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE 
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/stretchr/testify/assert/assertion_format.go πŸ”—

@@ -0,0 +1,484 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+	http "net/http"
+	url "net/url"
+	time "time"
+)
+
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Condition(t, comp, append([]interface{}{msg}, args...)...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+//    assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+//    assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+//    assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return DirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
+// Emptyf asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//  assert.Emptyf(t, obj, "error message %s", "formatted")
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Empty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+//    assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
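
For illustration (not part of the vendored file; TestAdd is a hypothetical test), the formatted variants are typically called from a test function:

	package mypkg_test

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestAdd(t *testing.T) {
		got := 2 + 2
		// The trailing arguments are rendered into the failure message on mismatch.
		assert.Equalf(t, 4, got, "unexpected sum for inputs %d and %d", 2, 2)
	}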
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   assert.EqualErrorf(t, err,  expectedErrorString, "error message %s", "formatted")
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+//    assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.Errorf(t, err, "error message %s", "formatted") {
+// 	   assert.Equal(t, expectedErrorf, err)
+//   }
+func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Error(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+//    assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Failf reports a failure but allows test execution to continue.
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// FailNowf fails the test immediately, stopping its execution.
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+//    assert.Falsef(t, myBool, "error message %s", "formatted")
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return False(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return FileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+//  assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+//  assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+//  assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+//  assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+//  assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+//    assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// 	 assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+//  assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+//    assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Len(t, object, length, append([]interface{}{msg}, args...)...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+//    assert.Nilf(t, err, "error message %s", "formatted")
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Nil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// 	   assert.Equal(t, expectedObj, actualObj)
+//   }
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NoError(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+//    assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+//    assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+//    assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//  if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+//    assert.Equal(t, "two", obj[1])
+//  }
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+//    assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+//    assert.NotNilf(t, err, "error message %s", "formatted")
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotNil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+//   assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotPanics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+//  assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//  assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+//    assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotZero(t, i, append([]interface{}{msg}, args...)...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+//   assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Panics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+//   assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+//  assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//  assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+//    assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// Truef asserts that the specified value is true.
+//
+//    assert.Truef(t, myBool, "error message %s", "formatted")
+func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return True(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+//   assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Zero(t, i, append([]interface{}{msg}, args...)...)
+}
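
For orientation (this sketch is not part of the diff): each formatted `*f` helper above simply appends the format string and its arguments to msgAndArgs before delegating to its unformatted counterpart. A minimal, hypothetical test using these helpers might look as follows; the package, test name and values are illustrative assumptions, not code from this change.

package bug_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestFormattedAssertSketch is a hypothetical example showing how the *f
// helpers generated in assertion_format.go are called from a test.
func TestFormattedAssertSketch(t *testing.T) {
	comments := []string{"original comment", "edited comment"}

	// Each *f helper forwards to its plain counterpart, appending the format
	// string and arguments to msgAndArgs (see the generated code above).
	assert.Lenf(t, comments, 2, "want %d comments", 2)
	assert.Equalf(t, "edited comment", comments[1], "comment %d should carry the edit", 1)
	assert.Truef(t, len(comments[0]) > 0, "comment %d must not be empty", 0)
}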

vendor/github.com/stretchr/testify/assert/assertion_forward.go

@@ -0,0 +1,956 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+	http "net/http"
+	url "net/url"
+	time "time"
+)
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Condition(a.t, comp, msgAndArgs...)
+}
+
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Conditionf(a.t, comp, msg, args...)
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+//    a.Contains("Hello World", "World")
+//    a.Contains(["Hello", "World"], "World")
+//    a.Contains({"Hello": "World"}, "Hello")
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+//    a.Containsf("Hello World", "World", "error message %s", "formatted")
+//    a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+//    a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return ElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
+// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//  a.Empty(obj)
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Empty(a.t, object, msgAndArgs...)
+}
+
+// Emptyf asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//  a.Emptyf(obj, "error message %s", "formatted")
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Emptyf(a.t, object, msg, args...)
+}
+
+// Equal asserts that two objects are equal.
+//
+//    a.Equal(123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   a.EqualError(err,  expectedErrorString)
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   a.EqualErrorf(err,  expectedErrorString, "error message %s", "formatted")
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+//    a.EqualValues(uint32(123), int32(123))
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+//    a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+//    a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Equalf(a.t, expected, actual, msg, args...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if a.Error(err) {
+// 	   assert.Equal(t, expectedError, err)
+//   }
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Error(a.t, err, msgAndArgs...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if a.Errorf(err, "error message %s", "formatted") {
+// 	   assert.Equal(t, expectedErrorf, err)
+//   }
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Errorf(a.t, err, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+//    a.Exactly(int32(123), int64(123))
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+//    a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Exactlyf(a.t, expected, actual, msg, args...)
+}
+
+// Fail reports a failure through the underlying TestingT.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails the test immediately.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNowf fails the test immediately.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure through the underlying TestingT.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Failf(a.t, failureMessage, msg, args...)
+}
+
+// False asserts that the specified value is false.
+//
+//    a.False(myBool)
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return False(a.t, value, msgAndArgs...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+//    a.Falsef(myBool, "error message %s", "formatted")
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return FileExistsf(a.t, path, msg, args...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+//  a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+//  a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+//  a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+//  a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+//  a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+//  a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+//  a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+//  a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+//  a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+//  a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+//    a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+//    a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// 	 a.InDelta(math.Pi, (22 / 7.0), 0.01)
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// 	 a.InDeltaf(math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+//  a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+//  a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return JSONEqf(a.t, expected, actual, msg, args...)
+}
+
+// Len asserts that the specified object has a specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+//    a.Len(mySlice, 3)
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Len(a.t, object, length, msgAndArgs...)
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+//    a.Lenf(mySlice, 3, "error message %s", "formatted")
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Lenf(a.t, object, length, msg, args...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+//    a.Nil(err)
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Nil(a.t, object, msgAndArgs...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+//    a.Nilf(err, "error message %s", "formatted")
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Nilf(a.t, object, msg, args...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if a.NoError(err) {
+// 	   assert.Equal(t, expectedObj, actualObj)
+//   }
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NoError(a.t, err, msgAndArgs...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if a.NoErrorf(err, "error message %s", "formatted") {
+// 	   assert.Equal(t, expectedObj, actualObj)
+//   }
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NoErrorf(a.t, err, msg, args...)
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+//    a.NotContains("Hello World", "Earth")
+//    a.NotContains(["Hello", "World"], "Earth")
+//    a.NotContains({"Hello": "World"}, "Earth")
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+//    a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+//    a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+//    a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotContainsf(a.t, s, contains, msg, args...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//  if a.NotEmpty(obj) {
+//    assert.Equal(t, "two", obj[1])
+//  }
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//  if a.NotEmptyf(obj, "error message %s", "formatted") {
+//    assert.Equal(t, "two", obj[1])
+//  }
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+//    a.NotEqual(obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+//    a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+//    a.NotNil(err)
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+//    a.NotNilf(err, "error message %s", "formatted")
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+//   a.NotPanics(func(){ RemainCalm() })
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+//   a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+//  a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+//  a.NotRegexp("^start", "it's not starting")
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+//  a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//  a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+//    a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+//    a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotSubsetf(a.t, list, subset, msg, args...)
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotZero(a.t, i, msgAndArgs...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotZerof(a.t, i, msg, args...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+//   a.Panics(func(){ GoCrazy() })
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Panics(a.t, f, msgAndArgs...)
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+//   a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+//   a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+//   a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Panicsf(a.t, f, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+//  a.Regexp(regexp.MustCompile("start"), "it's starting")
+//  a.Regexp("start...$", "it's not starting")
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+//  a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//  a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+//    a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+//    a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Subsetf(a.t, list, subset, msg, args...)
+}
+
+// True asserts that the specified value is true.
+//
+//    a.True(myBool)
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return True(a.t, value, msgAndArgs...)
+}
+
+// Truef asserts that the specified value is true.
+//
+//    a.Truef(myBool, "error message %s", "formatted")
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Truef(a.t, value, msg, args...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+//   a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+//   a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
+
+// Zero asserts that i is the zero value for its type.
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Zero(a.t, i, msgAndArgs...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Zerof(a.t, i, msg, args...)
+}
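
The forwarder methods above let a test hold a single *Assertions value instead of passing t to every call. A minimal, hypothetical usage sketch follows; assert.New is defined elsewhere in the package (outside this excerpt) and the names and values are purely illustrative, not code from this change.

package bug_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestAssertionsForwarderSketch is a hypothetical example of the method
// forwarders generated in assertion_forward.go.
func TestAssertionsForwarderSketch(t *testing.T) {
	a := assert.New(t) // wraps t in an *Assertions value; defined outside this excerpt

	status := "closed"
	labels := []string{"bug", "ui"}

	// Each method forwards to the package-level function with a.t, so call
	// sites stay compact, for example in table-driven tests.
	a.Equal("closed", status)
	a.Contains(labels, "bug")
	a.NotEmpty(labels)
	a.Lenf(labels, 2, "want %d labels", 2)
}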

vendor/github.com/stretchr/testify/assert/assertions.go

@@ -0,0 +1,1394 @@
+package assert
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/pmezard/go-difflib/difflib"
+)
+
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+	Errorf(format string, args ...interface{})
+}
+
+// ComparisonAssertionFunc is a common function prototype when comparing two values.  Can be useful
+// for table driven tests.
+type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool
+
+// ValueAssertionFunc is a common function prototype when validating a single value.  Can be useful
+// for table driven tests.
+type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool
+
+// BoolAssertionFunc is a common function prototype when validating a bool value.  Can be useful
+// for table driven tests.
+type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
+
+// ErrorAssertionFunc is a common function prototype when validating an error value.  Can be useful
+// for table driven tests.
+type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
+
+// Comparison a custom function that returns true on success and false on failure
+type Comparison func() (success bool)
+
+/*
+	Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
+func ObjectsAreEqual(expected, actual interface{}) bool {
+	if expected == nil || actual == nil {
+		return expected == actual
+	}
+
+	exp, ok := expected.([]byte)
+	if !ok {
+		return reflect.DeepEqual(expected, actual)
+	}
+
+	act, ok := actual.([]byte)
+	if !ok {
+		return false
+	}
+	if exp == nil || act == nil {
+		return exp == nil && act == nil
+	}
+	return bytes.Equal(exp, act)
+}
+
+// ObjectsAreEqualValues gets whether two objects are equal, or if their
+// values are equal.
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+	if ObjectsAreEqual(expected, actual) {
+		return true
+	}
+
+	actualType := reflect.TypeOf(actual)
+	if actualType == nil {
+		return false
+	}
+	expectedValue := reflect.ValueOf(expected)
+	if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
+		// Attempt comparison after type conversion
+		return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+	}
+
+	return false
+}
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns an array of strings containing the file and line number
+// of each stack frame leading from the current test to the assert call that
+// failed.
+func CallerInfo() []string {
+
+	pc := uintptr(0)
+	file := ""
+	line := 0
+	ok := false
+	name := ""
+
+	callers := []string{}
+	for i := 0; ; i++ {
+		pc, file, line, ok = runtime.Caller(i)
+		if !ok {
+			// The breaks below failed to terminate the loop, and we ran off the
+			// end of the call stack.
+			break
+		}
+
+		// This is a huge edge case, but it will panic if this is the case, see #180
+		if file == "<autogenerated>" {
+			break
+		}
+
+		f := runtime.FuncForPC(pc)
+		if f == nil {
+			break
+		}
+		name = f.Name()
+
+		// testing.tRunner is the standard library function that calls
+		// tests. Subtests are called directly by tRunner, without going through
+		// the Test/Benchmark/Example function that contains the t.Run calls, so
+		// with subtests we should break when we hit tRunner, without adding it
+		// to the list of callers.
+		if name == "testing.tRunner" {
+			break
+		}
+
+		parts := strings.Split(file, "/")
+		file = parts[len(parts)-1]
+		if len(parts) > 1 {
+			dir := parts[len(parts)-2]
+			if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+				callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+			}
+		}
+
+		// Drop the package
+		segments := strings.Split(name, ".")
+		name = segments[len(segments)-1]
+		if isTest(name, "Test") ||
+			isTest(name, "Benchmark") ||
+			isTest(name, "Example") {
+			break
+		}
+	}
+
+	return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+	if !strings.HasPrefix(name, prefix) {
+		return false
+	}
+	if len(name) == len(prefix) { // "Test" is ok
+		return true
+	}
+	rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+	return !unicode.IsLower(rune)
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+	if len(msgAndArgs) == 0 || msgAndArgs == nil {
+		return ""
+	}
+	if len(msgAndArgs) == 1 {
+		return msgAndArgs[0].(string)
+	}
+	if len(msgAndArgs) > 1 {
+		return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+	}
+	return ""
+}
+
+// Aligns the provided message so that all lines after the first line start at the same location as the first line.
+// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
+// basis on which the alignment occurs).
+func indentMessageLines(message string, longestLabelLen int) string {
+	outBuf := new(bytes.Buffer)
+
+	for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+		// no need to align first line because it starts at the correct location (after the label)
+		if i != 0 {
+			// append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
+			outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
+		}
+		outBuf.WriteString(scanner.Text())
+	}
+
+	return outBuf.String()
+}
+
+type failNower interface {
+	FailNow()
+}
+
+// FailNow fails the test immediately.
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	Fail(t, failureMessage, msgAndArgs...)
+
+	// We cannot extend TestingT with FailNow() and
+	// maintain backwards compatibility, so we fallback
+	// to panicking when FailNow is not available in
+	// TestingT.
+	// See issue #263
+
+	if t, ok := t.(failNower); ok {
+		t.FailNow()
+	} else {
+		panic("test failed and t is missing `FailNow()`")
+	}
+	return false
+}
+
+// Fail reports a failure through the provided TestingT.
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	content := []labeledContent{
+		{"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")},
+		{"Error", failureMessage},
+	}
+
+	// Add test name if the Go version supports it
+	if n, ok := t.(interface {
+		Name() string
+	}); ok {
+		content = append(content, labeledContent{"Test", n.Name()})
+	}
+
+	message := messageFromMsgAndArgs(msgAndArgs...)
+	if len(message) > 0 {
+		content = append(content, labeledContent{"Messages", message})
+	}
+
+	t.Errorf("\n%s", ""+labeledOutput(content...))
+
+	return false
+}
+
+type labeledContent struct {
+	label   string
+	content string
+}
+
+// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
+//
+//   \t{{label}}:{{align_spaces}}\t{{content}}\n
+//
+// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
+// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
+// alignment is achieved, "\t{{content}}\n" is added for the output.
+//
+// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
+func labeledOutput(content ...labeledContent) string {
+	longestLabel := 0
+	for _, v := range content {
+		if len(v.label) > longestLabel {
+			longestLabel = len(v.label)
+		}
+	}
+	var output string
+	for _, v := range content {
+		output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+	}
+	return output
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+//    assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+	if object == nil {
+		return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
+	}
+	if !reflect.TypeOf(object).Implements(interfaceType) {
+		return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
+	}
+
+	return true
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+		return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+	}
+
+	return true
+}
+
+// Equal asserts that two objects are equal.
+//
+//    assert.Equal(t, 123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if err := validateEqualArgs(expected, actual); err != nil {
+		return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
+			expected, actual, err), msgAndArgs...)
+	}
+
+	if !ObjectsAreEqual(expected, actual) {
+		diff := diff(expected, actual)
+		expected, actual = formatUnequalValues(expected, actual)
+		return Fail(t, fmt.Sprintf("Not equal: \n"+
+			"expected: %s\n"+
+			"actual  : %s%s", expected, actual, diff), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// formatUnequalValues takes two values of arbitrary types and returns string
+// representations appropriate to be presented to the user.
+//
+// If the values are not of like type, the returned strings will be prefixed
+// with the type name, and the value will be enclosed in parenthesis similar
+// to a type conversion in the Go grammar.
+func formatUnequalValues(expected, actual interface{}) (e string, a string) {
+	if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+		return fmt.Sprintf("%T(%#v)", expected, expected),
+			fmt.Sprintf("%T(%#v)", actual, actual)
+	}
+
+	return fmt.Sprintf("%#v", expected),
+		fmt.Sprintf("%#v", actual)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+//    assert.EqualValues(t, uint32(123), int32(123))
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if !ObjectsAreEqualValues(expected, actual) {
+		diff := diff(expected, actual)
+		expected, actual = formatUnequalValues(expected, actual)
+		return Fail(t, fmt.Sprintf("Not equal: \n"+
+			"expected: %s\n"+
+			"actual  : %s%s", expected, actual, diff), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+//    assert.Exactly(t, int32(123), int64(123))
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	aType := reflect.TypeOf(expected)
+	bType := reflect.TypeOf(actual)
+
+	if aType != bType {
+		return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+	}
+
+	return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+//    assert.NotNil(t, err)
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if !isNil(object) {
+		return true
+	}
+	return Fail(t, "Expected value not to be nil.", msgAndArgs...)
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+	if object == nil {
+		return true
+	}
+
+	value := reflect.ValueOf(object)
+	kind := value.Kind()
+	if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
+		return true
+	}
+
+	return false
+}
+
+// Nil asserts that the specified object is nil.
+//
+//    assert.Nil(t, err)
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if isNil(object) {
+		return true
+	}
+	return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+	// get nil case out of the way
+	if object == nil {
+		return true
+	}
+
+	objValue := reflect.ValueOf(object)
+
+	switch objValue.Kind() {
+	// collection types are empty when they have no element
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		return objValue.Len() == 0
+	// pointers are empty if nil or if the value they point to is empty
+	case reflect.Ptr:
+		if objValue.IsNil() {
+			return true
+		}
+		deref := objValue.Elem().Interface()
+		return isEmpty(deref)
+	// for all other types, compare against the zero value
+	default:
+		zero := reflect.Zero(objValue.Type())
+		return reflect.DeepEqual(object, zero.Interface())
+	}
+}
+
+// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//  assert.Empty(t, obj)
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	pass := isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+//  if assert.NotEmpty(t, obj) {
+//    assert.Equal(t, "two", obj[1])
+//  }
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	pass := !isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// getLen tries to get the length of an object.
+// It returns (false, 0) if that is impossible.
+func getLen(x interface{}) (ok bool, length int) {
+	v := reflect.ValueOf(x)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+	return true, v.Len()
+}
+
+// Len asserts that the specified object has a specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+//    assert.Len(t, mySlice, 3)
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	ok, l := getLen(object)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+	}
+
+	if l != length {
+		return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+	}
+	return true
+}
+
+// True asserts that the specified value is true.
+//
+//    assert.True(t, myBool)
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if value != true {
+		return Fail(t, "Should be true", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// False asserts that the specified value is false.
+//
+//    assert.False(t, myBool)
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if value != false {
+		return Fail(t, "Should be false", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+//    assert.NotEqual(t, obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if err := validateEqualArgs(expected, actual); err != nil {
+		return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+			expected, actual, err), msgAndArgs...)
+	}
+
+	if ObjectsAreEqual(expected, actual) {
+		return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// includeElement loops over the list and checks whether it includes the element.
+// It returns (false, false) if that is impossible,
+// (true, false) if the element was not found,
+// and (true, true) if the element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+	listValue := reflect.ValueOf(list)
+	elementValue := reflect.ValueOf(element)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+			found = false
+		}
+	}()
+
+	if reflect.TypeOf(list).Kind() == reflect.String {
+		return true, strings.Contains(listValue.String(), elementValue.String())
+	}
+
+	if reflect.TypeOf(list).Kind() == reflect.Map {
+		mapKeys := listValue.MapKeys()
+		for i := 0; i < len(mapKeys); i++ {
+			if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+				return true, true
+			}
+		}
+		return true, false
+	}
+
+	for i := 0; i < listValue.Len(); i++ {
+		if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+			return true, true
+		}
+	}
+	return true, false
+
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+//    assert.Contains(t, "Hello World", "World")
+//    assert.Contains(t, ["Hello", "World"], "World")
+//    assert.Contains(t, {"Hello": "World"}, "Hello")
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+	}
+	if !found {
+		return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+//    assert.NotContains(t, "Hello World", "Earth")
+//    assert.NotContains(t, ["Hello", "World"], "Earth")
+//    assert.NotContains(t, {"Hello": "World"}, "Earth")
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+	}
+	if found {
+		return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+	}
+
+	return true
+
+}
+
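The doc comments above use pseudo-syntax for the collection literals; in real Go a Contains/NotContains call would look like this hypothetical sketch:

	package example_test

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestContains(t *testing.T) {
		assert.Contains(t, "Hello World", "World")                       // substring
		assert.Contains(t, []string{"Hello", "World"}, "World")          // slice element
		assert.Contains(t, map[string]string{"Hello": "World"}, "Hello") // map key

		assert.NotContains(t, "Hello World", "Earth")
		assert.NotContains(t, []string{"Hello", "World"}, "Earth")
	}
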
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+//    assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if subset == nil {
+		return true // we consider nil to be equal to the nil set
+	}
+
+	subsetValue := reflect.ValueOf(subset)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+
+	listKind := reflect.TypeOf(list).Kind()
+	subsetKind := reflect.TypeOf(subset).Kind()
+
+	if listKind != reflect.Array && listKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+	}
+
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+	}
+
+	for i := 0; i < subsetValue.Len(); i++ {
+		element := subsetValue.Index(i).Interface()
+		ok, found := includeElement(list, element)
+		if !ok {
+			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+		}
+		if !found {
+			return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+		}
+	}
+
+	return true
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+//    assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if subset == nil {
+		return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+	}
+
+	subsetValue := reflect.ValueOf(subset)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+
+	listKind := reflect.TypeOf(list).Kind()
+	subsetKind := reflect.TypeOf(subset).Kind()
+
+	if listKind != reflect.Array && listKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+	}
+
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+	}
+
+	for i := 0; i < subsetValue.Len(); i++ {
+		element := subsetValue.Index(i).Interface()
+		ok, found := includeElement(list, element)
+		if !ok {
+			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+		}
+		if !found {
+			return true
+		}
+	}
+
+	return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+}
+
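Again, the "[1, 2, 3]" notation in the doc comments is pseudo-syntax; a hypothetical but valid Go usage of Subset and NotSubset would be:

	package example_test

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestSubset(t *testing.T) {
		assert.Subset(t, []int{1, 2, 3}, []int{1, 2})    // every element of the second slice is in the first
		assert.NotSubset(t, []int{1, 3, 4}, []int{1, 2}) // 2 is missing from the first slice
	}
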
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if isEmpty(listA) && isEmpty(listB) {
+		return true
+	}
+
+	aKind := reflect.TypeOf(listA).Kind()
+	bKind := reflect.TypeOf(listB).Kind()
+
+	if aKind != reflect.Array && aKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
+	}
+
+	if bKind != reflect.Array && bKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
+	}
+
+	aValue := reflect.ValueOf(listA)
+	bValue := reflect.ValueOf(listB)
+
+	aLen := aValue.Len()
+	bLen := bValue.Len()
+
+	if aLen != bLen {
+		return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
+	}
+
+	// Mark indexes in bValue that we already used
+	visited := make([]bool, bLen)
+	for i := 0; i < aLen; i++ {
+		element := aValue.Index(i).Interface()
+		found := false
+		for j := 0; j < bLen; j++ {
+			if visited[j] {
+				continue
+			}
+			if ObjectsAreEqual(bValue.Index(j).Interface(), element) {
+				visited[j] = true
+				found = true
+				break
+			}
+		}
+		if !found {
+			return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...)
+		}
+	}
+
+	return true
+}
+
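A small, illustrative sketch of ElementsMatch on slices that differ only in element order (values chosen arbitrarily):

	package example_test

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestElementsMatch(t *testing.T) {
		assert.ElementsMatch(t, []int{1, 3, 2, 3}, []int{1, 3, 3, 2}) // same elements and counts, different order
		assert.ElementsMatch(t, []string{}, []string{})               // two empty lists also match
	}
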
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	result := comp()
+	if !result {
+		Fail(t, "Condition failed!", msgAndArgs...)
+	}
+	return result
+}
+
+// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
+// methods, and represents a simple func that takes no arguments, and returns nothing.
+type PanicTestFunc func()
+
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
+func didPanic(f PanicTestFunc) (bool, interface{}) {
+
+	didPanic := false
+	var message interface{}
+	func() {
+
+		defer func() {
+			if message = recover(); message != nil {
+				didPanic = true
+			}
+		}()
+
+		// call the target function
+		f()
+
+	}()
+
+	return didPanic, message
+
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+//   assert.Panics(t, func(){ GoCrazy() })
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
+		return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+	}
+
+	return true
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+//   assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	funcDidPanic, panicValue := didPanic(f)
+	if !funcDidPanic {
+		return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+	}
+	if panicValue != expected {
+		return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...)
+	}
+
+	return true
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+//   assert.NotPanics(t, func(){ RemainCalm() })
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if funcDidPanic, panicValue := didPanic(f); funcDidPanic {
+		return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+	}
+
+	return true
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+//   assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	dt := expected.Sub(actual)
+	if dt < -delta || dt > delta {
+		return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+	}
+
+	return true
+}
+
+func toFloat(x interface{}) (float64, bool) {
+	var xf float64
+	xok := true
+
+	switch xn := x.(type) {
+	case uint8:
+		xf = float64(xn)
+	case uint16:
+		xf = float64(xn)
+	case uint32:
+		xf = float64(xn)
+	case uint64:
+		xf = float64(xn)
+	case int:
+		xf = float64(xn)
+	case int8:
+		xf = float64(xn)
+	case int16:
+		xf = float64(xn)
+	case int32:
+		xf = float64(xn)
+	case int64:
+		xf = float64(xn)
+	case float32:
+		xf = float64(xn)
+	case float64:
+		xf = float64(xn)
+	case time.Duration:
+		xf = float64(xn)
+	default:
+		xok = false
+	}
+
+	return xf, xok
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// 	 assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	af, aok := toFloat(expected)
+	bf, bok := toFloat(actual)
+
+	if !aok || !bok {
+		return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+	}
+
+	if math.IsNaN(af) {
+		return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
+	}
+
+	if math.IsNaN(bf) {
+		return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
+	}
+
+	dt := af - bf
+	if dt < -delta || dt > delta {
+		return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+	}
+
+	return true
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if expected == nil || actual == nil ||
+		reflect.TypeOf(actual).Kind() != reflect.Slice ||
+		reflect.TypeOf(expected).Kind() != reflect.Slice {
+		return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+	}
+
+	actualSlice := reflect.ValueOf(actual)
+	expectedSlice := reflect.ValueOf(expected)
+
+	for i := 0; i < actualSlice.Len(); i++ {
+		result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...)
+		if !result {
+			return result
+		}
+	}
+
+	return true
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if expected == nil || actual == nil ||
+		reflect.TypeOf(actual).Kind() != reflect.Map ||
+		reflect.TypeOf(expected).Kind() != reflect.Map {
+		return Fail(t, "Arguments must be maps", msgAndArgs...)
+	}
+
+	expectedMap := reflect.ValueOf(expected)
+	actualMap := reflect.ValueOf(actual)
+
+	if expectedMap.Len() != actualMap.Len() {
+		return Fail(t, "Arguments must have the same number of keys", msgAndArgs...)
+	}
+
+	for _, k := range expectedMap.MapKeys() {
+		ev := expectedMap.MapIndex(k)
+		av := actualMap.MapIndex(k)
+
+		if !ev.IsValid() {
+			return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...)
+		}
+
+		if !av.IsValid() {
+			return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...)
+		}
+
+		if !InDelta(
+			t,
+			ev.Interface(),
+			av.Interface(),
+			delta,
+			msgAndArgs...,
+		) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func calcRelativeError(expected, actual interface{}) (float64, error) {
+	af, aok := toFloat(expected)
+	if !aok {
+		return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+	}
+	if af == 0 {
+		return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
+	}
+	bf, bok := toFloat(actual)
+	if !bok {
+		return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
+	}
+
+	return math.Abs(af-bf) / math.Abs(af), nil
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	actualEpsilon, err := calcRelativeError(expected, actual)
+	if err != nil {
+		return Fail(t, err.Error(), msgAndArgs...)
+	}
+	if actualEpsilon > epsilon {
+		return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
+			"        < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
+	}
+
+	return true
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if expected == nil || actual == nil ||
+		reflect.TypeOf(actual).Kind() != reflect.Slice ||
+		reflect.TypeOf(expected).Kind() != reflect.Slice {
+		return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+	}
+
+	actualSlice := reflect.ValueOf(actual)
+	expectedSlice := reflect.ValueOf(expected)
+
+	for i := 0; i < actualSlice.Len(); i++ {
+		result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
+		if !result {
+			return result
+		}
+	}
+
+	return true
+}
+
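InDelta checks an absolute difference while InEpsilon checks a relative error; a hypothetical sketch with hand-picked tolerances:

	package example_test

	import (
		"math"
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestApproximate(t *testing.T) {
		assert.InDelta(t, math.Pi, 22.0/7.0, 0.01)                               // |pi - 22/7| is about 0.00126
		assert.InEpsilon(t, 100.0, 101.0, 0.02)                                  // relative error 1/100 = 0.01 <= 0.02
		assert.InDeltaSlice(t, []float64{1.0, 2.0}, []float64{1.01, 1.99}, 0.05) // element-wise InDelta
	}
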
+/*
+	Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.NoError(t, err) {
+//	   assert.Equal(t, expectedObj, actualObj)
+//   }
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if err != nil {
+		return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
+	}
+
+	return true
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.Error(t, err) {
+//	   assert.Equal(t, expectedError, err)
+//   }
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if err == nil {
+		return Fail(t, "An error is expected but got nil.", msgAndArgs...)
+	}
+
+	return true
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   assert.EqualError(t, err,  expectedErrorString)
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if !Error(t, theError, msgAndArgs...) {
+		return false
+	}
+	expected := errString
+	actual := theError.Error()
+	// don't need to use deep equals here, we know they are both strings
+	if expected != actual {
+		return Fail(t, fmt.Sprintf("Error message not equal:\n"+
+			"expected: %q\n"+
+			"actual  : %q", expected, actual), msgAndArgs...)
+	}
+	return true
+}
+
+// matchRegexp returns true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+
+	var r *regexp.Regexp
+	if rr, ok := rx.(*regexp.Regexp); ok {
+		r = rr
+	} else {
+		r = regexp.MustCompile(fmt.Sprint(rx))
+	}
+
+	return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+//  assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+//  assert.Regexp(t, "start...$", "it's not starting")
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	match := matchRegexp(rx, str)
+
+	if !match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+	}
+
+	return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+//  assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+//  assert.NotRegexp(t, "^start", "it's not starting")
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	match := matchRegexp(rx, str)
+
+	if match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
+	}
+
+	return !match
+
+}
+
+// Zero asserts that i is the zero value for its type.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+		return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
+	}
+	return true
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+		return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
+	}
+	return true
+}
+
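Zero and NotZero compare against the zero value of the argument's own type; an illustrative sketch:

	package example_test

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestZero(t *testing.T) {
		assert.Zero(t, 0)
		assert.Zero(t, "")
		assert.Zero(t, struct{ Name string }{}) // zero value of an anonymous struct type

		assert.NotZero(t, 42)
		assert.NotZero(t, "x")
	}
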
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	info, err := os.Lstat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+		}
+		return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+	}
+	if info.IsDir() {
+		return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
+	}
+	return true
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	info, err := os.Lstat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+		}
+		return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+	}
+	if !info.IsDir() {
+		return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
+	}
+	return true
+}
+
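A hypothetical sketch of FileExists and DirExists using a temporary directory (the file name is illustrative only):

	package example_test

	import (
		"io/ioutil"
		"os"
		"path/filepath"
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestPaths(t *testing.T) {
		dir, err := ioutil.TempDir("", "example")
		assert.NoError(t, err)
		defer os.RemoveAll(dir)

		file := filepath.Join(dir, "data.txt")
		assert.NoError(t, ioutil.WriteFile(file, []byte("hello"), 0600))

		assert.FileExists(t, file) // a regular file, not a directory
		assert.DirExists(t, dir)   // a directory, not a regular file
	}
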
+// JSONEq asserts that two JSON strings are equivalent.
+//
+//  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	var expectedJSONAsInterface, actualJSONAsInterface interface{}
+
+	if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+		return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+	}
+
+	if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
+		return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
+	}
+
+	return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+	t := reflect.TypeOf(v)
+	k := t.Kind()
+
+	if k == reflect.Ptr {
+		t = t.Elem()
+		k = t.Kind()
+	}
+	return t, k
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice, array or string. Otherwise it returns an empty string.
+func diff(expected interface{}, actual interface{}) string {
+	if expected == nil || actual == nil {
+		return ""
+	}
+
+	et, ek := typeAndKind(expected)
+	at, _ := typeAndKind(actual)
+
+	if et != at {
+		return ""
+	}
+
+	if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
+		return ""
+	}
+
+	var e, a string
+	if ek != reflect.String {
+		e = spewConfig.Sdump(expected)
+		a = spewConfig.Sdump(actual)
+	} else {
+		e = expected.(string)
+		a = actual.(string)
+	}
+
+	diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+		A:        difflib.SplitLines(e),
+		B:        difflib.SplitLines(a),
+		FromFile: "Expected",
+		FromDate: "",
+		ToFile:   "Actual",
+		ToDate:   "",
+		Context:  1,
+	})
+
+	return "\n\nDiff:\n" + diff
+}
+
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+	if isFunction(expected) || isFunction(actual) {
+		return errors.New("cannot take func type as argument")
+	}
+	return nil
+}
+
+func isFunction(arg interface{}) bool {
+	if arg == nil {
+		return false
+	}
+	return reflect.TypeOf(arg).Kind() == reflect.Func
+}
+
+var spewConfig = spew.ConfigState{
+	Indent:                  " ",
+	DisablePointerAddresses: true,
+	DisableCapacities:       true,
+	SortKeys:                true,
+}
+
+type tHelper interface {
+	Helper()
+}

vendor/github.com/stretchr/testify/assert/doc.go 🔗

@@ -0,0 +1,45 @@
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
+//
+// Example Usage
+//
+// The following is a complete example using assert in a standard test function:
+//    import (
+//      "testing"
+//      "github.com/stretchr/testify/assert"
+//    )
+//
+//    func TestSomething(t *testing.T) {
+//
+//      var a string = "Hello"
+//      var b string = "Hello"
+//
+//      assert.Equal(t, a, b, "The two words should be the same.")
+//
+//    }
+//
+// If you assert many times, use the format below:
+//
+//    import (
+//      "testing"
+//      "github.com/stretchr/testify/assert"
+//    )
+//
+//    func TestSomething(t *testing.T) {
+//      assert := assert.New(t)
+//
+//      var a string = "Hello"
+//      var b string = "Hello"
+//
+//      assert.Equal(a, b, "The two words should be the same.")
+//    }
+//
+// Assertions
+//
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
+// testing framework. This allows the assertion funcs to write the failings and other details to
+// the correct place.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package assert

vendor/github.com/stretchr/testify/assert/errors.go 🔗

@@ -0,0 +1,10 @@
+package assert
+
+import (
+	"errors"
+)
+
+// AnError is an error instance useful for testing.  If the code does not care
+// about error specifics, and only needs to return the error for example, this
+// error should be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")

vendor/github.com/stretchr/testify/assert/forward_assertions.go 🔗

@@ -0,0 +1,16 @@
+package assert
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+	t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+	return &Assertions{
+		t: t,
+	}
+}
+
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs

vendor/github.com/stretchr/testify/assert/http_assertions.go 🔗

@@ -0,0 +1,143 @@
+package assert
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strings"
+)
+
+// httpCode is a helper that returns the HTTP code of the response. It returns -1 and
+// an error if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
+	w := httptest.NewRecorder()
+	req, err := http.NewRequest(method, url, nil)
+	if err != nil {
+		return -1, err
+	}
+	req.URL.RawQuery = values.Encode()
+	handler(w, req)
+	return w.Code, nil
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+//  assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	code, err := httpCode(handler, method, url, values)
+	if err != nil {
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		return false
+	}
+
+	isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
+	if !isSuccessCode {
+		Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+	}
+
+	return isSuccessCode
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+//  assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	code, err := httpCode(handler, method, url, values)
+	if err != nil {
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		return false
+	}
+
+	isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+	if !isRedirectCode {
+		Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+	}
+
+	return isRedirectCode
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+//  assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	code, err := httpCode(handler, method, url, values)
+	if err != nil {
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		return false
+	}
+
+	isErrorCode := code >= http.StatusBadRequest
+	if !isErrorCode {
+		Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+	}
+
+	return isErrorCode
+}
+
+// HTTPBody is a helper that returns the HTTP body of the response. It returns
+// an empty string if building a new request fails.
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
+	w := httptest.NewRecorder()
+	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+	if err != nil {
+		return ""
+	}
+	handler(w, req)
+	return w.Body.String()
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+//  assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	body := HTTPBody(handler, method, url, values)
+
+	contains := strings.Contains(body, fmt.Sprint(str))
+	if !contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+	}
+
+	return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+//  assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	body := HTTPBody(handler, method, url, values)
+
+	contains := strings.Contains(body, fmt.Sprint(str))
+	if contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+	}
+
+	return !contains
+}
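
To show how these HTTP helpers fit together, here is a minimal sketch against a hypothetical handler (helloHandler and the query values are made up for illustration):

	package example_test

	import (
		"fmt"
		"net/http"
		"net/url"
		"testing"

		"github.com/stretchr/testify/assert"
	)

	// helloHandler is a stand-in handler used only for this sketch.
	func helloHandler(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "Hello, %s", r.URL.Query().Get("name"))
	}

	func TestHelloHandler(t *testing.T) {
		values := url.Values{"name": []string{"World"}}

		assert.HTTPSuccess(t, helloHandler, "GET", "/hello", values) // recorder defaults to 200
		assert.HTTPBodyContains(t, helloHandler, "GET", "/hello", values, "World")
		assert.HTTPBodyNotContains(t, helloHandler, "GET", "/hello", values, "Earth")
	}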

vendor/gotest.tools/LICENSE 🔗

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

vendor/gotest.tools/assert/assert.go 🔗

@@ -0,0 +1,311 @@
+/*Package assert provides assertions for comparing expected values to actual
+values. When an assertion fails a helpful error message is printed.
+
+Assert and Check
+
+Assert() and Check() both accept a Comparison, and fail the test when the
+comparison fails. The one difference is that Assert() will end the test execution
+immediately (using t.FailNow()) whereas Check() will fail the test (using t.Fail()),
+return the value of the comparison, then proceed with the rest of the test case.
+
+Example usage
+
+The example below shows assert used with some common types.
+
+
+	import (
+	    "testing"
+
+	    "gotest.tools/assert"
+	    is "gotest.tools/assert/cmp"
+	)
+
+	func TestEverything(t *testing.T) {
+	    // booleans
+	    assert.Assert(t, ok)
+	    assert.Assert(t, !missing)
+
+	    // primitives
+	    assert.Equal(t, count, 1)
+	    assert.Equal(t, msg, "the message")
+	    assert.Assert(t, total != 10) // NotEqual
+
+	    // errors
+	    assert.NilError(t, closer.Close())
+	    assert.Error(t, err, "the exact error message")
+	    assert.ErrorContains(t, err, "includes this")
+	    assert.ErrorType(t, err, os.IsNotExist)
+
+	    // complex types
+	    assert.DeepEqual(t, result, myStruct{Name: "title"})
+	    assert.Assert(t, is.Len(items, 3))
+	    assert.Assert(t, len(sequence) != 0) // NotEmpty
+	    assert.Assert(t, is.Contains(mapping, "key"))
+
+	    // pointers and interface
+	    assert.Assert(t, is.Nil(ref))
+	    assert.Assert(t, ref != nil) // NotNil
+	}
+
+Comparisons
+
+Package https://godoc.org/gotest.tools/assert/cmp provides
+many common comparisons. Additional comparisons can be written to compare
+values in other ways. See the example Assert (CustomComparison).
+
+Automated migration from testify
+
+gty-migrate-from-testify is a binary which can update source code which uses
+testify assertions to use the assertions provided by this package.
+
+See http://bit.do/cmd-gty-migrate-from-testify.
+
+
+*/
+package assert // import "gotest.tools/assert"
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+
+	gocmp "github.com/google/go-cmp/cmp"
+	"gotest.tools/assert/cmp"
+	"gotest.tools/internal/format"
+	"gotest.tools/internal/source"
+)
+
+// BoolOrComparison can be a bool, or cmp.Comparison. See Assert() for usage.
+type BoolOrComparison interface{}
+
+// TestingT is the subset of testing.T used by the assert package.
+type TestingT interface {
+	FailNow()
+	Fail()
+	Log(args ...interface{})
+}
+
+type helperT interface {
+	Helper()
+}
+
+const failureMessage = "assertion failed: "
+
+// nolint: gocyclo
+func assert(
+	t TestingT,
+	failer func(),
+	argSelector argSelector,
+	comparison BoolOrComparison,
+	msgAndArgs ...interface{},
+) bool {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	var success bool
+	switch check := comparison.(type) {
+	case bool:
+		if check {
+			return true
+		}
+		logFailureFromBool(t, msgAndArgs...)
+
+	// Undocumented legacy comparison without Result type
+	case func() (success bool, message string):
+		success = runCompareFunc(t, check, msgAndArgs...)
+
+	case nil:
+		return true
+
+	case error:
+		msg := "error is not nil: "
+		t.Log(format.WithCustomMessage(failureMessage+msg+check.Error(), msgAndArgs...))
+
+	case cmp.Comparison:
+		success = runComparison(t, argSelector, check, msgAndArgs...)
+
+	case func() cmp.Result:
+		success = runComparison(t, argSelector, check, msgAndArgs...)
+
+	default:
+		t.Log(fmt.Sprintf("invalid Comparison: %v (%T)", check, check))
+	}
+
+	if success {
+		return true
+	}
+	failer()
+	return false
+}
+
+func runCompareFunc(
+	t TestingT,
+	f func() (success bool, message string),
+	msgAndArgs ...interface{},
+) bool {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	if success, message := f(); !success {
+		t.Log(format.WithCustomMessage(failureMessage+message, msgAndArgs...))
+		return false
+	}
+	return true
+}
+
+func logFailureFromBool(t TestingT, msgAndArgs ...interface{}) {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	const stackIndex = 3 // Assert()/Check(), assert(), formatFailureFromBool()
+	const comparisonArgPos = 1
+	args, err := source.CallExprArgs(stackIndex)
+	if err != nil {
+		t.Log(err.Error())
+		return
+	}
+
+	msg, err := boolFailureMessage(args[comparisonArgPos])
+	if err != nil {
+		t.Log(err.Error())
+		msg = "expression is false"
+	}
+
+	t.Log(format.WithCustomMessage(failureMessage+msg, msgAndArgs...))
+}
+
+func boolFailureMessage(expr ast.Expr) (string, error) {
+	if binaryExpr, ok := expr.(*ast.BinaryExpr); ok && binaryExpr.Op == token.NEQ {
+		x, err := source.FormatNode(binaryExpr.X)
+		if err != nil {
+			return "", err
+		}
+		y, err := source.FormatNode(binaryExpr.Y)
+		if err != nil {
+			return "", err
+		}
+		return x + " is " + y, nil
+	}
+
+	if unaryExpr, ok := expr.(*ast.UnaryExpr); ok && unaryExpr.Op == token.NOT {
+		x, err := source.FormatNode(unaryExpr.X)
+		if err != nil {
+			return "", err
+		}
+		return x + " is true", nil
+	}
+
+	formatted, err := source.FormatNode(expr)
+	if err != nil {
+		return "", err
+	}
+	return "expression is false: " + formatted, nil
+}
+
+// Assert performs a comparison. If the comparison fails the test is marked as
+// failed, a failure message is logged, and execution is stopped immediately.
+//
+// The comparison argument may be one of three types: bool, cmp.Comparison or
+// error.
+// When called with a bool the failure message will contain the literal source
+// code of the expression.
+// When called with a cmp.Comparison the comparison is responsible for producing
+// a helpful failure message.
+// When called with an error a nil value is considered success. A non-nil error
+// is a failure, and Error() is used as the failure message.
+func Assert(t TestingT, comparison BoolOrComparison, msgAndArgs ...interface{}) {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	assert(t, t.FailNow, argsFromComparisonCall, comparison, msgAndArgs...)
+}
+
+// Check performs a comparison. If the comparison fails the test is marked as
+// failed, a failure message is logged, and Check returns false. Otherwise returns
+// true.
+//
+// See Assert for details about the comparison arg and failure messages.
+func Check(t TestingT, comparison BoolOrComparison, msgAndArgs ...interface{}) bool {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	return assert(t, t.Fail, argsFromComparisonCall, comparison, msgAndArgs...)
+}
+
+// NilError fails the test immediately if err is not nil.
+// This is equivalent to Assert(t, err)
+func NilError(t TestingT, err error, msgAndArgs ...interface{}) {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	assert(t, t.FailNow, argsAfterT, err, msgAndArgs...)
+}
+
+// Equal uses the == operator to assert two values are equal and fails the test
+// if they are not equal.
+//
+// If the comparison fails Equal will use the variable names for x and y as part
+// of the failure message to identify the actual and expected values.
+//
+// If either x or y are a multi-line string the failure message will include a
+// unified diff of the two values. If the values only differ by whitespace
+// the unified diff will be augmented by replacing whitespace characters with
+// visible characters to identify the whitespace difference.
+//
+// This is equivalent to Assert(t, cmp.Equal(x, y)).
+func Equal(t TestingT, x, y interface{}, msgAndArgs ...interface{}) {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	assert(t, t.FailNow, argsAfterT, cmp.Equal(x, y), msgAndArgs...)
+}
+
+// DeepEqual uses google/go-cmp (http://bit.do/go-cmp) to assert two values are
+// equal and fails the test if they are not equal.
+//
+// Package https://godoc.org/gotest.tools/assert/opt provides some additional
+// commonly used Options.
+//
+// This is equivalent to Assert(t, cmp.DeepEqual(x, y)).
+func DeepEqual(t TestingT, x, y interface{}, opts ...gocmp.Option) {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	assert(t, t.FailNow, argsAfterT, cmp.DeepEqual(x, y, opts...))
+}
+
+// Error fails the test if err is nil, or the error message is not the expected
+// message.
+// Equivalent to Assert(t, cmp.Error(err, message)).
+func Error(t TestingT, err error, message string, msgAndArgs ...interface{}) {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	assert(t, t.FailNow, argsAfterT, cmp.Error(err, message), msgAndArgs...)
+}
+
+// ErrorContains fails the test if err is nil, or the error message does not
+// contain the expected substring.
+// Equivalent to Assert(t, cmp.ErrorContains(err, substring)).
+func ErrorContains(t TestingT, err error, substring string, msgAndArgs ...interface{}) {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	assert(t, t.FailNow, argsAfterT, cmp.ErrorContains(err, substring), msgAndArgs...)
+}
+
+// ErrorType fails the test if err is nil, or err is not the expected type.
+//
+// Expected can be one of:
+// a func(error) bool which returns true if the error is the expected type,
+// an instance of (or a pointer to) a struct of the expected type,
+// a pointer to an interface the error is expected to implement,
+// a reflect.Type of the expected struct or interface.
+//
+// Equivalent to Assert(t, cmp.ErrorType(err, expected)).
+func ErrorType(t TestingT, err error, expected interface{}, msgAndArgs ...interface{}) {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	assert(t, t.FailNow, argsAfterT, cmp.ErrorType(err, expected), msgAndArgs...)
+}
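
ErrorType accepts several forms of "expected"; a hypothetical sketch of the two most common ones, assuming the opened path really does not exist on the test machine:

	package example_test

	import (
		"os"
		"testing"

		"gotest.tools/assert"
	)

	func TestErrorType(t *testing.T) {
		_, err := os.Open("/does/not/exist")

		// A func(error) bool predicate identifying the expected kind of error.
		assert.ErrorType(t, err, os.IsNotExist)

		// An instance (pointer) of the expected concrete error type.
		assert.ErrorType(t, err, &os.PathError{})
	}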

vendor/gotest.tools/assert/cmp/compare.go 🔗

@@ -0,0 +1,312 @@
+/*Package cmp provides Comparisons for Assert and Check*/
+package cmp // import "gotest.tools/assert/cmp"
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/google/go-cmp/cmp"
+	"gotest.tools/internal/format"
+)
+
+// Comparison is a function which compares values and returns ResultSuccess if
+// the actual value matches the expected value. If the values do not match the
+// Result will contain a message about why it failed.
+type Comparison func() Result
+
+// DeepEqual compares two values using google/go-cmp (http://bit.do/go-cmp)
+// and succeeds if the values are equal.
+//
+// The comparison can be customized using comparison Options.
+// Package https://godoc.org/gotest.tools/assert/opt provides some additional
+// commonly used Options.
+func DeepEqual(x, y interface{}, opts ...cmp.Option) Comparison {
+	return func() (result Result) {
+		defer func() {
+			if panicmsg, handled := handleCmpPanic(recover()); handled {
+				result = ResultFailure(panicmsg)
+			}
+		}()
+		diff := cmp.Diff(x, y, opts...)
+		if diff == "" {
+			return ResultSuccess
+		}
+		return multiLineDiffResult(diff)
+	}
+}
+
+func handleCmpPanic(r interface{}) (string, bool) {
+	if r == nil {
+		return "", false
+	}
+	panicmsg, ok := r.(string)
+	if !ok {
+		panic(r)
+	}
+	switch {
+	case strings.HasPrefix(panicmsg, "cannot handle unexported field"):
+		return panicmsg, true
+	}
+	panic(r)
+}
+
+func toResult(success bool, msg string) Result {
+	if success {
+		return ResultSuccess
+	}
+	return ResultFailure(msg)
+}
+
+// Equal succeeds if x == y. See assert.Equal for full documentation.
+func Equal(x, y interface{}) Comparison {
+	return func() Result {
+		switch {
+		case x == y:
+			return ResultSuccess
+		case isMultiLineStringCompare(x, y):
+			diff := format.UnifiedDiff(format.DiffConfig{A: x.(string), B: y.(string)})
+			return multiLineDiffResult(diff)
+		}
+		return ResultFailureTemplate(`
+			{{- .Data.x}} (
+				{{- with callArg 0 }}{{ formatNode . }} {{end -}}
+				{{- printf "%T" .Data.x -}}
+			) != {{ .Data.y}} (
+				{{- with callArg 1 }}{{ formatNode . }} {{end -}}
+				{{- printf "%T" .Data.y -}}
+			)`,
+			map[string]interface{}{"x": x, "y": y})
+	}
+}
+
+func isMultiLineStringCompare(x, y interface{}) bool {
+	strX, ok := x.(string)
+	if !ok {
+		return false
+	}
+	strY, ok := y.(string)
+	if !ok {
+		return false
+	}
+	return strings.Contains(strX, "\n") || strings.Contains(strY, "\n")
+}
+
+func multiLineDiffResult(diff string) Result {
+	return ResultFailureTemplate(`
+--- {{ with callArg 0 }}{{ formatNode . }}{{else}}←{{end}}
++++ {{ with callArg 1 }}{{ formatNode . }}{{else}}→{{end}}
+{{ .Data.diff }}`,
+		map[string]interface{}{"diff": diff})
+}
+
+// Len succeeds if the sequence has the expected length.
+func Len(seq interface{}, expected int) Comparison {
+	return func() (result Result) {
+		defer func() {
+			if e := recover(); e != nil {
+				result = ResultFailure(fmt.Sprintf("type %T does not have a length", seq))
+			}
+		}()
+		value := reflect.ValueOf(seq)
+		length := value.Len()
+		if length == expected {
+			return ResultSuccess
+		}
+		msg := fmt.Sprintf("expected %s (length %d) to have length %d", seq, length, expected)
+		return ResultFailure(msg)
+	}
+}
+
+// Contains succeeds if item is in collection. Collection may be a string, map,
+// slice, or array.
+//
+// If collection is a string, item must also be a string, and is compared using
+// strings.Contains().
+// If collection is a Map, contains will succeed if item is a key in the map.
+// If collection is a slice or array, item is compared to each item in the
+// sequence using reflect.DeepEqual().
+func Contains(collection interface{}, item interface{}) Comparison {
+	return func() Result {
+		colValue := reflect.ValueOf(collection)
+		if !colValue.IsValid() {
+			return ResultFailure(fmt.Sprintf("nil does not contain items"))
+		}
+		msg := fmt.Sprintf("%v does not contain %v", collection, item)
+
+		itemValue := reflect.ValueOf(item)
+		switch colValue.Type().Kind() {
+		case reflect.String:
+			if itemValue.Type().Kind() != reflect.String {
+				return ResultFailure("string may only contain strings")
+			}
+			return toResult(
+				strings.Contains(colValue.String(), itemValue.String()),
+				fmt.Sprintf("string %q does not contain %q", collection, item))
+
+		case reflect.Map:
+			if itemValue.Type() != colValue.Type().Key() {
+				return ResultFailure(fmt.Sprintf(
+					"%v can not contain a %v key", colValue.Type(), itemValue.Type()))
+			}
+			return toResult(colValue.MapIndex(itemValue).IsValid(), msg)
+
+		case reflect.Slice, reflect.Array:
+			for i := 0; i < colValue.Len(); i++ {
+				if reflect.DeepEqual(colValue.Index(i).Interface(), item) {
+					return ResultSuccess
+				}
+			}
+			return ResultFailure(msg)
+		default:
+			return ResultFailure(fmt.Sprintf("type %T does not contain items", collection))
+		}
+	}
+}
+
+// Panics succeeds if f() panics.
+func Panics(f func()) Comparison {
+	return func() (result Result) {
+		defer func() {
+			if err := recover(); err != nil {
+				result = ResultSuccess
+			}
+		}()
+		f()
+		return ResultFailure("did not panic")
+	}
+}
+
+// Error succeeds if err is a non-nil error, and the error message equals the
+// expected message.
+func Error(err error, message string) Comparison {
+	return func() Result {
+		switch {
+		case err == nil:
+			return ResultFailure("expected an error, got nil")
+		case err.Error() != message:
+			return ResultFailure(fmt.Sprintf(
+				"expected error %q, got %+v", message, err))
+		}
+		return ResultSuccess
+	}
+}
+
+// ErrorContains succeeds if err is a non-nil error, and the error message contains
+// the expected substring.
+func ErrorContains(err error, substring string) Comparison {
+	return func() Result {
+		switch {
+		case err == nil:
+			return ResultFailure("expected an error, got nil")
+		case !strings.Contains(err.Error(), substring):
+			return ResultFailure(fmt.Sprintf(
+				"expected error to contain %q, got %+v", substring, err))
+		}
+		return ResultSuccess
+	}
+}
+
+// Nil succeeds if obj is a nil interface, pointer, or function.
+//
+// Use NilError() for comparing errors. Use Len(obj, 0) for comparing slices,
+// maps, and channels.
+func Nil(obj interface{}) Comparison {
+	msgFunc := func(value reflect.Value) string {
+		return fmt.Sprintf("%v (type %s) is not nil", reflect.Indirect(value), value.Type())
+	}
+	return isNil(obj, msgFunc)
+}
+
+func isNil(obj interface{}, msgFunc func(reflect.Value) string) Comparison {
+	return func() Result {
+		if obj == nil {
+			return ResultSuccess
+		}
+		value := reflect.ValueOf(obj)
+		kind := value.Type().Kind()
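+		// Chan, Func, Interface, Map, Ptr and Slice are contiguous Kind values,
+		// all of which can be checked with IsNil.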
+		if kind >= reflect.Chan && kind <= reflect.Slice {
+			if value.IsNil() {
+				return ResultSuccess
+			}
+			return ResultFailure(msgFunc(value))
+		}
+
+		return ResultFailure(fmt.Sprintf("%v (type %s) can not be nil", value, value.Type()))
+	}
+}
+
+// ErrorType succeeds if err is not nil and is of the expected type.
+//
+// Expected can be one of:
+// a func(error) bool which returns true if the error is the expected type,
+// an instance of (or a pointer to) a struct of the expected type,
+// a pointer to an interface the error is expected to implement,
+// a reflect.Type of the expected struct or interface.
+func ErrorType(err error, expected interface{}) Comparison {
+	return func() Result {
+		switch expectedType := expected.(type) {
+		case func(error) bool:
+			return cmpErrorTypeFunc(err, expectedType)
+		case reflect.Type:
+			if expectedType.Kind() == reflect.Interface {
+				return cmpErrorTypeImplementsType(err, expectedType)
+			}
+			return cmpErrorTypeEqualType(err, expectedType)
+		case nil:
+			return ResultFailure(fmt.Sprintf("invalid type for expected: nil"))
+		}
+
+		expectedType := reflect.TypeOf(expected)
+		switch {
+		case expectedType.Kind() == reflect.Struct, isPtrToStruct(expectedType):
+			return cmpErrorTypeEqualType(err, expectedType)
+		case isPtrToInterface(expectedType):
+			return cmpErrorTypeImplementsType(err, expectedType.Elem())
+		}
+		return ResultFailure(fmt.Sprintf("invalid type for expected: %T", expected))
+	}
+}
+
+func cmpErrorTypeFunc(err error, f func(error) bool) Result {
+	if f(err) {
+		return ResultSuccess
+	}
+	actual := "nil"
+	if err != nil {
+		actual = fmt.Sprintf("%s (%T)", err, err)
+	}
+	return ResultFailureTemplate(`error is {{ .Data.actual }}
+		{{- with callArg 1 }}, not {{ formatNode . }}{{end -}}`,
+		map[string]interface{}{"actual": actual})
+}
+
+func cmpErrorTypeEqualType(err error, expectedType reflect.Type) Result {
+	if err == nil {
+		return ResultFailure(fmt.Sprintf("error is nil, not %s", expectedType))
+	}
+	errValue := reflect.ValueOf(err)
+	if errValue.Type() == expectedType {
+		return ResultSuccess
+	}
+	return ResultFailure(fmt.Sprintf("error is %s (%T), not %s", err, err, expectedType))
+}
+
+func cmpErrorTypeImplementsType(err error, expectedType reflect.Type) Result {
+	if err == nil {
+		return ResultFailure(fmt.Sprintf("error is nil, not %s", expectedType))
+	}
+	errValue := reflect.ValueOf(err)
+	if errValue.Type().Implements(expectedType) {
+		return ResultSuccess
+	}
+	return ResultFailure(fmt.Sprintf("error is %s (%T), not %s", err, err, expectedType))
+}
+
+func isPtrToInterface(typ reflect.Type) bool {
+	return typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Interface
+}
+
+func isPtrToStruct(typ reflect.Type) bool {
+	return typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct
+}
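
The comparisons above are driven through the assert package vendored in this same PR. A minimal sketch of the intended usage (the test body and values are invented for illustration; assert.Check and assert.Assert are the entry points, with Assert stopping the test on failure):

package example_test

import (
	"os"
	"testing"

	"gotest.tools/assert"
	"gotest.tools/assert/cmp"
)

func TestComparisonsSketch(t *testing.T) {
	comments := []string{"first", "second"}
	// Check logs the Comparison's failure message and marks the test as
	// failed, but keeps running; Assert would stop the test instead.
	assert.Check(t, cmp.Len(comments, 2))
	assert.Check(t, cmp.Contains(comments, "second"))
	assert.Check(t, cmp.Equal(comments[0], "first"))

	// ErrorType accepts, among other forms, a func(error) bool or a pointer
	// to a struct of the expected type.
	_, err := os.Open("does-not-exist")
	assert.Check(t, cmp.ErrorType(err, os.IsNotExist))
	assert.Check(t, cmp.ErrorType(err, &os.PathError{}))
}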

vendor/gotest.tools/assert/cmp/result.go πŸ”—

@@ -0,0 +1,94 @@
+package cmp
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"text/template"
+
+	"gotest.tools/internal/source"
+)
+
+// Result of a Comparison.
+type Result interface {
+	Success() bool
+}
+
+type result struct {
+	success bool
+	message string
+}
+
+func (r result) Success() bool {
+	return r.success
+}
+
+func (r result) FailureMessage() string {
+	return r.message
+}
+
+// ResultSuccess is a constant which is returned by a ComparisonWithResult to
+// indicate success.
+var ResultSuccess = result{success: true}
+
+// ResultFailure returns a failed Result with a failure message.
+func ResultFailure(message string) Result {
+	return result{message: message}
+}
+
+// ResultFromError returns ResultSuccess if err is nil. Otherwise ResultFailure
+// is returned with the error message as the failure message.
+func ResultFromError(err error) Result {
+	if err == nil {
+		return ResultSuccess
+	}
+	return ResultFailure(err.Error())
+}
+
+type templatedResult struct {
+	success  bool
+	template string
+	data     map[string]interface{}
+}
+
+func (r templatedResult) Success() bool {
+	return r.success
+}
+
+func (r templatedResult) FailureMessage(args []ast.Expr) string {
+	msg, err := renderMessage(r, args)
+	if err != nil {
+		return fmt.Sprintf("failed to render failure message: %s", err)
+	}
+	return msg
+}
+
+// ResultFailureTemplate returns a Result with a template string and data which
+// can be used to format a failure message. The template may access data from .Data,
+// the comparison args with the callArg function, and the formatNode function may
+// be used to format the call args.
+func ResultFailureTemplate(template string, data map[string]interface{}) Result {
+	return templatedResult{template: template, data: data}
+}
+
+func renderMessage(result templatedResult, args []ast.Expr) (string, error) {
+	tmpl := template.New("failure").Funcs(template.FuncMap{
+		"formatNode": source.FormatNode,
+		"callArg": func(index int) ast.Expr {
+			if index >= len(args) {
+				return nil
+			}
+			return args[index]
+		},
+	})
+	var err error
+	tmpl, err = tmpl.Parse(result.template)
+	if err != nil {
+		return "", err
+	}
+	buf := new(bytes.Buffer)
+	err = tmpl.Execute(buf, map[string]interface{}{
+		"Data": result.data,
+	})
+	return buf.String(), err
+}
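
The Result constructors above are also the extension point for project-specific checks. A sketch of a custom cmp.Comparison built only from these helpers (validTitle and its rules are hypothetical):

package example

import (
	"fmt"

	"gotest.tools/assert/cmp"
)

// validTitle is a hypothetical Comparison; the rules are invented purely to
// show how ResultSuccess and ResultFailure are meant to be used.
func validTitle(title string) cmp.Comparison {
	return func() cmp.Result {
		if title == "" {
			return cmp.ResultFailure("title must not be empty")
		}
		if len(title) > 50 {
			return cmp.ResultFailure(fmt.Sprintf("title %q is longer than 50 characters", title))
		}
		return cmp.ResultSuccess
	}
}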

vendor/gotest.tools/assert/result.go πŸ”—

@@ -0,0 +1,107 @@
+package assert
+
+import (
+	"fmt"
+	"go/ast"
+
+	"gotest.tools/assert/cmp"
+	"gotest.tools/internal/format"
+	"gotest.tools/internal/source"
+)
+
+func runComparison(
+	t TestingT,
+	argSelector argSelector,
+	f cmp.Comparison,
+	msgAndArgs ...interface{},
+) bool {
+	if ht, ok := t.(helperT); ok {
+		ht.Helper()
+	}
+	result := f()
+	if result.Success() {
+		return true
+	}
+
+	var message string
+	switch typed := result.(type) {
+	case resultWithComparisonArgs:
+		const stackIndex = 3 // Assert/Check, assert, runComparison
+		args, err := source.CallExprArgs(stackIndex)
+		if err != nil {
+			t.Log(err.Error())
+		}
+		message = typed.FailureMessage(filterPrintableExpr(argSelector(args)))
+	case resultBasic:
+		message = typed.FailureMessage()
+	default:
+		message = fmt.Sprintf("comparison returned invalid Result type: %T", result)
+	}
+
+	t.Log(format.WithCustomMessage(failureMessage+message, msgAndArgs...))
+	return false
+}
+
+type resultWithComparisonArgs interface {
+	FailureMessage(args []ast.Expr) string
+}
+
+type resultBasic interface {
+	FailureMessage() string
+}
+
+// filterPrintableExpr filters the ast.Expr slice to only include Expr that are
+// easy to read when printed and contain information relevant to an assertion.
+//
+// Ident and SelectorExpr are included because they print nicely and the variable
+// names may provide additional context to their values.
+// BasicLit and CompositeLit are excluded because their source is equivalent to
+// their value, which is already available.
+// Other types are ignored for now, but could be added if they are relevant.
+func filterPrintableExpr(args []ast.Expr) []ast.Expr {
+	result := make([]ast.Expr, len(args))
+	for i, arg := range args {
+		if isShortPrintableExpr(arg) {
+			result[i] = arg
+			continue
+		}
+
+		if starExpr, ok := arg.(*ast.StarExpr); ok {
+			result[i] = starExpr.X
+			continue
+		}
+		result[i] = nil
+	}
+	return result
+}
+
+func isShortPrintableExpr(expr ast.Expr) bool {
+	switch expr.(type) {
+	case *ast.Ident, *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr:
+		return true
+	case *ast.BinaryExpr, *ast.UnaryExpr:
+		return true
+	default:
+		// CallExpr, ParenExpr, TypeAssertExpr, KeyValueExpr, StarExpr
+		return false
+	}
+}
+
+type argSelector func([]ast.Expr) []ast.Expr
+
+func argsAfterT(args []ast.Expr) []ast.Expr {
+	if len(args) < 1 {
+		return nil
+	}
+	return args[1:]
+}
+
+func argsFromComparisonCall(args []ast.Expr) []ast.Expr {
+	if len(args) <= 1 {
+		return nil
+	}
+	if callExpr, ok := args[1].(*ast.CallExpr); ok {
+		return callExpr.Args
+	}
+	return nil
+}

vendor/gotest.tools/internal/difflib/LICENSE πŸ”—

@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+    The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/gotest.tools/internal/difflib/difflib.go πŸ”—

@@ -0,0 +1,420 @@
+/* Package difflib is a partial port of the Python difflib module.
+
+Original source: https://github.com/pmezard/go-difflib
+
+This file is trimmed to only the parts used by this repository.
+*/
+package difflib // import "gotest.tools/internal/difflib"
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+type Match struct {
+	A    int
+	B    int
+	Size int
+}
+
+type OpCode struct {
+	Tag byte
+	I1  int
+	I2  int
+	J1  int
+	J2  int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching".  The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk).  The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence.  This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence.  That's what
+// catches peoples' eyes.  The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff.  This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files).  That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing:  Basic R-O is cubic time worst case and quadratic time expected
+// case.  SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+	a              []string
+	b              []string
+	b2j            map[string][]int
+	IsJunk         func(string) bool
+	autoJunk       bool
+	bJunk          map[string]struct{}
+	matchingBlocks []Match
+	fullBCount     map[string]int
+	bPopular       map[string]struct{}
+	opCodes        []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+	m := SequenceMatcher{autoJunk: true}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+	m.SetSeq1(a)
+	m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+	if &a == &m.a {
+		return
+	}
+	m.a = a
+	m.matchingBlocks = nil
+	m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+	if &b == &m.b {
+		return
+	}
+	m.b = b
+	m.matchingBlocks = nil
+	m.opCodes = nil
+	m.fullBCount = nil
+	m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+	// Populate line -> index mapping
+	b2j := map[string][]int{}
+	for i, s := range m.b {
+		indices := b2j[s]
+		indices = append(indices, i)
+		b2j[s] = indices
+	}
+
+	// Purge junk elements
+	m.bJunk = map[string]struct{}{}
+	if m.IsJunk != nil {
+		junk := m.bJunk
+		for s := range b2j {
+			if m.IsJunk(s) {
+				junk[s] = struct{}{}
+			}
+		}
+		for s := range junk {
+			delete(b2j, s)
+		}
+	}
+
+	// Purge remaining popular elements
+	popular := map[string]struct{}{}
+	n := len(m.b)
+	if m.autoJunk && n >= 200 {
+		ntest := n/100 + 1
+		for s, indices := range b2j {
+			if len(indices) > ntest {
+				popular[s] = struct{}{}
+			}
+		}
+		for s := range popular {
+			delete(b2j, s)
+		}
+	}
+	m.bPopular = popular
+	m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+	_, ok := m.bJunk[s]
+	return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//     alo <= i <= i+k <= ahi
+//     blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+//     k >= k'
+//     i <= i'
+//     and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block.  Then that block is extended as
+// far as possible by matching (only) junk elements on both sides.  So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+	// CAUTION:  stripping common prefix or suffix would be incorrect.
+	// E.g.,
+	//    ab
+	//    acab
+	// Longest matching block is "ab", but if common prefix is
+	// stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
+	// strip, so ends up claiming that ab is changed to acab by
+	// inserting "ca" in the middle.  That's minimal but unintuitive:
+	// "it's obvious" that someone inserted "ac" at the front.
+	// Windiff ends up at the same place as diff, but by pairing up
+	// the unique 'b's and then matching the first two 'a's.
+	besti, bestj, bestsize := alo, blo, 0
+
+	// find longest junk-free match
+	// during an iteration of the loop, j2len[j] = length of longest
+	// junk-free match ending with a[i-1] and b[j]
+	j2len := map[int]int{}
+	for i := alo; i != ahi; i++ {
+		// look at all instances of a[i] in b; note that because
+		// b2j has no junk keys, the loop is skipped if a[i] is junk
+		newj2len := map[int]int{}
+		for _, j := range m.b2j[m.a[i]] {
+			// a[i] matches b[j]
+			if j < blo {
+				continue
+			}
+			if j >= bhi {
+				break
+			}
+			k := j2len[j-1] + 1
+			newj2len[j] = k
+			if k > bestsize {
+				besti, bestj, bestsize = i-k+1, j-k+1, k
+			}
+		}
+		j2len = newj2len
+	}
+
+	// Extend the best by non-junk elements on each end.  In particular,
+	// "popular" non-junk elements aren't in b2j, which greatly speeds
+	// the inner loop above, but also means "the best" match so far
+	// doesn't contain any junk *or* popular non-junk elements.
+	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		!m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize += 1
+	}
+
+	// Now that we have a wholly interesting match (albeit possibly
+	// empty!), we may as well suck up the matching junk on each
+	// side of it too.  Can't think of a good reason not to, and it
+	// saves post-processing the (possibly considerable) expense of
+	// figuring out what to do with it.  In the case of an empty
+	// interesting match, this is clearly the right thing to do,
+	// because no other kind of match is possible in the regions.
+	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize += 1
+	}
+
+	return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+	if m.matchingBlocks != nil {
+		return m.matchingBlocks
+	}
+
+	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+		match := m.findLongestMatch(alo, ahi, blo, bhi)
+		i, j, k := match.A, match.B, match.Size
+		if match.Size > 0 {
+			if alo < i && blo < j {
+				matched = matchBlocks(alo, i, blo, j, matched)
+			}
+			matched = append(matched, match)
+			if i+k < ahi && j+k < bhi {
+				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+			}
+		}
+		return matched
+	}
+	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+	// It's possible that we have adjacent equal blocks in the
+	// matching_blocks list now.
+	nonAdjacent := []Match{}
+	i1, j1, k1 := 0, 0, 0
+	for _, b := range matched {
+		// Is this block adjacent to i1, j1, k1?
+		i2, j2, k2 := b.A, b.B, b.Size
+		if i1+k1 == i2 && j1+k1 == j2 {
+			// Yes, so collapse them -- this just increases the length of
+			// the first block by the length of the second, and the first
+			// block so lengthened remains the block to compare against.
+			k1 += k2
+		} else {
+			// Not adjacent.  Remember the first block (k1==0 means it's
+			// the dummy we started with), and make the second block the
+			// new block to compare against.
+			if k1 > 0 {
+				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+			}
+			i1, j1, k1 = i2, j2, k2
+		}
+	}
+	if k1 > 0 {
+		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+	}
+
+	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+	m.matchingBlocks = nonAdjacent
+	return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace):  a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete):   a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert):   b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal):    a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+	if m.opCodes != nil {
+		return m.opCodes
+	}
+	i, j := 0, 0
+	matching := m.GetMatchingBlocks()
+	opCodes := make([]OpCode, 0, len(matching))
+	for _, m := range matching {
+		//  invariant:  we've pumped out correct diffs to change
+		//  a[:i] into b[:j], and the next matching block is
+		//  a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+		//  out a diff to change a[i:ai] into b[j:bj], pump out
+		//  the matching block, and move (i,j) beyond the match
+		ai, bj, size := m.A, m.B, m.Size
+		tag := byte(0)
+		if i < ai && j < bj {
+			tag = 'r'
+		} else if i < ai {
+			tag = 'd'
+		} else if j < bj {
+			tag = 'i'
+		}
+		if tag > 0 {
+			opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+		}
+		i, j = ai+size, bj+size
+		// the list of matching blocks is terminated by a
+		// sentinel with size 0
+		if size > 0 {
+			opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+		}
+	}
+	m.opCodes = opCodes
+	return m.opCodes
+}
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a slice of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+	if n < 0 {
+		n = 3
+	}
+	codes := m.GetOpCodes()
+	if len(codes) == 0 {
+		codes = []OpCode{{'e', 0, 1, 0, 1}}
+	}
+	// Fixup leading and trailing groups if they show no changes.
+	if codes[0].Tag == 'e' {
+		c := codes[0]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+	}
+	if codes[len(codes)-1].Tag == 'e' {
+		c := codes[len(codes)-1]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+	}
+	nn := n + n
+	groups := [][]OpCode{}
+	group := []OpCode{}
+	for _, c := range codes {
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		// End the current group and start a new one whenever
+		// there is a large range with no changes.
+		if c.Tag == 'e' && i2-i1 > nn {
+			group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+				j1, min(j2, j1+n)})
+			groups = append(groups, group)
+			group = []OpCode{}
+			i1, j1 = max(i1, i2-n), max(j1, j2-n)
+		}
+		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+	}
+	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+		groups = append(groups, group)
+	}
+	return groups
+}
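
The trimmed matcher keeps the go-difflib call shape. Because it lives under internal/, only gotest.tools itself can import it, so this sketch is written as if it were inside the package (inputs invented):

// exampleOpCodes is illustrative only, assuming it sits in package difflib;
// it is not part of the vendored file.
func exampleOpCodes() []OpCode {
	a := []string{"one\n", "two\n", "three\n"}
	b := []string{"one\n", "2\n", "three\n"}
	// Tags are 'e', 'r', 'd' or 'i', as documented on GetOpCodes.
	return NewMatcher(a, b).GetOpCodes()
}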

vendor/gotest.tools/internal/format/diff.go πŸ”—

@@ -0,0 +1,161 @@
+package format
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"unicode"
+
+	"gotest.tools/internal/difflib"
+)
+
+const (
+	contextLines = 2
+)
+
+// DiffConfig for a unified diff
+type DiffConfig struct {
+	A    string
+	B    string
+	From string
+	To   string
+}
+
+// UnifiedDiff is a modified version of difflib.WriteUnifiedDiff with better
+// support for showing the whitespace differences.
+func UnifiedDiff(conf DiffConfig) string {
+	a := strings.SplitAfter(conf.A, "\n")
+	b := strings.SplitAfter(conf.B, "\n")
+	groups := difflib.NewMatcher(a, b).GetGroupedOpCodes(contextLines)
+	if len(groups) == 0 {
+		return ""
+	}
+
+	buf := new(bytes.Buffer)
+	writeFormat := func(format string, args ...interface{}) {
+		buf.WriteString(fmt.Sprintf(format, args...))
+	}
+	writeLine := func(prefix string, s string) {
+		buf.WriteString(prefix + s)
+	}
+	if hasWhitespaceDiffLines(groups, a, b) {
+		writeLine = visibleWhitespaceLine(writeLine)
+	}
+	formatHeader(writeFormat, conf)
+	for _, group := range groups {
+		formatRangeLine(writeFormat, group)
+		for _, opCode := range group {
+			in, out := a[opCode.I1:opCode.I2], b[opCode.J1:opCode.J2]
+			switch opCode.Tag {
+			case 'e':
+				formatLines(writeLine, " ", in)
+			case 'r':
+				formatLines(writeLine, "-", in)
+				formatLines(writeLine, "+", out)
+			case 'd':
+				formatLines(writeLine, "-", in)
+			case 'i':
+				formatLines(writeLine, "+", out)
+			}
+		}
+	}
+	return buf.String()
+}
+
+// hasWhitespaceDiffLines returns true if any diff group differs only by
+// whitespace characters.
+func hasWhitespaceDiffLines(groups [][]difflib.OpCode, a, b []string) bool {
+	for _, group := range groups {
+		in, out := new(bytes.Buffer), new(bytes.Buffer)
+		for _, opCode := range group {
+			if opCode.Tag == 'e' {
+				continue
+			}
+			for _, line := range a[opCode.I1:opCode.I2] {
+				in.WriteString(line)
+			}
+			for _, line := range b[opCode.J1:opCode.J2] {
+				out.WriteString(line)
+			}
+		}
+		if removeWhitespace(in.String()) == removeWhitespace(out.String()) {
+			return true
+		}
+	}
+	return false
+}
+
+func removeWhitespace(s string) string {
+	var result []rune
+	for _, r := range s {
+		if !unicode.IsSpace(r) {
+			result = append(result, r)
+		}
+	}
+	return string(result)
+}
+
+func visibleWhitespaceLine(ws func(string, string)) func(string, string) {
+	mapToVisibleSpace := func(r rune) rune {
+		switch r {
+		case '\n':
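+			// empty case: '\n' skips the default branch and is returned unchanged below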
+		case ' ':
+			return '·'
+		case '\t':
+			return '▷'
+		case '\v':
+			return '▽'
+		case '\r':
+			return '↡'
+		case '\f':
+			return '↓'
+		default:
+			if unicode.IsSpace(r) {
+				return '�'
+			}
+			}
+		}
+		return r
+	}
+	return func(prefix, s string) {
+		ws(prefix, strings.Map(mapToVisibleSpace, s))
+	}
+}
+
+func formatHeader(wf func(string, ...interface{}), conf DiffConfig) {
+	if conf.From != "" || conf.To != "" {
+		wf("--- %s\n", conf.From)
+		wf("+++ %s\n", conf.To)
+	}
+}
+
+func formatRangeLine(wf func(string, ...interface{}), group []difflib.OpCode) {
+	first, last := group[0], group[len(group)-1]
+	range1 := formatRangeUnified(first.I1, last.I2)
+	range2 := formatRangeUnified(first.J1, last.J2)
+	wf("@@ -%s +%s @@\n", range1, range2)
+}
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 1 {
+		return fmt.Sprintf("%d", beginning)
+	}
+	if length == 0 {
+		beginning-- // empty ranges begin at line just before the range
+	}
+	return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+func formatLines(writeLine func(string, string), prefix string, lines []string) {
+	for _, line := range lines {
+		writeLine(prefix, line)
+	}
+	// Add a newline if the last line is missing one so that the diff displays
+	// properly.
+	if !strings.HasSuffix(lines[len(lines)-1], "\n") {
+		writeLine("", "\n")
+	}
+}
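
UnifiedDiff is what cmp.Equal calls for multi-line string comparisons (it passes only A and B; From and To add the ---/+++ header). A sketch, written as if inside the package because of the internal/ import restriction:

// exampleDiff is illustrative only, assuming it sits in package format.
func exampleDiff() string {
	return UnifiedDiff(DiffConfig{
		A:    "first\nsecond\n",
		B:    "first\n2nd\n",
		From: "want",
		To:   "got",
	})
}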

vendor/gotest.tools/internal/format/format.go πŸ”—

@@ -0,0 +1,27 @@
+package format // import "gotest.tools/internal/format"
+
+import "fmt"
+
+// Message accepts a msgAndArgs varargs and formats it using fmt.Sprintf
+func Message(msgAndArgs ...interface{}) string {
+	switch len(msgAndArgs) {
+	case 0:
+		return ""
+	case 1:
+		return fmt.Sprintf("%v", msgAndArgs[0])
+	default:
+		return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+	}
+}
+
+// WithCustomMessage accepts one or two messages and formats them appropriately
+func WithCustomMessage(source string, msgAndArgs ...interface{}) string {
+	custom := Message(msgAndArgs...)
+	switch {
+	case custom == "":
+		return source
+	case source == "":
+		return custom
+	}
+	return fmt.Sprintf("%s: %s", source, custom)
+}
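
For orientation, this is how the optional msgAndArgs passed to an assertion end up appended to the failure text. A sketch, written as if inside the package (values invented):

// exampleMessage is illustrative only, assuming it sits in package format.
// It returns "assertion failed: values are not equal: while checking comment 3".
func exampleMessage() string {
	failure := "assertion failed: values are not equal"
	return WithCustomMessage(failure, "while checking comment %d", 3)
}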

vendor/gotest.tools/internal/source/source.go πŸ”—

@@ -0,0 +1,163 @@
+package source // import "gotest.tools/internal/source"
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/format"
+	"go/parser"
+	"go/token"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+const baseStackIndex = 1
+
+// FormattedCallExprArg returns the argument from an ast.CallExpr at the
+// index in the call stack. The argument is formatted using FormatNode.
+func FormattedCallExprArg(stackIndex int, argPos int) (string, error) {
+	args, err := CallExprArgs(stackIndex + 1)
+	if err != nil {
+		return "", err
+	}
+	return FormatNode(args[argPos])
+}
+
+func getNodeAtLine(filename string, lineNum int) (ast.Node, error) {
+	fileset := token.NewFileSet()
+	astFile, err := parser.ParseFile(fileset, filename, nil, parser.AllErrors)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse source file: %s", filename)
+	}
+
+	node := scanToLine(fileset, astFile, lineNum)
+	if node == nil {
+		return nil, errors.Errorf(
+			"failed to find an expression on line %d in %s", lineNum, filename)
+	}
+	return node, nil
+}
+
+func scanToLine(fileset *token.FileSet, node ast.Node, lineNum int) ast.Node {
+	v := &scanToLineVisitor{lineNum: lineNum, fileset: fileset}
+	ast.Walk(v, node)
+	return v.matchedNode
+}
+
+type scanToLineVisitor struct {
+	lineNum     int
+	matchedNode ast.Node
+	fileset     *token.FileSet
+}
+
+func (v *scanToLineVisitor) Visit(node ast.Node) ast.Visitor {
+	if node == nil || v.matchedNode != nil {
+		return nil
+	}
+	if v.nodePosition(node).Line == v.lineNum {
+		v.matchedNode = node
+		return nil
+	}
+	return v
+}
+
+// In Go 1.9 the line number changed from being the line where the statement
+// ended to the line where the statement began.
+func (v *scanToLineVisitor) nodePosition(node ast.Node) token.Position {
+	if goVersionBefore19 {
+		return v.fileset.Position(node.End())
+	}
+	return v.fileset.Position(node.Pos())
+}
+
+var goVersionBefore19 = isGOVersionBefore19()
+
+func isGOVersionBefore19() bool {
+	version := runtime.Version()
+	// not a release version
+	if !strings.HasPrefix(version, "go") {
+		return false
+	}
+	version = strings.TrimPrefix(version, "go")
+	parts := strings.Split(version, ".")
+	if len(parts) < 2 {
+		return false
+	}
+	minor, err := strconv.ParseInt(parts[1], 10, 32)
+	return err == nil && parts[0] == "1" && minor < 9
+}
+
+func getCallExprArgs(node ast.Node) ([]ast.Expr, error) {
+	visitor := &callExprVisitor{}
+	ast.Walk(visitor, node)
+	if visitor.expr == nil {
+		return nil, errors.New("failed to find call expression")
+	}
+	return visitor.expr.Args, nil
+}
+
+type callExprVisitor struct {
+	expr *ast.CallExpr
+}
+
+func (v *callExprVisitor) Visit(node ast.Node) ast.Visitor {
+	if v.expr != nil || node == nil {
+		return nil
+	}
+	debug("visit (%T): %s", node, debugFormatNode{node})
+
+	if callExpr, ok := node.(*ast.CallExpr); ok {
+		v.expr = callExpr
+		return nil
+	}
+	return v
+}
+
+// FormatNode using go/format.Node and return the result as a string
+func FormatNode(node ast.Node) (string, error) {
+	buf := new(bytes.Buffer)
+	err := format.Node(buf, token.NewFileSet(), node)
+	return buf.String(), err
+}
+
+// CallExprArgs returns the ast.Expr slice for the args of an ast.CallExpr at
+// the index in the call stack.
+func CallExprArgs(stackIndex int) ([]ast.Expr, error) {
+	_, filename, lineNum, ok := runtime.Caller(baseStackIndex + stackIndex)
+	if !ok {
+		return nil, errors.New("failed to get call stack")
+	}
+	debug("call stack position: %s:%d", filename, lineNum)
+
+	node, err := getNodeAtLine(filename, lineNum)
+	if err != nil {
+		return nil, err
+	}
+	debug("found node (%T): %s", node, debugFormatNode{node})
+
+	return getCallExprArgs(node)
+}
+
+var debugEnabled = os.Getenv("GOTESTYOURSELF_DEBUG") != ""
+
+func debug(format string, args ...interface{}) {
+	if debugEnabled {
+		fmt.Fprintf(os.Stderr, "DEBUG: "+format+"\n", args...)
+	}
+}
+
+type debugFormatNode struct {
+	ast.Node
+}
+
+func (n debugFormatNode) String() string {
+	out, err := FormatNode(n.Node)
+	if err != nil {
+		return fmt.Sprintf("failed to format %s: %s", n.Node, err)
+	}
+	return out
+}
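
runComparison relies on this package to quote the caller's source text in failure messages. A sketch of the same trick in a standalone helper, written as if inside the package; check and its stack depth are assumptions, not part of this diff:

// check is illustrative only, assuming it sits in package source: a test
// calling check(len(bugs) == 3) would, on failure, print "failed: len(bugs) == 3".
func check(ok bool) {
	if ok {
		return
	}
	// stackIndex 1: skip check's own frame and parse the caller's line.
	args, err := CallExprArgs(1)
	if err != nil || len(args) == 0 {
		return
	}
	if text, err := FormatNode(args[0]); err == nil {
		fmt.Println("failed:", text)
	}
}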