relay connection working with gqlgen

Michael Muré created

Change summary

Gopkg.lock                                                              |   29 
Gopkg.toml                                                              |   12 
cache/cache.go                                                          |   10 
commands/webui.go                                                       |   12 
graphql2/gqlgen.yml                                                     |   11 
graphql2/handler.go                                                     |   18 
graphql2/relay.go                                                       |   39 
graphql2/resolvers.go                                                   |   82 
graphql2/resolvers/bug.go                                               |   61 
graphql2/resolvers/generated_graph.go                                   |  751 
graphql2/resolvers/generated_model.go                                   |   40 
graphql2/resolvers/operations.go                                        |   54 
graphql2/resolvers/pager_bug.go                                         |  225 
graphql2/resolvers/pager_comment.go                                     |  225 
graphql2/resolvers/pager_operation.go                                   |  225 
graphql2/resolvers/pagers.go                                            |   51 
graphql2/resolvers/pagers_template.go                                   |  224 
graphql2/resolvers/query.go                                             |   36 
graphql2/resolvers/repo.go                                              |   26 
graphql2/resolvers/root.go                                              |   53 
graphql2/schema.graphql                                                 |  120 
vendor/github.com/cheekybits/genny/LICENSE                              |   22 
vendor/github.com/cheekybits/genny/generic/doc.go                       |    2 
vendor/github.com/cheekybits/genny/generic/generic.go                   |   13 
vendor/github.com/gorilla/websocket/.gitignore                          |   25 
vendor/github.com/gorilla/websocket/.travis.yml                         |   19 
vendor/github.com/gorilla/websocket/AUTHORS                             |    8 
vendor/github.com/gorilla/websocket/LICENSE                             |   22 
vendor/github.com/gorilla/websocket/README.md                           |   64 
vendor/github.com/gorilla/websocket/client.go                           |  392 
vendor/github.com/gorilla/websocket/client_clone.go                     |   16 
vendor/github.com/gorilla/websocket/client_clone_legacy.go              |   38 
vendor/github.com/gorilla/websocket/compression.go                      |  148 
vendor/github.com/gorilla/websocket/conn.go                             | 1149 
vendor/github.com/gorilla/websocket/conn_read.go                        |   18 
vendor/github.com/gorilla/websocket/conn_read_legacy.go                 |   21 
vendor/github.com/gorilla/websocket/doc.go                              |  180 
vendor/github.com/gorilla/websocket/json.go                             |   55 
vendor/github.com/gorilla/websocket/mask.go                             |   55 
vendor/github.com/gorilla/websocket/mask_safe.go                        |   15 
vendor/github.com/gorilla/websocket/prepared.go                         |  103 
vendor/github.com/gorilla/websocket/server.go                           |  291 
vendor/github.com/gorilla/websocket/util.go                             |  214 
vendor/github.com/vektah/gqlgen/LICENSE                                 |   19 
vendor/github.com/vektah/gqlgen/graphql/bool.go                         |   30 
vendor/github.com/vektah/gqlgen/graphql/context.go                      |  145 
vendor/github.com/vektah/gqlgen/graphql/defer.go                        |   30 
vendor/github.com/vektah/gqlgen/graphql/error.go                        |   46 
vendor/github.com/vektah/gqlgen/graphql/exec.go                         |  118 
vendor/github.com/vektah/gqlgen/graphql/float.go                        |   26 
vendor/github.com/vektah/gqlgen/graphql/id.go                           |   33 
vendor/github.com/vektah/gqlgen/graphql/int.go                          |   26 
vendor/github.com/vektah/gqlgen/graphql/jsonw.go                        |   83 
vendor/github.com/vektah/gqlgen/graphql/map.go                          |   24 
vendor/github.com/vektah/gqlgen/graphql/oneshot.go                      |   14 
vendor/github.com/vektah/gqlgen/graphql/recovery.go                     |   19 
vendor/github.com/vektah/gqlgen/graphql/response.go                     |   18 
vendor/github.com/vektah/gqlgen/graphql/string.go                       |   63 
vendor/github.com/vektah/gqlgen/graphql/time.go                         |   21 
vendor/github.com/vektah/gqlgen/handler/graphql.go                      |  235 
vendor/github.com/vektah/gqlgen/handler/playground.go                   |   51 
vendor/github.com/vektah/gqlgen/handler/stub.go                         |   45 
vendor/github.com/vektah/gqlgen/handler/websocket.go                    |  245 
vendor/github.com/vektah/gqlgen/neelance/LICENSE                        |   24 
vendor/github.com/vektah/gqlgen/neelance/common/directive.go            |   32 
vendor/github.com/vektah/gqlgen/neelance/common/lexer.go                |  122 
vendor/github.com/vektah/gqlgen/neelance/common/literals.go             |  206 
vendor/github.com/vektah/gqlgen/neelance/common/types.go                |   80 
vendor/github.com/vektah/gqlgen/neelance/common/values.go               |   77 
vendor/github.com/vektah/gqlgen/neelance/errors/errors.go               |   41 
vendor/github.com/vektah/gqlgen/neelance/introspection/introspection.go |  313 
vendor/github.com/vektah/gqlgen/neelance/introspection/query.go         |  104 
vendor/github.com/vektah/gqlgen/neelance/query/query.go                 |  261 
vendor/github.com/vektah/gqlgen/neelance/schema/meta.go                 |  193 
vendor/github.com/vektah/gqlgen/neelance/schema/schema.go               |  489 
vendor/github.com/vektah/gqlgen/neelance/tests/testdata/LICENSE         |   33 
vendor/github.com/vektah/gqlgen/neelance/validation/suggestion.go       |   71 
vendor/github.com/vektah/gqlgen/neelance/validation/validation.go       |  861 
78 files changed, 8,647 insertions(+), 725 deletions(-)

Detailed changes

Gopkg.lock 🔗

@@ -1,6 +1,12 @@
 # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
 
 
+[[projects]]
+  branch = "master"
+  name = "github.com/cheekybits/genny"
+  packages = ["generic"]
+  revision = "9127e812e1e9e501ce899a18121d316ecb52e4ba"
+
 [[projects]]
   name = "github.com/cpuguy83/go-md2man"
   packages = ["md2man"]
@@ -31,6 +37,12 @@
   revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf"
   version = "v1.6.2"
 
+[[projects]]
+  name = "github.com/gorilla/websocket"
+  packages = ["."]
+  revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
+  version = "v1.2.0"
+
 [[projects]]
   name = "github.com/graphql-go/graphql"
   packages = [
@@ -118,6 +130,21 @@
   revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
   version = "v1.0.1"
 
+[[projects]]
+  name = "github.com/vektah/gqlgen"
+  packages = [
+    "graphql",
+    "handler",
+    "neelance/common",
+    "neelance/errors",
+    "neelance/introspection",
+    "neelance/query",
+    "neelance/schema",
+    "neelance/validation"
+  ]
+  revision = "381b34691fd93829e50ba8821412dc3467ec4821"
+  version = "0.3.0"
+
 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
@@ -133,6 +160,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "bbcfc01c18bb3703bea4b5d08a015a6bcc74f7ea2e9eb130c36e551745f2ec06"
+  inputs-digest = "c70340117a5b5a1d50ad4e8c20e51b01ff6cbec9e3c49911a066e6fd1115b854"
   solver-name = "gps-cdcl"
   solver-version = 1

Gopkg.toml 🔗

@@ -52,14 +52,10 @@
   name = "github.com/spf13/cobra"
   version = "v0.0.3"
 
-[[constraint]]
-  name = "github.com/graphql-go/graphql"
-  version = "v0.7.5"
-
-[[constraint]]
-  name = "github.com/graphql-go/handler"
-  version = "v0.2.1"
-
 [[constraint]]
   branch = "master"
   name = "github.com/dustin/go-humanize"
+
+[[constraint]]
+  name = "github.com/vektah/gqlgen"
+  version = "0.3.0"

cache/cache.go 🔗

@@ -27,7 +27,7 @@ type RepoCacher interface {
 }
 
 type BugCacher interface {
-	Snapshot() bug.Snapshot
+	Snapshot() *bug.Snapshot
 	ClearSnapshot()
 }
 
@@ -37,8 +37,8 @@ type RootCache struct {
 	repos map[string]RepoCacher
 }
 
-func NewCache() Cacher {
-	return &RootCache{
+func NewCache() RootCache {
+	return RootCache{
 		repos: make(map[string]RepoCacher),
 	}
 }
@@ -172,12 +172,12 @@ func NewBugCache(b *bug.Bug) BugCacher {
 	}
 }
 
-func (c BugCache) Snapshot() bug.Snapshot {
+func (c BugCache) Snapshot() *bug.Snapshot {
 	if c.snap == nil {
 		snap := c.bug.Compile()
 		c.snap = &snap
 	}
-	return *c.snap
+	return c.snap
 }
 
 func (c BugCache) ClearSnapshot() {

commands/webui.go 🔗

@@ -2,12 +2,13 @@ package commands
 
 import (
 	"fmt"
-	"github.com/MichaelMure/git-bug/graphql"
+	"github.com/MichaelMure/git-bug/graphql2"
 	"github.com/MichaelMure/git-bug/webui"
 	"github.com/gorilla/mux"
 	"github.com/phayes/freeport"
 	"github.com/skratchdot/open-golang/open"
 	"github.com/spf13/cobra"
+	"github.com/vektah/gqlgen/handler"
 	"log"
 	"net/http"
 )
@@ -28,16 +29,11 @@ func runWebUI(cmd *cobra.Command, args []string) error {
 
 	fmt.Printf("Web UI available at %s\n", webUiAddr)
 
-	graphqlHandler, err := graphql.NewHandler(repo)
-
-	if err != nil {
-		return err
-	}
-
 	router := mux.NewRouter()
 
 	// Routes
-	router.Path("/graphql").Handler(graphqlHandler)
+	router.Path("/playground").Handler(handler.Playground("git-bug", "/graphql"))
+	router.Path("/graphql").Handler(graphql2.NewHandler(repo))
 	router.PathPrefix("/").Handler(http.FileServer(webui.WebUIAssets))
 
 	open.Run(webUiAddr)

graphql2/gqlgen.yml 🔗

@@ -1,10 +1,17 @@
 schema: schema.graphql
 exec:
-  filename: gen/graph.go
+  filename: resolvers/generated_graph.go
 model:
-  filename: gen/model.go
+  filename: resolvers/generated_model.go
 
 models:
+  Repository:
+    fields:
+        bug:
+          resolver: true
+        allBugs:
+          resolver: true
+#    model: github.com/MichaelMure/git-bug/graphql2/resolvers.repoResolver
   Bug:
     model: github.com/MichaelMure/git-bug/bug.Snapshot
   Comment:

graphql2/handler.go 🔗

@@ -0,0 +1,18 @@
+//go:generate gorunpkg github.com/vektah/gqlgen
+
+package graphql2
+
+import (
+	"github.com/MichaelMure/git-bug/graphql2/resolvers"
+	"github.com/MichaelMure/git-bug/repository"
+	"github.com/vektah/gqlgen/handler"
+	"net/http"
+)
+
+func NewHandler(repo repository.Repo) http.Handler {
+	backend := resolvers.NewRootResolver()
+
+	backend.RegisterDefaultRepository(repo)
+
+	return handler.GraphQL(resolvers.NewExecutableSchema(backend))
+}

graphql2/relay.go 🔗

@@ -0,0 +1,39 @@
+package graphql2
+
+import (
+	"encoding/base64"
+	"strings"
+)
+
+
+type ResolvedGlobalID struct {
+	Type string `json:"type"`
+	ID   string `json:"id"`
+}
+
+// ToGlobalID takes a type name and an ID specific to that type name, and
+// returns a "global ID" that is unique among all types.
+func ToGlobalID(ttype string, id string) string {
+	str := ttype + ":" + id
+	encStr := base64.StdEncoding.EncodeToString([]byte(str))
+	return encStr
+}
+
+// FromGlobalID takes the "global ID" created by ToGlobalID, and returns the
+// type name and ID used to create it.
+func FromGlobalID(globalID string) *ResolvedGlobalID {
+	strID := ""
+	b, err := base64.StdEncoding.DecodeString(globalID)
+	if err == nil {
+		strID = string(b)
+	}
+	tokens := strings.Split(strID, ":")
+	if len(tokens) < 2 {
+		return nil
+	}
+	return &ResolvedGlobalID{
+		Type: tokens[0],
+		ID:   tokens[1],
+	}
+}
+

graphql2/resolvers.go 🔗

@@ -1,82 +0,0 @@
-package graphql2
-
-import (
-	"context"
-	"fmt"
-	"github.com/MichaelMure/git-bug/bug"
-	"github.com/MichaelMure/git-bug/bug/operations"
-	"github.com/MichaelMure/git-bug/cache"
-	"github.com/MichaelMure/git-bug/graphql2/gen"
-	"time"
-)
-
-type Backend struct {
-	cache cache.RootCache
-}
-
-func (*Backend) Bug_labels(ctx context.Context, obj *bug.Snapshot) ([]*bug.Label, error) {
-	return obj.Labels
-}
-
-func (*Backend) LabelChangeOperation_added(ctx context.Context, obj *operations.LabelChangeOperation) ([]*bug.Label, error) {
-	panic("implement me")
-}
-
-func (*Backend) LabelChangeOperation_removed(ctx context.Context, obj *operations.LabelChangeOperation) ([]*bug.Label, error) {
-	panic("implement me")
-}
-
-func (*Backend) AddCommentOperation_date(ctx context.Context, obj *operations.AddCommentOperation) (time.Time, error) {
-	return obj.Time(), nil
-}
-
-func (*Backend) Bug_status(ctx context.Context, obj *bug.Snapshot) (gen.Status, error) {
-	return convertStatus(obj.Status)
-}
-
-func (*Backend) Bug_comments(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int, query *string) (gen.CommentConnection, error) {
-	panic("implement me")
-}
-
-func (*Backend) Bug_operations(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int, query *string) (gen.OperationConnection, error) {
-	panic("implement me")
-}
-
-func (*Backend) CreateOperation_date(ctx context.Context, obj *operations.CreateOperation) (time.Time, error) {
-	return obj.Time(), nil
-}
-
-func (*Backend) LabelChangeOperation_date(ctx context.Context, obj *operations.LabelChangeOperation) (time.Time, error) {
-	return obj.Time(), nil
-}
-
-func (*Backend) RootQuery_allBugs(ctx context.Context, after *string, before *string, first *int, last *int, query *string) (gen.BugConnection, error) {
-	panic("implement me")
-}
-
-func (*Backend) RootQuery_bug(ctx context.Context, id string) (*bug.Snapshot, error) {
-	panic("implement me")
-}
-
-func (*Backend) SetStatusOperation_date(ctx context.Context, obj *operations.SetStatusOperation) (time.Time, error) {
-	return obj.Time(), nil
-}
-
-func (*Backend) SetStatusOperation_status(ctx context.Context, obj *operations.SetStatusOperation) (gen.Status, error) {
-	return convertStatus(obj.Status)
-}
-
-func (*Backend) SetTitleOperation_date(ctx context.Context, obj *operations.SetTitleOperation) (time.Time, error) {
-	return obj.Time(), nil
-}
-
-func convertStatus(status bug.Status) (gen.Status, error) {
-	switch status {
-	case bug.OpenStatus:
-		return gen.StatusOpen, nil
-	case bug.ClosedStatus:
-		return gen.StatusClosed, nil
-	}
-
-	return "", fmt.Errorf("Unknown status")
-}

graphql2/resolvers/bug.go 🔗

@@ -0,0 +1,61 @@
+package resolvers
+
+import (
+	"context"
+	"github.com/MichaelMure/git-bug/bug"
+	"github.com/MichaelMure/git-bug/cache"
+)
+
+type bugResolver struct {
+	cache cache.Cacher
+}
+
+func (bugResolver) Status(ctx context.Context, obj *bug.Snapshot) (Status, error) {
+	return convertStatus(obj.Status)
+}
+
+func (bugResolver) Comments(ctx context.Context, obj *bug.Snapshot, input ConnectionInput) (CommentConnection, error) {
+	var connection CommentConnection
+
+	edger := func(comment bug.Comment, offset int) Edge {
+		return CommentEdge{
+			Node:   comment,
+			Cursor: offsetToCursor(offset),
+		}
+	}
+
+	edges, pageInfo, err := BugCommentPaginate(obj.Comments, edger, input)
+
+	if err != nil {
+		return connection, err
+	}
+
+	connection.Edges = edges
+	connection.PageInfo = pageInfo
+	connection.TotalCount = len(obj.Comments)
+
+	return connection, nil
+}
+
+func (bugResolver) Operations(ctx context.Context, obj *bug.Snapshot, input ConnectionInput) (OperationConnection, error) {
+	var connection OperationConnection
+
+	edger := func(op bug.Operation, offset int) Edge {
+		return OperationEdge{
+			Node:   op.(OperationUnion),
+			Cursor: offsetToCursor(offset),
+		}
+	}
+
+	edges, pageInfo, err := BugOperationPaginate(obj.Operations, edger, input)
+
+	if err != nil {
+		return connection, err
+	}
+
+	connection.Edges = edges
+	connection.PageInfo = pageInfo
+	connection.TotalCount = len(obj.Operations)
+
+	return connection, nil
+}

graphql2/gen/graph.go → graphql2/resolvers/generated_graph.go 🔗

@@ -1,6 +1,6 @@
 // Code generated by github.com/vektah/gqlgen, DO NOT EDIT.
 
-package gen
+package resolvers
 
 import (
 	"bytes"
@@ -31,18 +31,19 @@ type Resolvers interface {
 	AddCommentOperation_date(ctx context.Context, obj *operations.AddCommentOperation) (time.Time, error)
 
 	Bug_status(ctx context.Context, obj *bug.Snapshot) (Status, error)
-	Bug_labels(ctx context.Context, obj *bug.Snapshot) ([]*bug.Label, error)
-	Bug_comments(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int, query *string) (CommentConnection, error)
-	Bug_operations(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int, query *string) (OperationConnection, error)
+
+	Bug_comments(ctx context.Context, obj *bug.Snapshot, input ConnectionInput) (CommentConnection, error)
+	Bug_operations(ctx context.Context, obj *bug.Snapshot, input ConnectionInput) (OperationConnection, error)
 
 	CreateOperation_date(ctx context.Context, obj *operations.CreateOperation) (time.Time, error)
 
 	LabelChangeOperation_date(ctx context.Context, obj *operations.LabelChangeOperation) (time.Time, error)
-	LabelChangeOperation_added(ctx context.Context, obj *operations.LabelChangeOperation) ([]*bug.Label, error)
-	LabelChangeOperation_removed(ctx context.Context, obj *operations.LabelChangeOperation) ([]*bug.Label, error)
 
-	RootQuery_allBugs(ctx context.Context, after *string, before *string, first *int, last *int, query *string) (BugConnection, error)
-	RootQuery_bug(ctx context.Context, id string) (*bug.Snapshot, error)
+	Query_defaultRepository(ctx context.Context) (*repoResolver, error)
+	Query_repository(ctx context.Context, id string) (*repoResolver, error)
+
+	Repository_allBugs(ctx context.Context, obj *repoResolver, input ConnectionInput) (BugConnection, error)
+	Repository_bug(ctx context.Context, obj *repoResolver, prefix string) (*bug.Snapshot, error)
 
 	SetStatusOperation_date(ctx context.Context, obj *operations.SetStatusOperation) (time.Time, error)
 	SetStatusOperation_status(ctx context.Context, obj *operations.SetStatusOperation) (Status, error)
@@ -55,7 +56,8 @@ type ResolverRoot interface {
 	Bug() BugResolver
 	CreateOperation() CreateOperationResolver
 	LabelChangeOperation() LabelChangeOperationResolver
-	RootQuery() RootQueryResolver
+	Query() QueryResolver
+	Repository() RepositoryResolver
 	SetStatusOperation() SetStatusOperationResolver
 	SetTitleOperation() SetTitleOperationResolver
 }
@@ -64,21 +66,23 @@ type AddCommentOperationResolver interface {
 }
 type BugResolver interface {
 	Status(ctx context.Context, obj *bug.Snapshot) (Status, error)
-	Labels(ctx context.Context, obj *bug.Snapshot) ([]*bug.Label, error)
-	Comments(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int, query *string) (CommentConnection, error)
-	Operations(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int, query *string) (OperationConnection, error)
+
+	Comments(ctx context.Context, obj *bug.Snapshot, input ConnectionInput) (CommentConnection, error)
+	Operations(ctx context.Context, obj *bug.Snapshot, input ConnectionInput) (OperationConnection, error)
 }
 type CreateOperationResolver interface {
 	Date(ctx context.Context, obj *operations.CreateOperation) (time.Time, error)
 }
 type LabelChangeOperationResolver interface {
 	Date(ctx context.Context, obj *operations.LabelChangeOperation) (time.Time, error)
-	Added(ctx context.Context, obj *operations.LabelChangeOperation) ([]*bug.Label, error)
-	Removed(ctx context.Context, obj *operations.LabelChangeOperation) ([]*bug.Label, error)
 }
-type RootQueryResolver interface {
-	AllBugs(ctx context.Context, after *string, before *string, first *int, last *int, query *string) (BugConnection, error)
-	Bug(ctx context.Context, id string) (*bug.Snapshot, error)
+type QueryResolver interface {
+	DefaultRepository(ctx context.Context) (*repoResolver, error)
+	Repository(ctx context.Context, id string) (*repoResolver, error)
+}
+type RepositoryResolver interface {
+	AllBugs(ctx context.Context, obj *repoResolver, input ConnectionInput) (BugConnection, error)
+	Bug(ctx context.Context, obj *repoResolver, prefix string) (*bug.Snapshot, error)
 }
 type SetStatusOperationResolver interface {
 	Date(ctx context.Context, obj *operations.SetStatusOperation) (time.Time, error)
@@ -100,16 +104,12 @@ func (s shortMapper) Bug_status(ctx context.Context, obj *bug.Snapshot) (Status,
 	return s.r.Bug().Status(ctx, obj)
 }
 
-func (s shortMapper) Bug_labels(ctx context.Context, obj *bug.Snapshot) ([]*bug.Label, error) {
-	return s.r.Bug().Labels(ctx, obj)
+func (s shortMapper) Bug_comments(ctx context.Context, obj *bug.Snapshot, input ConnectionInput) (CommentConnection, error) {
+	return s.r.Bug().Comments(ctx, obj, input)
 }
 
-func (s shortMapper) Bug_comments(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int, query *string) (CommentConnection, error) {
-	return s.r.Bug().Comments(ctx, obj, after, before, first, last, query)
-}
-
-func (s shortMapper) Bug_operations(ctx context.Context, obj *bug.Snapshot, after *string, before *string, first *int, last *int, query *string) (OperationConnection, error) {
-	return s.r.Bug().Operations(ctx, obj, after, before, first, last, query)
+func (s shortMapper) Bug_operations(ctx context.Context, obj *bug.Snapshot, input ConnectionInput) (OperationConnection, error) {
+	return s.r.Bug().Operations(ctx, obj, input)
 }
 
 func (s shortMapper) CreateOperation_date(ctx context.Context, obj *operations.CreateOperation) (time.Time, error) {
@@ -120,20 +120,20 @@ func (s shortMapper) LabelChangeOperation_date(ctx context.Context, obj *operati
 	return s.r.LabelChangeOperation().Date(ctx, obj)
 }
 
-func (s shortMapper) LabelChangeOperation_added(ctx context.Context, obj *operations.LabelChangeOperation) ([]*bug.Label, error) {
-	return s.r.LabelChangeOperation().Added(ctx, obj)
+func (s shortMapper) Query_defaultRepository(ctx context.Context) (*repoResolver, error) {
+	return s.r.Query().DefaultRepository(ctx)
 }
 
-func (s shortMapper) LabelChangeOperation_removed(ctx context.Context, obj *operations.LabelChangeOperation) ([]*bug.Label, error) {
-	return s.r.LabelChangeOperation().Removed(ctx, obj)
+func (s shortMapper) Query_repository(ctx context.Context, id string) (*repoResolver, error) {
+	return s.r.Query().Repository(ctx, id)
 }
 
-func (s shortMapper) RootQuery_allBugs(ctx context.Context, after *string, before *string, first *int, last *int, query *string) (BugConnection, error) {
-	return s.r.RootQuery().AllBugs(ctx, after, before, first, last, query)
+func (s shortMapper) Repository_allBugs(ctx context.Context, obj *repoResolver, input ConnectionInput) (BugConnection, error) {
+	return s.r.Repository().AllBugs(ctx, obj, input)
 }
 
-func (s shortMapper) RootQuery_bug(ctx context.Context, id string) (*bug.Snapshot, error) {
-	return s.r.RootQuery().Bug(ctx, id)
+func (s shortMapper) Repository_bug(ctx context.Context, obj *repoResolver, prefix string) (*bug.Snapshot, error) {
+	return s.r.Repository().Bug(ctx, obj, prefix)
 }
 
 func (s shortMapper) SetStatusOperation_date(ctx context.Context, obj *operations.SetStatusOperation) (time.Time, error) {
@@ -160,7 +160,7 @@ func (e *executableSchema) Query(ctx context.Context, op *query.Operation) *grap
 	ec := executionContext{graphql.GetRequestContext(ctx), e.resolvers}
 
 	buf := ec.RequestMiddleware(ctx, func(ctx context.Context) []byte {
-		data := ec._RootQuery(ctx, op.Selections)
+		data := ec._Query(ctx, op.Selections)
 		var buf bytes.Buffer
 		data.MarshalGQL(&buf)
 		return buf.Bytes()
@@ -265,7 +265,7 @@ func (ec *executionContext) _AddCommentOperation_message(ctx context.Context, fi
 	return graphql.MarshalString(res)
 }
 
-var bugImplementors = []string{"Bug", "Authored", "Commentable"}
+var bugImplementors = []string{"Bug"}
 
 // nolint: gocyclo, errcheck, gas, goconst
 func (ec *executionContext) _Bug(ctx context.Context, sel []query.Selection, obj *bug.Snapshot) graphql.Marshaler {
@@ -364,124 +364,37 @@ func (ec *executionContext) _Bug_status(ctx context.Context, field graphql.Colle
 }
 
 func (ec *executionContext) _Bug_labels(ctx context.Context, field graphql.CollectedField, obj *bug.Snapshot) graphql.Marshaler {
-	ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{
-		Object: "Bug",
-		Args:   nil,
-		Field:  field,
-	})
-	return graphql.Defer(func() (ret graphql.Marshaler) {
-		defer func() {
-			if r := recover(); r != nil {
-				userErr := ec.Recover(ctx, r)
-				ec.Error(ctx, userErr)
-				ret = graphql.Null
-			}
-		}()
-
-		resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) {
-			return ec.resolvers.Bug_labels(ctx, obj)
-		})
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-		if resTmp == nil {
-			return graphql.Null
-		}
-		res := resTmp.([]*bug.Label)
-		arr1 := graphql.Array{}
-		for idx1 := range res {
-			arr1 = append(arr1, func() graphql.Marshaler {
-				rctx := graphql.GetResolverContext(ctx)
-				rctx.PushIndex(idx1)
-				defer rctx.Pop()
-				if res[idx1] == nil {
-					return graphql.Null
-				}
-				return *res[idx1]
-			}())
-		}
-		return arr1
-	})
+	rctx := graphql.GetResolverContext(ctx)
+	rctx.Object = "Bug"
+	rctx.Args = nil
+	rctx.Field = field
+	rctx.PushField(field.Alias)
+	defer rctx.Pop()
+	res := obj.Labels
+	arr1 := graphql.Array{}
+	for idx1 := range res {
+		arr1 = append(arr1, func() graphql.Marshaler {
+			rctx := graphql.GetResolverContext(ctx)
+			rctx.PushIndex(idx1)
+			defer rctx.Pop()
+			return res[idx1]
+		}())
+	}
+	return arr1
 }
 
 func (ec *executionContext) _Bug_comments(ctx context.Context, field graphql.CollectedField, obj *bug.Snapshot) graphql.Marshaler {
 	args := map[string]interface{}{}
-	var arg0 *string
-	if tmp, ok := field.Args["after"]; ok {
+	var arg0 ConnectionInput
+	if tmp, ok := field.Args["input"]; ok {
 		var err error
-		var ptr1 string
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalString(tmp)
-			arg0 = &ptr1
-		}
-
+		arg0, err = UnmarshalConnectionInput(tmp)
 		if err != nil {
 			ec.Error(ctx, err)
 			return graphql.Null
 		}
 	}
-	args["after"] = arg0
-	var arg1 *string
-	if tmp, ok := field.Args["before"]; ok {
-		var err error
-		var ptr1 string
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalString(tmp)
-			arg1 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["before"] = arg1
-	var arg2 *int
-	if tmp, ok := field.Args["first"]; ok {
-		var err error
-		var ptr1 int
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalInt(tmp)
-			arg2 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["first"] = arg2
-	var arg3 *int
-	if tmp, ok := field.Args["last"]; ok {
-		var err error
-		var ptr1 int
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalInt(tmp)
-			arg3 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["last"] = arg3
-	var arg4 *string
-	if tmp, ok := field.Args["query"]; ok {
-		var err error
-		var ptr1 string
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalString(tmp)
-			arg4 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["query"] = arg4
+	args["input"] = arg0
 	ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{
 		Object: "Bug",
 		Args:   args,
@@ -497,7 +410,7 @@ func (ec *executionContext) _Bug_comments(ctx context.Context, field graphql.Col
 		}()
 
 		resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) {
-			return ec.resolvers.Bug_comments(ctx, obj, args["after"].(*string), args["before"].(*string), args["first"].(*int), args["last"].(*int), args["query"].(*string))
+			return ec.resolvers.Bug_comments(ctx, obj, args["input"].(ConnectionInput))
 		})
 		if err != nil {
 			ec.Error(ctx, err)
@@ -513,81 +426,16 @@ func (ec *executionContext) _Bug_comments(ctx context.Context, field graphql.Col
 
 func (ec *executionContext) _Bug_operations(ctx context.Context, field graphql.CollectedField, obj *bug.Snapshot) graphql.Marshaler {
 	args := map[string]interface{}{}
-	var arg0 *string
-	if tmp, ok := field.Args["after"]; ok {
-		var err error
-		var ptr1 string
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalString(tmp)
-			arg0 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["after"] = arg0
-	var arg1 *string
-	if tmp, ok := field.Args["before"]; ok {
-		var err error
-		var ptr1 string
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalString(tmp)
-			arg1 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["before"] = arg1
-	var arg2 *int
-	if tmp, ok := field.Args["first"]; ok {
-		var err error
-		var ptr1 int
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalInt(tmp)
-			arg2 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["first"] = arg2
-	var arg3 *int
-	if tmp, ok := field.Args["last"]; ok {
-		var err error
-		var ptr1 int
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalInt(tmp)
-			arg3 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["last"] = arg3
-	var arg4 *string
-	if tmp, ok := field.Args["query"]; ok {
+	var arg0 ConnectionInput
+	if tmp, ok := field.Args["input"]; ok {
 		var err error
-		var ptr1 string
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalString(tmp)
-			arg4 = &ptr1
-		}
-
+		arg0, err = UnmarshalConnectionInput(tmp)
 		if err != nil {
 			ec.Error(ctx, err)
 			return graphql.Null
 		}
 	}
-	args["query"] = arg4
+	args["input"] = arg0
 	ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{
 		Object: "Bug",
 		Args:   args,
@@ -603,7 +451,7 @@ func (ec *executionContext) _Bug_operations(ctx context.Context, field graphql.C
 		}()
 
 		resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) {
-			return ec.resolvers.Bug_operations(ctx, obj, args["after"].(*string), args["before"].(*string), args["first"].(*int), args["last"].(*int), args["query"].(*string))
+			return ec.resolvers.Bug_operations(ctx, obj, args["input"].(ConnectionInput))
 		})
 		if err != nil {
 			ec.Error(ctx, err)
@@ -632,8 +480,6 @@ func (ec *executionContext) _BugConnection(ctx context.Context, sel []query.Sele
 			out.Values[i] = graphql.MarshalString("BugConnection")
 		case "edges":
 			out.Values[i] = ec._BugConnection_edges(ctx, field, obj)
-		case "nodes":
-			out.Values[i] = ec._BugConnection_nodes(ctx, field, obj)
 		case "pageInfo":
 			out.Values[i] = ec._BugConnection_pageInfo(ctx, field, obj)
 		case "totalCount":
@@ -669,29 +515,6 @@ func (ec *executionContext) _BugConnection_edges(ctx context.Context, field grap
 	return arr1
 }
 
-func (ec *executionContext) _BugConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *BugConnection) graphql.Marshaler {
-	rctx := graphql.GetResolverContext(ctx)
-	rctx.Object = "BugConnection"
-	rctx.Args = nil
-	rctx.Field = field
-	rctx.PushField(field.Alias)
-	defer rctx.Pop()
-	res := obj.Nodes
-	arr1 := graphql.Array{}
-	for idx1 := range res {
-		arr1 = append(arr1, func() graphql.Marshaler {
-			rctx := graphql.GetResolverContext(ctx)
-			rctx.PushIndex(idx1)
-			defer rctx.Pop()
-			if res[idx1] == nil {
-				return graphql.Null
-			}
-			return ec._Bug(ctx, field.Selections, res[idx1])
-		}())
-	}
-	return arr1
-}
-
 func (ec *executionContext) _BugConnection_pageInfo(ctx context.Context, field graphql.CollectedField, obj *BugConnection) graphql.Marshaler {
 	rctx := graphql.GetResolverContext(ctx)
 	rctx.Object = "BugConnection"
@@ -758,10 +581,7 @@ func (ec *executionContext) _BugEdge_node(ctx context.Context, field graphql.Col
 	rctx.PushField(field.Alias)
 	defer rctx.Pop()
 	res := obj.Node
-	if res == nil {
-		return graphql.Null
-	}
-	return ec._Bug(ctx, field.Selections, res)
+	return ec._Bug(ctx, field.Selections, &res)
 }
 
 var commentImplementors = []string{"Comment", "Authored"}
@@ -826,8 +646,6 @@ func (ec *executionContext) _CommentConnection(ctx context.Context, sel []query.
 			out.Values[i] = graphql.MarshalString("CommentConnection")
 		case "edges":
 			out.Values[i] = ec._CommentConnection_edges(ctx, field, obj)
-		case "nodes":
-			out.Values[i] = ec._CommentConnection_nodes(ctx, field, obj)
 		case "pageInfo":
 			out.Values[i] = ec._CommentConnection_pageInfo(ctx, field, obj)
 		case "totalCount":
@@ -854,33 +672,7 @@ func (ec *executionContext) _CommentConnection_edges(ctx context.Context, field
 			rctx := graphql.GetResolverContext(ctx)
 			rctx.PushIndex(idx1)
 			defer rctx.Pop()
-			if res[idx1] == nil {
-				return graphql.Null
-			}
-			return ec._CommentEdge(ctx, field.Selections, res[idx1])
-		}())
-	}
-	return arr1
-}
-
-func (ec *executionContext) _CommentConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *CommentConnection) graphql.Marshaler {
-	rctx := graphql.GetResolverContext(ctx)
-	rctx.Object = "CommentConnection"
-	rctx.Args = nil
-	rctx.Field = field
-	rctx.PushField(field.Alias)
-	defer rctx.Pop()
-	res := obj.Nodes
-	arr1 := graphql.Array{}
-	for idx1 := range res {
-		arr1 = append(arr1, func() graphql.Marshaler {
-			rctx := graphql.GetResolverContext(ctx)
-			rctx.PushIndex(idx1)
-			defer rctx.Pop()
-			if res[idx1] == nil {
-				return graphql.Null
-			}
-			return ec._Comment(ctx, field.Selections, res[idx1])
+			return ec._CommentEdge(ctx, field.Selections, &res[idx1])
 		}())
 	}
 	return arr1
@@ -1118,87 +910,43 @@ func (ec *executionContext) _LabelChangeOperation_date(ctx context.Context, fiel
 }
 
 func (ec *executionContext) _LabelChangeOperation_added(ctx context.Context, field graphql.CollectedField, obj *operations.LabelChangeOperation) graphql.Marshaler {
-	ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{
-		Object: "LabelChangeOperation",
-		Args:   nil,
-		Field:  field,
-	})
-	return graphql.Defer(func() (ret graphql.Marshaler) {
-		defer func() {
-			if r := recover(); r != nil {
-				userErr := ec.Recover(ctx, r)
-				ec.Error(ctx, userErr)
-				ret = graphql.Null
-			}
-		}()
-
-		resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) {
-			return ec.resolvers.LabelChangeOperation_added(ctx, obj)
-		})
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-		if resTmp == nil {
-			return graphql.Null
-		}
-		res := resTmp.([]*bug.Label)
-		arr1 := graphql.Array{}
-		for idx1 := range res {
-			arr1 = append(arr1, func() graphql.Marshaler {
-				rctx := graphql.GetResolverContext(ctx)
-				rctx.PushIndex(idx1)
-				defer rctx.Pop()
-				if res[idx1] == nil {
-					return graphql.Null
-				}
-				return *res[idx1]
-			}())
-		}
-		return arr1
-	})
+	rctx := graphql.GetResolverContext(ctx)
+	rctx.Object = "LabelChangeOperation"
+	rctx.Args = nil
+	rctx.Field = field
+	rctx.PushField(field.Alias)
+	defer rctx.Pop()
+	res := obj.Added
+	arr1 := graphql.Array{}
+	for idx1 := range res {
+		arr1 = append(arr1, func() graphql.Marshaler {
+			rctx := graphql.GetResolverContext(ctx)
+			rctx.PushIndex(idx1)
+			defer rctx.Pop()
+			return res[idx1]
+		}())
+	}
+	return arr1
 }
 
 func (ec *executionContext) _LabelChangeOperation_removed(ctx context.Context, field graphql.CollectedField, obj *operations.LabelChangeOperation) graphql.Marshaler {
-	ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{
-		Object: "LabelChangeOperation",
-		Args:   nil,
-		Field:  field,
-	})
-	return graphql.Defer(func() (ret graphql.Marshaler) {
-		defer func() {
-			if r := recover(); r != nil {
-				userErr := ec.Recover(ctx, r)
-				ec.Error(ctx, userErr)
-				ret = graphql.Null
-			}
-		}()
-
-		resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) {
-			return ec.resolvers.LabelChangeOperation_removed(ctx, obj)
-		})
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-		if resTmp == nil {
-			return graphql.Null
-		}
-		res := resTmp.([]*bug.Label)
-		arr1 := graphql.Array{}
-		for idx1 := range res {
-			arr1 = append(arr1, func() graphql.Marshaler {
-				rctx := graphql.GetResolverContext(ctx)
-				rctx.PushIndex(idx1)
-				defer rctx.Pop()
-				if res[idx1] == nil {
-					return graphql.Null
-				}
-				return *res[idx1]
-			}())
-		}
-		return arr1
-	})
+	rctx := graphql.GetResolverContext(ctx)
+	rctx.Object = "LabelChangeOperation"
+	rctx.Args = nil
+	rctx.Field = field
+	rctx.PushField(field.Alias)
+	defer rctx.Pop()
+	res := obj.Removed
+	arr1 := graphql.Array{}
+	for idx1 := range res {
+		arr1 = append(arr1, func() graphql.Marshaler {
+			rctx := graphql.GetResolverContext(ctx)
+			rctx.PushIndex(idx1)
+			defer rctx.Pop()
+			return res[idx1]
+		}())
+	}
+	return arr1
 }
 
 var operationConnectionImplementors = []string{"OperationConnection"}
@@ -1216,8 +964,6 @@ func (ec *executionContext) _OperationConnection(ctx context.Context, sel []quer
 			out.Values[i] = graphql.MarshalString("OperationConnection")
 		case "edges":
 			out.Values[i] = ec._OperationConnection_edges(ctx, field, obj)
-		case "nodes":
-			out.Values[i] = ec._OperationConnection_nodes(ctx, field, obj)
 		case "pageInfo":
 			out.Values[i] = ec._OperationConnection_pageInfo(ctx, field, obj)
 		case "totalCount":
@@ -1244,33 +990,7 @@ func (ec *executionContext) _OperationConnection_edges(ctx context.Context, fiel
 			rctx := graphql.GetResolverContext(ctx)
 			rctx.PushIndex(idx1)
 			defer rctx.Pop()
-			if res[idx1] == nil {
-				return graphql.Null
-			}
-			return ec._OperationEdge(ctx, field.Selections, res[idx1])
-		}())
-	}
-	return arr1
-}
-
-func (ec *executionContext) _OperationConnection_nodes(ctx context.Context, field graphql.CollectedField, obj *OperationConnection) graphql.Marshaler {
-	rctx := graphql.GetResolverContext(ctx)
-	rctx.Object = "OperationConnection"
-	rctx.Args = nil
-	rctx.Field = field
-	rctx.PushField(field.Alias)
-	defer rctx.Pop()
-	res := obj.Nodes
-	arr1 := graphql.Array{}
-	for idx1 := range res {
-		arr1 = append(arr1, func() graphql.Marshaler {
-			rctx := graphql.GetResolverContext(ctx)
-			rctx.PushIndex(idx1)
-			defer rctx.Pop()
-			if res[idx1] == nil {
-				return graphql.Null
-			}
-			return ec._OperationUnion(ctx, field.Selections, res[idx1])
+			return ec._OperationEdge(ctx, field.Selections, &res[idx1])
 		}())
 	}
 	return arr1
@@ -1362,10 +1082,6 @@ func (ec *executionContext) _PageInfo(ctx context.Context, sel []query.Selection
 			out.Values[i] = ec._PageInfo_hasNextPage(ctx, field, obj)
 		case "hasPreviousPage":
 			out.Values[i] = ec._PageInfo_hasPreviousPage(ctx, field, obj)
-		case "startCursor":
-			out.Values[i] = ec._PageInfo_startCursor(ctx, field, obj)
-		case "endCursor":
-			out.Values[i] = ec._PageInfo_endCursor(ctx, field, obj)
 		default:
 			panic("unknown field " + strconv.Quote(field.Name))
 		}
@@ -1396,34 +1112,6 @@ func (ec *executionContext) _PageInfo_hasPreviousPage(ctx context.Context, field
 	return graphql.MarshalBoolean(res)
 }
 
-func (ec *executionContext) _PageInfo_startCursor(ctx context.Context, field graphql.CollectedField, obj *PageInfo) graphql.Marshaler {
-	rctx := graphql.GetResolverContext(ctx)
-	rctx.Object = "PageInfo"
-	rctx.Args = nil
-	rctx.Field = field
-	rctx.PushField(field.Alias)
-	defer rctx.Pop()
-	res := obj.StartCursor
-	if res == nil {
-		return graphql.Null
-	}
-	return graphql.MarshalString(*res)
-}
-
-func (ec *executionContext) _PageInfo_endCursor(ctx context.Context, field graphql.CollectedField, obj *PageInfo) graphql.Marshaler {
-	rctx := graphql.GetResolverContext(ctx)
-	rctx.Object = "PageInfo"
-	rctx.Args = nil
-	rctx.Field = field
-	rctx.PushField(field.Alias)
-	defer rctx.Pop()
-	res := obj.EndCursor
-	if res == nil {
-		return graphql.Null
-	}
-	return graphql.MarshalString(*res)
-}
-
 var personImplementors = []string{"Person"}
 
 // nolint: gocyclo, errcheck, gas, goconst
@@ -1471,14 +1159,14 @@ func (ec *executionContext) _Person_name(ctx context.Context, field graphql.Coll
 	return graphql.MarshalString(res)
 }
 
-var rootQueryImplementors = []string{"RootQuery"}
+var queryImplementors = []string{"Query"}
 
 // nolint: gocyclo, errcheck, gas, goconst
-func (ec *executionContext) _RootQuery(ctx context.Context, sel []query.Selection) graphql.Marshaler {
-	fields := graphql.CollectFields(ec.Doc, sel, rootQueryImplementors, ec.Variables)
+func (ec *executionContext) _Query(ctx context.Context, sel []query.Selection) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.Doc, sel, queryImplementors, ec.Variables)
 
 	ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{
-		Object: "RootQuery",
+		Object: "Query",
 	})
 
 	out := graphql.NewOrderedMap(len(fields))
@@ -1487,15 +1175,15 @@ func (ec *executionContext) _RootQuery(ctx context.Context, sel []query.Selectio
 
 		switch field.Name {
 		case "__typename":
-			out.Values[i] = graphql.MarshalString("RootQuery")
-		case "allBugs":
-			out.Values[i] = ec._RootQuery_allBugs(ctx, field)
-		case "bug":
-			out.Values[i] = ec._RootQuery_bug(ctx, field)
+			out.Values[i] = graphql.MarshalString("Query")
+		case "defaultRepository":
+			out.Values[i] = ec._Query_defaultRepository(ctx, field)
+		case "repository":
+			out.Values[i] = ec._Query_repository(ctx, field)
 		case "__schema":
-			out.Values[i] = ec._RootQuery___schema(ctx, field)
+			out.Values[i] = ec._Query___schema(ctx, field)
 		case "__type":
-			out.Values[i] = ec._RootQuery___type(ctx, field)
+			out.Values[i] = ec._Query___type(ctx, field)
 		default:
 			panic("unknown field " + strconv.Quote(field.Name))
 		}
@@ -1504,86 +1192,10 @@ func (ec *executionContext) _RootQuery(ctx context.Context, sel []query.Selectio
 	return out
 }
 
-func (ec *executionContext) _RootQuery_allBugs(ctx context.Context, field graphql.CollectedField) graphql.Marshaler {
-	args := map[string]interface{}{}
-	var arg0 *string
-	if tmp, ok := field.Args["after"]; ok {
-		var err error
-		var ptr1 string
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalString(tmp)
-			arg0 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["after"] = arg0
-	var arg1 *string
-	if tmp, ok := field.Args["before"]; ok {
-		var err error
-		var ptr1 string
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalString(tmp)
-			arg1 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["before"] = arg1
-	var arg2 *int
-	if tmp, ok := field.Args["first"]; ok {
-		var err error
-		var ptr1 int
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalInt(tmp)
-			arg2 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["first"] = arg2
-	var arg3 *int
-	if tmp, ok := field.Args["last"]; ok {
-		var err error
-		var ptr1 int
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalInt(tmp)
-			arg3 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["last"] = arg3
-	var arg4 *string
-	if tmp, ok := field.Args["query"]; ok {
-		var err error
-		var ptr1 string
-		if tmp != nil {
-			ptr1, err = graphql.UnmarshalString(tmp)
-			arg4 = &ptr1
-		}
-
-		if err != nil {
-			ec.Error(ctx, err)
-			return graphql.Null
-		}
-	}
-	args["query"] = arg4
+func (ec *executionContext) _Query_defaultRepository(ctx context.Context, field graphql.CollectedField) graphql.Marshaler {
 	ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{
-		Object: "RootQuery",
-		Args:   args,
+		Object: "Query",
+		Args:   nil,
 		Field:  field,
 	})
 	return graphql.Defer(func() (ret graphql.Marshaler) {
@@ -1596,7 +1208,7 @@ func (ec *executionContext) _RootQuery_allBugs(ctx context.Context, field graphq
 		}()
 
 		resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) {
-			return ec.resolvers.RootQuery_allBugs(ctx, args["after"].(*string), args["before"].(*string), args["first"].(*int), args["last"].(*int), args["query"].(*string))
+			return ec.resolvers.Query_defaultRepository(ctx)
 		})
 		if err != nil {
 			ec.Error(ctx, err)
@@ -1605,12 +1217,15 @@ func (ec *executionContext) _RootQuery_allBugs(ctx context.Context, field graphq
 		if resTmp == nil {
 			return graphql.Null
 		}
-		res := resTmp.(BugConnection)
-		return ec._BugConnection(ctx, field.Selections, &res)
+		res := resTmp.(*repoResolver)
+		if res == nil {
+			return graphql.Null
+		}
+		return ec._Repository(ctx, field.Selections, res)
 	})
 }
 
-func (ec *executionContext) _RootQuery_bug(ctx context.Context, field graphql.CollectedField) graphql.Marshaler {
+func (ec *executionContext) _Query_repository(ctx context.Context, field graphql.CollectedField) graphql.Marshaler {
 	args := map[string]interface{}{}
 	var arg0 string
 	if tmp, ok := field.Args["id"]; ok {
@@ -1623,7 +1238,7 @@ func (ec *executionContext) _RootQuery_bug(ctx context.Context, field graphql.Co
 	}
 	args["id"] = arg0
 	ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{
-		Object: "RootQuery",
+		Object: "Query",
 		Args:   args,
 		Field:  field,
 	})
@@ -1637,7 +1252,7 @@ func (ec *executionContext) _RootQuery_bug(ctx context.Context, field graphql.Co
 		}()
 
 		resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) {
-			return ec.resolvers.RootQuery_bug(ctx, args["id"].(string))
+			return ec.resolvers.Query_repository(ctx, args["id"].(string))
 		})
 		if err != nil {
 			ec.Error(ctx, err)
@@ -1646,17 +1261,17 @@ func (ec *executionContext) _RootQuery_bug(ctx context.Context, field graphql.Co
 		if resTmp == nil {
 			return graphql.Null
 		}
-		res := resTmp.(*bug.Snapshot)
+		res := resTmp.(*repoResolver)
 		if res == nil {
 			return graphql.Null
 		}
-		return ec._Bug(ctx, field.Selections, res)
+		return ec._Repository(ctx, field.Selections, res)
 	})
 }
 
-func (ec *executionContext) _RootQuery___schema(ctx context.Context, field graphql.CollectedField) graphql.Marshaler {
+func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) graphql.Marshaler {
 	rctx := graphql.GetResolverContext(ctx)
-	rctx.Object = "RootQuery"
+	rctx.Object = "Query"
 	rctx.Args = nil
 	rctx.Field = field
 	rctx.PushField(field.Alias)
@@ -1668,7 +1283,7 @@ func (ec *executionContext) _RootQuery___schema(ctx context.Context, field graph
 	return ec.___Schema(ctx, field.Selections, res)
 }
 
-func (ec *executionContext) _RootQuery___type(ctx context.Context, field graphql.CollectedField) graphql.Marshaler {
+func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) graphql.Marshaler {
 	args := map[string]interface{}{}
 	var arg0 string
 	if tmp, ok := field.Args["name"]; ok {
@@ -1681,7 +1296,7 @@ func (ec *executionContext) _RootQuery___type(ctx context.Context, field graphql
 	}
 	args["name"] = arg0
 	rctx := graphql.GetResolverContext(ctx)
-	rctx.Object = "RootQuery"
+	rctx.Object = "Query"
 	rctx.Args = args
 	rctx.Field = field
 	rctx.PushField(field.Alias)
@@ -1693,6 +1308,116 @@ func (ec *executionContext) _RootQuery___type(ctx context.Context, field graphql
 	return ec.___Type(ctx, field.Selections, res)
 }
 
+var repositoryImplementors = []string{"Repository"}
+
+// nolint: gocyclo, errcheck, gas, goconst
+func (ec *executionContext) _Repository(ctx context.Context, sel []query.Selection, obj *repoResolver) graphql.Marshaler {
+	fields := graphql.CollectFields(ec.Doc, sel, repositoryImplementors, ec.Variables)
+
+	out := graphql.NewOrderedMap(len(fields))
+	for i, field := range fields {
+		out.Keys[i] = field.Alias
+
+		switch field.Name {
+		case "__typename":
+			out.Values[i] = graphql.MarshalString("Repository")
+		case "allBugs":
+			out.Values[i] = ec._Repository_allBugs(ctx, field, obj)
+		case "bug":
+			out.Values[i] = ec._Repository_bug(ctx, field, obj)
+		default:
+			panic("unknown field " + strconv.Quote(field.Name))
+		}
+	}
+
+	return out
+}
+
+func (ec *executionContext) _Repository_allBugs(ctx context.Context, field graphql.CollectedField, obj *repoResolver) graphql.Marshaler {
+	args := map[string]interface{}{}
+	var arg0 ConnectionInput
+	if tmp, ok := field.Args["input"]; ok {
+		var err error
+		arg0, err = UnmarshalConnectionInput(tmp)
+		if err != nil {
+			ec.Error(ctx, err)
+			return graphql.Null
+		}
+	}
+	args["input"] = arg0
+	ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{
+		Object: "Repository",
+		Args:   args,
+		Field:  field,
+	})
+	return graphql.Defer(func() (ret graphql.Marshaler) {
+		defer func() {
+			if r := recover(); r != nil {
+				userErr := ec.Recover(ctx, r)
+				ec.Error(ctx, userErr)
+				ret = graphql.Null
+			}
+		}()
+
+		resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) {
+			return ec.resolvers.Repository_allBugs(ctx, obj, args["input"].(ConnectionInput))
+		})
+		if err != nil {
+			ec.Error(ctx, err)
+			return graphql.Null
+		}
+		if resTmp == nil {
+			return graphql.Null
+		}
+		res := resTmp.(BugConnection)
+		return ec._BugConnection(ctx, field.Selections, &res)
+	})
+}
+
+func (ec *executionContext) _Repository_bug(ctx context.Context, field graphql.CollectedField, obj *repoResolver) graphql.Marshaler {
+	args := map[string]interface{}{}
+	var arg0 string
+	if tmp, ok := field.Args["prefix"]; ok {
+		var err error
+		arg0, err = graphql.UnmarshalString(tmp)
+		if err != nil {
+			ec.Error(ctx, err)
+			return graphql.Null
+		}
+	}
+	args["prefix"] = arg0
+	ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{
+		Object: "Repository",
+		Args:   args,
+		Field:  field,
+	})
+	return graphql.Defer(func() (ret graphql.Marshaler) {
+		defer func() {
+			if r := recover(); r != nil {
+				userErr := ec.Recover(ctx, r)
+				ec.Error(ctx, userErr)
+				ret = graphql.Null
+			}
+		}()
+
+		resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) {
+			return ec.resolvers.Repository_bug(ctx, obj, args["prefix"].(string))
+		})
+		if err != nil {
+			ec.Error(ctx, err)
+			return graphql.Null
+		}
+		if resTmp == nil {
+			return graphql.Null
+		}
+		res := resTmp.(*bug.Snapshot)
+		if res == nil {
+			return graphql.Null
+		}
+		return ec._Bug(ctx, field.Selections, res)
+	})
+}
+
 var setStatusOperationImplementors = []string{"SetStatusOperation", "Operation", "Authored"}
 
 // nolint: gocyclo, errcheck, gas, goconst

graphql2/gen/model.go → graphql2/resolvers/generated_model.go 🔗

@@ -1,6 +1,6 @@
 // Code generated by github.com/vektah/gqlgen, DO NOT EDIT.
 
-package gen
+package resolvers
 
 import (
 	fmt "fmt"
@@ -12,32 +12,34 @@ import (
 
 type Authored interface{}
 type BugConnection struct {
-	Edges      []*BugEdge      `json:"edges"`
-	Nodes      []*bug.Snapshot `json:"nodes"`
-	PageInfo   PageInfo        `json:"pageInfo"`
-	TotalCount int             `json:"totalCount"`
+	Edges      []*BugEdge `json:"edges"`
+	PageInfo   PageInfo   `json:"pageInfo"`
+	TotalCount int        `json:"totalCount"`
 }
 type BugEdge struct {
-	Cursor string        `json:"cursor"`
-	Node   *bug.Snapshot `json:"node"`
+	Cursor string       `json:"cursor"`
+	Node   bug.Snapshot `json:"node"`
 }
 type CommentConnection struct {
-	Edges      []*CommentEdge `json:"edges"`
-	Nodes      []*bug.Comment `json:"nodes"`
-	PageInfo   PageInfo       `json:"pageInfo"`
-	TotalCount int            `json:"totalCount"`
+	Edges      []CommentEdge `json:"edges"`
+	PageInfo   PageInfo      `json:"pageInfo"`
+	TotalCount int           `json:"totalCount"`
 }
 type CommentEdge struct {
 	Cursor string      `json:"cursor"`
 	Node   bug.Comment `json:"node"`
 }
-type Commentable interface{}
+type ConnectionInput struct {
+	After  *string `json:"after"`
+	Before *string `json:"before"`
+	First  *int    `json:"first"`
+	Last   *int    `json:"last"`
+}
 type Operation interface{}
 type OperationConnection struct {
-	Edges      []*OperationEdge  `json:"edges"`
-	Nodes      []*OperationUnion `json:"nodes"`
-	PageInfo   PageInfo          `json:"pageInfo"`
-	TotalCount int               `json:"totalCount"`
+	Edges      []OperationEdge `json:"edges"`
+	PageInfo   PageInfo        `json:"pageInfo"`
+	TotalCount int             `json:"totalCount"`
 }
 type OperationEdge struct {
 	Cursor string         `json:"cursor"`
@@ -45,10 +47,8 @@ type OperationEdge struct {
 }
 type OperationUnion interface{}
 type PageInfo struct {
-	HasNextPage     bool    `json:"hasNextPage"`
-	HasPreviousPage bool    `json:"hasPreviousPage"`
-	StartCursor     *string `json:"startCursor"`
-	EndCursor       *string `json:"endCursor"`
+	HasNextPage     bool `json:"hasNextPage"`
+	HasPreviousPage bool `json:"hasPreviousPage"`
 }
 
 type Status string

graphql2/resolvers/operations.go 🔗

@@ -0,0 +1,54 @@
+package resolvers
+
+import (
+	"context"
+	"fmt"
+	"github.com/MichaelMure/git-bug/bug"
+	"github.com/MichaelMure/git-bug/bug/operations"
+	"time"
+)
+
+type addCommentOperationResolver struct{}
+
+func (addCommentOperationResolver) Date(ctx context.Context, obj *operations.AddCommentOperation) (time.Time, error) {
+	return obj.Time(), nil
+}
+
+type createOperationResolver struct{}
+
+func (createOperationResolver) Date(ctx context.Context, obj *operations.CreateOperation) (time.Time, error) {
+	return obj.Time(), nil
+}
+
+type labelChangeOperation struct{}
+
+func (labelChangeOperation) Date(ctx context.Context, obj *operations.LabelChangeOperation) (time.Time, error) {
+	return obj.Time(), nil
+}
+
+type setStatusOperationResolver struct{}
+
+func (setStatusOperationResolver) Date(ctx context.Context, obj *operations.SetStatusOperation) (time.Time, error) {
+	return obj.Time(), nil
+}
+
+func (setStatusOperationResolver) Status(ctx context.Context, obj *operations.SetStatusOperation) (Status, error) {
+	return convertStatus(obj.Status)
+}
+
+type setTitleOperationResolver struct{}
+
+func (setTitleOperationResolver) Date(ctx context.Context, obj *operations.SetTitleOperation) (time.Time, error) {
+	return obj.Time(), nil
+}
+
+func convertStatus(status bug.Status) (Status, error) {
+	switch status {
+	case bug.OpenStatus:
+		return StatusOpen, nil
+	case bug.ClosedStatus:
+		return StatusClosed, nil
+	}
+
+	return "", fmt.Errorf("Unknown status")
+}

graphql2/resolvers/pager_bug.go 🔗

@@ -0,0 +1,225 @@
+// This file was automatically generated by genny.
+// Any changes will be lost if this file is regenerated.
+// see https://github.com/cheekybits/genny
+
+package resolvers
+
+import (
+	"fmt"
+
+	"github.com/MichaelMure/git-bug/bug"
+)
+
+type BugSnapshotEdger func(value bug.Snapshot, offset int) Edge
+
+func BugSnapshotPaginate(source []bug.Snapshot, edger BugSnapshotEdger, input ConnectionInput) ([]BugEdge, PageInfo, error) {
+	var result []BugEdge
+	var pageInfo PageInfo
+
+	offset := 0
+
+	if input.After != nil {
+		for i, value := range source {
+			edge := edger(value, i)
+			if edge.GetCursor() == *input.After {
+				// remove all previous elements, including the "after" one
+				source = source[i+1:]
+				offset = i + 1
+				break
+			}
+		}
+	}
+
+	if input.Before != nil {
+		for i, value := range source {
+			edge := edger(value, i+offset)
+
+			if edge.GetCursor() == *input.Before {
+				// remove all following elements, including the "before" one
+				break
+			}
+
+			result = append(result, edge.(BugEdge))
+		}
+	} else {
+		result = make([]BugEdge, len(source))
+
+		for i, value := range source {
+			result[i] = edger(value, i+offset).(BugEdge)
+		}
+	}
+
+	if input.First != nil {
+		if *input.First < 0 {
+			return nil, PageInfo{}, fmt.Errorf("first less than zero")
+		}
+
+		if len(result) > *input.First {
+			// Slice result to be of length first by removing edges from the end
+			result = result[:*input.First]
+			pageInfo.HasNextPage = true
+		}
+	}
+
+	if input.Last != nil {
+		if *input.Last < 0 {
+			return nil, PageInfo{}, fmt.Errorf("last less than zero")
+		}
+
+		if len(result) > *input.Last {
+			// Slice result to be of length last by removing edges from the start
+			result = result[len(result)-*input.Last:]
+			pageInfo.HasPreviousPage = true
+		}
+	}
+
+	return result, pageInfo, nil
+}
+
+// Apply the before/after cursor params to the source and return an array of edges
+//func ApplyCursorToEdges(source []interface{}, edger Edger, input ConnectionInput) []Edge {
+//	var result []Edge
+//
+//	if input.After != nil {
+//		for i, value := range source {
+//			edge := edger(value)
+//			if edge.Cursor() == *input.After {
+//				// remove all previous element including the "after" one
+//				source = source[i+1:]
+//				break
+//			}
+//		}
+//	}
+//
+//	if input.Before != nil {
+//		for _, value := range source {
+//			edge := edger(value)
+//
+//			if edge.Cursor() == *input.Before {
+//				// remove all after element including the "before" one
+//				break
+//			}
+//
+//			result = append(result, edge)
+//		}
+//	} else {
+//		result = make([]Edge, len(source))
+//
+//		for i, value := range source {
+//			result[i] = edger(value)
+//		}
+//	}
+//
+//	return result
+//}
+
+// Apply the first/last cursor params to the edges
+//func EdgesToReturn(edges []Edge, input ConnectionInput) ([]Edge, PageInfo, error) {
+//	hasPreviousPage := false
+//	hasNextPage := false
+//
+//	if input.First != nil {
+//		if *input.First < 0 {
+//			return nil, nil, fmt.Errorf("first less than zero")
+//		}
+//
+//		if len(edges) > *input.First {
+//			// Slice result to be of length first by removing edges from the end
+//			edges = edges[:*input.First]
+//			hasNextPage = true
+//		}
+//	}
+//
+//	if input.Last != nil {
+//		if *input.Last < 0 {
+//			return nil, nil, fmt.Errorf("last less than zero")
+//		}
+//
+//		if len(edges) > *input.Last {
+//			// Slice result to be of length last by removing edges from the start
+//			edges = edges[len(edges)-*input.Last:]
+//			hasPreviousPage = true
+//		}
+//	}
+//
+//	pageInfo := PageInfo{
+//		HasNextPage:     hasNextPage,
+//		HasPreviousPage: hasPreviousPage,
+//	}
+//
+//	return edges, pageInfo, nil
+//}
+
+//func EdgesToReturn(allEdges []Edge, before *cursor, after *cursor, first *int, last *int) ([]Edge, error) {
+//	result := ApplyCursorToEdges(allEdges, before, after)
+//
+//	if first != nil {
+//		if *first < 0 {
+//			return nil, fmt.Errorf("first less than zero")
+//		}
+//
+//		if len(result) > *first {
+//			// Slice result to be of length first by removing edges from the end
+//			result = result[:*first]
+//		}
+//	}
+//
+//	if last != nil {
+//		if *last < 0 {
+//			return nil, fmt.Errorf("last less than zero")
+//		}
+//
+//		if len(result) > *last {
+//			// Slice result to be of length last by removing edges from the start
+//			result = result[len(result)-*last:]
+//		}
+//	}
+//
+//	return result, nil
+//}
+
+//func ApplyCursorToEdges(allEdges []Edge, before *cursor, after *cursor) []Edge {
+//	result := allEdges
+//
+//	if after != nil {
+//		for i, edge := range result {
+//			if edge.Cursor() == *after {
+//				// remove all previous element including the "after" one
+//				result = result[i+1:]
+//				break
+//			}
+//		}
+//	}
+//
+//	if before != nil {
+//		for i, edge := range result {
+//			if edge.Cursor() == *before {
+//				// remove all after element including the "before" one
+//				result = result[:i]
+//			}
+//		}
+//	}
+//
+//	return result
+//}
+
+//func HasPreviousPage(allEdges []Edge, before *cursor, after *cursor, last *int) bool {
+//	if last != nil {
+//		edges := ApplyCursorToEdges(allEdges, before, after)
+//		return len(edges) > *last
+//	}
+//
+//	// TODO: handle "after", but according to the spec it's ok to return false
+//
+//	return false
+//}
+//
+//func HasNextPage(allEdges []Edge, before *cursor, after *cursor, first *int) bool {
+//	if first != nil {
+//		edges := ApplyCursorToEdges(allEdges, before, after)
+//		return len(edges) > *first
+//	}
+//
+//	// TODO: handle "before", but according to the spec it's ok to return false
+//
+//	return false

graphql2/resolvers/pager_comment.go 🔗

@@ -0,0 +1,225 @@
+// This file was automatically generated by genny.
+// Any changes will be lost if this file is regenerated.
+// see https://github.com/cheekybits/genny
+
+package resolvers
+
+import (
+	"fmt"
+
+	"github.com/MichaelMure/git-bug/bug"
+)
+
+type BugCommentEdger func(value bug.Comment, offset int) Edge
+
+func BugCommentPaginate(source []bug.Comment, edger BugCommentEdger, input ConnectionInput) ([]CommentEdge, PageInfo, error) {
+	var result []CommentEdge
+	var pageInfo PageInfo
+
+	offset := 0
+
+	if input.After != nil {
+		for i, value := range source {
+			edge := edger(value, i)
+			if edge.GetCursor() == *input.After {
+				// remove all previous elements, including the "after" one
+				source = source[i+1:]
+				offset = i + 1
+				break
+			}
+		}
+	}
+
+	if input.Before != nil {
+		for i, value := range source {
+			edge := edger(value, i+offset)
+
+			if edge.GetCursor() == *input.Before {
+				// remove all following elements, including the "before" one
+				break
+			}
+
+			result = append(result, edge.(CommentEdge))
+		}
+	} else {
+		result = make([]CommentEdge, len(source))
+
+		for i, value := range source {
+			result[i] = edger(value, i+offset).(CommentEdge)
+		}
+	}
+
+	if input.First != nil {
+		if *input.First < 0 {
+			return nil, PageInfo{}, fmt.Errorf("first less than zero")
+		}
+
+		if len(result) > *input.First {
+			// Slice result to be of length first by removing edges from the end
+			result = result[:*input.First]
+			pageInfo.HasNextPage = true
+		}
+	}
+
+	if input.Last != nil {
+		if *input.Last < 0 {
+			return nil, PageInfo{}, fmt.Errorf("last less than zero")
+		}
+
+		if len(result) > *input.Last {
+			// Slice result to be of length last by removing edges from the start
+			result = result[len(result)-*input.Last:]
+			pageInfo.HasPreviousPage = true
+		}
+	}
+
+	return result, pageInfo, nil
+}
+
+// Apply the before/after cursor params to the source and return an array of edges
+//func ApplyCursorToEdges(source []interface{}, edger Edger, input ConnectionInput) []Edge {
+//	var result []Edge
+//
+//	if input.After != nil {
+//		for i, value := range source {
+//			edge := edger(value)
+//			if edge.Cursor() == *input.After {
+//				// remove all previous element including the "after" one
+//				source = source[i+1:]
+//				break
+//			}
+//		}
+//	}
+//
+//	if input.Before != nil {
+//		for _, value := range source {
+//			edge := edger(value)
+//
+//			if edge.Cursor() == *input.Before {
+//				// remove all after element including the "before" one
+//				break
+//			}
+//
+//			result = append(result, edge)
+//		}
+//	} else {
+//		result = make([]Edge, len(source))
+//
+//		for i, value := range source {
+//			result[i] = edger(value)
+//		}
+//	}
+//
+//	return result
+//}
+
+// Apply the first/last cursor params to the edges
+//func EdgesToReturn(edges []Edge, input ConnectionInput) ([]Edge, PageInfo, error) {
+//	hasPreviousPage := false
+//	hasNextPage := false
+//
+//	if input.First != nil {
+//		if *input.First < 0 {
+//			return nil, nil, fmt.Errorf("first less than zero")
+//		}
+//
+//		if len(edges) > *input.First {
+//			// Slice result to be of length first by removing edges from the end
+//			edges = edges[:*input.First]
+//			hasNextPage = true
+//		}
+//	}
+//
+//	if input.Last != nil {
+//		if *input.Last < 0 {
+//			return nil, nil, fmt.Errorf("last less than zero")
+//		}
+//
+//		if len(edges) > *input.Last {
+//			// Slice result to be of length last by removing edges from the start
+//			edges = edges[len(edges)-*input.Last:]
+//			hasPreviousPage = true
+//		}
+//	}
+//
+//	pageInfo := PageInfo{
+//		HasNextPage:     hasNextPage,
+//		HasPreviousPage: hasPreviousPage,
+//	}
+//
+//	return edges, pageInfo, nil
+//}
+
+//func EdgesToReturn(allEdges []Edge, before *cursor, after *cursor, first *int, last *int) ([]Edge, error) {
+//	result := ApplyCursorToEdges(allEdges, before, after)
+//
+//	if first != nil {
+//		if *first < 0 {
+//			return nil, fmt.Errorf("first less than zero")
+//		}
+//
+//		if len(result) > *first {
+//			// Slice result to be of length first by removing edges from the end
+//			result = result[:*first]
+//		}
+//	}
+//
+//	if last != nil {
+//		if *last < 0 {
+//			return nil, fmt.Errorf("last less than zero")
+//		}
+//
+//		if len(result) > *last {
+//			// Slice result to be of length last by removing edges from the start
+//			result = result[len(result)-*last:]
+//		}
+//	}
+//
+//	return result, nil
+//}
+
+//func ApplyCursorToEdges(allEdges []Edge, before *cursor, after *cursor) []Edge {
+//	result := allEdges
+//
+//	if after != nil {
+//		for i, edge := range result {
+//			if edge.Cursor() == *after {
+//				// remove all previous element including the "after" one
+//				result = result[i+1:]
+//				break
+//			}
+//		}
+//	}
+//
+//	if before != nil {
+//		for i, edge := range result {
+//			if edge.Cursor() == *before {
+//				// remove all after element including the "before" one
+//				result = result[:i]
+//			}
+//		}
+//	}
+//
+//	return result
+//}
+
+//func HasPreviousPage(allEdges []Edge, before *cursor, after *cursor, last *int) bool {
+//	if last != nil {
+//		edges := ApplyCursorToEdges(allEdges, before, after)
+//		return len(edges) > *last
+//	}
+//
+//	// TODO: handle "after", but according to the spec it's ok to return false
+//
+//	return false
+//}
+//
+//func HasNextPage(allEdges []Edge, before *cursor, after *cursor, first *int) bool {
+//	if first != nil {
+//		edges := ApplyCursorToEdges(allEdges, before, after)
+//		return len(edges) > *first
+//	}
+//
+//	// TODO: handle "before", but according to the spec it's ok to return false
+//
+//	return false

graphql2/resolvers/pager_operation.go 🔗

@@ -0,0 +1,225 @@
+// This file was automatically generated by genny.
+// Any changes will be lost if this file is regenerated.
+// see https://github.com/cheekybits/genny
+
+package resolvers
+
+import (
+	"fmt"
+
+	"github.com/MichaelMure/git-bug/bug"
+)
+
+type BugOperationEdger func(value bug.Operation, offset int) Edge
+
+func BugOperationPaginate(source []bug.Operation, edger BugOperationEdger, input ConnectionInput) ([]OperationEdge, PageInfo, error) {
+	var result []OperationEdge
+	var pageInfo PageInfo
+
+	offset := 0
+
+	if input.After != nil {
+		for i, value := range source {
+			edge := edger(value, i)
+			if edge.GetCursor() == *input.After {
+				// remove all previous elements, including the "after" one
+				source = source[i+1:]
+				offset = i + 1
+				break
+			}
+		}
+	}
+
+	if input.Before != nil {
+		for i, value := range source {
+			edge := edger(value, i+offset)
+
+			if edge.GetCursor() == *input.Before {
+				// stop here: drop the "before" element and everything after it
+				break
+			}
+
+			result = append(result, edge.(OperationEdge))
+		}
+	} else {
+		result = make([]OperationEdge, len(source))
+
+		for i, value := range source {
+			result[i] = edger(value, i+offset).(OperationEdge)
+		}
+	}
+
+	if input.First != nil {
+		if *input.First < 0 {
+			return nil, PageInfo{}, fmt.Errorf("first less than zero")
+		}
+
+		if len(result) > *input.First {
+			// Slice result to be of length first by removing edges from the end
+			result = result[:*input.First]
+			pageInfo.HasNextPage = true
+		}
+	}
+
+	if input.Last != nil {
+		if *input.Last < 0 {
+			return nil, PageInfo{}, fmt.Errorf("last less than zero")
+		}
+
+		if len(result) > *input.Last {
+			// Slice result to be of length last by removing edges from the start
+			result = result[len(result)-*input.Last:]
+			pageInfo.HasPreviousPage = true
+		}
+	}
+
+	return result, pageInfo, nil
+}
+
+// Apply the before/after cursor params to the source and return an array of edges
+//func ApplyCursorToEdges(source []interface{}, edger Edger, input ConnectionInput) []Edge {
+//	var result []Edge
+//
+//	if input.After != nil {
+//		for i, value := range source {
+//			edge := edger(value)
+//			if edge.Cursor() == *input.After {
+//				// remove all previous element including the "after" one
+//				source = source[i+1:]
+//				break
+//			}
+//		}
+//	}
+//
+//	if input.Before != nil {
+//		for _, value := range source {
+//			edge := edger(value)
+//
+//			if edge.Cursor() == *input.Before {
+//				// remove all after element including the "before" one
+//				break
+//			}
+//
+//			result = append(result, edge)
+//		}
+//	} else {
+//		result = make([]Edge, len(source))
+//
+//		for i, value := range source {
+//			result[i] = edger(value)
+//		}
+//	}
+//
+//	return result
+//}
+
+// Apply the first/last cursor params to the edges
+//func EdgesToReturn(edges []Edge, input ConnectionInput) ([]Edge, PageInfo, error) {
+//	hasPreviousPage := false
+//	hasNextPage := false
+//
+//	if input.First != nil {
+//		if *input.First < 0 {
+//			return nil, nil, fmt.Errorf("first less than zero")
+//		}
+//
+//		if len(edges) > *input.First {
+//			// Slice result to be of length first by removing edges from the end
+//			edges = edges[:*input.First]
+//			hasNextPage = true
+//		}
+//	}
+//
+//	if input.Last != nil {
+//		if *input.Last < 0 {
+//			return nil, nil, fmt.Errorf("last less than zero")
+//		}
+//
+//		if len(edges) > *input.Last {
+//			// Slice result to be of length last by removing edges from the start
+//			edges = edges[len(edges)-*input.Last:]
+//			hasPreviousPage = true
+//		}
+//	}
+//
+//	pageInfo := PageInfo{
+//		HasNextPage:     hasNextPage,
+//		HasPreviousPage: hasPreviousPage,
+//	}
+//
+//	return edges, pageInfo, nil
+//}
+
+//func EdgesToReturn(allEdges []Edge, before *cursor, after *cursor, first *int, last *int) ([]Edge, error) {
+//	result := ApplyCursorToEdges(allEdges, before, after)
+//
+//	if first != nil {
+//		if *first < 0 {
+//			return nil, fmt.Errorf("first less than zero")
+//		}
+//
+//		if len(result) > *first {
+//			// Slice result to be of length first by removing edges from the end
+//			result = result[:*first]
+//		}
+//	}
+//
+//	if last != nil {
+//		if *last < 0 {
+//			return nil, fmt.Errorf("last less than zero")
+//		}
+//
+//		if len(result) > *last {
+//			// Slice result to be of length last by removing edges from the start
+//			result = result[len(result)-*last:]
+//		}
+//	}
+//
+//	return result, nil
+//}
+
+//func ApplyCursorToEdges(allEdges []Edge, before *cursor, after *cursor) []Edge {
+//	result := allEdges
+//
+//	if after != nil {
+//		for i, edge := range result {
+//			if edge.Cursor() == *after {
+//				// remove all previous element including the "after" one
+//				result = result[i+1:]
+//				break
+//			}
+//		}
+//	}
+//
+//	if before != nil {
+//		for i, edge := range result {
+//			if edge.Cursor() == *before {
+//				// remove all after element including the "before" one
+//				result = result[:i]
+//			}
+//		}
+//	}
+//
+//	return result
+//}
+
+//func HasPreviousPage(allEdges []Edge, before *cursor, after *cursor, last *int) bool {
+//	if last != nil {
+//		edges := ApplyCursorToEdges(allEdges, before, after)
+//		return len(edges) > *last
+//	}
+//
+//	// TODO: handle "after", but according to the spec it's ok to return false
+//
+//	return false
+//}
+//
+//func HasNextPage(allEdges []Edge, before *cursor, after *cursor, first *int) bool {
+//	if first != nil {
+//		edges := ApplyCursorToEdges(allEdges, before, after)
+//		return len(edges) > *first
+//	}
+//
+//	// TODO: handle "before", but according to the spec it's ok to return false
+//
+//	return false

graphql2/resolvers/pagers.go 🔗

@@ -0,0 +1,51 @@
+//go:generate genny -in=pagers_template.go -out=pager_bug.go gen "NodeType=bug.Snapshot EdgeType=BugEdge"
+//go:generate genny -in=pagers_template.go -out=pager_operation.go gen "NodeType=bug.Operation EdgeType=OperationEdge"
+//go:generate genny -in=pagers_template.go -out=pager_comment.go gen "NodeType=bug.Comment EdgeType=CommentEdge"
+
+package resolvers
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const cursorPrefix = "cursor:"
+
+type Edge interface {
+	GetCursor() string
+}
+
+// offsetToCursor creates the opaque cursor string encoding the given offset.
+func offsetToCursor(offset int) string {
+	str := fmt.Sprintf("%v%v", cursorPrefix, offset)
+	return base64.StdEncoding.EncodeToString([]byte(str))
+}
+
+// cursorToOffset re-derives the offset from the opaque cursor string.
+func cursorToOffset(cursor string) (int, error) {
+	str := ""
+	b, err := base64.StdEncoding.DecodeString(cursor)
+	if err == nil {
+		str = string(b)
+	}
+	str = strings.Replace(str, cursorPrefix, "", -1)
+	offset, err := strconv.Atoi(str)
+	if err != nil {
+		return 0, fmt.Errorf("Invalid cursor")
+	}
+	return offset, nil
+}
+
+func (e OperationEdge) GetCursor() string {
+	return e.Cursor
+}
+
+func (e BugEdge) GetCursor() string {
+	return e.Cursor
+}
+
+func (e CommentEdge) GetCursor() string {
+	return e.Cursor
+}

graphql2/resolvers/pagers_template.go 🔗

@@ -0,0 +1,224 @@
+package resolvers
+
+import (
+	"fmt"
+	"github.com/cheekybits/genny/generic"
+)
+
+type NodeType generic.Type
+type EdgeType generic.Type
+
+type NodeTypeEdger func(value NodeType, offset int) Edge
+
+func NodeTypePaginate(source []NodeType, edger NodeTypeEdger, input ConnectionInput) ([]EdgeType, PageInfo, error) {
+	var result []EdgeType
+	var pageInfo PageInfo
+
+	offset := 0
+
+	if input.After != nil {
+		for i, value := range source {
+			edge := edger(value, i)
+			if edge.GetCursor() == *input.After {
+				// remove all previous elements, including the "after" one
+				source = source[i+1:]
+				offset = i + 1
+				break
+			}
+		}
+	}
+
+	if input.Before != nil {
+		for i, value := range source {
+			edge := edger(value, i+offset)
+
+			if edge.GetCursor() == *input.Before {
+				// stop here: drop the "before" element and everything after it
+				break
+			}
+
+			result = append(result, edge.(EdgeType))
+		}
+	} else {
+		result = make([]EdgeType, len(source))
+
+		for i, value := range source {
+			result[i] = edger(value, i+offset).(EdgeType)
+		}
+	}
+
+	if input.First != nil {
+		if *input.First < 0 {
+			return nil, PageInfo{}, fmt.Errorf("first less than zero")
+		}
+
+		if len(result) > *input.First {
+			// Slice result to be of length first by removing edges from the end
+			result = result[:*input.First]
+			pageInfo.HasNextPage = true
+		}
+	}
+
+	if input.Last != nil {
+		if *input.Last < 0 {
+			return nil, PageInfo{}, fmt.Errorf("last less than zero")
+		}
+
+		if len(result) > *input.Last {
+			// Slice result to be of length last by removing edges from the start
+			result = result[len(result)-*input.Last:]
+			pageInfo.HasPreviousPage = true
+		}
+	}
+
+	return result, pageInfo, nil
+}
+
+// Apply the before/after cursor params to the source and return an array of edges
+//func ApplyCursorToEdges(source []interface{}, edger Edger, input ConnectionInput) []Edge {
+//	var result []Edge
+//
+//	if input.After != nil {
+//		for i, value := range source {
+//			edge := edger(value)
+//			if edge.Cursor() == *input.After {
+//				// remove all previous element including the "after" one
+//				source = source[i+1:]
+//				break
+//			}
+//		}
+//	}
+//
+//	if input.Before != nil {
+//		for _, value := range source {
+//			edge := edger(value)
+//
+//			if edge.Cursor() == *input.Before {
+//				// remove all after element including the "before" one
+//				break
+//			}
+//
+//			result = append(result, edge)
+//		}
+//	} else {
+//		result = make([]Edge, len(source))
+//
+//		for i, value := range source {
+//			result[i] = edger(value)
+//		}
+//	}
+//
+//	return result
+//}
+
+// Apply the first/last cursor params to the edges
+//func EdgesToReturn(edges []Edge, input ConnectionInput) ([]Edge, PageInfo, error) {
+//	hasPreviousPage := false
+//	hasNextPage := false
+//
+//	if input.First != nil {
+//		if *input.First < 0 {
+//			return nil, nil, fmt.Errorf("first less than zero")
+//		}
+//
+//		if len(edges) > *input.First {
+//			// Slice result to be of length first by removing edges from the end
+//			edges = edges[:*input.First]
+//			hasNextPage = true
+//		}
+//	}
+//
+//	if input.Last != nil {
+//		if *input.Last < 0 {
+//			return nil, nil, fmt.Errorf("last less than zero")
+//		}
+//
+//		if len(edges) > *input.Last {
+//			// Slice result to be of length last by removing edges from the start
+//			edges = edges[len(edges)-*input.Last:]
+//			hasPreviousPage = true
+//		}
+//	}
+//
+//	pageInfo := PageInfo{
+//		HasNextPage:     hasNextPage,
+//		HasPreviousPage: hasPreviousPage,
+//	}
+//
+//	return edges, pageInfo, nil
+//}
+
+//func EdgesToReturn(allEdges []Edge, before *cursor, after *cursor, first *int, last *int) ([]Edge, error) {
+//	result := ApplyCursorToEdges(allEdges, before, after)
+//
+//	if first != nil {
+//		if *first < 0 {
+//			return nil, fmt.Errorf("first less than zero")
+//		}
+//
+//		if len(result) > *first {
+//			// Slice result to be of length first by removing edges from the end
+//			result = result[:*first]
+//		}
+//	}
+//
+//	if last != nil {
+//		if *last < 0 {
+//			return nil, fmt.Errorf("last less than zero")
+//		}
+//
+//		if len(result) > *last {
+//			// Slice result to be of length last by removing edges from the start
+//			result = result[len(result)-*last:]
+//		}
+//	}
+//
+//	return result, nil
+//}
+
+//func ApplyCursorToEdges(allEdges []Edge, before *cursor, after *cursor) []Edge {
+//	result := allEdges
+//
+//	if after != nil {
+//		for i, edge := range result {
+//			if edge.Cursor() == *after {
+//				// remove all previous element including the "after" one
+//				result = result[i+1:]
+//				break
+//			}
+//		}
+//	}
+//
+//	if before != nil {
+//		for i, edge := range result {
+//			if edge.Cursor() == *before {
+//				// remove all after element including the "before" one
+//				result = result[:i]
+//			}
+//		}
+//	}
+//
+//	return result
+//}
+
+//func HasPreviousPage(allEdges []Edge, before *cursor, after *cursor, last *int) bool {
+//	if last != nil {
+//		edges := ApplyCursorToEdges(allEdges, before, after)
+//		return len(edges) > *last
+//	}
+//
+//	// TODO: handle "after", but according to the spec it's ok to return false
+//
+//	return false
+//}
+//
+//func HasNextPage(allEdges []Edge, before *cursor, after *cursor, first *int) bool {
+//	if first != nil {
+//		edges := ApplyCursorToEdges(allEdges, before, after)
+//		return len(edges) > *first
+//	}
+//
+//	// TODO: handle "before", but according to the spec it's ok to return false
+//
+//	return false
+//}

graphql2/resolvers/query.go 🔗

@@ -0,0 +1,36 @@
+package resolvers
+
+import (
+	"context"
+	"github.com/MichaelMure/git-bug/cache"
+)
+
+type rootQueryResolver struct {
+	cache cache.Cacher
+}
+
+func (r rootQueryResolver) DefaultRepository(ctx context.Context) (*repoResolver, error) {
+	repo, err := r.cache.DefaultRepo()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &repoResolver{
+		cache: r.cache,
+		repo:  repo,
+	}, nil
+}
+
+func (r rootQueryResolver) Repository(ctx context.Context, id string) (*repoResolver, error) {
+	repo, err := r.cache.ResolveRepo(id)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &repoResolver{
+		cache: r.cache,
+		repo:  repo,
+	}, nil
+}

graphql2/resolvers/repo.go 🔗

@@ -0,0 +1,26 @@
+package resolvers
+
+import (
+	"context"
+	"github.com/MichaelMure/git-bug/bug"
+	"github.com/MichaelMure/git-bug/cache"
+)
+
+type repoResolver struct {
+	cache cache.Cacher
+	repo  cache.RepoCacher
+}
+
+func (repoResolver) AllBugs(ctx context.Context, obj *repoResolver, input ConnectionInput) (BugConnection, error) {
+	panic("implement me")
+}
+
+func (repoResolver) Bug(ctx context.Context, obj *repoResolver, prefix string) (*bug.Snapshot, error) {
+	b, err := obj.repo.ResolveBugPrefix(prefix)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return b.Snapshot(), nil
+}

graphql2/resolvers/root.go 🔗

@@ -0,0 +1,53 @@
+package resolvers
+
+import (
+	"github.com/MichaelMure/git-bug/cache"
+)
+
+type RootResolver struct {
+	cache.RootCache
+}
+
+func NewRootResolver() *RootResolver {
+	return &RootResolver{
+		RootCache: cache.NewCache(),
+	}
+}
+
+func (r RootResolver) Query() QueryResolver {
+	return &rootQueryResolver{
+		cache: &r.RootCache,
+	}
+}
+
+func (RootResolver) AddCommentOperation() AddCommentOperationResolver {
+	return &addCommentOperationResolver{}
+}
+
+func (r RootResolver) Bug() BugResolver {
+	return &bugResolver{
+		cache: &r.RootCache,
+	}
+}
+
+func (RootResolver) CreateOperation() CreateOperationResolver {
+	return &createOperationResolver{}
+}
+
+func (RootResolver) LabelChangeOperation() LabelChangeOperationResolver {
+	return &labelChangeOperation{}
+}
+
+func (r RootResolver) Repository() RepositoryResolver {
+	return &repoResolver{
+		cache: &r.RootCache,
+	}
+}
+
+func (RootResolver) SetStatusOperation() SetStatusOperationResolver {
+	return &setStatusOperationResolver{}
+}
+
+func (RootResolver) SetTitleOperation() SetTitleOperationResolver {
+	return &setTitleOperationResolver{}
+}

graphql2/schema.graphql 🔗

@@ -1,7 +1,3 @@
-schema {
-  query: RootQuery
-}
-
 scalar Time
 scalar Label
 
@@ -14,10 +10,24 @@ type PageInfo {
   hasPreviousPage: Boolean!
 
   # When paginating backwards, the cursor to continue.
-  startCursor: String
+#  startCursor: String
 
   # When paginating forwards, the cursor to continue.
-  endCursor: String
+#  endCursor: String
+}
+
+input ConnectionInput {
+  # Returns the elements in the list that come after the specified cursor.
+  after: String
+
+  # Returns the elements in the list that come before the specified cursor.
+  before: String
+
+  # Returns the first _n_ elements from the list.
+  first: Int
+
+  # Returns the last _n_ elements from the list.
+  last: Int
 }
 
 # Represents an person in a git object.
@@ -31,8 +41,7 @@ type Person {
 
 
 type CommentConnection {
-  edges: [CommentEdge]
-  nodes: [Comment]
+  edges: [CommentEdge!]!
   pageInfo: PageInfo!
   totalCount: Int!
 }
@@ -42,23 +51,6 @@ type CommentEdge {
   node: Comment!
 }
 
-interface Commentable {
-  # A list of comments associated with the object.
-  comments(
-    # Returns the elements in the list that come after the specified cursor.
-    after: String
-
-    # Returns the elements in the list that come before the specified cursor.
-    before: String
-
-    # Returns the first _n_ elements from the list.
-    first: Int
-
-    # Returns the last _n_ elements from the list.
-    last: Int
-  ): CommentConnection!
-}
-
 # Represents a comment on a bug.
 type Comment implements Authored {
   # The author of this comment.
@@ -80,15 +72,14 @@ interface Authored {
 }
 
 type OperationConnection {
-  edges: [OperationEdge]!
-  nodes: [OperationUnion]!
+  edges: [OperationEdge!]!
   pageInfo: PageInfo!
   totalCount: Int!
 }
 
 type OperationEdge {
   cursor: String!
-  node: OperationUnion
+  node: OperationUnion!
 }
 
 # An operation applied to a bug.
@@ -133,8 +124,8 @@ type LabelChangeOperation implements Operation, Authored {
   author: Person!
   date: Time!
 
-  added: [Label]!
-  removed: [Label]!
+  added: [Label!]!
+  removed: [Label!]!
 }
 
 union OperationUnion =
@@ -144,14 +135,11 @@ union OperationUnion =
   | SetStatusOperation
   | LabelChangeOperation
 
-# The connection type for Label.
+# The connection type for Bug.
 type BugConnection {
   # A list of edges.
   edges: [BugEdge]!
 
-  # A list of nodes.
-  nodes: [Bug]!
-
   # Information to aid in pagination.
   pageInfo: PageInfo!
 
@@ -165,69 +153,29 @@ type BugEdge {
   cursor: String!
 
   # The item at the end of the edge.
-  node: Bug
+  node: Bug!
 }
 
-type Bug implements Authored, Commentable {
+type Bug {
   id: String!
   humanId: String!
   title: String!
   status: Status!
 
   # A list of labels associated with the repository.
-  labels: [Label]!
+  labels: [Label!]!
 
-  comments(
-    # Returns the elements in the list that come after the specified cursor.
-    after: String
+  comments(input: ConnectionInput!): CommentConnection!
 
-    # Returns the elements in the list that come before the specified cursor.
-    before: String
-
-    # Returns the first _n_ elements from the list.
-    first: Int
-
-    # Returns the last _n_ elements from the list.
-    last: Int
-
-    # If provided, searches comments by name and description.
-    query: String
-  ): CommentConnection!
-
-  operations(
-    # Returns the elements in the list that come after the specified cursor.
-    after: String
-
-    # Returns the elements in the list that come before the specified cursor.
-    before: String
-
-    # Returns the first _n_ elements from the list.
-    first: Int
-
-    # Returns the last _n_ elements from the list.
-    last: Int
-
-    # If provided, searches operations by name and description.
-    query: String
-  ): OperationConnection!
+  operations(input: ConnectionInput!): OperationConnection!
 }
 
-type RootQuery {
-  allBugs(
-    # Returns the elements in the list that come after the specified cursor.
-    after: String
-
-    # Returns the elements in the list that come before the specified cursor.
-    before: String
-
-    # Returns the first _n_ elements from the list.
-    first: Int
-
-    # Returns the last _n_ elements from the list.
-    last: Int
+type Repository {
+  allBugs(input: ConnectionInput!): BugConnection!
+  bug(prefix: String!): Bug
+}
 
-    # If provided, searches labels by name and description.
-    query: String
-  ): BugConnection!
-  bug(id: String!): Bug
+type Query {
+  defaultRepository: Repository
+  repository(id: String!): Repository
 }

vendor/github.com/cheekybits/genny/LICENSE 🔗

@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 cheekybits
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

vendor/github.com/cheekybits/genny/generic/generic.go 🔗

@@ -0,0 +1,13 @@
+package generic
+
+// Type is the placeholder type that indicates a generic value.
+// When genny is executed, variables of this type will be replaced with
+// references to the specific types.
+//      var GenericType generic.Type
+type Type interface{}
+
+// Number is the placehoder type that indiccates a generic numerical value.
+// When genny is executed, variables of this type will be replaced with
+// references to the specific types.
+//      var GenericType generic.Number
+type Number float64

vendor/github.com/gorilla/websocket/.gitignore 🔗

@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+.idea/
+*.iml

vendor/github.com/gorilla/websocket/.travis.yml 🔗

@@ -0,0 +1,19 @@
+language: go
+sudo: false
+
+matrix:
+  include:
+    - go: 1.4
+    - go: 1.5
+    - go: 1.6
+    - go: 1.7
+    - go: 1.8
+    - go: tip
+  allow_failures:
+    - go: tip
+
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d .)
+  - go vet $(go list ./... | grep -v /vendor/)
+  - go test -v -race ./...

vendor/github.com/gorilla/websocket/AUTHORS 🔗

@@ -0,0 +1,8 @@
+# This is the official list of Gorilla WebSocket authors for copyright
+# purposes.
+#
+# Please keep the list sorted.
+
+Gary Burd <gary@beagledreams.com>
+Joachim Bauch <mail@joachim-bauch.de>
+

vendor/github.com/gorilla/websocket/LICENSE 🔗

@@ -0,0 +1,22 @@
+Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+  Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/gorilla/websocket/README.md 🔗

@@ -0,0 +1,64 @@
+# Gorilla WebSocket
+
+Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
+[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
+
+[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket)
+[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
+
+### Documentation
+
+* [API Reference](http://godoc.org/github.com/gorilla/websocket)
+* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
+* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
+* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
+* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
+
+### Status
+
+The Gorilla WebSocket package provides a complete and tested implementation of
+the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
+package API is stable.
+
+### Installation
+
+    go get github.com/gorilla/websocket
+
+### Protocol Compliance
+
+The Gorilla WebSocket package passes the server tests in the [Autobahn Test
+Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn
+subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+
+### Gorilla WebSocket compared with other packages
+
+<table>
+<tr>
+<th></th>
+<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
+<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
+</tr>
+<tr>
+<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
+<tr><td>Passes <a href="http://autobahn.ws/testsuite/">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
+<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
+<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
+<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
+<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
+<tr><td colspan="3">Other Features</tr></td>
+<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
+<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
+<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
+</table>
+
+Notes: 
+
+1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
+2. The application can get the type of a received data message by implementing
+   a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
+   function.
+3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
+  Read returns when the input buffer is full or a frame boundary is
+  encountered. Each call to Write sends a single frame message. The Gorilla
+  io.Reader and io.WriteCloser operate on a single WebSocket message.
+

vendor/github.com/gorilla/websocket/client.go 🔗

@@ -0,0 +1,392 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/tls"
+	"encoding/base64"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// ErrBadHandshake is returned when the server response to opening handshake is
+// invalid.
+var ErrBadHandshake = errors.New("websocket: bad handshake")
+
+var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
+
+// NewClient creates a new client connection using the given net connection.
+// The URL u specifies the host and request URI. Use requestHeader to specify
+// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
+// (Cookie). Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etc.
+//
+// Deprecated: Use Dialer instead.
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
+	d := Dialer{
+		ReadBufferSize:  readBufSize,
+		WriteBufferSize: writeBufSize,
+		NetDial: func(net, addr string) (net.Conn, error) {
+			return netConn, nil
+		},
+	}
+	return d.Dial(u.String(), requestHeader)
+}
+
+// A Dialer contains options for connecting to WebSocket server.
+type Dialer struct {
+	// NetDial specifies the dial function for creating TCP connections. If
+	// NetDial is nil, net.Dial is used.
+	NetDial func(network, addr string) (net.Conn, error)
+
+	// Proxy specifies a function to return a proxy for a given
+	// Request. If the function returns a non-nil error, the
+	// request is aborted with the provided error.
+	// If Proxy is nil or returns a nil *URL, no proxy is used.
+	Proxy func(*http.Request) (*url.URL, error)
+
+	// TLSClientConfig specifies the TLS configuration to use with tls.Client.
+	// If nil, the default configuration is used.
+	TLSClientConfig *tls.Config
+
+	// HandshakeTimeout specifies the duration for the handshake to complete.
+	HandshakeTimeout time.Duration
+
+	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
+	// size is zero, then a useful default size is used. The I/O buffer sizes
+	// do not limit the size of the messages that can be sent or received.
+	ReadBufferSize, WriteBufferSize int
+
+	// Subprotocols specifies the client's requested subprotocols.
+	Subprotocols []string
+
+	// EnableCompression specifies if the client should attempt to negotiate
+	// per message compression (RFC 7692). Setting this value to true does not
+	// guarantee that compression will be supported. Currently only "no context
+	// takeover" modes are supported.
+	EnableCompression bool
+
+	// Jar specifies the cookie jar.
+	// If Jar is nil, cookies are not sent in requests and ignored
+	// in responses.
+	Jar http.CookieJar
+}
+
+var errMalformedURL = errors.New("malformed ws or wss URL")
+
+// parseURL parses the URL.
+//
+// This function is a replacement for the standard library url.Parse function.
+// In Go 1.4 and earlier, url.Parse loses information from the path.
+func parseURL(s string) (*url.URL, error) {
+	// From the RFC:
+	//
+	// ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ]
+	// wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
+	var u url.URL
+	switch {
+	case strings.HasPrefix(s, "ws://"):
+		u.Scheme = "ws"
+		s = s[len("ws://"):]
+	case strings.HasPrefix(s, "wss://"):
+		u.Scheme = "wss"
+		s = s[len("wss://"):]
+	default:
+		return nil, errMalformedURL
+	}
+
+	if i := strings.Index(s, "?"); i >= 0 {
+		u.RawQuery = s[i+1:]
+		s = s[:i]
+	}
+
+	if i := strings.Index(s, "/"); i >= 0 {
+		u.Opaque = s[i:]
+		s = s[:i]
+	} else {
+		u.Opaque = "/"
+	}
+
+	u.Host = s
+
+	if strings.Contains(u.Host, "@") {
+		// Don't bother parsing user information because user information is
+		// not allowed in websocket URIs.
+		return nil, errMalformedURL
+	}
+
+	return &u, nil
+}
+
+func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
+	hostPort = u.Host
+	hostNoPort = u.Host
+	if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
+		hostNoPort = hostNoPort[:i]
+	} else {
+		switch u.Scheme {
+		case "wss":
+			hostPort += ":443"
+		case "https":
+			hostPort += ":443"
+		default:
+			hostPort += ":80"
+		}
+	}
+	return hostPort, hostNoPort
+}
+
+// DefaultDialer is a dialer with all fields set to the default zero values.
+var DefaultDialer = &Dialer{
+	Proxy: http.ProxyFromEnvironment,
+}
+
+// Dial creates a new client connection. Use requestHeader to specify the
+// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
+// Use the response.Header to get the selected subprotocol
+// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
+//
+// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
+// non-nil *http.Response so that callers can handle redirects, authentication,
+// etcetera. The response body may not contain the entire response and does not
+// need to be closed by the application.
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
+
+	if d == nil {
+		d = &Dialer{
+			Proxy: http.ProxyFromEnvironment,
+		}
+	}
+
+	challengeKey, err := generateChallengeKey()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	u, err := parseURL(urlStr)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	switch u.Scheme {
+	case "ws":
+		u.Scheme = "http"
+	case "wss":
+		u.Scheme = "https"
+	default:
+		return nil, nil, errMalformedURL
+	}
+
+	if u.User != nil {
+		// User name and password are not allowed in websocket URIs.
+		return nil, nil, errMalformedURL
+	}
+
+	req := &http.Request{
+		Method:     "GET",
+		URL:        u,
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Header:     make(http.Header),
+		Host:       u.Host,
+	}
+
+	// Set the cookies present in the cookie jar of the dialer
+	if d.Jar != nil {
+		for _, cookie := range d.Jar.Cookies(u) {
+			req.AddCookie(cookie)
+		}
+	}
+
+	// Set the request headers using the capitalization for names and values in
+	// RFC examples. Although the capitalization shouldn't matter, there are
+	// servers that depend on it. The Header.Set method is not used because the
+	// method canonicalizes the header names.
+	req.Header["Upgrade"] = []string{"websocket"}
+	req.Header["Connection"] = []string{"Upgrade"}
+	req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
+	req.Header["Sec-WebSocket-Version"] = []string{"13"}
+	if len(d.Subprotocols) > 0 {
+		req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
+	}
+	for k, vs := range requestHeader {
+		switch {
+		case k == "Host":
+			if len(vs) > 0 {
+				req.Host = vs[0]
+			}
+		case k == "Upgrade" ||
+			k == "Connection" ||
+			k == "Sec-Websocket-Key" ||
+			k == "Sec-Websocket-Version" ||
+			k == "Sec-Websocket-Extensions" ||
+			(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
+			return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
+		default:
+			req.Header[k] = vs
+		}
+	}
+
+	if d.EnableCompression {
+		req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
+	}
+
+	hostPort, hostNoPort := hostPortNoPort(u)
+
+	var proxyURL *url.URL
+	// Check whether the proxy method has been configured
+	if d.Proxy != nil {
+		proxyURL, err = d.Proxy(req)
+	}
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var targetHostPort string
+	if proxyURL != nil {
+		targetHostPort, _ = hostPortNoPort(proxyURL)
+	} else {
+		targetHostPort = hostPort
+	}
+
+	var deadline time.Time
+	if d.HandshakeTimeout != 0 {
+		deadline = time.Now().Add(d.HandshakeTimeout)
+	}
+
+	netDial := d.NetDial
+	if netDial == nil {
+		netDialer := &net.Dialer{Deadline: deadline}
+		netDial = netDialer.Dial
+	}
+
+	netConn, err := netDial("tcp", targetHostPort)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	defer func() {
+		if netConn != nil {
+			netConn.Close()
+		}
+	}()
+
+	if err := netConn.SetDeadline(deadline); err != nil {
+		return nil, nil, err
+	}
+
+	if proxyURL != nil {
+		connectHeader := make(http.Header)
+		if user := proxyURL.User; user != nil {
+			proxyUser := user.Username()
+			if proxyPassword, passwordSet := user.Password(); passwordSet {
+				credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+				connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+			}
+		}
+		connectReq := &http.Request{
+			Method: "CONNECT",
+			URL:    &url.URL{Opaque: hostPort},
+			Host:   hostPort,
+			Header: connectHeader,
+		}
+
+		connectReq.Write(netConn)
+
+		// Read response.
+		// Okay to use and discard buffered reader here, because
+		// TLS server will not speak until spoken to.
+		br := bufio.NewReader(netConn)
+		resp, err := http.ReadResponse(br, connectReq)
+		if err != nil {
+			return nil, nil, err
+		}
+		if resp.StatusCode != 200 {
+			f := strings.SplitN(resp.Status, " ", 2)
+			return nil, nil, errors.New(f[1])
+		}
+	}
+
+	if u.Scheme == "https" {
+		cfg := cloneTLSConfig(d.TLSClientConfig)
+		if cfg.ServerName == "" {
+			cfg.ServerName = hostNoPort
+		}
+		tlsConn := tls.Client(netConn, cfg)
+		netConn = tlsConn
+		if err := tlsConn.Handshake(); err != nil {
+			return nil, nil, err
+		}
+		if !cfg.InsecureSkipVerify {
+			if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
+				return nil, nil, err
+			}
+		}
+	}
+
+	conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)
+
+	if err := req.Write(netConn); err != nil {
+		return nil, nil, err
+	}
+
+	resp, err := http.ReadResponse(conn.br, req)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if d.Jar != nil {
+		if rc := resp.Cookies(); len(rc) > 0 {
+			d.Jar.SetCookies(u, rc)
+		}
+	}
+
+	if resp.StatusCode != 101 ||
+		!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
+		!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
+		resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
+		// Before closing the network connection on return from this
+		// function, slurp up some of the response to aid application
+		// debugging.
+		buf := make([]byte, 1024)
+		n, _ := io.ReadFull(resp.Body, buf)
+		resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
+		return nil, resp, ErrBadHandshake
+	}
+
+	for _, ext := range parseExtensions(resp.Header) {
+		if ext[""] != "permessage-deflate" {
+			continue
+		}
+		_, snct := ext["server_no_context_takeover"]
+		_, cnct := ext["client_no_context_takeover"]
+		if !snct || !cnct {
+			return nil, resp, errInvalidCompression
+		}
+		conn.newCompressionWriter = compressNoContextTakeover
+		conn.newDecompressionReader = decompressNoContextTakeover
+		break
+	}
+
+	resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
+	conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
+
+	netConn.SetDeadline(time.Time{})
+	netConn = nil // to avoid close in defer.
+	return conn, resp, nil
+}

vendor/github.com/gorilla/websocket/client_clone.go 🔗

@@ -0,0 +1,16 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package websocket
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+	return cfg.Clone()
+}

vendor/github.com/gorilla/websocket/client_clone_legacy.go 🔗

@@ -0,0 +1,38 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package websocket
+
+import "crypto/tls"
+
+// cloneTLSConfig clones all public fields except the fields
+// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
+// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
+// config in active use.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+	return &tls.Config{
+		Rand:                     cfg.Rand,
+		Time:                     cfg.Time,
+		Certificates:             cfg.Certificates,
+		NameToCertificate:        cfg.NameToCertificate,
+		GetCertificate:           cfg.GetCertificate,
+		RootCAs:                  cfg.RootCAs,
+		NextProtos:               cfg.NextProtos,
+		ServerName:               cfg.ServerName,
+		ClientAuth:               cfg.ClientAuth,
+		ClientCAs:                cfg.ClientCAs,
+		InsecureSkipVerify:       cfg.InsecureSkipVerify,
+		CipherSuites:             cfg.CipherSuites,
+		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+		ClientSessionCache:       cfg.ClientSessionCache,
+		MinVersion:               cfg.MinVersion,
+		MaxVersion:               cfg.MaxVersion,
+		CurvePreferences:         cfg.CurvePreferences,
+	}
+}

vendor/github.com/gorilla/websocket/compression.go 🔗

@@ -0,0 +1,148 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"compress/flate"
+	"errors"
+	"io"
+	"strings"
+	"sync"
+)
+
+const (
+	minCompressionLevel     = -2 // flate.HuffmanOnly not defined in Go < 1.6
+	maxCompressionLevel     = flate.BestCompression
+	defaultCompressionLevel = 1
+)
+
+var (
+	flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
+	flateReaderPool  = sync.Pool{New: func() interface{} {
+		return flate.NewReader(nil)
+	}}
+)
+
+func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
+	const tail =
+	// Add four bytes as specified in RFC
+	"\x00\x00\xff\xff" +
+		// Add final block to squelch unexpected EOF error from flate reader.
+		"\x01\x00\x00\xff\xff"
+
+	fr, _ := flateReaderPool.Get().(io.ReadCloser)
+	fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
+	return &flateReadWrapper{fr}
+}
+
+func isValidCompressionLevel(level int) bool {
+	return minCompressionLevel <= level && level <= maxCompressionLevel
+}
+
+func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
+	p := &flateWriterPools[level-minCompressionLevel]
+	tw := &truncWriter{w: w}
+	fw, _ := p.Get().(*flate.Writer)
+	if fw == nil {
+		fw, _ = flate.NewWriter(tw, level)
+	} else {
+		fw.Reset(tw)
+	}
+	return &flateWriteWrapper{fw: fw, tw: tw, p: p}
+}
+
+// truncWriter is an io.Writer that writes all but the last four bytes of the
+// stream to another io.Writer.
+type truncWriter struct {
+	w io.WriteCloser
+	n int
+	p [4]byte
+}
+
+func (w *truncWriter) Write(p []byte) (int, error) {
+	n := 0
+
+	// fill buffer first for simplicity.
+	if w.n < len(w.p) {
+		n = copy(w.p[w.n:], p)
+		p = p[n:]
+		w.n += n
+		if len(p) == 0 {
+			return n, nil
+		}
+	}
+
+	m := len(p)
+	if m > len(w.p) {
+		m = len(w.p)
+	}
+
+	if nn, err := w.w.Write(w.p[:m]); err != nil {
+		return n + nn, err
+	}
+
+	copy(w.p[:], w.p[m:])
+	copy(w.p[len(w.p)-m:], p[len(p)-m:])
+	nn, err := w.w.Write(p[:len(p)-m])
+	return n + nn, err
+}
+
+type flateWriteWrapper struct {
+	fw *flate.Writer
+	tw *truncWriter
+	p  *sync.Pool
+}
+
+func (w *flateWriteWrapper) Write(p []byte) (int, error) {
+	if w.fw == nil {
+		return 0, errWriteClosed
+	}
+	return w.fw.Write(p)
+}
+
+func (w *flateWriteWrapper) Close() error {
+	if w.fw == nil {
+		return errWriteClosed
+	}
+	err1 := w.fw.Flush()
+	w.p.Put(w.fw)
+	w.fw = nil
+	if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
+		return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
+	}
+	err2 := w.tw.w.Close()
+	if err1 != nil {
+		return err1
+	}
+	return err2
+}
+
+type flateReadWrapper struct {
+	fr io.ReadCloser
+}
+
+func (r *flateReadWrapper) Read(p []byte) (int, error) {
+	if r.fr == nil {
+		return 0, io.ErrClosedPipe
+	}
+	n, err := r.fr.Read(p)
+	if err == io.EOF {
+		// Preemptively place the reader back in the pool. This helps with
+		// scenarios where the application does not call NextReader() soon after
+		// this final read.
+		r.Close()
+	}
+	return n, err
+}
+
+func (r *flateReadWrapper) Close() error {
+	if r.fr == nil {
+		return io.ErrClosedPipe
+	}
+	err := r.fr.Close()
+	flateReaderPool.Put(r.fr)
+	r.fr = nil
+	return err
+}

vendor/github.com/gorilla/websocket/conn.go 🔗

@@ -0,0 +1,1149 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"encoding/binary"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"strconv"
+	"sync"
+	"time"
+	"unicode/utf8"
+)
+
+const (
+	// Frame header byte 0 bits from Section 5.2 of RFC 6455
+	finalBit = 1 << 7
+	rsv1Bit  = 1 << 6
+	rsv2Bit  = 1 << 5
+	rsv3Bit  = 1 << 4
+
+	// Frame header byte 1 bits from Section 5.2 of RFC 6455
+	maskBit = 1 << 7
+
+	maxFrameHeaderSize         = 2 + 8 + 4 // Fixed header + length + mask
+	maxControlFramePayloadSize = 125
+
+	writeWait = time.Second
+
+	defaultReadBufferSize  = 4096
+	defaultWriteBufferSize = 4096
+
+	continuationFrame = 0
+	noFrame           = -1
+)
+
+// Close codes defined in RFC 6455, section 11.7.
+const (
+	CloseNormalClosure           = 1000
+	CloseGoingAway               = 1001
+	CloseProtocolError           = 1002
+	CloseUnsupportedData         = 1003
+	CloseNoStatusReceived        = 1005
+	CloseAbnormalClosure         = 1006
+	CloseInvalidFramePayloadData = 1007
+	ClosePolicyViolation         = 1008
+	CloseMessageTooBig           = 1009
+	CloseMandatoryExtension      = 1010
+	CloseInternalServerErr       = 1011
+	CloseServiceRestart          = 1012
+	CloseTryAgainLater           = 1013
+	CloseTLSHandshake            = 1015
+)
+
+// The message types are defined in RFC 6455, section 11.8.
+const (
+	// TextMessage denotes a text data message. The text message payload is
+	// interpreted as UTF-8 encoded text data.
+	TextMessage = 1
+
+	// BinaryMessage denotes a binary data message.
+	BinaryMessage = 2
+
+	// CloseMessage denotes a close control message. The optional message
+	// payload contains a numeric code and text. Use the FormatCloseMessage
+	// function to format a close message payload.
+	CloseMessage = 8
+
+	// PingMessage denotes a ping control message. The optional message payload
+	// is UTF-8 encoded text.
+	PingMessage = 9
+
+	// PongMessage denotes a pong control message. The optional message payload
+	// is UTF-8 encoded text.
+	PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+	msg       string
+	temporary bool
+	timeout   bool
+}
+
+func (e *netError) Error() string   { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool   { return e.timeout }
+
+// CloseError represents close frame.
+type CloseError struct {
+
+	// Code is defined in RFC 6455, section 11.7.
+	Code int
+
+	// Text is the optional text payload.
+	Text string
+}
+
+func (e *CloseError) Error() string {
+	s := []byte("websocket: close ")
+	s = strconv.AppendInt(s, int64(e.Code), 10)
+	switch e.Code {
+	case CloseNormalClosure:
+		s = append(s, " (normal)"...)
+	case CloseGoingAway:
+		s = append(s, " (going away)"...)
+	case CloseProtocolError:
+		s = append(s, " (protocol error)"...)
+	case CloseUnsupportedData:
+		s = append(s, " (unsupported data)"...)
+	case CloseNoStatusReceived:
+		s = append(s, " (no status)"...)
+	case CloseAbnormalClosure:
+		s = append(s, " (abnormal closure)"...)
+	case CloseInvalidFramePayloadData:
+		s = append(s, " (invalid payload data)"...)
+	case ClosePolicyViolation:
+		s = append(s, " (policy violation)"...)
+	case CloseMessageTooBig:
+		s = append(s, " (message too big)"...)
+	case CloseMandatoryExtension:
+		s = append(s, " (mandatory extension missing)"...)
+	case CloseInternalServerErr:
+		s = append(s, " (internal server error)"...)
+	case CloseTLSHandshake:
+		s = append(s, " (TLS handshake error)"...)
+	}
+	if e.Text != "" {
+		s = append(s, ": "...)
+		s = append(s, e.Text...)
+	}
+	return string(s)
+}
+
+// IsCloseError returns boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+	if e, ok := err.(*CloseError); ok {
+		for _, code := range codes {
+			if e.Code == code {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// IsUnexpectedCloseError returns boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes.
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
+	if e, ok := err.(*CloseError); ok {
+		for _, code := range expectedCodes {
+			if e.Code == code {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+var (
+	errWriteTimeout        = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
+	errUnexpectedEOF       = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+	errBadWriteOpCode      = errors.New("websocket: bad write message type")
+	errWriteClosed         = errors.New("websocket: write closed")
+	errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+func newMaskKey() [4]byte {
+	n := rand.Uint32()
+	return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+func hideTempErr(err error) error {
+	if e, ok := err.(net.Error); ok && e.Temporary() {
+		err = &netError{msg: e.Error(), timeout: e.Timeout()}
+	}
+	return err
+}
+
+func isControl(frameType int) bool {
+	return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+	return frameType == TextMessage || frameType == BinaryMessage
+}
+
+var validReceivedCloseCodes = map[int]bool{
+	// see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+
+	CloseNormalClosure:           true,
+	CloseGoingAway:               true,
+	CloseProtocolError:           true,
+	CloseUnsupportedData:         true,
+	CloseNoStatusReceived:        false,
+	CloseAbnormalClosure:         false,
+	CloseInvalidFramePayloadData: true,
+	ClosePolicyViolation:         true,
+	CloseMessageTooBig:           true,
+	CloseMandatoryExtension:      true,
+	CloseInternalServerErr:       true,
+	CloseServiceRestart:          true,
+	CloseTryAgainLater:           true,
+	CloseTLSHandshake:            false,
+}
+
+func isValidReceivedCloseCode(code int) bool {
+	return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
+}
+
+// The Conn type represents a WebSocket connection.
+type Conn struct {
+	conn        net.Conn
+	isServer    bool
+	subprotocol string
+
+	// Write fields
+	mu            chan bool // used as mutex to protect write to conn
+	writeBuf      []byte    // frame is constructed in this buffer.
+	writeDeadline time.Time
+	writer        io.WriteCloser // the current writer returned to the application
+	isWriting     bool           // for best-effort concurrent write detection
+
+	writeErrMu sync.Mutex
+	writeErr   error
+
+	enableWriteCompression bool
+	compressionLevel       int
+	newCompressionWriter   func(io.WriteCloser, int) io.WriteCloser
+
+	// Read fields
+	reader        io.ReadCloser // the current reader returned to the application
+	readErr       error
+	br            *bufio.Reader
+	readRemaining int64 // bytes remaining in current frame.
+	readFinal     bool  // true once the final frame of the message has been read.
+	readLength    int64 // Message size.
+	readLimit     int64 // Maximum message size.
+	readMaskPos   int
+	readMaskKey   [4]byte
+	handlePong    func(string) error
+	handlePing    func(string) error
+	handleClose   func(int, string) error
+	readErrCount  int
+	messageReader *messageReader // the current low-level reader
+
+	readDecompress         bool // whether last read frame had RSV1 set
+	newDecompressionReader func(io.Reader) io.ReadCloser
+}
+
+func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
+	return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil)
+}
+
+type writeHook struct {
+	p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+	wh.p = p
+	return len(p), nil
+}
+
+func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn {
+	mu := make(chan bool, 1)
+	mu <- true
+
+	var br *bufio.Reader
+	if readBufferSize == 0 && brw != nil && brw.Reader != nil {
+		// Reuse the supplied bufio.Reader if the buffer has a useful size.
+		// This code assumes that peek on a reader returns
+		// bufio.Reader.buf[:0].
+		brw.Reader.Reset(conn)
+		if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 {
+			br = brw.Reader
+		}
+	}
+	if br == nil {
+		if readBufferSize == 0 {
+			readBufferSize = defaultReadBufferSize
+		}
+		if readBufferSize < maxControlFramePayloadSize {
+			readBufferSize = maxControlFramePayloadSize
+		}
+		br = bufio.NewReaderSize(conn, readBufferSize)
+	}
+
+	var writeBuf []byte
+	if writeBufferSize == 0 && brw != nil && brw.Writer != nil {
+		// Use the bufio.Writer's buffer if the buffer has a useful size. This
+		// code assumes that bufio.Writer.buf[:1] is passed to the
+		// bufio.Writer's underlying writer.
+		var wh writeHook
+		brw.Writer.Reset(&wh)
+		brw.Writer.WriteByte(0)
+		brw.Flush()
+		if cap(wh.p) >= maxFrameHeaderSize+256 {
+			writeBuf = wh.p[:cap(wh.p)]
+		}
+	}
+
+	if writeBuf == nil {
+		if writeBufferSize == 0 {
+			writeBufferSize = defaultWriteBufferSize
+		}
+		writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize)
+	}
+
+	c := &Conn{
+		isServer:               isServer,
+		br:                     br,
+		conn:                   conn,
+		mu:                     mu,
+		readFinal:              true,
+		writeBuf:               writeBuf,
+		enableWriteCompression: true,
+		compressionLevel:       defaultCompressionLevel,
+	}
+	c.SetCloseHandler(nil)
+	c.SetPingHandler(nil)
+	c.SetPongHandler(nil)
+	return c
+}
+
+// Subprotocol returns the negotiated protocol for the connection.
+func (c *Conn) Subprotocol() string {
+	return c.subprotocol
+}
+
+// Close closes the underlying network connection without sending or waiting for a close frame.
+func (c *Conn) Close() error {
+	return c.conn.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *Conn) LocalAddr() net.Addr {
+	return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address.
+func (c *Conn) RemoteAddr() net.Addr {
+	return c.conn.RemoteAddr()
+}
+
+// Write methods
+
+func (c *Conn) writeFatal(err error) error {
+	err = hideTempErr(err)
+	c.writeErrMu.Lock()
+	if c.writeErr == nil {
+		c.writeErr = err
+	}
+	c.writeErrMu.Unlock()
+	return err
+}
+
+func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
+	<-c.mu
+	defer func() { c.mu <- true }()
+
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	if err != nil {
+		return err
+	}
+
+	c.conn.SetWriteDeadline(deadline)
+	for _, buf := range bufs {
+		if len(buf) > 0 {
+			_, err := c.conn.Write(buf)
+			if err != nil {
+				return c.writeFatal(err)
+			}
+		}
+	}
+
+	if frameType == CloseMessage {
+		c.writeFatal(ErrCloseSent)
+	}
+	return nil
+}
+
+// WriteControl writes a control message with the given deadline. The allowed
+// message types are CloseMessage, PingMessage and PongMessage.
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
+	if !isControl(messageType) {
+		return errBadWriteOpCode
+	}
+	if len(data) > maxControlFramePayloadSize {
+		return errInvalidControlFrame
+	}
+
+	b0 := byte(messageType) | finalBit
+	b1 := byte(len(data))
+	if !c.isServer {
+		b1 |= maskBit
+	}
+
+	buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
+	buf = append(buf, b0, b1)
+
+	if c.isServer {
+		buf = append(buf, data...)
+	} else {
+		key := newMaskKey()
+		buf = append(buf, key[:]...)
+		buf = append(buf, data...)
+		maskBytes(key, 0, buf[6:])
+	}
+
+	d := time.Hour * 1000
+	if !deadline.IsZero() {
+		d = deadline.Sub(time.Now())
+		if d < 0 {
+			return errWriteTimeout
+		}
+	}
+
+	timer := time.NewTimer(d)
+	select {
+	case <-c.mu:
+		timer.Stop()
+	case <-timer.C:
+		return errWriteTimeout
+	}
+	defer func() { c.mu <- true }()
+
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	if err != nil {
+		return err
+	}
+
+	c.conn.SetWriteDeadline(deadline)
+	_, err = c.conn.Write(buf)
+	if err != nil {
+		return c.writeFatal(err)
+	}
+	if messageType == CloseMessage {
+		c.writeFatal(ErrCloseSent)
+	}
+	return err
+}
+
+func (c *Conn) prepWrite(messageType int) error {
+	// Close previous writer if not already closed by the application. It's
+	// probably better to return an error in this situation, but we cannot
+	// change this without breaking existing applications.
+	if c.writer != nil {
+		c.writer.Close()
+		c.writer = nil
+	}
+
+	if !isControl(messageType) && !isData(messageType) {
+		return errBadWriteOpCode
+	}
+
+	c.writeErrMu.Lock()
+	err := c.writeErr
+	c.writeErrMu.Unlock()
+	return err
+}
+
+// NextWriter returns a writer for the next message to send. The writer's Close
+// method flushes the complete message to the network.
+//
+// There can be at most one open writer on a connection. NextWriter closes the
+// previous writer if the application has not already done so.
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
+	if err := c.prepWrite(messageType); err != nil {
+		return nil, err
+	}
+
+	mw := &messageWriter{
+		c:         c,
+		frameType: messageType,
+		pos:       maxFrameHeaderSize,
+	}
+	c.writer = mw
+	if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) {
+		w := c.newCompressionWriter(c.writer, c.compressionLevel)
+		mw.compress = true
+		c.writer = w
+	}
+	return c.writer, nil
+}
+
+type messageWriter struct {
+	c         *Conn
+	compress  bool // whether next call to flushFrame should set RSV1
+	pos       int  // end of data in writeBuf.
+	frameType int  // type of the current frame.
+	err       error
+}
+
+func (w *messageWriter) fatal(err error) error {
+	if w.err != nil {
+		w.err = err
+		w.c.writer = nil
+	}
+	return err
+}
+
+// flushFrame writes buffered data and extra as a frame to the network. The
+// final argument indicates that this is the last frame in the message.
+func (w *messageWriter) flushFrame(final bool, extra []byte) error {
+	c := w.c
+	// Payload length: bytes buffered past the reserved header area plus
+	// the caller-supplied trailing bytes.
+	length := w.pos - maxFrameHeaderSize + len(extra)
+
+	// Check for invalid control frames.
+	if isControl(w.frameType) &&
+		(!final || length > maxControlFramePayloadSize) {
+		return w.fatal(errInvalidControlFrame)
+	}
+
+	// First header byte: opcode plus the FIN and RSV1 (compression) bits.
+	b0 := byte(w.frameType)
+	if final {
+		b0 |= finalBit
+	}
+	if w.compress {
+		b0 |= rsv1Bit
+	}
+	// RSV1 is only set on the first frame of a compressed message.
+	w.compress = false
+
+	// Second header byte: the mask bit is set on client-to-server frames.
+	b1 := byte(0)
+	if !c.isServer {
+		b1 |= maskBit
+	}
+
+	// Assume that the frame starts at beginning of c.writeBuf.
+	framePos := 0
+	if c.isServer {
+		// Adjust up if mask not included in the header.
+		framePos = 4
+	}
+
+	// Encode the payload length per RFC 6455: 7 bits inline, 16 bits
+	// after a 126 marker, or 64 bits after a 127 marker. framePos is
+	// advanced so the shorter headers sit flush against the payload,
+	// which always starts at maxFrameHeaderSize.
+	switch {
+	case length >= 65536:
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | 127
+		binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
+	case length > 125:
+		framePos += 6
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | 126
+		binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
+	default:
+		framePos += 8
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | byte(length)
+	}
+
+	// Clients must mask the payload in place; extra is not supported on
+	// the client side because it would have to be masked as well.
+	if !c.isServer {
+		key := newMaskKey()
+		copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
+		maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
+		if len(extra) > 0 {
+			return c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))
+		}
+	}
+
+	// Write the buffers to the connection with best-effort detection of
+	// concurrent writes. See the concurrency section in the package
+	// documentation for more info.
+
+	if c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = true
+
+	err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
+
+	if !c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = false
+
+	if err != nil {
+		return w.fatal(err)
+	}
+
+	if final {
+		// Message complete: detach the writer from the connection.
+		c.writer = nil
+		return nil
+	}
+
+	// Setup for next frame.
+	w.pos = maxFrameHeaderSize
+	w.frameType = continuationFrame
+	return nil
+}
+
+// ncopy returns the number of bytes, at most max, that can currently be
+// copied into the frame buffer. If the buffer is already full, it first
+// flushes the buffered data as a non-final frame to make room.
+func (w *messageWriter) ncopy(max int) (int, error) {
+	n := len(w.c.writeBuf) - w.pos
+	if n <= 0 {
+		if err := w.flushFrame(false, nil); err != nil {
+			return 0, err
+		}
+		// flushFrame reset w.pos to maxFrameHeaderSize, freeing space.
+		n = len(w.c.writeBuf) - w.pos
+	}
+	if n > max {
+		n = max
+	}
+	return n, nil
+}
+
+func (w *messageWriter) Write(p []byte) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+
+	if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
+		// Don't buffer large messages.
+		err := w.flushFrame(false, p)
+		if err != nil {
+			return 0, err
+		}
+		return len(p), nil
+	}
+
+	nn := len(p)
+	for len(p) > 0 {
+		n, err := w.ncopy(len(p))
+		if err != nil {
+			return 0, err
+		}
+		copy(w.c.writeBuf[w.pos:], p[:n])
+		w.pos += n
+		p = p[n:]
+	}
+	return nn, nil
+}
+
+func (w *messageWriter) WriteString(p string) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+
+	nn := len(p)
+	for len(p) > 0 {
+		n, err := w.ncopy(len(p))
+		if err != nil {
+			return 0, err
+		}
+		copy(w.c.writeBuf[w.pos:], p[:n])
+		w.pos += n
+		p = p[n:]
+	}
+	return nn, nil
+}
+
+func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for {
+		if w.pos == len(w.c.writeBuf) {
+			err = w.flushFrame(false, nil)
+			if err != nil {
+				break
+			}
+		}
+		var n int
+		n, err = r.Read(w.c.writeBuf[w.pos:])
+		w.pos += n
+		nn += int64(n)
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			break
+		}
+	}
+	return nn, err
+}
+
+// Close flushes any buffered data as the final frame of the message and
+// marks the writer as closed; subsequent calls return errWriteClosed.
+func (w *messageWriter) Close() error {
+	if w.err != nil {
+		return w.err
+	}
+	// final=true: the remaining buffered bytes become the last frame.
+	if err := w.flushFrame(true, nil); err != nil {
+		return err
+	}
+	w.err = errWriteClosed
+	return nil
+}
+
+// WritePreparedMessage writes prepared message into connection.
+func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error {
+	frameType, frameData, err := pm.frame(prepareKey{
+		isServer:         c.isServer,
+		compress:         c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType),
+		compressionLevel: c.compressionLevel,
+	})
+	if err != nil {
+		return err
+	}
+	if c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = true
+	err = c.write(frameType, c.writeDeadline, frameData, nil)
+	if !c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = false
+	return err
+}
+
+// WriteMessage is a helper method for getting a writer using NextWriter,
+// writing the message and closing the writer.
+func (c *Conn) WriteMessage(messageType int, data []byte) error {
+
+	if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) {
+		// Fast path with no allocations and single frame.
+
+		if err := c.prepWrite(messageType); err != nil {
+			return err
+		}
+		// Stack-allocated writer: whatever fits in writeBuf is framed
+		// directly, and any overflow is passed through as extra. That is
+		// only legal server-side, where the payload is not masked (see
+		// flushFrame).
+		mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize}
+		n := copy(c.writeBuf[mw.pos:], data)
+		mw.pos += n
+		data = data[n:]
+		return mw.flushFrame(true, data)
+	}
+
+	// Slow path: clients must mask the payload and compression requires
+	// the streaming writer.
+	w, err := c.NextWriter(messageType)
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(data); err != nil {
+		return err
+	}
+	return w.Close()
+}
+
+// SetWriteDeadline sets the write deadline on the underlying network
+// connection. After a write has timed out, the websocket state is corrupt and
+// all future writes will return an error. A zero value for t means writes will
+// not time out.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+	c.writeDeadline = t
+	return nil
+}
+
+// Read methods
+
+func (c *Conn) advanceFrame() (int, error) {
+
+	// 1. Skip remainder of previous frame.
+
+	if c.readRemaining > 0 {
+		if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
+			return noFrame, err
+		}
+	}
+
+	// 2. Read and parse first two bytes of frame header.
+
+	p, err := c.read(2)
+	if err != nil {
+		return noFrame, err
+	}
+
+	final := p[0]&finalBit != 0
+	frameType := int(p[0] & 0xf)
+	mask := p[1]&maskBit != 0
+	c.readRemaining = int64(p[1] & 0x7f)
+
+	c.readDecompress = false
+	if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 {
+		c.readDecompress = true
+		p[0] &^= rsv1Bit
+	}
+
+	if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 {
+		return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16))
+	}
+
+	switch frameType {
+	case CloseMessage, PingMessage, PongMessage:
+		if c.readRemaining > maxControlFramePayloadSize {
+			return noFrame, c.handleProtocolError("control frame length > 125")
+		}
+		if !final {
+			return noFrame, c.handleProtocolError("control frame not final")
+		}
+	case TextMessage, BinaryMessage:
+		if !c.readFinal {
+			return noFrame, c.handleProtocolError("message start before final message frame")
+		}
+		c.readFinal = final
+	case continuationFrame:
+		if c.readFinal {
+			return noFrame, c.handleProtocolError("continuation after final message frame")
+		}
+		c.readFinal = final
+	default:
+		return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType))
+	}
+
+	// 3. Read and parse frame length.
+
+	switch c.readRemaining {
+	case 126:
+		p, err := c.read(2)
+		if err != nil {
+			return noFrame, err
+		}
+		c.readRemaining = int64(binary.BigEndian.Uint16(p))
+	case 127:
+		p, err := c.read(8)
+		if err != nil {
+			return noFrame, err
+		}
+		c.readRemaining = int64(binary.BigEndian.Uint64(p))
+	}
+
+	// 4. Handle frame masking.
+
+	if mask != c.isServer {
+		return noFrame, c.handleProtocolError("incorrect mask flag")
+	}
+
+	if mask {
+		c.readMaskPos = 0
+		p, err := c.read(len(c.readMaskKey))
+		if err != nil {
+			return noFrame, err
+		}
+		copy(c.readMaskKey[:], p)
+	}
+
+	// 5. For text and binary messages, enforce read limit and return.
+
+	if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {
+
+		c.readLength += c.readRemaining
+		if c.readLimit > 0 && c.readLength > c.readLimit {
+			c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
+			return noFrame, ErrReadLimit
+		}
+
+		return frameType, nil
+	}
+
+	// 6. Read control frame payload.
+
+	var payload []byte
+	if c.readRemaining > 0 {
+		payload, err = c.read(int(c.readRemaining))
+		c.readRemaining = 0
+		if err != nil {
+			return noFrame, err
+		}
+		if c.isServer {
+			maskBytes(c.readMaskKey, 0, payload)
+		}
+	}
+
+	// 7. Process control frame payload.
+
+	switch frameType {
+	case PongMessage:
+		if err := c.handlePong(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case PingMessage:
+		if err := c.handlePing(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case CloseMessage:
+		closeCode := CloseNoStatusReceived
+		closeText := ""
+		if len(payload) >= 2 {
+			closeCode = int(binary.BigEndian.Uint16(payload))
+			if !isValidReceivedCloseCode(closeCode) {
+				return noFrame, c.handleProtocolError("invalid close code")
+			}
+			closeText = string(payload[2:])
+			if !utf8.ValidString(closeText) {
+				return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
+			}
+		}
+		if err := c.handleClose(closeCode, closeText); err != nil {
+			return noFrame, err
+		}
+		return noFrame, &CloseError{Code: closeCode, Text: closeText}
+	}
+
+	return frameType, nil
+}
+
+func (c *Conn) handleProtocolError(message string) error {
+	c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
+	return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+	// Close previous reader, only relevant for decompression.
+	if c.reader != nil {
+		c.reader.Close()
+		c.reader = nil
+	}
+
+	c.messageReader = nil
+	c.readLength = 0
+
+	for c.readErr == nil {
+		frameType, err := c.advanceFrame()
+		if err != nil {
+			c.readErr = hideTempErr(err)
+			break
+		}
+		if frameType == TextMessage || frameType == BinaryMessage {
+			c.messageReader = &messageReader{c}
+			c.reader = c.messageReader
+			if c.readDecompress {
+				c.reader = c.newDecompressionReader(c.reader)
+			}
+			return frameType, c.reader, nil
+		}
+	}
+
+	// Applications that do handle the error returned from this method spin in
+	// tight loop on connection failure. To help application developers detect
+	// this error, panic on repeated reads to the failed connection.
+	c.readErrCount++
+	if c.readErrCount >= 1000 {
+		panic("repeated read on failed websocket connection")
+	}
+
+	return noFrame, nil, c.readErr
+}
+
+type messageReader struct{ c *Conn }
+
+func (r *messageReader) Read(b []byte) (int, error) {
+	c := r.c
+	if c.messageReader != r {
+		return 0, io.EOF
+	}
+
+	for c.readErr == nil {
+
+		if c.readRemaining > 0 {
+			if int64(len(b)) > c.readRemaining {
+				b = b[:c.readRemaining]
+			}
+			n, err := c.br.Read(b)
+			c.readErr = hideTempErr(err)
+			if c.isServer {
+				c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n])
+			}
+			c.readRemaining -= int64(n)
+			if c.readRemaining > 0 && c.readErr == io.EOF {
+				c.readErr = errUnexpectedEOF
+			}
+			return n, c.readErr
+		}
+
+		if c.readFinal {
+			c.messageReader = nil
+			return 0, io.EOF
+		}
+
+		frameType, err := c.advanceFrame()
+		switch {
+		case err != nil:
+			c.readErr = hideTempErr(err)
+		case frameType == TextMessage || frameType == BinaryMessage:
+			c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
+		}
+	}
+
+	err := c.readErr
+	if err == io.EOF && c.messageReader == r {
+		err = errUnexpectedEOF
+	}
+	return 0, err
+}
+
+func (r *messageReader) Close() error {
+	return nil
+}
+
+// ReadMessage is a helper method for getting a reader using NextReader and
+// reading from that reader to a buffer.
+func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
+	var r io.Reader
+	messageType, r, err = c.NextReader()
+	if err != nil {
+		return messageType, nil, err
+	}
+	p, err = ioutil.ReadAll(r)
+	return messageType, p, err
+}
+
+// SetReadDeadline sets the read deadline on the underlying network connection.
+// After a read has timed out, the websocket connection state is corrupt and
+// all future reads will return an error. A zero value for t means reads will
+// not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+	return c.conn.SetReadDeadline(t)
+}
+
+// SetReadLimit sets the maximum size for a message read from the peer. If a
+// message exceeds the limit, the connection sends a close frame to the peer
+// and returns ErrReadLimit to the application.
+func (c *Conn) SetReadLimit(limit int64) {
+	c.readLimit = limit
+}
+
+// CloseHandler returns the current close handler
+func (c *Conn) CloseHandler() func(code int, text string) error {
+	return c.handleClose
+}
+
+// SetCloseHandler sets the handler for close messages received from the peer.
+// The code argument to h is the received close code or CloseNoStatusReceived
+// if the close message is empty. The default close handler sends a close frame
+// back to the peer.
+//
+// The application must read the connection to process close messages as
+// described in the section on Control Frames above.
+//
+// The connection read methods return a CloseError when a close frame is
+// received. Most applications should handle close messages as part of their
+// normal error handling. Applications should only set a close handler when the
+// application must perform some action before sending a close frame back to
+// the peer.
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+	if h == nil {
+		h = func(code int, text string) error {
+			message := []byte{}
+			if code != CloseNoStatusReceived {
+				message = FormatCloseMessage(code, "")
+			}
+			c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+			return nil
+		}
+	}
+	c.handleClose = h
+}
+
+// PingHandler returns the current ping handler
+func (c *Conn) PingHandler() func(appData string) error {
+	return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING frame application data. The default
+// ping handler sends a pong to the peer.
+//
+// The application must read the connection to process ping messages as
+// described in the section on Control Frames above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(message string) error {
+			err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+			if err == ErrCloseSent {
+				return nil
+			} else if e, ok := err.(net.Error); ok && e.Temporary() {
+				return nil
+			}
+			return err
+		}
+	}
+	c.handlePing = h
+}
+
+// PongHandler returns the current pong handler
+func (c *Conn) PongHandler() func(appData string) error {
+	return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG frame application data. The default
+// pong handler does nothing.
+//
+// The application must read the connection to process pong messages as
+// described in the section on Control Frames above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+	if h == nil {
+		// Restore the default no-op handler.
+		h = func(string) error { return nil }
+	}
+	c.handlePong = h
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used to further
+// modifications to connection specific flags.
+func (c *Conn) UnderlyingConn() net.Conn {
+	return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+	c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+	if !isValidCompressionLevel(level) {
+		return errors.New("websocket: invalid compression level")
+	}
+	c.compressionLevel = level
+	return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+// The payload layout is the 2-byte big-endian close code followed by the
+// UTF-8 text.
+func FormatCloseMessage(closeCode int, text string) []byte {
+	buf := make([]byte, 2+len(text))
+	binary.BigEndian.PutUint16(buf, uint16(closeCode))
+	copy(buf[2:], text)
+	return buf
+}

vendor/github.com/gorilla/websocket/conn_read.go 🔗

@@ -0,0 +1,18 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.5
+
+package websocket
+
+import "io"
+
+// read returns the next n bytes from the buffered reader, mapping io.EOF
+// to errUnexpectedEOF (end of stream mid-frame is never expected). The
+// returned slice aliases the bufio buffer and is only valid until the
+// next read on the connection.
+func (c *Conn) read(n int) ([]byte, error) {
+	p, err := c.br.Peek(n)
+	if err == io.EOF {
+		err = errUnexpectedEOF
+	}
+	// Discarding len(p) bytes cannot fail: Peek has already buffered
+	// them, so the error is intentionally ignored.
+	c.br.Discard(len(p))
+	return p, err
+}

vendor/github.com/gorilla/websocket/conn_read_legacy.go 🔗

@@ -0,0 +1,21 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package websocket
+
+import "io"
+
+func (c *Conn) read(n int) ([]byte, error) {
+	p, err := c.br.Peek(n)
+	if err == io.EOF {
+		err = errUnexpectedEOF
+	}
+	if len(p) > 0 {
+		// advance over the bytes just read
+		io.ReadFull(c.br, p)
+	}
+	return p, err
+}

vendor/github.com/gorilla/websocket/doc.go 🔗

@@ -0,0 +1,180 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application uses
+// the Upgrade function from an Upgrader object with a HTTP request handler
+// to get a pointer to a Conn:
+//
+//  var upgrader = websocket.Upgrader{
+//      ReadBufferSize:  1024,
+//      WriteBufferSize: 1024,
+//  }
+//
+//  func handler(w http.ResponseWriter, r *http.Request) {
+//      conn, err := upgrader.Upgrade(w, r, nil)
+//      if err != nil {
+//          log.Println(err)
+//          return
+//      }
+//      ... Use conn to send and receive messages.
+//  }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+//  for {
+//      messageType, p, err := conn.ReadMessage()
+//      if err != nil {
+//          return
+//      }
+//      if err = conn.WriteMessage(messageType, p); err != nil {
+//          return err
+//      }
+//  }
+//
+// In above snippet of code, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+//  for {
+//      messageType, r, err := conn.NextReader()
+//      if err != nil {
+//          return
+//      }
+//      w, err := conn.NextWriter(messageType)
+//      if err != nil {
+//          return err
+//      }
+//      if _, err := io.Copy(w, r); err != nil {
+//          return err
+//      }
+//      if err := w.Close(); err != nil {
+//          return err
+//      }
+//  }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by sending a close message to the
+// peer and returning a *CloseError from the NextReader, ReadMessage or the
+// message Read method.
+//
+// Connections handle received ping and pong messages by invoking callback
+// functions set with SetPingHandler and SetPongHandler methods. The callback
+// functions are called from the NextReader, ReadMessage and the message Read
+// methods.
+//
+// The default ping handler sends a pong to the peer. The application's reading
+// goroutine can block for a short time while the handler writes the pong data
+// to the connection.
+//
+// The application must read the connection to process ping, pong and close
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+//  func readLoop(c *websocket.Conn) {
+//      for {
+//          if _, _, err := c.NextReader(); err != nil {
+//              c.Close()
+//              break
+//          }
+//      }
+//  }
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
+//
+// Origin Considerations
+//
+// Web browsers allow Javascript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and not equal to the
+// Host request header.
+//
+// An application can allow connections from any origin by specifying a
+// function that always returns true:
+//
+//  var upgrader = websocket.Upgrader{
+//      CheckOrigin: func(r *http.Request) bool { return true },
+//  }
+//
+// The deprecated Upgrade function does not enforce an origin policy. It's the
+// application's responsibility to check the Origin header before calling
+// Upgrade.
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+//  var upgrader = websocket.Upgrader{
+//      EnableCompression: true,
+//  }
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+//  conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket

vendor/github.com/gorilla/websocket/json.go 🔗

@@ -0,0 +1,55 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// WriteJSON is deprecated, use c.WriteJSON instead.
+func WriteJSON(c *Conn, v interface{}) error {
+	return c.WriteJSON(v)
+}
+
+// WriteJSON writes the JSON encoding of v to the connection.
+//
+// See the documentation for encoding/json Marshal for details about the
+// conversion of Go values to JSON.
+func (c *Conn) WriteJSON(v interface{}) error {
+	w, err := c.NextWriter(TextMessage)
+	if err != nil {
+		return err
+	}
+	err1 := json.NewEncoder(w).Encode(v)
+	err2 := w.Close()
+	if err1 != nil {
+		return err1
+	}
+	return err2
+}
+
+// ReadJSON is deprecated, use c.ReadJSON instead.
+func ReadJSON(c *Conn, v interface{}) error {
+	return c.ReadJSON(v)
+}
+
+// ReadJSON reads the next JSON-encoded message from the connection and stores
+// it in the value pointed to by v.
+//
+// See the documentation for the encoding/json Unmarshal function for details
+// about the conversion of JSON to a Go value.
+func (c *Conn) ReadJSON(v interface{}) error {
+	_, r, err := c.NextReader()
+	if err != nil {
+		return err
+	}
+	err = json.NewDecoder(r).Decode(v)
+	if err == io.EOF {
+		// One value is expected in the message.
+		err = io.ErrUnexpectedEOF
+	}
+	return err
+}

vendor/github.com/gorilla/websocket/mask.go 🔗

@@ -0,0 +1,55 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.  Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build !appengine
+
+package websocket
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+
+	// Mask one byte at a time for small buffers.
+	if len(b) < 2*wordSize {
+		for i := range b {
+			b[i] ^= key[pos&3]
+			pos++
+		}
+		return pos & 3
+	}
+
+	// Mask one byte at a time to word boundary.
+	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
+		n = wordSize - n
+		for i := range b[:n] {
+			b[i] ^= key[pos&3]
+			pos++
+		}
+		b = b[n:]
+	}
+
+	// Create aligned word size key.
+	var k [wordSize]byte
+	for i := range k {
+		k[i] = key[(pos+i)&3]
+	}
+	kw := *(*uintptr)(unsafe.Pointer(&k))
+
+	// Mask one word at a time.
+	n := (len(b) / wordSize) * wordSize
+	for i := 0; i < n; i += wordSize {
+		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+	}
+
+	// Mask one byte at a time for remaining bytes.
+	b = b[n:]
+	for i := range b {
+		b[i] ^= key[pos&3]
+		pos++
+	}
+
+	return pos & 3
+}

vendor/github.com/gorilla/websocket/mask_safe.go 🔗

@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.  Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+	for i := range b {
+		b[i] ^= key[pos&3]
+		pos++
+	}
+	return pos & 3
+}

vendor/github.com/gorilla/websocket/prepared.go 🔗

@@ -0,0 +1,103 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bytes"
+	"net"
+	"sync"
+	"time"
+)
+
+// PreparedMessage caches on the wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
+type PreparedMessage struct {
+	messageType int
+	data        []byte
+	err         error
+	mu          sync.Mutex
+	frames      map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+	isServer         bool
+	compress         bool
+	compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+	once sync.Once
+	data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then send
+// it to connection using WritePreparedMessage method. Valid wire
+// representation will be calculated lazily only once for a set of current
+// connection options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+	pm := &PreparedMessage{
+		messageType: messageType,
+		frames:      make(map[prepareKey]*preparedFrame),
+		data:        data,
+	}
+
+	// Prepare a plain server frame.
+	_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+	if err != nil {
+		return nil, err
+	}
+
+	// To protect against caller modifying the data argument, remember the data
+	// copied to the plain server frame.
+	pm.data = frameData[len(frameData)-len(data):]
+	return pm, nil
+}
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+	pm.mu.Lock()
+	frame, ok := pm.frames[key]
+	if !ok {
+		frame = &preparedFrame{}
+		pm.frames[key] = frame
+	}
+	pm.mu.Unlock()
+
+	var err error
+	frame.once.Do(func() {
+		// Prepare a frame using a 'fake' connection.
+		// TODO: Refactor code in conn.go to allow more direct construction of
+		// the frame.
+		mu := make(chan bool, 1)
+		mu <- true
+		var nc prepareConn
+		c := &Conn{
+			conn:                   &nc,
+			mu:                     mu,
+			isServer:               key.isServer,
+			compressionLevel:       key.compressionLevel,
+			enableWriteCompression: true,
+			writeBuf:               make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+		}
+		if key.compress {
+			c.newCompressionWriter = compressNoContextTakeover
+		}
+		err = c.WriteMessage(pm.messageType, pm.data)
+		frame.data = nc.buf.Bytes()
+	})
+	return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+	buf bytes.Buffer
+	net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error)        { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }

vendor/github.com/gorilla/websocket/server.go 🔗

@@ -0,0 +1,291 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"errors"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+	message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+type Upgrader struct {
+	// HandshakeTimeout specifies the duration for the handshake to complete.
+	HandshakeTimeout time.Duration
+
+	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
+	// size is zero, then buffers allocated by the HTTP server are used. The
+	// I/O buffer sizes do not limit the size of the messages that can be sent
+	// or received.
+	ReadBufferSize, WriteBufferSize int
+
+	// Subprotocols specifies the server's supported protocols in order of
+	// preference. If this field is set, then the Upgrade method negotiates a
+	// subprotocol by selecting the first match in this list with a protocol
+	// requested by the client.
+	Subprotocols []string
+
+	// Error specifies the function for generating HTTP error responses. If Error
+	// is nil, then http.Error is used to generate the HTTP response.
+	Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+	// CheckOrigin returns true if the request Origin header is acceptable. If
+	// CheckOrigin is nil, the host in the Origin header must not be set or
+	// must match the host of the request.
+	CheckOrigin func(r *http.Request) bool
+
+	// EnableCompression specify if the server should attempt to negotiate per
+	// message compression (RFC 7692). Setting this value to true does not
+	// guarantee that compression will be supported. Currently only "no context
+	// takeover" modes are supported.
+	EnableCompression bool
+}
+
+// returnError reports a handshake failure: through u.Error when set,
+// otherwise via http.Error. In both cases the HandshakeError is returned to
+// the caller of Upgrade.
+func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
+	err := HandshakeError{reason}
+	if u.Error != nil {
+		u.Error(w, r, status, err)
+	} else {
+		w.Header().Set("Sec-Websocket-Version", "13")
+		http.Error(w, http.StatusText(status), status)
+	}
+	return nil, err
+}
+
+// checkSameOrigin returns true if the origin is not set or is equal to the request host.
+// NOTE(review): the u.Host == r.Host comparison is exact (case-sensitive and
+// including any port), so origins differing only in case are rejected.
+func checkSameOrigin(r *http.Request) bool {
+	origin := r.Header["Origin"]
+	if len(origin) == 0 {
+		return true
+	}
+	u, err := url.Parse(origin[0])
+	if err != nil {
+		return false
+	}
+	return u.Host == r.Host
+}
+
+// selectSubprotocol negotiates the subprotocol: the first entry of
+// u.Subprotocols also requested by the client wins. When the server has no
+// preference list, any value already present in responseHeader is used as-is.
+// An empty string means no subprotocol was selected.
+func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
+	if u.Subprotocols != nil {
+		clientProtocols := Subprotocols(r)
+		for _, serverProtocol := range u.Subprotocols {
+			for _, clientProtocol := range clientProtocols {
+				if clientProtocol == serverProtocol {
+					return clientProtocol
+				}
+			}
+		}
+	} else if responseHeader != nil {
+		return responseHeader.Get("Sec-Websocket-Protocol")
+	}
+	return ""
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// application negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// If the upgrade fails, then Upgrade replies to the client with an HTTP error
+// response.
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
+	if r.Method != "GET" {
+		return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET")
+	}
+
+	if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
+		return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
+	}
+
+	if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
+		return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header")
+	}
+
+	if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
+		return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header")
+	}
+
+	if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
+		return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
+	}
+
+	checkOrigin := u.CheckOrigin
+	if checkOrigin == nil {
+		checkOrigin = checkSameOrigin
+	}
+	if !checkOrigin(r) {
+		return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed")
+	}
+
+	challengeKey := r.Header.Get("Sec-Websocket-Key")
+	if challengeKey == "" {
+		return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank")
+	}
+
+	subprotocol := u.selectSubprotocol(r, responseHeader)
+
+	// Negotiate PMCE
+	var compress bool
+	if u.EnableCompression {
+		for _, ext := range parseExtensions(r.Header) {
+			if ext[""] != "permessage-deflate" {
+				continue
+			}
+			compress = true
+			break
+		}
+	}
+
+	var (
+		netConn net.Conn
+		err     error
+	)
+
+	// Take over the underlying TCP connection from the HTTP server.
+	h, ok := w.(http.Hijacker)
+	if !ok {
+		return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
+	}
+	var brw *bufio.ReadWriter
+	netConn, brw, err = h.Hijack()
+	if err != nil {
+		return u.returnError(w, r, http.StatusInternalServerError, err.Error())
+	}
+
+	// The client is not allowed to send data before the handshake completes.
+	if brw.Reader.Buffered() > 0 {
+		netConn.Close()
+		return nil, errors.New("websocket: client sent data before handshake is complete")
+	}
+
+	c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw)
+	c.subprotocol = subprotocol
+
+	if compress {
+		c.newCompressionWriter = compressNoContextTakeover
+		c.newDecompressionReader = decompressNoContextTakeover
+	}
+
+	// Build the 101 Switching Protocols response by hand into the
+	// connection's write buffer.
+	p := c.writeBuf[:0]
+	p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
+	p = append(p, computeAcceptKey(challengeKey)...)
+	p = append(p, "\r\n"...)
+	if c.subprotocol != "" {
+		p = append(p, "Sec-Websocket-Protocol: "...)
+		p = append(p, c.subprotocol...)
+		p = append(p, "\r\n"...)
+	}
+	if compress {
+		p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
+	}
+	for k, vs := range responseHeader {
+		if k == "Sec-Websocket-Protocol" {
+			continue
+		}
+		for _, v := range vs {
+			p = append(p, k...)
+			p = append(p, ": "...)
+			for i := 0; i < len(v); i++ {
+				b := v[i]
+				if b <= 31 {
+					// prevent response splitting.
+					b = ' '
+				}
+				p = append(p, b)
+			}
+			p = append(p, "\r\n"...)
+		}
+	}
+	p = append(p, "\r\n"...)
+
+	// Clear deadlines set by HTTP server.
+	netConn.SetDeadline(time.Time{})
+
+	if u.HandshakeTimeout > 0 {
+		netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
+	}
+	if _, err = netConn.Write(p); err != nil {
+		netConn.Close()
+		return nil, err
+	}
+	if u.HandshakeTimeout > 0 {
+		netConn.SetWriteDeadline(time.Time{})
+	}
+
+	return c, nil
+}
+
+// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
+//
+// Deprecated: Use websocket.Upgrader instead.
+//
+// The application is responsible for checking the request origin before
+// calling Upgrade. An example implementation of the same origin policy is:
+//
+//	if req.Header.Get("Origin") != "http://"+req.Host {
+//		http.Error(w, "Origin not allowed", 403)
+//		return
+//	}
+//
+// If the endpoint supports subprotocols, then the application is responsible
+// for negotiating the protocol used on the connection. Use the Subprotocols()
+// function to get the subprotocols requested by the client. Use the
+// Sec-Websocket-Protocol response header to specify the subprotocol selected
+// by the application.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// The connection buffers IO to the underlying network connection. The
+// readBufSize and writeBufSize parameters specify the size of the buffers to
+// use. Messages can be larger than the buffers.
+//
+// If the request is not a valid WebSocket handshake, then Upgrade returns an
+// error of type HandshakeError. Applications should handle this error by
+// replying to the client with an HTTP error response.
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+	u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+	u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+		// don't return errors to maintain backwards compatibility
+	}
+	u.CheckOrigin = func(r *http.Request) bool {
+		// allow all connections by default
+		return true
+	}
+	return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header. The header value is split on commas and
+// each entry is trimmed of surrounding whitespace.
+func Subprotocols(r *http.Request) []string {
+	h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+	if h == "" {
+		return nil
+	}
+	protocols := strings.Split(h, ",")
+	for i := range protocols {
+		protocols[i] = strings.TrimSpace(protocols[i])
+	}
+	return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+		tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}

vendor/github.com/gorilla/websocket/util.go 🔗

@@ -0,0 +1,214 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"crypto/rand"
+	"crypto/sha1"
+	"encoding/base64"
+	"io"
+	"net/http"
+	"strings"
+)
+
+// keyGUID is the fixed GUID defined by RFC 6455 that is appended to the
+// client's challenge key when computing the accept key.
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+// computeAcceptKey derives the Sec-WebSocket-Accept header value:
+// base64(SHA-1(challengeKey + keyGUID)).
+func computeAcceptKey(challengeKey string) string {
+	h := sha1.New()
+	h.Write([]byte(challengeKey))
+	h.Write(keyGUID)
+	return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+// generateChallengeKey returns 16 random bytes, base64-encoded, suitable for
+// use as a Sec-WebSocket-Key request header value.
+func generateChallengeKey() (string, error) {
+	p := make([]byte, 16)
+	if _, err := io.ReadFull(rand.Reader, p); err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(p), nil
+}
+
+// Octet types from RFC 2616. Each entry classifies one byte value as a token
+// octet, a (linear white) space octet, both, or neither; the table is filled
+// in by init below.
+var octetTypes [256]byte
+
+const (
+	isTokenOctet = 1 << iota
+	isSpaceOctet
+)
+
+func init() {
+	// From RFC 2616
+	//
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t byte
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpaceOctet
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isTokenOctet
+		}
+		octetTypes[c] = t
+	}
+}
+
+// skipSpace returns s with its leading run of space octets (space, tab, CR,
+// LF) removed.
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpaceOctet == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+// nextToken splits s into its leading run of token octets and the remainder.
+// The token is empty when s does not start with a token octet.
+func nextToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isTokenOctet == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+// nextTokenOrQuoted reads either a plain token or a double-quoted string
+// (with backslash escapes removed) from the start of s, returning the value
+// and the remaining input. An unterminated quoted string yields ("", "").
+func nextTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return nextToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			// First escape found: switch to a copying loop that unescapes the
+			// rest of the quoted string into p.
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j += 1
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j += 1
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains token. The comparison is case-insensitive.
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+	for _, s := range header[name] {
+		for {
+			var t string
+			t, s = nextToken(skipSpace(s))
+			if t == "" {
+				continue headers
+			}
+			s = skipSpace(s)
+			if s != "" && s[0] != ',' {
+				continue headers
+			}
+			if strings.EqualFold(t, value) {
+				return true
+			}
+			if s == "" {
+				continue headers
+			}
+			s = s[1:]
+		}
+	}
+	return false
+}
+
+// parseExtensions parses WebSocket extensions from a header. Each returned
+// map holds one extension: the "" key maps to the extension token and every
+// other key is a parameter name mapped to its (possibly empty) value.
+func parseExtensions(header http.Header) []map[string]string {
+
+	// From RFC 6455:
+	//
+	//  Sec-WebSocket-Extensions = extension-list
+	//  extension-list = 1#extension
+	//  extension = extension-token *( ";" extension-param )
+	//  extension-token = registered-token
+	//  registered-token = token
+	//  extension-param = token [ "=" (token | quoted-string) ]
+	//     ;When using the quoted-string syntax variant, the value
+	//     ;after quoted-string unescaping MUST conform to the
+	//     ;'token' ABNF.
+
+	var result []map[string]string
+headers:
+	for _, s := range header["Sec-Websocket-Extensions"] {
+		for {
+			var t string
+			t, s = nextToken(skipSpace(s))
+			if t == "" {
+				continue headers
+			}
+			ext := map[string]string{"": t}
+			for {
+				s = skipSpace(s)
+				if !strings.HasPrefix(s, ";") {
+					break
+				}
+				var k string
+				k, s = nextToken(skipSpace(s[1:]))
+				if k == "" {
+					continue headers
+				}
+				s = skipSpace(s)
+				var v string
+				if strings.HasPrefix(s, "=") {
+					v, s = nextTokenOrQuoted(skipSpace(s[1:]))
+					s = skipSpace(s)
+				}
+				if s != "" && s[0] != ',' && s[0] != ';' {
+					continue headers
+				}
+				ext[k] = v
+			}
+			if s != "" && s[0] != ',' {
+				continue headers
+			}
+			result = append(result, ext)
+			if s == "" {
+				continue headers
+			}
+			s = s[1:]
+		}
+	}
+	return result
+}

vendor/github.com/vektah/gqlgen/LICENSE 🔗

@@ -0,0 +1,19 @@
+Copyright (c) 2018 Adam Scarr
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

vendor/github.com/vektah/gqlgen/graphql/bool.go 🔗

@@ -0,0 +1,30 @@
+package graphql
+
+import (
+	"fmt"
+	"io"
+	"strings"
+)
+
+// MarshalBoolean writes b as the JSON literal true or false.
+func MarshalBoolean(b bool) Marshaler {
+	return WriterFunc(func(w io.Writer) {
+		if b {
+			w.Write(trueLit)
+		} else {
+			w.Write(falseLit)
+		}
+	})
+}
+
+// UnmarshalBoolean coerces a string (case-insensitive "true"), an int
+// (non-zero is true) or a bool into a bool. Any other type is an error.
+func UnmarshalBoolean(v interface{}) (bool, error) {
+	switch v := v.(type) {
+	case string:
+		return "true" == strings.ToLower(v), nil
+	case int:
+		return v != 0, nil
+	case bool:
+		return v, nil
+	default:
+		return false, fmt.Errorf("%T is not a bool", v)
+	}
+}

vendor/github.com/vektah/gqlgen/graphql/context.go 🔗

@@ -0,0 +1,145 @@
+package graphql
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"github.com/vektah/gqlgen/neelance/query"
+)
+
+// Resolver resolves a single field and returns its value.
+type Resolver func(ctx context.Context) (res interface{}, err error)
+
+// ResolverMiddleware wraps every field resolution; call next to continue.
+type ResolverMiddleware func(ctx context.Context, next Resolver) (res interface{}, err error)
+
+// RequestMiddleware wraps the execution of a whole request; call next to
+// continue and return the serialized response bytes.
+type RequestMiddleware func(ctx context.Context, next func(ctx context.Context) []byte) []byte
+
+// RequestContext holds the per-request state (query, variables, parsed
+// document, hooks and accumulated errors) shared by all resolvers.
+type RequestContext struct {
+	RawQuery  string
+	Variables map[string]interface{}
+	Doc       *query.Document
+	// ErrorPresenter will be used to generate the error
+	// message from errors given to Error().
+	ErrorPresenter     ErrorPresenterFunc
+	Recover            RecoverFunc
+	ResolverMiddleware ResolverMiddleware
+	RequestMiddleware  RequestMiddleware
+
+	// errorsMu guards Errors, which resolvers may append to concurrently.
+	errorsMu sync.Mutex
+	Errors   []*Error
+}
+
+// DefaultResolverMiddleware is a pass-through resolver middleware.
+func DefaultResolverMiddleware(ctx context.Context, next Resolver) (res interface{}, err error) {
+	return next(ctx)
+}
+
+// DefaultRequestMiddleware is a pass-through request middleware.
+func DefaultRequestMiddleware(ctx context.Context, next func(ctx context.Context) []byte) []byte {
+	return next(ctx)
+}
+
+// NewRequestContext builds a RequestContext with all hooks set to their
+// pass-through/default implementations.
+func NewRequestContext(doc *query.Document, query string, variables map[string]interface{}) *RequestContext {
+	return &RequestContext{
+		Doc:                doc,
+		RawQuery:           query,
+		Variables:          variables,
+		ResolverMiddleware: DefaultResolverMiddleware,
+		RequestMiddleware:  DefaultRequestMiddleware,
+		Recover:            DefaultRecover,
+		ErrorPresenter:     DefaultErrorPresenter,
+	}
+}
+
+// key is a private context-key type so values stored here cannot collide with
+// other packages.
+type key string
+
+const (
+	request  key = "request_context"
+	resolver key = "resolver_context"
+)
+
+// GetRequestContext returns the RequestContext stored in ctx, or nil.
+func GetRequestContext(ctx context.Context) *RequestContext {
+	val := ctx.Value(request)
+	if val == nil {
+		return nil
+	}
+
+	return val.(*RequestContext)
+}
+
+// WithRequestContext stores rc in a derived context.
+func WithRequestContext(ctx context.Context, rc *RequestContext) context.Context {
+	return context.WithValue(ctx, request, rc)
+}
+
+// ResolverContext describes the field currently being resolved.
+type ResolverContext struct {
+	// The name of the type this field belongs to
+	Object string
+	// These are the args after processing, they can be mutated in middleware to change what the resolver will get.
+	Args map[string]interface{}
+	// The raw field
+	Field CollectedField
+	// The path of fields to get to this resolver
+	Path []interface{}
+}
+
+// PushField appends a field alias to the resolver path.
+func (r *ResolverContext) PushField(alias string) {
+	r.Path = append(r.Path, alias)
+}
+
+// PushIndex appends a list index to the resolver path.
+func (r *ResolverContext) PushIndex(index int) {
+	r.Path = append(r.Path, index)
+}
+
+// Pop removes the last element of the resolver path.
+func (r *ResolverContext) Pop() {
+	r.Path = r.Path[0 : len(r.Path)-1]
+}
+
+// GetResolverContext returns the ResolverContext stored in ctx, or nil.
+func GetResolverContext(ctx context.Context) *ResolverContext {
+	val := ctx.Value(resolver)
+	if val == nil {
+		return nil
+	}
+
+	return val.(*ResolverContext)
+}
+
+// WithResolverContext stores rc in a derived context. rc's path is rebuilt
+// from the parent resolver's path plus this field's alias (when set).
+func WithResolverContext(ctx context.Context, rc *ResolverContext) context.Context {
+	parent := GetResolverContext(ctx)
+	rc.Path = nil
+	if parent != nil {
+		rc.Path = append(rc.Path, parent.Path...)
+	}
+	if rc.Field.Alias != "" {
+		rc.PushField(rc.Field.Alias)
+	}
+	return context.WithValue(ctx, resolver, rc)
+}
+
+// This is just a convenient wrapper method for CollectFields
+func CollectFieldsCtx(ctx context.Context, satisfies []string) []CollectedField {
+	reqctx := GetRequestContext(ctx)
+	resctx := GetResolverContext(ctx)
+	return CollectFields(reqctx.Doc, resctx.Field.Selections, satisfies, reqctx.Variables)
+}
+
+// Errorf sends an error string to the client, passing it through the formatter.
+func (c *RequestContext) Errorf(ctx context.Context, format string, args ...interface{}) {
+	c.errorsMu.Lock()
+	defer c.errorsMu.Unlock()
+
+	c.Errors = append(c.Errors, c.ErrorPresenter(ctx, fmt.Errorf(format, args...)))
+}
+
+// Error sends an error to the client, passing it through the formatter.
+func (c *RequestContext) Error(ctx context.Context, err error) {
+	c.errorsMu.Lock()
+	defer c.errorsMu.Unlock()
+
+	c.Errors = append(c.Errors, c.ErrorPresenter(ctx, err))
+}
+
+// AddError is a convenience method for adding an error to the current response
+func AddError(ctx context.Context, err error) {
+	GetRequestContext(ctx).Error(ctx, err)
+}
+
+// AddErrorf is a convenience method for adding an error to the current response
+func AddErrorf(ctx context.Context, format string, args ...interface{}) {
+	GetRequestContext(ctx).Errorf(ctx, format, args...)
+}

vendor/github.com/vektah/gqlgen/graphql/defer.go 🔗

@@ -0,0 +1,30 @@
+package graphql
+
+import (
+	"io"
+	"sync"
+)
+
+// Defer will begin executing the given function and immediately return a result that will block until the function completes
+func Defer(f func() Marshaler) Marshaler {
+	var deferred deferred
+	deferred.mu.Lock()
+
+	go func() {
+		deferred.result = f()
+		deferred.mu.Unlock()
+	}()
+
+	return &deferred
+}
+
+// deferred uses its mutex as a completion signal: it is created locked and
+// unlocked by the worker goroutine once result is set, so MarshalGQL blocks
+// until the computation has finished.
+type deferred struct {
+	result Marshaler
+	mu     sync.Mutex
+}
+
+func (d *deferred) MarshalGQL(w io.Writer) {
+	d.mu.Lock()
+	d.result.MarshalGQL(w)
+	d.mu.Unlock()
+}

vendor/github.com/vektah/gqlgen/graphql/error.go 🔗

@@ -0,0 +1,46 @@
+package graphql
+
+import (
+	"context"
+)
+
+// Error is the standard graphql error type described in https://facebook.github.io/graphql/draft/#sec-Errors
+type Error struct {
+	Message    string                 `json:"message"`
+	Path       []interface{}          `json:"path,omitempty"`
+	Locations  []ErrorLocation        `json:"locations,omitempty"`
+	Extensions map[string]interface{} `json:"extensions,omitempty"`
+}
+
+func (e *Error) Error() string {
+	return e.Message
+}
+
+// ErrorLocation is a line/column position within the query document.
+type ErrorLocation struct {
+	Line   int `json:"line,omitempty"`
+	Column int `json:"column,omitempty"`
+}
+
+// ErrorPresenterFunc converts an error into the *Error sent to the client.
+type ErrorPresenterFunc func(context.Context, error) *Error
+
+// ExtendedError lets an error carry extra data for the "extensions" field.
+type ExtendedError interface {
+	Extensions() map[string]interface{}
+}
+
+// DefaultErrorPresenter attaches the current resolver path to the error:
+// *Error values pass through with their Path filled in, everything else is
+// wrapped, copying Extensions when the error implements ExtendedError.
+func DefaultErrorPresenter(ctx context.Context, err error) *Error {
+	if gqlerr, ok := err.(*Error); ok {
+		gqlerr.Path = GetResolverContext(ctx).Path
+		return gqlerr
+	}
+
+	var extensions map[string]interface{}
+	if ee, ok := err.(ExtendedError); ok {
+		extensions = ee.Extensions()
+	}
+
+	return &Error{
+		Message:    err.Error(),
+		Path:       GetResolverContext(ctx).Path,
+		Extensions: extensions,
+	}
+}

vendor/github.com/vektah/gqlgen/graphql/exec.go 🔗

@@ -0,0 +1,118 @@
+package graphql
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/vektah/gqlgen/neelance/common"
+	"github.com/vektah/gqlgen/neelance/query"
+	"github.com/vektah/gqlgen/neelance/schema"
+)
+
+// ExecutableSchema exposes a parsed schema plus execution entry points for
+// query, mutation and subscription operations.
+type ExecutableSchema interface {
+	Schema() *schema.Schema
+
+	Query(ctx context.Context, op *query.Operation) *Response
+	Mutation(ctx context.Context, op *query.Operation) *Response
+	Subscription(ctx context.Context, op *query.Operation) func() *Response
+}
+
+// CollectFields flattens a selection set into a list of fields grouped by
+// alias, expanding inline and named fragments whose type condition matches
+// one of the satisfies type names.
+func CollectFields(doc *query.Document, selSet []query.Selection, satisfies []string, variables map[string]interface{}) []CollectedField {
+	return collectFields(doc, selSet, satisfies, variables, map[string]bool{})
+}
+
+// collectFields does the recursive work for CollectFields; visited prevents a
+// named fragment from being expanded more than once.
+func collectFields(doc *query.Document, selSet []query.Selection, satisfies []string, variables map[string]interface{}, visited map[string]bool) []CollectedField {
+	var groupedFields []CollectedField
+
+	for _, sel := range selSet {
+		switch sel := sel.(type) {
+		case *query.Field:
+			f := getOrCreateField(&groupedFields, sel.Alias.Name, func() CollectedField {
+				f := CollectedField{
+					Alias: sel.Alias.Name,
+					Name:  sel.Name.Name,
+				}
+				if len(sel.Arguments) > 0 {
+					f.Args = map[string]interface{}{}
+					for _, arg := range sel.Arguments {
+						// Variable references are resolved against the request
+						// variables; literal values are evaluated directly.
+						if variable, ok := arg.Value.(*common.Variable); ok {
+							if val, ok := variables[variable.Name]; ok {
+								f.Args[arg.Name.Name] = val
+							}
+						} else {
+							f.Args[arg.Name.Name] = arg.Value.Value(variables)
+						}
+					}
+				}
+				return f
+			})
+
+			f.Selections = append(f.Selections, sel.Selections...)
+		case *query.InlineFragment:
+			if !instanceOf(sel.On.Ident.Name, satisfies) {
+				continue
+			}
+
+			for _, childField := range collectFields(doc, sel.Selections, satisfies, variables, visited) {
+				f := getOrCreateField(&groupedFields, childField.Name, func() CollectedField { return childField })
+				f.Selections = append(f.Selections, childField.Selections...)
+			}
+
+		case *query.FragmentSpread:
+			fragmentName := sel.Name.Name
+			if _, seen := visited[fragmentName]; seen {
+				continue
+			}
+			visited[fragmentName] = true
+
+			fragment := doc.Fragments.Get(fragmentName)
+			if fragment == nil {
+				// should never happen, validator has already run
+				panic(fmt.Errorf("missing fragment %s", fragmentName))
+			}
+
+			if !instanceOf(fragment.On.Ident.Name, satisfies) {
+				continue
+			}
+
+			for _, childField := range collectFields(doc, fragment.Selections, satisfies, variables, visited) {
+				f := getOrCreateField(&groupedFields, childField.Name, func() CollectedField { return childField })
+				f.Selections = append(f.Selections, childField.Selections...)
+			}
+
+		default:
+			panic(fmt.Errorf("unsupported %T", sel))
+		}
+	}
+
+	return groupedFields
+}
+
+// CollectedField is one field of a flattened selection set, with its
+// processed arguments and the child selections still to be resolved.
+type CollectedField struct {
+	Alias      string
+	Name       string
+	Args       map[string]interface{}
+	Selections []query.Selection
+}
+
+// instanceOf reports whether val is one of the satisfies type names.
+func instanceOf(val string, satisfies []string) bool {
+	for _, s := range satisfies {
+		if val == s {
+			return true
+		}
+	}
+	return false
+}
+
+// getOrCreateField finds the field with the given alias in *c, or appends one
+// built by creator. It returns a pointer into the slice so callers can mutate
+// the stored field.
+func getOrCreateField(c *[]CollectedField, name string, creator func() CollectedField) *CollectedField {
+	for i, cf := range *c {
+		if cf.Alias == name {
+			return &(*c)[i]
+		}
+	}
+
+	f := creator()
+
+	*c = append(*c, f)
+	return &(*c)[len(*c)-1]
+}

vendor/github.com/vektah/gqlgen/graphql/float.go 🔗

@@ -0,0 +1,26 @@
+package graphql
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// MarshalFloat writes f using fmt's %f verb (fixed-point notation).
+func MarshalFloat(f float64) Marshaler {
+	return WriterFunc(func(w io.Writer) {
+		io.WriteString(w, fmt.Sprintf("%f", f))
+	})
+}
+
+// UnmarshalFloat coerces a string, int or float64 into a float64. Any other
+// type is an error.
+func UnmarshalFloat(v interface{}) (float64, error) {
+	switch v := v.(type) {
+	case string:
+		return strconv.ParseFloat(v, 64)
+	case int:
+		return float64(v), nil
+	case float64:
+		return v, nil
+	default:
+		return 0, fmt.Errorf("%T is not an float", v)
+	}
+}

vendor/github.com/vektah/gqlgen/graphql/id.go 🔗

@@ -0,0 +1,33 @@
+package graphql
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// MarshalID writes s as a quoted JSON string.
+func MarshalID(s string) Marshaler {
+	return WriterFunc(func(w io.Writer) {
+		io.WriteString(w, strconv.Quote(s))
+	})
+}
+
+// UnmarshalID coerces a scalar (string, int, float64, bool or nil) into its
+// string representation for use as an ID. Any other type is an error.
+func UnmarshalID(v interface{}) (string, error) {
+	switch v := v.(type) {
+	case string:
+		return v, nil
+	case int:
+		return strconv.Itoa(v), nil
+	case float64:
+		return fmt.Sprintf("%f", v), nil
+	case bool:
+		if v {
+			return "true", nil
+		} else {
+			return "false", nil
+		}
+	case nil:
+		return "null", nil
+	default:
+		return "", fmt.Errorf("%T is not a string", v)
+	}
+}

vendor/github.com/vektah/gqlgen/graphql/int.go 🔗

@@ -0,0 +1,26 @@
+package graphql
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// MarshalInt writes i as a bare decimal integer.
+func MarshalInt(i int) Marshaler {
+	return WriterFunc(func(w io.Writer) {
+		io.WriteString(w, strconv.Itoa(i))
+	})
+}
+
+// UnmarshalInt coerces a string, int or float64 (truncated) into an int. Any
+// other type is an error.
+func UnmarshalInt(v interface{}) (int, error) {
+	switch v := v.(type) {
+	case string:
+		return strconv.Atoi(v)
+	case int:
+		return v, nil
+	case float64:
+		return int(v), nil
+	default:
+		return 0, fmt.Errorf("%T is not an int", v)
+	}
+}

vendor/github.com/vektah/gqlgen/graphql/jsonw.go 🔗

@@ -0,0 +1,83 @@
+package graphql
+
+import (
+	"io"
+	"strconv"
+)
+
+// JSON punctuation and literal byte slices reused by all marshalers.
+var nullLit = []byte(`null`)
+var trueLit = []byte(`true`)
+var falseLit = []byte(`false`)
+var openBrace = []byte(`{`)
+var closeBrace = []byte(`}`)
+var openBracket = []byte(`[`)
+var closeBracket = []byte(`]`)
+var colon = []byte(`:`)
+var comma = []byte(`,`)
+
+var Null = lit(nullLit)
+var True = lit(trueLit)
+var False = lit(falseLit)
+
+// Marshaler writes a GraphQL value as JSON directly to a writer.
+type Marshaler interface {
+	MarshalGQL(w io.Writer)
+}
+
+// Unmarshaler parses a GraphQL input value from a decoded JSON value.
+type Unmarshaler interface {
+	UnmarshalGQL(v interface{}) error
+}
+
+// OrderedMap is a JSON object that preserves key insertion order.
+type OrderedMap struct {
+	Keys   []string
+	Values []Marshaler
+}
+
+// WriterFunc adapts a plain function to the Marshaler interface.
+type WriterFunc func(writer io.Writer)
+
+func (f WriterFunc) MarshalGQL(w io.Writer) {
+	f(w)
+}
+
+// NewOrderedMap pre-allocates len entries in both slices.
+// NOTE(review): Add appends AFTER those pre-allocated zero entries, so
+// callers of NewOrderedMap are expected to assign Keys[i]/Values[i] by index
+// rather than mix construction styles.
+func NewOrderedMap(len int) *OrderedMap {
+	return &OrderedMap{
+		Keys:   make([]string, len),
+		Values: make([]Marshaler, len),
+	}
+}
+
+// Add appends a key/value pair, preserving insertion order.
+func (m *OrderedMap) Add(key string, value Marshaler) {
+	m.Keys = append(m.Keys, key)
+	m.Values = append(m.Values, value)
+}
+
+func (m *OrderedMap) MarshalGQL(writer io.Writer) {
+	writer.Write(openBrace)
+	for i, key := range m.Keys {
+		if i != 0 {
+			writer.Write(comma)
+		}
+		io.WriteString(writer, strconv.Quote(key))
+		writer.Write(colon)
+		m.Values[i].MarshalGQL(writer)
+	}
+	writer.Write(closeBrace)
+}
+
+// Array is a JSON array of marshalers.
+type Array []Marshaler
+
+func (a Array) MarshalGQL(writer io.Writer) {
+	writer.Write(openBracket)
+	for i, val := range a {
+		if i != 0 {
+			writer.Write(comma)
+		}
+		val.MarshalGQL(writer)
+	}
+	writer.Write(closeBracket)
+}
+
+// lit wraps a fixed byte literal as a Marshaler.
+func lit(b []byte) Marshaler {
+	return WriterFunc(func(w io.Writer) {
+		w.Write(b)
+	})
+}

vendor/github.com/vektah/gqlgen/graphql/map.go 🔗

@@ -0,0 +1,24 @@
+package graphql
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+// MarshalMap JSON-encodes val directly to the writer. Encoding failures
+// panic, since MarshalGQL has no error return. NOTE(review): json.Encoder
+// appends a trailing newline after the encoded value.
+func MarshalMap(val map[string]interface{}) Marshaler {
+	return WriterFunc(func(w io.Writer) {
+		err := json.NewEncoder(w).Encode(val)
+		if err != nil {
+			panic(err)
+		}
+	})
+}
+
+// UnmarshalMap accepts only a map[string]interface{}; any other type is an
+// error.
+func UnmarshalMap(v interface{}) (map[string]interface{}, error) {
+	if m, ok := v.(map[string]interface{}); ok {
+		return m, nil
+	}
+
+	return nil, fmt.Errorf("%T is not a map", v)
+}

vendor/github.com/vektah/gqlgen/graphql/recovery.go 🔗

@@ -0,0 +1,19 @@
+package graphql
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"runtime/debug"
+)
+
+// RecoverFunc turns a recovered panic value into the error shown to the user.
+type RecoverFunc func(ctx context.Context, err interface{}) (userMessage error)
+
+// DefaultRecover logs the panic value and a stack trace to stderr and hides
+// the details from the client behind a generic error.
+func DefaultRecover(ctx context.Context, err interface{}) error {
+	fmt.Fprintln(os.Stderr, err)
+	fmt.Fprintln(os.Stderr)
+	debug.PrintStack()
+
+	return errors.New("internal system error")
+}

vendor/github.com/vektah/gqlgen/graphql/response.go 🔗

@@ -0,0 +1,18 @@
+package graphql
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+)
+
+// Response is the JSON body returned for a GraphQL request: the raw data
+// payload plus any accumulated errors.
+type Response struct {
+	Data   json.RawMessage `json:"data"`
+	Errors []*Error        `json:"errors,omitempty"`
+}
+
+// ErrorResponse builds a data-less Response carrying a single formatted
+// error message.
+func ErrorResponse(ctx context.Context, messagef string, args ...interface{}) *Response {
+	return &Response{
+		Errors: []*Error{{Message: fmt.Sprintf(messagef, args...)}},
+	}
+}

vendor/github.com/vektah/gqlgen/graphql/string.go 🔗

@@ -0,0 +1,63 @@
+package graphql
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+)
+
+const encodeHex = "0123456789ABCDEF"
+
+// MarshalString writes s as a quoted JSON string, escaping control
+// characters, backslash and double quote; unhandled control characters are
+// emitted as \u00XX escapes. Runs of ordinary characters are copied in one
+// WriteString call each.
+func MarshalString(s string) Marshaler {
+	return WriterFunc(func(w io.Writer) {
+		start := 0
+		io.WriteString(w, `"`)
+
+		for i, c := range s {
+			if c < 0x20 || c == '\\' || c == '"' {
+				io.WriteString(w, s[start:i])
+
+				switch c {
+				case '\t':
+					io.WriteString(w, `\t`)
+				case '\r':
+					io.WriteString(w, `\r`)
+				case '\n':
+					io.WriteString(w, `\n`)
+				case '\\':
+					io.WriteString(w, `\\`)
+				case '"':
+					io.WriteString(w, `\"`)
+				default:
+					io.WriteString(w, `\u00`)
+					w.Write([]byte{encodeHex[c>>4], encodeHex[c&0xf]})
+				}
+
+				start = i + 1
+			}
+		}
+
+		io.WriteString(w, s[start:])
+		io.WriteString(w, `"`)
+	})
+}
+
+// UnmarshalString coerces a scalar (string, int, float64, bool or nil) into
+// its string representation. Any other type is an error.
+func UnmarshalString(v interface{}) (string, error) {
+	switch v := v.(type) {
+	case string:
+		return v, nil
+	case int:
+		return strconv.Itoa(v), nil
+	case float64:
+		return fmt.Sprintf("%f", v), nil
+	case bool:
+		if v {
+			return "true", nil
+		} else {
+			return "false", nil
+		}
+	case nil:
+		return "null", nil
+	default:
+		return "", fmt.Errorf("%T is not a string", v)
+	}
+}

vendor/github.com/vektah/gqlgen/graphql/time.go 🔗

@@ -0,0 +1,21 @@
+package graphql
+
+import (
+	"errors"
+	"io"
+	"strconv"
+	"time"
+)
+
// MarshalTime renders t as a quoted RFC3339 string, the wire format of the
// graphql Time scalar.
func MarshalTime(t time.Time) Marshaler {
	return WriterFunc(func(w io.Writer) {
		io.WriteString(w, strconv.Quote(t.Format(time.RFC3339)))
	})
}
+
// UnmarshalTime parses a graphql Time input, which must be an RFC3339
// formatted string.
func UnmarshalTime(v interface{}) (time.Time, error) {
	str, ok := v.(string)
	if !ok {
		return time.Time{}, errors.New("time should be RFC3339 formatted string")
	}
	return time.Parse(time.RFC3339, str)
}

vendor/github.com/vektah/gqlgen/handler/graphql.go 🔗

@@ -0,0 +1,235 @@
+package handler
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/gorilla/websocket"
+	"github.com/vektah/gqlgen/graphql"
+	"github.com/vektah/gqlgen/neelance/errors"
+	"github.com/vektah/gqlgen/neelance/query"
+	"github.com/vektah/gqlgen/neelance/validation"
+)
+
// params is the JSON shape of an incoming GraphQL request, shared by the
// HTTP POST body, the GET query parameters, and websocket start payloads.
type params struct {
	Query         string                 `json:"query"`
	OperationName string                 `json:"operationName"`
	Variables     map[string]interface{} `json:"variables"`
}

// Config collects the handler hooks applied via Option functions.
type Config struct {
	upgrader       websocket.Upgrader
	recover        graphql.RecoverFunc
	errorPresenter graphql.ErrorPresenterFunc
	resolverHook   graphql.ResolverMiddleware
	requestHook    graphql.RequestMiddleware
}

// newRequestContext builds a per-request context, overriding the package
// defaults only for the hooks that were explicitly configured.
func (c *Config) newRequestContext(doc *query.Document, query string, variables map[string]interface{}) *graphql.RequestContext {
	reqCtx := graphql.NewRequestContext(doc, query, variables)
	if hook := c.recover; hook != nil {
		reqCtx.Recover = hook
	}

	if hook := c.errorPresenter; hook != nil {
		reqCtx.ErrorPresenter = hook
	}

	if hook := c.resolverHook; hook != nil {
		reqCtx.ResolverMiddleware = hook
	}

	if hook := c.requestHook; hook != nil {
		reqCtx.RequestMiddleware = hook
	}

	return reqCtx
}
+
// Option customizes the handler configuration before it starts serving.
type Option func(cfg *Config)

// WebsocketUpgrader replaces the default websocket upgrader used for
// graphql-ws connections.
func WebsocketUpgrader(upgrader websocket.Upgrader) Option {
	return func(cfg *Config) {
		cfg.upgrader = upgrader
	}
}

// RecoverFunc installs a custom handler for panics raised by resolvers.
func RecoverFunc(recover graphql.RecoverFunc) Option {
	return func(cfg *Config) {
		cfg.recover = recover
	}
}

// ErrorPresenter transforms errors found while resolving into errors that will be returned to the user. It provides
// a good place to add any extra fields, like error.type, that might be desired by your frontend. Check the default
// implementation in graphql.DefaultErrorPresenter for an example.
func ErrorPresenter(f graphql.ErrorPresenterFunc) Option {
	return func(cfg *Config) {
		cfg.errorPresenter = f
	}
}

// ResolverMiddleware allows you to define a function that will be called around every resolver,
// useful for tracing and logging.
// It will only be called for user defined resolvers, any direct binding to models is assumed
// to cost nothing.
func ResolverMiddleware(middleware graphql.ResolverMiddleware) Option {
	return func(cfg *Config) {
		if cfg.resolverHook == nil {
			cfg.resolverHook = middleware
			return
		}

		// Chain onto any previously registered middleware: earlier
		// registrations wrap (and therefore run outside) later ones.
		lastResolve := cfg.resolverHook
		cfg.resolverHook = func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) {
			return lastResolve(ctx, func(ctx context.Context) (res interface{}, err error) {
				return middleware(ctx, next)
			})
		}
	}
}

// RequestMiddleware allows you to define a function that will be called around the root request,
// after the query has been parsed. This is useful for logging and tracing
func RequestMiddleware(middleware graphql.RequestMiddleware) Option {
	return func(cfg *Config) {
		if cfg.requestHook == nil {
			cfg.requestHook = middleware
			return
		}

		// Chain in registration order, mirroring ResolverMiddleware.
		lastResolve := cfg.requestHook
		cfg.requestHook = func(ctx context.Context, next func(ctx context.Context) []byte) []byte {
			return lastResolve(ctx, func(ctx context.Context) []byte {
				return middleware(ctx, next)
			})
		}
	}
}
+
// GraphQL returns an http.HandlerFunc that serves queries and mutations
// over GET and POST against the given executable schema, and hands
// websocket upgrade requests over to the graphql-ws transport.
func GraphQL(exec graphql.ExecutableSchema, options ...Option) http.HandlerFunc {
	cfg := Config{
		upgrader: websocket.Upgrader{
			ReadBufferSize:  1024,
			WriteBufferSize: 1024,
		},
	}

	for _, option := range options {
		option(&cfg)
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodOptions {
			w.Header().Set("Allow", "OPTIONS, GET, POST")
			w.WriteHeader(http.StatusOK)
			return
		}

		// Websocket upgrade requests go to the subscription transport.
		if strings.Contains(r.Header.Get("Upgrade"), "websocket") {
			connectWs(exec, w, r, &cfg)
			return
		}

		// Decode the request: query-string parameters for GET, JSON body
		// for POST; everything else is rejected.
		var reqParams params
		switch r.Method {
		case http.MethodGet:
			reqParams.Query = r.URL.Query().Get("query")
			reqParams.OperationName = r.URL.Query().Get("operationName")

			if variables := r.URL.Query().Get("variables"); variables != "" {
				if err := json.Unmarshal([]byte(variables), &reqParams.Variables); err != nil {
					sendErrorf(w, http.StatusBadRequest, "variables could not be decoded")
					return
				}
			}
		case http.MethodPost:
			if err := json.NewDecoder(r.Body).Decode(&reqParams); err != nil {
				sendErrorf(w, http.StatusBadRequest, "json body could not be decoded: "+err.Error())
				return
			}
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		w.Header().Set("Content-Type", "application/json")

		// Parse and validate before executing anything.
		doc, qErr := query.Parse(reqParams.Query)
		if qErr != nil {
			sendError(w, http.StatusUnprocessableEntity, qErr)
			return
		}

		errs := validation.Validate(exec.Schema(), doc)
		if len(errs) != 0 {
			sendError(w, http.StatusUnprocessableEntity, errs...)
			return
		}

		op, err := doc.GetOperation(reqParams.OperationName)
		if err != nil {
			sendErrorf(w, http.StatusUnprocessableEntity, err.Error())
			return
		}

		reqCtx := cfg.newRequestContext(doc, reqParams.Query, reqParams.Variables)
		ctx := graphql.WithRequestContext(r.Context(), reqCtx)

		// Convert resolver panics into a user-safe error response via the
		// configured Recover hook.
		defer func() {
			if err := recover(); err != nil {
				userErr := reqCtx.Recover(ctx, err)
				sendErrorf(w, http.StatusUnprocessableEntity, userErr.Error())
			}
		}()

		switch op.Type {
		case query.Query:
			b, err := json.Marshal(exec.Query(ctx, op))
			if err != nil {
				panic(err)
			}
			w.Write(b)
		case query.Mutation:
			b, err := json.Marshal(exec.Mutation(ctx, op))
			if err != nil {
				panic(err)
			}
			w.Write(b)
		default:
			// Subscriptions are only supported over websockets.
			sendErrorf(w, http.StatusBadRequest, "unsupported operation type")
		}
	})
}
+
+func sendError(w http.ResponseWriter, code int, errors ...*errors.QueryError) {
+	w.WriteHeader(code)
+	var errs []*graphql.Error
+	for _, err := range errors {
+		var locations []graphql.ErrorLocation
+		for _, l := range err.Locations {
+			fmt.Println(graphql.ErrorLocation(l))
+			locations = append(locations, graphql.ErrorLocation{
+				Line:   l.Line,
+				Column: l.Column,
+			})
+		}
+
+		errs = append(errs, &graphql.Error{
+			Message:   err.Message,
+			Path:      err.Path,
+			Locations: locations,
+		})
+	}
+	b, err := json.Marshal(&graphql.Response{Errors: errs})
+	if err != nil {
+		panic(err)
+	}
+	w.Write(b)
+}
+
// sendErrorf formats a message and sends it as a single query error.
func sendErrorf(w http.ResponseWriter, code int, format string, args ...interface{}) {
	sendError(w, code, &errors.QueryError{Message: fmt.Sprintf(format, args...)})
}

vendor/github.com/vektah/gqlgen/handler/playground.go 🔗

@@ -0,0 +1,51 @@
+package handler
+
+import (
+	"html/template"
+	"net/http"
+)
+
// page is the GraphQL Playground UI shell; assets are loaded from the
// jsdelivr CDN at the version injected under the "version" key.
var page = template.Must(template.New("graphiql").Parse(`<!DOCTYPE html>
<html>
<head>
	<meta charset=utf-8/>
	<meta name="viewport" content="user-scalable=no, initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, minimal-ui">
	<link rel="shortcut icon" href="https://graphcool-playground.netlify.com/favicon.png">
	<link rel="stylesheet" href="//cdn.jsdelivr.net/npm/graphql-playground-react@{{ .version }}/build/static/css/index.css"/>
	<link rel="shortcut icon" href="//cdn.jsdelivr.net/npm/graphql-playground-react@{{ .version }}/build/favicon.png"/>
	<script src="//cdn.jsdelivr.net/npm/graphql-playground-react@{{ .version }}/build/static/js/middleware.js"></script>
	<title>{{.title}}</title>
</head>
<body>
<style type="text/css">
	html { font-family: "Open Sans", sans-serif; overflow: hidden; }
	body { margin: 0; background: #172a3a; }
</style>
<div id="root"/>
<script type="text/javascript">
	window.addEventListener('load', function (event) {
		const root = document.getElementById('root');
		root.classList.add('playgroundIn');
		const wsProto = location.protocol == 'https:' ? 'wss:' : 'ws:'
		GraphQLPlayground.init(root, {
			endpoint: location.protocol + '//' + location.host + '{{.endpoint}}',
			subscriptionsEndpoint: wsProto + '//' + location.host + '{{.endpoint }}',
		})
	})
</script>
</body>
</html>
`))

// Playground returns a handler that serves the Playground UI pointed at
// the GraphQL endpoint path given, over both HTTP and websockets.
func Playground(title string, endpoint string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		err := page.Execute(w, map[string]string{
			"title":    title,
			"endpoint": endpoint,
			"version":  "1.4.3",
		})
		if err != nil {
			// Template and data are fixed at compile time; failure here is
			// a programmer error.
			panic(err)
		}
	}
}

vendor/github.com/vektah/gqlgen/handler/stub.go 🔗

@@ -0,0 +1,45 @@
+package handler
+
+import (
+	"context"
+	"time"
+
+	"github.com/vektah/gqlgen/graphql"
+	"github.com/vektah/gqlgen/neelance/query"
+	"github.com/vektah/gqlgen/neelance/schema"
+)
+
// executableSchemaStub is a canned ExecutableSchema used by the handler
// tests: queries and subscriptions resolve to {"name":"test"}, mutations
// always error.
type executableSchemaStub struct {
}

var _ graphql.ExecutableSchema = &executableSchemaStub{}

func (e *executableSchemaStub) Schema() *schema.Schema {
	return schema.MustParse(`
		schema { query: Query }
		type Query { me: User! }
		type User { name: String! }
	`)
}

func (e *executableSchemaStub) Query(ctx context.Context, op *query.Operation) *graphql.Response {
	return &graphql.Response{Data: []byte(`{"name":"test"}`)}
}

func (e *executableSchemaStub) Mutation(ctx context.Context, op *query.Operation) *graphql.Response {
	return graphql.ErrorResponse(ctx, "mutations are not supported")
}

// Subscription emits a canned result every 50ms until ctx is cancelled,
// after which the returned func yields nil to end the stream.
func (e *executableSchemaStub) Subscription(ctx context.Context, op *query.Operation) func() *graphql.Response {
	return func() *graphql.Response {
		time.Sleep(50 * time.Millisecond)
		select {
		case <-ctx.Done():
			return nil
		default:
			return &graphql.Response{
				Data: []byte(`{"name":"test"}`),
			}
		}
	}
}

vendor/github.com/vektah/gqlgen/handler/websocket.go 🔗

@@ -0,0 +1,245 @@
+package handler
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+	"sync"
+
+	"github.com/gorilla/websocket"
+	"github.com/vektah/gqlgen/graphql"
+	"github.com/vektah/gqlgen/neelance/errors"
+	"github.com/vektah/gqlgen/neelance/query"
+	"github.com/vektah/gqlgen/neelance/validation"
+)
+
// Message types of the graphql-ws subprotocol spoken over the websocket.
const (
	connectionInitMsg      = "connection_init"      // Client -> Server
	connectionTerminateMsg = "connection_terminate" // Client -> Server
	startMsg               = "start"                // Client -> Server
	stopMsg                = "stop"                 // Client -> Server
	connectionAckMsg       = "connection_ack"       // Server -> Client
	connectionErrorMsg     = "connection_error"     // Server -> Client
	dataMsg                = "data"                 // Server -> Client
	errorMsg               = "error"                // Server -> Client
	completeMsg            = "complete"             // Server -> Client
	//connectionKeepAliveMsg = "ka"                 // Server -> Client  TODO: keepalives
)

// operationMessage is the wire framing of the graphql-ws protocol: a type
// tag, an optional operation id, and an optional JSON payload.
type operationMessage struct {
	Payload json.RawMessage `json:"payload,omitempty"`
	ID      string          `json:"id,omitempty"`
	Type    string          `json:"type"`
}

// wsConnection tracks one websocket client and its in-flight operations.
type wsConnection struct {
	ctx    context.Context
	conn   *websocket.Conn
	exec   graphql.ExecutableSchema
	active map[string]context.CancelFunc // cancel funcs for running subscriptions, keyed by operation id
	mu     sync.Mutex                    // serializes conn writes and guards the active map
	cfg    *Config
}
+
+func connectWs(exec graphql.ExecutableSchema, w http.ResponseWriter, r *http.Request, cfg *Config) {
+	ws, err := cfg.upgrader.Upgrade(w, r, http.Header{
+		"Sec-Websocket-Protocol": []string{"graphql-ws"},
+	})
+	if err != nil {
+		log.Printf("unable to upgrade %T to websocket %s: ", w, err.Error())
+		sendErrorf(w, http.StatusBadRequest, "unable to upgrade")
+		return
+	}
+
+	conn := wsConnection{
+		active: map[string]context.CancelFunc{},
+		exec:   exec,
+		conn:   ws,
+		ctx:    r.Context(),
+		cfg:    cfg,
+	}
+
+	if !conn.init() {
+		return
+	}
+
+	conn.run()
+}
+
// init performs the graphql-ws handshake: the first client message must be
// connection_init, which is acknowledged with connection_ack. Any other
// opening message closes the connection with a protocol error.
func (c *wsConnection) init() bool {
	message := c.readOp()
	if message == nil {
		c.close(websocket.CloseProtocolError, "decoding error")
		return false
	}

	switch message.Type {
	case connectionInitMsg:
		c.write(&operationMessage{Type: connectionAckMsg})
	case connectionTerminateMsg:
		c.close(websocket.CloseNormalClosure, "terminated")
		return false
	default:
		c.sendConnectionError("unexpected message %s", message.Type)
		c.close(websocket.CloseProtocolError, "unexpected message")
		return false
	}

	return true
}

// write sends one frame under the mutex, since subscription goroutines
// share the underlying connection with the read loop.
func (c *wsConnection) write(msg *operationMessage) {
	c.mu.Lock()
	c.conn.WriteJSON(msg)
	c.mu.Unlock()
}

// run is the connection's read loop, dispatching start/stop/terminate
// messages until the client disconnects or a protocol error occurs.
func (c *wsConnection) run() {
	for {
		message := c.readOp()
		if message == nil {
			return
		}

		switch message.Type {
		case startMsg:
			if !c.subscribe(message) {
				return
			}
		case stopMsg:
			// Look up the cancel func under the lock, but invoke it outside.
			c.mu.Lock()
			closer := c.active[message.ID]
			c.mu.Unlock()
			if closer == nil {
				c.sendError(message.ID, errors.Errorf("%s is not running, cannot stop", message.ID))
				continue
			}

			closer()
		case connectionTerminateMsg:
			c.close(websocket.CloseNormalClosure, "terminated")
			return
		default:
			c.sendConnectionError("unexpected message %s", message.Type)
			c.close(websocket.CloseProtocolError, "unexpected message")
			return
		}
	}
}
+
// subscribe handles a start message: it parses and validates the payload,
// runs queries/mutations inline, and spawns a goroutine for subscriptions.
// The return value reports whether the connection should keep running
// (false only for an undecodable payload).
func (c *wsConnection) subscribe(message *operationMessage) bool {
	var reqParams params
	if err := json.Unmarshal(message.Payload, &reqParams); err != nil {
		c.sendConnectionError("invalid json")
		return false
	}

	// Parse/validate errors are reported per-operation, not fatal to the
	// connection.
	doc, qErr := query.Parse(reqParams.Query)
	if qErr != nil {
		c.sendError(message.ID, qErr)
		return true
	}

	errs := validation.Validate(c.exec.Schema(), doc)
	if len(errs) != 0 {
		c.sendError(message.ID, errs...)
		return true
	}

	op, err := doc.GetOperation(reqParams.OperationName)
	if err != nil {
		c.sendError(message.ID, errors.Errorf("%s", err.Error()))
		return true
	}

	reqCtx := c.cfg.newRequestContext(doc, reqParams.Query, reqParams.Variables)
	ctx := graphql.WithRequestContext(c.ctx, reqCtx)

	// Queries and mutations complete synchronously with a single data frame.
	if op.Type != query.Subscription {
		var result *graphql.Response
		if op.Type == query.Query {
			result = c.exec.Query(ctx, op)
		} else {
			result = c.exec.Mutation(ctx, op)
		}

		c.sendData(message.ID, result)
		c.write(&operationMessage{ID: message.ID, Type: completeMsg})
		return true
	}

	// Subscriptions stream results from a dedicated goroutine; the cancel
	// func is registered so a stop message can end the stream.
	ctx, cancel := context.WithCancel(ctx)
	c.mu.Lock()
	c.active[message.ID] = cancel
	c.mu.Unlock()
	go func() {
		// Route resolver panics through the configured Recover hook.
		defer func() {
			if r := recover(); r != nil {
				userErr := reqCtx.Recover(ctx, r)
				c.sendError(message.ID, &errors.QueryError{Message: userErr.Error()})
			}
		}()
		next := c.exec.Subscription(ctx, op)
		for result := next(); result != nil; result = next() {
			c.sendData(message.ID, result)
		}

		c.write(&operationMessage{ID: message.ID, Type: completeMsg})

		c.mu.Lock()
		delete(c.active, message.ID)
		c.mu.Unlock()
		cancel()
	}()

	return true
}
+
// sendData serializes a response and frames it as a data message for the
// given operation id; serialization failure is reported as an error frame.
func (c *wsConnection) sendData(id string, response *graphql.Response) {
	b, err := json.Marshal(response)
	if err != nil {
		c.sendError(id, errors.Errorf("unable to encode json response: %s", err.Error()))
		return
	}

	c.write(&operationMessage{Type: dataMsg, ID: id, Payload: b})
}

// sendError frames the given query errors as an error message for one
// operation.
func (c *wsConnection) sendError(id string, errors ...*errors.QueryError) {
	var errs []error
	for _, err := range errors {
		errs = append(errs, err)
	}
	b, err := json.Marshal(errs)
	if err != nil {
		panic(err)
	}
	c.write(&operationMessage{Type: errorMsg, ID: id, Payload: b})
}

// sendConnectionError frames a connection-level (id-less) error message.
func (c *wsConnection) sendConnectionError(format string, args ...interface{}) {
	b, err := json.Marshal(&graphql.Error{Message: fmt.Sprintf(format, args...)})
	if err != nil {
		panic(err)
	}

	c.write(&operationMessage{Type: connectionErrorMsg, Payload: b})
}

// readOp reads and decodes the next client frame; nil signals a decode
// failure (reported to the client as a connection error).
func (c *wsConnection) readOp() *operationMessage {
	message := operationMessage{}
	if err := c.conn.ReadJSON(&message); err != nil {
		c.sendConnectionError("invalid json")
		return nil
	}
	return &message
}

// close sends a websocket close frame with the given code/reason and then
// closes the underlying connection.
func (c *wsConnection) close(closeCode int, message string) {
	c.mu.Lock()
	_ = c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(closeCode, message))
	c.mu.Unlock()
	_ = c.conn.Close()
}

vendor/github.com/vektah/gqlgen/neelance/LICENSE 🔗

@@ -0,0 +1,24 @@
+Copyright (c) 2016 Richard Musiol. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/vektah/gqlgen/neelance/common/directive.go 🔗

@@ -0,0 +1,32 @@
+package common
+
// Directive is one parsed directive usage, e.g. @skip(if: $x).
type Directive struct {
	Name Ident
	Args ArgumentList
}

// ParseDirectives consumes zero or more @name(args...) usages from the lexer.
func ParseDirectives(l *Lexer) DirectiveList {
	var directives DirectiveList
	for l.Peek() == '@' {
		l.ConsumeToken('@')
		d := &Directive{}
		d.Name = l.ConsumeIdentWithLoc()
		// shift the recorded location back onto the '@' character
		d.Name.Loc.Column--
		if l.Peek() == '(' {
			d.Args = ParseArguments(l)
		}
		directives = append(directives, d)
	}
	return directives
}

type DirectiveList []*Directive

// Get returns the directive with the given name, or nil if absent.
func (l DirectiveList) Get(name string) *Directive {
	for _, d := range l {
		if d.Name.Name == name {
			return d
		}
	}
	return nil
}

vendor/github.com/vektah/gqlgen/neelance/common/lexer.go 🔗

@@ -0,0 +1,122 @@
+package common
+
+import (
+	"fmt"
+	"text/scanner"
+
+	"github.com/vektah/gqlgen/neelance/errors"
+)
+
// syntaxError is the panic payload used internally to abort lexing; it is
// converted back into a QueryError by CatchSyntaxError.
type syntaxError string

// Lexer is a thin wrapper over text/scanner that skips commas, collects
// description comments, and keeps a one-token lookahead.
type Lexer struct {
	sc          *scanner.Scanner
	next        rune   // lookahead token kind
	descComment string // accumulated '#' comment text preceding next
}

// Ident is an identifier together with its source location.
type Ident struct {
	Name string
	Loc  errors.Location
}

// New wraps sc and primes the lookahead.
func New(sc *scanner.Scanner) *Lexer {
	l := &Lexer{sc: sc}
	l.Consume()
	return l
}

// CatchSyntaxError runs f, converting any syntaxError panic raised by the
// lexer into a located QueryError; other panics are re-raised.
func (l *Lexer) CatchSyntaxError(f func()) (errRes *errors.QueryError) {
	defer func() {
		if err := recover(); err != nil {
			if err, ok := err.(syntaxError); ok {
				errRes = errors.Errorf("syntax error: %s", err)
				errRes.Locations = []errors.Location{l.Location()}
				return
			}
			panic(err)
		}
	}()

	f()
	return
}

// Peek returns the lookahead token kind without consuming it.
func (l *Lexer) Peek() rune {
	return l.next
}

// Consume advances to the next token, skipping commas and folding any
// '#' comments into descComment (joined by newlines, leading space trimmed).
func (l *Lexer) Consume() {
	l.descComment = ""
	for {
		l.next = l.sc.Scan()
		if l.next == ',' {
			continue
		}
		if l.next == '#' {
			if l.sc.Peek() == ' ' {
				l.sc.Next()
			}
			if l.descComment != "" {
				l.descComment += "\n"
			}
			for {
				next := l.sc.Next()
				if next == '\n' || next == scanner.EOF {
					break
				}
				l.descComment += string(next)
			}
			continue
		}
		break
	}
}

// ConsumeIdent consumes an identifier token and returns its text.
func (l *Lexer) ConsumeIdent() string {
	name := l.sc.TokenText()
	l.ConsumeToken(scanner.Ident)
	return name
}

// ConsumeIdentWithLoc is ConsumeIdent plus the token's source location.
func (l *Lexer) ConsumeIdentWithLoc() Ident {
	loc := l.Location()
	name := l.sc.TokenText()
	l.ConsumeToken(scanner.Ident)
	return Ident{name, loc}
}

// ConsumeKeyword consumes an identifier that must equal keyword.
func (l *Lexer) ConsumeKeyword(keyword string) {
	if l.next != scanner.Ident || l.sc.TokenText() != keyword {
		l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %q", l.sc.TokenText(), keyword))
	}
	l.Consume()
}

// ConsumeLiteral consumes the current token as a basic literal.
func (l *Lexer) ConsumeLiteral() *BasicLit {
	lit := &BasicLit{Type: l.next, Text: l.sc.TokenText()}
	l.Consume()
	return lit
}

// ConsumeToken consumes the current token, which must be of kind expected.
func (l *Lexer) ConsumeToken(expected rune) {
	if l.next != expected {
		l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %s", l.sc.TokenText(), scanner.TokenString(expected)))
	}
	l.Consume()
}

// DescComment returns the description comment preceding the current token.
func (l *Lexer) DescComment() string {
	return l.descComment
}

// SyntaxError aborts lexing; recovered by CatchSyntaxError.
func (l *Lexer) SyntaxError(message string) {
	panic(syntaxError(message))
}

// Location reports the scanner's current source position.
func (l *Lexer) Location() errors.Location {
	return errors.Location{
		Line:   l.sc.Line,
		Column: l.sc.Column,
	}
}

vendor/github.com/vektah/gqlgen/neelance/common/literals.go 🔗

@@ -0,0 +1,206 @@
+package common
+
+import (
+	"strconv"
+	"strings"
+	"text/scanner"
+
+	"github.com/vektah/gqlgen/neelance/errors"
+)
+
// Literal is a parsed GraphQL value: given the request variables it yields
// the concrete Go value.
type Literal interface {
	Value(vars map[string]interface{}) interface{}
	String() string
	Location() errors.Location
}

// BasicLit is a scalar literal (int, float, string, bool or enum ident),
// stored as its raw token text.
type BasicLit struct {
	Type rune
	Text string
	Loc  errors.Location
}

// Value converts the raw token text to int, float64, string or bool; enum
// identifiers pass through as their name. Conversion failures panic, which
// can only happen on tokens the scanner itself produced malformed.
func (lit *BasicLit) Value(vars map[string]interface{}) interface{} {
	switch lit.Type {
	case scanner.Int:
		value, err := strconv.ParseInt(lit.Text, 10, 64)
		if err != nil {
			panic(err)
		}
		return int(value)

	case scanner.Float:
		value, err := strconv.ParseFloat(lit.Text, 64)
		if err != nil {
			panic(err)
		}
		return value

	case scanner.String:
		value, err := strconv.Unquote(lit.Text)
		if err != nil {
			panic(err)
		}
		return value

	case scanner.Ident:
		switch lit.Text {
		case "true":
			return true
		case "false":
			return false
		default:
			return lit.Text
		}

	default:
		panic("invalid literal")
	}
}

func (lit *BasicLit) String() string {
	return lit.Text
}

func (lit *BasicLit) Location() errors.Location {
	return lit.Loc
}

// ListLit is a list literal; entries may contain variables.
type ListLit struct {
	Entries []Literal
	Loc     errors.Location
}

func (lit *ListLit) Value(vars map[string]interface{}) interface{} {
	entries := make([]interface{}, len(lit.Entries))
	for i, entry := range lit.Entries {
		entries[i] = entry.Value(vars)
	}
	return entries
}

func (lit *ListLit) String() string {
	entries := make([]string, len(lit.Entries))
	for i, entry := range lit.Entries {
		entries[i] = entry.String()
	}
	return "[" + strings.Join(entries, ", ") + "]"
}

func (lit *ListLit) Location() errors.Location {
	return lit.Loc
}

// ObjectLit is an input-object literal, an ordered list of name/value pairs.
type ObjectLit struct {
	Fields []*ObjectLitField
	Loc    errors.Location
}

type ObjectLitField struct {
	Name  Ident
	Value Literal
}

func (lit *ObjectLit) Value(vars map[string]interface{}) interface{} {
	fields := make(map[string]interface{}, len(lit.Fields))
	for _, f := range lit.Fields {
		fields[f.Name.Name] = f.Value.Value(vars)
	}
	return fields
}

func (lit *ObjectLit) String() string {
	entries := make([]string, 0, len(lit.Fields))
	for _, f := range lit.Fields {
		entries = append(entries, f.Name.Name+": "+f.Value.String())
	}
	return "{" + strings.Join(entries, ", ") + "}"
}

func (lit *ObjectLit) Location() errors.Location {
	return lit.Loc
}

// NullLit is the literal null.
type NullLit struct {
	Loc errors.Location
}

func (lit *NullLit) Value(vars map[string]interface{}) interface{} {
	return nil
}

func (lit *NullLit) String() string {
	return "null"
}

func (lit *NullLit) Location() errors.Location {
	return lit.Loc
}

// Variable is a $name reference whose value comes from the request variables.
type Variable struct {
	Name string
	Loc  errors.Location
}

func (v Variable) Value(vars map[string]interface{}) interface{} {
	return vars[v.Name]
}

func (v Variable) String() string {
	return "$" + v.Name
}

func (v *Variable) Location() errors.Location {
	return v.Loc
}

// ParseLiteral parses any GraphQL value from the lexer. When constOnly is
// true, variable references ($name) are rejected, as required for default
// values and schema-position literals.
func ParseLiteral(l *Lexer, constOnly bool) Literal {
	loc := l.Location()
	switch l.Peek() {
	case '$':
		if constOnly {
			l.SyntaxError("variable not allowed")
			panic("unreachable")
		}
		l.ConsumeToken('$')
		return &Variable{l.ConsumeIdent(), loc}

	case scanner.Int, scanner.Float, scanner.String, scanner.Ident:
		lit := l.ConsumeLiteral()
		// the ident "null" is the null literal, not an enum value
		if lit.Type == scanner.Ident && lit.Text == "null" {
			return &NullLit{loc}
		}
		lit.Loc = loc
		return lit
	case '-':
		// negative numbers arrive as '-' followed by the numeric token
		l.ConsumeToken('-')
		lit := l.ConsumeLiteral()
		lit.Text = "-" + lit.Text
		lit.Loc = loc
		return lit
	case '[':
		l.ConsumeToken('[')
		var list []Literal
		for l.Peek() != ']' {
			list = append(list, ParseLiteral(l, constOnly))
		}
		l.ConsumeToken(']')
		return &ListLit{list, loc}

	case '{':
		l.ConsumeToken('{')
		var fields []*ObjectLitField
		for l.Peek() != '}' {
			name := l.ConsumeIdentWithLoc()
			l.ConsumeToken(':')
			value := ParseLiteral(l, constOnly)
			fields = append(fields, &ObjectLitField{name, value})
		}
		l.ConsumeToken('}')
		return &ObjectLit{fields, loc}

	default:
		l.SyntaxError("invalid value")
		panic("unreachable")
	}
}

vendor/github.com/vektah/gqlgen/neelance/common/types.go 🔗

@@ -0,0 +1,80 @@
+package common
+
+import (
+	"github.com/vektah/gqlgen/neelance/errors"
+)
+
// Type is a GraphQL type reference: a named type, a list, or a non-null
// wrapper.
type Type interface {
	Kind() string
	String() string
}

// List is the [T] wrapper type.
type List struct {
	OfType Type
}

// NonNull is the T! wrapper type.
type NonNull struct {
	OfType Type
}

// TypeName is an unresolved reference by name; it must be replaced with a
// concrete type via ResolveType before Kind/String may be called.
type TypeName struct {
	Ident
}

func (*List) Kind() string     { return "LIST" }
func (*NonNull) Kind() string  { return "NON_NULL" }
func (*TypeName) Kind() string { panic("TypeName needs to be resolved to actual type") }

func (t *List) String() string    { return "[" + t.OfType.String() + "]" }
func (t *NonNull) String() string { return t.OfType.String() + "!" }
func (*TypeName) String() string  { panic("TypeName needs to be resolved to actual type") }

// ParseType parses a type reference, including an optional trailing '!'.
func ParseType(l *Lexer) Type {
	t := parseNullType(l)
	if l.Peek() == '!' {
		l.ConsumeToken('!')
		return &NonNull{OfType: t}
	}
	return t
}

// parseNullType parses the nullable core: either a bracketed list or a name.
func parseNullType(l *Lexer) Type {
	if l.Peek() == '[' {
		l.ConsumeToken('[')
		ofType := ParseType(l)
		l.ConsumeToken(']')
		return &List{OfType: ofType}
	}

	return &TypeName{Ident: l.ConsumeIdentWithLoc()}
}

// Resolver maps a type name to its declared type, or nil if unknown.
type Resolver func(name string) Type

// ResolveType recursively replaces TypeName references in t using resolver,
// returning a located KnownTypeNames error for any unknown name.
func ResolveType(t Type, resolver Resolver) (Type, *errors.QueryError) {
	switch t := t.(type) {
	case *List:
		ofType, err := ResolveType(t.OfType, resolver)
		if err != nil {
			return nil, err
		}
		return &List{OfType: ofType}, nil
	case *NonNull:
		ofType, err := ResolveType(t.OfType, resolver)
		if err != nil {
			return nil, err
		}
		return &NonNull{OfType: ofType}, nil
	case *TypeName:
		refT := resolver(t.Name)
		if refT == nil {
			err := errors.Errorf("Unknown type %q.", t.Name)
			err.Rule = "KnownTypeNames"
			err.Locations = []errors.Location{t.Loc}
			return nil, err
		}
		return refT, nil
	default:
		return t, nil
	}
}

vendor/github.com/vektah/gqlgen/neelance/common/values.go 🔗

@@ -0,0 +1,77 @@
+package common
+
+import (
+	"github.com/vektah/gqlgen/neelance/errors"
+)
+
// InputValue is a declared input parameter (field argument or input-object
// field): name, type, optional default, and description.
type InputValue struct {
	Name    Ident
	Type    Type
	Default Literal
	Desc    string
	Loc     errors.Location
	TypeLoc errors.Location
}

type InputValueList []*InputValue

// Get returns the input value with the given name, or nil if absent.
func (l InputValueList) Get(name string) *InputValue {
	for _, v := range l {
		if v.Name.Name == name {
			return v
		}
	}
	return nil
}

// ParseInputValue parses "name: Type" with an optional "= default"; the
// default must be a constant (no variables).
func ParseInputValue(l *Lexer) *InputValue {
	p := &InputValue{}
	p.Loc = l.Location()
	p.Desc = l.DescComment()
	p.Name = l.ConsumeIdentWithLoc()
	l.ConsumeToken(':')
	p.TypeLoc = l.Location()
	p.Type = ParseType(l)
	if l.Peek() == '=' {
		l.ConsumeToken('=')
		p.Default = ParseLiteral(l, true)
	}
	return p
}

// Argument is one "name: value" pair in a directive or field invocation.
type Argument struct {
	Name  Ident
	Value Literal
}

type ArgumentList []Argument

// Get returns the value for name and whether it was present.
func (l ArgumentList) Get(name string) (Literal, bool) {
	for _, arg := range l {
		if arg.Name.Name == name {
			return arg.Value, true
		}
	}
	return nil, false
}

// MustGet returns the value for name, panicking if absent; callers use it
// only for arguments validation has already guaranteed to exist.
func (l ArgumentList) MustGet(name string) Literal {
	value, ok := l.Get(name)
	if !ok {
		panic("argument not found")
	}
	return value
}

// ParseArguments parses a parenthesized "(name: value ...)" list; values
// may reference variables.
func ParseArguments(l *Lexer) ArgumentList {
	var args ArgumentList
	l.ConsumeToken('(')
	for l.Peek() != ')' {
		name := l.ConsumeIdentWithLoc()
		l.ConsumeToken(':')
		value := ParseLiteral(l, false)
		args = append(args, Argument{Name: name, Value: value})
	}
	l.ConsumeToken(')')
	return args
}

vendor/github.com/vektah/gqlgen/neelance/errors/errors.go 🔗

@@ -0,0 +1,41 @@
+package errors
+
+import (
+	"fmt"
+)
+
// QueryError is the error type produced while parsing, validating and
// executing a query. Rule and ResolverError carry internal detail and are
// excluded from the JSON representation.
type QueryError struct {
	Message       string        `json:"message"`
	Locations     []Location    `json:"locations,omitempty"`
	Path          []interface{} `json:"path,omitempty"`
	Rule          string        `json:"-"`
	ResolverError error         `json:"-"`
}

// Location is a 1-based line/column position in the query source.
type Location struct {
	Line   int `json:"line"`
	Column int `json:"column"`
}

// Before reports whether a precedes b in source order.
func (a Location) Before(b Location) bool {
	if a.Line != b.Line {
		return a.Line < b.Line
	}
	return a.Column < b.Column
}

// Errorf builds a QueryError with a formatted message and no locations.
func Errorf(format string, a ...interface{}) *QueryError {
	qe := &QueryError{Message: fmt.Sprintf(format, a...)}
	return qe
}

// Error renders the message plus any source locations; a nil receiver is
// rendered as "<nil>".
func (err *QueryError) Error() string {
	if err == nil {
		return "<nil>"
	}
	out := "graphql: " + err.Message
	for _, loc := range err.Locations {
		out += fmt.Sprintf(" (line %d, column %d)", loc.Line, loc.Column)
	}
	return out
}

var _ error = &QueryError{}

vendor/github.com/vektah/gqlgen/neelance/introspection/introspection.go 🔗

@@ -0,0 +1,313 @@
+package introspection
+
+import (
+	"sort"
+
+	"github.com/vektah/gqlgen/neelance/common"
+	"github.com/vektah/gqlgen/neelance/schema"
+)
+
// Schema wraps a *schema.Schema so it can be served through the GraphQL
// introspection types (__Schema and friends).
type Schema struct {
	schema *schema.Schema
}

// WrapSchema is only used internally.
func WrapSchema(schema *schema.Schema) *Schema {
	return &Schema{schema}
}
+
+func (r *Schema) Types() []Type {
+	var names []string
+	for name := range r.schema.Types {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	l := make([]Type, len(names))
+	for i, name := range names {
+		l[i] = Type{r.schema.Types[name]}
+	}
+	return l
+}
+
+func (r *Schema) Directives() []Directive {
+	var names []string
+	for name := range r.schema.Directives {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	l := make([]Directive, len(names))
+	for i, name := range names {
+		l[i] = Directive{r.schema.Directives[name]}
+	}
+	return l
+}
+
+func (r *Schema) QueryType() Type {
+	t, ok := r.schema.EntryPoints["query"]
+	if !ok {
+		return Type{}
+	}
+	return Type{t}
+}
+
+func (r *Schema) MutationType() *Type {
+	t, ok := r.schema.EntryPoints["mutation"]
+	if !ok {
+		return nil
+	}
+	return &Type{t}
+}
+
+func (r *Schema) SubscriptionType() *Type {
+	t, ok := r.schema.EntryPoints["subscription"]
+	if !ok {
+		return nil
+	}
+	return &Type{t}
+}
+
// Type wraps any schema type — named or wrapper (list/non-null) — for
// introspection.
type Type struct {
	typ common.Type
}

// WrapType is only used internally.
func WrapType(typ common.Type) *Type {
	return &Type{typ}
}

// Kind returns the __TypeKind string of the wrapped type (SCALAR,
// OBJECT, LIST, NON_NULL, ...).
func (r *Type) Kind() string {
	return r.typ.Kind()
}

// Name returns the type's name, or nil for unnamed wrapper types
// (lists and non-nulls), as required by the introspection schema.
func (r *Type) Name() *string {
	if named, ok := r.typ.(schema.NamedType); ok {
		name := named.TypeName()
		return &name
	}
	return nil
}

// Description returns the type's description, or nil when the type is
// unnamed or its description is empty.
func (r *Type) Description() *string {
	if named, ok := r.typ.(schema.NamedType); ok {
		desc := named.Description()
		if desc == "" {
			return nil
		}
		return &desc
	}
	return nil
}
+
// Fields returns the fields of an object or interface type, filtering
// out @deprecated fields unless includeDeprecated is set. Returns nil for
// every other kind of type.
func (r *Type) Fields(includeDeprecated bool) []Field {
	var fields schema.FieldList
	switch t := r.typ.(type) {
	case *schema.Object:
		fields = t.Fields
	case *schema.Interface:
		fields = t.Fields
	default:
		return nil
	}

	var l []Field
	for _, f := range fields {
		if d := f.Directives.Get("deprecated"); d == nil || includeDeprecated {
			l = append(l, Field{f})
		}
	}
	return l
}

// Interfaces returns the interfaces implemented by an object type, or
// nil for non-object types.
func (r *Type) Interfaces() []Type {
	t, ok := r.typ.(*schema.Object)
	if !ok {
		return nil
	}

	l := make([]Type, len(t.Interfaces))
	for i, intf := range t.Interfaces {
		l[i] = Type{intf}
	}
	return l
}

// PossibleTypes returns the concrete object types behind an interface or
// union, or nil for every other kind of type.
func (r *Type) PossibleTypes() []Type {
	var possibleTypes []*schema.Object
	switch t := r.typ.(type) {
	case *schema.Interface:
		possibleTypes = t.PossibleTypes
	case *schema.Union:
		possibleTypes = t.PossibleTypes
	default:
		return nil
	}

	l := make([]Type, len(possibleTypes))
	for i, intf := range possibleTypes {
		l[i] = Type{intf}
	}
	return l
}

// EnumValues returns an enum's values, filtering out @deprecated values
// unless includeDeprecated is set. Returns nil for non-enum types.
func (r *Type) EnumValues(includeDeprecated bool) []EnumValue {
	t, ok := r.typ.(*schema.Enum)
	if !ok {
		return nil
	}

	var l []EnumValue
	for _, v := range t.Values {
		if d := v.Directives.Get("deprecated"); d == nil || includeDeprecated {
			l = append(l, EnumValue{v})
		}
	}
	return l
}

// InputFields returns an input object's fields, or nil for other kinds.
func (r *Type) InputFields() []InputValue {
	t, ok := r.typ.(*schema.InputObject)
	if !ok {
		return nil
	}

	l := make([]InputValue, len(t.Values))
	for i, v := range t.Values {
		l[i] = InputValue{v}
	}
	return l
}

// OfType unwraps one level of list or non-null, or returns nil for named
// (non-wrapper) types.
func (r *Type) OfType() *Type {
	switch t := r.typ.(type) {
	case *common.List:
		return &Type{t.OfType}
	case *common.NonNull:
		return &Type{t.OfType}
	default:
		return nil
	}
}
+
// Field wraps a schema field for introspection.
type Field struct {
	field *schema.Field
}

func (r *Field) Name() string {
	return r.field.Name
}

// Description returns the field's description, or nil when empty.
func (r *Field) Description() *string {
	if r.field.Desc == "" {
		return nil
	}
	return &r.field.Desc
}

// Args returns the field's declared arguments.
func (r *Field) Args() []InputValue {
	l := make([]InputValue, len(r.field.Args))
	for i, v := range r.field.Args {
		l[i] = InputValue{v}
	}
	return l
}

func (r *Field) Type() Type {
	return Type{r.field.Type}
}

// IsDeprecated reports whether the field carries a @deprecated directive.
func (r *Field) IsDeprecated() bool {
	return r.field.Directives.Get("deprecated") != nil
}

// DeprecationReason returns the @deprecated directive's reason, or nil
// when the field is not deprecated. The "reason" argument is always
// present because schema parsing fills in a directive's declared default
// ("No longer supported") when no explicit value is given; the .(string)
// assertion assumes the literal evaluates to a string — the meta schema
// types reason as String.
func (r *Field) DeprecationReason() *string {
	d := r.field.Directives.Get("deprecated")
	if d == nil {
		return nil
	}
	reason := d.Args.MustGet("reason").Value(nil).(string)
	return &reason
}
+
// InputValue wraps an argument or input-object field for introspection.
type InputValue struct {
	value *common.InputValue
}

func (r *InputValue) Name() string {
	return r.value.Name.Name
}

// Description returns the value's description, or nil when empty.
func (r *InputValue) Description() *string {
	if r.value.Desc == "" {
		return nil
	}
	return &r.value.Desc
}

func (r *InputValue) Type() Type {
	return Type{r.value.Type}
}

// DefaultValue renders the declared default as a GraphQL-formatted
// string, or nil when no default exists.
func (r *InputValue) DefaultValue() *string {
	if r.value.Default == nil {
		return nil
	}
	s := r.value.Default.String()
	return &s
}

// EnumValue wraps a schema enum value for introspection.
type EnumValue struct {
	value *schema.EnumValue
}

func (r *EnumValue) Name() string {
	return r.value.Name
}

// Description returns the value's description, or nil when empty.
func (r *EnumValue) Description() *string {
	if r.value.Desc == "" {
		return nil
	}
	return &r.value.Desc
}

// IsDeprecated reports whether the value carries a @deprecated directive.
func (r *EnumValue) IsDeprecated() bool {
	return r.value.Directives.Get("deprecated") != nil
}

// DeprecationReason returns the @deprecated reason, or nil when the value
// is not deprecated. The "reason" argument is always present because
// schema parsing fills in the directive's declared default.
func (r *EnumValue) DeprecationReason() *string {
	d := r.value.Directives.Get("deprecated")
	if d == nil {
		return nil
	}
	reason := d.Args.MustGet("reason").Value(nil).(string)
	return &reason
}
+
+type Directive struct {
+	directive *schema.DirectiveDecl
+}
+
+func (r *Directive) Name() string {
+	return r.directive.Name
+}
+
+func (r *Directive) Description() *string {
+	if r.directive.Desc == "" {
+		return nil
+	}
+	return &r.directive.Desc
+}
+
+func (r *Directive) Locations() []string {
+	return r.directive.Locs
+}
+
+func (r *Directive) Args() []InputValue {
+	l := make([]InputValue, len(r.directive.Args))
+	for i, v := range r.directive.Args {
+		l[i] = InputValue{v}
+	}
+	return l
+}

vendor/github.com/vektah/gqlgen/neelance/introspection/query.go 🔗

@@ -0,0 +1,104 @@
+package introspection
+
+// Query is the query generated by graphiql to determine type information
+const Query = `
+query IntrospectionQuery {
+  __schema {
+    queryType {
+      name
+    }
+    mutationType {
+      name
+    }
+    subscriptionType {
+      name
+    }
+    types {
+      ...FullType
+    }
+    directives {
+      name
+      description
+      locations
+      args {
+        ...InputValue
+      }
+    }
+  }
+}
+
+fragment FullType on __Type {
+  kind
+  name
+  description
+  fields(includeDeprecated: true) {
+    name
+    description
+    args {
+      ...InputValue
+    }
+    type {
+      ...TypeRef
+    }
+    isDeprecated
+    deprecationReason
+  }
+  inputFields {
+    ...InputValue
+  }
+  interfaces {
+    ...TypeRef
+  }
+  enumValues(includeDeprecated: true) {
+    name
+    description
+    isDeprecated
+    deprecationReason
+  }
+  possibleTypes {
+    ...TypeRef
+  }
+}
+
+fragment InputValue on __InputValue {
+  name
+  description
+  type {
+    ...TypeRef
+  }
+  defaultValue
+}
+
+fragment TypeRef on __Type {
+  kind
+  name
+  ofType {
+    kind
+    name
+    ofType {
+      kind
+      name
+      ofType {
+        kind
+        name
+        ofType {
+          kind
+          name
+          ofType {
+            kind
+            name
+            ofType {
+              kind
+              name
+              ofType {
+                kind
+                name
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+`

vendor/github.com/vektah/gqlgen/neelance/query/query.go 🔗

@@ -0,0 +1,261 @@
+package query
+
+import (
+	"fmt"
+	"strings"
+	"text/scanner"
+
+	"github.com/vektah/gqlgen/neelance/common"
+	"github.com/vektah/gqlgen/neelance/errors"
+)
+
+type Document struct {
+	Operations OperationList
+	Fragments  FragmentList
+}
+
+type OperationList []*Operation
+
+func (l OperationList) Get(name string) *Operation {
+	for _, f := range l {
+		if f.Name.Name == name {
+			return f
+		}
+	}
+	return nil
+}
+
+type FragmentList []*FragmentDecl
+
+func (l FragmentList) Get(name string) *FragmentDecl {
+	for _, f := range l {
+		if f.Name.Name == name {
+			return f
+		}
+	}
+	return nil
+}
+
// Operation is a single query/mutation/subscription within a document.
// Name is the zero Ident for anonymous operations.
type Operation struct {
	Type       OperationType
	Name       common.Ident
	Vars       common.InputValueList
	Selections []Selection
	Directives common.DirectiveList
	Loc        errors.Location
}
+
// OperationType identifies the kind of a GraphQL operation.
type OperationType string

// The three operation kinds defined by the GraphQL specification. All
// three constants are explicitly typed as OperationType; previously only
// Query was, leaving Mutation and Subscription as untyped strings.
const (
	Query        OperationType = "QUERY"
	Mutation     OperationType = "MUTATION"
	Subscription OperationType = "SUBSCRIPTION"
)
+
// Fragment is a type condition plus a selection set, shared by named and
// inline fragments via embedding.
type Fragment struct {
	On         common.TypeName
	Selections []Selection
}

// FragmentDecl is a named, top-level `fragment Name on Type` declaration.
type FragmentDecl struct {
	Fragment
	Name       common.Ident
	Directives common.DirectiveList
	Loc        errors.Location
}

// Selection is one entry of a selection set: a Field, an InlineFragment
// or a FragmentSpread.
type Selection interface {
	isSelection()
}

// Field is a single field selection, possibly aliased, with optional
// arguments, directives and a nested selection set.
type Field struct {
	Alias           common.Ident
	Name            common.Ident
	Arguments       common.ArgumentList
	Directives      common.DirectiveList
	Selections      []Selection
	SelectionSetLoc errors.Location
}

// InlineFragment is an anonymous `... on Type { ... }` selection.
type InlineFragment struct {
	Fragment
	Directives common.DirectiveList
	Loc        errors.Location
}

// FragmentSpread references a named fragment from a selection set.
type FragmentSpread struct {
	Name       common.Ident
	Directives common.DirectiveList
	Loc        errors.Location
}

// Marker methods restricting Selection to the three types above.
func (Field) isSelection()          {}
func (InlineFragment) isSelection() {}
func (FragmentSpread) isSelection() {}
+
+func Parse(queryString string) (*Document, *errors.QueryError) {
+	sc := &scanner.Scanner{
+		Mode: scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings,
+	}
+	sc.Init(strings.NewReader(queryString))
+
+	l := common.New(sc)
+	var doc *Document
+	err := l.CatchSyntaxError(func() {
+		doc = parseDocument(l)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return doc, nil
+}
+
+func parseDocument(l *common.Lexer) *Document {
+	d := &Document{}
+	for l.Peek() != scanner.EOF {
+		if l.Peek() == '{' {
+			op := &Operation{Type: Query, Loc: l.Location()}
+			op.Selections = parseSelectionSet(l)
+			d.Operations = append(d.Operations, op)
+			continue
+		}
+
+		loc := l.Location()
+		switch x := l.ConsumeIdent(); x {
+		case "query":
+			op := parseOperation(l, Query)
+			op.Loc = loc
+			d.Operations = append(d.Operations, op)
+
+		case "mutation":
+			d.Operations = append(d.Operations, parseOperation(l, Mutation))
+
+		case "subscription":
+			d.Operations = append(d.Operations, parseOperation(l, Subscription))
+
+		case "fragment":
+			frag := parseFragment(l)
+			frag.Loc = loc
+			d.Fragments = append(d.Fragments, frag)
+
+		default:
+			l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "fragment"`, x))
+		}
+	}
+	return d
+}
+
+func parseOperation(l *common.Lexer, opType OperationType) *Operation {
+	op := &Operation{Type: opType}
+	op.Name.Loc = l.Location()
+	if l.Peek() == scanner.Ident {
+		op.Name = l.ConsumeIdentWithLoc()
+	}
+	op.Directives = common.ParseDirectives(l)
+	if l.Peek() == '(' {
+		l.ConsumeToken('(')
+		for l.Peek() != ')' {
+			loc := l.Location()
+			l.ConsumeToken('$')
+			iv := common.ParseInputValue(l)
+			iv.Loc = loc
+			op.Vars = append(op.Vars, iv)
+		}
+		l.ConsumeToken(')')
+	}
+	op.Selections = parseSelectionSet(l)
+	return op
+}
+
// parseFragment parses a `fragment Name on Type @dirs { ... }`
// declaration; the "fragment" keyword has already been consumed by the
// caller, which also fills in the declaration's Loc.
func parseFragment(l *common.Lexer) *FragmentDecl {
	f := &FragmentDecl{}
	f.Name = l.ConsumeIdentWithLoc()
	l.ConsumeKeyword("on")
	f.On = common.TypeName{Ident: l.ConsumeIdentWithLoc()}
	f.Directives = common.ParseDirectives(l)
	f.Selections = parseSelectionSet(l)
	return f
}
+
+func parseSelectionSet(l *common.Lexer) []Selection {
+	var sels []Selection
+	l.ConsumeToken('{')
+	for l.Peek() != '}' {
+		sels = append(sels, parseSelection(l))
+	}
+	l.ConsumeToken('}')
+	return sels
+}
+
+func parseSelection(l *common.Lexer) Selection {
+	if l.Peek() == '.' {
+		return parseSpread(l)
+	}
+	return parseField(l)
+}
+
// parseField parses `alias: name(args) @dirs { selections }`, where the
// alias, arguments, directives and selection set are each optional. When
// no alias is given, Alias and Name hold the same ident.
func parseField(l *common.Lexer) *Field {
	f := &Field{}
	f.Alias = l.ConsumeIdentWithLoc()
	f.Name = f.Alias
	if l.Peek() == ':' {
		l.ConsumeToken(':')
		f.Name = l.ConsumeIdentWithLoc()
	}
	if l.Peek() == '(' {
		f.Arguments = common.ParseArguments(l)
	}
	f.Directives = common.ParseDirectives(l)
	if l.Peek() == '{' {
		f.SelectionSetLoc = l.Location()
		f.Selections = parseSelectionSet(l)
	}
	return f
}

// parseSpread parses the "..." prefix shared by fragment spreads and
// inline fragments. After the dots, an ident other than "on" means a
// spread of a named fragment; "on Type" — or no ident at all — starts an
// inline fragment (the type condition being optional in the latter case).
func parseSpread(l *common.Lexer) Selection {
	loc := l.Location()
	l.ConsumeToken('.')
	l.ConsumeToken('.')
	l.ConsumeToken('.')

	f := &InlineFragment{Loc: loc}
	if l.Peek() == scanner.Ident {
		ident := l.ConsumeIdentWithLoc()
		if ident.Name != "on" {
			fs := &FragmentSpread{
				Name: ident,
				Loc:  loc,
			}
			fs.Directives = common.ParseDirectives(l)
			return fs
		}
		f.On = common.TypeName{Ident: l.ConsumeIdentWithLoc()}
	}
	f.Directives = common.ParseDirectives(l)
	f.Selections = parseSelectionSet(l)
	return f
}
+
+func (d *Document) GetOperation(operationName string) (*Operation, error) {
+	if len(d.Operations) == 0 {
+		return nil, fmt.Errorf("no operations in query document")
+	}
+
+	if operationName == "" {
+		if len(d.Operations) > 1 {
+			return nil, fmt.Errorf("more than one operation in query document and no operation name given")
+		}
+		for _, op := range d.Operations {
+			return op, nil // return the one and only operation
+		}
+	}
+
+	op := d.Operations.Get(operationName)
+	if op == nil {
+		return nil, fmt.Errorf("no operation with name %q", operationName)
+	}
+	return op, nil
+}

vendor/github.com/vektah/gqlgen/neelance/schema/meta.go 🔗

@@ -0,0 +1,193 @@
+package schema
+
// Meta holds the schema of GraphQL's built-in meta types and directives
// (__Type, __Schema, @deprecated, ...). New merges it into every schema.
var Meta *Schema

func init() {
	// New copies Meta.Types/Meta.Directives into the schema it builds,
	// so Meta must be non-nil before New runs: bootstrap with an empty
	// Schema (ranging over its nil maps is a no-op), then parse the real
	// meta schema into a properly initialized one.
	Meta = &Schema{} // bootstrap
	Meta = New()
	if err := Meta.Parse(metaSrc); err != nil {
		panic(err)
	}
}
+
+var metaSrc = `
+	# The ` + "`" + `Int` + "`" + ` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1.
+	scalar Int
+
+	# The ` + "`" + `Float` + "`" + ` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point).
+	scalar Float
+
+	# The ` + "`" + `String` + "`" + ` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text.
+	scalar String
+
+	# The ` + "`" + `Boolean` + "`" + ` scalar type represents ` + "`" + `true` + "`" + ` or ` + "`" + `false` + "`" + `.
+	scalar Boolean
+
+	# The ` + "`" + `ID` + "`" + ` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as ` + "`" + `"4"` + "`" + `) or integer (such as ` + "`" + `4` + "`" + `) input value will be accepted as an ID.
+	scalar ID
+
+	# The ` + "`" + `Map` + "`" + ` scalar type is a simple json object
+	scalar Map
+
+	# Directs the executor to include this field or fragment only when the ` + "`" + `if` + "`" + ` argument is true.
+	directive @include(
+		# Included when true.
+		if: Boolean!
+	) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
+
+	# Directs the executor to skip this field or fragment when the ` + "`" + `if` + "`" + ` argument is true.
+	directive @skip(
+		# Skipped when true.
+		if: Boolean!
+	) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
+
+	# Marks an element of a GraphQL schema as no longer supported.
+	directive @deprecated(
+		# Explains why this element was deprecated, usually also including a suggestion
+		# for how to access supported similar data. Formatted in
+		# [Markdown](https://daringfireball.net/projects/markdown/).
+		reason: String = "No longer supported"
+	) on FIELD_DEFINITION | ENUM_VALUE
+
+	# A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.
+	#
+	# In some cases, you need to provide options to alter GraphQL's execution behavior
+	# in ways field arguments will not suffice, such as conditionally including or
+	# skipping a field. Directives provide this by describing additional information
+	# to the executor.
+	type __Directive {
+		name: String!
+		description: String
+		locations: [__DirectiveLocation!]!
+		args: [__InputValue!]!
+	}
+
+	# A Directive can be adjacent to many parts of the GraphQL language, a
+	# __DirectiveLocation describes one such possible adjacencies.
+	enum __DirectiveLocation {
+		# Location adjacent to a query operation.
+		QUERY
+		# Location adjacent to a mutation operation.
+		MUTATION
+		# Location adjacent to a subscription operation.
+		SUBSCRIPTION
+		# Location adjacent to a field.
+		FIELD
+		# Location adjacent to a fragment definition.
+		FRAGMENT_DEFINITION
+		# Location adjacent to a fragment spread.
+		FRAGMENT_SPREAD
+		# Location adjacent to an inline fragment.
+		INLINE_FRAGMENT
+		# Location adjacent to a schema definition.
+		SCHEMA
+		# Location adjacent to a scalar definition.
+		SCALAR
+		# Location adjacent to an object type definition.
+		OBJECT
+		# Location adjacent to a field definition.
+		FIELD_DEFINITION
+		# Location adjacent to an argument definition.
+		ARGUMENT_DEFINITION
+		# Location adjacent to an interface definition.
+		INTERFACE
+		# Location adjacent to a union definition.
+		UNION
+		# Location adjacent to an enum definition.
+		ENUM
+		# Location adjacent to an enum value definition.
+		ENUM_VALUE
+		# Location adjacent to an input object type definition.
+		INPUT_OBJECT
+		# Location adjacent to an input object field definition.
+		INPUT_FIELD_DEFINITION
+	}
+
+	# One possible value for a given Enum. Enum values are unique values, not a
+	# placeholder for a string or numeric value. However an Enum value is returned in
+	# a JSON response as a string.
+	type __EnumValue {
+		name: String!
+		description: String
+		isDeprecated: Boolean!
+		deprecationReason: String
+	}
+
+	# Object and Interface types are described by a list of Fields, each of which has
+	# a name, potentially a list of arguments, and a return type.
+	type __Field {
+		name: String!
+		description: String
+		args: [__InputValue!]!
+		type: __Type!
+		isDeprecated: Boolean!
+		deprecationReason: String
+	}
+
+	# Arguments provided to Fields or Directives and the input fields of an
+	# InputObject are represented as Input Values which describe their type and
+	# optionally a default value.
+	type __InputValue {
+		name: String!
+		description: String
+		type: __Type!
+		# A GraphQL-formatted string representing the default value for this input value.
+		defaultValue: String
+	}
+
+	# A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all
+	# available types and directives on the server, as well as the entry points for
+	# query, mutation, and subscription operations.
+	type __Schema {
+		# A list of all types supported by this server.
+		types: [__Type!]!
+		# The type that query operations will be rooted at.
+		queryType: __Type!
+		# If this server supports mutation, the type that mutation operations will be rooted at.
+		mutationType: __Type
+		# If this server support subscription, the type that subscription operations will be rooted at.
+		subscriptionType: __Type
+		# A list of all directives supported by this server.
+		directives: [__Directive!]!
+	}
+
+	# The fundamental unit of any GraphQL Schema is the type. There are many kinds of
+	# types in GraphQL as represented by the ` + "`" + `__TypeKind` + "`" + ` enum.
+	#
+	# Depending on the kind of a type, certain fields describe information about that
+	# type. Scalar types provide no information beyond a name and description, while
+	# Enum types provide their values. Object and Interface types provide the fields
+	# they describe. Abstract types, Union and Interface, provide the Object types
+	# possible at runtime. List and NonNull types compose other types.
+	type __Type {
+		kind: __TypeKind!
+		name: String
+		description: String
+		fields(includeDeprecated: Boolean = false): [__Field!]
+		interfaces: [__Type!]
+		possibleTypes: [__Type!]
+		enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
+		inputFields: [__InputValue!]
+		ofType: __Type
+	}
+	
+	# An enum describing what kind of type a given ` + "`" + `__Type` + "`" + ` is.
+	enum __TypeKind {
+		# Indicates this type is a scalar.
+		SCALAR
+		# Indicates this type is an object. ` + "`" + `fields` + "`" + ` and ` + "`" + `interfaces` + "`" + ` are valid fields.
+		OBJECT
+		# Indicates this type is an interface. ` + "`" + `fields` + "`" + ` and ` + "`" + `possibleTypes` + "`" + ` are valid fields.
+		INTERFACE
+		# Indicates this type is a union. ` + "`" + `possibleTypes` + "`" + ` is a valid field.
+		UNION
+		# Indicates this type is an enum. ` + "`" + `enumValues` + "`" + ` is a valid field.
+		ENUM
+		# Indicates this type is an input object. ` + "`" + `inputFields` + "`" + ` is a valid field.
+		INPUT_OBJECT
+		# Indicates this type is a list. ` + "`" + `ofType` + "`" + ` is a valid field.
+		LIST
+		# Indicates this type is a non-null. ` + "`" + `ofType` + "`" + ` is a valid field.
+		NON_NULL
+	}
+`

vendor/github.com/vektah/gqlgen/neelance/schema/schema.go 🔗

@@ -0,0 +1,489 @@
+package schema
+
+import (
+	"fmt"
+	"strings"
+	"text/scanner"
+
+	"github.com/vektah/gqlgen/neelance/common"
+	"github.com/vektah/gqlgen/neelance/errors"
+)
+
// Schema is a parsed GraphQL schema: all named types, directive
// declarations, and the operation entry points. The unexported fields
// hold intermediate parse state that Parse resolves into the exported
// ones.
type Schema struct {
	EntryPoints map[string]NamedType
	Types       map[string]NamedType
	Directives  map[string]*DirectiveDecl

	entryPointNames map[string]string
	objects         []*Object
	unions          []*Union
	enums           []*Enum
}

// defaultEntrypoints maps each operation kind to the conventional type
// name used when no explicit `schema { ... }` block declares it.
var defaultEntrypoints = map[string]string{
	"query":        "Query",
	"mutation":     "Mutation",
	"subscription": "Subscription",
}

// Resolve looks up a named type; a missing name yields nil.
func (s *Schema) Resolve(name string) common.Type {
	return s.Types[name]
}
+
// NamedType is implemented by every type that can be referenced by name
// in a schema: scalar, object, interface, union, enum and input object.
type NamedType interface {
	common.Type
	TypeName() string
	Description() string
}

// Scalar is a leaf type with no substructure (Int, String, custom
// scalars).
type Scalar struct {
	Name string
	Desc string
}

// Object is a concrete output type with fields. interfaceNames is
// collected during parsing and resolved into Interfaces by Parse.
type Object struct {
	Name       string
	Interfaces []*Interface
	Fields     FieldList
	Desc       string

	interfaceNames []string
}

// Interface is an abstract type; PossibleTypes is populated by Parse from
// the objects that declare they implement it.
type Interface struct {
	Name          string
	PossibleTypes []*Object
	Fields        FieldList
	Desc          string
}

// Union is an abstract type whose members are objects; typeNames is
// collected during parsing and resolved into PossibleTypes by Parse.
type Union struct {
	Name          string
	PossibleTypes []*Object
	Desc          string

	typeNames []string
}

// Enum is a type with a fixed set of named values.
type Enum struct {
	Name   string
	Values []*EnumValue
	Desc   string
}

// EnumValue is one member of an Enum, possibly carrying directives such
// as @deprecated.
type EnumValue struct {
	Name       string
	Directives common.DirectiveList
	Desc       string
}

// InputObject is a composite input type.
type InputObject struct {
	Name   string
	Desc   string
	Values common.InputValueList
}

// FieldList is an ordered list of fields.
type FieldList []*Field

// Get returns the field with the given name, or nil if absent.
func (l FieldList) Get(name string) *Field {
	for _, f := range l {
		if f.Name == name {
			return f
		}
	}
	return nil
}

// Names returns the field names in declaration order.
func (l FieldList) Names() []string {
	names := make([]string, len(l))
	for i, f := range l {
		names[i] = f.Name
	}
	return names
}

// DirectiveDecl is a `directive @Name(Args) on Locs` declaration.
type DirectiveDecl struct {
	Name string
	Desc string
	Locs []string
	Args common.InputValueList
}
+
// Kind reports each named type's introspection __TypeKind constant.
func (*Scalar) Kind() string      { return "SCALAR" }
func (*Object) Kind() string      { return "OBJECT" }
func (*Interface) Kind() string   { return "INTERFACE" }
func (*Union) Kind() string       { return "UNION" }
func (*Enum) Kind() string        { return "ENUM" }
func (*InputObject) Kind() string { return "INPUT_OBJECT" }

// String renders the type as its bare name.
func (t *Scalar) String() string      { return t.Name }
func (t *Object) String() string      { return t.Name }
func (t *Interface) String() string   { return t.Name }
func (t *Union) String() string       { return t.Name }
func (t *Enum) String() string        { return t.Name }
func (t *InputObject) String() string { return t.Name }

// TypeName and Description complete the NamedType interface for all six
// named kinds.
func (t *Scalar) TypeName() string      { return t.Name }
func (t *Object) TypeName() string      { return t.Name }
func (t *Interface) TypeName() string   { return t.Name }
func (t *Union) TypeName() string       { return t.Name }
func (t *Enum) TypeName() string        { return t.Name }
func (t *InputObject) TypeName() string { return t.Name }

func (t *Scalar) Description() string      { return t.Desc }
func (t *Object) Description() string      { return t.Desc }
func (t *Interface) Description() string   { return t.Desc }
func (t *Union) Description() string       { return t.Desc }
func (t *Enum) Description() string        { return t.Desc }
func (t *InputObject) Description() string { return t.Desc }

// Field is a single field of an object or interface type.
type Field struct {
	Name       string
	Args       common.InputValueList
	Type       common.Type
	Directives common.DirectiveList
	Desc       string
}
+}
+
+func MustParse(str string) *Schema {
+	s := New()
+	err := s.Parse(str)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+func New() *Schema {
+	s := &Schema{
+		entryPointNames: make(map[string]string),
+		Types:           make(map[string]NamedType),
+		Directives:      make(map[string]*DirectiveDecl),
+	}
+	for n, t := range Meta.Types {
+		s.Types[n] = t
+	}
+	for n, d := range Meta.Directives {
+		s.Directives[n] = d
+	}
+	return s
+}
+
+func (s *Schema) Parse(schemaString string) error {
+	sc := &scanner.Scanner{
+		Mode: scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings,
+	}
+	sc.Init(strings.NewReader(schemaString))
+
+	l := common.New(sc)
+	err := l.CatchSyntaxError(func() {
+		parseSchema(s, l)
+	})
+	if err != nil {
+		return err
+	}
+
+	for _, t := range s.Types {
+		if err := resolveNamedType(s, t); err != nil {
+			return err
+		}
+	}
+	for _, d := range s.Directives {
+		for _, arg := range d.Args {
+			t, err := common.ResolveType(arg.Type, s.Resolve)
+			if err != nil {
+				return err
+			}
+			arg.Type = t
+		}
+	}
+
+	s.EntryPoints = make(map[string]NamedType)
+	for key, name := range s.entryPointNames {
+		t, ok := s.Types[name]
+		if !ok {
+			if !ok {
+				return errors.Errorf("type %q not found", name)
+			}
+		}
+		s.EntryPoints[key] = t
+	}
+
+	for entrypointName, typeName := range defaultEntrypoints {
+		if _, ok := s.EntryPoints[entrypointName]; ok {
+			continue
+		}
+
+		if _, ok := s.Types[typeName]; !ok {
+			continue
+		}
+
+		s.EntryPoints[entrypointName] = s.Types[typeName]
+	}
+
+	for _, obj := range s.objects {
+		obj.Interfaces = make([]*Interface, len(obj.interfaceNames))
+		for i, intfName := range obj.interfaceNames {
+			t, ok := s.Types[intfName]
+			if !ok {
+				return errors.Errorf("interface %q not found", intfName)
+			}
+			intf, ok := t.(*Interface)
+			if !ok {
+				return errors.Errorf("type %q is not an interface", intfName)
+			}
+			obj.Interfaces[i] = intf
+			intf.PossibleTypes = append(intf.PossibleTypes, obj)
+		}
+	}
+
+	for _, union := range s.unions {
+		union.PossibleTypes = make([]*Object, len(union.typeNames))
+		for i, name := range union.typeNames {
+			t, ok := s.Types[name]
+			if !ok {
+				return errors.Errorf("object type %q not found", name)
+			}
+			obj, ok := t.(*Object)
+			if !ok {
+				return errors.Errorf("type %q is not an object", name)
+			}
+			union.PossibleTypes[i] = obj
+		}
+	}
+
+	for _, enum := range s.enums {
+		for _, value := range enum.Values {
+			if err := resolveDirectives(s, value.Directives); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
// resolveNamedType resolves the type references inside one named type:
// fields of objects/interfaces and values of input objects. Scalars,
// unions and enums need no per-field resolution here (unions and enums
// are handled separately in Parse).
func resolveNamedType(s *Schema, t NamedType) error {
	switch t := t.(type) {
	case *Object:
		for _, f := range t.Fields {
			if err := resolveField(s, f); err != nil {
				return err
			}
		}
	case *Interface:
		for _, f := range t.Fields {
			if err := resolveField(s, f); err != nil {
				return err
			}
		}
	case *InputObject:
		if err := resolveInputObject(s, t.Values); err != nil {
			return err
		}
	}
	return nil
}

// resolveField resolves a field's result type, its directives, and the
// types of its arguments.
func resolveField(s *Schema, f *Field) error {
	t, err := common.ResolveType(f.Type, s.Resolve)
	if err != nil {
		return err
	}
	f.Type = t
	if err := resolveDirectives(s, f.Directives); err != nil {
		return err
	}
	return resolveInputObject(s, f.Args)
}

// resolveDirectives checks each applied directive against its
// declaration: the directive must be declared, every supplied argument
// must exist on the declaration, and any declared argument that was not
// supplied is filled in with its default value.
func resolveDirectives(s *Schema, directives common.DirectiveList) error {
	for _, d := range directives {
		dirName := d.Name.Name
		dd, ok := s.Directives[dirName]
		if !ok {
			return errors.Errorf("directive %q not found", dirName)
		}
		for _, arg := range d.Args {
			if dd.Args.Get(arg.Name.Name) == nil {
				return errors.Errorf("invalid argument %q for directive %q", arg.Name.Name, dirName)
			}
		}
		for _, arg := range dd.Args {
			if _, ok := d.Args.Get(arg.Name.Name); !ok {
				d.Args = append(d.Args, common.Argument{Name: arg.Name, Value: arg.Default})
			}
		}
	}
	return nil
}

// resolveInputObject resolves the type of every input value in the list.
func resolveInputObject(s *Schema, values common.InputValueList) error {
	for _, v := range values {
		t, err := common.ResolveType(v.Type, s.Resolve)
		if err != nil {
			return err
		}
		v.Type = t
	}
	return nil
}
+
// parseSchema is the top-level loop over a schema document. It dispatches
// on each definition's leading keyword and records the parsed definition
// on s; the comment block preceding a definition becomes its description.
func parseSchema(s *Schema, l *common.Lexer) {
	for l.Peek() != scanner.EOF {
		desc := l.DescComment()
		switch x := l.ConsumeIdent(); x {
		case "schema":
			// schema { query: MyQuery ... } — explicit entry points,
			// resolved to types later in Parse.
			l.ConsumeToken('{')
			for l.Peek() != '}' {
				name := l.ConsumeIdent()
				l.ConsumeToken(':')
				typ := l.ConsumeIdent()
				s.entryPointNames[name] = typ
			}
			l.ConsumeToken('}')
		case "type":
			obj := parseObjectDecl(l)
			obj.Desc = desc
			s.Types[obj.Name] = obj
			// Objects are also collected for interface linking in Parse.
			s.objects = append(s.objects, obj)
		case "interface":
			intf := parseInterfaceDecl(l)
			intf.Desc = desc
			s.Types[intf.Name] = intf
		case "union":
			union := parseUnionDecl(l)
			union.Desc = desc
			s.Types[union.Name] = union
			s.unions = append(s.unions, union)
		case "enum":
			enum := parseEnumDecl(l)
			enum.Desc = desc
			s.Types[enum.Name] = enum
			s.enums = append(s.enums, enum)
		case "input":
			input := parseInputDecl(l)
			input.Desc = desc
			s.Types[input.Name] = input
		case "scalar":
			name := l.ConsumeIdent()
			s.Types[name] = &Scalar{Name: name, Desc: desc}
		case "directive":
			directive := parseDirectiveDecl(l)
			directive.Desc = desc
			s.Directives[directive.Name] = directive
		default:
			l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union", "input", "scalar" or "directive"`, x))
		}
	}
}
+
// parseObjectDecl parses `type Name [implements I1 I2 ...] { fields }`.
// The implements list is read as bare idents until '{' — no '&'
// separators; NOTE(review): confirm this matches the SDL dialect the
// rest of the project targets.
func parseObjectDecl(l *common.Lexer) *Object {
	o := &Object{}
	o.Name = l.ConsumeIdent()
	if l.Peek() == scanner.Ident {
		l.ConsumeKeyword("implements")
		for {
			o.interfaceNames = append(o.interfaceNames, l.ConsumeIdent())
			if l.Peek() == '{' {
				break
			}
		}
	}
	l.ConsumeToken('{')
	o.Fields = parseFields(l)
	l.ConsumeToken('}')
	return o
}

// parseInterfaceDecl parses `interface Name { fields }`.
func parseInterfaceDecl(l *common.Lexer) *Interface {
	i := &Interface{}
	i.Name = l.ConsumeIdent()
	l.ConsumeToken('{')
	i.Fields = parseFields(l)
	l.ConsumeToken('}')
	return i
}

// parseUnionDecl parses `union Name = A | B | ...`; member names are
// resolved to object types later in Parse.
func parseUnionDecl(l *common.Lexer) *Union {
	union := &Union{}
	union.Name = l.ConsumeIdent()
	l.ConsumeToken('=')
	union.typeNames = []string{l.ConsumeIdent()}
	for l.Peek() == '|' {
		l.ConsumeToken('|')
		union.typeNames = append(union.typeNames, l.ConsumeIdent())
	}
	return union
}

// parseInputDecl parses `input Name { values }`.
func parseInputDecl(l *common.Lexer) *InputObject {
	i := &InputObject{}
	i.Name = l.ConsumeIdent()
	l.ConsumeToken('{')
	for l.Peek() != '}' {
		i.Values = append(i.Values, common.ParseInputValue(l))
	}
	l.ConsumeToken('}')
	return i
}

// parseEnumDecl parses `enum Name { VALUE [@dirs] ... }`; each value may
// carry a description comment and directives (e.g. @deprecated).
func parseEnumDecl(l *common.Lexer) *Enum {
	enum := &Enum{}
	enum.Name = l.ConsumeIdent()
	l.ConsumeToken('{')
	for l.Peek() != '}' {
		v := &EnumValue{}
		v.Desc = l.DescComment()
		v.Name = l.ConsumeIdent()
		v.Directives = common.ParseDirectives(l)
		enum.Values = append(enum.Values, v)
	}
	l.ConsumeToken('}')
	return enum
}
+
// parseDirectiveDecl parses `directive @name(args) on LOC1 | LOC2 ...`.
func parseDirectiveDecl(l *common.Lexer) *DirectiveDecl {
	d := &DirectiveDecl{}
	l.ConsumeToken('@')
	d.Name = l.ConsumeIdent()
	if l.Peek() == '(' {
		l.ConsumeToken('(')
		for l.Peek() != ')' {
			v := common.ParseInputValue(l)
			d.Args = append(d.Args, v)
		}
		l.ConsumeToken(')')
	}
	l.ConsumeKeyword("on")
	for {
		loc := l.ConsumeIdent()
		d.Locs = append(d.Locs, loc)
		if l.Peek() != '|' {
			break
		}
		l.ConsumeToken('|')
	}
	return d
}

// parseFields parses the field list inside an object or interface body,
// stopping at — but not consuming — the closing '}'.
func parseFields(l *common.Lexer) FieldList {
	var fields FieldList
	for l.Peek() != '}' {
		f := &Field{}
		f.Desc = l.DescComment()
		f.Name = l.ConsumeIdent()
		if l.Peek() == '(' {
			l.ConsumeToken('(')
			for l.Peek() != ')' {
				f.Args = append(f.Args, common.ParseInputValue(l))
			}
			l.ConsumeToken(')')
		}
		l.ConsumeToken(':')
		f.Type = common.ParseType(l)
		f.Directives = common.ParseDirectives(l)
		fields = append(fields, f)
	}
	return fields
}

vendor/github.com/vektah/gqlgen/neelance/tests/testdata/LICENSE 🔗

@@ -0,0 +1,33 @@
+The files in this testdata directory are derived from the graphql-js project:
+https://github.com/graphql/graphql-js
+
+BSD License
+
+For GraphQL software
+
+Copyright (c) 2015, Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name Facebook nor the names of its contributors may be used to
+   endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/vektah/gqlgen/neelance/validation/suggestion.go 🔗

@@ -0,0 +1,71 @@
+package validation
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+func makeSuggestion(prefix string, options []string, input string) string {
+	var selected []string
+	distances := make(map[string]int)
+	for _, opt := range options {
+		distance := levenshteinDistance(input, opt)
+		threshold := max(len(input)/2, max(len(opt)/2, 1))
+		if distance < threshold {
+			selected = append(selected, opt)
+			distances[opt] = distance
+		}
+	}
+
+	if len(selected) == 0 {
+		return ""
+	}
+	sort.Slice(selected, func(i, j int) bool {
+		return distances[selected[i]] < distances[selected[j]]
+	})
+
+	parts := make([]string, len(selected))
+	for i, opt := range selected {
+		parts[i] = strconv.Quote(opt)
+	}
+	if len(parts) > 1 {
+		parts[len(parts)-1] = "or " + parts[len(parts)-1]
+	}
+	return fmt.Sprintf(" %s %s?", prefix, strings.Join(parts, ", "))
+}
+
+func levenshteinDistance(s1, s2 string) int {
+	column := make([]int, len(s1)+1)
+	for y := range s1 {
+		column[y+1] = y + 1
+	}
+	for x, rx := range s2 {
+		column[0] = x + 1
+		lastdiag := x
+		for y, ry := range s1 {
+			olddiag := column[y+1]
+			if rx != ry {
+				lastdiag++
+			}
+			column[y+1] = min(column[y+1]+1, min(column[y]+1, lastdiag))
+			lastdiag = olddiag
+		}
+	}
+	return column[len(s1)]
+}
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}

vendor/github.com/vektah/gqlgen/neelance/validation/validation.go 🔗

@@ -0,0 +1,861 @@
+package validation
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/scanner"
+
+	"github.com/vektah/gqlgen/neelance/common"
+	"github.com/vektah/gqlgen/neelance/errors"
+	"github.com/vektah/gqlgen/neelance/query"
+	"github.com/vektah/gqlgen/neelance/schema"
+)
+
+type varSet map[*common.InputValue]struct{}
+
+type selectionPair struct{ a, b query.Selection }
+
+type fieldInfo struct {
+	sf     *schema.Field
+	parent schema.NamedType
+}
+
+type context struct {
+	schema           *schema.Schema
+	doc              *query.Document
+	errs             []*errors.QueryError
+	opErrs           map[*query.Operation][]*errors.QueryError
+	usedVars         map[*query.Operation]varSet
+	fieldMap         map[*query.Field]fieldInfo
+	overlapValidated map[selectionPair]struct{}
+}
+
+func (c *context) addErr(loc errors.Location, rule string, format string, a ...interface{}) {
+	c.addErrMultiLoc([]errors.Location{loc}, rule, format, a...)
+}
+
+func (c *context) addErrMultiLoc(locs []errors.Location, rule string, format string, a ...interface{}) {
+	c.errs = append(c.errs, &errors.QueryError{
+		Message:   fmt.Sprintf(format, a...),
+		Locations: locs,
+		Rule:      rule,
+	})
+}
+
+type opContext struct {
+	*context
+	ops []*query.Operation
+}
+
+func Validate(s *schema.Schema, doc *query.Document) []*errors.QueryError {
+	c := &context{
+		schema:           s,
+		doc:              doc,
+		opErrs:           make(map[*query.Operation][]*errors.QueryError),
+		usedVars:         make(map[*query.Operation]varSet),
+		fieldMap:         make(map[*query.Field]fieldInfo),
+		overlapValidated: make(map[selectionPair]struct{}),
+	}
+
+	opNames := make(nameSet)
+	fragUsedBy := make(map[*query.FragmentDecl][]*query.Operation)
+	for _, op := range doc.Operations {
+		c.usedVars[op] = make(varSet)
+		opc := &opContext{c, []*query.Operation{op}}
+
+		if op.Name.Name == "" && len(doc.Operations) != 1 {
+			c.addErr(op.Loc, "LoneAnonymousOperation", "This anonymous operation must be the only defined operation.")
+		}
+		if op.Name.Name != "" {
+			validateName(c, opNames, op.Name, "UniqueOperationNames", "operation")
+		}
+
+		validateDirectives(opc, string(op.Type), op.Directives)
+
+		varNames := make(nameSet)
+		for _, v := range op.Vars {
+			validateName(c, varNames, v.Name, "UniqueVariableNames", "variable")
+
+			t := resolveType(c, v.Type)
+			if !canBeInput(t) {
+				c.addErr(v.TypeLoc, "VariablesAreInputTypes", "Variable %q cannot be non-input type %q.", "$"+v.Name.Name, t)
+			}
+
+			if v.Default != nil {
+				validateLiteral(opc, v.Default)
+
+				if t != nil {
+					if nn, ok := t.(*common.NonNull); ok {
+						c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q is required and will not use the default value. Perhaps you meant to use type %q.", "$"+v.Name.Name, t, nn.OfType)
+					}
+
+					if ok, reason := validateValueType(opc, v.Default, t); !ok {
+						c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q has invalid default value %s.\n%s", "$"+v.Name.Name, t, v.Default, reason)
+					}
+				}
+			}
+		}
+
+		var entryPoint schema.NamedType
+		switch op.Type {
+		case query.Query:
+			entryPoint = s.EntryPoints["query"]
+		case query.Mutation:
+			entryPoint = s.EntryPoints["mutation"]
+		case query.Subscription:
+			entryPoint = s.EntryPoints["subscription"]
+		default:
+			panic("unreachable")
+		}
+
+		validateSelectionSet(opc, op.Selections, entryPoint)
+
+		fragUsed := make(map[*query.FragmentDecl]struct{})
+		markUsedFragments(c, op.Selections, fragUsed)
+		for frag := range fragUsed {
+			fragUsedBy[frag] = append(fragUsedBy[frag], op)
+		}
+	}
+
+	fragNames := make(nameSet)
+	fragVisited := make(map[*query.FragmentDecl]struct{})
+	for _, frag := range doc.Fragments {
+		opc := &opContext{c, fragUsedBy[frag]}
+
+		validateName(c, fragNames, frag.Name, "UniqueFragmentNames", "fragment")
+		validateDirectives(opc, "FRAGMENT_DEFINITION", frag.Directives)
+
+		t := unwrapType(resolveType(c, &frag.On))
+		// continue even if t is nil
+		if t != nil && !canBeFragment(t) {
+			c.addErr(frag.On.Loc, "FragmentsOnCompositeTypes", "Fragment %q cannot condition on non composite type %q.", frag.Name.Name, t)
+			continue
+		}
+
+		validateSelectionSet(opc, frag.Selections, t)
+
+		if _, ok := fragVisited[frag]; !ok {
+			detectFragmentCycle(c, frag.Selections, fragVisited, nil, map[string]int{frag.Name.Name: 0})
+		}
+	}
+
+	for _, frag := range doc.Fragments {
+		if len(fragUsedBy[frag]) == 0 {
+			c.addErr(frag.Loc, "NoUnusedFragments", "Fragment %q is never used.", frag.Name.Name)
+		}
+	}
+
+	for _, op := range doc.Operations {
+		c.errs = append(c.errs, c.opErrs[op]...)
+
+		opUsedVars := c.usedVars[op]
+		for _, v := range op.Vars {
+			if _, ok := opUsedVars[v]; !ok {
+				opSuffix := ""
+				if op.Name.Name != "" {
+					opSuffix = fmt.Sprintf(" in operation %q", op.Name.Name)
+				}
+				c.addErr(v.Loc, "NoUnusedVariables", "Variable %q is never used%s.", "$"+v.Name.Name, opSuffix)
+			}
+		}
+	}
+
+	return c.errs
+}
+
+func validateSelectionSet(c *opContext, sels []query.Selection, t schema.NamedType) {
+	for _, sel := range sels {
+		validateSelection(c, sel, t)
+	}
+
+	for i, a := range sels {
+		for _, b := range sels[i+1:] {
+			c.validateOverlap(a, b, nil, nil)
+		}
+	}
+}
+
+func validateSelection(c *opContext, sel query.Selection, t schema.NamedType) {
+	switch sel := sel.(type) {
+	case *query.Field:
+		validateDirectives(c, "FIELD", sel.Directives)
+
+		fieldName := sel.Name.Name
+		var f *schema.Field
+		switch fieldName {
+		case "__typename":
+			f = &schema.Field{
+				Name: "__typename",
+				Type: c.schema.Types["String"],
+			}
+		case "__schema":
+			f = &schema.Field{
+				Name: "__schema",
+				Type: c.schema.Types["__Schema"],
+			}
+		case "__type":
+			f = &schema.Field{
+				Name: "__type",
+				Args: common.InputValueList{
+					&common.InputValue{
+						Name: common.Ident{Name: "name"},
+						Type: &common.NonNull{OfType: c.schema.Types["String"]},
+					},
+				},
+				Type: c.schema.Types["__Type"],
+			}
+		default:
+			f = fields(t).Get(fieldName)
+			if f == nil && t != nil {
+				suggestion := makeSuggestion("Did you mean", fields(t).Names(), fieldName)
+				c.addErr(sel.Alias.Loc, "FieldsOnCorrectType", "Cannot query field %q on type %q.%s", fieldName, t, suggestion)
+			}
+		}
+		c.fieldMap[sel] = fieldInfo{sf: f, parent: t}
+
+		validateArgumentLiterals(c, sel.Arguments)
+		if f != nil {
+			validateArgumentTypes(c, sel.Arguments, f.Args, sel.Alias.Loc,
+				func() string { return fmt.Sprintf("field %q of type %q", fieldName, t) },
+				func() string { return fmt.Sprintf("Field %q", fieldName) },
+			)
+		}
+
+		var ft common.Type
+		if f != nil {
+			ft = f.Type
+			sf := hasSubfields(ft)
+			if sf && sel.Selections == nil {
+				c.addErr(sel.Alias.Loc, "ScalarLeafs", "Field %q of type %q must have a selection of subfields. Did you mean \"%s { ... }\"?", fieldName, ft, fieldName)
+			}
+			if !sf && sel.Selections != nil {
+				c.addErr(sel.SelectionSetLoc, "ScalarLeafs", "Field %q must not have a selection since type %q has no subfields.", fieldName, ft)
+			}
+		}
+		if sel.Selections != nil {
+			validateSelectionSet(c, sel.Selections, unwrapType(ft))
+		}
+
+	case *query.InlineFragment:
+		validateDirectives(c, "INLINE_FRAGMENT", sel.Directives)
+		if sel.On.Name != "" {
+			fragTyp := unwrapType(resolveType(c.context, &sel.On))
+			if fragTyp != nil && !compatible(t, fragTyp) {
+				c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment cannot be spread here as objects of type %q can never be of type %q.", t, fragTyp)
+			}
+			t = fragTyp
+			// continue even if t is nil
+		}
+		if t != nil && !canBeFragment(t) {
+			c.addErr(sel.On.Loc, "FragmentsOnCompositeTypes", "Fragment cannot condition on non composite type %q.", t)
+			return
+		}
+		validateSelectionSet(c, sel.Selections, unwrapType(t))
+
+	case *query.FragmentSpread:
+		validateDirectives(c, "FRAGMENT_SPREAD", sel.Directives)
+		frag := c.doc.Fragments.Get(sel.Name.Name)
+		if frag == nil {
+			c.addErr(sel.Name.Loc, "KnownFragmentNames", "Unknown fragment %q.", sel.Name.Name)
+			return
+		}
+		fragTyp := c.schema.Types[frag.On.Name]
+		if !compatible(t, fragTyp) {
+			c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment %q cannot be spread here as objects of type %q can never be of type %q.", frag.Name.Name, t, fragTyp)
+		}
+
+	default:
+		panic("unreachable")
+	}
+}
+
+func compatible(a, b common.Type) bool {
+	for _, pta := range possibleTypes(a) {
+		for _, ptb := range possibleTypes(b) {
+			if pta == ptb {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func possibleTypes(t common.Type) []*schema.Object {
+	switch t := t.(type) {
+	case *schema.Object:
+		return []*schema.Object{t}
+	case *schema.Interface:
+		return t.PossibleTypes
+	case *schema.Union:
+		return t.PossibleTypes
+	default:
+		return nil
+	}
+}
+
+func markUsedFragments(c *context, sels []query.Selection, fragUsed map[*query.FragmentDecl]struct{}) {
+	for _, sel := range sels {
+		switch sel := sel.(type) {
+		case *query.Field:
+			if sel.Selections != nil {
+				markUsedFragments(c, sel.Selections, fragUsed)
+			}
+
+		case *query.InlineFragment:
+			markUsedFragments(c, sel.Selections, fragUsed)
+
+		case *query.FragmentSpread:
+			frag := c.doc.Fragments.Get(sel.Name.Name)
+			if frag == nil {
+				return
+			}
+
+			if _, ok := fragUsed[frag]; ok {
+				return
+			}
+			fragUsed[frag] = struct{}{}
+			markUsedFragments(c, frag.Selections, fragUsed)
+
+		default:
+			panic("unreachable")
+		}
+	}
+}
+
+func detectFragmentCycle(c *context, sels []query.Selection, fragVisited map[*query.FragmentDecl]struct{}, spreadPath []*query.FragmentSpread, spreadPathIndex map[string]int) {
+	for _, sel := range sels {
+		detectFragmentCycleSel(c, sel, fragVisited, spreadPath, spreadPathIndex)
+	}
+}
+
+func detectFragmentCycleSel(c *context, sel query.Selection, fragVisited map[*query.FragmentDecl]struct{}, spreadPath []*query.FragmentSpread, spreadPathIndex map[string]int) {
+	switch sel := sel.(type) {
+	case *query.Field:
+		if sel.Selections != nil {
+			detectFragmentCycle(c, sel.Selections, fragVisited, spreadPath, spreadPathIndex)
+		}
+
+	case *query.InlineFragment:
+		detectFragmentCycle(c, sel.Selections, fragVisited, spreadPath, spreadPathIndex)
+
+	case *query.FragmentSpread:
+		frag := c.doc.Fragments.Get(sel.Name.Name)
+		if frag == nil {
+			return
+		}
+
+		spreadPath = append(spreadPath, sel)
+		if i, ok := spreadPathIndex[frag.Name.Name]; ok {
+			cyclePath := spreadPath[i:]
+			via := ""
+			if len(cyclePath) > 1 {
+				names := make([]string, len(cyclePath)-1)
+				for i, frag := range cyclePath[:len(cyclePath)-1] {
+					names[i] = frag.Name.Name
+				}
+				via = " via " + strings.Join(names, ", ")
+			}
+
+			locs := make([]errors.Location, len(cyclePath))
+			for i, frag := range cyclePath {
+				locs[i] = frag.Loc
+			}
+			c.addErrMultiLoc(locs, "NoFragmentCycles", "Cannot spread fragment %q within itself%s.", frag.Name.Name, via)
+			return
+		}
+
+		if _, ok := fragVisited[frag]; ok {
+			return
+		}
+		fragVisited[frag] = struct{}{}
+
+		spreadPathIndex[frag.Name.Name] = len(spreadPath)
+		detectFragmentCycle(c, frag.Selections, fragVisited, spreadPath, spreadPathIndex)
+		delete(spreadPathIndex, frag.Name.Name)
+
+	default:
+		panic("unreachable")
+	}
+}
+
+func (c *context) validateOverlap(a, b query.Selection, reasons *[]string, locs *[]errors.Location) {
+	if a == b {
+		return
+	}
+
+	if _, ok := c.overlapValidated[selectionPair{a, b}]; ok {
+		return
+	}
+	c.overlapValidated[selectionPair{a, b}] = struct{}{}
+	c.overlapValidated[selectionPair{b, a}] = struct{}{}
+
+	switch a := a.(type) {
+	case *query.Field:
+		switch b := b.(type) {
+		case *query.Field:
+			if b.Alias.Loc.Before(a.Alias.Loc) {
+				a, b = b, a
+			}
+			if reasons2, locs2 := c.validateFieldOverlap(a, b); len(reasons2) != 0 {
+				locs2 = append(locs2, a.Alias.Loc, b.Alias.Loc)
+				if reasons == nil {
+					c.addErrMultiLoc(locs2, "OverlappingFieldsCanBeMerged", "Fields %q conflict because %s. Use different aliases on the fields to fetch both if this was intentional.", a.Alias.Name, strings.Join(reasons2, " and "))
+					return
+				}
+				for _, r := range reasons2 {
+					*reasons = append(*reasons, fmt.Sprintf("subfields %q conflict because %s", a.Alias.Name, r))
+				}
+				*locs = append(*locs, locs2...)
+			}
+
+		case *query.InlineFragment:
+			for _, sel := range b.Selections {
+				c.validateOverlap(a, sel, reasons, locs)
+			}
+
+		case *query.FragmentSpread:
+			if frag := c.doc.Fragments.Get(b.Name.Name); frag != nil {
+				for _, sel := range frag.Selections {
+					c.validateOverlap(a, sel, reasons, locs)
+				}
+			}
+
+		default:
+			panic("unreachable")
+		}
+
+	case *query.InlineFragment:
+		for _, sel := range a.Selections {
+			c.validateOverlap(sel, b, reasons, locs)
+		}
+
+	case *query.FragmentSpread:
+		if frag := c.doc.Fragments.Get(a.Name.Name); frag != nil {
+			for _, sel := range frag.Selections {
+				c.validateOverlap(sel, b, reasons, locs)
+			}
+		}
+
+	default:
+		panic("unreachable")
+	}
+}
+
+func (c *context) validateFieldOverlap(a, b *query.Field) ([]string, []errors.Location) {
+	if a.Alias.Name != b.Alias.Name {
+		return nil, nil
+	}
+
+	if asf := c.fieldMap[a].sf; asf != nil {
+		if bsf := c.fieldMap[b].sf; bsf != nil {
+			if !typesCompatible(asf.Type, bsf.Type) {
+				return []string{fmt.Sprintf("they return conflicting types %s and %s", asf.Type, bsf.Type)}, nil
+			}
+		}
+	}
+
+	at := c.fieldMap[a].parent
+	bt := c.fieldMap[b].parent
+	if at == nil || bt == nil || at == bt {
+		if a.Name.Name != b.Name.Name {
+			return []string{fmt.Sprintf("%s and %s are different fields", a.Name.Name, b.Name.Name)}, nil
+		}
+
+		if argumentsConflict(a.Arguments, b.Arguments) {
+			return []string{"they have differing arguments"}, nil
+		}
+	}
+
+	var reasons []string
+	var locs []errors.Location
+	for _, a2 := range a.Selections {
+		for _, b2 := range b.Selections {
+			c.validateOverlap(a2, b2, &reasons, &locs)
+		}
+	}
+	return reasons, locs
+}
+
+func argumentsConflict(a, b common.ArgumentList) bool {
+	if len(a) != len(b) {
+		return true
+	}
+	for _, argA := range a {
+		valB, ok := b.Get(argA.Name.Name)
+		if !ok || !reflect.DeepEqual(argA.Value.Value(nil), valB.Value(nil)) {
+			return true
+		}
+	}
+	return false
+}
+
+func fields(t common.Type) schema.FieldList {
+	switch t := t.(type) {
+	case *schema.Object:
+		return t.Fields
+	case *schema.Interface:
+		return t.Fields
+	default:
+		return nil
+	}
+}
+
+func unwrapType(t common.Type) schema.NamedType {
+	if t == nil {
+		return nil
+	}
+	for {
+		switch t2 := t.(type) {
+		case schema.NamedType:
+			return t2
+		case *common.List:
+			t = t2.OfType
+		case *common.NonNull:
+			t = t2.OfType
+		default:
+			panic("unreachable")
+		}
+	}
+}
+
+func resolveType(c *context, t common.Type) common.Type {
+	t2, err := common.ResolveType(t, c.schema.Resolve)
+	if err != nil {
+		c.errs = append(c.errs, err)
+	}
+	return t2
+}
+
+func validateDirectives(c *opContext, loc string, directives common.DirectiveList) {
+	directiveNames := make(nameSet)
+	for _, d := range directives {
+		dirName := d.Name.Name
+		validateNameCustomMsg(c.context, directiveNames, d.Name, "UniqueDirectivesPerLocation", func() string {
+			return fmt.Sprintf("The directive %q can only be used once at this location.", dirName)
+		})
+
+		validateArgumentLiterals(c, d.Args)
+
+		dd, ok := c.schema.Directives[dirName]
+		if !ok {
+			c.addErr(d.Name.Loc, "KnownDirectives", "Unknown directive %q.", dirName)
+			continue
+		}
+
+		locOK := false
+		for _, allowedLoc := range dd.Locs {
+			if loc == allowedLoc {
+				locOK = true
+				break
+			}
+		}
+		if !locOK {
+			c.addErr(d.Name.Loc, "KnownDirectives", "Directive %q may not be used on %s.", dirName, loc)
+		}
+
+		validateArgumentTypes(c, d.Args, dd.Args, d.Name.Loc,
+			func() string { return fmt.Sprintf("directive %q", "@"+dirName) },
+			func() string { return fmt.Sprintf("Directive %q", "@"+dirName) },
+		)
+	}
+}
+
+type nameSet map[string]errors.Location
+
+func validateName(c *context, set nameSet, name common.Ident, rule string, kind string) {
+	validateNameCustomMsg(c, set, name, rule, func() string {
+		return fmt.Sprintf("There can be only one %s named %q.", kind, name.Name)
+	})
+}
+
+func validateNameCustomMsg(c *context, set nameSet, name common.Ident, rule string, msg func() string) {
+	if loc, ok := set[name.Name]; ok {
+		c.addErrMultiLoc([]errors.Location{loc, name.Loc}, rule, msg())
+		return
+	}
+	set[name.Name] = name.Loc
+}
+
+func validateArgumentTypes(c *opContext, args common.ArgumentList, argDecls common.InputValueList, loc errors.Location, owner1, owner2 func() string) {
+	for _, selArg := range args {
+		arg := argDecls.Get(selArg.Name.Name)
+		if arg == nil {
+			c.addErr(selArg.Name.Loc, "KnownArgumentNames", "Unknown argument %q on %s.", selArg.Name.Name, owner1())
+			continue
+		}
+		value := selArg.Value
+		if ok, reason := validateValueType(c, value, arg.Type); !ok {
+			c.addErr(value.Location(), "ArgumentsOfCorrectType", "Argument %q has invalid value %s.\n%s", arg.Name.Name, value, reason)
+		}
+	}
+	for _, decl := range argDecls {
+		if _, ok := decl.Type.(*common.NonNull); ok {
+			if _, ok := args.Get(decl.Name.Name); !ok {
+				c.addErr(loc, "ProvidedNonNullArguments", "%s argument %q of type %q is required but not provided.", owner2(), decl.Name.Name, decl.Type)
+			}
+		}
+	}
+}
+
+func validateArgumentLiterals(c *opContext, args common.ArgumentList) {
+	argNames := make(nameSet)
+	for _, arg := range args {
+		validateName(c.context, argNames, arg.Name, "UniqueArgumentNames", "argument")
+		validateLiteral(c, arg.Value)
+	}
+}
+
+func validateLiteral(c *opContext, l common.Literal) {
+	switch l := l.(type) {
+	case *common.ObjectLit:
+		fieldNames := make(nameSet)
+		for _, f := range l.Fields {
+			validateName(c.context, fieldNames, f.Name, "UniqueInputFieldNames", "input field")
+			validateLiteral(c, f.Value)
+		}
+	case *common.ListLit:
+		for _, entry := range l.Entries {
+			validateLiteral(c, entry)
+		}
+	case *common.Variable:
+		for _, op := range c.ops {
+			v := op.Vars.Get(l.Name)
+			if v == nil {
+				byOp := ""
+				if op.Name.Name != "" {
+					byOp = fmt.Sprintf(" by operation %q", op.Name.Name)
+				}
+				c.opErrs[op] = append(c.opErrs[op], &errors.QueryError{
+					Message:   fmt.Sprintf("Variable %q is not defined%s.", "$"+l.Name, byOp),
+					Locations: []errors.Location{l.Loc, op.Loc},
+					Rule:      "NoUndefinedVariables",
+				})
+				continue
+			}
+			c.usedVars[op][v] = struct{}{}
+		}
+	}
+}
+
+func validateValueType(c *opContext, v common.Literal, t common.Type) (bool, string) {
+	if v, ok := v.(*common.Variable); ok {
+		for _, op := range c.ops {
+			if v2 := op.Vars.Get(v.Name); v2 != nil {
+				t2, err := common.ResolveType(v2.Type, c.schema.Resolve)
+				if _, ok := t2.(*common.NonNull); !ok && v2.Default != nil {
+					t2 = &common.NonNull{OfType: t2}
+				}
+				if err == nil && !typeCanBeUsedAs(t2, t) {
+					c.addErrMultiLoc([]errors.Location{v2.Loc, v.Loc}, "VariablesInAllowedPosition", "Variable %q of type %q used in position expecting type %q.", "$"+v.Name, t2, t)
+				}
+			}
+		}
+		return true, ""
+	}
+
+	if nn, ok := t.(*common.NonNull); ok {
+		if isNull(v) {
+			return false, fmt.Sprintf("Expected %q, found null.", t)
+		}
+		t = nn.OfType
+	}
+	if isNull(v) {
+		return true, ""
+	}
+
+	switch t := t.(type) {
+	case *schema.Scalar, *schema.Enum:
+		if lit, ok := v.(*common.BasicLit); ok {
+			if validateBasicLit(lit, t) {
+				return true, ""
+			}
+		} else {
+			// custom complex scalars will be validated when unmarshaling
+			return true, ""
+		}
+
+	case *common.List:
+		list, ok := v.(*common.ListLit)
+		if !ok {
+			return validateValueType(c, v, t.OfType) // single value instead of list
+		}
+		for i, entry := range list.Entries {
+			if ok, reason := validateValueType(c, entry, t.OfType); !ok {
+				return false, fmt.Sprintf("In element #%d: %s", i, reason)
+			}
+		}
+		return true, ""
+
+	case *schema.InputObject:
+		v, ok := v.(*common.ObjectLit)
+		if !ok {
+			return false, fmt.Sprintf("Expected %q, found not an object.", t)
+		}
+		for _, f := range v.Fields {
+			name := f.Name.Name
+			iv := t.Values.Get(name)
+			if iv == nil {
+				return false, fmt.Sprintf("In field %q: Unknown field.", name)
+			}
+			if ok, reason := validateValueType(c, f.Value, iv.Type); !ok {
+				return false, fmt.Sprintf("In field %q: %s", name, reason)
+			}
+		}
+		for _, iv := range t.Values {
+			found := false
+			for _, f := range v.Fields {
+				if f.Name.Name == iv.Name.Name {
+					found = true
+					break
+				}
+			}
+			if !found {
+				if _, ok := iv.Type.(*common.NonNull); ok && iv.Default == nil {
+					return false, fmt.Sprintf("In field %q: Expected %q, found null.", iv.Name.Name, iv.Type)
+				}
+			}
+		}
+		return true, ""
+	}
+
+	return false, fmt.Sprintf("Expected type %q, found %s.", t, v)
+}
+
+func validateBasicLit(v *common.BasicLit, t common.Type) bool {
+	switch t := t.(type) {
+	case *schema.Scalar:
+		switch t.Name {
+		case "Int":
+			if v.Type != scanner.Int {
+				return false
+			}
+			f, err := strconv.ParseFloat(v.Text, 64)
+			if err != nil {
+				panic(err)
+			}
+			return f >= math.MinInt32 && f <= math.MaxInt32
+		case "Float":
+			return v.Type == scanner.Int || v.Type == scanner.Float
+		case "String":
+			return v.Type == scanner.String
+		case "Boolean":
+			return v.Type == scanner.Ident && (v.Text == "true" || v.Text == "false")
+		case "ID":
+			return v.Type == scanner.Int || v.Type == scanner.String
+		default:
+			//TODO: Type-check against expected type by Unmarshaling
+			return true
+		}
+
+	case *schema.Enum:
+		if v.Type != scanner.Ident {
+			return false
+		}
+		for _, option := range t.Values {
+			if option.Name == v.Text {
+				return true
+			}
+		}
+		return false
+	}
+
+	return false
+}
+
+func canBeFragment(t common.Type) bool {
+	switch t.(type) {
+	case *schema.Object, *schema.Interface, *schema.Union:
+		return true
+	default:
+		return false
+	}
+}
+
+func canBeInput(t common.Type) bool {
+	switch t := t.(type) {
+	case *schema.InputObject, *schema.Scalar, *schema.Enum:
+		return true
+	case *common.List:
+		return canBeInput(t.OfType)
+	case *common.NonNull:
+		return canBeInput(t.OfType)
+	default:
+		return false
+	}
+}
+
+func hasSubfields(t common.Type) bool {
+	switch t := t.(type) {
+	case *schema.Object, *schema.Interface, *schema.Union:
+		return true
+	case *common.List:
+		return hasSubfields(t.OfType)
+	case *common.NonNull:
+		return hasSubfields(t.OfType)
+	default:
+		return false
+	}
+}
+
+func isLeaf(t common.Type) bool {
+	switch t.(type) {
+	case *schema.Scalar, *schema.Enum:
+		return true
+	default:
+		return false
+	}
+}
+
+func isNull(lit interface{}) bool {
+	_, ok := lit.(*common.NullLit)
+	return ok
+}
+
+func typesCompatible(a, b common.Type) bool {
+	al, aIsList := a.(*common.List)
+	bl, bIsList := b.(*common.List)
+	if aIsList || bIsList {
+		return aIsList && bIsList && typesCompatible(al.OfType, bl.OfType)
+	}
+
+	ann, aIsNN := a.(*common.NonNull)
+	bnn, bIsNN := b.(*common.NonNull)
+	if aIsNN || bIsNN {
+		return aIsNN && bIsNN && typesCompatible(ann.OfType, bnn.OfType)
+	}
+
+	if isLeaf(a) || isLeaf(b) {
+		return a == b
+	}
+
+	return true
+}
+
+func typeCanBeUsedAs(t, as common.Type) bool {
+	nnT, okT := t.(*common.NonNull)
+	if okT {
+		t = nnT.OfType
+	}
+
+	nnAs, okAs := as.(*common.NonNull)
+	if okAs {
+		as = nnAs.OfType
+		if !okT {
+			return false // nullable can not be used as non-null
+		}
+	}
+
+	if t == as {
+		return true
+	}
+
+	if lT, ok := t.(*common.List); ok {
+		if lAs, ok := as.(*common.List); ok {
+			return typeCanBeUsedAs(lT.OfType, lAs.OfType)
+		}
+	}
+	return false
+}