WIP

Michael Muré created this change

Change summary

cache/bug_subcache.go             | 25 ----------
cache/cached.go                   |  6 ++
cache/filter.go                   |  4 
cache/identity_subcache.go        |  2 
cache/repo_cache.go               | 75 ++++++++++++++++++--------------
cache/resolvers.go                | 42 ------------------
cache/subcache.go                 | 28 +++++++++---
entities/bug/bug.go               | 59 ++++++++-----------------
entities/bug/bug_actions.go       | 23 ---------
entity/dag/common_test.go         | 12 +++++
entity/dag/entity.go              | 69 ++++++++++++++++++------------
entity/dag/entity_actions.go      | 14 +++---
entity/dag/entity_actions_test.go | 34 +++++++-------
entity/dag/entity_test.go         | 12 ++--
entity/dag/example_test.go        | 14 +++---
entity/interface.go               |  2 
16 files changed, 186 insertions(+), 235 deletions(-)

Detailed changes

cache/bug_subcache.go 🔗

@@ -233,31 +233,6 @@ func (c *RepoCacheBug) NewRaw(author identity.Interface, unixTime int64, title s
 	return cached, op, nil
 }
 
-// Remove removes a bug from the cache and repo given a bug id prefix
-func (c *RepoCacheBug) Remove(prefix string) error {
-	b, err := c.ResolveBugPrefix(prefix)
-	if err != nil {
-		return err
-	}
-
-	c.muBug.Lock()
-
-	err = bug.Remove(c.repo, b.Id())
-	if err != nil {
-		c.muBug.Unlock()
-
-		return err
-	}
-
-	delete(c.bugs, b.Id())
-	delete(c.bugExcerpts, b.Id())
-	c.loadedBugs.Remove(b.Id())
-
-	c.muBug.Unlock()
-
-	return c.writeBugCache()
-}
-
 func (c *RepoCacheBug) addBugToSearchIndex(snap *bug.Snapshot) error {
 	searchableBug := struct {
 		Text []string

cache/cached.go 🔗

@@ -104,6 +104,12 @@ func (e *CachedEntityBase[SnapT, OpT]) ResolveOperationWithMetadata(key string,
 	return matching[0], nil
 }
 
+func (e *CachedEntityBase[SnapT, OpT]) Validate() error {
+	e.mu.RLock()
+	defer e.mu.RUnlock()
+	return e.entity.Validate()
+}
+
 func (e *CachedEntityBase[SnapT, OpT]) Commit() error {
 	e.mu.Lock()
 	err := e.entity.Commit(e.repo)

cache/filter.go 🔗

@@ -9,7 +9,7 @@ import (
 )
 
 // resolver has the resolving functions needed by filters.
-// This exist mainly to go through the functions of the cache with proper locking.
+// This exists mainly to go through the functions of the cache with proper locking.
 type resolver interface {
 	ResolveIdentityExcerpt(id entity.Id) (*IdentityExcerpt, error)
 }
@@ -211,7 +211,7 @@ func (*Matcher) orMatch(filters []Filter, excerpt *BugExcerpt, resolver resolver
 	return match
 }
 
-// Check if all of the filters provided match the bug
+// Check if all the filters provided match the bug
 func (*Matcher) andMatch(filters []Filter, excerpt *BugExcerpt, resolver resolver) bool {
 	if len(filters) == 0 {
 		return true

cache/repo_cache_identity.go → cache/identity_subcache.go 🔗

@@ -7,7 +7,7 @@ import (
 )
 
 type RepoCacheIdentity struct {
-	SubCache[*IdentityExcerpt, *IdentityCache]
+	SubCache[*IdentityExcerpt, *IdentityCache, identity.Interface]
 }
 
 // ResolveIdentityImmutableMetadata retrieve an Identity that has the exact given metadata on

cache/repo_cache.go 🔗

@@ -6,7 +6,8 @@ import (
 	"io/ioutil"
 	"os"
 	"strconv"
-	"sync"
+
+	"golang.org/x/sync/errgroup"
 
 	"github.com/MichaelMure/git-bug/entities/bug"
 	"github.com/MichaelMure/git-bug/entities/identity"
@@ -52,13 +53,10 @@ type RepoCache struct {
 	// resolvers for all known entities
 	resolvers entity.Resolvers
 
-	bugs *RepoCacheBug
+	bugs       *RepoCacheBug
+	identities *RepoCacheIdentity
 
-	muIdentity sync.RWMutex
-	// excerpt of identities data for all identities
-	identitiesExcerpts map[entity.Id]*IdentityExcerpt
-	// identities loaded in memory
-	identities map[entity.Id]*IdentityCache
+	subcaches []cacheMgmt
 
 	// the user identity's id, if known
 	userIdentityId entity.Id
@@ -72,14 +70,20 @@ func NewNamedRepoCache(r repository.ClockedRepo, name string) (*RepoCache, error
 	c := &RepoCache{
 		repo: r,
 		name: name,
-		bugs: NewCache(r),
-		// maxLoadedBugs: defaultMaxLoadedBugs,
-		// bugs:          make(map[entity.Id]*BugCache),
-		// loadedBugs:    newLRUIdCache(),
-		// identities:    make(map[entity.Id]*IdentityCache),
 	}
 
-	c.resolvers = makeResolvers(c)
+	bugs := NewSubCache[*BugExcerpt, *BugCache, bug.Interface](r,
+		c.getResolvers, c.GetUserIdentity,
+		"bug", "bugs",
+		formatVersion, defaultMaxLoadedBugs)
+
+	c.subcaches = append(c.subcaches, bugs)
+	c.bugs = &RepoCacheBug{SubCache: *bugs}
+
+	c.resolvers = entity.Resolvers{
+		&IdentityCache{}: entity.ResolverFunc(func(id entity.Id) (entity.Interface, error) { return c.identities.Resolve(id) }),
+		&BugCache{}:      c.bugs,
+	}
 
 	err := c.lock()
 	if err != nil {
@@ -105,6 +109,15 @@ func (c *RepoCache) Bugs() *RepoCacheBug {
 	return c.bugs
 }
 
+// Identities gives access to the Identity entities
+func (c *RepoCache) Identities() *RepoCacheIdentity {
+	return c.identities
+}
+
+func (c *RepoCache) getResolvers() entity.Resolvers {
+	return c.resolvers
+}
+
 // setCacheSize change the maximum number of loaded bugs
 func (c *RepoCache) setCacheSize(size int) {
 	c.maxLoadedBugs = size
@@ -113,21 +126,20 @@ func (c *RepoCache) setCacheSize(size int) {
 
 // load will try to read from the disk all the cache files
 func (c *RepoCache) load() error {
-	err := c.loadBugCache()
-	if err != nil {
-		return err
+	var errG errgroup.Group
+	for _, mgmt := range c.subcaches {
+		errG.Go(mgmt.Load)
 	}
-
-	return c.loadIdentityCache()
+	return errG.Wait()
 }
 
 // write will serialize on disk all the cache files
 func (c *RepoCache) write() error {
-	err := c.writeBugCache()
-	if err != nil {
-		return err
+	var errG errgroup.Group
+	for _, mgmt := range c.subcaches {
+		errG.Go(mgmt.Write)
 	}
-	return c.writeIdentityCache()
+	return errG.Wait()
 }
 
 func (c *RepoCache) lock() error {
@@ -151,17 +163,16 @@ func (c *RepoCache) lock() error {
 }
 
 func (c *RepoCache) Close() error {
-	c.muBug.Lock()
-	defer c.muBug.Unlock()
-	c.muIdentity.Lock()
-	defer c.muIdentity.Unlock()
-
-	c.identities = make(map[entity.Id]*IdentityCache)
-	c.identitiesExcerpts = nil
-	c.bugs = make(map[entity.Id]*BugCache)
-	c.bugExcerpts = nil
+	var errG errgroup.Group
+	for _, mgmt := range c.subcaches {
+		errG.Go(mgmt.Close)
+	}
+	err := errG.Wait()
+	if err != nil {
+		return err
+	}
 
-	err := c.repo.Close()
+	err = c.repo.Close()
 	if err != nil {
 		return err
 	}
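
For context on load(), write() and Close() above: the work now fans out over every registered subcache through golang.org/x/sync/errgroup instead of calling each cache sequentially. A minimal, self-contained sketch of that fan-out pattern (the task names are stand-ins, not part of the change):

    package main

    import (
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        // Stand-ins for the subcaches' Load functions.
        tasks := []func() error{
            func() error { fmt.Println("load bugs"); return nil },
            func() error { fmt.Println("load identities"); return nil },
        }

        var errG errgroup.Group
        for _, task := range tasks {
            errG.Go(task) // each task runs in its own goroutine
        }
        // Wait blocks until every task has returned and reports the first non-nil error.
        if err := errG.Wait(); err != nil {
            fmt.Println("load failed:", err)
        }
    }

If any subcache fails, the first error wins, which mirrors the previous sequential behavior minus the ordering guarantee.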

cache/resolvers.go 🔗

@@ -1,42 +0,0 @@
-package cache
-
-import (
-	"github.com/MichaelMure/git-bug/entity"
-)
-
-func makeResolvers(cache *RepoCache) entity.Resolvers {
-	return entity.Resolvers{
-		&IdentityCache{}: newIdentityCacheResolver(cache),
-		&BugCache{}:      newBugCacheResolver(cache),
-	}
-}
-
-var _ entity.Resolver = &identityCacheResolver{}
-
-// identityCacheResolver is an identity Resolver that retrieve identities from
-// the cache
-type identityCacheResolver struct {
-	cache *RepoCache
-}
-
-func newIdentityCacheResolver(cache *RepoCache) *identityCacheResolver {
-	return &identityCacheResolver{cache: cache}
-}
-
-func (i *identityCacheResolver) Resolve(id entity.Id) (entity.Interface, error) {
-	return i.cache.ResolveIdentity(id)
-}
-
-var _ entity.Resolver = &bugCacheResolver{}
-
-type bugCacheResolver struct {
-	cache *RepoCache
-}
-
-func newBugCacheResolver(cache *RepoCache) *bugCacheResolver {
-	return &bugCacheResolver{cache: cache}
-}
-
-func (b *bugCacheResolver) Resolve(id entity.Id) (entity.Interface, error) {
-	return b.cache.ResolveBug(id)
-}

cache/subcache.go 🔗

@@ -9,7 +9,6 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/MichaelMure/git-bug/entities/bug"
-	"github.com/MichaelMure/git-bug/entities/identity"
 	"github.com/MichaelMure/git-bug/entity"
 	"github.com/MichaelMure/git-bug/repository"
 )
@@ -22,11 +21,18 @@ type CacheEntity interface {
 	NeedCommit() bool
 }
 
-type getUserIdentityFunc func() (identity.Interface, error)
+type cacheMgmt interface {
+	Load() error
+	Write() error
+	Build() error
+	Close() error
+}
+
+type getUserIdentityFunc func() (*IdentityCache, error)
 
 type SubCache[ExcerptT Excerpt, CacheT CacheEntity, EntityT entity.Interface] struct {
 	repo      repository.ClockedRepo
-	resolvers entity.Resolvers
+	resolvers func() entity.Resolvers
 
 	getUserIdentity  getUserIdentityFunc
 	readWithResolver func(repository.ClockedRepo, entity.Resolvers, entity.Id) (EntityT, error)
@@ -46,8 +52,8 @@ type SubCache[ExcerptT Excerpt, CacheT CacheEntity, EntityT entity.Interface] st
 
 func NewSubCache[ExcerptT Excerpt, CacheT CacheEntity, EntityT entity.Interface](
 	repo repository.ClockedRepo,
-	resolvers entity.Resolvers,
-	getUserIdentity func() (identity.Interface, error),
+	resolvers func() entity.Resolvers,
+	getUserIdentity getUserIdentityFunc,
 	typename, namespace string,
 	version uint, maxLoaded int) *SubCache[ExcerptT, CacheT, EntityT] {
 	return &SubCache[ExcerptT, CacheT, EntityT]{
@@ -144,8 +150,16 @@ func (sc *SubCache[ExcerptT, CacheT, EntityT]) Write() error {
 	return f.Close()
 }
 
-func (sc *SubCache[ExcerptT, CacheT, EntityT]) Build() {
+func (sc *SubCache[ExcerptT, CacheT, EntityT]) Build() error {
+	// TODO: rebuild the excerpts from the repository (left unimplemented in this WIP)
+	return nil
+}
 
+func (sc *SubCache[ExcerptT, CacheT, EntityT]) Close() error {
+	sc.mu.Lock()
+	defer sc.mu.Unlock()
+	sc.excerpts = nil
+	sc.cached = make(map[entity.Id]CacheT)
+	return nil
 }
 
 // AllIds return all known bug ids
@@ -175,7 +189,7 @@ func (sc *SubCache[ExcerptT, CacheT, EntityT]) Resolve(id entity.Id) (CacheT, er
 	}
 	sc.mu.RUnlock()
 
-	b, err := sc.readWithResolver(sc.repo, sc.resolvers, id)
+	b, err := sc.readWithResolver(sc.repo, sc.resolvers(), id)
 	if err != nil {
 		return nil, err
 	}
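
Each concrete subcache satisfies the small cacheMgmt interface above, which is what lets RepoCache keep them in a single []cacheMgmt slice regardless of their generic parameters. A hedged sketch of that idea, assuming it lives in the cache package (noopCache and closeAll are hypothetical, for illustration only):

    // noopCache is a hypothetical cacheMgmt implementation.
    type noopCache struct{}

    func (noopCache) Load() error  { return nil }
    func (noopCache) Write() error { return nil }
    func (noopCache) Build() error { return nil }
    func (noopCache) Close() error { return nil }

    // closeAll shows how heterogeneous subcaches can be driven uniformly.
    func closeAll(subcaches []cacheMgmt) error {
        for _, sc := range subcaches {
            if err := sc.Close(); err != nil {
                return err
            }
        }
        return nil
    }

    // A *SubCache[*BugExcerpt, *BugCache, bug.Interface] and a noopCache fit
    // in the same slice:
    //   err := closeAll([]cacheMgmt{bugs, noopCache{}})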

entities/bug/bug.go 🔗

@@ -27,6 +27,15 @@ var def = dag.Definition{
 	FormatVersion:        formatVersion,
 }
 
+var Actions = dag.Actions[*Bug]{
+	Wrap:             wrapper,
+	New:              NewBug,
+	Read:             Read,
+	ReadWithResolver: ReadWithResolver,
+	ReadAll:          ReadAll,
+	ListLocalIds:     ListLocalIds,
+}
+
 var ClockLoader = dag.ClockLoader(def)
 
 type Interface interface {
@@ -42,9 +51,11 @@ type Bug struct {
 
 // NewBug create a new Bug
 func NewBug() *Bug {
-	return &Bug{
-		Entity: dag.New(def),
-	}
+	return wrapper(dag.New(def))
+}
+
+func wrapper(e *dag.Entity) *Bug {
+	return &Bug{Entity: e}
 }
 
 func simpleResolvers(repo repository.ClockedRepo) entity.Resolvers {
@@ -60,49 +71,17 @@ func Read(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
 
 // ReadWithResolver will read a bug from its Id, with custom resolvers
 func ReadWithResolver(repo repository.ClockedRepo, resolvers entity.Resolvers, id entity.Id) (*Bug, error) {
-	e, err := dag.Read(def, repo, resolvers, id)
-	if err != nil {
-		return nil, err
-	}
-	return &Bug{Entity: e}, nil
-}
-
-type StreamedBug struct {
-	Bug *Bug
-	Err error
+	return dag.Read(def, wrapper, repo, resolvers, id)
 }
 
 // ReadAll read and parse all local bugs
-func ReadAll(repo repository.ClockedRepo) <-chan StreamedBug {
-	return readAll(repo, simpleResolvers(repo))
+func ReadAll(repo repository.ClockedRepo) <-chan dag.StreamedEntity[*Bug] {
+	return dag.ReadAll(def, wrapper, repo, simpleResolvers(repo))
 }
 
 // ReadAllWithResolver read and parse all local bugs
-func ReadAllWithResolver(repo repository.ClockedRepo, resolvers entity.Resolvers) <-chan StreamedBug {
-	return readAll(repo, resolvers)
-}
-
-// Read and parse all available bug with a given ref prefix
-func readAll(repo repository.ClockedRepo, resolvers entity.Resolvers) <-chan StreamedBug {
-	out := make(chan StreamedBug)
-
-	go func() {
-		defer close(out)
-
-		for streamedEntity := range dag.ReadAll(def, repo, resolvers) {
-			if streamedEntity.Err != nil {
-				out <- StreamedBug{
-					Err: streamedEntity.Err,
-				}
-			} else {
-				out <- StreamedBug{
-					Bug: &Bug{Entity: streamedEntity.Entity},
-				}
-			}
-		}
-	}()
-
-	return out
+func ReadAllWithResolver(repo repository.ClockedRepo, resolvers entity.Resolvers) <-chan dag.StreamedEntity[*Bug] {
+	return dag.ReadAll(def, wrapper, repo, resolvers)
 }
 
 // ListLocalIds list all the available local bug ids
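
Since ReadAll now streams dag.StreamedEntity[*Bug], callers get typed bugs without the old StreamedBug wrapper. A hedged usage sketch (listBugs is a made-up helper):

    import (
        "fmt"

        "github.com/MichaelMure/git-bug/entities/bug"
        "github.com/MichaelMure/git-bug/repository"
    )

    // listBugs prints the id of every local bug.
    func listBugs(repo repository.ClockedRepo) error {
        for streamed := range bug.ReadAll(repo) {
            if streamed.Err != nil {
                return streamed.Err
            }
            // streamed.Entity is already a *bug.Bug, no type assertion needed.
            fmt.Println(streamed.Entity.Id())
        }
        return nil
    }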

entities/bug/bug_actions.go 🔗

@@ -23,33 +23,14 @@ func Push(repo repository.Repo, remote string) (string, error) {
 // Note: an author is necessary for the case where a merge commit is created, as this commit will
 // have an author and may be signed if a signing key is available.
 func Pull(repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, mergeAuthor identity.Interface) error {
-	return dag.Pull(def, repo, resolvers, remote, mergeAuthor)
+	return dag.Pull(def, wrapper, repo, resolvers, remote, mergeAuthor)
 }
 
 // MergeAll will merge all the available remote bug
 // Note: an author is necessary for the case where a merge commit is created, as this commit will
 // have an author and may be signed if a signing key is available.
 func MergeAll(repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, mergeAuthor identity.Interface) <-chan entity.MergeResult {
-	out := make(chan entity.MergeResult)
-
-	go func() {
-		defer close(out)
-
-		results := dag.MergeAll(def, repo, resolvers, remote, mergeAuthor)
-
-		// wrap the dag.Entity into a complete Bug
-		for result := range results {
-			result := result
-			if result.Entity != nil {
-				result.Entity = &Bug{
-					Entity: result.Entity.(*dag.Entity),
-				}
-			}
-			out <- result
-		}
-	}()
-
-	return out
+	return dag.MergeAll(def, wrapper, repo, resolvers, remote, mergeAuthor)
 }
 
 // Remove will remove a local bug from its entity.Id

entity/dag/common_test.go 🔗

@@ -87,6 +87,18 @@ func unmarshaler(raw json.RawMessage, resolvers entity.Resolvers) (Operation, er
 	return op, nil
 }
 
+/*
+  Entity
+*/
+
+type Foo struct {
+	*Entity
+}
+
+func wrapper(e *Entity) *Foo {
+	return &Foo{Entity: e}
+}
+
 /*
   Identities + repo + definition
 */

entity/dag/entity.go 🔗

@@ -33,6 +33,19 @@ type Definition struct {
 	FormatVersion uint
 }
 
+type Actions[EntityT entity.Interface] struct {
+	Wrap             func(e *Entity) EntityT
+	New              func() EntityT
+	Read             func(repo repository.ClockedRepo, id entity.Id) (EntityT, error)
+	ReadWithResolver func(repo repository.ClockedRepo, resolvers entity.Resolvers, id entity.Id) (EntityT, error)
+	ReadAll          func(repo repository.ClockedRepo) <-chan StreamedEntity[EntityT]
+	ListLocalIds     func(repo repository.Repo) ([]entity.Id, error)
+	Fetch            func(repo repository.Repo, remote string) (string, error)
+	Push             func(repo repository.Repo, remote string) (string, error)
+	Pull             func(repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, mergeAuthor identity.Interface) error
+	MergeAll         func(repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, mergeAuthor identity.Interface) <-chan entity.MergeResult
+}
+
 // Entity is a data structure stored in a chain of git objects, supporting actions like Push, Pull and Merge.
 type Entity struct {
 	// A Lamport clock is a logical clock that allow to order event
@@ -59,32 +72,32 @@ func New(definition Definition) *Entity {
 }
 
 // Read will read and decode a stored local Entity from a repository
-func Read(def Definition, repo repository.ClockedRepo, resolvers entity.Resolvers, id entity.Id) (*Entity, error) {
+func Read[EntityT entity.Interface](def Definition, wrapper func(e *Entity) EntityT, repo repository.ClockedRepo, resolvers entity.Resolvers, id entity.Id) (EntityT, error) {
 	if err := id.Validate(); err != nil {
-		return nil, errors.Wrap(err, "invalid id")
+		return *new(EntityT), errors.Wrap(err, "invalid id")
 	}
 
 	ref := fmt.Sprintf("refs/%s/%s", def.Namespace, id.String())
 
-	return read(def, repo, resolvers, ref)
+	return read[EntityT](def, wrapper, repo, resolvers, ref)
 }
 
 // readRemote will read and decode a stored remote Entity from a repository
-func readRemote(def Definition, repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, id entity.Id) (*Entity, error) {
+func readRemote[EntityT entity.Interface](def Definition, wrapper func(e *Entity) EntityT, repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, id entity.Id) (EntityT, error) {
 	if err := id.Validate(); err != nil {
-		return nil, errors.Wrap(err, "invalid id")
+		return *new(EntityT), errors.Wrap(err, "invalid id")
 	}
 
 	ref := fmt.Sprintf("refs/remotes/%s/%s/%s", def.Namespace, remote, id.String())
 
-	return read(def, repo, resolvers, ref)
+	return read[EntityT](def, wrapper, repo, resolvers, ref)
 }
 
 // read fetch from git and decode an Entity at an arbitrary git reference.
-func read(def Definition, repo repository.ClockedRepo, resolvers entity.Resolvers, ref string) (*Entity, error) {
+func read[EntityT entity.Interface](def Definition, wrapper func(e *Entity) EntityT, repo repository.ClockedRepo, resolvers entity.Resolvers, ref string) (EntityT, error) {
 	rootHash, err := repo.ResolveRef(ref)
 	if err != nil {
-		return nil, err
+		return *new(EntityT), err
 	}
 
 	// Perform a breadth-first search to get a topological order of the DAG where we discover the
@@ -104,7 +117,7 @@ func read(def Definition, repo repository.ClockedRepo, resolvers entity.Resolver
 
 		commit, err := repo.ReadCommit(hash)
 		if err != nil {
-			return nil, err
+			return *new(EntityT), err
 		}
 
 		BFSOrder = append(BFSOrder, commit)
@@ -137,26 +150,26 @@ func read(def Definition, repo repository.ClockedRepo, resolvers entity.Resolver
 		// can have no parents. Said otherwise, the DAG need to have exactly
 		// one leaf.
 		if !isFirstCommit && len(commit.Parents) == 0 {
-			return nil, fmt.Errorf("multiple leafs in the entity DAG")
+			return *new(EntityT), fmt.Errorf("multiple leafs in the entity DAG")
 		}
 
 		opp, err := readOperationPack(def, repo, resolvers, commit)
 		if err != nil {
-			return nil, err
+			return *new(EntityT), err
 		}
 
 		err = opp.Validate()
 		if err != nil {
-			return nil, err
+			return *new(EntityT), err
 		}
 
 		if isMerge && len(opp.Operations) > 0 {
-			return nil, fmt.Errorf("merge commit cannot have operations")
+			return *new(EntityT), fmt.Errorf("merge commit cannot have operations")
 		}
 
 		// Check that the create lamport clock is set (not checked in Validate() as it's optional)
 		if isFirstCommit && opp.CreateTime <= 0 {
-			return nil, fmt.Errorf("creation lamport time not set")
+			return *new(EntityT), fmt.Errorf("creation lamport time not set")
 		}
 
 		// make sure that the lamport clocks causality match the DAG topology
@@ -167,7 +180,7 @@ func read(def Definition, repo repository.ClockedRepo, resolvers entity.Resolver
 			}
 
 			if parentPack.EditTime >= opp.EditTime {
-				return nil, fmt.Errorf("lamport clock ordering doesn't match the DAG")
+				return *new(EntityT), fmt.Errorf("lamport clock ordering doesn't match the DAG")
 			}
 
 			// to avoid an attack where clocks are pushed toward the uint64 rollover, make sure
@@ -175,7 +188,7 @@ func read(def Definition, repo repository.ClockedRepo, resolvers entity.Resolver
 			// we ignore merge commits here to allow merging after a loooong time without breaking anything,
 			// as long as there is one valid chain of small hops, it's fine.
 			if !isMerge && opp.EditTime-parentPack.EditTime > 1_000_000 {
-				return nil, fmt.Errorf("lamport clock jumping too far in the future, likely an attack")
+				return *new(EntityT), fmt.Errorf("lamport clock jumping too far in the future, likely an attack")
 			}
 		}
 
@@ -187,11 +200,11 @@ func read(def Definition, repo repository.ClockedRepo, resolvers entity.Resolver
 	for _, opp := range oppMap {
 		err = repo.Witness(fmt.Sprintf(creationClockPattern, def.Namespace), opp.CreateTime)
 		if err != nil {
-			return nil, err
+			return *new(EntityT), err
 		}
 		err = repo.Witness(fmt.Sprintf(editClockPattern, def.Namespace), opp.EditTime)
 		if err != nil {
-			return nil, err
+			return *new(EntityT), err
 		}
 	}
 
@@ -232,13 +245,13 @@ func read(def Definition, repo repository.ClockedRepo, resolvers entity.Resolver
 		}
 	}
 
-	return &Entity{
+	return wrapper(&Entity{
 		Definition: def,
 		ops:        ops,
 		lastCommit: rootHash,
 		createTime: createTime,
 		editTime:   editTime,
-	}, nil
+	}), nil
 }
 
 // readClockNoCheck fetch from git, read and witness the clocks of an Entity at an arbitrary git reference.
@@ -293,14 +306,14 @@ func readClockNoCheck(def Definition, repo repository.ClockedRepo, ref string) e
 	return nil
 }
 
-type StreamedEntity struct {
-	Entity *Entity
+type StreamedEntity[EntityT entity.Interface] struct {
+	Entity EntityT
 	Err    error
 }
 
 // ReadAll read and parse all local Entity
-func ReadAll(def Definition, repo repository.ClockedRepo, resolvers entity.Resolvers) <-chan StreamedEntity {
-	out := make(chan StreamedEntity)
+func ReadAll[EntityT entity.Interface](def Definition, wrapper func(e *Entity) EntityT, repo repository.ClockedRepo, resolvers entity.Resolvers) <-chan StreamedEntity[EntityT] {
+	out := make(chan StreamedEntity[EntityT])
 
 	go func() {
 		defer close(out)
@@ -309,19 +322,19 @@ func ReadAll(def Definition, repo repository.ClockedRepo, resolvers entity.Resol
 
 		refs, err := repo.ListRefs(refPrefix)
 		if err != nil {
-			out <- StreamedEntity{Err: err}
+			out <- StreamedEntity[EntityT]{Err: err}
 			return
 		}
 
 		for _, ref := range refs {
-			e, err := read(def, repo, resolvers, ref)
+			e, err := read[EntityT](def, wrapper, repo, resolvers, ref)
 
 			if err != nil {
-				out <- StreamedEntity{Err: err}
+				out <- StreamedEntity[EntityT]{Err: err}
 				return
 			}
 
-			out <- StreamedEntity{Entity: e}
+			out <- StreamedEntity[EntityT]{Entity: e}
 		}
 	}()
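
Because EntityT is a type parameter, the error paths above cannot return a plain nil; *new(EntityT) produces the zero value of whatever type was instantiated (nil for pointer types). A tiny self-contained illustration of the idiom:

    package main

    import "fmt"

    // zero returns the zero value of any type parameter, the same trick
    // read() uses to bail out of an error path without a concrete nil.
    func zero[T any]() T {
        return *new(T)
    }

    func main() {
        fmt.Println(zero[*int]() == nil)  // true: the zero value of a pointer type is nil
        fmt.Println(zero[int]())          // 0
        fmt.Println(zero[string]() == "") // true
    }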
 

entity/dag/entity_actions.go 🔗

@@ -32,13 +32,13 @@ func Push(def Definition, repo repository.Repo, remote string) (string, error) {
 
 // Pull will do a Fetch + MergeAll
 // Contrary to MergeAll, this function will return an error if a merge fail.
-func Pull(def Definition, repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, author identity.Interface) error {
+func Pull[EntityT entity.Interface](def Definition, wrapper func(e *Entity) EntityT, repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, author identity.Interface) error {
 	_, err := Fetch(def, repo, remote)
 	if err != nil {
 		return err
 	}
 
-	for merge := range MergeAll(def, repo, resolvers, remote, author) {
+	for merge := range MergeAll(def, wrapper, repo, resolvers, remote, author) {
 		if merge.Err != nil {
 			return merge.Err
 		}
@@ -68,7 +68,7 @@ func Pull(def Definition, repo repository.ClockedRepo, resolvers entity.Resolver
 //
 // Note: an author is necessary for the case where a merge commit is created, as this commit will
 // have an author and may be signed if a signing key is available.
-func MergeAll(def Definition, repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, author identity.Interface) <-chan entity.MergeResult {
+func MergeAll[EntityT entity.Interface](def Definition, wrapper func(e *Entity) EntityT, repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, author identity.Interface) <-chan entity.MergeResult {
 	out := make(chan entity.MergeResult)
 
 	go func() {
@@ -82,7 +82,7 @@ func MergeAll(def Definition, repo repository.ClockedRepo, resolvers entity.Reso
 		}
 
 		for _, remoteRef := range remoteRefs {
-			out <- merge(def, repo, resolvers, remoteRef, author)
+			out <- merge[EntityT](def, wrapper, repo, resolvers, remoteRef, author)
 		}
 	}()
 
@@ -91,14 +91,14 @@ func MergeAll(def Definition, repo repository.ClockedRepo, resolvers entity.Reso
 
 // merge perform a merge to make sure a local Entity is up-to-date.
 // See MergeAll for more details.
-func merge(def Definition, repo repository.ClockedRepo, resolvers entity.Resolvers, remoteRef string, author identity.Interface) entity.MergeResult {
+func merge[EntityT entity.Interface](def Definition, wrapper func(e *Entity) EntityT, repo repository.ClockedRepo, resolvers entity.Resolvers, remoteRef string, author identity.Interface) entity.MergeResult {
 	id := entity.RefToId(remoteRef)
 
 	if err := id.Validate(); err != nil {
 		return entity.NewMergeInvalidStatus(id, errors.Wrap(err, "invalid ref").Error())
 	}
 
-	remoteEntity, err := read(def, repo, resolvers, remoteRef)
+	remoteEntity, err := read[EntityT](def, wrapper, repo, resolvers, remoteRef)
 	if err != nil {
 		return entity.NewMergeInvalidStatus(id,
 			errors.Wrapf(err, "remote %s is not readable", def.Typename).Error())
@@ -197,7 +197,7 @@ func merge(def Definition, repo repository.ClockedRepo, resolvers entity.Resolve
 	// an empty operationPack.
 	// First step is to collect those clocks.
 
-	localEntity, err := read(def, repo, resolvers, localRef)
+	localEntity, err := read[EntityT](def, wrapper, repo, resolvers, localRef)
 	if err != nil {
 		return entity.NewMergeError(err, id)
 	}
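
Pull and MergeAll take the same wrapper as Read so that each merged *Entity is lifted into the caller's type before being reported. A hedged sketch of draining the MergeAll channel, assuming it sits in this package next to the test Foo wrapper (mergeRemote and the "origin" remote name are illustrative):

    // mergeRemote merges every entity found on the remote and stops at the
    // first failed merge, much like Pull does after its Fetch.
    func mergeRemote(def Definition, repo repository.ClockedRepo, resolvers entity.Resolvers, author identity.Interface) error {
        for result := range MergeAll(def, wrapper, repo, resolvers, "origin", author) {
            if result.Err != nil {
                return result.Err
            }
        }
        return nil
    }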

entity/dag/entity_actions_test.go 🔗

@@ -11,10 +11,10 @@ import (
 	"github.com/MichaelMure/git-bug/repository"
 )
 
-func allEntities(t testing.TB, bugs <-chan StreamedEntity) []*Entity {
+func allEntities(t testing.TB, bugs <-chan StreamedEntity[*Foo]) []*Foo {
 	t.Helper()
 
-	var result []*Entity
+	var result []*Foo
 	for streamed := range bugs {
 		require.NoError(t, streamed.Err)
 
@@ -36,10 +36,10 @@ func TestEntityPushPull(t *testing.T) {
 	_, err = Push(def, repoA, "remote")
 	require.NoError(t, err)
 
-	err = Pull(def, repoB, resolvers, "remote", id1)
+	err = Pull(def, wrapper, repoB, resolvers, "remote", id1)
 	require.NoError(t, err)
 
-	entities := allEntities(t, ReadAll(def, repoB, resolvers))
+	entities := allEntities(t, ReadAll(def, wrapper, repoB, resolvers))
 	require.Len(t, entities, 1)
 
 	// B --> remote --> A
@@ -52,10 +52,10 @@ func TestEntityPushPull(t *testing.T) {
 	_, err = Push(def, repoB, "remote")
 	require.NoError(t, err)
 
-	err = Pull(def, repoA, resolvers, "remote", id1)
+	err = Pull(def, wrapper, repoA, resolvers, "remote", id1)
 	require.NoError(t, err)
 
-	entities = allEntities(t, ReadAll(def, repoB, resolvers))
+	entities = allEntities(t, ReadAll(def, wrapper, repoB, resolvers))
 	require.Len(t, entities, 2)
 }
 
@@ -85,7 +85,7 @@ func TestListLocalIds(t *testing.T) {
 	listLocalIds(t, def, repoA, 2)
 	listLocalIds(t, def, repoB, 0)
 
-	err = Pull(def, repoB, resolvers, "remote", id1)
+	err = Pull(def, wrapper, repoB, resolvers, "remote", id1)
 	require.NoError(t, err)
 
 	listLocalIds(t, def, repoA, 2)
@@ -228,7 +228,7 @@ func TestMerge(t *testing.T) {
 	_, err = Fetch(def, repoB, "remote")
 	require.NoError(t, err)
 
-	results := MergeAll(def, repoB, resolvers, "remote", id1)
+	results := MergeAll(def, wrapper, repoB, resolvers, "remote", id1)
 
 	assertMergeResults(t, []entity.MergeResult{
 		{
@@ -246,7 +246,7 @@ func TestMerge(t *testing.T) {
 	// SCENARIO 2
 	// if the remote and local Entity have the same state, nothing is changed
 
-	results = MergeAll(def, repoB, resolvers, "remote", id1)
+	results = MergeAll(def, wrapper, repoB, resolvers, "remote", id1)
 
 	assertMergeResults(t, []entity.MergeResult{
 		{
@@ -272,7 +272,7 @@ func TestMerge(t *testing.T) {
 	err = e2A.Commit(repoA)
 	require.NoError(t, err)
 
-	results = MergeAll(def, repoA, resolvers, "remote", id1)
+	results = MergeAll(def, wrapper, repoA, resolvers, "remote", id1)
 
 	assertMergeResults(t, []entity.MergeResult{
 		{
@@ -297,7 +297,7 @@ func TestMerge(t *testing.T) {
 	_, err = Fetch(def, repoB, "remote")
 	require.NoError(t, err)
 
-	results = MergeAll(def, repoB, resolvers, "remote", id1)
+	results = MergeAll(def, wrapper, repoB, resolvers, "remote", id1)
 
 	assertMergeResults(t, []entity.MergeResult{
 		{
@@ -324,10 +324,10 @@ func TestMerge(t *testing.T) {
 	err = e2A.Commit(repoA)
 	require.NoError(t, err)
 
-	e1B, err := Read(def, repoB, resolvers, e1A.Id())
+	e1B, err := Read(def, wrapper, repoB, resolvers, e1A.Id())
 	require.NoError(t, err)
 
-	e2B, err := Read(def, repoB, resolvers, e2A.Id())
+	e2B, err := Read(def, wrapper, repoB, resolvers, e2A.Id())
 	require.NoError(t, err)
 
 	e1B.Append(newOp1(id1, "barbarfoofoo"))
@@ -344,7 +344,7 @@ func TestMerge(t *testing.T) {
 	_, err = Fetch(def, repoB, "remote")
 	require.NoError(t, err)
 
-	results = MergeAll(def, repoB, resolvers, "remote", id1)
+	results = MergeAll(def, wrapper, repoB, resolvers, "remote", id1)
 
 	assertMergeResults(t, []entity.MergeResult{
 		{
@@ -365,7 +365,7 @@ func TestMerge(t *testing.T) {
 	_, err = Fetch(def, repoA, "remote")
 	require.NoError(t, err)
 
-	results = MergeAll(def, repoA, resolvers, "remote", id1)
+	results = MergeAll(def, wrapper, repoA, resolvers, "remote", id1)
 
 	assertMergeResults(t, []entity.MergeResult{
 		{
@@ -396,10 +396,10 @@ func TestRemove(t *testing.T) {
 	err = Remove(def, repoA, e.Id())
 	require.NoError(t, err)
 
-	_, err = Read(def, repoA, resolvers, e.Id())
+	_, err = Read(def, wrapper, repoA, resolvers, e.Id())
 	require.Error(t, err)
 
-	_, err = readRemote(def, repoA, resolvers, "remote", e.Id())
+	_, err = readRemote(def, wrapper, repoA, resolvers, "remote", e.Id())
 	require.Error(t, err)
 
 	// Remove is idempotent

entity/dag/entity_test.go 🔗

@@ -9,7 +9,7 @@ import (
 func TestWriteRead(t *testing.T) {
 	repo, id1, id2, resolver, def := makeTestContext()
 
-	entity := New(def)
+	entity := wrapper(New(def))
 	require.False(t, entity.NeedCommit())
 
 	entity.Append(newOp1(id1, "foo"))
@@ -24,16 +24,16 @@ func TestWriteRead(t *testing.T) {
 	require.NoError(t, entity.CommitAsNeeded(repo))
 	require.False(t, entity.NeedCommit())
 
-	read, err := Read(def, repo, resolver, entity.Id())
+	read, err := Read(def, wrapper, repo, resolver, entity.Id())
 	require.NoError(t, err)
 
-	assertEqualEntities(t, entity, read)
+	assertEqualEntities(t, entity.Entity, read.Entity)
 }
 
 func TestWriteReadMultipleAuthor(t *testing.T) {
 	repo, id1, id2, resolver, def := makeTestContext()
 
-	entity := New(def)
+	entity := wrapper(New(def))
 
 	entity.Append(newOp1(id1, "foo"))
 	entity.Append(newOp2(id2, "bar"))
@@ -43,10 +43,10 @@ func TestWriteReadMultipleAuthor(t *testing.T) {
 	entity.Append(newOp2(id1, "foobar"))
 	require.NoError(t, entity.CommitAsNeeded(repo))
 
-	read, err := Read(def, repo, resolver, entity.Id())
+	read, err := Read(def, wrapper, repo, resolver, entity.Id())
 	require.NoError(t, err)
 
-	assertEqualEntities(t, entity, read)
+	assertEqualEntities(t, entity.Entity, read.Entity)
 }
 
 func assertEqualEntities(t *testing.T, a, b *Entity) {

entity/dag/example_test.go 🔗

@@ -200,7 +200,11 @@ type ProjectConfig struct {
 }
 
 func NewProjectConfig() *ProjectConfig {
-	return &ProjectConfig{Entity: dag.New(def)}
+	return wrapper(dag.New(def))
+}
+
+func wrapper(e *dag.Entity) *ProjectConfig {
+	return &ProjectConfig{Entity: e}
 }
 
 // a Definition describes a few properties of the Entity, a sort of configuration to manipulate the
@@ -282,11 +286,7 @@ func (pc ProjectConfig) Compile() *Snapshot {
 
 // Read is a helper to load a ProjectConfig from a Repository
 func Read(repo repository.ClockedRepo, id entity.Id) (*ProjectConfig, error) {
-	e, err := dag.Read(def, repo, simpleResolvers(repo), id)
-	if err != nil {
-		return nil, err
-	}
-	return &ProjectConfig{Entity: e}, nil
+	return dag.Read(def, wrapper, repo, simpleResolvers(repo), id)
 }
 
 func simpleResolvers(repo repository.ClockedRepo) entity.Resolvers {
@@ -331,7 +331,7 @@ func Example_entity() {
 	_ = confRene.Commit(repoRene)
 
 	// Isaac pull and read the config
-	_ = dag.Pull(def, repoIsaac, simpleResolvers(repoIsaac), "origin", isaac)
+	_ = dag.Pull(def, wrapper, repoIsaac, simpleResolvers(repoIsaac), "origin", isaac)
 	confIsaac, _ := Read(repoIsaac, confRene.Id())
 
 	// Compile gives the current state of the config

entity/interface.go 🔗

@@ -9,6 +9,8 @@ type Interface interface {
 	// the root of the entity.
 	// It is acceptable to use such a hash and keep mutating that data as long as Id() is not called.
 	Id() Id
+	// Validate checks that the Entity data is valid
+	Validate() error
 }
 
 // type Commitable interface {
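
With Validate added to entity.Interface, anything handled by the generic cache and dag layers must be able to check its own consistency. A minimal hypothetical implementation, assuming the usual imports (the draft type is made up for illustration):

    type draft struct {
        id    entity.Id
        title string
    }

    func (d *draft) Id() entity.Id { return d.id }

    func (d *draft) Validate() error {
        if d.title == "" {
            return fmt.Errorf("a draft needs a title")
        }
        return nil
    }

    // Compile-time check that *draft satisfies entity.Interface.
    var _ entity.Interface = (*draft)(nil)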