Merge pull request #532 from MichaelMure/dag-entity

Created by Michael Muré

Work towards a reusable entity datastructure + commit signature

Change summary

.github/workflows/go.yml               |   2 
.gitignore                             |   1 
api/graphql/models/lazy_identity.go    |  49 -
api/graphql/resolvers/operations.go    |  12 
api/graphql/resolvers/query.go         |  13 
bridge/core/auth/credential_test.go    |   4 
bridge/github/export.go                |   2 
bridge/github/export_test.go           |   2 
bridge/github/import.go                |   2 
bridge/github/import_test.go           |  63 +-
bridge/gitlab/export.go                |   2 
bridge/gitlab/export_test.go           |   2 
bridge/gitlab/import.go                |   1 
bridge/gitlab/import_test.go           |  71 +-
bridge/jira/export.go                  |   2 
bridge/jira/import.go                  |   1 
bridge/launchpad/import.go             |   1 
bug/bug.go                             | 703 ++-------------------------
bug/bug_actions.go                     | 116 ---
bug/bug_actions_test.go                | 390 ---------------
bug/bug_test.go                        | 183 -------
bug/clocks.go                          |  40 -
bug/err.go                             |  17 
bug/identity.go                        |  27 -
bug/interface.go                       |   8 
bug/op_add_comment.go                  |  24 
bug/op_add_comment_test.go             |  10 
bug/op_create.go                       |  59 +
bug/op_create_test.go                  |  38 
bug/op_edit_comment.go                 |  17 
bug/op_edit_comment_test.go            |  71 +-
bug/op_label_change.go                 |  17 
bug/op_label_change_test.go            |  18 
bug/op_noop.go                         |  13 
bug/op_noop_test.go                    |  10 
bug/op_set_metadata.go                 |  26 
bug/op_set_metadata_test.go            |  57 +-
bug/op_set_status.go                   |  17 
bug/op_set_status_test.go              |  18 
bug/op_set_title.go                    |  29 
bug/op_set_title_test.go               |  18 
bug/operation.go                       | 200 +++++--
bug/operation_iterator.go              |  72 --
bug/operation_iterator_test.go         |  78 ---
bug/operation_pack.go                  | 188 -------
bug/operation_pack_test.go             |  79 ---
bug/operation_test.go                  |  38 +
bug/snapshot.go                        |   5 
bug/sorting.go                         |   8 
bug/with_snapshot.go                   |   8 
cache/bug_cache.go                     |   4 
cache/bug_excerpt.go                   |   2 
cache/identity_cache.go                |   8 
cache/repo_cache.go                    |   5 
cache/repo_cache_bug.go                |  54 +
cache/repo_cache_common.go             |  15 
cache/repo_cache_identity.go           |  13 
cache/repo_cache_test.go               |   8 
commands/comment.go                    |   1 
commands/comment_edit.go               |  71 ++
commands/show.go                       |   3 
commands/user.go                       |  14 
commands/user_create.go                |   2 
doc/man/git-bug-comment-edit.1         |  35 +
doc/man/git-bug-comment.1              |   2 
doc/man/git-bug-user.1                 |   2 
doc/md/git-bug_comment.md              |   1 
doc/md/git-bug_comment_edit.md         |  20 
doc/md/git-bug_user.md                 |   2 
entity/dag/clock.go                    |  37 +
entity/dag/common_test.go              | 173 ++++++
entity/dag/entity.go                   | 439 +++++++++++++++++
entity/dag/entity_actions.go           | 260 ++++++++++
entity/dag/entity_actions_test.go      | 412 ++++++++++++++++
entity/dag/entity_test.go              |  68 ++
entity/dag/operation.go                |  48 +
entity/dag/operation_pack.go           | 358 ++++++++++++++
entity/dag/operation_pack_test.go      | 159 ++++++
entity/doc.go                          |   8 
entity/err.go                          |  39 
entity/id.go                           |  20 
entity/id_interleaved.go               |  68 ++
entity/id_interleaved_test.go          |  36 +
entity/interface.go                    |   6 
entity/merge.go                        |  53 +
entity/refs.go                         |   6 
go.mod                                 |   3 
go.sum                                 |  10 
identity/identity.go                   | 290 +++++------
identity/identity_actions.go           |  17 
identity/identity_actions_test.go      |  40 
identity/identity_stub.go              |  22 
identity/identity_test.go              | 241 ++++-----
identity/interface.go                  |  28 
identity/key.go                        | 218 ++++++++
identity/key_test.go                   |  60 ++
identity/version.go                    | 173 ++++--
identity/version_test.go               |  67 ++
misc/bash_completion/git-bug           |  33 +
misc/random_bugs/create_random_bugs.go |  57 -
repository/common.go                   |  67 ++
repository/git.go                      | 500 -------------------
repository/git_cli.go                  |  57 --
repository/git_config.go               | 221 --------
repository/git_test.go                 |  10 
repository/git_testing.go              |  72 --
repository/gogit.go                    | 187 ++++++-
repository/gogit_testing.go            |   8 
repository/keyring.go                  |  12 
repository/mock_repo.go                | 257 ++++++---
repository/mock_repo_test.go           |   6 
repository/repo.go                     |  58 +
repository/repo_testing.go             |  84 +++
repository/tree_entry.go               |  10 
tests/read_bugs_test.go                |   4 
util/lamport/clock_testing.go          |   6 
util/lamport/mem_clock.go              |  14 
117 files changed, 4,305 insertions(+), 3,811 deletions(-)

Detailed changes

.github/workflows/go.yml 🔗

@@ -12,7 +12,7 @@ jobs:
 
     strategy:
       matrix:
-        go-version: [1.13.x, 1.14.x, 1.15.x]
+        go-version: [1.15.x]
         platform: [ubuntu-latest, macos-latest, windows-latest]
 
     runs-on: ${{ matrix.platform }}

.gitignore 🔗

@@ -7,3 +7,4 @@ git-bug
 dist
 coverage.txt
 .idea/
+.git_bak*

api/graphql/models/lazy_identity.go 🔗

@@ -7,8 +7,6 @@ import (
 	"github.com/MichaelMure/git-bug/cache"
 	"github.com/MichaelMure/git-bug/entity"
 	"github.com/MichaelMure/git-bug/identity"
-	"github.com/MichaelMure/git-bug/util/lamport"
-	"github.com/MichaelMure/git-bug/util/timestamp"
 )
 
 // IdentityWrapper is an interface used by the GraphQL resolvers to handle an identity.
@@ -21,11 +19,8 @@ type IdentityWrapper interface {
 	Login() (string, error)
 	AvatarUrl() (string, error)
 	Keys() ([]*identity.Key, error)
-	ValidKeysAtTime(time lamport.Time) ([]*identity.Key, error)
 	DisplayName() string
 	IsProtected() (bool, error)
-	LastModificationLamport() (lamport.Time, error)
-	LastModification() (timestamp.Timestamp, error)
 }
 
 var _ IdentityWrapper = &lazyIdentity{}
@@ -69,6 +64,10 @@ func (li *lazyIdentity) Name() string {
 	return li.excerpt.Name
 }
 
+func (li *lazyIdentity) DisplayName() string {
+	return li.excerpt.DisplayName()
+}
+
 func (li *lazyIdentity) Email() (string, error) {
 	id, err := li.load()
 	if err != nil {
@@ -101,18 +100,6 @@ func (li *lazyIdentity) Keys() ([]*identity.Key, error) {
 	return id.Keys(), nil
 }
 
-func (li *lazyIdentity) ValidKeysAtTime(time lamport.Time) ([]*identity.Key, error) {
-	id, err := li.load()
-	if err != nil {
-		return nil, err
-	}
-	return id.ValidKeysAtTime(time), nil
-}
-
-func (li *lazyIdentity) DisplayName() string {
-	return li.excerpt.DisplayName()
-}
-
 func (li *lazyIdentity) IsProtected() (bool, error) {
 	id, err := li.load()
 	if err != nil {
@@ -121,22 +108,6 @@ func (li *lazyIdentity) IsProtected() (bool, error) {
 	return id.IsProtected(), nil
 }
 
-func (li *lazyIdentity) LastModificationLamport() (lamport.Time, error) {
-	id, err := li.load()
-	if err != nil {
-		return 0, err
-	}
-	return id.LastModificationLamport(), nil
-}
-
-func (li *lazyIdentity) LastModification() (timestamp.Timestamp, error) {
-	id, err := li.load()
-	if err != nil {
-		return 0, err
-	}
-	return id.LastModification(), nil
-}
-
 var _ IdentityWrapper = &loadedIdentity{}
 
 type loadedIdentity struct {
@@ -163,18 +134,6 @@ func (l loadedIdentity) Keys() ([]*identity.Key, error) {
 	return l.Interface.Keys(), nil
 }
 
-func (l loadedIdentity) ValidKeysAtTime(time lamport.Time) ([]*identity.Key, error) {
-	return l.Interface.ValidKeysAtTime(time), nil
-}
-
 func (l loadedIdentity) IsProtected() (bool, error) {
 	return l.Interface.IsProtected(), nil
 }
-
-func (l loadedIdentity) LastModificationLamport() (lamport.Time, error) {
-	return l.Interface.LastModificationLamport(), nil
-}
-
-func (l loadedIdentity) LastModification() (timestamp.Timestamp, error) {
-	return l.Interface.LastModification(), nil
-}

api/graphql/resolvers/operations.go 🔗

@@ -19,7 +19,7 @@ func (createOperationResolver) ID(_ context.Context, obj *bug.CreateOperation) (
 }
 
 func (createOperationResolver) Author(_ context.Context, obj *bug.CreateOperation) (models.IdentityWrapper, error) {
-	return models.NewLoadedIdentity(obj.Author), nil
+	return models.NewLoadedIdentity(obj.Author()), nil
 }
 
 func (createOperationResolver) Date(_ context.Context, obj *bug.CreateOperation) (*time.Time, error) {
@@ -36,7 +36,7 @@ func (addCommentOperationResolver) ID(_ context.Context, obj *bug.AddCommentOper
 }
 
 func (addCommentOperationResolver) Author(_ context.Context, obj *bug.AddCommentOperation) (models.IdentityWrapper, error) {
-	return models.NewLoadedIdentity(obj.Author), nil
+	return models.NewLoadedIdentity(obj.Author()), nil
 }
 
 func (addCommentOperationResolver) Date(_ context.Context, obj *bug.AddCommentOperation) (*time.Time, error) {
@@ -57,7 +57,7 @@ func (editCommentOperationResolver) Target(_ context.Context, obj *bug.EditComme
 }
 
 func (editCommentOperationResolver) Author(_ context.Context, obj *bug.EditCommentOperation) (models.IdentityWrapper, error) {
-	return models.NewLoadedIdentity(obj.Author), nil
+	return models.NewLoadedIdentity(obj.Author()), nil
 }
 
 func (editCommentOperationResolver) Date(_ context.Context, obj *bug.EditCommentOperation) (*time.Time, error) {
@@ -74,7 +74,7 @@ func (labelChangeOperationResolver) ID(_ context.Context, obj *bug.LabelChangeOp
 }
 
 func (labelChangeOperationResolver) Author(_ context.Context, obj *bug.LabelChangeOperation) (models.IdentityWrapper, error) {
-	return models.NewLoadedIdentity(obj.Author), nil
+	return models.NewLoadedIdentity(obj.Author()), nil
 }
 
 func (labelChangeOperationResolver) Date(_ context.Context, obj *bug.LabelChangeOperation) (*time.Time, error) {
@@ -91,7 +91,7 @@ func (setStatusOperationResolver) ID(_ context.Context, obj *bug.SetStatusOperat
 }
 
 func (setStatusOperationResolver) Author(_ context.Context, obj *bug.SetStatusOperation) (models.IdentityWrapper, error) {
-	return models.NewLoadedIdentity(obj.Author), nil
+	return models.NewLoadedIdentity(obj.Author()), nil
 }
 
 func (setStatusOperationResolver) Date(_ context.Context, obj *bug.SetStatusOperation) (*time.Time, error) {
@@ -112,7 +112,7 @@ func (setTitleOperationResolver) ID(_ context.Context, obj *bug.SetTitleOperatio
 }
 
 func (setTitleOperationResolver) Author(_ context.Context, obj *bug.SetTitleOperation) (models.IdentityWrapper, error) {
-	return models.NewLoadedIdentity(obj.Author), nil
+	return models.NewLoadedIdentity(obj.Author()), nil
 }
 
 func (setTitleOperationResolver) Date(_ context.Context, obj *bug.SetTitleOperation) (*time.Time, error) {

api/graphql/resolvers/query.go 🔗

@@ -14,19 +14,6 @@ type rootQueryResolver struct {
 	cache *cache.MultiRepoCache
 }
 
-func (r rootQueryResolver) DefaultRepository(_ context.Context) (*models.Repository, error) {
-	repo, err := r.cache.DefaultRepo()
-
-	if err != nil {
-		return nil, err
-	}
-
-	return &models.Repository{
-		Cache: r.cache,
-		Repo:  repo,
-	}, nil
-}
-
 func (r rootQueryResolver) Repository(_ context.Context, ref *string) (*models.Repository, error) {
 	var repo *cache.RepoCache
 	var err error

bridge/core/auth/credential_test.go 🔗

@@ -11,7 +11,7 @@ import (
 )
 
 func TestCredential(t *testing.T) {
-	repo := repository.NewMockRepoForTest()
+	repo := repository.NewMockRepo()
 
 	storeToken := func(val string, target string) *Token {
 		token := NewToken(target, val)
@@ -102,7 +102,7 @@ func sameIds(t *testing.T, a []Credential, b []Credential) {
 }
 
 func testCredentialSerial(t *testing.T, original Credential) Credential {
-	repo := repository.NewMockRepoForTest()
+	repo := repository.NewMockRepo()
 
 	original.SetMetadata("test", "value")
 

bridge/github/export.go 🔗

@@ -294,7 +294,7 @@ func (ge *githubExporter) exportBug(ctx context.Context, b *cache.BugCache, out
 			continue
 		}
 
-		opAuthor := op.GetAuthor()
+		opAuthor := op.Author()
 		client, err := ge.getClientForIdentity(opAuthor.Id())
 		if err != nil {
 			continue

bridge/github/export_test.go 🔗

@@ -126,7 +126,7 @@ func testCases(t *testing.T, repo *cache.RepoCache) []*testCase {
 	}
 }
 
-func TestPushPull(t *testing.T) {
+func TestGithubPushPull(t *testing.T) {
 	// repo owner
 	envUser := os.Getenv("GITHUB_TEST_USER")
 

bridge/github/import.go 🔗

@@ -551,6 +551,7 @@ func (gi *githubImporter) ensurePerson(repo *cache.RepoCache, actor *actor) (*ca
 		email,
 		string(actor.Login),
 		string(actor.AvatarUrl),
+		nil,
 		map[string]string{
 			metaKeyGithubLogin: string(actor.Login),
 		},
@@ -598,6 +599,7 @@ func (gi *githubImporter) getGhost(repo *cache.RepoCache) (*cache.IdentityCache,
 		"",
 		string(q.User.Login),
 		string(q.User.AvatarUrl),
+		nil,
 		map[string]string{
 			metaKeyGithubLogin: string(q.User.Login),
 		},

bridge/github/import_test.go 🔗

@@ -7,7 +7,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	"github.com/MichaelMure/git-bug/bridge/core"
@@ -19,8 +18,23 @@ import (
 	"github.com/MichaelMure/git-bug/util/interrupt"
 )
 
-func Test_Importer(t *testing.T) {
-	author := identity.NewIdentity("Michael Muré", "batolettre@gmail.com")
+func TestGithubImporter(t *testing.T) {
+	envToken := os.Getenv("GITHUB_TOKEN_PRIVATE")
+	if envToken == "" {
+		t.Skip("Env var GITHUB_TOKEN_PRIVATE missing")
+	}
+
+	repo := repository.CreateGoGitTestRepo(false)
+	defer repository.CleanupTestRepos(repo)
+
+	backend, err := cache.NewRepoCache(repo)
+	require.NoError(t, err)
+
+	defer backend.Close()
+	interrupt.RegisterCleaner(backend.Close)
+
+	author, err := identity.NewIdentity(repo, "Michael Muré", "batolettre@gmail.com")
+	require.NoError(t, err)
 
 	tests := []struct {
 		name string
@@ -127,20 +141,6 @@ func Test_Importer(t *testing.T) {
 		},
 	}
 
-	repo := repository.CreateGoGitTestRepo(false)
-	defer repository.CleanupTestRepos(repo)
-
-	backend, err := cache.NewRepoCache(repo)
-	require.NoError(t, err)
-
-	defer backend.Close()
-	interrupt.RegisterCleaner(backend.Close)
-
-	envToken := os.Getenv("GITHUB_TOKEN_PRIVATE")
-	if envToken == "" {
-		t.Skip("Env var GITHUB_TOKEN_PRIVATE missing")
-	}
-
 	login := "test-identity"
 	author.SetMetadata(metaKeyGithubLogin, login)
 
@@ -178,33 +178,28 @@ func Test_Importer(t *testing.T) {
 			require.NoError(t, err)
 
 			ops := b.Snapshot().Operations
-			assert.Len(t, tt.bug.Operations, len(b.Snapshot().Operations))
+			require.Len(t, tt.bug.Operations, len(b.Snapshot().Operations))
 
 			for i, op := range tt.bug.Operations {
 				require.IsType(t, ops[i], op)
+				require.Equal(t, op.Author().Name(), ops[i].Author().Name())
 
-				switch op.(type) {
+				switch op := op.(type) {
 				case *bug.CreateOperation:
-					assert.Equal(t, op.(*bug.CreateOperation).Title, ops[i].(*bug.CreateOperation).Title)
-					assert.Equal(t, op.(*bug.CreateOperation).Message, ops[i].(*bug.CreateOperation).Message)
-					assert.Equal(t, op.(*bug.CreateOperation).Author.Name(), ops[i].(*bug.CreateOperation).Author.Name())
+					require.Equal(t, op.Title, ops[i].(*bug.CreateOperation).Title)
+					require.Equal(t, op.Message, ops[i].(*bug.CreateOperation).Message)
 				case *bug.SetStatusOperation:
-					assert.Equal(t, op.(*bug.SetStatusOperation).Status, ops[i].(*bug.SetStatusOperation).Status)
-					assert.Equal(t, op.(*bug.SetStatusOperation).Author.Name(), ops[i].(*bug.SetStatusOperation).Author.Name())
+					require.Equal(t, op.Status, ops[i].(*bug.SetStatusOperation).Status)
 				case *bug.SetTitleOperation:
-					assert.Equal(t, op.(*bug.SetTitleOperation).Was, ops[i].(*bug.SetTitleOperation).Was)
-					assert.Equal(t, op.(*bug.SetTitleOperation).Title, ops[i].(*bug.SetTitleOperation).Title)
-					assert.Equal(t, op.(*bug.SetTitleOperation).Author.Name(), ops[i].(*bug.SetTitleOperation).Author.Name())
+					require.Equal(t, op.Was, ops[i].(*bug.SetTitleOperation).Was)
+					require.Equal(t, op.Title, ops[i].(*bug.SetTitleOperation).Title)
 				case *bug.LabelChangeOperation:
-					assert.ElementsMatch(t, op.(*bug.LabelChangeOperation).Added, ops[i].(*bug.LabelChangeOperation).Added)
-					assert.ElementsMatch(t, op.(*bug.LabelChangeOperation).Removed, ops[i].(*bug.LabelChangeOperation).Removed)
-					assert.Equal(t, op.(*bug.LabelChangeOperation).Author.Name(), ops[i].(*bug.LabelChangeOperation).Author.Name())
+					require.ElementsMatch(t, op.Added, ops[i].(*bug.LabelChangeOperation).Added)
+					require.ElementsMatch(t, op.Removed, ops[i].(*bug.LabelChangeOperation).Removed)
 				case *bug.AddCommentOperation:
-					assert.Equal(t, op.(*bug.AddCommentOperation).Message, ops[i].(*bug.AddCommentOperation).Message)
-					assert.Equal(t, op.(*bug.AddCommentOperation).Author.Name(), ops[i].(*bug.AddCommentOperation).Author.Name())
+					require.Equal(t, op.Message, ops[i].(*bug.AddCommentOperation).Message)
 				case *bug.EditCommentOperation:
-					assert.Equal(t, op.(*bug.EditCommentOperation).Message, ops[i].(*bug.EditCommentOperation).Message)
-					assert.Equal(t, op.(*bug.EditCommentOperation).Author.Name(), ops[i].(*bug.EditCommentOperation).Author.Name())
+					require.Equal(t, op.Message, ops[i].(*bug.EditCommentOperation).Message)
 
 				default:
 					panic("unknown operation type")

bridge/gitlab/export.go 🔗

@@ -267,7 +267,7 @@ func (ge *gitlabExporter) exportBug(ctx context.Context, b *cache.BugCache, out
 			continue
 		}
 
-		opAuthor := op.GetAuthor()
+		opAuthor := op.Author()
 		client, err := ge.getIdentityClient(opAuthor.Id())
 		if err != nil {
 			continue

bridge/gitlab/export_test.go 🔗

@@ -134,7 +134,7 @@ func testCases(t *testing.T, repo *cache.RepoCache) []*testCase {
 	}
 }
 
-func TestPushPull(t *testing.T) {
+func TestGitlabPushPull(t *testing.T) {
 	// token must have 'repo' and 'delete_repo' scopes
 	envToken := os.Getenv("GITLAB_API_TOKEN")
 	if envToken == "" {

bridge/gitlab/import.go 🔗

@@ -406,6 +406,7 @@ func (gi *gitlabImporter) ensurePerson(repo *cache.RepoCache, id int) (*cache.Id
 		user.PublicEmail,
 		user.Username,
 		user.AvatarURL,
+		nil,
 		map[string]string{
 			// because Gitlab
 			metaKeyGitlabId:    strconv.Itoa(id),

bridge/gitlab/import_test.go 🔗

@@ -7,7 +7,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	"github.com/MichaelMure/git-bug/bridge/core"
@@ -19,8 +18,28 @@ import (
 	"github.com/MichaelMure/git-bug/util/interrupt"
 )
 
-func TestImport(t *testing.T) {
-	author := identity.NewIdentity("Amine Hilaly", "hilalyamine@gmail.com")
+func TestGitlabImport(t *testing.T) {
+	envToken := os.Getenv("GITLAB_API_TOKEN")
+	if envToken == "" {
+		t.Skip("Env var GITLAB_API_TOKEN missing")
+	}
+
+	projectID := os.Getenv("GITLAB_PROJECT_ID")
+	if projectID == "" {
+		t.Skip("Env var GITLAB_PROJECT_ID missing")
+	}
+
+	repo := repository.CreateGoGitTestRepo(false)
+	defer repository.CleanupTestRepos(repo)
+
+	backend, err := cache.NewRepoCache(repo)
+	require.NoError(t, err)
+
+	defer backend.Close()
+	interrupt.RegisterCleaner(backend.Close)
+
+	author, err := identity.NewIdentity(repo, "Amine Hilaly", "hilalyamine@gmail.com")
+	require.NoError(t, err)
 
 	tests := []struct {
 		name string
@@ -76,25 +95,6 @@ func TestImport(t *testing.T) {
 		},
 	}
 
-	repo := repository.CreateGoGitTestRepo(false)
-	defer repository.CleanupTestRepos(repo)
-
-	backend, err := cache.NewRepoCache(repo)
-	require.NoError(t, err)
-
-	defer backend.Close()
-	interrupt.RegisterCleaner(backend.Close)
-
-	envToken := os.Getenv("GITLAB_API_TOKEN")
-	if envToken == "" {
-		t.Skip("Env var GITLAB_API_TOKEN missing")
-	}
-
-	projectID := os.Getenv("GITLAB_PROJECT_ID")
-	if projectID == "" {
-		t.Skip("Env var GITLAB_PROJECT_ID missing")
-	}
-
 	login := "test-identity"
 	author.SetMetadata(metaKeyGitlabLogin, login)
 
@@ -138,29 +138,24 @@ func TestImport(t *testing.T) {
 			for i, op := range tt.bug.Operations {
 
 				require.IsType(t, ops[i], op)
+				require.Equal(t, op.Author().Name(), ops[i].Author().Name())
 
-				switch op.(type) {
+				switch op := op.(type) {
 				case *bug.CreateOperation:
-					assert.Equal(t, op.(*bug.CreateOperation).Title, ops[i].(*bug.CreateOperation).Title)
-					assert.Equal(t, op.(*bug.CreateOperation).Message, ops[i].(*bug.CreateOperation).Message)
-					assert.Equal(t, op.(*bug.CreateOperation).Author.Name(), ops[i].(*bug.CreateOperation).Author.Name())
+					require.Equal(t, op.Title, ops[i].(*bug.CreateOperation).Title)
+					require.Equal(t, op.Message, ops[i].(*bug.CreateOperation).Message)
 				case *bug.SetStatusOperation:
-					assert.Equal(t, op.(*bug.SetStatusOperation).Status, ops[i].(*bug.SetStatusOperation).Status)
-					assert.Equal(t, op.(*bug.SetStatusOperation).Author.Name(), ops[i].(*bug.SetStatusOperation).Author.Name())
+					require.Equal(t, op.Status, ops[i].(*bug.SetStatusOperation).Status)
 				case *bug.SetTitleOperation:
-					assert.Equal(t, op.(*bug.SetTitleOperation).Was, ops[i].(*bug.SetTitleOperation).Was)
-					assert.Equal(t, op.(*bug.SetTitleOperation).Title, ops[i].(*bug.SetTitleOperation).Title)
-					assert.Equal(t, op.(*bug.SetTitleOperation).Author.Name(), ops[i].(*bug.SetTitleOperation).Author.Name())
+					require.Equal(t, op.Was, ops[i].(*bug.SetTitleOperation).Was)
+					require.Equal(t, op.Title, ops[i].(*bug.SetTitleOperation).Title)
 				case *bug.LabelChangeOperation:
-					assert.ElementsMatch(t, op.(*bug.LabelChangeOperation).Added, ops[i].(*bug.LabelChangeOperation).Added)
-					assert.ElementsMatch(t, op.(*bug.LabelChangeOperation).Removed, ops[i].(*bug.LabelChangeOperation).Removed)
-					assert.Equal(t, op.(*bug.LabelChangeOperation).Author.Name(), ops[i].(*bug.LabelChangeOperation).Author.Name())
+					require.ElementsMatch(t, op.Added, ops[i].(*bug.LabelChangeOperation).Added)
+					require.ElementsMatch(t, op.Removed, ops[i].(*bug.LabelChangeOperation).Removed)
 				case *bug.AddCommentOperation:
-					assert.Equal(t, op.(*bug.AddCommentOperation).Message, ops[i].(*bug.AddCommentOperation).Message)
-					assert.Equal(t, op.(*bug.AddCommentOperation).Author.Name(), ops[i].(*bug.AddCommentOperation).Author.Name())
+					require.Equal(t, op.Message, ops[i].(*bug.AddCommentOperation).Message)
 				case *bug.EditCommentOperation:
-					assert.Equal(t, op.(*bug.EditCommentOperation).Message, ops[i].(*bug.EditCommentOperation).Message)
-					assert.Equal(t, op.(*bug.EditCommentOperation).Author.Name(), ops[i].(*bug.EditCommentOperation).Author.Name())
+					require.Equal(t, op.Message, ops[i].(*bug.EditCommentOperation).Message)
 
 				default:
 					panic("unknown operation type")

bridge/jira/export.go 🔗

@@ -309,7 +309,7 @@ func (je *jiraExporter) exportBug(ctx context.Context, b *cache.BugCache, out ch
 			continue
 		}
 
-		opAuthor := op.GetAuthor()
+		opAuthor := op.Author()
 		client, err := je.getClientForIdentity(opAuthor.Id())
 		if err != nil {
 			out <- core.NewExportError(

bridge/jira/import.go 🔗

@@ -196,6 +196,7 @@ func (ji *jiraImporter) ensurePerson(repo *cache.RepoCache, user User) (*cache.I
 		user.EmailAddress,
 		user.Key,
 		"",
+		nil,
 		map[string]string{
 			metaKeyJiraUser: user.Key,
 		},

bridge/launchpad/import.go 🔗

@@ -35,6 +35,7 @@ func (li *launchpadImporter) ensurePerson(repo *cache.RepoCache, owner LPPerson)
 		"",
 		owner.Login,
 		"",
+		nil,
 		map[string]string{
 			metaKeyLaunchpadLogin: owner.Login,
 		},

bug/bug.go 🔗

@@ -2,277 +2,62 @@
 package bug
 
 import (
-	"encoding/json"
 	"fmt"
-	"strings"
-
-	"github.com/pkg/errors"
 
 	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/entity/dag"
 	"github.com/MichaelMure/git-bug/identity"
 	"github.com/MichaelMure/git-bug/repository"
-	"github.com/MichaelMure/git-bug/util/lamport"
 )
 
-const bugsRefPattern = "refs/bugs/"
-const bugsRemoteRefPattern = "refs/remotes/%s/bugs/"
-
-const opsEntryName = "ops"
-const rootEntryName = "root"
-const mediaEntryName = "media"
-
-const createClockEntryPrefix = "create-clock-"
-const createClockEntryPattern = "create-clock-%d"
-const editClockEntryPrefix = "edit-clock-"
-const editClockEntryPattern = "edit-clock-%d"
-
-const creationClockName = "bug-create"
-const editClockName = "bug-edit"
-
-var ErrBugNotExist = errors.New("bug doesn't exist")
+var _ Interface = &Bug{}
+var _ entity.Interface = &Bug{}
 
-func NewErrMultipleMatchBug(matching []entity.Id) *entity.ErrMultipleMatch {
-	return entity.NewErrMultipleMatch("bug", matching)
-}
+// 1: original format
+// 2: no more legacy identities
+// 3: Ids are generated from the create operation serialized data instead of from the first git commit
+// 4: with DAG entity framework
+const formatVersion = 4
 
-func NewErrMultipleMatchOp(matching []entity.Id) *entity.ErrMultipleMatch {
-	return entity.NewErrMultipleMatch("operation", matching)
+var def = dag.Definition{
+	Typename:             "bug",
+	Namespace:            "bugs",
+	OperationUnmarshaler: operationUnmarshaller,
+	FormatVersion:        formatVersion,
 }
 
-var _ Interface = &Bug{}
-var _ entity.Interface = &Bug{}
+var ClockLoader = dag.ClockLoader(def)
 
 // Bug hold the data of a bug thread, organized in a way close to
 // how it will be persisted inside Git. This is the data structure
 // used to merge two different version of the same Bug.
 type Bug struct {
-
-	// A Lamport clock is a logical clock that allow to order event
-	// inside a distributed system.
-	// It must be the first field in this struct due to https://github.com/golang/go/issues/599
-	createTime lamport.Time
-	editTime   lamport.Time
-
-	// Id used as unique identifier
-	id entity.Id
-
-	lastCommit repository.Hash
-	rootPack   repository.Hash
-
-	// all the committed operations
-	packs []OperationPack
-
-	// a temporary pack of operations used for convenience to pile up new operations
-	// before a commit
-	staging OperationPack
+	*dag.Entity
 }
 
 // NewBug create a new Bug
 func NewBug() *Bug {
-	// No id yet
-	// No logical clock yet
-	return &Bug{}
-}
-
-// ReadLocal will read a local bug from its hash
-func ReadLocal(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
-	ref := bugsRefPattern + id.String()
-	return read(repo, identity.NewSimpleResolver(repo), ref)
-}
-
-// ReadLocalWithResolver will read a local bug from its hash
-func ReadLocalWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, id entity.Id) (*Bug, error) {
-	ref := bugsRefPattern + id.String()
-	return read(repo, identityResolver, ref)
-}
-
-// ReadRemote will read a remote bug from its hash
-func ReadRemote(repo repository.ClockedRepo, remote string, id entity.Id) (*Bug, error) {
-	ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id.String()
-	return read(repo, identity.NewSimpleResolver(repo), ref)
-}
-
-// ReadRemoteWithResolver will read a remote bug from its hash
-func ReadRemoteWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, remote string, id entity.Id) (*Bug, error) {
-	ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id.String()
-	return read(repo, identityResolver, ref)
-}
-
-// read will read and parse a Bug from git
-func read(repo repository.ClockedRepo, identityResolver identity.Resolver, ref string) (*Bug, error) {
-	refSplit := strings.Split(ref, "/")
-	id := entity.Id(refSplit[len(refSplit)-1])
-
-	if err := id.Validate(); err != nil {
-		return nil, errors.Wrap(err, "invalid ref ")
-	}
-
-	hashes, err := repo.ListCommits(ref)
-
-	// TODO: this is not perfect, it might be a command invoke error
-	if err != nil {
-		return nil, ErrBugNotExist
-	}
-
-	bug := Bug{
-		id:       id,
-		editTime: 0,
-	}
-
-	// Load each OperationPack
-	for _, hash := range hashes {
-		entries, err := repo.ReadTree(hash)
-		if err != nil {
-			return nil, errors.Wrap(err, "can't list git tree entries")
-		}
-
-		bug.lastCommit = hash
-
-		var opsEntry repository.TreeEntry
-		opsFound := false
-		var rootEntry repository.TreeEntry
-		rootFound := false
-		var createTime uint64
-		var editTime uint64
-
-		for _, entry := range entries {
-			if entry.Name == opsEntryName {
-				opsEntry = entry
-				opsFound = true
-				continue
-			}
-			if entry.Name == rootEntryName {
-				rootEntry = entry
-				rootFound = true
-			}
-			if strings.HasPrefix(entry.Name, createClockEntryPrefix) {
-				n, err := fmt.Sscanf(entry.Name, createClockEntryPattern, &createTime)
-				if err != nil {
-					return nil, errors.Wrap(err, "can't read create lamport time")
-				}
-				if n != 1 {
-					return nil, fmt.Errorf("could not parse create time lamport value")
-				}
-			}
-			if strings.HasPrefix(entry.Name, editClockEntryPrefix) {
-				n, err := fmt.Sscanf(entry.Name, editClockEntryPattern, &editTime)
-				if err != nil {
-					return nil, errors.Wrap(err, "can't read edit lamport time")
-				}
-				if n != 1 {
-					return nil, fmt.Errorf("could not parse edit time lamport value")
-				}
-			}
-		}
-
-		if !opsFound {
-			return nil, errors.New("invalid tree, missing the ops entry")
-		}
-		if !rootFound {
-			return nil, errors.New("invalid tree, missing the root entry")
-		}
-
-		if bug.rootPack == "" {
-			bug.rootPack = rootEntry.Hash
-			bug.createTime = lamport.Time(createTime)
-		}
-
-		// Due to rebase, edit Lamport time are not necessarily ordered
-		if editTime > uint64(bug.editTime) {
-			bug.editTime = lamport.Time(editTime)
-		}
-
-		// Update the clocks
-		createClock, err := repo.GetOrCreateClock(creationClockName)
-		if err != nil {
-			return nil, err
-		}
-		if err := createClock.Witness(bug.createTime); err != nil {
-			return nil, errors.Wrap(err, "failed to update create lamport clock")
-		}
-		editClock, err := repo.GetOrCreateClock(editClockName)
-		if err != nil {
-			return nil, err
-		}
-		if err := editClock.Witness(bug.editTime); err != nil {
-			return nil, errors.Wrap(err, "failed to update edit lamport clock")
-		}
-
-		data, err := repo.ReadData(opsEntry.Hash)
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to read git blob data")
-		}
-
-		opp := &OperationPack{}
-		err = json.Unmarshal(data, &opp)
-
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to decode OperationPack json")
-		}
-
-		// tag the pack with the commit hash
-		opp.commitHash = hash
-
-		bug.packs = append(bug.packs, *opp)
+	return &Bug{
+		Entity: dag.New(def),
 	}
+}
 
-	// Make sure that the identities are properly loaded
-	err = bug.EnsureIdentities(identityResolver)
+// Read will read a bug from a repository
+func Read(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
+	e, err := dag.Read(def, repo, identity.NewSimpleResolver(repo), id)
 	if err != nil {
 		return nil, err
 	}
-
-	return &bug, nil
+	return &Bug{Entity: e}, nil
 }
 
-// RemoveBug will remove a local bug from its entity.Id
-func RemoveBug(repo repository.ClockedRepo, id entity.Id) error {
-	var fullMatches []string
-
-	refs, err := repo.ListRefs(bugsRefPattern + id.String())
-	if err != nil {
-		return err
-	}
-	if len(refs) > 1 {
-		return NewErrMultipleMatchBug(entity.RefsToIds(refs))
-	}
-	if len(refs) == 1 {
-		// we have the bug locally
-		fullMatches = append(fullMatches, refs[0])
-	}
-
-	remotes, err := repo.GetRemotes()
+// ReadWithResolver will read a bug from its Id, with a custom identity.Resolver
+func ReadWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, id entity.Id) (*Bug, error) {
+	e, err := dag.Read(def, repo, identityResolver, id)
 	if err != nil {
-		return err
-	}
-
-	for remote := range remotes {
-		remotePrefix := fmt.Sprintf(bugsRemoteRefPattern+id.String(), remote)
-		remoteRefs, err := repo.ListRefs(remotePrefix)
-		if err != nil {
-			return err
-		}
-		if len(remoteRefs) > 1 {
-			return NewErrMultipleMatchBug(entity.RefsToIds(refs))
-		}
-		if len(remoteRefs) == 1 {
-			// found the bug in a remote
-			fullMatches = append(fullMatches, remoteRefs[0])
-		}
-	}
-
-	if len(fullMatches) == 0 {
-		return ErrBugNotExist
-	}
-
-	for _, ref := range fullMatches {
-		err = repo.RemoveRef(ref)
-		if err != nil {
-			return err
-		}
+		return nil, err
 	}
-
-	return nil
+	return &Bug{Entity: e}, nil
 }
 
 type StreamedBug struct {
@@ -280,50 +65,33 @@ type StreamedBug struct {
 	Err error
 }
 
-// ReadAllLocal read and parse all local bugs
-func ReadAllLocal(repo repository.ClockedRepo) <-chan StreamedBug {
-	return readAll(repo, identity.NewSimpleResolver(repo), bugsRefPattern)
-}
-
-// ReadAllLocalWithResolver read and parse all local bugs
-func ReadAllLocalWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver) <-chan StreamedBug {
-	return readAll(repo, identityResolver, bugsRefPattern)
-}
-
-// ReadAllRemote read and parse all remote bugs for a given remote
-func ReadAllRemote(repo repository.ClockedRepo, remote string) <-chan StreamedBug {
-	refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
-	return readAll(repo, identity.NewSimpleResolver(repo), refPrefix)
+// ReadAll reads and parses all local bugs
+func ReadAll(repo repository.ClockedRepo) <-chan StreamedBug {
+	return readAll(repo, identity.NewSimpleResolver(repo))
 }
 
-// ReadAllRemoteWithResolver read and parse all remote bugs for a given remote
-func ReadAllRemoteWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, remote string) <-chan StreamedBug {
-	refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
-	return readAll(repo, identityResolver, refPrefix)
+// ReadAllWithResolver reads and parses all local bugs, with a custom identity.Resolver
+func ReadAllWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver) <-chan StreamedBug {
+	return readAll(repo, identityResolver)
 }
 
 // Read and parse all available bug with a given ref prefix
-func readAll(repo repository.ClockedRepo, identityResolver identity.Resolver, refPrefix string) <-chan StreamedBug {
+func readAll(repo repository.ClockedRepo, identityResolver identity.Resolver) <-chan StreamedBug {
 	out := make(chan StreamedBug)
 
 	go func() {
 		defer close(out)
 
-		refs, err := repo.ListRefs(refPrefix)
-		if err != nil {
-			out <- StreamedBug{Err: err}
-			return
-		}
-
-		for _, ref := range refs {
-			b, err := read(repo, identityResolver, ref)
-
-			if err != nil {
-				out <- StreamedBug{Err: err}
-				return
+		for streamedEntity := range dag.ReadAll(def, repo, identityResolver) {
+			if streamedEntity.Err != nil {
+				out <- StreamedBug{
+					Err: streamedEntity.Err,
+				}
+			} else {
+				out <- StreamedBug{
+					Bug: &Bug{Entity: streamedEntity.Entity},
+				}
 			}
-
-			out <- StreamedBug{Bug: b}
 		}
 	}()
 
@@ -332,399 +100,78 @@ func readAll(repo repository.ClockedRepo, identityResolver identity.Resolver, re
 
 // ListLocalIds list all the available local bug ids
 func ListLocalIds(repo repository.Repo) ([]entity.Id, error) {
-	refs, err := repo.ListRefs(bugsRefPattern)
-	if err != nil {
-		return nil, err
-	}
-
-	return entity.RefsToIds(refs), nil
+	return dag.ListLocalIds(def, repo)
 }
 
 // Validate check if the Bug data is valid
 func (bug *Bug) Validate() error {
-	// non-empty
-	if len(bug.packs) == 0 && bug.staging.IsEmpty() {
-		return fmt.Errorf("bug has no operations")
-	}
-
-	// check if each pack and operations are valid
-	for _, pack := range bug.packs {
-		if err := pack.Validate(); err != nil {
-			return err
-		}
-	}
-
-	// check if staging is valid if needed
-	if !bug.staging.IsEmpty() {
-		if err := bug.staging.Validate(); err != nil {
-			return errors.Wrap(err, "staging")
-		}
+	if err := bug.Entity.Validate(); err != nil {
+		return err
 	}
 
 	// The very first Op should be a CreateOp
 	firstOp := bug.FirstOp()
-	if firstOp == nil || firstOp.base().OperationType != CreateOp {
+	if firstOp == nil || firstOp.Type() != CreateOp {
 		return fmt.Errorf("first operation should be a Create op")
 	}
 
-	// The bug Id should be the hash of the first commit
-	if len(bug.packs) > 0 && string(bug.packs[0].commitHash) != bug.id.String() {
-		return fmt.Errorf("bug id should be the first commit hash")
-	}
-
 	// Check that there is no more CreateOp op
-	// Check that there is no colliding operation's ID
-	it := NewOperationIterator(bug)
-	createCount := 0
-	ids := make(map[entity.Id]struct{})
-	for it.Next() {
-		if it.Value().base().OperationType == CreateOp {
-			createCount++
+	for i, op := range bug.Operations() {
+		if i == 0 {
+			continue
 		}
-		if _, ok := ids[it.Value().Id()]; ok {
-			return fmt.Errorf("id collision: %s", it.Value().Id())
+		if op.Type() == CreateOp {
+			return fmt.Errorf("only one Create op allowed")
 		}
-		ids[it.Value().Id()] = struct{}{}
-	}
-
-	if createCount != 1 {
-		return fmt.Errorf("only one Create op allowed")
 	}
 
 	return nil
 }
 
-// Append an operation into the staging area, to be committed later
+// Append adds a new Operation to the Bug
 func (bug *Bug) Append(op Operation) {
-	bug.staging.Append(op)
+	bug.Entity.Append(op)
 }
 
-// Commit write the staging area in Git and move the operations to the packs
-func (bug *Bug) Commit(repo repository.ClockedRepo) error {
-
-	if !bug.NeedCommit() {
-		return fmt.Errorf("can't commit a bug with no pending operation")
-	}
-
-	if err := bug.Validate(); err != nil {
-		return errors.Wrap(err, "can't commit a bug with invalid data")
+// Operations returns the ordered operations
+func (bug *Bug) Operations() []Operation {
+	source := bug.Entity.Operations()
+	result := make([]Operation, len(source))
+	for i, op := range source {
+		result[i] = op.(Operation)
 	}
-
-	// Write the Ops as a Git blob containing the serialized array
-	hash, err := bug.staging.Write(repo)
-	if err != nil {
-		return err
-	}
-
-	if bug.rootPack == "" {
-		bug.rootPack = hash
-	}
-
-	// Make a Git tree referencing this blob
-	tree := []repository.TreeEntry{
-		// the last pack of ops
-		{ObjectType: repository.Blob, Hash: hash, Name: opsEntryName},
-		// always the first pack of ops (might be the same)
-		{ObjectType: repository.Blob, Hash: bug.rootPack, Name: rootEntryName},
-	}
-
-	// Reference, if any, all the files required by the ops
-	// Git will check that they actually exist in the storage and will make sure
-	// to push/pull them as needed.
-	mediaTree := makeMediaTree(bug.staging)
-	if len(mediaTree) > 0 {
-		mediaTreeHash, err := repo.StoreTree(mediaTree)
-		if err != nil {
-			return err
-		}
-		tree = append(tree, repository.TreeEntry{
-			ObjectType: repository.Tree,
-			Hash:       mediaTreeHash,
-			Name:       mediaEntryName,
-		})
-	}
-
-	// Store the logical clocks as well
-	// --> edit clock for each OperationPack/commits
-	// --> create clock only for the first OperationPack/commits
-	//
-	// To avoid having one blob for each clock value, clocks are serialized
-	// directly into the entry name
-	emptyBlobHash, err := repo.StoreData([]byte{})
-	if err != nil {
-		return err
-	}
-
-	editClock, err := repo.GetOrCreateClock(editClockName)
-	if err != nil {
-		return err
-	}
-	bug.editTime, err = editClock.Increment()
-	if err != nil {
-		return err
-	}
-
-	tree = append(tree, repository.TreeEntry{
-		ObjectType: repository.Blob,
-		Hash:       emptyBlobHash,
-		Name:       fmt.Sprintf(editClockEntryPattern, bug.editTime),
-	})
-	if bug.lastCommit == "" {
-		createClock, err := repo.GetOrCreateClock(creationClockName)
-		if err != nil {
-			return err
-		}
-		bug.createTime, err = createClock.Increment()
-		if err != nil {
-			return err
-		}
-
-		tree = append(tree, repository.TreeEntry{
-			ObjectType: repository.Blob,
-			Hash:       emptyBlobHash,
-			Name:       fmt.Sprintf(createClockEntryPattern, bug.createTime),
-		})
-	}
-
-	// Store the tree
-	hash, err = repo.StoreTree(tree)
-	if err != nil {
-		return err
-	}
-
-	// Write a Git commit referencing the tree, with the previous commit as parent
-	if bug.lastCommit != "" {
-		hash, err = repo.StoreCommitWithParent(hash, bug.lastCommit)
-	} else {
-		hash, err = repo.StoreCommit(hash)
-	}
-
-	if err != nil {
-		return err
-	}
-
-	bug.lastCommit = hash
-
-	// if it was the first commit, use the commit hash as bug id
-	if bug.id == "" {
-		bug.id = entity.Id(hash)
-	}
-
-	// Create or update the Git reference for this bug
-	// When pushing later, the remote will ensure that this ref update
-	// is fast-forward, that is no data has been overwritten
-	ref := fmt.Sprintf("%s%s", bugsRefPattern, bug.id)
-	err = repo.UpdateRef(ref, hash)
-
-	if err != nil {
-		return err
-	}
-
-	bug.staging.commitHash = hash
-	bug.packs = append(bug.packs, bug.staging)
-	bug.staging = OperationPack{}
-
-	return nil
-}
-
-func (bug *Bug) CommitAsNeeded(repo repository.ClockedRepo) error {
-	if !bug.NeedCommit() {
-		return nil
-	}
-	return bug.Commit(repo)
-}
-
-func (bug *Bug) NeedCommit() bool {
-	return !bug.staging.IsEmpty()
+	return result
 }
 
-func makeMediaTree(pack OperationPack) []repository.TreeEntry {
-	var tree []repository.TreeEntry
-	counter := 0
-	added := make(map[repository.Hash]interface{})
-
-	for _, ops := range pack.Operations {
-		for _, file := range ops.GetFiles() {
-			if _, has := added[file]; !has {
-				tree = append(tree, repository.TreeEntry{
-					ObjectType: repository.Blob,
-					Hash:       file,
-					// The name is not important here, we only need to
-					// reference the blob.
-					Name: fmt.Sprintf("file%d", counter),
-				})
-				counter++
-				added[file] = struct{}{}
-			}
-		}
-	}
-
-	return tree
-}
-
-// Merge a different version of the same bug by rebasing operations of this bug
-// that are not present in the other on top of the chain of operations of the
-// other version.
-func (bug *Bug) Merge(repo repository.Repo, other Interface) (bool, error) {
-	var otherBug = bugFromInterface(other)
-
-	// Note: a faster merge should be possible without actually reading and parsing
-	// all operations pack of our side.
-	// Reading the other side is still necessary to validate remote data, at least
-	// for new operations
-
-	if bug.id != otherBug.id {
-		return false, errors.New("merging unrelated bugs is not supported")
-	}
-
-	if len(otherBug.staging.Operations) > 0 {
-		return false, errors.New("merging a bug with a non-empty staging is not supported")
-	}
-
-	if bug.lastCommit == "" || otherBug.lastCommit == "" {
-		return false, errors.New("can't merge a bug that has never been stored")
-	}
-
-	ancestor, err := repo.FindCommonAncestor(bug.lastCommit, otherBug.lastCommit)
-	if err != nil {
-		return false, errors.Wrap(err, "can't find common ancestor")
-	}
-
-	ancestorIndex := 0
-	newPacks := make([]OperationPack, 0, len(bug.packs))
-
-	// Find the root of the rebase
-	for i, pack := range bug.packs {
-		newPacks = append(newPacks, pack)
-
-		if pack.commitHash == ancestor {
-			ancestorIndex = i
-			break
-		}
-	}
-
-	if len(otherBug.packs) == ancestorIndex+1 {
-		// Nothing to rebase, return early
-		return false, nil
-	}
-
-	// get other bug's extra packs
-	for i := ancestorIndex + 1; i < len(otherBug.packs); i++ {
-		// clone is probably not necessary
-		newPack := otherBug.packs[i].Clone()
-
-		newPacks = append(newPacks, newPack)
-		bug.lastCommit = newPack.commitHash
-	}
-
-	// rebase our extra packs
-	for i := ancestorIndex + 1; i < len(bug.packs); i++ {
-		pack := bug.packs[i]
-
-		// get the referenced git tree
-		treeHash, err := repo.GetTreeHash(pack.commitHash)
-
-		if err != nil {
-			return false, err
-		}
-
-		// create a new commit with the correct ancestor
-		hash, err := repo.StoreCommitWithParent(treeHash, bug.lastCommit)
-
-		if err != nil {
-			return false, err
-		}
-
-		// replace the pack
-		newPack := pack.Clone()
-		newPack.commitHash = hash
-		newPacks = append(newPacks, newPack)
-
-		// update the bug
-		bug.lastCommit = hash
-	}
-
-	bug.packs = newPacks
-
-	// Update the git ref
-	err = repo.UpdateRef(bugsRefPattern+bug.id.String(), bug.lastCommit)
-	if err != nil {
-		return false, err
+// Compile compiles the bug into an easily usable snapshot
+func (bug *Bug) Compile() Snapshot {
+	snap := Snapshot{
+		id:     bug.Id(),
+		Status: OpenStatus,
 	}
 
-	return true, nil
-}
-
-// Id return the Bug identifier
-func (bug *Bug) Id() entity.Id {
-	if bug.id == "" {
-		// simply panic as it would be a coding error
-		// (using an id of a bug not stored yet)
-		panic("no id yet")
+	for _, op := range bug.Operations() {
+		op.Apply(&snap)
+		snap.Operations = append(snap.Operations, op)
 	}
-	return bug.id
-}
-
-// CreateLamportTime return the Lamport time of creation
-func (bug *Bug) CreateLamportTime() lamport.Time {
-	return bug.createTime
-}
 
-// EditLamportTime return the Lamport time of the last edit
-func (bug *Bug) EditLamportTime() lamport.Time {
-	return bug.editTime
+	return snap
 }
 
 // Lookup for the very first operation of the bug.
 // For a valid Bug, this operation should be a CreateOp
 func (bug *Bug) FirstOp() Operation {
-	for _, pack := range bug.packs {
-		for _, op := range pack.Operations {
-			return op
-		}
-	}
-
-	if !bug.staging.IsEmpty() {
-		return bug.staging.Operations[0]
+	if fo := bug.Entity.FirstOp(); fo != nil {
+		return fo.(Operation)
 	}
-
 	return nil
 }
 
 // Lookup for the very last operation of the bug.
 // For a valid Bug, should never be nil
 func (bug *Bug) LastOp() Operation {
-	if !bug.staging.IsEmpty() {
-		return bug.staging.Operations[len(bug.staging.Operations)-1]
-	}
-
-	if len(bug.packs) == 0 {
-		return nil
-	}
-
-	lastPack := bug.packs[len(bug.packs)-1]
-
-	if len(lastPack.Operations) == 0 {
-		return nil
-	}
-
-	return lastPack.Operations[len(lastPack.Operations)-1]
-}
-
-// Compile a bug in a easily usable snapshot
-func (bug *Bug) Compile() Snapshot {
-	snap := Snapshot{
-		id:     bug.id,
-		Status: OpenStatus,
+	if lo := bug.Entity.LastOp(); lo != nil {
+		return lo.(Operation)
 	}
-
-	it := NewOperationIterator(bug)
-
-	for it.Next() {
-		op := it.Value()
-		op.Apply(&snap)
-		snap.Operations = append(snap.Operations, op)
-	}
-
-	return snap
+	return nil
 }

bug/bug_actions.go 🔗

@@ -1,42 +1,34 @@
 package bug
 
 import (
-	"fmt"
-	"strings"
+	"github.com/pkg/errors"
 
 	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/entity/dag"
 	"github.com/MichaelMure/git-bug/identity"
 	"github.com/MichaelMure/git-bug/repository"
-	"github.com/pkg/errors"
 )
 
 // Fetch retrieve updates from a remote
 // This does not change the local bugs state
 func Fetch(repo repository.Repo, remote string) (string, error) {
-	// "refs/bugs/*:refs/remotes/<remote>>/bugs/*"
-	remoteRefSpec := fmt.Sprintf(bugsRemoteRefPattern, remote)
-	fetchRefSpec := fmt.Sprintf("%s*:%s*", bugsRefPattern, remoteRefSpec)
-
-	return repo.FetchRefs(remote, fetchRefSpec)
+	return dag.Fetch(def, repo, remote)
 }
 
 // Push update a remote with the local changes
 func Push(repo repository.Repo, remote string) (string, error) {
-	// "refs/bugs/*:refs/bugs/*"
-	refspec := fmt.Sprintf("%s*:%s*", bugsRefPattern, bugsRefPattern)
-
-	return repo.PushRefs(remote, refspec)
+	return dag.Push(def, repo, remote)
 }
 
 // Pull will do a Fetch + MergeAll
 // This function will return an error if a merge fail
-func Pull(repo repository.ClockedRepo, remote string) error {
+func Pull(repo repository.ClockedRepo, remote string, author identity.Interface) error {
 	_, err := Fetch(repo, remote)
 	if err != nil {
 		return err
 	}
 
-	for merge := range MergeAll(repo, remote) {
+	for merge := range MergeAll(repo, remote, author) {
 		if merge.Err != nil {
 			return merge.Err
 		}
@@ -48,96 +40,38 @@ func Pull(repo repository.ClockedRepo, remote string) error {
 	return nil
 }
 
-// MergeAll will merge all the available remote bug:
-//
-// - If the remote has new commit, the local bug is updated to match the same history
-//   (fast-forward update)
-// - if the local bug has new commits but the remote don't, nothing is changed
-// - if both local and remote bug have new commits (that is, we have a concurrent edition),
-//   new local commits are rewritten at the head of the remote history (that is, a rebase)
-func MergeAll(repo repository.ClockedRepo, remote string) <-chan entity.MergeResult {
-	out := make(chan entity.MergeResult)
-
+// MergeAll will merge all the available remote bugs
+// Note: an author is necessary for the case where a merge commit is created, as this commit will
+// have an author and may be signed if a signing key is available.
+func MergeAll(repo repository.ClockedRepo, remote string, author identity.Interface) <-chan entity.MergeResult {
 	// no caching for the merge, we load everything from git even if that means multiple
 	// copy of the same entity in memory. The cache layer will intercept the results to
 	// invalidate entities if necessary.
 	identityResolver := identity.NewSimpleResolver(repo)
 
+	out := make(chan entity.MergeResult)
+
 	go func() {
 		defer close(out)
 
-		remoteRefSpec := fmt.Sprintf(bugsRemoteRefPattern, remote)
-		remoteRefs, err := repo.ListRefs(remoteRefSpec)
+		results := dag.MergeAll(def, repo, identityResolver, remote, author)
 
-		if err != nil {
-			out <- entity.MergeResult{Err: err}
-			return
-		}
-
-		for _, remoteRef := range remoteRefs {
-			refSplit := strings.Split(remoteRef, "/")
-			id := entity.Id(refSplit[len(refSplit)-1])
-
-			if err := id.Validate(); err != nil {
-				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "invalid ref").Error())
-				continue
-			}
-
-			remoteBug, err := read(repo, identityResolver, remoteRef)
-
-			if err != nil {
-				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "remote bug is not readable").Error())
-				continue
-			}
-
-			// Check for error in remote data
-			if err := remoteBug.Validate(); err != nil {
-				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "remote bug is invalid").Error())
-				continue
-			}
-
-			localRef := bugsRefPattern + remoteBug.Id().String()
-			localExist, err := repo.RefExist(localRef)
-
-			if err != nil {
-				out <- entity.NewMergeError(err, id)
-				continue
-			}
-
-			// the bug is not local yet, simply create the reference
-			if !localExist {
-				err := repo.CopyRef(remoteRef, localRef)
-
-				if err != nil {
-					out <- entity.NewMergeError(err, id)
-					return
+		// wrap the dag.Entity into a complete Bug
+		for result := range results {
+			result := result
+			if result.Entity != nil {
+				result.Entity = &Bug{
+					Entity: result.Entity.(*dag.Entity),
 				}
-
-				out <- entity.NewMergeStatus(entity.MergeStatusNew, id, remoteBug)
-				continue
-			}
-
-			localBug, err := read(repo, identityResolver, localRef)
-
-			if err != nil {
-				out <- entity.NewMergeError(errors.Wrap(err, "local bug is not readable"), id)
-				return
-			}
-
-			updated, err := localBug.Merge(repo, remoteBug)
-
-			if err != nil {
-				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "merge failed").Error())
-				return
-			}
-
-			if updated {
-				out <- entity.NewMergeStatus(entity.MergeStatusUpdated, id, localBug)
-			} else {
-				out <- entity.NewMergeStatus(entity.MergeStatusNothing, id, localBug)
 			}
+			out <- result
 		}
 	}()
 
 	return out
 }
+
+// RemoveBug will remove a local bug from its entity.Id
+func RemoveBug(repo repository.ClockedRepo, id entity.Id) error {
+	return dag.Remove(def, repo, id)
+}

bug/bug_actions_test.go 🔗

@@ -1,390 +0,0 @@
-package bug
-
-import (
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"github.com/MichaelMure/git-bug/identity"
-	"github.com/MichaelMure/git-bug/repository"
-)
-
-func TestPushPull(t *testing.T) {
-	repoA, repoB, remote := repository.SetupReposAndRemote()
-	defer repository.CleanupTestRepos(repoA, repoB, remote)
-
-	reneA := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := reneA.Commit(repoA)
-	require.NoError(t, err)
-
-	bug1, _, err := Create(reneA, time.Now().Unix(), "bug1", "message")
-	require.NoError(t, err)
-	assert.True(t, bug1.NeedCommit())
-	err = bug1.Commit(repoA)
-	require.NoError(t, err)
-	assert.False(t, bug1.NeedCommit())
-
-	// distribute the identity
-	_, err = identity.Push(repoA, "origin")
-	require.NoError(t, err)
-	err = identity.Pull(repoB, "origin")
-	require.NoError(t, err)
-
-	// A --> remote --> B
-	_, err = Push(repoA, "origin")
-	require.NoError(t, err)
-
-	err = Pull(repoB, "origin")
-	require.NoError(t, err)
-
-	bugs := allBugs(t, ReadAllLocal(repoB))
-
-	if len(bugs) != 1 {
-		t.Fatal("Unexpected number of bugs")
-	}
-
-	// B --> remote --> A
-	reneB, err := identity.ReadLocal(repoA, reneA.Id())
-	require.NoError(t, err)
-
-	bug2, _, err := Create(reneB, time.Now().Unix(), "bug2", "message")
-	require.NoError(t, err)
-	err = bug2.Commit(repoB)
-	require.NoError(t, err)
-
-	_, err = Push(repoB, "origin")
-	require.NoError(t, err)
-
-	err = Pull(repoA, "origin")
-	require.NoError(t, err)
-
-	bugs = allBugs(t, ReadAllLocal(repoA))
-
-	if len(bugs) != 2 {
-		t.Fatal("Unexpected number of bugs")
-	}
-}
-
-func allBugs(t testing.TB, bugs <-chan StreamedBug) []*Bug {
-	var result []*Bug
-	for streamed := range bugs {
-		if streamed.Err != nil {
-			t.Fatal(streamed.Err)
-		}
-		result = append(result, streamed.Bug)
-	}
-	return result
-}
-
-func TestRebaseTheirs(t *testing.T) {
-	_RebaseTheirs(t)
-}
-
-func BenchmarkRebaseTheirs(b *testing.B) {
-	for n := 0; n < b.N; n++ {
-		_RebaseTheirs(b)
-	}
-}
-
-func _RebaseTheirs(t testing.TB) {
-	repoA, repoB, remote := repository.SetupReposAndRemote()
-	defer repository.CleanupTestRepos(repoA, repoB, remote)
-
-	reneA := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := reneA.Commit(repoA)
-	require.NoError(t, err)
-
-	bug1, _, err := Create(reneA, time.Now().Unix(), "bug1", "message")
-	require.NoError(t, err)
-	assert.True(t, bug1.NeedCommit())
-	err = bug1.Commit(repoA)
-	require.NoError(t, err)
-	assert.False(t, bug1.NeedCommit())
-
-	// distribute the identity
-	_, err = identity.Push(repoA, "origin")
-	require.NoError(t, err)
-	err = identity.Pull(repoB, "origin")
-	require.NoError(t, err)
-
-	// A --> remote
-
-	_, err = Push(repoA, "origin")
-	require.NoError(t, err)
-
-	// remote --> B
-	err = Pull(repoB, "origin")
-	require.NoError(t, err)
-
-	bug2, err := ReadLocal(repoB, bug1.Id())
-	require.NoError(t, err)
-	assert.False(t, bug2.NeedCommit())
-
-	reneB, err := identity.ReadLocal(repoA, reneA.Id())
-	require.NoError(t, err)
-
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message2")
-	require.NoError(t, err)
-	assert.True(t, bug2.NeedCommit())
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message3")
-	require.NoError(t, err)
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message4")
-	require.NoError(t, err)
-	err = bug2.Commit(repoB)
-	require.NoError(t, err)
-	assert.False(t, bug2.NeedCommit())
-
-	// B --> remote
-	_, err = Push(repoB, "origin")
-	require.NoError(t, err)
-
-	// remote --> A
-	err = Pull(repoA, "origin")
-	require.NoError(t, err)
-
-	bugs := allBugs(t, ReadAllLocal(repoB))
-
-	if len(bugs) != 1 {
-		t.Fatal("Unexpected number of bugs")
-	}
-
-	bug3, err := ReadLocal(repoA, bug1.Id())
-	require.NoError(t, err)
-
-	if nbOps(bug3) != 4 {
-		t.Fatal("Unexpected number of operations")
-	}
-}
-
-func TestRebaseOurs(t *testing.T) {
-	_RebaseOurs(t)
-}
-
-func BenchmarkRebaseOurs(b *testing.B) {
-	for n := 0; n < b.N; n++ {
-		_RebaseOurs(b)
-	}
-}
-
-func _RebaseOurs(t testing.TB) {
-	repoA, repoB, remote := repository.SetupReposAndRemote()
-	defer repository.CleanupTestRepos(repoA, repoB, remote)
-
-	reneA := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := reneA.Commit(repoA)
-	require.NoError(t, err)
-
-	bug1, _, err := Create(reneA, time.Now().Unix(), "bug1", "message")
-	require.NoError(t, err)
-	err = bug1.Commit(repoA)
-	require.NoError(t, err)
-
-	// distribute the identity
-	_, err = identity.Push(repoA, "origin")
-	require.NoError(t, err)
-	err = identity.Pull(repoB, "origin")
-	require.NoError(t, err)
-
-	// A --> remote
-	_, err = Push(repoA, "origin")
-	require.NoError(t, err)
-
-	// remote --> B
-	err = Pull(repoB, "origin")
-	require.NoError(t, err)
-
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message2")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message3")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message4")
-	require.NoError(t, err)
-	err = bug1.Commit(repoA)
-	require.NoError(t, err)
-
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message5")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message6")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message7")
-	require.NoError(t, err)
-	err = bug1.Commit(repoA)
-	require.NoError(t, err)
-
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message8")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message9")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message10")
-	require.NoError(t, err)
-	err = bug1.Commit(repoA)
-	require.NoError(t, err)
-
-	// remote --> A
-	err = Pull(repoA, "origin")
-	require.NoError(t, err)
-
-	bugs := allBugs(t, ReadAllLocal(repoA))
-
-	if len(bugs) != 1 {
-		t.Fatal("Unexpected number of bugs")
-	}
-
-	bug2, err := ReadLocal(repoA, bug1.Id())
-	require.NoError(t, err)
-
-	if nbOps(bug2) != 10 {
-		t.Fatal("Unexpected number of operations")
-	}
-}
-
-func nbOps(b *Bug) int {
-	it := NewOperationIterator(b)
-	counter := 0
-	for it.Next() {
-		counter++
-	}
-	return counter
-}
-
-func TestRebaseConflict(t *testing.T) {
-	_RebaseConflict(t)
-}
-
-func BenchmarkRebaseConflict(b *testing.B) {
-	for n := 0; n < b.N; n++ {
-		_RebaseConflict(b)
-	}
-}
-
-func _RebaseConflict(t testing.TB) {
-	repoA, repoB, remote := repository.SetupReposAndRemote()
-	defer repository.CleanupTestRepos(repoA, repoB, remote)
-
-	reneA := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := reneA.Commit(repoA)
-	require.NoError(t, err)
-
-	bug1, _, err := Create(reneA, time.Now().Unix(), "bug1", "message")
-	require.NoError(t, err)
-	err = bug1.Commit(repoA)
-	require.NoError(t, err)
-
-	// distribute the identity
-	_, err = identity.Push(repoA, "origin")
-	require.NoError(t, err)
-	err = identity.Pull(repoB, "origin")
-	require.NoError(t, err)
-
-	// A --> remote
-	_, err = Push(repoA, "origin")
-	require.NoError(t, err)
-
-	// remote --> B
-	err = Pull(repoB, "origin")
-	require.NoError(t, err)
-
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message2")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message3")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message4")
-	require.NoError(t, err)
-	err = bug1.Commit(repoA)
-	require.NoError(t, err)
-
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message5")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message6")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message7")
-	require.NoError(t, err)
-	err = bug1.Commit(repoA)
-	require.NoError(t, err)
-
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message8")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message9")
-	require.NoError(t, err)
-	_, err = AddComment(bug1, reneA, time.Now().Unix(), "message10")
-	require.NoError(t, err)
-	err = bug1.Commit(repoA)
-	require.NoError(t, err)
-
-	bug2, err := ReadLocal(repoB, bug1.Id())
-	require.NoError(t, err)
-
-	reneB, err := identity.ReadLocal(repoA, reneA.Id())
-	require.NoError(t, err)
-
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message11")
-	require.NoError(t, err)
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message12")
-	require.NoError(t, err)
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message13")
-	require.NoError(t, err)
-	err = bug2.Commit(repoB)
-	require.NoError(t, err)
-
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message14")
-	require.NoError(t, err)
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message15")
-	require.NoError(t, err)
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message16")
-	require.NoError(t, err)
-	err = bug2.Commit(repoB)
-	require.NoError(t, err)
-
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message17")
-	require.NoError(t, err)
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message18")
-	require.NoError(t, err)
-	_, err = AddComment(bug2, reneB, time.Now().Unix(), "message19")
-	require.NoError(t, err)
-	err = bug2.Commit(repoB)
-	require.NoError(t, err)
-
-	// A --> remote
-	_, err = Push(repoA, "origin")
-	require.NoError(t, err)
-
-	// remote --> B
-	err = Pull(repoB, "origin")
-	require.NoError(t, err)
-
-	bugs := allBugs(t, ReadAllLocal(repoB))
-
-	if len(bugs) != 1 {
-		t.Fatal("Unexpected number of bugs")
-	}
-
-	bug3, err := ReadLocal(repoB, bug1.Id())
-	require.NoError(t, err)
-
-	if nbOps(bug3) != 19 {
-		t.Fatal("Unexpected number of operations")
-	}
-
-	// B --> remote
-	_, err = Push(repoB, "origin")
-	require.NoError(t, err)
-
-	// remote --> A
-	err = Pull(repoA, "origin")
-	require.NoError(t, err)
-
-	bugs = allBugs(t, ReadAllLocal(repoA))
-
-	if len(bugs) != 1 {
-		t.Fatal("Unexpected number of bugs")
-	}
-
-	bug4, err := ReadLocal(repoA, bug1.Id())
-	require.NoError(t, err)
-
-	if nbOps(bug4) != 19 {
-		t.Fatal("Unexpected number of operations")
-	}
-}

bug/bug_test.go 🔗

@@ -1,183 +0,0 @@
-package bug
-
-import (
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/require"
-
-	"github.com/MichaelMure/git-bug/identity"
-	"github.com/MichaelMure/git-bug/repository"
-)
-
-func TestBugId(t *testing.T) {
-	mockRepo := repository.NewMockRepoForTest()
-
-	bug1 := NewBug()
-
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(mockRepo)
-	require.NoError(t, err)
-
-	createOp := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
-
-	bug1.Append(createOp)
-
-	err = bug1.Commit(mockRepo)
-	require.NoError(t, err)
-
-	bug1.Id()
-}
-
-func TestBugValidity(t *testing.T) {
-	mockRepo := repository.NewMockRepoForTest()
-
-	bug1 := NewBug()
-
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(mockRepo)
-	require.NoError(t, err)
-
-	createOp := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
-
-	if bug1.Validate() == nil {
-		t.Fatal("Empty bug should be invalid")
-	}
-
-	bug1.Append(createOp)
-
-	if bug1.Validate() != nil {
-		t.Fatal("Bug with just a CreateOp should be valid")
-	}
-
-	err = bug1.Commit(mockRepo)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	bug1.Append(createOp)
-
-	if bug1.Validate() == nil {
-		t.Fatal("Bug with multiple CreateOp should be invalid")
-	}
-
-	err = bug1.Commit(mockRepo)
-	if err == nil {
-		t.Fatal("Invalid bug should not commit")
-	}
-}
-
-func TestBugCommitLoad(t *testing.T) {
-	repo := repository.NewMockRepoForTest()
-
-	bug1 := NewBug()
-
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
-	require.NoError(t, err)
-
-	createOp := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
-	setTitleOp := NewSetTitleOp(rene, time.Now().Unix(), "title2", "title1")
-	addCommentOp := NewAddCommentOp(rene, time.Now().Unix(), "message2", nil)
-
-	bug1.Append(createOp)
-	bug1.Append(setTitleOp)
-
-	require.True(t, bug1.NeedCommit())
-
-	err = bug1.Commit(repo)
-	require.Nil(t, err)
-	require.False(t, bug1.NeedCommit())
-
-	bug2, err := ReadLocal(repo, bug1.Id())
-	require.NoError(t, err)
-	equivalentBug(t, bug1, bug2)
-
-	// add more op
-
-	bug1.Append(addCommentOp)
-
-	require.True(t, bug1.NeedCommit())
-
-	err = bug1.Commit(repo)
-	require.Nil(t, err)
-	require.False(t, bug1.NeedCommit())
-
-	bug3, err := ReadLocal(repo, bug1.Id())
-	require.NoError(t, err)
-	equivalentBug(t, bug1, bug3)
-}
-
-func equivalentBug(t *testing.T, expected, actual *Bug) {
-	require.Equal(t, len(expected.packs), len(actual.packs))
-
-	for i := range expected.packs {
-		for j := range expected.packs[i].Operations {
-			actual.packs[i].Operations[j].base().id = expected.packs[i].Operations[j].base().id
-		}
-	}
-
-	require.Equal(t, expected, actual)
-}
-
-func TestBugRemove(t *testing.T) {
-	repo := repository.CreateGoGitTestRepo(false)
-	remoteA := repository.CreateGoGitTestRepo(true)
-	remoteB := repository.CreateGoGitTestRepo(true)
-	defer repository.CleanupTestRepos(repo, remoteA, remoteB)
-
-	err := repo.AddRemote("remoteA", remoteA.GetLocalRemote())
-	require.NoError(t, err)
-
-	err = repo.AddRemote("remoteB", remoteB.GetLocalRemote())
-	require.NoError(t, err)
-
-	// generate a bunch of bugs
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err = rene.Commit(repo)
-	require.NoError(t, err)
-
-	for i := 0; i < 100; i++ {
-		b := NewBug()
-		createOp := NewCreateOp(rene, time.Now().Unix(), "title", fmt.Sprintf("message%v", i), nil)
-		b.Append(createOp)
-		err = b.Commit(repo)
-		require.NoError(t, err)
-	}
-
-	// and one more for testing
-	b := NewBug()
-	createOp := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
-	b.Append(createOp)
-	err = b.Commit(repo)
-	require.NoError(t, err)
-
-	_, err = Push(repo, "remoteA")
-	require.NoError(t, err)
-
-	_, err = Push(repo, "remoteB")
-	require.NoError(t, err)
-
-	_, err = Fetch(repo, "remoteA")
-	require.NoError(t, err)
-
-	_, err = Fetch(repo, "remoteB")
-	require.NoError(t, err)
-
-	err = RemoveBug(repo, b.Id())
-	require.NoError(t, err)
-
-	_, err = ReadLocal(repo, b.Id())
-	require.Error(t, ErrBugNotExist, err)
-
-	_, err = ReadRemote(repo, "remoteA", b.Id())
-	require.Error(t, ErrBugNotExist, err)
-
-	_, err = ReadRemote(repo, "remoteB", b.Id())
-	require.Error(t, ErrBugNotExist, err)
-
-	ids, err := ListLocalIds(repo)
-	require.NoError(t, err)
-	require.Len(t, ids, 100)
-}

bug/clocks.go 🔗

@@ -1,40 +0,0 @@
-package bug
-
-import (
-	"github.com/MichaelMure/git-bug/identity"
-	"github.com/MichaelMure/git-bug/repository"
-)
-
-// ClockLoader is the repository.ClockLoader for the Bug entity
-var ClockLoader = repository.ClockLoader{
-	Clocks: []string{creationClockName, editClockName},
-	Witnesser: func(repo repository.ClockedRepo) error {
-		// We don't care about the actual identity so an IdentityStub will do
-		resolver := identity.NewStubResolver()
-		for b := range ReadAllLocalWithResolver(repo, resolver) {
-			if b.Err != nil {
-				return b.Err
-			}
-
-			createClock, err := repo.GetOrCreateClock(creationClockName)
-			if err != nil {
-				return err
-			}
-			err = createClock.Witness(b.Bug.createTime)
-			if err != nil {
-				return err
-			}
-
-			editClock, err := repo.GetOrCreateClock(editClockName)
-			if err != nil {
-				return err
-			}
-			err = editClock.Witness(b.Bug.editTime)
-			if err != nil {
-				return err
-			}
-		}
-
-		return nil
-	},
-}

bug/err.go 🔗

@@ -0,0 +1,17 @@
+package bug
+
+import (
+	"errors"
+
+	"github.com/MichaelMure/git-bug/entity"
+)
+
+var ErrBugNotExist = errors.New("bug doesn't exist")
+
+func NewErrMultipleMatchBug(matching []entity.Id) *entity.ErrMultipleMatch {
+	return entity.NewErrMultipleMatch("bug", matching)
+}
+
+func NewErrMultipleMatchOp(matching []entity.Id) *entity.ErrMultipleMatch {
+	return entity.NewErrMultipleMatch("operation", matching)
+}

bug/identity.go 🔗

@@ -1,27 +0,0 @@
-package bug
-
-import (
-	"github.com/MichaelMure/git-bug/identity"
-)
-
-// EnsureIdentities walk the graph of operations and make sure that all Identity
-// are properly loaded. That is, it replace all the IdentityStub with the full
-// Identity, loaded through a Resolver.
-func (bug *Bug) EnsureIdentities(resolver identity.Resolver) error {
-	it := NewOperationIterator(bug)
-
-	for it.Next() {
-		op := it.Value()
-		base := op.base()
-
-		if stub, ok := base.Author.(*identity.IdentityStub); ok {
-			i, err := resolver.ResolveIdentity(stub.Id())
-			if err != nil {
-				return err
-			}
-
-			base.Author = i
-		}
-	}
-	return nil
-}

bug/interface.go 🔗

@@ -16,17 +16,15 @@ type Interface interface {
 	// Append an operation into the staging area, to be committed later
 	Append(op Operation)
 
+	// Operations returns the ordered operations
+	Operations() []Operation
+
 	// Indicate that the in-memory state changed and need to be commit in the repository
 	NeedCommit() bool
 
 	// Commit write the staging area in Git and move the operations to the packs
 	Commit(repo repository.ClockedRepo) error
 
-	// Merge a different version of the same bug by rebasing operations of this bug
-	// that are not present in the other on top of the chain of operations of the
-	// other version.
-	Merge(repo repository.Repo, other Interface) (bool, error)
-
 	// Lookup for the very first operation of the bug.
 	// For a valid Bug, this operation should be a CreateOp
 	FirstOp() Operation

bug/op_add_comment.go 🔗

@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/entity/dag"
 	"github.com/MichaelMure/git-bug/identity"
 	"github.com/MichaelMure/git-bug/repository"
 	"github.com/MichaelMure/git-bug/util/text"
@@ -12,6 +13,7 @@ import (
 )
 
 var _ Operation = &AddCommentOperation{}
+var _ dag.OperationWithFiles = &AddCommentOperation{}
 
 // AddCommentOperation will add a new comment in the bug
 type AddCommentOperation struct {
@@ -21,25 +23,19 @@ type AddCommentOperation struct {
 	Files []repository.Hash `json:"files"`
 }
 
-// Sign-post method for gqlgen
-func (op *AddCommentOperation) IsOperation() {}
-
-func (op *AddCommentOperation) base() *OpBase {
-	return &op.OpBase
-}
-
 func (op *AddCommentOperation) Id() entity.Id {
-	return idOperation(op)
+	return idOperation(op, &op.OpBase)
 }
 
 func (op *AddCommentOperation) Apply(snapshot *Snapshot) {
-	snapshot.addActor(op.Author)
-	snapshot.addParticipant(op.Author)
+	snapshot.addActor(op.Author_)
+	snapshot.addParticipant(op.Author_)
 
+	commentId := entity.CombineIds(snapshot.Id(), op.Id())
 	comment := Comment{
-		id:       op.Id(),
+		id:       commentId,
 		Message:  op.Message,
-		Author:   op.Author,
+		Author:   op.Author_,
 		Files:    op.Files,
 		UnixTime: timestamp.Timestamp(op.UnixTime),
 	}
@@ -47,7 +43,7 @@ func (op *AddCommentOperation) Apply(snapshot *Snapshot) {
 	snapshot.Comments = append(snapshot.Comments, comment)
 
 	item := &AddCommentTimelineItem{
-		CommentTimelineItem: NewCommentTimelineItem(op.Id(), comment),
+		CommentTimelineItem: NewCommentTimelineItem(commentId, comment),
 	}
 
 	snapshot.Timeline = append(snapshot.Timeline, item)
@@ -58,7 +54,7 @@ func (op *AddCommentOperation) GetFiles() []repository.Hash {
 }
 
 func (op *AddCommentOperation) Validate() error {
-	if err := opBaseValidate(op, AddCommentOp); err != nil {
+	if err := op.OpBase.Validate(op, AddCommentOp); err != nil {
 		return err
 	}
 

bug/op_add_comment_test.go 🔗

@@ -13,9 +13,9 @@ import (
 )
 
 func TestAddCommentSerialize(t *testing.T) {
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
+	repo := repository.NewMockRepo()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
 	require.NoError(t, err)
 
 	unix := time.Now().Unix()
@@ -32,8 +32,8 @@ func TestAddCommentSerialize(t *testing.T) {
 	before.Id()
 
 	// Replace the identity stub with the real thing
-	assert.Equal(t, rene.Id(), after.base().Author.Id())
-	after.Author = rene
+	assert.Equal(t, rene.Id(), after.Author().Id())
+	after.Author_ = rene
 
 	assert.Equal(t, before, &after)
 }

bug/op_create.go 🔗

@@ -6,6 +6,7 @@ import (
 	"strings"
 
 	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/entity/dag"
 	"github.com/MichaelMure/git-bug/identity"
 	"github.com/MichaelMure/git-bug/repository"
 	"github.com/MichaelMure/git-bug/util/text"
@@ -13,6 +14,7 @@ import (
 )
 
 var _ Operation = &CreateOperation{}
+var _ dag.OperationWithFiles = &CreateOperation{}
 
 // CreateOperation define the initial creation of a bug
 type CreateOperation struct {
@@ -22,37 +24,53 @@ type CreateOperation struct {
 	Files   []repository.Hash `json:"files"`
 }
 
-// Sign-post method for gqlgen
-func (op *CreateOperation) IsOperation() {}
-
-func (op *CreateOperation) base() *OpBase {
-	return &op.OpBase
+func (op *CreateOperation) Id() entity.Id {
+	return idOperation(op, &op.OpBase)
 }
 
-func (op *CreateOperation) Id() entity.Id {
-	return idOperation(op)
+// SetMetadata overrides OpBase.SetMetadata to forbid mutating the first operation once its Id has been computed.
+func (op *CreateOperation) SetMetadata(key string, value string) {
+	// sanity check: we make sure we are not in the following scenario:
+	// - the bug is created with a first operation
+	// - Id() is used
+	// - metadata are added, which will change the Id
+	// - Id() is used again
+
+	if op.id != entity.UnsetId {
+		panic("usage of Id() after changing the first operation")
+	}
+
+	op.OpBase.SetMetadata(key, value)
 }
 
 func (op *CreateOperation) Apply(snapshot *Snapshot) {
-	snapshot.addActor(op.Author)
-	snapshot.addParticipant(op.Author)
+	// sanity check: will fail when adding a second Create
+	if snapshot.id != "" && snapshot.id != entity.UnsetId && snapshot.id != op.Id() {
+		panic("adding a second Create operation")
+	}
+
+	snapshot.id = op.Id()
+
+	snapshot.addActor(op.Author_)
+	snapshot.addParticipant(op.Author_)
 
 	snapshot.Title = op.Title
 
+	commentId := entity.CombineIds(snapshot.Id(), op.Id())
 	comment := Comment{
-		id:       op.Id(),
+		id:       commentId,
 		Message:  op.Message,
-		Author:   op.Author,
+		Author:   op.Author_,
 		UnixTime: timestamp.Timestamp(op.UnixTime),
 	}
 
 	snapshot.Comments = []Comment{comment}
-	snapshot.Author = op.Author
+	snapshot.Author = op.Author_
 	snapshot.CreateTime = op.Time()
 
 	snapshot.Timeline = []TimelineItem{
 		&CreateTimelineItem{
-			CommentTimelineItem: NewCommentTimelineItem(op.Id(), comment),
+			CommentTimelineItem: NewCommentTimelineItem(commentId, comment),
 		},
 	}
 }
@@ -62,18 +80,23 @@ func (op *CreateOperation) GetFiles() []repository.Hash {
 }
 
 func (op *CreateOperation) Validate() error {
-	if err := opBaseValidate(op, CreateOp); err != nil {
+	if err := op.OpBase.Validate(op, CreateOp); err != nil {
 		return err
 	}
 
+	if len(op.Nonce) > 64 {
+		return fmt.Errorf("create nonce is too big")
+	}
+	if len(op.Nonce) < 20 {
+		return fmt.Errorf("create nonce is too small")
+	}
+
 	if text.Empty(op.Title) {
 		return fmt.Errorf("title is empty")
 	}
-
 	if strings.Contains(op.Title, "\n") {
 		return fmt.Errorf("title should be a single line")
 	}
-
 	if !text.Safe(op.Title) {
 		return fmt.Errorf("title is not fully printable")
 	}
@@ -85,7 +108,7 @@ func (op *CreateOperation) Validate() error {
 	return nil
 }
 
-// UnmarshalJSON is a two step JSON unmarshaling
+// UnmarshalJSON is a two step JSON unmarshalling
 // This workaround is necessary to avoid the inner OpBase.MarshalJSON
 // overriding the outer op's MarshalJSON
 func (op *CreateOperation) UnmarshalJSON(data []byte) error {
@@ -98,6 +121,7 @@ func (op *CreateOperation) UnmarshalJSON(data []byte) error {
 	}
 
 	aux := struct {
+		Nonce   []byte            `json:"nonce"`
 		Title   string            `json:"title"`
 		Message string            `json:"message"`
 		Files   []repository.Hash `json:"files"`
@@ -109,6 +133,7 @@ func (op *CreateOperation) UnmarshalJSON(data []byte) error {
 	}
 
 	op.OpBase = base
+	op.Nonce = aux.Nonce
 	op.Title = aux.Title
 	op.Message = aux.Message
 	op.Files = aux.Files

bug/op_create_test.go 🔗

@@ -5,17 +5,22 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
+
+	"github.com/MichaelMure/git-bug/entity"
 	"github.com/MichaelMure/git-bug/identity"
 	"github.com/MichaelMure/git-bug/repository"
 	"github.com/MichaelMure/git-bug/util/timestamp"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )
 
 func TestCreate(t *testing.T) {
 	snapshot := Snapshot{}
 
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
+	repo := repository.NewMockRepoClock()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
+	require.NoError(t, err)
+
 	unix := time.Now().Unix()
 
 	create := NewCreateOp(rene, unix, "title", "message", nil)
@@ -23,16 +28,19 @@ func TestCreate(t *testing.T) {
 	create.Apply(&snapshot)
 
 	id := create.Id()
-	assert.NoError(t, id.Validate())
+	require.NoError(t, id.Validate())
+
+	commentId := entity.CombineIds(create.Id(), create.Id())
 
 	comment := Comment{
-		id:       id,
+		id:       commentId,
 		Author:   rene,
 		Message:  "message",
 		UnixTime: timestamp.Timestamp(create.UnixTime),
 	}
 
 	expected := Snapshot{
+		id:    create.Id(),
 		Title: "title",
 		Comments: []Comment{
 			comment,
@@ -43,36 +51,36 @@ func TestCreate(t *testing.T) {
 		CreateTime:   create.Time(),
 		Timeline: []TimelineItem{
 			&CreateTimelineItem{
-				CommentTimelineItem: NewCommentTimelineItem(id, comment),
+				CommentTimelineItem: NewCommentTimelineItem(commentId, comment),
 			},
 		},
 	}
 
-	assert.Equal(t, expected, snapshot)
+	require.Equal(t, expected, snapshot)
 }
 
 func TestCreateSerialize(t *testing.T) {
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
+	repo := repository.NewMockRepo()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
 	require.NoError(t, err)
 
 	unix := time.Now().Unix()
 	before := NewCreateOp(rene, unix, "title", "message", nil)
 
 	data, err := json.Marshal(before)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	var after CreateOperation
 	err = json.Unmarshal(data, &after)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// enforce creating the ID
 	before.Id()
 
 	// Replace the identity stub with the real thing
-	assert.Equal(t, rene.Id(), after.base().Author.Id())
-	after.Author = rene
+	require.Equal(t, rene.Id(), after.Author().Id())
+	after.Author_ = rene
 
-	assert.Equal(t, before, &after)
+	require.Equal(t, before, &after)
 }

bug/op_edit_comment.go 🔗

@@ -7,6 +7,7 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/entity/dag"
 	"github.com/MichaelMure/git-bug/identity"
 	"github.com/MichaelMure/git-bug/repository"
 	"github.com/MichaelMure/git-bug/util/timestamp"
@@ -15,6 +16,7 @@ import (
 )
 
 var _ Operation = &EditCommentOperation{}
+var _ dag.OperationWithFiles = &EditCommentOperation{}
 
 // EditCommentOperation will change a comment in the bug
 type EditCommentOperation struct {
@@ -24,22 +26,15 @@ type EditCommentOperation struct {
 	Files   []repository.Hash `json:"files"`
 }
 
-// Sign-post method for gqlgen
-func (op *EditCommentOperation) IsOperation() {}
-
-func (op *EditCommentOperation) base() *OpBase {
-	return &op.OpBase
-}
-
 func (op *EditCommentOperation) Id() entity.Id {
-	return idOperation(op)
+	return idOperation(op, &op.OpBase)
 }
 
 func (op *EditCommentOperation) Apply(snapshot *Snapshot) {
 	// Todo: currently any message can be edited, even by a different author
 	// crypto signature are needed.
 
-	snapshot.addActor(op.Author)
+	snapshot.addActor(op.Author_)
 
 	var target TimelineItem
 
@@ -85,7 +80,7 @@ func (op *EditCommentOperation) GetFiles() []repository.Hash {
 }
 
 func (op *EditCommentOperation) Validate() error {
-	if err := opBaseValidate(op, EditCommentOp); err != nil {
+	if err := op.OpBase.Validate(op, EditCommentOp); err != nil {
 		return err
 	}
 
@@ -100,7 +95,7 @@ func (op *EditCommentOperation) Validate() error {
 	return nil
 }
 
-// UnmarshalJSON is a two step JSON unmarshaling
+// UnmarshalJSON is a two step JSON unmarshalling
 // This workaround is necessary to avoid the inner OpBase.MarshalJSON
 // overriding the outer op's MarshalJSON
 func (op *EditCommentOperation) UnmarshalJSON(data []byte) error {

bug/op_edit_comment_test.go 🔗

@@ -5,7 +5,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	"github.com/MichaelMure/git-bug/identity"
@@ -15,9 +14,9 @@ import (
 func TestEdit(t *testing.T) {
 	snapshot := Snapshot{}
 
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
+	repo := repository.NewMockRepo()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
 	require.NoError(t, err)
 
 	unix := time.Now().Unix()
@@ -44,62 +43,62 @@ func TestEdit(t *testing.T) {
 	id3 := comment2.Id()
 	require.NoError(t, id3.Validate())
 
-	edit := NewEditCommentOp(rene, unix, id1, "create edited", nil)
+	edit := NewEditCommentOp(rene, unix, snapshot.Comments[0].Id(), "create edited", nil)
 	edit.Apply(&snapshot)
 
-	assert.Equal(t, len(snapshot.Timeline), 4)
-	assert.Equal(t, len(snapshot.Timeline[0].(*CreateTimelineItem).History), 2)
-	assert.Equal(t, len(snapshot.Timeline[1].(*AddCommentTimelineItem).History), 1)
-	assert.Equal(t, len(snapshot.Timeline[3].(*AddCommentTimelineItem).History), 1)
-	assert.Equal(t, snapshot.Comments[0].Message, "create edited")
-	assert.Equal(t, snapshot.Comments[1].Message, "comment 1")
-	assert.Equal(t, snapshot.Comments[2].Message, "comment 2")
+	require.Len(t, snapshot.Timeline, 4)
+	require.Len(t, snapshot.Timeline[0].(*CreateTimelineItem).History, 2)
+	require.Len(t, snapshot.Timeline[1].(*AddCommentTimelineItem).History, 1)
+	require.Len(t, snapshot.Timeline[3].(*AddCommentTimelineItem).History, 1)
+	require.Equal(t, snapshot.Comments[0].Message, "create edited")
+	require.Equal(t, snapshot.Comments[1].Message, "comment 1")
+	require.Equal(t, snapshot.Comments[2].Message, "comment 2")
 
-	edit2 := NewEditCommentOp(rene, unix, id2, "comment 1 edited", nil)
+	edit2 := NewEditCommentOp(rene, unix, snapshot.Comments[1].Id(), "comment 1 edited", nil)
 	edit2.Apply(&snapshot)
 
-	assert.Equal(t, len(snapshot.Timeline), 4)
-	assert.Equal(t, len(snapshot.Timeline[0].(*CreateTimelineItem).History), 2)
-	assert.Equal(t, len(snapshot.Timeline[1].(*AddCommentTimelineItem).History), 2)
-	assert.Equal(t, len(snapshot.Timeline[3].(*AddCommentTimelineItem).History), 1)
-	assert.Equal(t, snapshot.Comments[0].Message, "create edited")
-	assert.Equal(t, snapshot.Comments[1].Message, "comment 1 edited")
-	assert.Equal(t, snapshot.Comments[2].Message, "comment 2")
+	require.Len(t, snapshot.Timeline, 4)
+	require.Len(t, snapshot.Timeline[0].(*CreateTimelineItem).History, 2)
+	require.Len(t, snapshot.Timeline[1].(*AddCommentTimelineItem).History, 2)
+	require.Len(t, snapshot.Timeline[3].(*AddCommentTimelineItem).History, 1)
+	require.Equal(t, snapshot.Comments[0].Message, "create edited")
+	require.Equal(t, snapshot.Comments[1].Message, "comment 1 edited")
+	require.Equal(t, snapshot.Comments[2].Message, "comment 2")
 
-	edit3 := NewEditCommentOp(rene, unix, id3, "comment 2 edited", nil)
+	edit3 := NewEditCommentOp(rene, unix, snapshot.Comments[2].Id(), "comment 2 edited", nil)
 	edit3.Apply(&snapshot)
 
-	assert.Equal(t, len(snapshot.Timeline), 4)
-	assert.Equal(t, len(snapshot.Timeline[0].(*CreateTimelineItem).History), 2)
-	assert.Equal(t, len(snapshot.Timeline[1].(*AddCommentTimelineItem).History), 2)
-	assert.Equal(t, len(snapshot.Timeline[3].(*AddCommentTimelineItem).History), 2)
-	assert.Equal(t, snapshot.Comments[0].Message, "create edited")
-	assert.Equal(t, snapshot.Comments[1].Message, "comment 1 edited")
-	assert.Equal(t, snapshot.Comments[2].Message, "comment 2 edited")
+	require.Len(t, snapshot.Timeline, 4)
+	require.Len(t, snapshot.Timeline[0].(*CreateTimelineItem).History, 2)
+	require.Len(t, snapshot.Timeline[1].(*AddCommentTimelineItem).History, 2)
+	require.Len(t, snapshot.Timeline[3].(*AddCommentTimelineItem).History, 2)
+	require.Equal(t, snapshot.Comments[0].Message, "create edited")
+	require.Equal(t, snapshot.Comments[1].Message, "comment 1 edited")
+	require.Equal(t, snapshot.Comments[2].Message, "comment 2 edited")
 }
 
 func TestEditCommentSerialize(t *testing.T) {
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
+	repo := repository.NewMockRepo()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
 	require.NoError(t, err)
 
 	unix := time.Now().Unix()
 	before := NewEditCommentOp(rene, unix, "target", "message", nil)
 
 	data, err := json.Marshal(before)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	var after EditCommentOperation
 	err = json.Unmarshal(data, &after)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// enforce creating the ID
 	before.Id()
 
 	// Replace the identity stub with the real thing
-	assert.Equal(t, rene.Id(), after.base().Author.Id())
-	after.Author = rene
+	require.Equal(t, rene.Id(), after.Author().Id())
+	after.Author_ = rene
 
-	assert.Equal(t, before, &after)
+	require.Equal(t, before, &after)
 }

bug/op_label_change.go 🔗

@@ -21,20 +21,13 @@ type LabelChangeOperation struct {
 	Removed []Label `json:"removed"`
 }
 
-// Sign-post method for gqlgen
-func (op *LabelChangeOperation) IsOperation() {}
-
-func (op *LabelChangeOperation) base() *OpBase {
-	return &op.OpBase
-}
-
 func (op *LabelChangeOperation) Id() entity.Id {
-	return idOperation(op)
+	return idOperation(op, &op.OpBase)
 }
 
 // Apply apply the operation
 func (op *LabelChangeOperation) Apply(snapshot *Snapshot) {
-	snapshot.addActor(op.Author)
+	snapshot.addActor(op.Author_)
 
 	// Add in the set
 AddLoop:
@@ -66,7 +59,7 @@ AddLoop:
 
 	item := &LabelChangeTimelineItem{
 		id:       op.Id(),
-		Author:   op.Author,
+		Author:   op.Author_,
 		UnixTime: timestamp.Timestamp(op.UnixTime),
 		Added:    op.Added,
 		Removed:  op.Removed,
@@ -76,7 +69,7 @@ AddLoop:
 }
 
 func (op *LabelChangeOperation) Validate() error {
-	if err := opBaseValidate(op, LabelChangeOp); err != nil {
+	if err := op.OpBase.Validate(op, LabelChangeOp); err != nil {
 		return err
 	}
 
@@ -99,7 +92,7 @@ func (op *LabelChangeOperation) Validate() error {
 	return nil
 }
 
-// UnmarshalJSON is a two step JSON unmarshaling
+// UnmarshalJSON is a two step JSON unmarshalling
 // This workaround is necessary to avoid the inner OpBase.MarshalJSON
 // overriding the outer op's MarshalJSON
 func (op *LabelChangeOperation) UnmarshalJSON(data []byte) error {

bug/op_label_change_test.go 🔗

@@ -9,32 +9,30 @@ import (
 
 	"github.com/MichaelMure/git-bug/identity"
 	"github.com/MichaelMure/git-bug/repository"
-
-	"github.com/stretchr/testify/assert"
 )
 
 func TestLabelChangeSerialize(t *testing.T) {
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
+	repo := repository.NewMockRepo()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
 	require.NoError(t, err)
 
 	unix := time.Now().Unix()
 	before := NewLabelChangeOperation(rene, unix, []Label{"added"}, []Label{"removed"})
 
 	data, err := json.Marshal(before)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	var after LabelChangeOperation
 	err = json.Unmarshal(data, &after)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// enforce creating the ID
 	before.Id()
 
 	// Replace the identity stub with the real thing
-	assert.Equal(t, rene.Id(), after.base().Author.Id())
-	after.Author = rene
+	require.Equal(t, rene.Id(), after.Author().Id())
+	after.Author_ = rene
 
-	assert.Equal(t, before, &after)
+	require.Equal(t, before, &after)
 }

bug/op_noop.go 🔗

@@ -16,15 +16,8 @@ type NoOpOperation struct {
 	OpBase
 }
 
-// Sign-post method for gqlgen
-func (op *NoOpOperation) IsOperation() {}
-
-func (op *NoOpOperation) base() *OpBase {
-	return &op.OpBase
-}
-
 func (op *NoOpOperation) Id() entity.Id {
-	return idOperation(op)
+	return idOperation(op, &op.OpBase)
 }
 
 func (op *NoOpOperation) Apply(snapshot *Snapshot) {
@@ -32,10 +25,10 @@ func (op *NoOpOperation) Apply(snapshot *Snapshot) {
 }
 
 func (op *NoOpOperation) Validate() error {
-	return opBaseValidate(op, NoOpOp)
+	return op.OpBase.Validate(op, NoOpOp)
 }
 
-// UnmarshalJSON is a two step JSON unmarshaling
+// UnmarshalJSON is a two step JSON unmarshalling
 // This workaround is necessary to avoid the inner OpBase.MarshalJSON
 // overriding the outer op's MarshalJSON
 func (op *NoOpOperation) UnmarshalJSON(data []byte) error {

bug/op_noop_test.go 🔗

@@ -14,9 +14,9 @@ import (
 )
 
 func TestNoopSerialize(t *testing.T) {
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
+	repo := repository.NewMockRepo()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
 	require.NoError(t, err)
 
 	unix := time.Now().Unix()
@@ -33,8 +33,8 @@ func TestNoopSerialize(t *testing.T) {
 	before.Id()
 
 	// Replace the identity stub with the real thing
-	assert.Equal(t, rene.Id(), after.base().Author.Id())
-	after.Author = rene
+	assert.Equal(t, rene.Id(), after.Author().Id())
+	after.Author_ = rene
 
 	assert.Equal(t, before, &after)
 }

bug/op_set_metadata.go 🔗

@@ -17,41 +17,25 @@ type SetMetadataOperation struct {
 	NewMetadata map[string]string `json:"new_metadata"`
 }
 
-// Sign-post method for gqlgen
-func (op *SetMetadataOperation) IsOperation() {}
-
-func (op *SetMetadataOperation) base() *OpBase {
-	return &op.OpBase
-}
-
 func (op *SetMetadataOperation) Id() entity.Id {
-	return idOperation(op)
+	return idOperation(op, &op.OpBase)
 }
 
 func (op *SetMetadataOperation) Apply(snapshot *Snapshot) {
 	for _, target := range snapshot.Operations {
 		if target.Id() == op.Target {
-			base := target.base()
-
-			if base.extraMetadata == nil {
-				base.extraMetadata = make(map[string]string)
-			}
-
 			// Apply the metadata in an immutable way: if a metadata already
 			// exist, it's not possible to override it.
-			for key, val := range op.NewMetadata {
-				if _, exist := base.extraMetadata[key]; !exist {
-					base.extraMetadata[key] = val
-				}
+			for key, value := range op.NewMetadata {
+				target.setExtraMetadataImmutable(key, value)
 			}
-
 			return
 		}
 	}
 }
 
 func (op *SetMetadataOperation) Validate() error {
-	if err := opBaseValidate(op, SetMetadataOp); err != nil {
+	if err := op.OpBase.Validate(op, SetMetadataOp); err != nil {
 		return err
 	}
 
@@ -62,7 +46,7 @@ func (op *SetMetadataOperation) Validate() error {
 	return nil
 }
 
-// UnmarshalJSON is a two step JSON unmarshaling
+// UnmarshalJSON is a two step JSON unmarshalling
 // This workaround is necessary to avoid the inner OpBase.MarshalJSON
 // overriding the outer op's MarshalJSON
 func (op *SetMetadataOperation) UnmarshalJSON(data []byte) error {

bug/op_set_metadata_test.go 🔗

@@ -8,16 +8,15 @@ import (
 	"github.com/MichaelMure/git-bug/identity"
 	"github.com/MichaelMure/git-bug/repository"
 
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestSetMetadata(t *testing.T) {
 	snapshot := Snapshot{}
 
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
+	repo := repository.NewMockRepo()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
 	require.NoError(t, err)
 
 	unix := time.Now().Unix()
@@ -47,15 +46,15 @@ func TestSetMetadata(t *testing.T) {
 	snapshot.Operations = append(snapshot.Operations, op1)
 
 	createMetadata := snapshot.Operations[0].AllMetadata()
-	assert.Equal(t, len(createMetadata), 2)
+	require.Len(t, createMetadata, 2)
 	// original key is not overrided
-	assert.Equal(t, createMetadata["key"], "value")
+	require.Equal(t, createMetadata["key"], "value")
 	// new key is set
-	assert.Equal(t, createMetadata["key2"], "value")
+	require.Equal(t, createMetadata["key2"], "value")
 
 	commentMetadata := snapshot.Operations[1].AllMetadata()
-	assert.Equal(t, len(commentMetadata), 1)
-	assert.Equal(t, commentMetadata["key2"], "value2")
+	require.Len(t, commentMetadata, 1)
+	require.Equal(t, commentMetadata["key2"], "value2")
 
 	op2 := NewSetMetadataOp(rene, unix, id2, map[string]string{
 		"key2": "value",
@@ -66,16 +65,16 @@ func TestSetMetadata(t *testing.T) {
 	snapshot.Operations = append(snapshot.Operations, op2)
 
 	createMetadata = snapshot.Operations[0].AllMetadata()
-	assert.Equal(t, len(createMetadata), 2)
-	assert.Equal(t, createMetadata["key"], "value")
-	assert.Equal(t, createMetadata["key2"], "value")
+	require.Len(t, createMetadata, 2)
+	require.Equal(t, createMetadata["key"], "value")
+	require.Equal(t, createMetadata["key2"], "value")
 
 	commentMetadata = snapshot.Operations[1].AllMetadata()
-	assert.Equal(t, len(commentMetadata), 2)
+	require.Len(t, commentMetadata, 2)
 	// original key is not overrided
-	assert.Equal(t, commentMetadata["key2"], "value2")
+	require.Equal(t, commentMetadata["key2"], "value2")
 	// new key is set
-	assert.Equal(t, commentMetadata["key3"], "value3")
+	require.Equal(t, commentMetadata["key3"], "value3")
 
 	op3 := NewSetMetadataOp(rene, unix, id1, map[string]string{
 		"key":  "override",
@@ -86,22 +85,22 @@ func TestSetMetadata(t *testing.T) {
 	snapshot.Operations = append(snapshot.Operations, op3)
 
 	createMetadata = snapshot.Operations[0].AllMetadata()
-	assert.Equal(t, len(createMetadata), 2)
+	require.Len(t, createMetadata, 2)
 	// original key is not overrided
-	assert.Equal(t, createMetadata["key"], "value")
+	require.Equal(t, createMetadata["key"], "value")
 	// previously set key is not overrided
-	assert.Equal(t, createMetadata["key2"], "value")
+	require.Equal(t, createMetadata["key2"], "value")
 
 	commentMetadata = snapshot.Operations[1].AllMetadata()
-	assert.Equal(t, len(commentMetadata), 2)
-	assert.Equal(t, commentMetadata["key2"], "value2")
-	assert.Equal(t, commentMetadata["key3"], "value3")
+	require.Len(t, commentMetadata, 2)
+	require.Equal(t, commentMetadata["key2"], "value2")
+	require.Equal(t, commentMetadata["key3"], "value3")
 }
 
 func TestSetMetadataSerialize(t *testing.T) {
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
+	repo := repository.NewMockRepo()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
 	require.NoError(t, err)
 
 	unix := time.Now().Unix()
@@ -111,18 +110,18 @@ func TestSetMetadataSerialize(t *testing.T) {
 	})
 
 	data, err := json.Marshal(before)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	var after SetMetadataOperation
 	err = json.Unmarshal(data, &after)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// enforce creating the ID
 	before.Id()
 
 	// Replace the identity stub with the real thing
-	assert.Equal(t, rene.Id(), after.base().Author.Id())
-	after.Author = rene
+	require.Equal(t, rene.Id(), after.Author().Id())
+	after.Author_ = rene
 
-	assert.Equal(t, before, &after)
+	require.Equal(t, before, &after)
 }

bug/op_set_status.go 🔗

@@ -18,24 +18,17 @@ type SetStatusOperation struct {
 	Status Status `json:"status"`
 }
 
-// Sign-post method for gqlgen
-func (op *SetStatusOperation) IsOperation() {}
-
-func (op *SetStatusOperation) base() *OpBase {
-	return &op.OpBase
-}
-
 func (op *SetStatusOperation) Id() entity.Id {
-	return idOperation(op)
+	return idOperation(op, &op.OpBase)
 }
 
 func (op *SetStatusOperation) Apply(snapshot *Snapshot) {
 	snapshot.Status = op.Status
-	snapshot.addActor(op.Author)
+	snapshot.addActor(op.Author_)
 
 	item := &SetStatusTimelineItem{
 		id:       op.Id(),
-		Author:   op.Author,
+		Author:   op.Author_,
 		UnixTime: timestamp.Timestamp(op.UnixTime),
 		Status:   op.Status,
 	}
@@ -44,7 +37,7 @@ func (op *SetStatusOperation) Apply(snapshot *Snapshot) {
 }
 
 func (op *SetStatusOperation) Validate() error {
-	if err := opBaseValidate(op, SetStatusOp); err != nil {
+	if err := op.OpBase.Validate(op, SetStatusOp); err != nil {
 		return err
 	}
 
@@ -55,7 +48,7 @@ func (op *SetStatusOperation) Validate() error {
 	return nil
 }
 
-// UnmarshalJSON is a two step JSON unmarshaling
+// UnmarshalJSON is a two step JSON unmarshalling
 // This workaround is necessary to avoid the inner OpBase.MarshalJSON
 // overriding the outer op's MarshalJSON
 func (op *SetStatusOperation) UnmarshalJSON(data []byte) error {

bug/op_set_status_test.go 🔗

@@ -9,32 +9,30 @@ import (
 
 	"github.com/MichaelMure/git-bug/identity"
 	"github.com/MichaelMure/git-bug/repository"
-
-	"github.com/stretchr/testify/assert"
 )
 
 func TestSetStatusSerialize(t *testing.T) {
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
+	repo := repository.NewMockRepo()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
 	require.NoError(t, err)
 
 	unix := time.Now().Unix()
 	before := NewSetStatusOp(rene, unix, ClosedStatus)
 
 	data, err := json.Marshal(before)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	var after SetStatusOperation
 	err = json.Unmarshal(data, &after)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// enforce creating the ID
 	before.Id()
 
 	// Replace the identity stub with the real thing
-	assert.Equal(t, rene.Id(), after.base().Author.Id())
-	after.Author = rene
+	require.Equal(t, rene.Id(), after.Author().Id())
+	after.Author_ = rene
 
-	assert.Equal(t, before, &after)
+	require.Equal(t, before, &after)
 }

bug/op_set_title.go 🔗

@@ -21,24 +21,17 @@ type SetTitleOperation struct {
 	Was   string `json:"was"`
 }
 
-// Sign-post method for gqlgen
-func (op *SetTitleOperation) IsOperation() {}
-
-func (op *SetTitleOperation) base() *OpBase {
-	return &op.OpBase
-}
-
 func (op *SetTitleOperation) Id() entity.Id {
-	return idOperation(op)
+	return idOperation(op, &op.OpBase)
 }
 
 func (op *SetTitleOperation) Apply(snapshot *Snapshot) {
 	snapshot.Title = op.Title
-	snapshot.addActor(op.Author)
+	snapshot.addActor(op.Author_)
 
 	item := &SetTitleTimelineItem{
 		id:       op.Id(),
-		Author:   op.Author,
+		Author:   op.Author_,
 		UnixTime: timestamp.Timestamp(op.UnixTime),
 		Title:    op.Title,
 		Was:      op.Was,
@@ -48,7 +41,7 @@ func (op *SetTitleOperation) Apply(snapshot *Snapshot) {
 }
 
 func (op *SetTitleOperation) Validate() error {
-	if err := opBaseValidate(op, SetTitleOp); err != nil {
+	if err := op.OpBase.Validate(op, SetTitleOp); err != nil {
 		return err
 	}
 
@@ -75,7 +68,7 @@ func (op *SetTitleOperation) Validate() error {
 	return nil
 }
 
-// UnmarshalJSON is a two step JSON unmarshaling
+// UnmarshalJSON is a two step JSON unmarshalling
 // This workaround is necessary to avoid the inner OpBase.MarshalJSON
 // overriding the outer op's MarshalJSON
 func (op *SetTitleOperation) UnmarshalJSON(data []byte) error {
@@ -132,19 +125,17 @@ func (s *SetTitleTimelineItem) IsAuthored() {}
 
 // Convenience function to apply the operation
 func SetTitle(b Interface, author identity.Interface, unixTime int64, title string) (*SetTitleOperation, error) {
-	it := NewOperationIterator(b)
-
-	var lastTitleOp Operation
-	for it.Next() {
-		op := it.Value()
-		if op.base().OperationType == SetTitleOp {
+	var lastTitleOp *SetTitleOperation
+	for _, op := range b.Operations() {
+		switch op := op.(type) {
+		case *SetTitleOperation:
 			lastTitleOp = op
 		}
 	}
 
 	var was string
 	if lastTitleOp != nil {
-		was = lastTitleOp.(*SetTitleOperation).Title
+		was = lastTitleOp.Title
 	} else {
 		was = b.FirstOp().(*CreateOperation).Title
 	}

bug/op_set_title_test.go 🔗

@@ -9,32 +9,30 @@ import (
 
 	"github.com/MichaelMure/git-bug/identity"
 	"github.com/MichaelMure/git-bug/repository"
-
-	"github.com/stretchr/testify/assert"
 )
 
 func TestSetTitleSerialize(t *testing.T) {
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
+	repo := repository.NewMockRepo()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
 	require.NoError(t, err)
 
 	unix := time.Now().Unix()
 	before := NewSetTitleOp(rene, unix, "title", "was")
 
 	data, err := json.Marshal(before)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	var after SetTitleOperation
 	err = json.Unmarshal(data, &after)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// enforce creating the ID
 	before.Id()
 
 	// Replace the identity stub with the real thing
-	assert.Equal(t, rene.Id(), after.base().Author.Id())
-	after.Author = rene
+	require.Equal(t, rene.Id(), after.Author().Id())
+	after.Author_ = rene
 
-	assert.Equal(t, before, &after)
+	require.Equal(t, before, &after)
 }

bug/operation.go 🔗

@@ -1,7 +1,7 @@
 package bug
 
 import (
-	"crypto/sha256"
+	"crypto/rand"
 	"encoding/json"
 	"fmt"
 	"time"
@@ -9,8 +9,8 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/entity/dag"
 	"github.com/MichaelMure/git-bug/identity"
-	"github.com/MichaelMure/git-bug/repository"
 )
 
 // OperationType is an operation type identifier
@@ -30,39 +30,27 @@ const (
 
 // Operation define the interface to fulfill for an edit operation of a Bug
 type Operation interface {
-	// base return the OpBase of the Operation, for package internal use
-	base() *OpBase
-	// Id return the identifier of the operation, to be used for back references
-	Id() entity.Id
+	dag.Operation
+
+	// Type return the type of the operation
+	Type() OperationType
+
 	// Time return the time when the operation was added
 	Time() time.Time
-	// GetFiles return the files needed by this operation
-	GetFiles() []repository.Hash
 	// Apply the operation to a Snapshot to create the final state
 	Apply(snapshot *Snapshot)
-	// Validate check if the operation is valid (ex: a title is a single line)
-	Validate() error
+
 	// SetMetadata store arbitrary metadata about the operation
 	SetMetadata(key string, value string)
 	// GetMetadata retrieve arbitrary metadata about the operation
 	GetMetadata(key string) (string, bool)
 	// AllMetadata return all metadata for this operation
 	AllMetadata() map[string]string
-	// GetAuthor return the author identity
-	GetAuthor() identity.Interface
-
-	// sign-post method for gqlgen
-	IsOperation()
-}
 
-func deriveId(data []byte) entity.Id {
-	sum := sha256.Sum256(data)
-	return entity.Id(fmt.Sprintf("%x", sum))
+	setExtraMetadataImmutable(key string, value string)
 }
 
-func idOperation(op Operation) entity.Id {
-	base := op.base()
-
+func idOperation(op Operation, base *OpBase) entity.Id {
 	if base.id == "" {
 		// something went really wrong
 		panic("op's id not set")
@@ -78,18 +66,83 @@ func idOperation(op Operation) entity.Id {
 			panic(err)
 		}
 
-		base.id = deriveId(data)
+		base.id = entity.DeriveId(data)
 	}
 	return base.id
 }
 
+func operationUnmarshaller(author identity.Interface, raw json.RawMessage) (dag.Operation, error) {
+	var t struct {
+		OperationType OperationType `json:"type"`
+	}
+
+	if err := json.Unmarshal(raw, &t); err != nil {
+		return nil, err
+	}
+
+	var op Operation
+
+	switch t.OperationType {
+	case AddCommentOp:
+		op = &AddCommentOperation{}
+	case CreateOp:
+		op = &CreateOperation{}
+	case EditCommentOp:
+		op = &EditCommentOperation{}
+	case LabelChangeOp:
+		op = &LabelChangeOperation{}
+	case NoOpOp:
+		op = &NoOpOperation{}
+	case SetMetadataOp:
+		op = &SetMetadataOperation{}
+	case SetStatusOp:
+		op = &SetStatusOperation{}
+	case SetTitleOp:
+		op = &SetTitleOperation{}
+	default:
+		panic(fmt.Sprintf("unknown operation type %v", t.OperationType))
+	}
+
+	err := json.Unmarshal(raw, &op)
+	if err != nil {
+		return nil, err
+	}
+
+	switch op := op.(type) {
+	case *AddCommentOperation:
+		op.Author_ = author
+	case *CreateOperation:
+		op.Author_ = author
+	case *LabelChangeOperation:
+		op.Author_ = author
+	case *NoOpOperation:
+		op.Author_ = author
+	case *SetMetadataOperation:
+		op.Author_ = author
+	case *SetStatusOperation:
+		op.Author_ = author
+	case *SetTitleOperation:
+		op.Author_ = author
+	default:
+		panic(fmt.Sprintf("unknown operation type %T", op))
+	}
+
+	return op, nil
+}
+
 // OpBase implement the common code for all operations
 type OpBase struct {
 	OperationType OperationType      `json:"type"`
-	Author        identity.Interface `json:"author"`
+	Author_       identity.Interface `json:"author"`
 	// TODO: part of the data model upgrade, this should eventually be a timestamp + lamport
 	UnixTime int64             `json:"timestamp"`
 	Metadata map[string]string `json:"metadata,omitempty"`
+
+	// mandatory random bytes to ensure a better randomness of the data used to later generate the ID
+	// len(Nonce) should be >= 20 and <= 64 bytes (enforced by Validate)
+	// It carries no semantic meaning and should be ignored when reading the operation.
+	Nonce []byte `json:"nonce"`
+
 	// Not serialized. Store the op's id in memory.
 	id entity.Id
 	// Not serialized. Store the extra metadata in memory,
@@ -101,21 +154,32 @@ type OpBase struct {
 func newOpBase(opType OperationType, author identity.Interface, unixTime int64) OpBase {
 	return OpBase{
 		OperationType: opType,
-		Author:        author,
+		Author_:       author,
 		UnixTime:      unixTime,
+		Nonce:         makeNonce(20),
 		id:            entity.UnsetId,
 	}
 }
 
-func (op *OpBase) UnmarshalJSON(data []byte) error {
+func makeNonce(len int) []byte {
+	result := make([]byte, len)
+	_, err := rand.Read(result)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+func (base *OpBase) UnmarshalJSON(data []byte) error {
 	// Compute the Id when loading the op from disk.
-	op.id = deriveId(data)
+	base.id = entity.DeriveId(data)
 
 	aux := struct {
 		OperationType OperationType     `json:"type"`
 		Author        json.RawMessage   `json:"author"`
 		UnixTime      int64             `json:"timestamp"`
 		Metadata      map[string]string `json:"metadata,omitempty"`
+		Nonce         []byte            `json:"nonce"`
 	}{}
 
 	if err := json.Unmarshal(data, &aux); err != nil {
@@ -128,92 +192,110 @@ func (op *OpBase) UnmarshalJSON(data []byte) error {
 		return err
 	}
 
-	op.OperationType = aux.OperationType
-	op.Author = author
-	op.UnixTime = aux.UnixTime
-	op.Metadata = aux.Metadata
+	base.OperationType = aux.OperationType
+	base.Author_ = author
+	base.UnixTime = aux.UnixTime
+	base.Metadata = aux.Metadata
+	base.Nonce = aux.Nonce
 
 	return nil
 }
 
-// Time return the time when the operation was added
-func (op *OpBase) Time() time.Time {
-	return time.Unix(op.UnixTime, 0)
+func (base *OpBase) Type() OperationType {
+	return base.OperationType
 }
 
-// GetFiles return the files needed by this operation
-func (op *OpBase) GetFiles() []repository.Hash {
-	return nil
+// Time return the time when the operation was added
+func (base *OpBase) Time() time.Time {
+	return time.Unix(base.UnixTime, 0)
 }
 
 // Validate check the OpBase for errors
-func opBaseValidate(op Operation, opType OperationType) error {
-	if op.base().OperationType != opType {
-		return fmt.Errorf("incorrect operation type (expected: %v, actual: %v)", opType, op.base().OperationType)
+func (base *OpBase) Validate(op Operation, opType OperationType) error {
+	if base.OperationType != opType {
+		return fmt.Errorf("incorrect operation type (expected: %v, actual: %v)", opType, base.OperationType)
 	}
 
 	if op.Time().Unix() == 0 {
 		return fmt.Errorf("time not set")
 	}
 
-	if op.base().Author == nil {
+	if base.Author_ == nil {
 		return fmt.Errorf("author not set")
 	}
 
-	if err := op.base().Author.Validate(); err != nil {
+	if err := op.Author().Validate(); err != nil {
 		return errors.Wrap(err, "author")
 	}
 
-	for _, hash := range op.GetFiles() {
-		if !hash.IsValid() {
-			return fmt.Errorf("file with invalid hash %v", hash)
+	if op, ok := op.(dag.OperationWithFiles); ok {
+		for _, hash := range op.GetFiles() {
+			if !hash.IsValid() {
+				return fmt.Errorf("file with invalid hash %v", hash)
+			}
 		}
 	}
 
+	if len(base.Nonce) > 64 {
+		return fmt.Errorf("nonce is too big")
+	}
+	if len(base.Nonce) < 20 {
+		return fmt.Errorf("nonce is too small")
+	}
+
 	return nil
 }
 
 // SetMetadata store arbitrary metadata about the operation
-func (op *OpBase) SetMetadata(key string, value string) {
-	if op.Metadata == nil {
-		op.Metadata = make(map[string]string)
+func (base *OpBase) SetMetadata(key string, value string) {
+	if base.Metadata == nil {
+		base.Metadata = make(map[string]string)
 	}
 
-	op.Metadata[key] = value
-	op.id = entity.UnsetId
+	base.Metadata[key] = value
+	base.id = entity.UnsetId
 }
 
 // GetMetadata retrieve arbitrary metadata about the operation
-func (op *OpBase) GetMetadata(key string) (string, bool) {
-	val, ok := op.Metadata[key]
+func (base *OpBase) GetMetadata(key string) (string, bool) {
+	val, ok := base.Metadata[key]
 
 	if ok {
 		return val, true
 	}
 
 	// extraMetadata can't replace the original operations value if any
-	val, ok = op.extraMetadata[key]
+	val, ok = base.extraMetadata[key]
 
 	return val, ok
 }
 
 // AllMetadata return all metadata for this operation
-func (op *OpBase) AllMetadata() map[string]string {
+func (base *OpBase) AllMetadata() map[string]string {
 	result := make(map[string]string)
 
-	for key, val := range op.extraMetadata {
+	for key, val := range base.extraMetadata {
 		result[key] = val
 	}
 
 	// Original metadata take precedence
-	for key, val := range op.Metadata {
+	for key, val := range base.Metadata {
 		result[key] = val
 	}
 
 	return result
 }
 
-// GetAuthor return author identity
-func (op *OpBase) GetAuthor() identity.Interface {
-	return op.Author
+func (base *OpBase) setExtraMetadataImmutable(key string, value string) {
+	if base.extraMetadata == nil {
+		base.extraMetadata = make(map[string]string)
+	}
+	if _, exist := base.extraMetadata[key]; !exist {
+		base.extraMetadata[key] = value
+	}
+}
+
+// Author return author identity
+func (base *OpBase) Author() identity.Interface {
+	return base.Author_
 }

bug/operation_iterator.go 🔗

@@ -1,72 +0,0 @@
-package bug
-
-type OperationIterator struct {
-	bug       *Bug
-	packIndex int
-	opIndex   int
-}
-
-func NewOperationIterator(bug Interface) *OperationIterator {
-	return &OperationIterator{
-		bug:       bugFromInterface(bug),
-		packIndex: 0,
-		opIndex:   -1,
-	}
-}
-
-func (it *OperationIterator) Next() bool {
-	// Special case of the staging area
-	if it.packIndex == len(it.bug.packs) {
-		pack := it.bug.staging
-		it.opIndex++
-		return it.opIndex < len(pack.Operations)
-	}
-
-	if it.packIndex >= len(it.bug.packs) {
-		return false
-	}
-
-	pack := it.bug.packs[it.packIndex]
-
-	it.opIndex++
-
-	if it.opIndex < len(pack.Operations) {
-		return true
-	}
-
-	// Note: this iterator doesn't handle the empty pack case
-	it.opIndex = 0
-	it.packIndex++
-
-	// Special case of the non-empty staging area
-	if it.packIndex == len(it.bug.packs) && len(it.bug.staging.Operations) > 0 {
-		return true
-	}
-
-	return it.packIndex < len(it.bug.packs)
-}
-
-func (it *OperationIterator) Value() Operation {
-	// Special case of the staging area
-	if it.packIndex == len(it.bug.packs) {
-		pack := it.bug.staging
-
-		if it.opIndex >= len(pack.Operations) {
-			panic("Iterator is not valid anymore")
-		}
-
-		return pack.Operations[it.opIndex]
-	}
-
-	if it.packIndex >= len(it.bug.packs) {
-		panic("Iterator is not valid anymore")
-	}
-
-	pack := it.bug.packs[it.packIndex]
-
-	if it.opIndex >= len(pack.Operations) {
-		panic("Iterator is not valid anymore")
-	}
-
-	return pack.Operations[it.opIndex]
-}

bug/operation_iterator_test.go 🔗

@@ -1,78 +0,0 @@
-package bug
-
-import (
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/require"
-
-	"github.com/MichaelMure/git-bug/identity"
-	"github.com/MichaelMure/git-bug/repository"
-)
-
-func ExampleOperationIterator() {
-	b := NewBug()
-
-	// add operations
-
-	it := NewOperationIterator(b)
-
-	for it.Next() {
-		// do something with each operations
-		_ = it.Value()
-	}
-}
-
-func TestOpIterator(t *testing.T) {
-	mockRepo := repository.NewMockRepoForTest()
-
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(mockRepo)
-	require.NoError(t, err)
-
-	unix := time.Now().Unix()
-
-	createOp := NewCreateOp(rene, unix, "title", "message", nil)
-	addCommentOp := NewAddCommentOp(rene, unix, "message2", nil)
-	setStatusOp := NewSetStatusOp(rene, unix, ClosedStatus)
-	labelChangeOp := NewLabelChangeOperation(rene, unix, []Label{"added"}, []Label{"removed"})
-
-	var i int
-	genTitleOp := func() Operation {
-		i++
-		return NewSetTitleOp(rene, unix, fmt.Sprintf("title%d", i), "")
-	}
-
-	bug1 := NewBug()
-
-	// first pack
-	bug1.Append(createOp)
-	bug1.Append(addCommentOp)
-	bug1.Append(setStatusOp)
-	bug1.Append(labelChangeOp)
-	err = bug1.Commit(mockRepo)
-	require.NoError(t, err)
-
-	// second pack
-	bug1.Append(genTitleOp())
-	bug1.Append(genTitleOp())
-	bug1.Append(genTitleOp())
-	err = bug1.Commit(mockRepo)
-	require.NoError(t, err)
-
-	// staging
-	bug1.Append(genTitleOp())
-	bug1.Append(genTitleOp())
-	bug1.Append(genTitleOp())
-
-	it := NewOperationIterator(bug1)
-
-	counter := 0
-	for it.Next() {
-		_ = it.Value()
-		counter++
-	}
-
-	require.Equal(t, 10, counter)
-}

bug/operation_pack.go 🔗

@@ -1,188 +0,0 @@
-package bug
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/pkg/errors"
-
-	"github.com/MichaelMure/git-bug/entity"
-	"github.com/MichaelMure/git-bug/repository"
-)
-
-// 1: original format
-// 2: no more legacy identities
-const formatVersion = 2
-
-// OperationPack represent an ordered set of operation to apply
-// to a Bug. These operations are stored in a single Git commit.
-//
-// These commits will be linked together in a linear chain of commits
-// inside Git to form the complete ordered chain of operation to
-// apply to get the final state of the Bug
-type OperationPack struct {
-	Operations []Operation
-
-	// Private field so not serialized
-	commitHash repository.Hash
-}
-
-func (opp *OperationPack) MarshalJSON() ([]byte, error) {
-	return json.Marshal(struct {
-		Version    uint        `json:"version"`
-		Operations []Operation `json:"ops"`
-	}{
-		Version:    formatVersion,
-		Operations: opp.Operations,
-	})
-}
-
-func (opp *OperationPack) UnmarshalJSON(data []byte) error {
-	aux := struct {
-		Version    uint              `json:"version"`
-		Operations []json.RawMessage `json:"ops"`
-	}{}
-
-	if err := json.Unmarshal(data, &aux); err != nil {
-		return err
-	}
-
-	if aux.Version < formatVersion {
-		return entity.NewErrOldFormatVersion(aux.Version)
-	}
-	if aux.Version > formatVersion {
-		return entity.NewErrNewFormatVersion(aux.Version)
-	}
-
-	for _, raw := range aux.Operations {
-		var t struct {
-			OperationType OperationType `json:"type"`
-		}
-
-		if err := json.Unmarshal(raw, &t); err != nil {
-			return err
-		}
-
-		// delegate to specialized unmarshal function
-		op, err := opp.unmarshalOp(raw, t.OperationType)
-		if err != nil {
-			return err
-		}
-
-		opp.Operations = append(opp.Operations, op)
-	}
-
-	return nil
-}
-
-func (opp *OperationPack) unmarshalOp(raw []byte, _type OperationType) (Operation, error) {
-	switch _type {
-	case AddCommentOp:
-		op := &AddCommentOperation{}
-		err := json.Unmarshal(raw, &op)
-		return op, err
-	case CreateOp:
-		op := &CreateOperation{}
-		err := json.Unmarshal(raw, &op)
-		return op, err
-	case EditCommentOp:
-		op := &EditCommentOperation{}
-		err := json.Unmarshal(raw, &op)
-		return op, err
-	case LabelChangeOp:
-		op := &LabelChangeOperation{}
-		err := json.Unmarshal(raw, &op)
-		return op, err
-	case NoOpOp:
-		op := &NoOpOperation{}
-		err := json.Unmarshal(raw, &op)
-		return op, err
-	case SetMetadataOp:
-		op := &SetMetadataOperation{}
-		err := json.Unmarshal(raw, &op)
-		return op, err
-	case SetStatusOp:
-		op := &SetStatusOperation{}
-		err := json.Unmarshal(raw, &op)
-		return op, err
-	case SetTitleOp:
-		op := &SetTitleOperation{}
-		err := json.Unmarshal(raw, &op)
-		return op, err
-	default:
-		return nil, fmt.Errorf("unknown operation type %v", _type)
-	}
-}
-
-// Append a new operation to the pack
-func (opp *OperationPack) Append(op Operation) {
-	opp.Operations = append(opp.Operations, op)
-}
-
-// IsEmpty tell if the OperationPack is empty
-func (opp *OperationPack) IsEmpty() bool {
-	return len(opp.Operations) == 0
-}
-
-// IsValid tell if the OperationPack is considered valid
-func (opp *OperationPack) Validate() error {
-	if opp.IsEmpty() {
-		return fmt.Errorf("empty")
-	}
-
-	for _, op := range opp.Operations {
-		if err := op.Validate(); err != nil {
-			return errors.Wrap(err, "op")
-		}
-	}
-
-	return nil
-}
-
-// Write will serialize and store the OperationPack as a git blob and return
-// its hash
-func (opp *OperationPack) Write(repo repository.ClockedRepo) (repository.Hash, error) {
-	// make sure we don't write invalid data
-	err := opp.Validate()
-	if err != nil {
-		return "", errors.Wrap(err, "validation error")
-	}
-
-	// First, make sure that all the identities are properly Commit as well
-	// TODO: this might be downgraded to "make sure it exist in git" but then, what make
-	// sure no data is lost on identities ?
-	for _, op := range opp.Operations {
-		if op.base().Author.NeedCommit() {
-			return "", fmt.Errorf("identity need commmit")
-		}
-	}
-
-	data, err := json.Marshal(opp)
-
-	if err != nil {
-		return "", err
-	}
-
-	hash, err := repo.StoreData(data)
-
-	if err != nil {
-		return "", err
-	}
-
-	return hash, nil
-}
-
-// Make a deep copy
-func (opp *OperationPack) Clone() OperationPack {
-
-	clone := OperationPack{
-		Operations: make([]Operation, len(opp.Operations)),
-		commitHash: opp.commitHash,
-	}
-
-	for i, op := range opp.Operations {
-		clone.Operations[i] = op
-	}
-
-	return clone
-}

bug/operation_pack_test.go 🔗

@@ -1,79 +0,0 @@
-package bug
-
-import (
-	"encoding/json"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"github.com/MichaelMure/git-bug/identity"
-	"github.com/MichaelMure/git-bug/repository"
-)
-
-func TestOperationPackSerialize(t *testing.T) {
-	opp := &OperationPack{}
-
-	repo := repository.NewMockRepoForTest()
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-	err := rene.Commit(repo)
-	require.NoError(t, err)
-
-	createOp := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
-	setTitleOp := NewSetTitleOp(rene, time.Now().Unix(), "title2", "title1")
-	addCommentOp := NewAddCommentOp(rene, time.Now().Unix(), "message2", nil)
-	setStatusOp := NewSetStatusOp(rene, time.Now().Unix(), ClosedStatus)
-	labelChangeOp := NewLabelChangeOperation(rene, time.Now().Unix(), []Label{"added"}, []Label{"removed"})
-
-	opp.Append(createOp)
-	opp.Append(setTitleOp)
-	opp.Append(addCommentOp)
-	opp.Append(setStatusOp)
-	opp.Append(labelChangeOp)
-
-	opMeta := NewSetTitleOp(rene, time.Now().Unix(), "title3", "title2")
-	opMeta.SetMetadata("key", "value")
-	opp.Append(opMeta)
-
-	assert.Equal(t, 1, len(opMeta.Metadata))
-
-	opFile := NewAddCommentOp(rene, time.Now().Unix(), "message", []repository.Hash{
-		"abcdef",
-		"ghijkl",
-	})
-	opp.Append(opFile)
-
-	assert.Equal(t, 2, len(opFile.Files))
-
-	data, err := json.Marshal(opp)
-	assert.NoError(t, err)
-
-	var opp2 *OperationPack
-	err = json.Unmarshal(data, &opp2)
-	assert.NoError(t, err)
-
-	ensureIds(opp)
-	ensureAuthors(t, opp, opp2)
-
-	assert.Equal(t, opp, opp2)
-}
-
-func ensureIds(opp *OperationPack) {
-	for _, op := range opp.Operations {
-		op.Id()
-	}
-}
-
-func ensureAuthors(t *testing.T, opp1 *OperationPack, opp2 *OperationPack) {
-	require.Equal(t, len(opp1.Operations), len(opp2.Operations))
-	for i := 0; i < len(opp1.Operations); i++ {
-		op1 := opp1.Operations[i]
-		op2 := opp2.Operations[i]
-
-		// ensure we have equivalent authors (IdentityStub vs Identity) then
-		// enforce equality
-		require.Equal(t, op1.base().Author.Id(), op2.base().Author.Id())
-		op1.base().Author = op2.base().Author
-	}
-}

bug/operation_test.go 🔗

@@ -11,7 +11,16 @@ import (
 )
 
 func TestValidate(t *testing.T) {
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
+	repo := repository.NewMockRepoClock()
+
+	makeIdentity := func(t *testing.T, name, email string) *identity.Identity {
+		i, err := identity.NewIdentity(repo, name, email)
+		require.NoError(t, err)
+		return i
+	}
+
+	rene := makeIdentity(t, "René Descartes", "rene@descartes.fr")
+
 	unix := time.Now().Unix()
 
 	good := []Operation{
@@ -30,13 +39,13 @@ func TestValidate(t *testing.T) {
 
 	bad := []Operation{
 		// opbase
-		NewSetStatusOp(identity.NewIdentity("", "rene@descartes.fr"), unix, ClosedStatus),
-		NewSetStatusOp(identity.NewIdentity("René Descartes\u001b", "rene@descartes.fr"), unix, ClosedStatus),
-		NewSetStatusOp(identity.NewIdentity("René Descartes", "rene@descartes.fr\u001b"), unix, ClosedStatus),
-		NewSetStatusOp(identity.NewIdentity("René \nDescartes", "rene@descartes.fr"), unix, ClosedStatus),
-		NewSetStatusOp(identity.NewIdentity("René Descartes", "rene@\ndescartes.fr"), unix, ClosedStatus),
+		NewSetStatusOp(makeIdentity(t, "", "rene@descartes.fr"), unix, ClosedStatus),
+		NewSetStatusOp(makeIdentity(t, "René Descartes\u001b", "rene@descartes.fr"), unix, ClosedStatus),
+		NewSetStatusOp(makeIdentity(t, "René Descartes", "rene@descartes.fr\u001b"), unix, ClosedStatus),
+		NewSetStatusOp(makeIdentity(t, "René \nDescartes", "rene@descartes.fr"), unix, ClosedStatus),
+		NewSetStatusOp(makeIdentity(t, "René Descartes", "rene@\ndescartes.fr"), unix, ClosedStatus),
 		&CreateOperation{OpBase: OpBase{
-			Author:        rene,
+			Author_:       rene,
 			UnixTime:      0,
 			OperationType: CreateOp,
 		},
@@ -68,7 +77,11 @@ func TestValidate(t *testing.T) {
 }
 
 func TestMetadata(t *testing.T) {
-	rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
+	repo := repository.NewMockRepoClock()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
+	require.NoError(t, err)
+
 	op := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
 
 	op.SetMetadata("key", "value")
@@ -83,13 +96,14 @@ func TestID(t *testing.T) {
 	defer repository.CleanupTestRepos(repo)
 
 	repos := []repository.ClockedRepo{
-		repository.NewMockRepoForTest(),
+		repository.NewMockRepo(),
 		repo,
 	}
 
 	for _, repo := range repos {
-		rene := identity.NewIdentity("René Descartes", "rene@descartes.fr")
-		err := rene.Commit(repo)
+		rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
+		require.NoError(t, err)
+		err = rene.Commit(repo)
 		require.NoError(t, err)
 
 		b, op, err := Create(rene, time.Now().Unix(), "title", "message")
@@ -107,7 +121,7 @@ func TestID(t *testing.T) {
 		require.NoError(t, id2.Validate())
 		require.Equal(t, id1, id2)
 
-		b2, err := ReadLocal(repo, b.Id())
+		b2, err := Read(repo, b.Id())
 		require.NoError(t, err)
 
 		op3 := b2.FirstOp()

bug/snapshot.go 🔗

@@ -28,6 +28,11 @@ type Snapshot struct {
 
 // Return the Bug identifier
 func (snap *Snapshot) Id() entity.Id {
+	if snap.id == "" {
+		// simply panic as it would be a coding error
+		// (using an id of a bug not stored yet)
+		panic("no id yet")
+	}
 	return snap.id
 }
 

bug/sorting.go 🔗

@@ -7,11 +7,11 @@ func (b BugsByCreationTime) Len() int {
 }
 
 func (b BugsByCreationTime) Less(i, j int) bool {
-	if b[i].createTime < b[j].createTime {
+	if b[i].CreateLamportTime() < b[j].CreateLamportTime() {
 		return true
 	}
 
-	if b[i].createTime > b[j].createTime {
+	if b[i].CreateLamportTime() > b[j].CreateLamportTime() {
 		return false
 	}
 
@@ -35,11 +35,11 @@ func (b BugsByEditTime) Len() int {
 }
 
 func (b BugsByEditTime) Less(i, j int) bool {
-	if b[i].editTime < b[j].editTime {
+	if b[i].EditLamportTime() < b[j].EditLamportTime() {
 		return true
 	}
 
-	if b[i].editTime > b[j].editTime {
+	if b[i].EditLamportTime() > b[j].EditLamportTime() {
 		return false
 	}
 

bug/with_snapshot.go 🔗

@@ -47,12 +47,6 @@ func (b *WithSnapshot) Commit(repo repository.ClockedRepo) error {
 		return nil
 	}
 
-	b.snap.id = b.Bug.id
+	b.snap.id = b.Bug.Id()
 	return nil
 }
-
-// Merge intercept Bug.Merge() and clear the snapshot
-func (b *WithSnapshot) Merge(repo repository.Repo, other Interface) (bool, error) {
-	b.snap = nil
-	return b.Bug.Merge(repo, other)
-}

cache/bug_cache.go 🔗

@@ -51,9 +51,7 @@ func (c *BugCache) ResolveOperationWithMetadata(key string, value string) (entit
 	// preallocate but empty
 	matching := make([]entity.Id, 0, 5)
 
-	it := bug.NewOperationIterator(c.bug)
-	for it.Next() {
-		op := it.Value()
+	for _, op := range c.bug.Operations() {
 		opValue, ok := op.GetMetadata(key)
 		if ok && value == opValue {
 			matching = append(matching, op.Id())

cache/bug_excerpt.go 🔗

@@ -87,7 +87,7 @@ func NewBugExcerpt(b bug.Interface, snap *bug.Snapshot) *BugExcerpt {
 	}
 
 	switch snap.Author.(type) {
-	case *identity.Identity, *IdentityCache:
+	case *identity.Identity, *identity.IdentityStub, *IdentityCache:
 		e.AuthorId = snap.Author.Id()
 	default:
 		panic("unhandled identity type")

cache/identity_cache.go 🔗

@@ -2,6 +2,7 @@ package cache
 
 import (
 	"github.com/MichaelMure/git-bug/identity"
+	"github.com/MichaelMure/git-bug/repository"
 )
 
 var _ identity.Interface = &IdentityCache{}
@@ -23,8 +24,11 @@ func (i *IdentityCache) notifyUpdated() error {
 	return i.repoCache.identityUpdated(i.Identity.Id())
 }
 
-func (i *IdentityCache) Mutate(f func(identity.Mutator) identity.Mutator) error {
-	i.Identity.Mutate(f)
+func (i *IdentityCache) Mutate(repo repository.RepoClock, f func(*identity.Mutator)) error {
+	err := i.Identity.Mutate(repo, f)
+	if err != nil {
+		return err
+	}
 	return i.notifyUpdated()
 }
 

cache/repo_cache.go 🔗

@@ -18,7 +18,8 @@ import (
 // 1: original format
 // 2: added cache for identities with a reference in the bug cache
 // 3: no more legacy identity
-const formatVersion = 3
+// 4: entities make their IDs from data, not git commit
+const formatVersion = 4
 
 // The maximum number of bugs loaded in memory. After that, eviction will be done.
 const defaultMaxLoadedBugs = 1000
@@ -194,7 +195,7 @@ func (c *RepoCache) buildCache() error {
 
 	c.bugExcerpts = make(map[entity.Id]*BugExcerpt)
 
-	allBugs := bug.ReadAllLocal(c.repo)
+	allBugs := bug.ReadAll(c.repo)
 
 	// wipe the index just to be sure
 	err := c.repo.ClearBleveIndex("bug")

cache/repo_cache_bug.go 🔗

@@ -18,10 +18,7 @@ import (
 	"github.com/MichaelMure/git-bug/repository"
 )
 
-const (
-	bugCacheFile   = "bug-cache"
-	searchCacheDir = "search-cache"
-)
+const bugCacheFile = "bug-cache"
 
 var errBugNotInCache = errors.New("bug missing from cache")
 
@@ -156,7 +153,7 @@ func (c *RepoCache) ResolveBug(id entity.Id) (*BugCache, error) {
 	}
 	c.muBug.RUnlock()
 
-	b, err := bug.ReadLocalWithResolver(c.repo, newIdentityCacheResolver(c), id)
+	b, err := bug.ReadWithResolver(c.repo, newIdentityCacheResolver(c), id)
 	if err != nil {
 		return nil, err
 	}
@@ -263,6 +260,53 @@ func (c *RepoCache) resolveBugMatcher(f func(*BugExcerpt) bool) (entity.Id, erro
 	return matching[0], nil
 }
 
+// ResolveComment search for a Bug/Comment combination matching the merged
+// bug/comment Id prefix. Returns the Bug containing the Comment and the Comment's
+// Id.
+func (c *RepoCache) ResolveComment(prefix string) (*BugCache, entity.Id, error) {
+	bugPrefix, _ := entity.SeparateIds(prefix)
+	bugCandidate := make([]entity.Id, 0, 5)
+
+	// build a list of possible matching bugs
+	c.muBug.RLock()
+	for _, excerpt := range c.bugExcerpts {
+		if excerpt.Id.HasPrefix(bugPrefix) {
+			bugCandidate = append(bugCandidate, excerpt.Id)
+		}
+	}
+	c.muBug.RUnlock()
+
+	matchingBugIds := make([]entity.Id, 0, 5)
+	matchingCommentId := entity.UnsetId
+	var matchingBug *BugCache
+
+	// search for matching comments
+	// searching every bug candidate allow for some collision with the bug prefix only,
+	// before being refined with the full comment prefix
+	for _, bugId := range bugCandidate {
+		b, err := c.ResolveBug(bugId)
+		if err != nil {
+			return nil, entity.UnsetId, err
+		}
+
+		for _, comment := range b.Snapshot().Comments {
+			if comment.Id().HasPrefix(prefix) {
+				matchingBugIds = append(matchingBugIds, bugId)
+				matchingBug = b
+				matchingCommentId = comment.Id()
+			}
+		}
+	}
+
+	if len(matchingBugIds) > 1 {
+		return nil, entity.UnsetId, entity.NewErrMultipleMatch("bug/comment", matchingBugIds)
+	} else if len(matchingBugIds) == 0 {
+		return nil, entity.UnsetId, errors.New("comment doesn't exist")
+	}
+
+	return matchingBug, matchingCommentId, nil
+}
+
 // QueryBugs return the id of all Bug matching the given Query
 func (c *RepoCache) QueryBugs(q *query.Query) ([]entity.Id, error) {
 	c.muBug.RLock()

cache/repo_cache_common.go 🔗

@@ -95,6 +95,12 @@ func (c *RepoCache) MergeAll(remote string) <-chan entity.MergeResult {
 	go func() {
 		defer close(out)
 
+		author, err := c.GetUserIdentity()
+		if err != nil {
+			out <- entity.NewMergeError(err, "")
+			return
+		}
+
 		results := identity.MergeAll(c.repo, remote)
 		for result := range results {
 			out <- result
@@ -112,7 +118,7 @@ func (c *RepoCache) MergeAll(remote string) <-chan entity.MergeResult {
 			}
 		}
 
-		results = bug.MergeAll(c.repo, remote)
+		results = bug.MergeAll(c.repo, remote, author)
 		for result := range results {
 			out <- result
 
@@ -130,11 +136,10 @@ func (c *RepoCache) MergeAll(remote string) <-chan entity.MergeResult {
 			}
 		}
 
-		err := c.write()
-
-		// No easy way out here ..
+		err = c.write()
 		if err != nil {
-			panic(err)
+			out <- entity.NewMergeError(err, "")
+			return
 		}
 	}()
 

cache/repo_cache_identity.go 🔗

@@ -225,17 +225,20 @@ func (c *RepoCache) NewIdentityFromGitUserRaw(metadata map[string]string) (*Iden
 // NewIdentity create a new identity
 // The new identity is written in the repository (commit)
 func (c *RepoCache) NewIdentity(name string, email string) (*IdentityCache, error) {
-	return c.NewIdentityRaw(name, email, "", "", nil)
+	return c.NewIdentityRaw(name, email, "", "", nil, nil)
 }
 
 // NewIdentityFull create a new identity
 // The new identity is written in the repository (commit)
-func (c *RepoCache) NewIdentityFull(name string, email string, login string, avatarUrl string) (*IdentityCache, error) {
-	return c.NewIdentityRaw(name, email, login, avatarUrl, nil)
+func (c *RepoCache) NewIdentityFull(name string, email string, login string, avatarUrl string, keys []*identity.Key) (*IdentityCache, error) {
+	return c.NewIdentityRaw(name, email, login, avatarUrl, keys, nil)
 }
 
-func (c *RepoCache) NewIdentityRaw(name string, email string, login string, avatarUrl string, metadata map[string]string) (*IdentityCache, error) {
-	i := identity.NewIdentityFull(name, email, login, avatarUrl)
+func (c *RepoCache) NewIdentityRaw(name string, email string, login string, avatarUrl string, keys []*identity.Key, metadata map[string]string) (*IdentityCache, error) {
+	i, err := identity.NewIdentityFull(c.repo, name, email, login, avatarUrl, keys)
+	if err != nil {
+		return nil, err
+	}
 	return c.finishIdentity(i, metadata)
 }
 

cache/repo_cache_test.go 🔗

@@ -110,8 +110,8 @@ func TestCache(t *testing.T) {
 	require.NoError(t, err)
 }
 
-func TestPushPull(t *testing.T) {
-	repoA, repoB, remote := repository.SetupReposAndRemote()
+func TestCachePushPull(t *testing.T) {
+	repoA, repoB, remote := repository.SetupGoGitReposAndRemote()
 	defer repository.CleanupTestRepos(repoA, repoB, remote)
 
 	cacheA, err := NewRepoCache(repoA)
@@ -125,6 +125,10 @@ func TestPushPull(t *testing.T) {
 	require.NoError(t, err)
 	err = cacheA.SetUserIdentity(reneA)
 	require.NoError(t, err)
+	isaacB, err := cacheB.NewIdentity("Isaac Newton", "isaac@newton.uk")
+	require.NoError(t, err)
+	err = cacheB.SetUserIdentity(isaacB)
+	require.NoError(t, err)
 
 	// distribute the identity
 	_, err = cacheA.Push("origin")

commands/comment.go 🔗

@@ -22,6 +22,7 @@ func newCommentCommand() *cobra.Command {
 	}
 
 	cmd.AddCommand(newCommentAddCommand())
+	cmd.AddCommand(newCommentEditCommand())
 
 	return cmd
 }

commands/comment_edit.go 🔗

@@ -0,0 +1,71 @@
+package commands
+
+import (
+	"github.com/spf13/cobra"
+
+	"github.com/MichaelMure/git-bug/input"
+)
+
+type commentEditOptions struct {
+	messageFile string
+	message     string
+}
+
+func newCommentEditCommand() *cobra.Command {
+	env := newEnv()
+	options := commentEditOptions{}
+
+	cmd := &cobra.Command{
+		Use:      "edit [COMMENT_ID]",
+		Short:    "Edit an existing comment on a bug.",
+		Args:     cobra.ExactArgs(1),
+		PreRunE:  loadBackendEnsureUser(env),
+		PostRunE: closeBackend(env),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return runCommentEdit(env, options, args)
+		},
+	}
+
+	flags := cmd.Flags()
+	flags.SortFlags = false
+
+	flags.StringVarP(&options.messageFile, "file", "F", "",
+		"Take the message from the given file. Use - to read the message from the standard input")
+
+	flags.StringVarP(&options.message, "message", "m", "",
+		"Provide the new message from the command line")
+
+	return cmd
+}
+
+func runCommentEdit(env *Env, opts commentEditOptions, args []string) error {
+	b, commentId, err := env.backend.ResolveComment(args[0])
+	if err != nil {
+		return err
+	}
+
+	if opts.messageFile != "" && opts.message == "" {
+		opts.message, err = input.BugCommentFileInput(opts.messageFile)
+		if err != nil {
+			return err
+		}
+	}
+
+	if opts.messageFile == "" && opts.message == "" {
+		opts.message, err = input.BugCommentEditorInput(env.backend, "")
+		if err == input.ErrEmptyMessage {
+			env.err.Println("Empty message, aborting.")
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = b.EditComment(commentId, opts.message)
+	if err != nil {
+		return err
+	}
+
+	return b.Commit()
+}

commands/show.go 🔗

@@ -158,8 +158,9 @@ func showDefaultFormatter(env *Env, snapshot *bug.Snapshot) error {
 
 	for i, comment := range snapshot.Comments {
 		var message string
-		env.out.Printf("%s#%d %s <%s>\n\n",
+		env.out.Printf("%s%s #%d %s <%s>\n\n",
 			indent,
+			comment.Id().Human(),
 			i,
 			comment.Author.DisplayName(),
 			comment.Author.Email(),

commands/user.go 🔗

@@ -35,7 +35,7 @@ func newUserCommand() *cobra.Command {
 	flags.SortFlags = false
 
 	flags.StringVarP(&options.fields, "field", "f", "",
-		"Select field to display. Valid values are [email,humanId,id,lastModification,lastModificationLamport,login,metadata,name]")
+		"Select field to display. Valid values are [email,humanId,id,lastModification,lastModificationLamports,login,metadata,name]")
 
 	return cmd
 }
@@ -71,7 +71,9 @@ func runUser(env *Env, opts userOptions, args []string) error {
 			env.out.Printf("%s\n", id.LastModification().
 				Time().Format("Mon Jan 2 15:04:05 2006 +0200"))
 		case "lastModificationLamport":
-			env.out.Printf("%d\n", id.LastModificationLamport())
+			for name, t := range id.LastModificationLamports() {
+				env.out.Printf("%s\n%d\n", name, t)
+			}
 		case "metadata":
 			for key, value := range id.ImmutableMetadata() {
 				env.out.Printf("%s\n%s\n", key, value)
@@ -90,9 +92,11 @@ func runUser(env *Env, opts userOptions, args []string) error {
 	env.out.Printf("Name: %s\n", id.Name())
 	env.out.Printf("Email: %s\n", id.Email())
 	env.out.Printf("Login: %s\n", id.Login())
-	env.out.Printf("Last modification: %s (lamport %d)\n",
-		id.LastModification().Time().Format("Mon Jan 2 15:04:05 2006 +0200"),
-		id.LastModificationLamport())
+	env.out.Printf("Last modification: %s\n", id.LastModification().Time().Format("Mon Jan 2 15:04:05 2006 +0200"))
+	env.out.Printf("Last modification (lamport):\n")
+	for name, t := range id.LastModificationLamports() {
+		env.out.Printf("\t%s: %d\n", name, t)
+	}
 	env.out.Println("Metadata:")
 	for key, value := range id.ImmutableMetadata() {
 		env.out.Printf("    %s --> %s\n", key, value)

commands/user_create.go 🔗

@@ -48,7 +48,7 @@ func runUserCreate(env *Env) error {
 		return err
 	}
 
-	id, err := env.backend.NewIdentityRaw(name, email, "", avatarURL, nil)
+	id, err := env.backend.NewIdentityRaw(name, email, "", avatarURL, nil, nil)
 	if err != nil {
 		return err
 	}

doc/man/git-bug-comment-edit.1 🔗

@@ -0,0 +1,35 @@
+.nh
+.TH "GIT\-BUG" "1" "Apr 2019" "Generated from git\-bug's source code" ""
+
+.SH NAME
+.PP
+git\-bug\-comment\-edit \- Edit an existing comment on a bug.
+
+
+.SH SYNOPSIS
+.PP
+\fBgit\-bug comment edit [COMMENT\_ID] [flags]\fP
+
+
+.SH DESCRIPTION
+.PP
+Edit an existing comment on a bug.
+
+
+.SH OPTIONS
+.PP
+\fB\-F\fP, \fB\-\-file\fP=""
+	Take the message from the given file. Use \- to read the message from the standard input
+
+.PP
+\fB\-m\fP, \fB\-\-message\fP=""
+	Provide the new message from the command line
+
+.PP
+\fB\-h\fP, \fB\-\-help\fP[=false]
+	help for edit
+
+
+.SH SEE ALSO
+.PP
+\fBgit\-bug\-comment(1)\fP

doc/man/git-bug-comment.1 🔗

@@ -24,4 +24,4 @@ Display or add comments to a bug.
 
 .SH SEE ALSO
 .PP
-\fBgit\-bug(1)\fP, \fBgit\-bug\-comment\-add(1)\fP
+\fBgit\-bug(1)\fP, \fBgit\-bug\-comment\-add(1)\fP, \fBgit\-bug\-comment\-edit(1)\fP

doc/man/git-bug-user.1 🔗

@@ -19,7 +19,7 @@ Display or change the user identity.
 .SH OPTIONS
 .PP
 \fB\-f\fP, \fB\-\-field\fP=""
-	Select field to display. Valid values are [email,humanId,id,lastModification,lastModificationLamport,login,metadata,name]
+	Select field to display. Valid values are [email,humanId,id,lastModification,lastModificationLamports,login,metadata,name]
 
 .PP
 \fB\-h\fP, \fB\-\-help\fP[=false]

doc/md/git-bug_comment.md 🔗

@@ -16,4 +16,5 @@ git-bug comment [ID] [flags]
 
 * [git-bug](git-bug.md)	 - A bug tracker embedded in Git.
 * [git-bug comment add](git-bug_comment_add.md)	 - Add a new comment to a bug.
+* [git-bug comment edit](git-bug_comment_edit.md)	 - Edit an existing comment on a bug.
 

doc/md/git-bug_comment_edit.md 🔗

@@ -0,0 +1,20 @@
+## git-bug comment edit
+
+Edit an existing comment on a bug.
+
+```
+git-bug comment edit [COMMENT_ID] [flags]
+```
+
+### Options
+
+```
+  -F, --file string      Take the message from the given file. Use - to read the message from the standard input
+  -m, --message string   Provide the new message from the command line
+  -h, --help             help for edit
+```
+
+### SEE ALSO
+
+* [git-bug comment](git-bug_comment.md)	 - Display or add comments to a bug.
+

doc/md/git-bug_user.md 🔗

@@ -9,7 +9,7 @@ git-bug user [USER-ID] [flags]
 ### Options
 
 ```
-  -f, --field string   Select field to display. Valid values are [email,humanId,id,lastModification,lastModificationLamport,login,metadata,name]
+  -f, --field string   Select field to display. Valid values are [email,humanId,id,lastModification,lastModificationLamports,login,metadata,name]
   -h, --help           help for user
 ```
 

entity/dag/clock.go 🔗

@@ -0,0 +1,37 @@
+package dag
+
+import (
+	"fmt"
+
+	"github.com/MichaelMure/git-bug/identity"
+	"github.com/MichaelMure/git-bug/repository"
+)
+
+// ClockLoader is the repository.ClockLoader for Entity
+func ClockLoader(defs ...Definition) repository.ClockLoader {
+	clocks := make([]string, 0, len(defs)*2)
+	for _, def := range defs {
+		clocks = append(clocks, fmt.Sprintf(creationClockPattern, def.Namespace))
+		clocks = append(clocks, fmt.Sprintf(editClockPattern, def.Namespace))
+	}
+
+	return repository.ClockLoader{
+		Clocks: clocks,
+		Witnesser: func(repo repository.ClockedRepo) error {
+			// We don't care about the actual identity so an IdentityStub will do
+			resolver := identity.NewStubResolver()
+
+			for _, def := range defs {
+				// we actually just need to read all entities,
+				// as that will create and update the clocks
+				// TODO: concurrent loading to be faster?
+				for b := range ReadAll(def, repo, resolver) {
+					if b.Err != nil {
+						return b.Err
+					}
+				}
+			}
+			return nil
+		},
+	}
+}

entity/dag/common_test.go 🔗

@@ -0,0 +1,173 @@
+package dag
+
+import (
+	"encoding/json"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/identity"
+	"github.com/MichaelMure/git-bug/repository"
+)
+
+// This file contains an example dummy entity to be used in the tests
+
+/*
+ Operations
+*/
+
+type op1 struct {
+	author identity.Interface
+
+	OperationType int               `json:"type"`
+	Field1        string            `json:"field_1"`
+	Files         []repository.Hash `json:"files"`
+}
+
+func newOp1(author identity.Interface, field1 string, files ...repository.Hash) *op1 {
+	return &op1{author: author, OperationType: 1, Field1: field1, Files: files}
+}
+
+func (o *op1) Id() entity.Id {
+	data, _ := json.Marshal(o)
+	return entity.DeriveId(data)
+}
+
+func (o *op1) Validate() error { return nil }
+
+func (o *op1) Author() identity.Interface {
+	return o.author
+}
+
+func (o *op1) GetFiles() []repository.Hash {
+	return o.Files
+}
+
+type op2 struct {
+	author identity.Interface
+
+	OperationType int    `json:"type"`
+	Field2        string `json:"field_2"`
+}
+
+func newOp2(author identity.Interface, field2 string) *op2 {
+	return &op2{author: author, OperationType: 2, Field2: field2}
+}
+
+func (o *op2) Id() entity.Id {
+	data, _ := json.Marshal(o)
+	return entity.DeriveId(data)
+}
+
+func (o *op2) Validate() error { return nil }
+
+func (o *op2) Author() identity.Interface {
+	return o.author
+}
+
+func unmarshaler(author identity.Interface, raw json.RawMessage) (Operation, error) {
+	var t struct {
+		OperationType int `json:"type"`
+	}
+
+	if err := json.Unmarshal(raw, &t); err != nil {
+		return nil, err
+	}
+
+	switch t.OperationType {
+	case 1:
+		op := &op1{}
+		err := json.Unmarshal(raw, &op)
+		op.author = author
+		return op, err
+	case 2:
+		op := &op2{}
+		err := json.Unmarshal(raw, &op)
+		op.author = author
+		return op, err
+	default:
+		return nil, fmt.Errorf("unknown operation type %v", t.OperationType)
+	}
+}
+
+/*
+  Identities + repo + definition
+*/
+
+func makeTestContext() (repository.ClockedRepo, identity.Interface, identity.Interface, identity.Resolver, Definition) {
+	repo := repository.NewMockRepo()
+	id1, id2, resolver, def := makeTestContextInternal(repo)
+	return repo, id1, id2, resolver, def
+}
+
+func makeTestContextRemote(t *testing.T) (repository.ClockedRepo, repository.ClockedRepo, repository.ClockedRepo, identity.Interface, identity.Interface, identity.Resolver, Definition) {
+	repoA := repository.CreateGoGitTestRepo(false)
+	repoB := repository.CreateGoGitTestRepo(false)
+	remote := repository.CreateGoGitTestRepo(true)
+
+	err := repoA.AddRemote("remote", remote.GetLocalRemote())
+	require.NoError(t, err)
+	err = repoA.AddRemote("repoB", repoB.GetLocalRemote())
+	require.NoError(t, err)
+	err = repoB.AddRemote("remote", remote.GetLocalRemote())
+	require.NoError(t, err)
+	err = repoB.AddRemote("repoA", repoA.GetLocalRemote())
+	require.NoError(t, err)
+
+	id1, id2, resolver, def := makeTestContextInternal(repoA)
+
+	// distribute the identities
+	_, err = identity.Push(repoA, "remote")
+	require.NoError(t, err)
+	err = identity.Pull(repoB, "remote")
+	require.NoError(t, err)
+
+	return repoA, repoB, remote, id1, id2, resolver, def
+}
+
+func makeTestContextInternal(repo repository.ClockedRepo) (identity.Interface, identity.Interface, identity.Resolver, Definition) {
+	id1, err := identity.NewIdentity(repo, "name1", "email1")
+	if err != nil {
+		panic(err)
+	}
+	err = id1.Commit(repo)
+	if err != nil {
+		panic(err)
+	}
+	id2, err := identity.NewIdentity(repo, "name2", "email2")
+	if err != nil {
+		panic(err)
+	}
+	err = id2.Commit(repo)
+	if err != nil {
+		panic(err)
+	}
+
+	resolver := identityResolverFunc(func(id entity.Id) (identity.Interface, error) {
+		switch id {
+		case id1.Id():
+			return id1, nil
+		case id2.Id():
+			return id2, nil
+		default:
+			return nil, identity.ErrIdentityNotExist
+		}
+	})
+
+	def := Definition{
+		Typename:             "foo",
+		Namespace:            "foos",
+		OperationUnmarshaler: unmarshaler,
+		FormatVersion:        1,
+	}
+
+	return id1, id2, resolver, def
+}
+
+type identityResolverFunc func(id entity.Id) (identity.Interface, error)
+
+func (fn identityResolverFunc) ResolveIdentity(id entity.Id) (identity.Interface, error) {
+	return fn(id)
+}

entity/dag/entity.go 🔗

@@ -0,0 +1,439 @@
+// Package dag contains the base common code to define an entity stored
+// in a chain of git objects, supporting actions like Push, Pull and Merge.
+package dag
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+
+	"github.com/pkg/errors"
+
+	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/identity"
+	"github.com/MichaelMure/git-bug/repository"
+	"github.com/MichaelMure/git-bug/util/lamport"
+)
+
+const refsPattern = "refs/%s/%s"
+const creationClockPattern = "%s-create"
+const editClockPattern = "%s-edit"
+
+// Definition hold the details defining one specialization of an Entity.
+type Definition struct {
+	// the name of the entity (bug, pull-request, ...)
+	Typename string
+	// the Namespace in git (bugs, prs, ...)
+	Namespace string
+	// a function decoding a JSON message into an Operation
+	OperationUnmarshaler func(author identity.Interface, raw json.RawMessage) (Operation, error)
+	// the expected format version number, that can be used for data migration/upgrade
+	FormatVersion uint
+}
+
+// Entity is a data structure stored in a chain of git objects, supporting actions like Push, Pull and Merge.
+type Entity struct {
+	// A Lamport clock is a logical clock that allows ordering events
+	// inside a distributed system.
+	// It must be the first field in this struct due to https://github.com/golang/go/issues/36606
+	createTime lamport.Time
+	editTime   lamport.Time
+
+	Definition
+
+	// operations that are already stored in the repository
+	ops []Operation
+	// operations not yet stored in the repository
+	staging []Operation
+
+	lastCommit repository.Hash
+}
+
+// New create an empty Entity
+func New(definition Definition) *Entity {
+	return &Entity{
+		Definition: definition,
+	}
+}
+
+// Read will read and decode a stored local Entity from a repository
+func Read(def Definition, repo repository.ClockedRepo, resolver identity.Resolver, id entity.Id) (*Entity, error) {
+	if err := id.Validate(); err != nil {
+		return nil, errors.Wrap(err, "invalid id")
+	}
+
+	ref := fmt.Sprintf("refs/%s/%s", def.Namespace, id.String())
+
+	return read(def, repo, resolver, ref)
+}
+
+// readRemote will read and decode a stored remote Entity from a repository
+func readRemote(def Definition, repo repository.ClockedRepo, resolver identity.Resolver, remote string, id entity.Id) (*Entity, error) {
+	if err := id.Validate(); err != nil {
+		return nil, errors.Wrap(err, "invalid id")
+	}
+
+	ref := fmt.Sprintf("refs/remotes/%s/%s/%s", def.Namespace, remote, id.String())
+
+	return read(def, repo, resolver, ref)
+}
+
+// read fetch from git and decode an Entity at an arbitrary git reference.
+func read(def Definition, repo repository.ClockedRepo, resolver identity.Resolver, ref string) (*Entity, error) {
+	rootHash, err := repo.ResolveRef(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	// Perform a breadth-first search to get a topological order of the DAG where we discover the
+	// parents commit and go back in time up to the chronological root
+
+	queue := make([]repository.Hash, 0, 32)
+	visited := make(map[repository.Hash]struct{})
+	BFSOrder := make([]repository.Commit, 0, 32)
+
+	queue = append(queue, rootHash)
+	visited[rootHash] = struct{}{}
+
+	for len(queue) > 0 {
+		// pop
+		hash := queue[0]
+		queue = queue[1:]
+
+		commit, err := repo.ReadCommit(hash)
+		if err != nil {
+			return nil, err
+		}
+
+		BFSOrder = append(BFSOrder, commit)
+
+		for _, parent := range commit.Parents {
+			if _, ok := visited[parent]; !ok {
+				queue = append(queue, parent)
+				// mark as visited
+				visited[parent] = struct{}{}
+			}
+		}
+	}
+
+	// Now, we can reverse this topological order and read the commits in an order where
+	// we are sure to have read all the chronological ancestors when we read a commit.
+
+	// Next step is to:
+	// 1) read the operationPacks
+	// 2) make sure that the clocks causality respect the DAG topology.
+
+	oppMap := make(map[repository.Hash]*operationPack)
+	var opsCount int
+
+	for i := len(BFSOrder) - 1; i >= 0; i-- {
+		commit := BFSOrder[i]
+		isFirstCommit := i == len(BFSOrder)-1
+		isMerge := len(commit.Parents) > 1
+
+		// Verify DAG structure: single chronological root, so only the root
+		// can have no parents. Said otherwise, the DAG need to have exactly
+		// one leaf.
+		if !isFirstCommit && len(commit.Parents) == 0 {
+			return nil, fmt.Errorf("multiple leafs in the entity DAG")
+		}
+
+		opp, err := readOperationPack(def, repo, resolver, commit)
+		if err != nil {
+			return nil, err
+		}
+
+		err = opp.Validate()
+		if err != nil {
+			return nil, err
+		}
+
+		if isMerge && len(opp.Operations) > 0 {
+			return nil, fmt.Errorf("merge commit cannot have operations")
+		}
+
+		// Check that the create lamport clock is set (not checked in Validate() as it's optional)
+		if isFirstCommit && opp.CreateTime <= 0 {
+			return nil, fmt.Errorf("creation lamport time not set")
+		}
+
+		// make sure that the lamport clocks causality match the DAG topology
+		for _, parentHash := range commit.Parents {
+			parentPack, ok := oppMap[parentHash]
+			if !ok {
+				panic("DFS failed")
+			}
+
+			if parentPack.EditTime >= opp.EditTime {
+				return nil, fmt.Errorf("lamport clock ordering doesn't match the DAG")
+			}
+
+			// to avoid an attack where clocks are pushed toward the uint64 rollover, make sure
+			// that the clocks don't jump too far in the future
+			// we ignore merge commits here to allow merging after a loooong time without breaking anything,
+			// as long as there is one valid chain of small hops, it's fine.
+			if !isMerge && opp.EditTime-parentPack.EditTime > 1_000_000 {
+				return nil, fmt.Errorf("lamport clock jumping too far in the future, likely an attack")
+			}
+		}
+
+		oppMap[commit.Hash] = opp
+		opsCount += len(opp.Operations)
+	}
+
+	// The clocks are fine, we witness them
+	for _, opp := range oppMap {
+		err = repo.Witness(fmt.Sprintf(creationClockPattern, def.Namespace), opp.CreateTime)
+		if err != nil {
+			return nil, err
+		}
+		err = repo.Witness(fmt.Sprintf(editClockPattern, def.Namespace), opp.EditTime)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Now that we know that the topological order and clocks are fine, we order the operationPacks
+	// based on the logical clocks, entirely ignoring the DAG topology
+
+	oppSlice := make([]*operationPack, 0, len(oppMap))
+	for _, pack := range oppMap {
+		oppSlice = append(oppSlice, pack)
+	}
+	sort.Slice(oppSlice, func(i, j int) bool {
+		// Primary ordering with the EditTime.
+		if oppSlice[i].EditTime != oppSlice[j].EditTime {
+			return oppSlice[i].EditTime < oppSlice[j].EditTime
+		}
+		// We have equal EditTime, which means we have concurrent edits on different machines and we
+		// can't tell which one came first. So, what now? We still need a total ordering and the most stable possible.
+		// As a secondary ordering, we can order based on a hash of the serialized Operations in the
+		// operationPack. It doesn't carry much meaning but it's unbiased and hard to abuse.
+		// This is a lexicographic ordering on the stringified ID.
+		return oppSlice[i].Id() < oppSlice[j].Id()
+	})
+
+	// Now that we ordered the operationPacks, we have the order of the Operations
+
+	ops := make([]Operation, 0, opsCount)
+	var createTime lamport.Time
+	var editTime lamport.Time
+	for _, pack := range oppSlice {
+		for _, operation := range pack.Operations {
+			ops = append(ops, operation)
+		}
+		if pack.CreateTime > createTime {
+			createTime = pack.CreateTime
+		}
+		if pack.EditTime > editTime {
+			editTime = pack.EditTime
+		}
+	}
+
+	return &Entity{
+		Definition: def,
+		ops:        ops,
+		lastCommit: rootHash,
+		createTime: createTime,
+		editTime:   editTime,
+	}, nil
+}
+
+type StreamedEntity struct {
+	Entity *Entity
+	Err    error
+}
+
+// ReadAll read and parse all local Entity
+func ReadAll(def Definition, repo repository.ClockedRepo, resolver identity.Resolver) <-chan StreamedEntity {
+	out := make(chan StreamedEntity)
+
+	go func() {
+		defer close(out)
+
+		refPrefix := fmt.Sprintf("refs/%s/", def.Namespace)
+
+		refs, err := repo.ListRefs(refPrefix)
+		if err != nil {
+			out <- StreamedEntity{Err: err}
+			return
+		}
+
+		for _, ref := range refs {
+			e, err := read(def, repo, resolver, ref)
+
+			if err != nil {
+				out <- StreamedEntity{Err: err}
+				return
+			}
+
+			out <- StreamedEntity{Entity: e}
+		}
+	}()
+
+	return out
+}
+
+// Id return the Entity identifier
+func (e *Entity) Id() entity.Id {
+	// id is the id of the first operation
+	return e.FirstOp().Id()
+}
+
+// Validate check if the Entity data is valid
+func (e *Entity) Validate() error {
+	// non-empty
+	if len(e.ops) == 0 && len(e.staging) == 0 {
+		return fmt.Errorf("entity has no operations")
+	}
+
+	// check if each operations are valid
+	for _, op := range e.ops {
+		if err := op.Validate(); err != nil {
+			return err
+		}
+	}
+
+	// check if staging is valid if needed
+	for _, op := range e.staging {
+		if err := op.Validate(); err != nil {
+			return err
+		}
+	}
+
+	// Check that there is no colliding operation's ID
+	ids := make(map[entity.Id]struct{})
+	for _, op := range e.Operations() {
+		if _, ok := ids[op.Id()]; ok {
+			return fmt.Errorf("id collision: %s", op.Id())
+		}
+		ids[op.Id()] = struct{}{}
+	}
+
+	return nil
+}
+
+// Operations return the ordered operations
+func (e *Entity) Operations() []Operation {
+	return append(e.ops, e.staging...)
+}
+
+// FirstOp lookup for the very first operation of the Entity
+func (e *Entity) FirstOp() Operation {
+	for _, op := range e.ops {
+		return op
+	}
+	for _, op := range e.staging {
+		return op
+	}
+	return nil
+}
+
+// LastOp lookup for the very last operation of the Entity
+func (e *Entity) LastOp() Operation {
+	if len(e.staging) > 0 {
+		return e.staging[len(e.staging)-1]
+	}
+	if len(e.ops) > 0 {
+		return e.ops[len(e.ops)-1]
+	}
+	return nil
+}
+
+// Append add a new Operation to the Entity
+func (e *Entity) Append(op Operation) {
+	e.staging = append(e.staging, op)
+}
+
+// NeedCommit indicate if the in-memory state changed and need to be commit in the repository
+func (e *Entity) NeedCommit() bool {
+	return len(e.staging) > 0
+}
+
+// CommitAsNeeded execute a Commit only if necessary. This function is useful to avoid getting an error if the Entity
+// is already in sync with the repository.
+func (e *Entity) CommitAsNeeded(repo repository.ClockedRepo) error {
+	if e.NeedCommit() {
+		return e.Commit(repo)
+	}
+	return nil
+}
+
+// Commit write the appended operations in the repository
+func (e *Entity) Commit(repo repository.ClockedRepo) error {
+	if !e.NeedCommit() {
+		return fmt.Errorf("can't commit an entity with no pending operation")
+	}
+
+	err := e.Validate()
+	if err != nil {
+		return errors.Wrapf(err, "can't commit a %s with invalid data", e.Definition.Typename)
+	}
+
+	for len(e.staging) > 0 {
+		var author identity.Interface
+		var toCommit []Operation
+
+		// Split into chunks with the same author
+		for len(e.staging) > 0 {
+			op := e.staging[0]
+			if author != nil && op.Author().Id() != author.Id() {
+				break
+			}
+			author = e.staging[0].Author()
+			toCommit = append(toCommit, op)
+			e.staging = e.staging[1:]
+		}
+
+		e.editTime, err = repo.Increment(fmt.Sprintf(editClockPattern, e.Namespace))
+		if err != nil {
+			return err
+		}
+
+		opp := &operationPack{
+			Author:     author,
+			Operations: toCommit,
+			EditTime:   e.editTime,
+		}
+
+		if e.lastCommit == "" {
+			e.createTime, err = repo.Increment(fmt.Sprintf(creationClockPattern, e.Namespace))
+			if err != nil {
+				return err
+			}
+			opp.CreateTime = e.createTime
+		}
+
+		var parentCommit []repository.Hash
+		if e.lastCommit != "" {
+			parentCommit = []repository.Hash{e.lastCommit}
+		}
+
+		commitHash, err := opp.Write(e.Definition, repo, parentCommit...)
+		if err != nil {
+			return err
+		}
+
+		e.lastCommit = commitHash
+		e.ops = append(e.ops, toCommit...)
+	}
+
+	// not strictly necessary but make equality testing easier in tests
+	e.staging = nil
+
+	// Create or update the Git reference for this entity
+	// When pushing later, the remote will ensure that this ref update
+	// is fast-forward, that is no data has been overwritten.
+	ref := fmt.Sprintf(refsPattern, e.Namespace, e.Id().String())
+	return repo.UpdateRef(ref, e.lastCommit)
+}
+
+// CreateLamportTime return the Lamport time of creation
+func (e *Entity) CreateLamportTime() lamport.Time {
+	return e.createTime
+}
+
+// EditLamportTime return the Lamport time of the last edition
+func (e *Entity) EditLamportTime() lamport.Time {
+	return e.editTime
+}

entity/dag/entity_actions.go 🔗

@@ -0,0 +1,260 @@
+package dag
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+
+	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/identity"
+	"github.com/MichaelMure/git-bug/repository"
+)
+
+// ListLocalIds list all the available local Entity's Id, that is, the Ids found
+// under the "refs/<namespace>/" local refs.
+func ListLocalIds(def Definition, repo repository.RepoData) ([]entity.Id, error) {
+	refs, err := repo.ListRefs(fmt.Sprintf("refs/%s/", def.Namespace))
+	if err != nil {
+		return nil, err
+	}
+	return entity.RefsToIds(refs), nil
+}
+
+// Fetch retrieve updates from a remote, under the remote-tracking refs.
+// This does not change the local entity state.
+// It returns the stdout of the underlying fetch, for display to the user.
+func Fetch(def Definition, repo repository.Repo, remote string) (string, error) {
+	return repo.FetchRefs(remote, def.Namespace)
+}
+
+// Push update a remote with the local changes.
+// It returns the stdout of the underlying push, for display to the user.
+func Push(def Definition, repo repository.Repo, remote string) (string, error) {
+	return repo.PushRefs(remote, def.Namespace)
+}
+
+// Pull will do a Fetch + MergeAll
+// Contrary to MergeAll, this function will return an error if a merge fails.
+// The author is only used if a true merge commit has to be created (see MergeAll,
+// scenario 5).
+func Pull(def Definition, repo repository.ClockedRepo, resolver identity.Resolver, remote string, author identity.Interface) error {
+	_, err := Fetch(def, repo, remote)
+	if err != nil {
+		return err
+	}
+
+	for merge := range MergeAll(def, repo, resolver, remote, author) {
+		if merge.Err != nil {
+			return merge.Err
+		}
+		if merge.Status == entity.MergeStatusInvalid {
+			return errors.Errorf("merge failure: %s", merge.Reason)
+		}
+	}
+
+	return nil
+}
+
+// MergeAll will merge all the available remote Entity:
+//
+// Multiple scenarios exist:
+// 1. if the remote Entity doesn't exist locally, it's created
+//    --> emit entity.MergeStatusNew
+// 2. if the remote and local Entity have the same state, nothing is changed
+//    --> emit entity.MergeStatusNothing
+// 3. if the local Entity has new commits but the remote don't, nothing is changed
+//    --> emit entity.MergeStatusNothing
+// 4. if the remote has new commit, the local bug is updated to match the same history
+//    (fast-forward update)
+//    --> emit entity.MergeStatusUpdated
+// 5. if both local and remote Entity have new commits (that is, we have a concurrent edition),
+//    a merge commit with an empty operationPack is created to join both branch and form a DAG.
+//    --> emit entity.MergeStatusUpdated
+//
+// Note: an author is necessary for the case where a merge commit is created, as this commit will
+// have an author and may be signed if a signing key is available.
+//
+// The merges are performed in a goroutine; the returned channel is closed once all
+// remote refs have been processed, so the caller must drain it.
+func MergeAll(def Definition, repo repository.ClockedRepo, resolver identity.Resolver, remote string, author identity.Interface) <-chan entity.MergeResult {
+	out := make(chan entity.MergeResult)
+
+	go func() {
+		defer close(out)
+
+		remoteRefSpec := fmt.Sprintf("refs/remotes/%s/%s/", remote, def.Namespace)
+		remoteRefs, err := repo.ListRefs(remoteRefSpec)
+		if err != nil {
+			out <- entity.MergeResult{Err: err}
+			return
+		}
+
+		for _, remoteRef := range remoteRefs {
+			out <- merge(def, repo, resolver, remoteRef, author)
+		}
+	}()
+
+	return out
+}
+
+// merge perform a merge to make sure a local Entity is up to date.
+// See MergeAll for more details on the five possible scenarios.
+//
+// The author is only used when a true merge commit has to be created (scenario 5).
+func merge(def Definition, repo repository.ClockedRepo, resolver identity.Resolver, remoteRef string, author identity.Interface) entity.MergeResult {
+	id := entity.RefToId(remoteRef)
+
+	if err := id.Validate(); err != nil {
+		return entity.NewMergeInvalidStatus(id, errors.Wrap(err, "invalid ref").Error())
+	}
+
+	remoteEntity, err := read(def, repo, resolver, remoteRef)
+	if err != nil {
+		return entity.NewMergeInvalidStatus(id,
+			errors.Wrapf(err, "remote %s is not readable", def.Typename).Error())
+	}
+
+	// Check for error in remote data
+	if err := remoteEntity.Validate(); err != nil {
+		return entity.NewMergeInvalidStatus(id,
+			errors.Wrapf(err, "remote %s data is invalid", def.Typename).Error())
+	}
+
+	localRef := fmt.Sprintf("refs/%s/%s", def.Namespace, id.String())
+
+	// SCENARIO 1
+	// if the remote Entity doesn't exist locally, it's created
+
+	localExist, err := repo.RefExist(localRef)
+	if err != nil {
+		return entity.NewMergeError(err, id)
+	}
+
+	if !localExist {
+		// the bug is not local yet, simply create the reference
+		err := repo.CopyRef(remoteRef, localRef)
+		if err != nil {
+			return entity.NewMergeError(err, id)
+		}
+
+		return entity.NewMergeNewStatus(id, remoteEntity)
+	}
+
+	localCommit, err := repo.ResolveRef(localRef)
+	if err != nil {
+		return entity.NewMergeError(err, id)
+	}
+
+	remoteCommit, err := repo.ResolveRef(remoteRef)
+	if err != nil {
+		return entity.NewMergeError(err, id)
+	}
+
+	// SCENARIO 2
+	// if the remote and local Entity have the same state, nothing is changed
+
+	if localCommit == remoteCommit {
+		// nothing to merge
+		return entity.NewMergeNothingStatus(id)
+	}
+
+	// SCENARIO 3
+	// if the local Entity has new commits but the remote don't, nothing is changed
+
+	localCommits, err := repo.ListCommits(localRef)
+	if err != nil {
+		return entity.NewMergeError(err, id)
+	}
+
+	// the remote tip being part of the local history means the local is ahead or equal
+	for _, hash := range localCommits {
+		if hash == remoteCommit {
+			return entity.NewMergeNothingStatus(id)
+		}
+	}
+
+	// SCENARIO 4
+	// if the remote has new commit, the local bug is updated to match the same history
+	// (fast-forward update)
+
+	remoteCommits, err := repo.ListCommits(remoteRef)
+	if err != nil {
+		return entity.NewMergeError(err, id)
+	}
+
+	// fast-forward is possible if otherRef include ref
+	fastForwardPossible := false
+	for _, hash := range remoteCommits {
+		if hash == localCommit {
+			fastForwardPossible = true
+			break
+		}
+	}
+
+	if fastForwardPossible {
+		err = repo.UpdateRef(localRef, remoteCommit)
+		if err != nil {
+			return entity.NewMergeError(err, id)
+		}
+		return entity.NewMergeUpdatedStatus(id, remoteEntity)
+	}
+
+	// SCENARIO 5
+	// if both local and remote Entity have new commits (that is, we have a concurrent edition),
+	// a merge commit with an empty operationPack is created to join both branch and form a DAG.
+
+	// fast-forward is not possible, we need to create a merge commit
+	// For simplicity when reading and to have clocks that record this change, we store
+	// an empty operationPack.
+	// First step is to collect those clocks.
+
+	localEntity, err := read(def, repo, resolver, localRef)
+	if err != nil {
+		return entity.NewMergeError(err, id)
+	}
+
+	editTime, err := repo.Increment(fmt.Sprintf(editClockPattern, def.Namespace))
+	if err != nil {
+		return entity.NewMergeError(err, id)
+	}
+
+	opp := &operationPack{
+		Author:     author,
+		Operations: nil,
+		CreateTime: 0,
+		EditTime:   editTime,
+	}
+
+	commitHash, err := opp.Write(def, repo, localCommit, remoteCommit)
+	if err != nil {
+		return entity.NewMergeError(err, id)
+	}
+
+	// finally update the ref
+	err = repo.UpdateRef(localRef, commitHash)
+	if err != nil {
+		return entity.NewMergeError(err, id)
+	}
+
+	// Note: we don't need to update localEntity state (lastCommit, operations...) as we
+	// discard it entirely anyway.
+
+	return entity.NewMergeUpdatedStatus(id, localEntity)
+}
+
+// Remove delete an Entity.
+// Remove is idempotent.
+func Remove(def Definition, repo repository.ClockedRepo, id entity.Id) error {
+	var matches []string
+
+	ref := fmt.Sprintf("refs/%s/%s", def.Namespace, id.String())
+	matches = append(matches, ref)
+
+	remotes, err := repo.GetRemotes()
+	if err != nil {
+		return err
+	}
+
+	for remote := range remotes {
+		ref = fmt.Sprintf("refs/remotes/%s/%s/%s", remote, def.Namespace, id.String())
+		matches = append(matches, ref)
+	}
+
+	for _, ref = range matches {
+		err = repo.RemoveRef(ref)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}

entity/dag/entity_actions_test.go 🔗

@@ -0,0 +1,412 @@
+package dag
+
+import (
+	"sort"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/repository"
+)
+
+// allEntities drains the streamed channel, failing the test on any streamed
+// error, and returns the collected entities.
+func allEntities(t testing.TB, bugs <-chan StreamedEntity) []*Entity {
+	t.Helper()
+
+	var entities []*Entity
+	for streamed := range bugs {
+		require.NoError(t, streamed.Err)
+		entities = append(entities, streamed.Entity)
+	}
+	return entities
+}
+
+// TestEntityPushPull checks that an entity committed on one side reaches the
+// other side through a push to the shared remote followed by a pull.
+func TestEntityPushPull(t *testing.T) {
+	repoA, repoB, remote, id1, id2, resolver, def := makeTestContextRemote(t)
+	defer repository.CleanupTestRepos(repoA, repoB, remote)
+
+	// A --> remote --> B
+	e := New(def)
+	e.Append(newOp1(id1, "foo"))
+
+	err := e.Commit(repoA)
+	require.NoError(t, err)
+
+	_, err = Push(def, repoA, "remote")
+	require.NoError(t, err)
+
+	err = Pull(def, repoB, resolver, "remote", id1)
+	require.NoError(t, err)
+
+	entities := allEntities(t, ReadAll(def, repoB, resolver))
+	require.Len(t, entities, 1)
+
+	// B --> remote --> A
+	e = New(def)
+	e.Append(newOp2(id2, "bar"))
+
+	err = e.Commit(repoB)
+	require.NoError(t, err)
+
+	_, err = Push(def, repoB, "remote")
+	require.NoError(t, err)
+
+	err = Pull(def, repoA, resolver, "remote", id1)
+	require.NoError(t, err)
+
+	// read back from repoA (not repoB) to actually verify that the pull
+	// brought the second entity into A
+	entities = allEntities(t, ReadAll(def, repoA, resolver))
+	require.Len(t, entities, 2)
+}
+
+// TestListLocalIds checks that ListLocalIds only counts *local* refs:
+// a Fetch alone (remote-tracking refs only) must not change the count,
+// only a Pull does.
+func TestListLocalIds(t *testing.T) {
+	repoA, repoB, remote, id1, id2, resolver, def := makeTestContextRemote(t)
+	defer repository.CleanupTestRepos(repoA, repoB, remote)
+
+	// A --> remote --> B
+	e := New(def)
+	e.Append(newOp1(id1, "foo"))
+	err := e.Commit(repoA)
+	require.NoError(t, err)
+
+	e = New(def)
+	e.Append(newOp2(id2, "bar"))
+	err = e.Commit(repoA)
+	require.NoError(t, err)
+
+	// before any exchange: 2 ids in A, none in B
+	listLocalIds(t, def, repoA, 2)
+	listLocalIds(t, def, repoB, 0)
+
+	_, err = Push(def, repoA, "remote")
+	require.NoError(t, err)
+
+	_, err = Fetch(def, repoB, "remote")
+	require.NoError(t, err)
+
+	// a fetch only writes remote-tracking refs, so B still has no local id
+	listLocalIds(t, def, repoA, 2)
+	listLocalIds(t, def, repoB, 0)
+
+	err = Pull(def, repoB, resolver, "remote", id1)
+	require.NoError(t, err)
+
+	// the pull merged the remote refs into local ones
+	listLocalIds(t, def, repoA, 2)
+	listLocalIds(t, def, repoB, 2)
+}
+
+// listLocalIds asserts the number of local Entity ids found in the given repo.
+func listLocalIds(t *testing.T, def Definition, repo repository.RepoData, expectedCount int) {
+	ids, err := ListLocalIds(def, repo)
+	require.NoError(t, err)
+	require.Len(t, ids, expectedCount)
+}
+
+// assertMergeResults drains the merge results channel and checks that, in any
+// order, the (Id, Status) pairs match the expected set. For New/Updated results
+// it also checks that the carried Entity matches the Id.
+func assertMergeResults(t *testing.T, expected []entity.MergeResult, results <-chan entity.MergeResult) {
+	t.Helper()
+
+	var allResults []entity.MergeResult
+	for result := range results {
+		allResults = append(allResults, result)
+	}
+
+	require.Equal(t, len(expected), len(allResults))
+
+	// sort both sides by Id so they can be compared pairwise
+	sort.Slice(allResults, func(i, j int) bool {
+		return allResults[i].Id < allResults[j].Id
+	})
+	sort.Slice(expected, func(i, j int) bool {
+		return expected[i].Id < expected[j].Id
+	})
+
+	for i, result := range allResults {
+		require.NoError(t, result.Err)
+
+		require.Equal(t, expected[i].Id, result.Id)
+		require.Equal(t, expected[i].Status, result.Status)
+
+		switch result.Status {
+		case entity.MergeStatusNew, entity.MergeStatusUpdated:
+			require.NotNil(t, result.Entity)
+			require.Equal(t, expected[i].Id, result.Entity.Id())
+		}
+	}
+}
+
+// assertEqualRefs asserts that both repos have the same set of refs under the
+// given prefix, each resolving to the same commit.
+func assertEqualRefs(t *testing.T, repoA, repoB repository.RepoData, prefix string) {
+	t.Helper()
+
+	// keep only the refs under the given prefix
+	filter := func(refs []string) []string {
+		var filtered []string
+		for _, ref := range refs {
+			if strings.HasPrefix(ref, prefix) {
+				filtered = append(filtered, ref)
+			}
+		}
+		return filtered
+	}
+
+	refsA, err := repoA.ListRefs("")
+	require.NoError(t, err)
+	refsAFiltered := filter(refsA)
+
+	refsB, err := repoB.ListRefs("")
+	require.NoError(t, err)
+	refsBFiltered := filter(refsB)
+
+	require.NotEmpty(t, refsAFiltered)
+	require.Equal(t, refsAFiltered, refsBFiltered)
+
+	for _, ref := range refsAFiltered {
+		commitA, err := repoA.ResolveRef(ref)
+		require.NoError(t, err)
+		commitB, err := repoB.ResolveRef(ref)
+		require.NoError(t, err)
+
+		require.Equal(t, commitA, commitB)
+	}
+}
+
+// assertNotEqualRefs asserts that both repos have the same set of refs under
+// the given prefix, but that every one of them resolves to a different commit.
+func assertNotEqualRefs(t *testing.T, repoA, repoB repository.RepoData, prefix string) {
+	t.Helper()
+
+	// keep only the refs under the given prefix
+	filter := func(refs []string) []string {
+		var filtered []string
+		for _, ref := range refs {
+			if strings.HasPrefix(ref, prefix) {
+				filtered = append(filtered, ref)
+			}
+		}
+		return filtered
+	}
+
+	refsA, err := repoA.ListRefs("")
+	require.NoError(t, err)
+	refsAFiltered := filter(refsA)
+
+	refsB, err := repoB.ListRefs("")
+	require.NoError(t, err)
+	refsBFiltered := filter(refsB)
+
+	require.NotEmpty(t, refsAFiltered)
+	require.Equal(t, refsAFiltered, refsBFiltered)
+
+	for _, ref := range refsAFiltered {
+		commitA, err := repoA.ResolveRef(ref)
+		require.NoError(t, err)
+		commitB, err := repoB.ResolveRef(ref)
+		require.NoError(t, err)
+
+		require.NotEqual(t, commitA, commitB)
+	}
+}
+
+// TestMerge exercises, in order, the five merge scenarios handled by
+// MergeAll/merge: creation, no-op (same state), no-op (local ahead),
+// fast-forward, and a true merge commit on concurrent edition.
+func TestMerge(t *testing.T) {
+	repoA, repoB, remote, id1, id2, resolver, def := makeTestContextRemote(t)
+	defer repository.CleanupTestRepos(repoA, repoB, remote)
+
+	// SCENARIO 1
+	// if the remote Entity doesn't exist locally, it's created
+
+	// 2 entities in repoA + push to remote
+	e1A := New(def)
+	e1A.Append(newOp1(id1, "foo"))
+	err := e1A.Commit(repoA)
+	require.NoError(t, err)
+
+	e2A := New(def)
+	e2A.Append(newOp2(id2, "bar"))
+	err = e2A.Commit(repoA)
+	require.NoError(t, err)
+
+	_, err = Push(def, repoA, "remote")
+	require.NoError(t, err)
+
+	// repoB: fetch + merge from remote
+
+	_, err = Fetch(def, repoB, "remote")
+	require.NoError(t, err)
+
+	results := MergeAll(def, repoB, resolver, "remote", id1)
+
+	assertMergeResults(t, []entity.MergeResult{
+		{
+			Id:     e1A.Id(),
+			Status: entity.MergeStatusNew,
+		},
+		{
+			Id:     e2A.Id(),
+			Status: entity.MergeStatusNew,
+		},
+	}, results)
+
+	assertEqualRefs(t, repoA, repoB, "refs/"+def.Namespace)
+
+	// SCENARIO 2
+	// if the remote and local Entity have the same state, nothing is changed
+
+	results = MergeAll(def, repoB, resolver, "remote", id1)
+
+	assertMergeResults(t, []entity.MergeResult{
+		{
+			Id:     e1A.Id(),
+			Status: entity.MergeStatusNothing,
+		},
+		{
+			Id:     e2A.Id(),
+			Status: entity.MergeStatusNothing,
+		},
+	}, results)
+
+	assertEqualRefs(t, repoA, repoB, "refs/"+def.Namespace)
+
+	// SCENARIO 3
+	// if the local Entity has new commits but the remote don't, nothing is changed
+
+	e1A.Append(newOp1(id1, "barbar"))
+	err = e1A.Commit(repoA)
+	require.NoError(t, err)
+
+	e2A.Append(newOp2(id2, "barbarbar"))
+	err = e2A.Commit(repoA)
+	require.NoError(t, err)
+
+	// note: merging into repoA here, which is ahead of its remote refs
+	results = MergeAll(def, repoA, resolver, "remote", id1)
+
+	assertMergeResults(t, []entity.MergeResult{
+		{
+			Id:     e1A.Id(),
+			Status: entity.MergeStatusNothing,
+		},
+		{
+			Id:     e2A.Id(),
+			Status: entity.MergeStatusNothing,
+		},
+	}, results)
+
+	assertNotEqualRefs(t, repoA, repoB, "refs/"+def.Namespace)
+
+	// SCENARIO 4
+	// if the remote has new commit, the local bug is updated to match the same history
+	// (fast-forward update)
+
+	_, err = Push(def, repoA, "remote")
+	require.NoError(t, err)
+
+	_, err = Fetch(def, repoB, "remote")
+	require.NoError(t, err)
+
+	results = MergeAll(def, repoB, resolver, "remote", id1)
+
+	assertMergeResults(t, []entity.MergeResult{
+		{
+			Id:     e1A.Id(),
+			Status: entity.MergeStatusUpdated,
+		},
+		{
+			Id:     e2A.Id(),
+			Status: entity.MergeStatusUpdated,
+		},
+	}, results)
+
+	assertEqualRefs(t, repoA, repoB, "refs/"+def.Namespace)
+
+	// SCENARIO 5
+	// if both local and remote Entity have new commits (that is, we have a concurrent edition),
+	// a merge commit with an empty operationPack is created to join both branch and form a DAG.
+
+	e1A.Append(newOp1(id1, "barbarfoo"))
+	err = e1A.Commit(repoA)
+	require.NoError(t, err)
+
+	e2A.Append(newOp2(id2, "barbarbarfoo"))
+	err = e2A.Commit(repoA)
+	require.NoError(t, err)
+
+	e1B, err := Read(def, repoB, resolver, e1A.Id())
+	require.NoError(t, err)
+
+	e2B, err := Read(def, repoB, resolver, e2A.Id())
+	require.NoError(t, err)
+
+	e1B.Append(newOp1(id1, "barbarfoofoo"))
+	err = e1B.Commit(repoB)
+	require.NoError(t, err)
+
+	e2B.Append(newOp2(id2, "barbarbarfoofoo"))
+	err = e2B.Commit(repoB)
+	require.NoError(t, err)
+
+	_, err = Push(def, repoA, "remote")
+	require.NoError(t, err)
+
+	_, err = Fetch(def, repoB, "remote")
+	require.NoError(t, err)
+
+	results = MergeAll(def, repoB, resolver, "remote", id1)
+
+	assertMergeResults(t, []entity.MergeResult{
+		{
+			Id:     e1A.Id(),
+			Status: entity.MergeStatusUpdated,
+		},
+		{
+			Id:     e2A.Id(),
+			Status: entity.MergeStatusUpdated,
+		},
+	}, results)
+
+	// the merge commits exist only in B at this point
+	assertNotEqualRefs(t, repoA, repoB, "refs/"+def.Namespace)
+
+	_, err = Push(def, repoB, "remote")
+	require.NoError(t, err)
+
+	_, err = Fetch(def, repoA, "remote")
+	require.NoError(t, err)
+
+	results = MergeAll(def, repoA, resolver, "remote", id1)
+
+	assertMergeResults(t, []entity.MergeResult{
+		{
+			Id:     e1A.Id(),
+			Status: entity.MergeStatusUpdated,
+		},
+		{
+			Id:     e2A.Id(),
+			Status: entity.MergeStatusUpdated,
+		},
+	}, results)
+
+	// make sure that the graphs become stable over multiple repo, due to the
+	// fast-forward
+	assertEqualRefs(t, repoA, repoB, "refs/"+def.Namespace)
+}
+
+// TestRemove checks that Remove deletes both the local ref and the
+// remote-tracking ref of an entity, and that a second Remove succeeds
+// (idempotence).
+func TestRemove(t *testing.T) {
+	repoA, repoB, remote, id1, _, resolver, def := makeTestContextRemote(t)
+	defer repository.CleanupTestRepos(repoA, repoB, remote)
+
+	e := New(def)
+	e.Append(newOp1(id1, "foo"))
+	require.NoError(t, e.Commit(repoA))
+
+	_, err := Push(def, repoA, "remote")
+	require.NoError(t, err)
+
+	err = Remove(def, repoA, e.Id())
+	require.NoError(t, err)
+
+	// neither the local nor the remote-tracking copy should be readable anymore
+	_, err = Read(def, repoA, resolver, e.Id())
+	require.Error(t, err)
+
+	_, err = readRemote(def, repoA, resolver, "remote", e.Id())
+	require.Error(t, err)
+
+	// Remove is idempotent
+	err = Remove(def, repoA, e.Id())
+	require.NoError(t, err)
+}

entity/dag/entity_test.go 🔗

@@ -0,0 +1,68 @@
+package dag
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestWriteRead checks that an Entity committed over multiple commits can be
+// read back from git identical to the in-memory one.
+func TestWriteRead(t *testing.T) {
+	repo, id1, id2, resolver, def := makeTestContext()
+
+	entity := New(def)
+	require.False(t, entity.NeedCommit())
+
+	entity.Append(newOp1(id1, "foo"))
+	entity.Append(newOp2(id1, "bar"))
+
+	require.True(t, entity.NeedCommit())
+	require.NoError(t, entity.CommitAsNeeded(repo))
+	require.False(t, entity.NeedCommit())
+
+	entity.Append(newOp2(id2, "foobar"))
+	require.True(t, entity.NeedCommit())
+	require.NoError(t, entity.CommitAsNeeded(repo))
+	require.False(t, entity.NeedCommit())
+
+	read, err := Read(def, repo, resolver, entity.Id())
+	require.NoError(t, err)
+
+	assertEqualEntities(t, entity, read)
+}
+
+// TestWriteReadMultipleAuthor is the same round-trip as TestWriteRead, but with
+// operations from two different authors, which forces the Commit to split the
+// staging area into several operationPacks.
+func TestWriteReadMultipleAuthor(t *testing.T) {
+	repo, id1, id2, resolver, def := makeTestContext()
+
+	entity := New(def)
+
+	entity.Append(newOp1(id1, "foo"))
+	entity.Append(newOp2(id2, "bar"))
+
+	require.NoError(t, entity.CommitAsNeeded(repo))
+
+	entity.Append(newOp2(id1, "foobar"))
+	require.NoError(t, entity.CommitAsNeeded(repo))
+
+	read, err := Read(def, repo, resolver, entity.Id())
+	require.NoError(t, err)
+
+	assertEqualEntities(t, entity, read)
+}
+
+// assertEqualEntities deep-compares two entities, working around the fact that
+// Definition carries a function field.
+func assertEqualEntities(t *testing.T, a, b *Entity) {
+	// testify doesn't support comparing functions and systematically fail if they are not nil
+	// so we have to set them to nil temporarily
+
+	backOpUnA := a.Definition.OperationUnmarshaler
+	backOpUnB := b.Definition.OperationUnmarshaler
+
+	a.Definition.OperationUnmarshaler = nil
+	b.Definition.OperationUnmarshaler = nil
+
+	// restore the unmarshalers even if require.Equal fails the test
+	defer func() {
+		a.Definition.OperationUnmarshaler = backOpUnA
+		b.Definition.OperationUnmarshaler = backOpUnB
+	}()
+
+	require.Equal(t, a, b)
+}

entity/dag/operation.go 🔗

@@ -0,0 +1,48 @@
+package dag
+
+import (
+	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/identity"
+	"github.com/MichaelMure/git-bug/repository"
+)
+
+// Operation is a piece of data defining a change to reflect on the state of an Entity.
+// What this Operation or Entity's state looks like is not of the resort of this package as it only deals with the
+// data structure and storage.
+type Operation interface {
+	// Id returns the Operation identifier
+	//
+	// Some care need to be taken to define a correct Id derivation and enough entropy in the data used to avoid
+	// collisions. Notably:
+	// - the Id of the first Operation will be used as the Id of the Entity. Collision need to be avoided across entities
+	//   of the same type (example: no collision within the "bug" namespace).
+	// - collisions can also happen within the set of Operations of an Entity. Simple Operation might not have enough
+	//   entropy to yield unique Ids (example: two "close" operation within the same second, same author).
+	//   If this is a concern, it is recommended to include a piece of random data in the operation's data, to guarantee
+	//   a minimal amount of entropy and avoid collision.
+	//
+	//   Author's note: I tried to find a clever way around that inelegance (stuffing random useless data into the stored
+	//   structure is not exactly elegant) but I failed to find a proper way. Essentially, anything that would reuse some
+	//   other data (parent operation's Id, lamport clock) or the graph structure (depth) impose that the Id would only
+	//   make sense in the context of the graph and yield some deep coupling between Entity and Operation. This in turn
+	//   make the whole thing even less elegant.
+	//
+	// A common way to derive an Id will be to use the entity.DeriveId() function on the serialized operation data.
+	Id() entity.Id
+	// Validate checks that the Operation data is valid
+	Validate() error
+	// Author returns the author of this operation
+	Author() identity.Interface
+}
+
+// OperationWithFiles is an extended Operation that has files dependency, stored in git.
+// Those files are collected by operationPack.Write and anchored in the commit's tree.
+type OperationWithFiles interface {
+	Operation
+
+	// GetFiles return the files needed by this operation
+	// This implies that the Operation maintain and store internally the references to those files. This is how
+	// this information is read later, when loading from storage.
+	// For example, an operation that has a text value referencing some files would maintain a mapping (text ref -->
+	// hash).
+	GetFiles() []repository.Hash
+}

entity/dag/operation_pack.go 🔗

@@ -0,0 +1,358 @@
+package dag
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+	"golang.org/x/crypto/openpgp"
+
+	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/identity"
+	"github.com/MichaelMure/git-bug/repository"
+	"github.com/MichaelMure/git-bug/util/lamport"
+)
+
+const opsEntryName = "ops"
+const extraEntryName = "extra"
+const versionEntryPrefix = "version-"
+const createClockEntryPrefix = "create-clock-"
+const editClockEntryPrefix = "edit-clock-"
+
+// operationPack is a wrapper structure to store multiple operations in a single git blob.
+// Additionally, it holds and stores the metadata for those operations.
+type operationPack struct {
+	// An identifier, taken from a hash of the serialized Operations.
+	// Lazily computed by Id(), or set by Write()/readOperationPack().
+	id entity.Id
+
+	// The author of the Operations. Must be the same author for all the Operations.
+	Author identity.Interface
+	// The list of Operation stored in the operationPack
+	Operations []Operation
+	// Encode the entity's logical time of creation across all entities of the same type.
+	// Only exists on the root operationPack
+	CreateTime lamport.Time
+	// Encode the entity's logical time of last edition across all entities of the same type.
+	// Exists on all operationPack
+	EditTime lamport.Time
+}
+
+// Id returns the identifier of the operationPack, derived from the serialized
+// operations. The result is memoized in opp.id.
+func (opp *operationPack) Id() entity.Id {
+	if opp.id == "" || opp.id == entity.UnsetId {
+		// This means we are trying to get the opp's Id *before* it has been stored.
+		// As the Id is computed based on the actual bytes written on the disk, we are going to predict
+		// those and then get the Id. This is safe as it will be the exact same code writing on disk later.
+
+		data, err := json.Marshal(opp)
+		if err != nil {
+			// a marshaling failure here is a programming error, not a runtime condition
+			panic(err)
+		}
+		opp.id = entity.DeriveId(data)
+	}
+
+	return opp.id
+}
+
+// MarshalJSON serializes only the author and the operations: the clocks and the
+// format version are stored separately as entries in the git tree (see Write).
+func (opp *operationPack) MarshalJSON() ([]byte, error) {
+	return json.Marshal(struct {
+		Author     identity.Interface `json:"author"`
+		Operations []Operation        `json:"ops"`
+	}{
+		Author:     opp.Author,
+		Operations: opp.Operations,
+	})
+}
+
+// Validate checks that the operationPack is self-consistent: an author is set,
+// shared by every operation, and the edit lamport time is non-zero.
+// CreateTime is not checked here as it only exists on the root operationPack.
+func (opp *operationPack) Validate() error {
+	if opp.Author == nil {
+		return fmt.Errorf("missing author")
+	}
+	for _, op := range opp.Operations {
+		if op.Author().Id() != opp.Author.Id() {
+			return fmt.Errorf("operation has different author than the operationPack's")
+		}
+	}
+	if opp.EditTime == 0 {
+		return fmt.Errorf("lamport edit time is zero")
+	}
+	return nil
+}
+
+// Write writes the OperationPack in git, with zero, one or more parent commits.
+// If the repository has a keypair able to sign (that is, with a private key), the resulting commit is signed with that key.
+// Returns the hash of the created commit.
+// As a side effect, opp.id is set from the serialized data.
+func (opp *operationPack) Write(def Definition, repo repository.Repo, parentCommit ...repository.Hash) (repository.Hash, error) {
+	if err := opp.Validate(); err != nil {
+		return "", err
+	}
+
+	// For different reason, we store the clocks and format version directly in the git tree.
+	// Version has to be accessible before any attempt to decode to return early with a unique error.
+	// Clocks could possibly be stored in the git blob but it's nice to separate data and metadata, and
+	// we are storing something directly in the tree already so why not.
+	//
+	// To have a valid Tree, we point the "fake" entries to always the same value, the empty blob.
+	emptyBlobHash, err := repo.StoreData([]byte{})
+	if err != nil {
+		return "", err
+	}
+
+	// Write the Ops as a Git blob containing the serialized array of operations
+	data, err := json.Marshal(opp)
+	if err != nil {
+		return "", err
+	}
+
+	// compute the Id while we have the serialized data
+	opp.id = entity.DeriveId(data)
+
+	hash, err := repo.StoreData(data)
+	if err != nil {
+		return "", err
+	}
+
+	// Make a Git tree referencing this blob and encoding the other values:
+	// - format version
+	// - clocks
+	// - extra data
+	tree := []repository.TreeEntry{
+		{ObjectType: repository.Blob, Hash: emptyBlobHash,
+			Name: fmt.Sprintf(versionEntryPrefix+"%d", def.FormatVersion)},
+		{ObjectType: repository.Blob, Hash: hash,
+			Name: opsEntryName},
+		{ObjectType: repository.Blob, Hash: emptyBlobHash,
+			Name: fmt.Sprintf(editClockEntryPrefix+"%d", opp.EditTime)},
+	}
+	// the create clock entry only exists on the root operationPack
+	if opp.CreateTime > 0 {
+		tree = append(tree, repository.TreeEntry{
+			ObjectType: repository.Blob,
+			Hash:       emptyBlobHash,
+			Name:       fmt.Sprintf(createClockEntryPrefix+"%d", opp.CreateTime),
+		})
+	}
+	// anchor the blobs referenced by the operations (see OperationWithFiles)
+	if extraTree := opp.makeExtraTree(); len(extraTree) > 0 {
+		extraTreeHash, err := repo.StoreTree(extraTree)
+		if err != nil {
+			return "", err
+		}
+		tree = append(tree, repository.TreeEntry{
+			ObjectType: repository.Tree,
+			Hash:       extraTreeHash,
+			Name:       extraEntryName,
+		})
+	}
+
+	// Store the tree
+	treeHash, err := repo.StoreTree(tree)
+	if err != nil {
+		return "", err
+	}
+
+	// Write a Git commit referencing the tree, with the previous commit as parent
+	// If we have keys, sign.
+	var commitHash repository.Hash
+
+	// Sign the commit if we have a key
+	signingKey, err := opp.Author.SigningKey(repo)
+	if err != nil {
+		return "", err
+	}
+
+	if signingKey != nil {
+		commitHash, err = repo.StoreSignedCommit(treeHash, signingKey.PGPEntity(), parentCommit...)
+	} else {
+		commitHash, err = repo.StoreCommit(treeHash, parentCommit...)
+	}
+
+	if err != nil {
+		return "", err
+	}
+
+	return commitHash, nil
+}
+
+// makeExtraTree collects the file blobs referenced by the operations,
+// deduplicated, as tree entries so Write can anchor them in the commit.
+func (opp *operationPack) makeExtraTree() []repository.TreeEntry {
+	var tree []repository.TreeEntry
+	seen := make(map[repository.Hash]interface{})
+
+	for _, op := range opp.Operations {
+		withFiles, ok := op.(OperationWithFiles)
+		if !ok {
+			continue
+		}
+
+		for _, file := range withFiles.GetFiles() {
+			if _, has := seen[file]; has {
+				continue
+			}
+			tree = append(tree, repository.TreeEntry{
+				ObjectType: repository.Blob,
+				Hash:       file,
+				// The name is not important here, we only need to
+				// reference the blob.
+				Name: fmt.Sprintf("file%d", len(seen)),
+			})
+			seen[file] = struct{}{}
+		}
+	}
+
+	return tree
+}
+
+// readOperationPack read the operationPack encoded in git at the given Tree hash.
+//
+// If the author has valid signing keys at the pack's edit time, the commit
+// signature is verified against them.
+//
+// Validity of the Lamport clocks is left for the caller to decide.
+func readOperationPack(def Definition, repo repository.RepoData, resolver identity.Resolver, commit repository.Commit) (*operationPack, error) {
+	entries, err := repo.ReadTree(commit.TreeHash)
+	if err != nil {
+		return nil, err
+	}
+
+	// check the format version first, fail early instead of trying to read something
+	var version uint
+	for _, entry := range entries {
+		if strings.HasPrefix(entry.Name, versionEntryPrefix) {
+			v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, versionEntryPrefix), 10, 64)
+			if err != nil {
+				return nil, errors.Wrap(err, "can't read format version")
+			}
+			if v > 1<<12 {
+				return nil, fmt.Errorf("format version too big")
+			}
+			version = uint(v)
+			break
+		}
+	}
+	if version == 0 {
+		return nil, entity.NewErrUnknownFormat(def.FormatVersion)
+	}
+	if version != def.FormatVersion {
+		return nil, entity.NewErrInvalidFormat(version, def.FormatVersion)
+	}
+
+	var id entity.Id
+	var author identity.Interface
+	var ops []Operation
+	var createTime lamport.Time
+	var editTime lamport.Time
+
+	for _, entry := range entries {
+		switch {
+		case entry.Name == opsEntryName:
+			data, err := repo.ReadData(entry.Hash)
+			if err != nil {
+				return nil, errors.Wrap(err, "failed to read git blob data")
+			}
+			ops, author, err = unmarshallPack(def, resolver, data)
+			if err != nil {
+				return nil, err
+			}
+			id = entity.DeriveId(data)
+
+		case strings.HasPrefix(entry.Name, createClockEntryPrefix):
+			v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, createClockEntryPrefix), 10, 64)
+			if err != nil {
+				return nil, errors.Wrap(err, "can't read creation lamport time")
+			}
+			createTime = lamport.Time(v)
+
+		case strings.HasPrefix(entry.Name, editClockEntryPrefix):
+			v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, editClockEntryPrefix), 10, 64)
+			if err != nil {
+				return nil, errors.Wrap(err, "can't read edit lamport time")
+			}
+			editTime = lamport.Time(v)
+		}
+	}
+
+	// If the tree has no "ops" entry, nothing set the author: fail explicitly on
+	// this malformed (possibly remote/hostile) data instead of dereferencing nil below.
+	if author == nil {
+		return nil, fmt.Errorf("missing ops entry in operationPack")
+	}
+
+	// Verify signature if we expect one
+	keys := author.ValidKeysAtTime(fmt.Sprintf(editClockPattern, def.Namespace), editTime)
+	if len(keys) > 0 {
+		keyring := PGPKeyring(keys)
+		_, err = openpgp.CheckDetachedSignature(keyring, commit.SignedData, commit.Signature)
+		if err != nil {
+			return nil, fmt.Errorf("signature failure: %v", err)
+		}
+	}
+
+	return &operationPack{
+		id:         id,
+		Author:     author,
+		Operations: ops,
+		CreateTime: createTime,
+		EditTime:   editTime,
+	}, nil
+}
+
+// unmarshallPack delegates the unmarshalling of the Operation's JSON to the decoding
+// function provided by the concrete entity. This gives access to the concrete type of each
+// Operation.
+// The author stub read from JSON is resolved into a full identity through the resolver,
+// and that resolved identity is returned alongside the operations.
+func unmarshallPack(def Definition, resolver identity.Resolver, data []byte) ([]Operation, identity.Interface, error) {
+	aux := struct {
+		Author     identity.IdentityStub `json:"author"`
+		Operations []json.RawMessage     `json:"ops"`
+	}{}
+
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return nil, nil, err
+	}
+
+	if aux.Author.Id() == "" || aux.Author.Id() == entity.UnsetId {
+		return nil, nil, fmt.Errorf("missing author")
+	}
+
+	author, err := resolver.ResolveIdentity(aux.Author.Id())
+	if err != nil {
+		return nil, nil, err
+	}
+
+	ops := make([]Operation, 0, len(aux.Operations))
+
+	for _, raw := range aux.Operations {
+		// delegate to specialized unmarshal function
+		op, err := def.OperationUnmarshaler(author, raw)
+		if err != nil {
+			return nil, nil, err
+		}
+		ops = append(ops, op)
+	}
+
+	return ops, author, nil
+}
+
+var _ openpgp.KeyRing = &PGPKeyring{}
+
+// PGPKeyring implements openpgp.KeyRing from a slice of Key
+type PGPKeyring []*identity.Key
+
+// KeysById returns the keys of the ring whose public key id matches the given id.
+func (pk PGPKeyring) KeysById(id uint64) []openpgp.Key {
+	var result []openpgp.Key
+	for _, key := range pk {
+		if key.Public().KeyId == id {
+			result = append(result, openpgp.Key{
+				PublicKey:  key.Public(),
+				PrivateKey: key.Private(),
+			})
+		}
+	}
+	return result
+}
+
+// KeysByIdUsage implements openpgp.KeyRing, ignoring the usage flags.
+func (pk PGPKeyring) KeysByIdUsage(id uint64, requiredUsage byte) []openpgp.Key {
+	// the only usage we care about is the ability to sign, which all keys should already be capable of
+	return pk.KeysById(id)
+}
+
+// DecryptionKeys implements openpgp.KeyRing by returning every key of the ring.
+func (pk PGPKeyring) DecryptionKeys() []openpgp.Key {
+	keys := make([]openpgp.Key, 0, len(pk))
+	for _, key := range pk {
+		keys = append(keys, openpgp.Key{
+			PublicKey:  key.Public(),
+			PrivateKey: key.Private(),
+		})
+	}
+	return keys
+}

entity/dag/operation_pack_test.go 🔗

@@ -0,0 +1,159 @@
+package dag
+
+import (
+	"math/rand"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/MichaelMure/git-bug/identity"
+	"github.com/MichaelMure/git-bug/repository"
+)
+
+func TestOperationPackReadWrite(t *testing.T) {
+	repo, id1, _, resolver, def := makeTestContext()
+
+	opp := &operationPack{
+		Author: id1,
+		Operations: []Operation{
+			newOp1(id1, "foo"),
+			newOp2(id1, "bar"),
+		},
+		CreateTime: 123,
+		EditTime:   456,
+	}
+
+	commitHash, err := opp.Write(def, repo)
+	require.NoError(t, err)
+
+	commit, err := repo.ReadCommit(commitHash)
+	require.NoError(t, err)
+
+	opp2, err := readOperationPack(def, repo, resolver, commit)
+	require.NoError(t, err)
+
+	require.Equal(t, opp, opp2)
+
+	// make sure we get the same Id with the same data
+	opp3 := &operationPack{
+		Author: id1,
+		Operations: []Operation{
+			newOp1(id1, "foo"),
+			newOp2(id1, "bar"),
+		},
+		CreateTime: 123,
+		EditTime:   456,
+	}
+	require.Equal(t, opp.Id(), opp3.Id())
+}
+
+func TestOperationPackSignedReadWrite(t *testing.T) {
+	repo, id1, _, resolver, def := makeTestContext()
+
+	err := id1.(*identity.Identity).Mutate(repo, func(orig *identity.Mutator) {
+		orig.Keys = append(orig.Keys, identity.GenerateKey())
+	})
+	require.NoError(t, err)
+
+	opp := &operationPack{
+		Author: id1,
+		Operations: []Operation{
+			newOp1(id1, "foo"),
+			newOp2(id1, "bar"),
+		},
+		CreateTime: 123,
+		EditTime:   456,
+	}
+
+	commitHash, err := opp.Write(def, repo)
+	require.NoError(t, err)
+
+	commit, err := repo.ReadCommit(commitHash)
+	require.NoError(t, err)
+
+	opp2, err := readOperationPack(def, repo, resolver, commit)
+	require.NoError(t, err)
+
+	require.Equal(t, opp, opp2)
+
+	// make sure we get the same Id with the same data
+	opp3 := &operationPack{
+		Author: id1,
+		Operations: []Operation{
+			newOp1(id1, "foo"),
+			newOp2(id1, "bar"),
+		},
+		CreateTime: 123,
+		EditTime:   456,
+	}
+	require.Equal(t, opp.Id(), opp3.Id())
+}
+
+func TestOperationPackFiles(t *testing.T) {
+	repo, id1, _, resolver, def := makeTestContext()
+
+	blobHash1, err := repo.StoreData(randomData())
+	require.NoError(t, err)
+
+	blobHash2, err := repo.StoreData(randomData())
+	require.NoError(t, err)
+
+	opp := &operationPack{
+		Author: id1,
+		Operations: []Operation{
+			newOp1(id1, "foo", blobHash1, blobHash2),
+			newOp1(id1, "foo", blobHash2),
+		},
+		CreateTime: 123,
+		EditTime:   456,
+	}
+
+	commitHash, err := opp.Write(def, repo)
+	require.NoError(t, err)
+
+	commit, err := repo.ReadCommit(commitHash)
+	require.NoError(t, err)
+
+	opp2, err := readOperationPack(def, repo, resolver, commit)
+	require.NoError(t, err)
+
+	require.Equal(t, opp, opp2)
+
+	require.ElementsMatch(t, opp2.Operations[0].(OperationWithFiles).GetFiles(), []repository.Hash{
+		blobHash1,
+		blobHash2,
+	})
+	require.ElementsMatch(t, opp2.Operations[1].(OperationWithFiles).GetFiles(), []repository.Hash{
+		blobHash2,
+	})
+
+	tree, err := repo.ReadTree(commit.TreeHash)
+	require.NoError(t, err)
+
+	extraTreeHash, ok := repository.SearchTreeEntry(tree, extraEntryName)
+	require.True(t, ok)
+
+	extraTree, err := repo.ReadTree(extraTreeHash.Hash)
+	require.NoError(t, err)
+	require.ElementsMatch(t, extraTree, []repository.TreeEntry{
+		{
+			ObjectType: repository.Blob,
+			Hash:       blobHash1,
+			Name:       "file0",
+		},
+		{
+			ObjectType: repository.Blob,
+			Hash:       blobHash2,
+			Name:       "file1",
+		},
+	})
+}
+
+func randomData() []byte {
+	var letterRunes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	b := make([]byte, 32)
+	for i := range b {
+		b[i] = letterRunes[rand.Intn(len(letterRunes))]
+	}
+	return b
+}

entity/doc.go 🔗

@@ -1,8 +0,0 @@
-// Package entity contains the base common code to define an entity stored
-// in a chain of git objects, supporting actions like Push, Pull and Merge.
-package entity
-
-// TODO: Bug and Identity are very similar, right ? I expect that this package
-// will eventually hold the common code to define an entity and the related
-// helpers, errors and so on. When this work is done, it will become easier
-// to add new entities, for example to support pull requests.

entity/err.go 🔗

@@ -31,28 +31,31 @@ func IsErrMultipleMatch(err error) bool {
 	return ok
 }
 
-// ErrOldFormatVersion indicate that the read data has a too old format.
-type ErrOldFormatVersion struct {
-	formatVersion uint
+type ErrInvalidFormat struct {
+	version  uint
+	expected uint
 }
 
-func NewErrOldFormatVersion(formatVersion uint) *ErrOldFormatVersion {
-	return &ErrOldFormatVersion{formatVersion: formatVersion}
-}
-
-func (e ErrOldFormatVersion) Error() string {
-	return fmt.Sprintf("outdated repository format %v, please use https://github.com/MichaelMure/git-bug-migration to upgrade", e.formatVersion)
-}
-
-// ErrNewFormatVersion indicate that the read data is too new for this software.
-type ErrNewFormatVersion struct {
-	formatVersion uint
+func NewErrInvalidFormat(version uint, expected uint) *ErrInvalidFormat {
+	return &ErrInvalidFormat{
+		version:  version,
+		expected: expected,
+	}
 }
 
-func NewErrNewFormatVersion(formatVersion uint) *ErrNewFormatVersion {
-	return &ErrNewFormatVersion{formatVersion: formatVersion}
+func NewErrUnknownFormat(expected uint) *ErrInvalidFormat {
+	return &ErrInvalidFormat{
+		version:  0,
+		expected: expected,
+	}
 }
 
-func (e ErrNewFormatVersion) Error() string {
-	return fmt.Sprintf("your version of git-bug is too old for this repository (version %v), please upgrade to the latest version", e.formatVersion)
+func (e ErrInvalidFormat) Error() string {
+	if e.version == 0 {
+		return fmt.Sprintf("unreadable data, you likely have an outdated repository format, please use https://github.com/MichaelMure/git-bug-migration to upgrade to format version %v", e.expected)
+	}
+	if e.version < e.expected {
+		return fmt.Sprintf("outdated repository format %v, please use https://github.com/MichaelMure/git-bug-migration to upgrade to format version %v", e.version, e.expected)
+	}
+	return fmt.Sprintf("your version of git-bug is too old for this repository (format version %v, expected %v), please upgrade to the latest version", e.version, e.expected)
 }

entity/id.go 🔗

@@ -1,6 +1,7 @@
 package entity
 
 import (
+	"crypto/sha256"
 	"fmt"
 	"io"
 	"strings"
@@ -8,8 +9,8 @@ import (
 	"github.com/pkg/errors"
 )
 
-const IdLengthSHA1 = 40
-const IdLengthSHA256 = 64
+// sha-256
+const idLength = 64
 const humanIdLength = 7
 
 const UnsetId = Id("unset")
@@ -17,6 +18,15 @@ const UnsetId = Id("unset")
 // Id is an identifier for an entity or part of an entity
 type Id string
 
+// DeriveId generates an Id from the serialization of the object or part of the object.
+func DeriveId(data []byte) Id {
+	// My understanding is that sha256 is enough to prevent collision (git use that, so ...?)
+	// If you read this code, I'd be happy to be schooled.
+
+	sum := sha256.Sum256(data)
+	return Id(fmt.Sprintf("%x", sum))
+}
+
 // String return the identifier as a string
 func (i Id) String() string {
 	return string(i)
@@ -55,7 +65,11 @@ func (i Id) MarshalGQL(w io.Writer) {
 
 // IsValid tell if the Id is valid
 func (i Id) Validate() error {
-	if len(i) != IdLengthSHA1 && len(i) != IdLengthSHA256 {
+	// Special case to detect outdated repo
+	if len(i) == 40 {
+		return fmt.Errorf("outdated repository format, please use https://github.com/MichaelMure/git-bug-migration to upgrade")
+	}
+	if len(i) != idLength {
 		return fmt.Errorf("invalid length")
 	}
 	for _, r := range i {

entity/id_interleaved.go 🔗

@@ -0,0 +1,68 @@
+package entity
+
+import (
+	"strings"
+)
+
+// CombineIds compute a merged Id holding information from both the primary Id
+// and the secondary Id.
+//
+// This allow to later find efficiently a secondary element because we can access
+// the primary one directly instead of searching for a primary that has a
+// secondary matching the Id.
+//
+// An example usage is Comment in a Bug. The interleaved Id will hold part of the
+// Bug Id and part of the Comment Id.
+//
+// To allow the use of an arbitrary length prefix of this Id, Ids from primary
+// and secondary are interleaved with this irregular pattern to give the
+// best chance to find the secondary even with a 7 character prefix.
+//
+// Format is: PSPSPSPPPSPPPPSPPPPSPPPPSPPPPSPPPPSPPPPSPPPPSPPPPSPPPPSPPPPSPPPP
+//
+// A complete interleaved Id hold 50 characters for the primary and 14 for the
+// secondary, which give a key space of 36^50 for the primary (~6 * 10^77) and
+// 36^14 for the secondary (~6 * 10^21). This asymmetry assumes a reasonable number
+// of secondary within a primary Entity, while still allowing for a vast key space
+// for the primary (that is, a globally merged database) with a low risk of collision.
+//
+// Here is the breakdown of several common prefix length:
+//
+// 5:    3P, 2S
+// 7:    4P, 3S
+// 10:   6P, 4S
+// 16:  11P, 5S
+func CombineIds(primary Id, secondary Id) Id {
+	var id strings.Builder
+
+	for i := 0; i < idLength; i++ {
+		switch {
+		default:
+			id.WriteByte(primary[0])
+			primary = primary[1:]
+		case i == 1, i == 3, i == 5, i == 9, i >= 10 && i%5 == 4:
+			id.WriteByte(secondary[0])
+			secondary = secondary[1:]
+		}
+	}
+
+	return Id(id.String())
+}
+
+// SeparateIds extract primary and secondary prefix from an arbitrary length prefix
+// of an Id created with CombineIds.
+func SeparateIds(prefix string) (primaryPrefix string, secondaryPrefix string) {
+	var primary strings.Builder
+	var secondary strings.Builder
+
+	for i, r := range prefix {
+		switch {
+		default:
+			primary.WriteRune(r)
+		case i == 1, i == 3, i == 5, i == 9, i >= 10 && i%5 == 4:
+			secondary.WriteRune(r)
+		}
+	}
+
+	return primary.String(), secondary.String()
+}

entity/id_interleaved_test.go 🔗

@@ -0,0 +1,36 @@
+package entity
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestInterleaved(t *testing.T) {
+	primary := Id("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX______________")
+	secondary := Id("YZ0123456789+/________________________________________________")
+	expectedId := Id("aYbZc0def1ghij2klmn3opqr4stuv5wxyz6ABCD7EFGH8IJKL9MNOP+QRST/UVWX")
+
+	interleaved := CombineIds(primary, secondary)
+	require.Equal(t, expectedId, interleaved)
+
+	// full length
+	splitPrimary, splitSecondary := SeparateIds(interleaved.String())
+	require.Equal(t, string(primary[:50]), splitPrimary)
+	require.Equal(t, string(secondary[:14]), splitSecondary)
+
+	// partial
+	splitPrimary, splitSecondary = SeparateIds(string(expectedId[:7]))
+	require.Equal(t, string(primary[:4]), splitPrimary)
+	require.Equal(t, string(secondary[:3]), splitSecondary)
+
+	// partial
+	splitPrimary, splitSecondary = SeparateIds(string(expectedId[:10]))
+	require.Equal(t, string(primary[:6]), splitPrimary)
+	require.Equal(t, string(secondary[:4]), splitSecondary)
+
+	// partial
+	splitPrimary, splitSecondary = SeparateIds(string(expectedId[:16]))
+	require.Equal(t, string(primary[:11]), splitPrimary)
+	require.Equal(t, string(secondary[:5]), splitSecondary)
+}

entity/interface.go 🔗

@@ -2,5 +2,11 @@ package entity
 
 type Interface interface {
 	// Id return the Entity identifier
+	//
+	// This Id need to be immutable without having to store the entity somewhere (ie, an entity only in memory
+	// should have a valid Id, and it should not change if further edit are done on this entity).
+	// How to achieve that is up to the entity itself. A common way would be to take a hash of an immutable data at
+	// the root of the entity.
+	// It is acceptable to use such a hash and keep mutating that data as long as Id() is not called.
 	Id() Id
 }

entity/merge.go 🔗

@@ -8,14 +8,15 @@ import (
 type MergeStatus int
 
 const (
-	_ MergeStatus = iota
-	MergeStatusNew
-	MergeStatusInvalid
-	MergeStatusUpdated
-	MergeStatusNothing
-	MergeStatusError
+	_                  MergeStatus = iota
+	MergeStatusNew                 // a new Entity was created locally
+	MergeStatusInvalid             // the remote data is invalid
+	MergeStatusUpdated             // a local Entity has been updated
+	MergeStatusNothing             // no changes were made to a local Entity (already up to date)
+	MergeStatusError               // a terminal error happened
 )
 
+// MergeResult holds the result of a merge operation on an Entity.
 type MergeResult struct {
 	// Err is set when a terminal error occur in the process
 	Err error
@@ -23,10 +24,10 @@ type MergeResult struct {
 	Id     Id
 	Status MergeStatus
 
-	// Only set for invalid status
+	// Only set for Invalid status
 	Reason string
 
-	// Not set for invalid status
+	// Only set for New or Updated status
 	Entity Interface
 }
 
@@ -41,34 +42,50 @@ func (mr MergeResult) String() string {
 	case MergeStatusNothing:
 		return "nothing to do"
 	case MergeStatusError:
-		return fmt.Sprintf("merge error on %s: %s", mr.Id, mr.Err.Error())
+		if mr.Id != "" {
+			return fmt.Sprintf("merge error on %s: %s", mr.Id, mr.Err.Error())
+		}
+		return fmt.Sprintf("merge error: %s", mr.Err.Error())
 	default:
 		panic("unknown merge status")
 	}
 }
 
-func NewMergeError(err error, id Id) MergeResult {
+func NewMergeNewStatus(id Id, entity Interface) MergeResult {
 	return MergeResult{
-		Err:    err,
 		Id:     id,
-		Status: MergeStatusError,
+		Status: MergeStatusNew,
+		Entity: entity,
 	}
 }
 
-func NewMergeStatus(status MergeStatus, id Id, entity Interface) MergeResult {
+func NewMergeInvalidStatus(id Id, reason string) MergeResult {
 	return MergeResult{
 		Id:     id,
-		Status: status,
+		Status: MergeStatusInvalid,
+		Reason: reason,
+	}
+}
 
-		// Entity is not set for an invalid merge result
+func NewMergeUpdatedStatus(id Id, entity Interface) MergeResult {
+	return MergeResult{
+		Id:     id,
+		Status: MergeStatusUpdated,
 		Entity: entity,
 	}
 }
 
-func NewMergeInvalidStatus(id Id, reason string) MergeResult {
+func NewMergeNothingStatus(id Id) MergeResult {
 	return MergeResult{
 		Id:     id,
-		Status: MergeStatusInvalid,
-		Reason: reason,
+		Status: MergeStatusNothing,
+	}
+}
+
+func NewMergeError(err error, id Id) MergeResult {
+	return MergeResult{
+		Id:     id,
+		Status: MergeStatusError,
+		Err:    err,
 	}
 }

entity/refs.go 🔗

@@ -2,17 +2,19 @@ package entity
 
 import "strings"
 
+// RefsToIds parses a slice of git references and returns the corresponding Entity Ids.
 func RefsToIds(refs []string) []Id {
 	ids := make([]Id, len(refs))
 
 	for i, ref := range refs {
-		ids[i] = refToId(ref)
+		ids[i] = RefToId(ref)
 	}
 
 	return ids
 }
 
-func refToId(ref string) Id {
+// RefToId parses a git reference and returns the corresponding Entity's Id.
+func RefToId(ref string) Id {
 	split := strings.Split(ref, "/")
 	return Id(split[len(split)-1])
 }

go.mod 🔗

@@ -1,6 +1,6 @@
 module github.com/MichaelMure/git-bug
 
-go 1.13
+go 1.15
 
 require (
 	github.com/99designs/gqlgen v0.10.3-0.20200209012558-b7a58a1c0e4b
@@ -8,7 +8,6 @@ require (
 	github.com/MichaelMure/go-term-text v0.2.10
 	github.com/araddon/dateparse v0.0.0-20190622164848-0fb0a474d195
 	github.com/awesome-gocui/gocui v0.6.1-0.20191115151952-a34ffb055986
-	github.com/blang/semver v3.5.1+incompatible
 	github.com/blevesearch/bleve v1.0.14
 	github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9
 	github.com/corpix/uarand v0.1.1 // indirect

go.sum 🔗

@@ -90,12 +90,20 @@ github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH
 github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s=
 github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
 github.com/blevesearch/zap/v11 v11.0.14 h1:IrDAvtlzDylh6H2QCmS0OGcN9Hpf6mISJlfKjcwJs7k=
+github.com/blevesearch/zap/v11 v11.0.14 h1:IrDAvtlzDylh6H2QCmS0OGcN9Hpf6mISJlfKjcwJs7k=
+github.com/blevesearch/zap/v11 v11.0.14/go.mod h1:MUEZh6VHGXv1PKx3WnCbdP404LGG2IZVa/L66pyFwnY=
 github.com/blevesearch/zap/v11 v11.0.14/go.mod h1:MUEZh6VHGXv1PKx3WnCbdP404LGG2IZVa/L66pyFwnY=
 github.com/blevesearch/zap/v12 v12.0.14 h1:2o9iRtl1xaRjsJ1xcqTyLX414qPAwykHNV7wNVmbp3w=
+github.com/blevesearch/zap/v12 v12.0.14 h1:2o9iRtl1xaRjsJ1xcqTyLX414qPAwykHNV7wNVmbp3w=
+github.com/blevesearch/zap/v12 v12.0.14/go.mod h1:rOnuZOiMKPQj18AEKEHJxuI14236tTQ1ZJz4PAnWlUg=
 github.com/blevesearch/zap/v12 v12.0.14/go.mod h1:rOnuZOiMKPQj18AEKEHJxuI14236tTQ1ZJz4PAnWlUg=
 github.com/blevesearch/zap/v13 v13.0.6 h1:r+VNSVImi9cBhTNNR+Kfl5uiGy8kIbb0JMz/h8r6+O4=
+github.com/blevesearch/zap/v13 v13.0.6 h1:r+VNSVImi9cBhTNNR+Kfl5uiGy8kIbb0JMz/h8r6+O4=
+github.com/blevesearch/zap/v13 v13.0.6/go.mod h1:L89gsjdRKGyGrRN6nCpIScCvvkyxvmeDCwZRcjjPCrw=
 github.com/blevesearch/zap/v13 v13.0.6/go.mod h1:L89gsjdRKGyGrRN6nCpIScCvvkyxvmeDCwZRcjjPCrw=
 github.com/blevesearch/zap/v14 v14.0.5 h1:NdcT+81Nvmp2zL+NhwSvGSLh7xNgGL8QRVZ67njR0NU=
+github.com/blevesearch/zap/v14 v14.0.5 h1:NdcT+81Nvmp2zL+NhwSvGSLh7xNgGL8QRVZ67njR0NU=
+github.com/blevesearch/zap/v14 v14.0.5/go.mod h1:bWe8S7tRrSBTIaZ6cLRbgNH4TUDaC9LZSpRGs85AsGY=
 github.com/blevesearch/zap/v14 v14.0.5/go.mod h1:bWe8S7tRrSBTIaZ6cLRbgNH4TUDaC9LZSpRGs85AsGY=
 github.com/blevesearch/zap/v15 v15.0.3 h1:Ylj8Oe+mo0P25tr9iLPp33lN6d4qcztGjaIsP51UxaY=
 github.com/blevesearch/zap/v15 v15.0.3/go.mod h1:iuwQrImsh1WjWJ0Ue2kBqY83a0rFtJTqfa9fp1rbVVU=
@@ -472,6 +480,8 @@ github.com/vektah/gqlparser v1.3.1 h1:8b0IcD3qZKWJQHSzynbDlrtP3IxVydZ2DZepCGofqf
 github.com/vektah/gqlparser v1.3.1/go.mod h1:bkVf0FX+Stjg/MHnm8mEyubuaArhNEqfQhF+OTiAL74=
 github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
 github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/xanzy/go-gitlab v0.40.1 h1:jHueLh5Inzv20TL5Yki+CaLmyvtw3Yq7blbWx7GmglQ=
+github.com/xanzy/go-gitlab v0.40.1/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
 github.com/xanzy/go-gitlab v0.44.0 h1:cEiGhqu7EpFGuei2a2etAwB+x6403E5CvpLn35y+GPs=
 github.com/xanzy/go-gitlab v0.44.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
 github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=

identity/identity.go 🔗

@@ -5,8 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"reflect"
-	"strings"
-	"time"
 
 	"github.com/pkg/errors"
 
@@ -35,47 +33,27 @@ var _ Interface = &Identity{}
 var _ entity.Interface = &Identity{}
 
 type Identity struct {
-	// Id used as unique identifier
-	id entity.Id
-
 	// all the successive version of the identity
-	versions []*Version
-
-	// not serialized
-	lastCommit repository.Hash
+	versions []*version
 }
 
-func NewIdentity(name string, email string) *Identity {
-	return &Identity{
-		id: entity.UnsetId,
-		versions: []*Version{
-			{
-				name:  name,
-				email: email,
-				nonce: makeNonce(20),
-			},
-		},
-	}
+func NewIdentity(repo repository.RepoClock, name string, email string) (*Identity, error) {
+	return NewIdentityFull(repo, name, email, "", "", nil)
 }
 
-func NewIdentityFull(name string, email string, login string, avatarUrl string) *Identity {
-	return &Identity{
-		id: entity.UnsetId,
-		versions: []*Version{
-			{
-				name:      name,
-				email:     email,
-				login:     login,
-				avatarURL: avatarUrl,
-				nonce:     makeNonce(20),
-			},
-		},
+func NewIdentityFull(repo repository.RepoClock, name string, email string, login string, avatarUrl string, keys []*Key) (*Identity, error) {
+	v, err := newVersion(repo, name, email, login, avatarUrl, keys)
+	if err != nil {
+		return nil, err
 	}
+	return &Identity{
+		versions: []*version{v},
+	}, nil
 }
 
 // NewFromGitUser will query the repository for user detail and
 // build the corresponding Identity
-func NewFromGitUser(repo repository.Repo) (*Identity, error) {
+func NewFromGitUser(repo repository.ClockedRepo) (*Identity, error) {
 	name, err := repo.GetUserName()
 	if err != nil {
 		return nil, err
@@ -92,13 +70,13 @@ func NewFromGitUser(repo repository.Repo) (*Identity, error) {
 		return nil, errors.New("user name is not configured in git yet. Please use `git config --global user.email johndoe@example.com`")
 	}
 
-	return NewIdentity(name, email), nil
+	return NewIdentity(repo, name, email)
 }
 
 // MarshalJSON will only serialize the id
 func (i *Identity) MarshalJSON() ([]byte, error) {
 	return json.Marshal(&IdentityStub{
-		id: i.id,
+		id: i.Id(),
 	})
 }
 
@@ -123,36 +101,32 @@ func ReadRemote(repo repository.Repo, remote string, id string) (*Identity, erro
 
 // read will load and parse an identity from git
 func read(repo repository.Repo, ref string) (*Identity, error) {
-	refSplit := strings.Split(ref, "/")
-	id := entity.Id(refSplit[len(refSplit)-1])
+	id := entity.RefToId(ref)
 
 	if err := id.Validate(); err != nil {
 		return nil, errors.Wrap(err, "invalid ref")
 	}
 
 	hashes, err := repo.ListCommits(ref)
-
-	// TODO: this is not perfect, it might be a command invoke error
 	if err != nil {
 		return nil, ErrIdentityNotExist
 	}
-
-	i := &Identity{
-		id: id,
+	if len(hashes) == 0 {
+		return nil, fmt.Errorf("empty identity")
 	}
 
+	i := &Identity{}
+
 	for _, hash := range hashes {
 		entries, err := repo.ReadTree(hash)
 		if err != nil {
 			return nil, errors.Wrap(err, "can't list git tree entries")
 		}
-
 		if len(entries) != 1 {
 			return nil, fmt.Errorf("invalid identity data at hash %s", hash)
 		}
 
 		entry := entries[0]
-
 		if entry.Name != versionEntryName {
 			return nil, fmt.Errorf("invalid identity data at hash %s", hash)
 		}
@@ -162,20 +136,22 @@ func read(repo repository.Repo, ref string) (*Identity, error) {
 			return nil, errors.Wrap(err, "failed to read git blob data")
 		}
 
-		var version Version
+		var version version
 		err = json.Unmarshal(data, &version)
-
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to decode Identity version json %s", hash)
 		}
 
 		// tag the version with the commit hash
 		version.commitHash = hash
-		i.lastCommit = hash
 
 		i.versions = append(i.versions, &version)
 	}
 
+	if id != i.versions[0].Id() {
+		return nil, fmt.Errorf("identity ID doesn't match the first version ID")
+	}
+
 	return i, nil
 }
 
@@ -292,32 +268,49 @@ type Mutator struct {
 }
 
 // Mutate allow to create a new version of the Identity in one go
-func (i *Identity) Mutate(f func(orig Mutator) Mutator) {
+func (i *Identity) Mutate(repo repository.RepoClock, f func(orig *Mutator)) error {
+	copyKeys := func(keys []*Key) []*Key {
+		result := make([]*Key, len(keys))
+		for i, key := range keys {
+			result[i] = key.Clone()
+		}
+		return result
+	}
+
 	orig := Mutator{
 		Name:      i.Name(),
 		Email:     i.Email(),
 		Login:     i.Login(),
 		AvatarUrl: i.AvatarUrl(),
-		Keys:      i.Keys(),
+		Keys:      copyKeys(i.Keys()),
 	}
-	mutated := f(orig)
+	mutated := orig
+	mutated.Keys = copyKeys(orig.Keys)
+
+	f(&mutated)
+
 	if reflect.DeepEqual(orig, mutated) {
-		return
-	}
-	i.versions = append(i.versions, &Version{
-		name:      mutated.Name,
-		email:     mutated.Email,
-		login:     mutated.Login,
-		avatarURL: mutated.AvatarUrl,
-		keys:      mutated.Keys,
-	})
+		return nil
+	}
+
+	v, err := newVersion(repo,
+		mutated.Name,
+		mutated.Email,
+		mutated.Login,
+		mutated.AvatarUrl,
+		mutated.Keys,
+	)
+	if err != nil {
+		return err
+	}
+
+	i.versions = append(i.versions, v)
+	return nil
 }
 
 // Write the identity into the Repository. In particular, this ensure that
 // the Id is properly set.
 func (i *Identity) Commit(repo repository.ClockedRepo) error {
-	// Todo: check for mismatch between memory and commit data
-
 	if !i.NeedCommit() {
 		return fmt.Errorf("can't commit an identity with no pending version")
 	}
@@ -326,24 +319,14 @@ func (i *Identity) Commit(repo repository.ClockedRepo) error {
 		return errors.Wrap(err, "can't commit an identity with invalid data")
 	}
 
+	var lastCommit repository.Hash
 	for _, v := range i.versions {
 		if v.commitHash != "" {
-			i.lastCommit = v.commitHash
+			lastCommit = v.commitHash
 			// ignore already commit versions
 			continue
 		}
 
-		// get the times where new versions starts to be valid
-		// TODO: instead of this hardcoded clock for bugs only, this need to be
-		// a vector of edit clock, one for each entity (bug, PR, config ..)
-		bugEditClock, err := repo.GetOrCreateClock("bug-edit")
-		if err != nil {
-			return err
-		}
-
-		v.time = bugEditClock.Time()
-		v.unixTime = time.Now().Unix()
-
 		blobHash, err := v.Write(repo)
 		if err != nil {
 			return err
@@ -360,37 +343,21 @@ func (i *Identity) Commit(repo repository.ClockedRepo) error {
 		}
 
 		var commitHash repository.Hash
-		if i.lastCommit != "" {
-			commitHash, err = repo.StoreCommitWithParent(treeHash, i.lastCommit)
+		if lastCommit != "" {
+			commitHash, err = repo.StoreCommit(treeHash, lastCommit)
 		} else {
 			commitHash, err = repo.StoreCommit(treeHash)
 		}
-
 		if err != nil {
 			return err
 		}
 
-		i.lastCommit = commitHash
+		lastCommit = commitHash
 		v.commitHash = commitHash
-
-		// if it was the first commit, use the commit hash as the Identity id
-		if i.id == "" || i.id == entity.UnsetId {
-			i.id = entity.Id(commitHash)
-		}
-	}
-
-	if i.id == "" {
-		panic("identity with no id")
 	}
 
-	ref := fmt.Sprintf("%s%s", identityRefPattern, i.id)
-	err := repo.UpdateRef(ref, i.lastCommit)
-
-	if err != nil {
-		return err
-	}
-
-	return nil
+	ref := fmt.Sprintf("%s%s", identityRefPattern, i.Id().String())
+	return repo.UpdateRef(ref, lastCommit)
 }
 
 func (i *Identity) CommitAsNeeded(repo repository.ClockedRepo) error {
@@ -433,20 +400,17 @@ func (i *Identity) NeedCommit() bool {
 // confident enough to implement that. I choose the strict fast-forward only approach,
 // despite it's potential problem with two different version as mentioned above.
 func (i *Identity) Merge(repo repository.Repo, other *Identity) (bool, error) {
-	if i.id != other.id {
+	if i.Id() != other.Id() {
 		return false, errors.New("merging unrelated identities is not supported")
 	}
 
-	if i.lastCommit == "" || other.lastCommit == "" {
-		return false, errors.New("can't merge identities that has never been stored")
-	}
-
 	modified := false
+	var lastCommit repository.Hash
 	for j, otherVersion := range other.versions {
 		// if there is more version in other, take them
 		if len(i.versions) == j {
 			i.versions = append(i.versions, otherVersion)
-			i.lastCommit = otherVersion.commitHash
+			lastCommit = otherVersion.commitHash
 			modified = true
 		}
 
@@ -458,7 +422,7 @@ func (i *Identity) Merge(repo repository.Repo, other *Identity) (bool, error) {
 	}
 
 	if modified {
-		err := repo.UpdateRef(identityRefPattern+i.id.String(), i.lastCommit)
+		err := repo.UpdateRef(identityRefPattern+i.Id().String(), lastCommit)
 		if err != nil {
 			return false, err
 		}
@@ -469,7 +433,7 @@ func (i *Identity) Merge(repo repository.Repo, other *Identity) (bool, error) {
 
 // Validate check if the Identity data is valid
 func (i *Identity) Validate() error {
-	lastTime := lamport.Time(0)
+	lastTimes := make(map[string]lamport.Time)
 
 	if len(i.versions) == 0 {
 		return fmt.Errorf("no version")
@@ -480,22 +444,27 @@ func (i *Identity) Validate() error {
 			return err
 		}
 
-		if v.commitHash != "" && v.time < lastTime {
-			return fmt.Errorf("non-chronological version (%d --> %d)", lastTime, v.time)
+		// check for always increasing lamport time
+		// check that a new version didn't drop a clock
+		for name, previous := range lastTimes {
+			if now, ok := v.times[name]; ok {
+				if now < previous {
+					return fmt.Errorf("non-chronological lamport clock %s (%d --> %d)", name, previous, now)
+				}
+			} else {
+				return fmt.Errorf("version has less lamport clocks than before (missing %s)", name)
+			}
 		}
 
-		lastTime = v.time
-	}
-
-	// The identity Id should be the hash of the first commit
-	if i.versions[0].commitHash != "" && string(i.versions[0].commitHash) != i.id.String() {
-		return fmt.Errorf("identity id should be the first commit hash")
+		for name, now := range v.times {
+			lastTimes[name] = now
+		}
 	}
 
 	return nil
 }
 
-func (i *Identity) lastVersion() *Version {
+func (i *Identity) lastVersion() *version {
 	if len(i.versions) <= 0 {
 		panic("no version at all")
 	}
@@ -505,12 +474,8 @@ func (i *Identity) lastVersion() *Version {
 
 // Id return the Identity identifier
 func (i *Identity) Id() entity.Id {
-	if i.id == "" || i.id == entity.UnsetId {
-		// simply panic as it would be a coding error
-		// (using an id of an identity not stored yet)
-		panic("no id yet")
-	}
-	return i.id
+	// id is the id of the first version
+	return i.versions[0].Id()
 }
 
 // Name return the last version of the name
@@ -518,6 +483,21 @@ func (i *Identity) Name() string {
 	return i.lastVersion().name
 }
 
+// DisplayName return a non-empty string to display, representing the
+// identity, based on the non-empty values.
+func (i *Identity) DisplayName() string {
+	switch {
+	case i.Name() == "" && i.Login() != "":
+		return i.Login()
+	case i.Name() != "" && i.Login() == "":
+		return i.Name()
+	case i.Name() != "" && i.Login() != "":
+		return fmt.Sprintf("%s (%s)", i.Name(), i.Login())
+	}
+
+	panic("invalid person data")
+}
+
 // Email return the last version of the email
 func (i *Identity) Email() string {
 	return i.lastVersion().email
@@ -538,12 +518,35 @@ func (i *Identity) Keys() []*Key {
 	return i.lastVersion().keys
 }
 
+// SigningKey returns the key that should be used to sign new messages. If no key is available, return nil.
+func (i *Identity) SigningKey(repo repository.RepoKeyring) (*Key, error) {
+	keys := i.Keys()
+	for _, key := range keys {
+		err := key.ensurePrivateKey(repo)
+		if err == errNoPrivateKey {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		return key, nil
+	}
+	return nil, nil
+}
+
 // ValidKeysAtTime return the set of keys valid at a given lamport time
-func (i *Identity) ValidKeysAtTime(time lamport.Time) []*Key {
+func (i *Identity) ValidKeysAtTime(clockName string, time lamport.Time) []*Key {
 	var result []*Key
 
+	var lastTime lamport.Time
 	for _, v := range i.versions {
-		if v.time > time {
+		refTime, ok := v.times[clockName]
+		if !ok {
+			refTime = lastTime
+		}
+		lastTime = refTime
+
+		if refTime > time {
 			return result
 		}
 
@@ -553,19 +556,14 @@ func (i *Identity) ValidKeysAtTime(time lamport.Time) []*Key {
 	return result
 }
 
-// DisplayName return a non-empty string to display, representing the
-// identity, based on the non-empty values.
-func (i *Identity) DisplayName() string {
-	switch {
-	case i.Name() == "" && i.Login() != "":
-		return i.Login()
-	case i.Name() != "" && i.Login() == "":
-		return i.Name()
-	case i.Name() != "" && i.Login() != "":
-		return fmt.Sprintf("%s (%s)", i.Name(), i.Login())
-	}
+// LastModification return the timestamp at which the last version of the identity became valid.
+func (i *Identity) LastModification() timestamp.Timestamp {
+	return timestamp.Timestamp(i.lastVersion().unixTime)
+}
 
-	panic("invalid person data")
+// LastModificationLamports return the lamport times at which the last version of the identity became valid.
+func (i *Identity) LastModificationLamports() map[string]lamport.Time {
+	return i.lastVersion().times
 }
 
 // IsProtected return true if the chain of git commits started to be signed.
@@ -575,27 +573,23 @@ func (i *Identity) IsProtected() bool {
 	return false
 }
 
-// LastModificationLamportTime return the Lamport time at which the last version of the identity became valid.
-func (i *Identity) LastModificationLamport() lamport.Time {
-	return i.lastVersion().time
-}
-
-// LastModification return the timestamp at which the last version of the identity became valid.
-func (i *Identity) LastModification() timestamp.Timestamp {
-	return timestamp.Timestamp(i.lastVersion().unixTime)
-}
-
-// SetMetadata store arbitrary metadata along the last not-commit Version.
-// If the Version has been commit to git already, a new identical version is added and will need to be
+// SetMetadata store arbitrary metadata along the last not-commit version.
+// If the version has been commit to git already, a new identical version is added and will need to be
 // commit.
 func (i *Identity) SetMetadata(key string, value string) {
+	// once commit, data is immutable so we create a new version
 	if i.lastVersion().commitHash != "" {
 		i.versions = append(i.versions, i.lastVersion().Clone())
 	}
+	// if Id() has been called, we can't change the first version anymore, so we create a new version
+	if len(i.versions) == 1 && i.versions[0].id != entity.UnsetId && i.versions[0].id != "" {
+		i.versions = append(i.versions, i.lastVersion().Clone())
+	}
+
 	i.lastVersion().SetMetadata(key, value)
 }
 
-// ImmutableMetadata return all metadata for this Identity, accumulated from each Version.
+// ImmutableMetadata return all metadata for this Identity, accumulated from each version.
 // If multiple value are found, the first defined takes precedence.
 func (i *Identity) ImmutableMetadata() map[string]string {
 	metadata := make(map[string]string)
@@ -611,7 +605,7 @@ func (i *Identity) ImmutableMetadata() map[string]string {
 	return metadata
 }
 
-// MutableMetadata return all metadata for this Identity, accumulated from each Version.
+// MutableMetadata return all metadata for this Identity, accumulated from each version.
 // If multiple value are found, the last defined takes precedence.
 func (i *Identity) MutableMetadata() map[string]string {
 	metadata := make(map[string]string)
@@ -624,9 +618,3 @@ func (i *Identity) MutableMetadata() map[string]string {
 
 	return metadata
 }
-
-// addVersionForTest add a new version to the identity
-// Only for testing !
-func (i *Identity) addVersionForTest(version *Version) {
-	i.versions = append(i.versions, version)
-}

identity/identity_actions.go 🔗

@@ -13,19 +13,12 @@ import (
 // Fetch retrieve updates from a remote
 // This does not change the local identities state
 func Fetch(repo repository.Repo, remote string) (string, error) {
-	// "refs/identities/*:refs/remotes/<remote>/identities/*"
-	remoteRefSpec := fmt.Sprintf(identityRemoteRefPattern, remote)
-	fetchRefSpec := fmt.Sprintf("%s*:%s*", identityRefPattern, remoteRefSpec)
-
-	return repo.FetchRefs(remote, fetchRefSpec)
+	return repo.FetchRefs(remote, "identities")
 }
 
 // Push update a remote with the local changes
 func Push(repo repository.Repo, remote string) (string, error) {
-	// "refs/identities/*:refs/identities/*"
-	refspec := fmt.Sprintf("%s*:%s*", identityRefPattern, identityRefPattern)
-
-	return repo.PushRefs(remote, refspec)
+	return repo.PushRefs(remote, "identities")
 }
 
 // Pull will do a Fetch + MergeAll
@@ -102,7 +95,7 @@ func MergeAll(repo repository.ClockedRepo, remote string) <-chan entity.MergeRes
 					return
 				}
 
-				out <- entity.NewMergeStatus(entity.MergeStatusNew, id, remoteIdentity)
+				out <- entity.NewMergeNewStatus(id, remoteIdentity)
 				continue
 			}
 
@@ -121,9 +114,9 @@ func MergeAll(repo repository.ClockedRepo, remote string) <-chan entity.MergeRes
 			}
 
 			if updated {
-				out <- entity.NewMergeStatus(entity.MergeStatusUpdated, id, localIdentity)
+				out <- entity.NewMergeUpdatedStatus(id, localIdentity)
 			} else {
-				out <- entity.NewMergeStatus(entity.MergeStatusNothing, id, localIdentity)
+				out <- entity.NewMergeNothingStatus(id)
 			}
 		}
 	}()

identity/identity_actions_test.go 🔗

@@ -8,12 +8,13 @@ import (
 	"github.com/MichaelMure/git-bug/repository"
 )
 
-func TestPushPull(t *testing.T) {
-	repoA, repoB, remote := repository.SetupReposAndRemote()
+func TestIdentityPushPull(t *testing.T) {
+	repoA, repoB, remote := repository.SetupGoGitReposAndRemote()
 	defer repository.CleanupTestRepos(repoA, repoB, remote)
 
-	identity1 := NewIdentity("name1", "email1")
-	err := identity1.Commit(repoA)
+	identity1, err := NewIdentity(repoA, "name1", "email1")
+	require.NoError(t, err)
+	err = identity1.Commit(repoA)
 	require.NoError(t, err)
 
 	// A --> remote --> B
@@ -30,7 +31,8 @@ func TestPushPull(t *testing.T) {
 	}
 
 	// B --> remote --> A
-	identity2 := NewIdentity("name2", "email2")
+	identity2, err := NewIdentity(repoB, "name2", "email2")
+	require.NoError(t, err)
 	err = identity2.Commit(repoB)
 	require.NoError(t, err)
 
@@ -48,17 +50,19 @@ func TestPushPull(t *testing.T) {
 
 	// Update both
 
-	identity1.addVersionForTest(&Version{
-		name:  "name1b",
-		email: "email1b",
+	err = identity1.Mutate(repoA, func(orig *Mutator) {
+		orig.Name = "name1b"
+		orig.Email = "email1b"
 	})
+	require.NoError(t, err)
 	err = identity1.Commit(repoA)
 	require.NoError(t, err)
 
-	identity2.addVersionForTest(&Version{
-		name:  "name2b",
-		email: "email2b",
+	err = identity2.Mutate(repoB, func(orig *Mutator) {
+		orig.Name = "name2b"
+		orig.Email = "email2b"
 	})
+	require.NoError(t, err)
 	err = identity2.Commit(repoB)
 	require.NoError(t, err)
 
@@ -92,20 +96,22 @@ func TestPushPull(t *testing.T) {
 
 	// Concurrent update
 
-	identity1.addVersionForTest(&Version{
-		name:  "name1c",
-		email: "email1c",
+	err = identity1.Mutate(repoA, func(orig *Mutator) {
+		orig.Name = "name1c"
+		orig.Email = "email1c"
 	})
+	require.NoError(t, err)
 	err = identity1.Commit(repoA)
 	require.NoError(t, err)
 
 	identity1B, err := ReadLocal(repoB, identity1.Id())
 	require.NoError(t, err)
 
-	identity1B.addVersionForTest(&Version{
-		name:  "name1concurrent",
-		email: "email1concurrent",
+	err = identity1B.Mutate(repoB, func(orig *Mutator) {
+		orig.Name = "name1concurrent"
+		orig.Email = "email1concurrent"
 	})
+	require.NoError(t, err)
 	err = identity1B.Commit(repoB)
 	require.NoError(t, err)
 

identity/identity_stub.go 🔗

@@ -52,6 +52,10 @@ func (IdentityStub) Name() string {
 	panic("identities needs to be properly loaded with identity.ReadLocal()")
 }
 
+func (IdentityStub) DisplayName() string {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
 func (IdentityStub) Email() string {
 	panic("identities needs to be properly loaded with identity.ReadLocal()")
 }
@@ -68,23 +72,19 @@ func (IdentityStub) Keys() []*Key {
 	panic("identities needs to be properly loaded with identity.ReadLocal()")
 }
 
-func (IdentityStub) ValidKeysAtTime(_ lamport.Time) []*Key {
+func (i *IdentityStub) SigningKey(repo repository.RepoKeyring) (*Key, error) {
 	panic("identities needs to be properly loaded with identity.ReadLocal()")
 }
 
-func (IdentityStub) DisplayName() string {
+func (IdentityStub) ValidKeysAtTime(_ string, _ lamport.Time) []*Key {
 	panic("identities needs to be properly loaded with identity.ReadLocal()")
 }
 
-func (IdentityStub) Validate() error {
-	panic("identities needs to be properly loaded with identity.ReadLocal()")
-}
-
-func (IdentityStub) CommitWithRepo(repo repository.ClockedRepo) error {
+func (i *IdentityStub) LastModification() timestamp.Timestamp {
 	panic("identities needs to be properly loaded with identity.ReadLocal()")
 }
 
-func (i *IdentityStub) CommitAsNeededWithRepo(repo repository.ClockedRepo) error {
+func (i *IdentityStub) LastModificationLamports() map[string]lamport.Time {
 	panic("identities needs to be properly loaded with identity.ReadLocal()")
 }
 
@@ -92,11 +92,7 @@ func (IdentityStub) IsProtected() bool {
 	panic("identities needs to be properly loaded with identity.ReadLocal()")
 }
 
-func (i *IdentityStub) LastModificationLamport() lamport.Time {
-	panic("identities needs to be properly loaded with identity.ReadLocal()")
-}
-
-func (i *IdentityStub) LastModification() timestamp.Timestamp {
+func (IdentityStub) Validate() error {
 	panic("identities needs to be properly loaded with identity.ReadLocal()")
 }
 

identity/identity_test.go 🔗

@@ -6,120 +6,108 @@ import (
 
 	"github.com/stretchr/testify/require"
 
-	"github.com/MichaelMure/git-bug/entity"
 	"github.com/MichaelMure/git-bug/repository"
+	"github.com/MichaelMure/git-bug/util/lamport"
 )
 
 // Test the commit and load of an Identity with multiple versions
 func TestIdentityCommitLoad(t *testing.T) {
-	mockRepo := repository.NewMockRepoForTest()
+	repo := makeIdentityTestRepo(t)
 
 	// single version
 
-	identity := &Identity{
-		id: entity.UnsetId,
-		versions: []*Version{
-			{
-				name:  "René Descartes",
-				email: "rene.descartes@example.com",
-			},
-		},
-	}
+	identity, err := NewIdentity(repo, "René Descartes", "rene.descartes@example.com")
+	require.NoError(t, err)
 
-	err := identity.Commit(mockRepo)
+	idBeforeCommit := identity.Id()
 
+	err = identity.Commit(repo)
 	require.NoError(t, err)
-	require.NotEmpty(t, identity.id)
 
-	loaded, err := ReadLocal(mockRepo, identity.id)
+	commitsAreSet(t, identity)
+	require.NotEmpty(t, identity.Id())
+	require.Equal(t, idBeforeCommit, identity.Id())
+	require.Equal(t, idBeforeCommit, identity.versions[0].Id())
+
+	loaded, err := ReadLocal(repo, identity.Id())
 	require.NoError(t, err)
 	commitsAreSet(t, loaded)
 	require.Equal(t, identity, loaded)
 
-	// multiple version
+	// multiple versions
 
-	identity = &Identity{
-		id: entity.UnsetId,
-		versions: []*Version{
-			{
-				time:  100,
-				name:  "René Descartes",
-				email: "rene.descartes@example.com",
-				keys: []*Key{
-					{PubKey: "pubkeyA"},
-				},
-			},
-			{
-				time:  200,
-				name:  "René Descartes",
-				email: "rene.descartes@example.com",
-				keys: []*Key{
-					{PubKey: "pubkeyB"},
-				},
-			},
-			{
-				time:  201,
-				name:  "René Descartes",
-				email: "rene.descartes@example.com",
-				keys: []*Key{
-					{PubKey: "pubkeyC"},
-				},
-			},
-		},
-	}
+	identity, err = NewIdentityFull(repo, "René Descartes", "rene.descartes@example.com", "", "", []*Key{generatePublicKey()})
+	require.NoError(t, err)
 
-	err = identity.Commit(mockRepo)
+	idBeforeCommit = identity.Id()
 
+	err = identity.Mutate(repo, func(orig *Mutator) {
+		orig.Keys = []*Key{generatePublicKey()}
+	})
 	require.NoError(t, err)
-	require.NotEmpty(t, identity.id)
 
-	loaded, err = ReadLocal(mockRepo, identity.id)
+	err = identity.Mutate(repo, func(orig *Mutator) {
+		orig.Keys = []*Key{generatePublicKey()}
+	})
+	require.NoError(t, err)
+
+	require.Equal(t, idBeforeCommit, identity.Id())
+
+	err = identity.Commit(repo)
+	require.NoError(t, err)
+
+	commitsAreSet(t, identity)
+	require.NotEmpty(t, identity.Id())
+	require.Equal(t, idBeforeCommit, identity.Id())
+	require.Equal(t, idBeforeCommit, identity.versions[0].Id())
+
+	loaded, err = ReadLocal(repo, identity.Id())
 	require.NoError(t, err)
 	commitsAreSet(t, loaded)
 	require.Equal(t, identity, loaded)
 
 	// add more version
 
-	identity.addVersionForTest(&Version{
-		time:  201,
-		name:  "René Descartes",
-		email: "rene.descartes@example.com",
-		keys: []*Key{
-			{PubKey: "pubkeyD"},
-		},
+	err = identity.Mutate(repo, func(orig *Mutator) {
+		orig.Email = "rene@descartes.com"
+		orig.Keys = []*Key{generatePublicKey()}
 	})
+	require.NoError(t, err)
 
-	identity.addVersionForTest(&Version{
-		time:  300,
-		name:  "René Descartes",
-		email: "rene.descartes@example.com",
-		keys: []*Key{
-			{PubKey: "pubkeyE"},
-		},
+	err = identity.Mutate(repo, func(orig *Mutator) {
+		orig.Email = "rene@descartes.com"
+		orig.Keys = []*Key{generatePublicKey(), generatePublicKey()}
 	})
+	require.NoError(t, err)
 
-	err = identity.Commit(mockRepo)
-
+	err = identity.Commit(repo)
 	require.NoError(t, err)
-	require.NotEmpty(t, identity.id)
 
-	loaded, err = ReadLocal(mockRepo, identity.id)
+	commitsAreSet(t, identity)
+	require.NotEmpty(t, identity.Id())
+	require.Equal(t, idBeforeCommit, identity.Id())
+	require.Equal(t, idBeforeCommit, identity.versions[0].Id())
+
+	loaded, err = ReadLocal(repo, identity.Id())
 	require.NoError(t, err)
 	commitsAreSet(t, loaded)
 	require.Equal(t, identity, loaded)
 }
 
 func TestIdentityMutate(t *testing.T) {
-	identity := NewIdentity("René Descartes", "rene.descartes@example.com")
+	repo := makeIdentityTestRepo(t)
+
+	identity, err := NewIdentity(repo, "René Descartes", "rene.descartes@example.com")
+	require.NoError(t, err)
 
 	require.Len(t, identity.versions, 1)
 
-	identity.Mutate(func(orig Mutator) Mutator {
+	err = identity.Mutate(repo, func(orig *Mutator) {
 		orig.Email = "rene@descartes.fr"
 		orig.Name = "René"
 		orig.Login = "rene"
-		return orig
 	})
+	require.NoError(t, err)
 
 	require.Len(t, identity.versions, 2)
 	require.Equal(t, identity.Email(), "rene@descartes.fr")
@@ -135,97 +123,93 @@ func commitsAreSet(t *testing.T, identity *Identity) {
 
 // Test that the correct crypto keys are returned for a given lamport time
 func TestIdentity_ValidKeysAtTime(t *testing.T) {
+	pubKeyA := generatePublicKey()
+	pubKeyB := generatePublicKey()
+	pubKeyC := generatePublicKey()
+	pubKeyD := generatePublicKey()
+	pubKeyE := generatePublicKey()
+
 	identity := Identity{
-		id: entity.UnsetId,
-		versions: []*Version{
+		versions: []*version{
 			{
-				time:  100,
-				name:  "René Descartes",
-				email: "rene.descartes@example.com",
-				keys: []*Key{
-					{PubKey: "pubkeyA"},
-				},
+				times: map[string]lamport.Time{"foo": 100},
+				keys:  []*Key{pubKeyA},
 			},
 			{
-				time:  200,
-				name:  "René Descartes",
-				email: "rene.descartes@example.com",
-				keys: []*Key{
-					{PubKey: "pubkeyB"},
-				},
+				times: map[string]lamport.Time{"foo": 200},
+				keys:  []*Key{pubKeyB},
 			},
 			{
-				time:  201,
-				name:  "René Descartes",
-				email: "rene.descartes@example.com",
-				keys: []*Key{
-					{PubKey: "pubkeyC"},
-				},
+				times: map[string]lamport.Time{"foo": 201},
+				keys:  []*Key{pubKeyC},
 			},
 			{
-				time:  201,
-				name:  "René Descartes",
-				email: "rene.descartes@example.com",
-				keys: []*Key{
-					{PubKey: "pubkeyD"},
-				},
+				times: map[string]lamport.Time{"foo": 201},
+				keys:  []*Key{pubKeyD},
 			},
 			{
-				time:  300,
-				name:  "René Descartes",
-				email: "rene.descartes@example.com",
-				keys: []*Key{
-					{PubKey: "pubkeyE"},
-				},
+				times: map[string]lamport.Time{"foo": 300},
+				keys:  []*Key{pubKeyE},
 			},
 		},
 	}
 
-	require.Nil(t, identity.ValidKeysAtTime(10))
-	require.Equal(t, identity.ValidKeysAtTime(100), []*Key{{PubKey: "pubkeyA"}})
-	require.Equal(t, identity.ValidKeysAtTime(140), []*Key{{PubKey: "pubkeyA"}})
-	require.Equal(t, identity.ValidKeysAtTime(200), []*Key{{PubKey: "pubkeyB"}})
-	require.Equal(t, identity.ValidKeysAtTime(201), []*Key{{PubKey: "pubkeyD"}})
-	require.Equal(t, identity.ValidKeysAtTime(202), []*Key{{PubKey: "pubkeyD"}})
-	require.Equal(t, identity.ValidKeysAtTime(300), []*Key{{PubKey: "pubkeyE"}})
-	require.Equal(t, identity.ValidKeysAtTime(3000), []*Key{{PubKey: "pubkeyE"}})
+	require.Nil(t, identity.ValidKeysAtTime("foo", 10))
+	require.Equal(t, identity.ValidKeysAtTime("foo", 100), []*Key{pubKeyA})
+	require.Equal(t, identity.ValidKeysAtTime("foo", 140), []*Key{pubKeyA})
+	require.Equal(t, identity.ValidKeysAtTime("foo", 200), []*Key{pubKeyB})
+	require.Equal(t, identity.ValidKeysAtTime("foo", 201), []*Key{pubKeyD})
+	require.Equal(t, identity.ValidKeysAtTime("foo", 202), []*Key{pubKeyD})
+	require.Equal(t, identity.ValidKeysAtTime("foo", 300), []*Key{pubKeyE})
+	require.Equal(t, identity.ValidKeysAtTime("foo", 3000), []*Key{pubKeyE})
 }
 
 // Test the immutable or mutable metadata search
 func TestMetadata(t *testing.T) {
-	mockRepo := repository.NewMockRepoForTest()
+	repo := makeIdentityTestRepo(t)
 
-	identity := NewIdentity("René Descartes", "rene.descartes@example.com")
+	identity, err := NewIdentity(repo, "René Descartes", "rene.descartes@example.com")
+	require.NoError(t, err)
 
 	identity.SetMetadata("key1", "value1")
 	assertHasKeyValue(t, identity.ImmutableMetadata(), "key1", "value1")
 	assertHasKeyValue(t, identity.MutableMetadata(), "key1", "value1")
 
-	err := identity.Commit(mockRepo)
+	err = identity.Commit(repo)
 	require.NoError(t, err)
 
 	assertHasKeyValue(t, identity.ImmutableMetadata(), "key1", "value1")
 	assertHasKeyValue(t, identity.MutableMetadata(), "key1", "value1")
 
 	// try override
-	identity.addVersionForTest(&Version{
-		name:  "René Descartes",
-		email: "rene.descartes@example.com",
+	err = identity.Mutate(repo, func(orig *Mutator) {
+		orig.Email = "rene@descartes.fr"
 	})
+	require.NoError(t, err)
 
 	identity.SetMetadata("key1", "value2")
 	assertHasKeyValue(t, identity.ImmutableMetadata(), "key1", "value1")
 	assertHasKeyValue(t, identity.MutableMetadata(), "key1", "value2")
 
-	err = identity.Commit(mockRepo)
+	err = identity.Commit(repo)
 	require.NoError(t, err)
 
 	// reload
-	loaded, err := ReadLocal(mockRepo, identity.id)
+	loaded, err := ReadLocal(repo, identity.Id())
 	require.NoError(t, err)
 
 	assertHasKeyValue(t, loaded.ImmutableMetadata(), "key1", "value1")
 	assertHasKeyValue(t, loaded.MutableMetadata(), "key1", "value2")
+
+	// set metadata after commit
+	versionCount := len(identity.versions)
+	identity.SetMetadata("foo", "bar")
+	require.True(t, identity.NeedCommit())
+	require.Len(t, identity.versions, versionCount+1)
+
+	err = identity.Commit(repo)
+	require.NoError(t, err)
+	require.Len(t, identity.versions, versionCount+1)
 }
 
 func assertHasKeyValue(t *testing.T, metadata map[string]string, key, value string) {
@@ -235,22 +219,15 @@ func assertHasKeyValue(t *testing.T, metadata map[string]string, key, value stri
 }
 
 func TestJSON(t *testing.T) {
-	mockRepo := repository.NewMockRepoForTest()
+	repo := makeIdentityTestRepo(t)
 
-	identity := &Identity{
-		id: entity.UnsetId,
-		versions: []*Version{
-			{
-				name:  "René Descartes",
-				email: "rene.descartes@example.com",
-			},
-		},
-	}
+	identity, err := NewIdentity(repo, "René Descartes", "rene.descartes@example.com")
+	require.NoError(t, err)
 
 	// commit to make sure we have an Id
-	err := identity.Commit(mockRepo)
+	err = identity.Commit(repo)
 	require.NoError(t, err)
-	require.NotEmpty(t, identity.id)
+	require.NotEmpty(t, identity.Id())
 
 	// serialize
 	data, err := json.Marshal(identity)
@@ -260,10 +237,10 @@ func TestJSON(t *testing.T) {
 	var i Interface
 	i, err = UnmarshalJSON(data)
 	require.NoError(t, err)
-	require.Equal(t, identity.id, i.Id())
+	require.Equal(t, identity.Id(), i.Id())
 
 	// make sure we can load the identity properly
-	i, err = ReadLocal(mockRepo, i.Id())
+	i, err = ReadLocal(repo, i.Id())
 	require.NoError(t, err)
 }
 
@@ -280,7 +257,9 @@ func TestIdentityRemove(t *testing.T) {
 	require.NoError(t, err)
 
 	// generate an identity for testing
-	rene := NewIdentity("René Descartes", "rene@descartes.fr")
+	rene, err := NewIdentity(repo, "René Descartes", "rene@descartes.fr")
+	require.NoError(t, err)
+
 	err = rene.Commit(repo)
 	require.NoError(t, err)
 

identity/interface.go 🔗

@@ -2,6 +2,7 @@ package identity
 
 import (
 	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/repository"
 	"github.com/MichaelMure/git-bug/util/lamport"
 	"github.com/MichaelMure/git-bug/util/timestamp"
 )
@@ -13,6 +14,10 @@ type Interface interface {
 	// Can be empty.
 	Name() string
 
+	// DisplayName return a non-empty string to display, representing the
+	// identity, based on the non-empty values.
+	DisplayName() string
+
 	// Email return the last version of the email
 	// Can be empty.
 	Email() string
@@ -32,26 +37,25 @@ type Interface interface {
 	// Can be empty.
 	Keys() []*Key
 
-	// ValidKeysAtTime return the set of keys valid at a given lamport time
+	// SigningKey return the key that should be used to sign new messages. If no key is available, return nil.
+	SigningKey(repo repository.RepoKeyring) (*Key, error)
+
+	// ValidKeysAtTime return the set of keys valid at a given lamport time for a given clock of another entity
 	// Can be empty.
-	ValidKeysAtTime(time lamport.Time) []*Key
+	ValidKeysAtTime(clockName string, time lamport.Time) []*Key
 
-	// DisplayName return a non-empty string to display, representing the
-	// identity, based on the non-empty values.
-	DisplayName() string
+	// LastModification return the timestamp at which the last version of the identity became valid.
+	LastModification() timestamp.Timestamp
 
-	// Validate check if the Identity data is valid
-	Validate() error
+	// LastModificationLamports return the lamport times at which the last version of the identity became valid.
+	LastModificationLamports() map[string]lamport.Time
 
 	// IsProtected return true if the chain of git commits started to be signed.
 	// If that's the case, only signed commit with a valid key for this identity can be added.
 	IsProtected() bool
 
-	// LastModificationLamportTime return the Lamport time at which the last version of the identity became valid.
-	LastModificationLamport() lamport.Time
-
-	// LastModification return the timestamp at which the last version of the identity became valid.
-	LastModification() timestamp.Timestamp
+	// Validate check if the Identity data is valid
+	Validate() error
 
 	// Indicate that the in-memory state changed and need to be commit in the repository
 	NeedCommit() bool

identity/key.go 🔗

@@ -1,18 +1,224 @@
 package identity
 
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	"golang.org/x/crypto/openpgp"
+	"golang.org/x/crypto/openpgp/armor"
+	"golang.org/x/crypto/openpgp/packet"
+
+	"github.com/MichaelMure/git-bug/repository"
+)
+
+var errNoPrivateKey = fmt.Errorf("no private key")
+
 type Key struct {
-	// The GPG fingerprint of the key
-	Fingerprint string `json:"fingerprint"`
-	PubKey      string `json:"pub_key"`
+	public  *packet.PublicKey
+	private *packet.PrivateKey
+}
+
+// GenerateKey generate a keypair (public+private)
+// The type and configuration of the key is determined by the default value in go's OpenPGP.
+func GenerateKey() *Key {
+	entity, err := openpgp.NewEntity("", "", "", &packet.Config{
+		// The armored format doesn't include the creation time, which makes the round-trip data not being fully equal.
+		// We don't care about the creation time so we can set it to the zero value.
+		Time: func() time.Time {
+			return time.Time{}
+		},
+	})
+	if err != nil {
+		panic(err)
+	}
+	return &Key{
+		public:  entity.PrimaryKey,
+		private: entity.PrivateKey,
+	}
+}
+
+// generatePublicKey generate only a public key (only useful for testing)
+// See GenerateKey for the details.
+func generatePublicKey() *Key {
+	k := GenerateKey()
+	k.private = nil
+	return k
+}
+
+func (k *Key) Public() *packet.PublicKey {
+	return k.public
+}
+
+func (k *Key) Private() *packet.PrivateKey {
+	return k.private
 }
 
 func (k *Key) Validate() error {
-	// Todo
+	if k.public == nil {
+		return fmt.Errorf("nil public key")
+	}
+	if !k.public.CanSign() {
+		return fmt.Errorf("public key can't sign")
+	}
+
+	if k.private != nil {
+		if !k.private.CanSign() {
+			return fmt.Errorf("private key can't sign")
+		}
+	}
 
 	return nil
 }
 
 func (k *Key) Clone() *Key {
-	clone := *k
-	return &clone
+	clone := &Key{}
+
+	pub := *k.public
+	clone.public = &pub
+
+	if k.private != nil {
+		priv := *k.private
+		clone.private = &priv
+	}
+
+	return clone
+}
+
+func (k *Key) MarshalJSON() ([]byte, error) {
+	// Serialize only the public key, in the armored format.
+	var buf bytes.Buffer
+	w, err := armor.Encode(&buf, openpgp.PublicKeyType, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	err = k.public.Serialize(w)
+	if err != nil {
+		return nil, err
+	}
+	err = w.Close()
+	if err != nil {
+		return nil, err
+	}
+	return json.Marshal(buf.String())
+}
+
+func (k *Key) UnmarshalJSON(data []byte) error {
+	// De-serialize only the public key, in the armored format.
+	var armored string
+	err := json.Unmarshal(data, &armored)
+	if err != nil {
+		return err
+	}
+
+	block, err := armor.Decode(strings.NewReader(armored))
+	if err == io.EOF {
+		return fmt.Errorf("no armored data found")
+	}
+	if err != nil {
+		return err
+	}
+
+	if block.Type != openpgp.PublicKeyType {
+		return fmt.Errorf("invalid key type")
+	}
+
+	p, err := packet.Read(block.Body)
+	if err != nil {
+		return errors.Wrap(err, "failed to read public key packet")
+	}
+
+	public, ok := p.(*packet.PublicKey)
+	if !ok {
+		return errors.New("got no packet.publicKey")
+	}
+
+	// The armored format doesn't include the creation time, which makes the round-trip data not being fully equal.
+	// We don't care about the creation time so we can set it to the zero value.
+	public.CreationTime = time.Time{}
+
+	k.public = public
+	return nil
+}
+
+func (k *Key) loadPrivate(repo repository.RepoKeyring) error {
+	item, err := repo.Keyring().Get(k.public.KeyIdString())
+	if err == repository.ErrKeyringKeyNotFound {
+		return errNoPrivateKey
+	}
+	if err != nil {
+		return err
+	}
+
+	block, err := armor.Decode(bytes.NewReader(item.Data))
+	if err == io.EOF {
+		return fmt.Errorf("no armored data found")
+	}
+	if err != nil {
+		return err
+	}
+
+	if block.Type != openpgp.PrivateKeyType {
+		return fmt.Errorf("invalid key type")
+	}
+
+	p, err := packet.Read(block.Body)
+	if err != nil {
+		return errors.Wrap(err, "failed to read private key packet")
+	}
+
+	private, ok := p.(*packet.PrivateKey)
+	if !ok {
+		return errors.New("got no packet.privateKey")
+	}
+
+	// The armored format doesn't include the creation time, which makes the round-trip data not being fully equal.
+	// We don't care about the creation time so we can set it to the zero value.
+	private.CreationTime = time.Time{}
+
+	k.private = private
+	return nil
+}
+
+// ensurePrivateKey attempt to load the corresponding private key if it is not loaded already.
+// If no private key is found, returns errNoPrivateKey
+func (k *Key) ensurePrivateKey(repo repository.RepoKeyring) error {
+	if k.private != nil {
+		return nil
+	}
+
+	return k.loadPrivate(repo)
+}
+
+func (k *Key) storePrivate(repo repository.RepoKeyring) error {
+	var buf bytes.Buffer
+	w, err := armor.Encode(&buf, openpgp.PrivateKeyType, nil)
+	if err != nil {
+		return err
+	}
+	err = k.private.Serialize(w)
+	if err != nil {
+		return err
+	}
+	err = w.Close()
+	if err != nil {
+		return err
+	}
+
+	return repo.Keyring().Set(repository.Item{
+		Key:  k.public.KeyIdString(),
+		Data: buf.Bytes(),
+	})
+}
+
+func (k *Key) PGPEntity() *openpgp.Entity {
+	return &openpgp.Entity{
+		PrimaryKey: k.public,
+		PrivateKey: k.private,
+	}
 }

identity/key_test.go 🔗

@@ -0,0 +1,60 @@
+package identity
+
+import (
+	"crypto/rsa"
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/MichaelMure/git-bug/repository"
+)
+
+func TestPublicKeyJSON(t *testing.T) {
+	k := generatePublicKey()
+
+	dataJSON, err := json.Marshal(k)
+	require.NoError(t, err)
+
+	var read Key
+	err = json.Unmarshal(dataJSON, &read)
+	require.NoError(t, err)
+
+	require.Equal(t, k, &read)
+}
+
+func TestStoreLoad(t *testing.T) {
+	repo := repository.NewMockRepoKeyring()
+
+	// public + private
+	k := GenerateKey()
+
+	// Store
+
+	dataJSON, err := json.Marshal(k)
+	require.NoError(t, err)
+
+	err = k.storePrivate(repo)
+	require.NoError(t, err)
+
+	// Load
+
+	var read Key
+	err = json.Unmarshal(dataJSON, &read)
+	require.NoError(t, err)
+
+	err = read.ensurePrivateKey(repo)
+	require.NoError(t, err)
+
+	require.Equal(t, k.public, read.public)
+
+	require.IsType(t, (*rsa.PrivateKey)(nil), k.private.PrivateKey)
+
+	// See https://github.com/golang/crypto/pull/175
+	rsaPriv := read.private.PrivateKey.(*rsa.PrivateKey)
+	back := rsaPriv.Primes[0]
+	rsaPriv.Primes[0] = rsaPriv.Primes[1]
+	rsaPriv.Primes[1] = back
+
+	require.True(t, k.private.PrivateKey.(*rsa.PrivateKey).Equal(read.private.PrivateKey))
+}

identity/version.go 🔗

@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"strings"
+	"time"
 
 	"github.com/pkg/errors"
 
@@ -15,76 +16,131 @@ import (
 )
 
 // 1: original format
-const formatVersion = 1
-
-// Version is a complete set of information about an Identity at a point in time.
-type Version struct {
-	// The lamport time at which this version become effective
-	// The reference time is the bug edition lamport clock
-	// It must be the first field in this struct due to https://github.com/golang/go/issues/599
-	//
-	// TODO: BREAKING CHANGE - this need to actually be one edition lamport time **per entity**
-	// This is not a problem right now but will be when more entities are added (pull-request, config ...)
-	time     lamport.Time
-	unixTime int64
+// 2: Identity Ids are generated from the first version serialized data instead of from the first git commit
+//    + Identity hold multiple lamport clocks from other entities, instead of just bug edit
+const formatVersion = 2
 
+// version is a complete set of information about an Identity at a point in time.
+type version struct {
 	name      string
 	email     string // as defined in git or from a bridge when importing the identity
 	login     string // from a bridge when importing the identity
 	avatarURL string
 
+	// The lamport times of the other entities at which this version become effective
+	times    map[string]lamport.Time
+	unixTime int64
+
 	// The set of keys valid at that time, from this version onward, until they get removed
 	// in a new version. This allow to have multiple key for the same identity (e.g. one per
 	// device) as well as revoke key.
 	keys []*Key
 
-	// This optional array is here to ensure a better randomness of the identity id to avoid collisions.
+	// mandatory random bytes to ensure a better randomness of the data of the first
+	// version of an identity, used to later generate the ID
+	// len(Nonce) should be > 20 and < 64 bytes
 	// It has no functional purpose and should be ignored.
-	// It is advised to fill this array if there is not enough entropy, e.g. if there is no keys.
+	// TODO: optional after first version?
 	nonce []byte
 
 	// A set of arbitrary key/value to store metadata about a version or about an Identity in general.
 	metadata map[string]string
 
+	// Not serialized. Store the version's id in memory.
+	id entity.Id
 	// Not serialized
 	commitHash repository.Hash
 }
 
-type VersionJSON struct {
+func newVersion(repo repository.RepoClock, name string, email string, login string, avatarURL string, keys []*Key) (*version, error) {
+	clocks, err := repo.AllClocks()
+	if err != nil {
+		return nil, err
+	}
+
+	times := make(map[string]lamport.Time)
+	for name, clock := range clocks {
+		times[name] = clock.Time()
+	}
+
+	return &version{
+		id:        entity.UnsetId,
+		name:      name,
+		email:     email,
+		login:     login,
+		avatarURL: avatarURL,
+		times:     times,
+		unixTime:  time.Now().Unix(),
+		keys:      keys,
+		nonce:     makeNonce(20),
+	}, nil
+}
+
+type versionJSON struct {
 	// Additional field to version the data
 	FormatVersion uint `json:"version"`
 
-	Time      lamport.Time      `json:"time"`
-	UnixTime  int64             `json:"unix_time"`
-	Name      string            `json:"name,omitempty"`
-	Email     string            `json:"email,omitempty"`
-	Login     string            `json:"login,omitempty"`
-	AvatarUrl string            `json:"avatar_url,omitempty"`
-	Keys      []*Key            `json:"pub_keys,omitempty"`
-	Nonce     []byte            `json:"nonce,omitempty"`
-	Metadata  map[string]string `json:"metadata,omitempty"`
+	Times     map[string]lamport.Time `json:"times"`
+	UnixTime  int64                   `json:"unix_time"`
+	Name      string                  `json:"name,omitempty"`
+	Email     string                  `json:"email,omitempty"`
+	Login     string                  `json:"login,omitempty"`
+	AvatarUrl string                  `json:"avatar_url,omitempty"`
+	Keys      []*Key                  `json:"pub_keys,omitempty"`
+	Nonce     []byte                  `json:"nonce"`
+	Metadata  map[string]string       `json:"metadata,omitempty"`
+}
+
+// Id return the identifier of the version
+func (v *version) Id() entity.Id {
+	if v.id == "" {
+		// something went really wrong
+		panic("version's id not set")
+	}
+	if v.id == entity.UnsetId {
+		// This means we are trying to get the version's Id *before* it has been stored.
+		// As the Id is computed based on the actual bytes written on the disk, we are going to predict
+		// those and then get the Id. This is safe as it will be the exact same code writing on disk later.
+		data, err := json.Marshal(v)
+		if err != nil {
+			panic(err)
+		}
+		v.id = entity.DeriveId(data)
+	}
+	return v.id
 }
 
 // Make a deep copy
-func (v *Version) Clone() *Version {
-	clone := &Version{
-		name:      v.name,
-		email:     v.email,
-		avatarURL: v.avatarURL,
-		keys:      make([]*Key, len(v.keys)),
+func (v *version) Clone() *version {
+	// copy direct fields
+	clone := *v
+
+	// reset some fields
+	clone.commitHash = ""
+	clone.id = entity.UnsetId
+
+	clone.times = make(map[string]lamport.Time)
+	for name, t := range v.times {
+		clone.times[name] = t
 	}
 
+	clone.keys = make([]*Key, len(v.keys))
 	for i, key := range v.keys {
 		clone.keys[i] = key.Clone()
 	}
 
-	return clone
+	clone.nonce = make([]byte, len(v.nonce))
+	copy(clone.nonce, v.nonce)
+
+	// not copying metadata
+
+	return &clone
 }
 
-func (v *Version) MarshalJSON() ([]byte, error) {
-	return json.Marshal(VersionJSON{
+func (v *version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(versionJSON{
 		FormatVersion: formatVersion,
-		Time:          v.time,
+		Times:         v.times,
 		UnixTime:      v.unixTime,
 		Name:          v.name,
 		Email:         v.email,
@@ -96,21 +152,19 @@ func (v *Version) MarshalJSON() ([]byte, error) {
 	})
 }
 
-func (v *Version) UnmarshalJSON(data []byte) error {
-	var aux VersionJSON
+func (v *version) UnmarshalJSON(data []byte) error {
+	var aux versionJSON
 
 	if err := json.Unmarshal(data, &aux); err != nil {
 		return err
 	}
 
-	if aux.FormatVersion < formatVersion {
-		return entity.NewErrOldFormatVersion(aux.FormatVersion)
-	}
-	if aux.FormatVersion > formatVersion {
-		return entity.NewErrNewFormatVersion(aux.FormatVersion)
+	if aux.FormatVersion != formatVersion {
+		return entity.NewErrInvalidFormat(aux.FormatVersion, formatVersion)
 	}
 
-	v.time = aux.Time
+	v.id = entity.DeriveId(data)
+	v.times = aux.Times
 	v.unixTime = aux.UnixTime
 	v.name = aux.Name
 	v.email = aux.Email
@@ -123,23 +177,18 @@ func (v *Version) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-func (v *Version) Validate() error {
+func (v *version) Validate() error {
 	// time must be set after a commit
 	if v.commitHash != "" && v.unixTime == 0 {
 		return fmt.Errorf("unix time not set")
 	}
-	if v.commitHash != "" && v.time == 0 {
-		return fmt.Errorf("lamport time not set")
-	}
 
 	if text.Empty(v.name) && text.Empty(v.login) {
 		return fmt.Errorf("either name or login should be set")
 	}
-
 	if strings.Contains(v.name, "\n") {
 		return fmt.Errorf("name should be a single line")
 	}
-
 	if !text.Safe(v.name) {
 		return fmt.Errorf("name is not fully printable")
 	}
@@ -147,7 +196,6 @@ func (v *Version) Validate() error {
 	if strings.Contains(v.login, "\n") {
 		return fmt.Errorf("login should be a single line")
 	}
-
 	if !text.Safe(v.login) {
 		return fmt.Errorf("login is not fully printable")
 	}
@@ -155,7 +203,6 @@ func (v *Version) Validate() error {
 	if strings.Contains(v.email, "\n") {
 		return fmt.Errorf("email should be a single line")
 	}
-
 	if !text.Safe(v.email) {
 		return fmt.Errorf("email is not fully printable")
 	}
@@ -167,6 +214,9 @@ func (v *Version) Validate() error {
 	if len(v.nonce) > 64 {
 		return fmt.Errorf("nonce is too big")
 	}
+	if len(v.nonce) < 20 {
+		return fmt.Errorf("nonce is too small")
+	}
 
 	for _, k := range v.keys {
 		if err := k.Validate(); err != nil {
@@ -177,9 +227,9 @@ func (v *Version) Validate() error {
 	return nil
 }
 
-// Write will serialize and store the Version as a git blob and return
+// Write will serialize and store the version as a git blob and return
 // its hash
-func (v *Version) Write(repo repository.Repo) (repository.Hash, error) {
+func (v *version) Write(repo repository.Repo) (repository.Hash, error) {
 	// make sure we don't write invalid data
 	err := v.Validate()
 	if err != nil {
@@ -187,17 +237,18 @@ func (v *Version) Write(repo repository.Repo) (repository.Hash, error) {
 	}
 
 	data, err := json.Marshal(v)
-
 	if err != nil {
 		return "", err
 	}
 
 	hash, err := repo.StoreData(data)
-
 	if err != nil {
 		return "", err
 	}
 
+	// make sure we set the Id when writing in the repo
+	v.id = entity.DeriveId(data)
+
 	return hash, nil
 }
 
@@ -211,22 +262,22 @@ func makeNonce(len int) []byte {
 }
 
 // SetMetadata store arbitrary metadata about a version or an Identity in general
-// If the Version has been commit to git already, it won't be overwritten.
-func (v *Version) SetMetadata(key string, value string) {
+// If the version has been commit to git already, it won't be overwritten.
+// Beware: changing the metadata on a version will change it's ID
+func (v *version) SetMetadata(key string, value string) {
 	if v.metadata == nil {
 		v.metadata = make(map[string]string)
 	}
-
 	v.metadata[key] = value
 }
 
-// GetMetadata retrieve arbitrary metadata about the Version
-func (v *Version) GetMetadata(key string) (string, bool) {
+// GetMetadata retrieve arbitrary metadata about the version
+func (v *version) GetMetadata(key string) (string, bool) {
 	val, ok := v.metadata[key]
 	return val, ok
 }
 
-// AllMetadata return all metadata for this Version
-func (v *Version) AllMetadata() map[string]string {
+// AllMetadata return all metadata for this version
+func (v *version) AllMetadata() map[string]string {
 	return v.metadata
 }

identity/version_test.go 🔗

@@ -3,39 +3,76 @@ package identity
 import (
 	"encoding/json"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/MichaelMure/git-bug/entity"
+	"github.com/MichaelMure/git-bug/repository"
+	"github.com/MichaelMure/git-bug/util/lamport"
 )
 
-func TestVersionSerialize(t *testing.T) {
-	before := &Version{
+func makeIdentityTestRepo(t *testing.T) repository.ClockedRepo {
+	repo := repository.NewMockRepo()
+
+	clock1, err := repo.GetOrCreateClock("foo")
+	require.NoError(t, err)
+	err = clock1.Witness(42)
+	require.NoError(t, err)
+
+	clock2, err := repo.GetOrCreateClock("bar")
+	require.NoError(t, err)
+	err = clock2.Witness(34)
+	require.NoError(t, err)
+
+	return repo
+}
+
+func TestVersionJSON(t *testing.T) {
+	repo := makeIdentityTestRepo(t)
+
+	keys := []*Key{
+		generatePublicKey(),
+		generatePublicKey(),
+	}
+
+	before, err := newVersion(repo, "name", "email", "login", "avatarUrl", keys)
+	require.NoError(t, err)
+
+	before.SetMetadata("key1", "value1")
+	before.SetMetadata("key2", "value2")
+
+	expected := &version{
+		id:        entity.UnsetId,
 		name:      "name",
 		email:     "email",
+		login:     "login",
 		avatarURL: "avatarUrl",
-		keys: []*Key{
-			{
-				Fingerprint: "fingerprint1",
-				PubKey:      "pubkey1",
-			},
-			{
-				Fingerprint: "fingerprint2",
-				PubKey:      "pubkey2",
-			},
+		unixTime:  time.Now().Unix(),
+		times: map[string]lamport.Time{
+			"foo": 42,
+			"bar": 34,
 		},
-		nonce: makeNonce(20),
+		keys:  keys,
+		nonce: before.nonce,
 		metadata: map[string]string{
 			"key1": "value1",
 			"key2": "value2",
 		},
-		time: 3,
 	}
 
+	require.Equal(t, expected, before)
+
 	data, err := json.Marshal(before)
 	assert.NoError(t, err)
 
-	var after Version
+	var after version
 	err = json.Unmarshal(data, &after)
 	assert.NoError(t, err)
 
-	assert.Equal(t, before, &after)
+	// make sure we now have an Id
+	expected.Id()
+
+	assert.Equal(t, expected, &after)
 }

misc/bash_completion/git-bug 🔗

@@ -722,6 +722,38 @@ _git-bug_comment_add()
     noun_aliases=()
 }
 
+_git-bug_comment_edit()
+{
+    last_command="git-bug_comment_edit"
+
+    command_aliases=()
+
+    commands=()
+
+    flags=()
+    two_word_flags=()
+    local_nonpersistent_flags=()
+    flags_with_completion=()
+    flags_completion=()
+
+    flags+=("--file=")
+    two_word_flags+=("--file")
+    two_word_flags+=("-F")
+    local_nonpersistent_flags+=("--file")
+    local_nonpersistent_flags+=("--file=")
+    local_nonpersistent_flags+=("-F")
+    flags+=("--message=")
+    two_word_flags+=("--message")
+    two_word_flags+=("-m")
+    local_nonpersistent_flags+=("--message")
+    local_nonpersistent_flags+=("--message=")
+    local_nonpersistent_flags+=("-m")
+
+    must_have_one_flag=()
+    must_have_one_noun=()
+    noun_aliases=()
+}
+
 _git-bug_comment()
 {
     last_command="git-bug_comment"
@@ -730,6 +762,7 @@ _git-bug_comment()
 
     commands=()
     commands+=("add")
+    commands+=("edit")
 
     flags=()
     two_word_flags=()

misc/random_bugs/create_random_bugs.go 🔗

@@ -111,54 +111,8 @@ func generateRandomBugsWithSeed(opts Options, seed int64) []*bug.Bug {
 	return result
 }
 
-func GenerateRandomOperationPacks(packNumber int, opNumber int) []*bug.OperationPack {
-	return GenerateRandomOperationPacksWithSeed(packNumber, opNumber, time.Now().UnixNano())
-}
-
-func GenerateRandomOperationPacksWithSeed(packNumber int, opNumber int, seed int64) []*bug.OperationPack {
-	// Note: this is a bit crude, only generate a Create + Comments
-
-	panic("this piece of code needs to be updated to make sure that the identities " +
-		"are properly commit before usage. That is, generateRandomPersons() need to be called.")
-
-	rand.Seed(seed)
-	fake.Seed(seed)
-
-	result := make([]*bug.OperationPack, packNumber)
-
-	for i := 0; i < packNumber; i++ {
-		opp := &bug.OperationPack{}
-
-		var op bug.Operation
-
-		op = bug.NewCreateOp(
-			randomPerson(),
-			time.Now().Unix(),
-			fake.Sentence(),
-			paragraphs(),
-			nil,
-		)
-
-		opp.Append(op)
-
-		for j := 0; j < opNumber-1; j++ {
-			op = bug.NewAddCommentOp(
-				randomPerson(),
-				time.Now().Unix(),
-				paragraphs(),
-				nil,
-			)
-			opp.Append(op)
-		}
-
-		result[i] = opp
-	}
-
-	return result
-}
-
-func person() *identity.Identity {
-	return identity.NewIdentity(fake.FullName(), fake.EmailAddress())
+func person(repo repository.RepoClock) (*identity.Identity, error) {
+	return identity.NewIdentity(repo, fake.FullName(), fake.EmailAddress())
 }
 
 var persons []*identity.Identity
@@ -166,8 +120,11 @@ var persons []*identity.Identity
 func generateRandomPersons(repo repository.ClockedRepo, n int) {
 	persons = make([]*identity.Identity, n)
 	for i := range persons {
-		p := person()
-		err := p.Commit(repo)
+		p, err := person(repo)
+		if err != nil {
+			panic(err)
+		}
+		err = p.Commit(repo)
 		if err != nil {
 			panic(err)
 		}

repository/common.go 🔗

@@ -0,0 +1,67 @@
+package repository
+
+import (
+	"io"
+
+	"golang.org/x/crypto/openpgp"
+	"golang.org/x/crypto/openpgp/armor"
+	"golang.org/x/crypto/openpgp/errors"
+)
+
+// nonNativeListCommits is an implementation for ListCommits, for the case where
+// the underlying git implementation doesn't support if natively.
+func nonNativeListCommits(repo RepoData, ref string) ([]Hash, error) {
+	var result []Hash
+
+	stack := make([]Hash, 0, 32)
+	visited := make(map[Hash]struct{})
+
+	hash, err := repo.ResolveRef(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	stack = append(stack, hash)
+
+	for len(stack) > 0 {
+		// pop
+		hash := stack[len(stack)-1]
+		stack = stack[:len(stack)-1]
+
+		if _, ok := visited[hash]; ok {
+			continue
+		}
+
+		// mark as visited
+		visited[hash] = struct{}{}
+		result = append(result, hash)
+
+		commit, err := repo.ReadCommit(hash)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, parent := range commit.Parents {
+			stack = append(stack, parent)
+		}
+	}
+
+	// reverse
+	for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
+		result[i], result[j] = result[j], result[i]
+	}
+
+	return result, nil
+}
+
+// deArmorSignature convert an armored (text serialized) signature into raw binary
+func deArmorSignature(armoredSig io.Reader) (io.Reader, error) {
+	block, err := armor.Decode(armoredSig)
+	if err != nil {
+		return nil, err
+	}
+	if block.Type != openpgp.SignatureType {
+		return nil, errors.InvalidArgumentError("expected '" + openpgp.SignatureType + "', got: " + block.Type)
+	}
+	return block.Body, nil
+}

repository/git.go 🔗

@@ -1,500 +0,0 @@
-// Package repository contains helper methods for working with the Git repo.
-package repository
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-
-	"github.com/blevesearch/bleve"
-	"github.com/go-git/go-billy/v5"
-	"github.com/go-git/go-billy/v5/osfs"
-
-	"github.com/MichaelMure/git-bug/util/lamport"
-)
-
-const (
-	clockPath = "git-bug"
-)
-
-var _ ClockedRepo = &GitRepo{}
-var _ TestedRepo = &GitRepo{}
-
-// GitRepo represents an instance of a (local) git repository.
-type GitRepo struct {
-	gitCli
-	path string
-
-	clocksMutex sync.Mutex
-	clocks      map[string]lamport.Clock
-
-	indexesMutex sync.Mutex
-	indexes      map[string]bleve.Index
-
-	keyring Keyring
-}
-
-// OpenGitRepo determines if the given working directory is inside of a git repository,
-// and returns the corresponding GitRepo instance if it is.
-func OpenGitRepo(path string, clockLoaders []ClockLoader) (*GitRepo, error) {
-	k, err := defaultKeyring()
-	if err != nil {
-		return nil, err
-	}
-
-	repo := &GitRepo{
-		gitCli:  gitCli{path: path},
-		path:    path,
-		clocks:  make(map[string]lamport.Clock),
-		indexes: make(map[string]bleve.Index),
-		keyring: k,
-	}
-
-	// Check the repo and retrieve the root path
-	stdout, err := repo.runGitCommand("rev-parse", "--absolute-git-dir")
-
-	// Now dir is fetched with "git rev-parse --git-dir". May be it can
-	// still return nothing in some cases. Then empty stdout check is
-	// kept.
-	if err != nil || stdout == "" {
-		return nil, ErrNotARepo
-	}
-
-	// Fix the path to be sure we are at the root
-	repo.path = stdout
-	repo.gitCli.path = stdout
-
-	for _, loader := range clockLoaders {
-		allExist := true
-		for _, name := range loader.Clocks {
-			if _, err := repo.getClock(name); err != nil {
-				allExist = false
-			}
-		}
-
-		if !allExist {
-			err = loader.Witnesser(repo)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	return repo, nil
-}
-
-// InitGitRepo create a new empty git repo at the given path
-func InitGitRepo(path string) (*GitRepo, error) {
-	repo := &GitRepo{
-		gitCli:  gitCli{path: path},
-		path:    path + "/.git",
-		clocks:  make(map[string]lamport.Clock),
-		indexes: make(map[string]bleve.Index),
-	}
-
-	_, err := repo.runGitCommand("init", path)
-	if err != nil {
-		return nil, err
-	}
-
-	return repo, nil
-}
-
-// InitBareGitRepo create a new --bare empty git repo at the given path
-func InitBareGitRepo(path string) (*GitRepo, error) {
-	repo := &GitRepo{
-		gitCli:  gitCli{path: path},
-		path:    path,
-		clocks:  make(map[string]lamport.Clock),
-		indexes: make(map[string]bleve.Index),
-	}
-
-	_, err := repo.runGitCommand("init", "--bare", path)
-	if err != nil {
-		return nil, err
-	}
-
-	return repo, nil
-}
-
-func (repo *GitRepo) Close() error {
-	var firstErr error
-	for _, index := range repo.indexes {
-		err := index.Close()
-		if err != nil && firstErr == nil {
-			firstErr = err
-		}
-	}
-	return firstErr
-}
-
-// LocalConfig give access to the repository scoped configuration
-func (repo *GitRepo) LocalConfig() Config {
-	return newGitConfig(repo.gitCli, false)
-}
-
-// GlobalConfig give access to the global scoped configuration
-func (repo *GitRepo) GlobalConfig() Config {
-	return newGitConfig(repo.gitCli, true)
-}
-
-// AnyConfig give access to a merged local/global configuration
-func (repo *GitRepo) AnyConfig() ConfigRead {
-	return mergeConfig(repo.LocalConfig(), repo.GlobalConfig())
-}
-
-// Keyring give access to a user-wide storage for secrets
-func (repo *GitRepo) Keyring() Keyring {
-	return repo.keyring
-}
-
-// GetPath returns the path to the repo.
-func (repo *GitRepo) GetPath() string {
-	return repo.path
-}
-
-// GetUserName returns the name the the user has used to configure git
-func (repo *GitRepo) GetUserName() (string, error) {
-	return repo.runGitCommand("config", "user.name")
-}
-
-// GetUserEmail returns the email address that the user has used to configure git.
-func (repo *GitRepo) GetUserEmail() (string, error) {
-	return repo.runGitCommand("config", "user.email")
-}
-
-// GetCoreEditor returns the name of the editor that the user has used to configure git.
-func (repo *GitRepo) GetCoreEditor() (string, error) {
-	return repo.runGitCommand("var", "GIT_EDITOR")
-}
-
-// GetRemotes returns the configured remotes repositories.
-func (repo *GitRepo) GetRemotes() (map[string]string, error) {
-	stdout, err := repo.runGitCommand("remote", "--verbose")
-	if err != nil {
-		return nil, err
-	}
-
-	lines := strings.Split(stdout, "\n")
-	remotes := make(map[string]string, len(lines))
-
-	for _, line := range lines {
-		if strings.TrimSpace(line) == "" {
-			continue
-		}
-		elements := strings.Fields(line)
-		if len(elements) != 3 {
-			return nil, fmt.Errorf("git remote: unexpected output format: %s", line)
-		}
-
-		remotes[elements[0]] = elements[1]
-	}
-
-	return remotes, nil
-}
-
-// LocalStorage return a billy.Filesystem giving access to $RepoPath/.git/git-bug
-func (repo *GitRepo) LocalStorage() billy.Filesystem {
-	return osfs.New(repo.path)
-}
-
-// GetBleveIndex return a bleve.Index that can be used to index documents
-func (repo *GitRepo) GetBleveIndex(name string) (bleve.Index, error) {
-	repo.indexesMutex.Lock()
-	defer repo.indexesMutex.Unlock()
-
-	if index, ok := repo.indexes[name]; ok {
-		return index, nil
-	}
-
-	path := filepath.Join(repo.path, "indexes", name)
-
-	index, err := bleve.Open(path)
-	if err == nil {
-		repo.indexes[name] = index
-		return index, nil
-	}
-
-	err = os.MkdirAll(path, os.ModeDir)
-	if err != nil {
-		return nil, err
-	}
-
-	mapping := bleve.NewIndexMapping()
-	mapping.DefaultAnalyzer = "en"
-
-	index, err = bleve.New(path, mapping)
-	if err != nil {
-		return nil, err
-	}
-
-	repo.indexes[name] = index
-
-	return index, nil
-}
-
-// ClearBleveIndex will wipe the given index
-func (repo *GitRepo) ClearBleveIndex(name string) error {
-	repo.indexesMutex.Lock()
-	defer repo.indexesMutex.Unlock()
-
-	path := filepath.Join(repo.path, "indexes", name)
-
-	err := os.RemoveAll(path)
-	if err != nil {
-		return err
-	}
-
-	delete(repo.indexes, name)
-
-	return nil
-}
-
-// FetchRefs fetch git refs from a remote
-func (repo *GitRepo) FetchRefs(remote, refSpec string) (string, error) {
-	stdout, err := repo.runGitCommand("fetch", remote, refSpec)
-
-	if err != nil {
-		return stdout, fmt.Errorf("failed to fetch from the remote '%s': %v", remote, err)
-	}
-
-	return stdout, err
-}
-
-// PushRefs push git refs to a remote
-func (repo *GitRepo) PushRefs(remote string, refSpec string) (string, error) {
-	stdout, stderr, err := repo.runGitCommandRaw(nil, "push", remote, refSpec)
-
-	if err != nil {
-		return stdout + stderr, fmt.Errorf("failed to push to the remote '%s': %v", remote, stderr)
-	}
-	return stdout + stderr, nil
-}
-
-// StoreData will store arbitrary data and return the corresponding hash
-func (repo *GitRepo) StoreData(data []byte) (Hash, error) {
-	var stdin = bytes.NewReader(data)
-
-	stdout, err := repo.runGitCommandWithStdin(stdin, "hash-object", "--stdin", "-w")
-
-	return Hash(stdout), err
-}
-
-// ReadData will attempt to read arbitrary data from the given hash
-func (repo *GitRepo) ReadData(hash Hash) ([]byte, error) {
-	var stdout bytes.Buffer
-	var stderr bytes.Buffer
-
-	err := repo.runGitCommandWithIO(nil, &stdout, &stderr, "cat-file", "-p", string(hash))
-
-	if err != nil {
-		return []byte{}, err
-	}
-
-	return stdout.Bytes(), nil
-}
-
-// StoreTree will store a mapping key-->Hash as a Git tree
-func (repo *GitRepo) StoreTree(entries []TreeEntry) (Hash, error) {
-	buffer := prepareTreeEntries(entries)
-
-	stdout, err := repo.runGitCommandWithStdin(&buffer, "mktree")
-
-	if err != nil {
-		return "", err
-	}
-
-	return Hash(stdout), nil
-}
-
-// StoreCommit will store a Git commit with the given Git tree
-func (repo *GitRepo) StoreCommit(treeHash Hash) (Hash, error) {
-	stdout, err := repo.runGitCommand("commit-tree", string(treeHash))
-
-	if err != nil {
-		return "", err
-	}
-
-	return Hash(stdout), nil
-}
-
-// StoreCommitWithParent will store a Git commit with the given Git tree
-func (repo *GitRepo) StoreCommitWithParent(treeHash Hash, parent Hash) (Hash, error) {
-	stdout, err := repo.runGitCommand("commit-tree", string(treeHash),
-		"-p", string(parent))
-
-	if err != nil {
-		return "", err
-	}
-
-	return Hash(stdout), nil
-}
-
-// UpdateRef will create or update a Git reference
-func (repo *GitRepo) UpdateRef(ref string, hash Hash) error {
-	_, err := repo.runGitCommand("update-ref", ref, string(hash))
-
-	return err
-}
-
-// RemoveRef will remove a Git reference
-func (repo *GitRepo) RemoveRef(ref string) error {
-	_, err := repo.runGitCommand("update-ref", "-d", ref)
-
-	return err
-}
-
-// ListRefs will return a list of Git ref matching the given refspec
-func (repo *GitRepo) ListRefs(refPrefix string) ([]string, error) {
-	stdout, err := repo.runGitCommand("for-each-ref", "--format=%(refname)", refPrefix)
-
-	if err != nil {
-		return nil, err
-	}
-
-	split := strings.Split(stdout, "\n")
-
-	if len(split) == 1 && split[0] == "" {
-		return []string{}, nil
-	}
-
-	return split, nil
-}
-
-// RefExist will check if a reference exist in Git
-func (repo *GitRepo) RefExist(ref string) (bool, error) {
-	stdout, err := repo.runGitCommand("for-each-ref", ref)
-
-	if err != nil {
-		return false, err
-	}
-
-	return stdout != "", nil
-}
-
-// CopyRef will create a new reference with the same value as another one
-func (repo *GitRepo) CopyRef(source string, dest string) error {
-	_, err := repo.runGitCommand("update-ref", dest, source)
-
-	return err
-}
-
-// ListCommits will return the list of commit hashes of a ref, in chronological order
-func (repo *GitRepo) ListCommits(ref string) ([]Hash, error) {
-	stdout, err := repo.runGitCommand("rev-list", "--first-parent", "--reverse", ref)
-
-	if err != nil {
-		return nil, err
-	}
-
-	split := strings.Split(stdout, "\n")
-
-	casted := make([]Hash, len(split))
-	for i, line := range split {
-		casted[i] = Hash(line)
-	}
-
-	return casted, nil
-
-}
-
-// ReadTree will return the list of entries in a Git tree
-func (repo *GitRepo) ReadTree(hash Hash) ([]TreeEntry, error) {
-	stdout, err := repo.runGitCommand("ls-tree", string(hash))
-
-	if err != nil {
-		return nil, err
-	}
-
-	return readTreeEntries(stdout)
-}
-
-// FindCommonAncestor will return the last common ancestor of two chain of commit
-func (repo *GitRepo) FindCommonAncestor(hash1 Hash, hash2 Hash) (Hash, error) {
-	stdout, err := repo.runGitCommand("merge-base", string(hash1), string(hash2))
-
-	if err != nil {
-		return "", err
-	}
-
-	return Hash(stdout), nil
-}
-
-// GetTreeHash return the git tree hash referenced in a commit
-func (repo *GitRepo) GetTreeHash(commit Hash) (Hash, error) {
-	stdout, err := repo.runGitCommand("rev-parse", string(commit)+"^{tree}")
-
-	if err != nil {
-		return "", err
-	}
-
-	return Hash(stdout), nil
-}
-
-// GetOrCreateClock return a Lamport clock stored in the Repo.
-// If the clock doesn't exist, it's created.
-func (repo *GitRepo) GetOrCreateClock(name string) (lamport.Clock, error) {
-	repo.clocksMutex.Lock()
-	defer repo.clocksMutex.Unlock()
-
-	c, err := repo.getClock(name)
-	if err == nil {
-		return c, nil
-	}
-	if err != ErrClockNotExist {
-		return nil, err
-	}
-
-	c, err = lamport.NewPersistedClock(repo.LocalStorage(), name+"-clock")
-	if err != nil {
-		return nil, err
-	}
-
-	repo.clocks[name] = c
-	return c, nil
-}
-
-func (repo *GitRepo) getClock(name string) (lamport.Clock, error) {
-	if c, ok := repo.clocks[name]; ok {
-		return c, nil
-	}
-
-	c, err := lamport.LoadPersistedClock(repo.LocalStorage(), name+"-clock")
-	if err == nil {
-		repo.clocks[name] = c
-		return c, nil
-	}
-	if err == lamport.ErrClockNotExist {
-		return nil, ErrClockNotExist
-	}
-	return nil, err
-}
-
-// AddRemote add a new remote to the repository
-// Not in the interface because it's only used for testing
-func (repo *GitRepo) AddRemote(name string, url string) error {
-	_, err := repo.runGitCommand("remote", "add", name, url)
-
-	return err
-}
-
-// GetLocalRemote return the URL to use to add this repo as a local remote
-func (repo *GitRepo) GetLocalRemote() string {
-	return repo.path
-}
-
-// EraseFromDisk delete this repository entirely from the disk
-func (repo *GitRepo) EraseFromDisk() error {
-	err := repo.Close()
-	if err != nil {
-		return err
-	}
-
-	path := filepath.Clean(strings.TrimSuffix(repo.path, string(filepath.Separator)+".git"))
-
-	// fmt.Println("Cleaning repo:", path)
-	return os.RemoveAll(path)
-}

repository/git_cli.go 🔗

@@ -1,57 +0,0 @@
-package repository
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"strings"
-
-	"golang.org/x/sys/execabs"
-)
-
-// gitCli is a helper to launch CLI git commands
-type gitCli struct {
-	path string
-}
-
-// Run the given git command with the given I/O reader/writers, returning an error if it fails.
-func (cli gitCli) runGitCommandWithIO(stdin io.Reader, stdout, stderr io.Writer, args ...string) error {
-	// make sure that the working directory for the command
-	// always exist, in particular when running "git init".
-	path := strings.TrimSuffix(cli.path, ".git")
-
-	// fmt.Printf("[%s] Running git %s\n", path, strings.Join(args, " "))
-
-	cmd := execabs.Command("git", args...)
-	cmd.Dir = path
-	cmd.Stdin = stdin
-	cmd.Stdout = stdout
-	cmd.Stderr = stderr
-
-	return cmd.Run()
-}
-
-// Run the given git command and return its stdout, or an error if the command fails.
-func (cli gitCli) runGitCommandRaw(stdin io.Reader, args ...string) (string, string, error) {
-	var stdout bytes.Buffer
-	var stderr bytes.Buffer
-	err := cli.runGitCommandWithIO(stdin, &stdout, &stderr, args...)
-	return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err
-}
-
-// Run the given git command and return its stdout, or an error if the command fails.
-func (cli gitCli) runGitCommandWithStdin(stdin io.Reader, args ...string) (string, error) {
-	stdout, stderr, err := cli.runGitCommandRaw(stdin, args...)
-	if err != nil {
-		if stderr == "" {
-			stderr = "Error running git command: " + strings.Join(args, " ")
-		}
-		err = fmt.Errorf(stderr)
-	}
-	return stdout, err
-}
-
-// Run the given git command and return its stdout, or an error if the command fails.
-func (cli gitCli) runGitCommand(args ...string) (string, error) {
-	return cli.runGitCommandWithStdin(nil, args...)
-}

repository/git_config.go 🔗

@@ -1,221 +0,0 @@
-package repository
-
-import (
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/blang/semver"
-	"github.com/pkg/errors"
-)
-
-var _ Config = &gitConfig{}
-
-type gitConfig struct {
-	cli          gitCli
-	localityFlag string
-}
-
-func newGitConfig(cli gitCli, global bool) *gitConfig {
-	localityFlag := "--local"
-	if global {
-		localityFlag = "--global"
-	}
-	return &gitConfig{
-		cli:          cli,
-		localityFlag: localityFlag,
-	}
-}
-
-// StoreString store a single key/value pair in the config of the repo
-func (gc *gitConfig) StoreString(key string, value string) error {
-	_, err := gc.cli.runGitCommand("config", gc.localityFlag, "--replace-all", key, value)
-	return err
-}
-
-func (gc *gitConfig) StoreBool(key string, value bool) error {
-	return gc.StoreString(key, strconv.FormatBool(value))
-}
-
-func (gc *gitConfig) StoreTimestamp(key string, value time.Time) error {
-	return gc.StoreString(key, strconv.Itoa(int(value.Unix())))
-}
-
-// ReadAll read all key/value pair matching the key prefix
-func (gc *gitConfig) ReadAll(keyPrefix string) (map[string]string, error) {
-	stdout, err := gc.cli.runGitCommand("config", gc.localityFlag, "--includes", "--get-regexp", keyPrefix)
-
-	//   / \
-	//  / ! \
-	// -------
-	//
-	// There can be a legitimate error here, but I see no portable way to
-	// distinguish them from the git error that say "no matching value exist"
-	if err != nil {
-		return nil, nil
-	}
-
-	lines := strings.Split(stdout, "\n")
-
-	result := make(map[string]string, len(lines))
-
-	for _, line := range lines {
-		if strings.TrimSpace(line) == "" {
-			continue
-		}
-
-		parts := strings.SplitN(line, " ", 2)
-		result[parts[0]] = parts[1]
-	}
-
-	return result, nil
-}
-
-func (gc *gitConfig) ReadString(key string) (string, error) {
-	stdout, err := gc.cli.runGitCommand("config", gc.localityFlag, "--includes", "--get-all", key)
-
-	//   / \
-	//  / ! \
-	// -------
-	//
-	// There can be a legitimate error here, but I see no portable way to
-	// distinguish them from the git error that say "no matching value exist"
-	if err != nil {
-		return "", ErrNoConfigEntry
-	}
-
-	lines := strings.Split(stdout, "\n")
-
-	if len(lines) == 0 {
-		return "", ErrNoConfigEntry
-	}
-	if len(lines) > 1 {
-		return "", ErrMultipleConfigEntry
-	}
-
-	return lines[0], nil
-}
-
-func (gc *gitConfig) ReadBool(key string) (bool, error) {
-	val, err := gc.ReadString(key)
-	if err != nil {
-		return false, err
-	}
-
-	return strconv.ParseBool(val)
-}
-
-func (gc *gitConfig) ReadTimestamp(key string) (time.Time, error) {
-	value, err := gc.ReadString(key)
-	if err != nil {
-		return time.Time{}, err
-	}
-	return ParseTimestamp(value)
-}
-
-func (gc *gitConfig) rmSection(keyPrefix string) error {
-	_, err := gc.cli.runGitCommand("config", gc.localityFlag, "--remove-section", keyPrefix)
-	return err
-}
-
-func (gc *gitConfig) unsetAll(keyPrefix string) error {
-	_, err := gc.cli.runGitCommand("config", gc.localityFlag, "--unset-all", keyPrefix)
-	return err
-}
-
-// return keyPrefix section
-// example: sectionFromKey(a.b.c.d) return a.b.c
-func sectionFromKey(keyPrefix string) string {
-	s := strings.Split(keyPrefix, ".")
-	if len(s) == 1 {
-		return keyPrefix
-	}
-
-	return strings.Join(s[:len(s)-1], ".")
-}
-
-// rmConfigs with git version lesser than 2.18
-func (gc *gitConfig) rmConfigsGitVersionLT218(keyPrefix string) error {
-	// try to remove key/value pair by key
-	err := gc.unsetAll(keyPrefix)
-	if err != nil {
-		return gc.rmSection(keyPrefix)
-	}
-
-	m, err := gc.ReadAll(sectionFromKey(keyPrefix))
-	if err != nil {
-		return err
-	}
-
-	// if section doesn't have any left key/value remove the section
-	if len(m) == 0 {
-		return gc.rmSection(sectionFromKey(keyPrefix))
-	}
-
-	return nil
-}
-
-// RmConfigs remove all key/value pair matching the key prefix
-func (gc *gitConfig) RemoveAll(keyPrefix string) error {
-	// starting from git 2.18.0 sections are automatically deleted when the last existing
-	// key/value is removed. Before 2.18.0 we should remove the section
-	// see https://github.com/git/git/blob/master/Documentation/RelNotes/2.18.0.txt#L379
-	lt218, err := gc.gitVersionLT218()
-	if err != nil {
-		return errors.Wrap(err, "getting git version")
-	}
-
-	if lt218 {
-		return gc.rmConfigsGitVersionLT218(keyPrefix)
-	}
-
-	err = gc.unsetAll(keyPrefix)
-	if err != nil {
-		return gc.rmSection(keyPrefix)
-	}
-
-	return nil
-}
-
-func (gc *gitConfig) gitVersion() (*semver.Version, error) {
-	versionOut, err := gc.cli.runGitCommand("version")
-	if err != nil {
-		return nil, err
-	}
-	return parseGitVersion(versionOut)
-}
-
-func parseGitVersion(versionOut string) (*semver.Version, error) {
-	// extract the version and truncate potential bad parts
-	// ex: 2.23.0.rc1 instead of 2.23.0-rc1
-	r := regexp.MustCompile(`(\d+\.){1,2}\d+`)
-
-	extracted := r.FindString(versionOut)
-	if extracted == "" {
-		return nil, fmt.Errorf("unreadable git version %s", versionOut)
-	}
-
-	version, err := semver.Make(extracted)
-	if err != nil {
-		return nil, err
-	}
-
-	return &version, nil
-}
-
-func (gc *gitConfig) gitVersionLT218() (bool, error) {
-	version, err := gc.gitVersion()
-	if err != nil {
-		return false, err
-	}
-
-	version218string := "2.18.0"
-	gitVersion218, err := semver.Make(version218string)
-	if err != nil {
-		return false, err
-	}
-
-	return version.LT(gitVersion218), nil
-}

repository/git_test.go 🔗

@@ -1,10 +0,0 @@
-// Package repository contains helper methods for working with the Git repo.
-package repository
-
-import (
-	"testing"
-)
-
-func TestGitRepo(t *testing.T) {
-	RepoTest(t, CreateTestRepo, CleanupTestRepos)
-}

repository/git_testing.go 🔗

@@ -1,72 +0,0 @@
-package repository
-
-import (
-	"io/ioutil"
-	"log"
-
-	"github.com/99designs/keyring"
-)
-
-// This is intended for testing only
-
-func CreateTestRepo(bare bool) TestedRepo {
-	dir, err := ioutil.TempDir("", "")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	var creator func(string) (*GitRepo, error)
-
-	if bare {
-		creator = InitBareGitRepo
-	} else {
-		creator = InitGitRepo
-	}
-
-	repo, err := creator(dir)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	config := repo.LocalConfig()
-	if err := config.StoreString("user.name", "testuser"); err != nil {
-		log.Fatal("failed to set user.name for test repository: ", err)
-	}
-	if err := config.StoreString("user.email", "testuser@example.com"); err != nil {
-		log.Fatal("failed to set user.email for test repository: ", err)
-	}
-
-	// make sure we use a mock keyring for testing to not interact with the global system
-	return &replaceKeyring{
-		TestedRepo: repo,
-		keyring:    keyring.NewArrayKeyring(nil),
-	}
-}
-
-func SetupReposAndRemote() (repoA, repoB, remote TestedRepo) {
-	repoA = CreateGoGitTestRepo(false)
-	repoB = CreateGoGitTestRepo(false)
-	remote = CreateGoGitTestRepo(true)
-
-	err := repoA.AddRemote("origin", remote.GetLocalRemote())
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	err = repoB.AddRemote("origin", remote.GetLocalRemote())
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return repoA, repoB, remote
-}
-
-// replaceKeyring allow to replace the Keyring of the underlying repo
-type replaceKeyring struct {
-	TestedRepo
-	keyring Keyring
-}
-
-func (rk replaceKeyring) Keyring() Keyring {
-	return rk.keyring
-}

repository/gogit.go 🔗

@@ -19,11 +19,14 @@ import (
 	"github.com/go-git/go-git/v5/plumbing"
 	"github.com/go-git/go-git/v5/plumbing/filemode"
 	"github.com/go-git/go-git/v5/plumbing/object"
+	"golang.org/x/crypto/openpgp"
 	"golang.org/x/sys/execabs"
 
 	"github.com/MichaelMure/git-bug/util/lamport"
 )
 
+const clockPath = "clocks"
+
 var _ ClockedRepo = &GoGitRepo{}
 var _ TestedRepo = &GoGitRepo{}
 
@@ -350,13 +353,17 @@ func (repo *GoGitRepo) ClearBleveIndex(name string) error {
 	return nil
 }
 
-// FetchRefs fetch git refs from a remote
-func (repo *GoGitRepo) FetchRefs(remote string, refSpec string) (string, error) {
+// FetchRefs fetches git refs matching a directory prefix from a remote
+// Ex: prefix="foo" will fetch any remote refs matching "refs/foo/*" locally.
+// The equivalent git refspec would be "refs/foo/*:refs/remotes/<remote>/foo/*"
+func (repo *GoGitRepo) FetchRefs(remote string, prefix string) (string, error) {
+	refspec := fmt.Sprintf("refs/%s/*:refs/remotes/%s/%s/*", prefix, remote, prefix)
+
 	buf := bytes.NewBuffer(nil)
 
 	err := repo.r.Fetch(&gogit.FetchOptions{
 		RemoteName: remote,
-		RefSpecs:   []config.RefSpec{config.RefSpec(refSpec)},
+		RefSpecs:   []config.RefSpec{config.RefSpec(refspec)},
 		Progress:   buf,
 	})
 	if err == gogit.NoErrAlreadyUpToDate {
@@ -369,13 +376,41 @@ func (repo *GoGitRepo) FetchRefs(remote string, refSpec string) (string, error)
 	return buf.String(), nil
 }
 
-// PushRefs push git refs to a remote
-func (repo *GoGitRepo) PushRefs(remote string, refSpec string) (string, error) {
+// PushRefs push git refs matching a directory prefix to a remote
+// Ex: prefix="foo" will push any local refs matching "refs/foo/*" to the remote.
+// The equivalent git refspec would be "refs/foo/*:refs/foo/*"
+//
+// Additionally, PushRefs will update the local references in refs/remotes/<remote>/foo to match
+// the remote state.
+func (repo *GoGitRepo) PushRefs(remote string, prefix string) (string, error) {
+	refspec := fmt.Sprintf("refs/%s/*:refs/%s/*", prefix, prefix)
+
+	remo, err := repo.r.Remote(remote)
+	if err != nil {
+		return "", err
+	}
+
+	// to make sure that the push also create the corresponding refs/remotes/<remote>/... references,
+	// we need to have a default fetch refspec configured on the remote, to make our refs "track" the remote ones.
+	// This does not change the config on disk, only on memory.
+	hasCustomFetch := false
+	fetchRefspec := fmt.Sprintf("refs/%s/*:refs/remotes/%s/%s/*", prefix, remote, prefix)
+	for _, r := range remo.Config().Fetch {
+		if string(r) == fetchRefspec {
+			hasCustomFetch = true
+			break
+		}
+	}
+
+	if !hasCustomFetch {
+		remo.Config().Fetch = append(remo.Config().Fetch, config.RefSpec(fetchRefspec))
+	}
+
 	buf := bytes.NewBuffer(nil)
 
-	err := repo.r.Push(&gogit.PushOptions{
+	err = remo.Push(&gogit.PushOptions{
 		RemoteName: remote,
-		RefSpecs:   []config.RefSpec{config.RefSpec(refSpec)},
+		RefSpecs:   []config.RefSpec{config.RefSpec(refspec)},
 		Progress:   buf,
 	})
 	if err == gogit.NoErrAlreadyUpToDate {
@@ -519,12 +554,13 @@ func (repo *GoGitRepo) ReadTree(hash Hash) ([]TreeEntry, error) {
 }
 
 // StoreCommit will store a Git commit with the given Git tree
-func (repo *GoGitRepo) StoreCommit(treeHash Hash) (Hash, error) {
-	return repo.StoreCommitWithParent(treeHash, "")
+func (repo *GoGitRepo) StoreCommit(treeHash Hash, parents ...Hash) (Hash, error) {
+	return repo.StoreSignedCommit(treeHash, nil, parents...)
 }
 
-// StoreCommit will store a Git commit with the given Git tree
-func (repo *GoGitRepo) StoreCommitWithParent(treeHash Hash, parent Hash) (Hash, error) {
+// StoreSignedCommit will store a Git commit with the given Git tree. If signKey is not nil, the commit
+// will be signed accordingly.
+func (repo *GoGitRepo) StoreSignedCommit(treeHash Hash, signKey *openpgp.Entity, parents ...Hash) (Hash, error) {
 	cfg, err := repo.r.Config()
 	if err != nil {
 		return "", err
@@ -545,8 +581,28 @@ func (repo *GoGitRepo) StoreCommitWithParent(treeHash Hash, parent Hash) (Hash,
 		TreeHash: plumbing.NewHash(treeHash.String()),
 	}
 
-	if parent != "" {
-		commit.ParentHashes = []plumbing.Hash{plumbing.NewHash(parent.String())}
+	for _, parent := range parents {
+		commit.ParentHashes = append(commit.ParentHashes, plumbing.NewHash(parent.String()))
+	}
+
+	// Compute the signature if needed
+	if signKey != nil {
+		// first get the serialized commit
+		encoded := &plumbing.MemoryObject{}
+		if err := commit.Encode(encoded); err != nil {
+			return "", err
+		}
+		r, err := encoded.Reader()
+		if err != nil {
+			return "", err
+		}
+
+		// sign the data
+		var sig bytes.Buffer
+		if err := openpgp.ArmoredDetachSign(&sig, signKey, r, nil); err != nil {
+			return "", err
+		}
+		commit.PGPSignature = sig.String()
 	}
 
 	obj := repo.r.Storer.NewEncodedObject()
@@ -593,6 +649,14 @@ func (repo *GoGitRepo) FindCommonAncestor(commit1 Hash, commit2 Hash) (Hash, err
 	return Hash(commits[0].Hash.String()), nil
 }
 
+func (repo *GoGitRepo) ResolveRef(ref string) (Hash, error) {
+	r, err := repo.r.Reference(plumbing.ReferenceName(ref), false)
+	if err != nil {
+		return "", err
+	}
+	return Hash(r.Hash().String()), nil
+}
+
 // UpdateRef will create or update a Git reference
 func (repo *GoGitRepo) UpdateRef(ref string, hash Hash) error {
 	return repo.r.Storer.SetReference(plumbing.NewHashReference(plumbing.ReferenceName(ref), plumbing.NewHash(hash.String())))
@@ -647,34 +711,79 @@ func (repo *GoGitRepo) CopyRef(source string, dest string) error {
 
 // ListCommits will return the list of tree hashes of a ref, in chronological order
 func (repo *GoGitRepo) ListCommits(ref string) ([]Hash, error) {
-	r, err := repo.r.Reference(plumbing.ReferenceName(ref), false)
+	return nonNativeListCommits(repo, ref)
+}
+
+func (repo *GoGitRepo) ReadCommit(hash Hash) (Commit, error) {
+	commit, err := repo.r.CommitObject(plumbing.NewHash(hash.String()))
 	if err != nil {
-		return nil, err
+		return Commit{}, err
 	}
 
-	commit, err := repo.r.CommitObject(r.Hash())
-	if err != nil {
-		return nil, err
+	parents := make([]Hash, len(commit.ParentHashes))
+	for i, parentHash := range commit.ParentHashes {
+		parents[i] = Hash(parentHash.String())
 	}
-	hashes := []Hash{Hash(commit.Hash.String())}
 
-	for {
-		commit, err = commit.Parent(0)
-		if err == object.ErrParentNotFound {
-			break
+	result := Commit{
+		Hash:     hash,
+		Parents:  parents,
+		TreeHash: Hash(commit.TreeHash.String()),
+	}
+
+	if commit.PGPSignature != "" {
+		// I can't find a way to just remove the signature when reading the encoded commit so we need to
+		// re-encode the commit without signature.
+
+		encoded := &plumbing.MemoryObject{}
+		err := commit.EncodeWithoutSignature(encoded)
+		if err != nil {
+			return Commit{}, err
 		}
+
+		result.SignedData, err = encoded.Reader()
 		if err != nil {
-			return nil, err
+			return Commit{}, err
 		}
 
-		if commit.NumParents() > 1 {
-			return nil, fmt.Errorf("multiple parents")
+		result.Signature, err = deArmorSignature(strings.NewReader(commit.PGPSignature))
+		if err != nil {
+			return Commit{}, err
 		}
+	}
+
+	return result, nil
+}
+
+func (repo *GoGitRepo) AllClocks() (map[string]lamport.Clock, error) {
+	repo.clocksMutex.Lock()
+	defer repo.clocksMutex.Unlock()
+
+	result := make(map[string]lamport.Clock)
+
+	files, err := ioutil.ReadDir(filepath.Join(repo.path, "git-bug", clockPath))
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
 
-		hashes = append([]Hash{Hash(commit.Hash.String())}, hashes...)
+	for _, file := range files {
+		name := file.Name()
+		if c, ok := repo.clocks[name]; ok {
+			result[name] = c
+		} else {
+			c, err := lamport.LoadPersistedClock(repo.LocalStorage(), filepath.Join(clockPath, name))
+			if err != nil {
+				return nil, err
+			}
+			repo.clocks[name] = c
+			result[name] = c
+		}
 	}
 
-	return hashes, nil
+	return result, nil
 }
 
 // GetOrCreateClock return a Lamport clock stored in the Repo.
@@ -691,7 +800,7 @@ func (repo *GoGitRepo) GetOrCreateClock(name string) (lamport.Clock, error) {
 		return nil, err
 	}
 
-	c, err = lamport.NewPersistedClock(repo.localStorage, name+"-clock")
+	c, err = lamport.NewPersistedClock(repo.LocalStorage(), filepath.Join(clockPath, name))
 	if err != nil {
 		return nil, err
 	}
@@ -705,7 +814,7 @@ func (repo *GoGitRepo) getClock(name string) (lamport.Clock, error) {
 		return c, nil
 	}
 
-	c, err := lamport.LoadPersistedClock(repo.localStorage, name+"-clock")
+	c, err := lamport.LoadPersistedClock(repo.LocalStorage(), filepath.Join(clockPath, name))
 	if err == nil {
 		repo.clocks[name] = c
 		return c, nil
@@ -716,6 +825,24 @@ func (repo *GoGitRepo) getClock(name string) (lamport.Clock, error) {
 	return nil, err
 }
 
+// Increment is equivalent to c = GetOrCreateClock(name) + c.Increment()
+func (repo *GoGitRepo) Increment(name string) (lamport.Time, error) {
+	c, err := repo.GetOrCreateClock(name)
+	if err != nil {
+		return lamport.Time(0), err
+	}
+	return c.Increment()
+}
+
+// Witness is equivalent to c = GetOrCreateClock(name) + c.Witness(time)
+func (repo *GoGitRepo) Witness(name string, time lamport.Time) error {
+	c, err := repo.GetOrCreateClock(name)
+	if err != nil {
+		return err
+	}
+	return c.Witness(time)
+}
+
 // AddRemote add a new remote to the repository
 // Not in the interface because it's only used for testing
 func (repo *GoGitRepo) AddRemote(name string, url string) error {

repository/gogit_testing.go 🔗

@@ -3,6 +3,8 @@ package repository
 import (
 	"io/ioutil"
 	"log"
+
+	"github.com/99designs/keyring"
 )
 
 // This is intended for testing only
@@ -34,7 +36,11 @@ func CreateGoGitTestRepo(bare bool) TestedRepo {
 		log.Fatal("failed to set user.email for test repository: ", err)
 	}
 
-	return repo
+	// make sure we use a mock keyring for testing to not interact with the global system
+	return &replaceKeyring{
+		TestedRepo: repo,
+		keyring:    keyring.NewArrayKeyring(nil),
+	}
 }
 
 func SetupGoGitReposAndRemote() (repoA, repoB, remote TestedRepo) {

repository/keyring.go 🔗

@@ -15,7 +15,7 @@ var ErrKeyringKeyNotFound = keyring.ErrKeyNotFound
 type Keyring interface {
 	// Returns an Item matching the key or ErrKeyringKeyNotFound
 	Get(key string) (Item, error)
-	// Stores an Item on the keyring
+	// Stores an Item on the keyring. Set is idempotent.
 	Set(item Item) error
 	// Removes the item with matching key
 	Remove(key string) error
@@ -48,3 +48,13 @@ func defaultKeyring() (Keyring, error) {
 		},
 	})
 }
+
+// replaceKeyring allows replacing the Keyring of the underlying repo
+type replaceKeyring struct {
+	TestedRepo
+	keyring Keyring
+}
+
+func (rk replaceKeyring) Keyring() Keyring {
+	return rk.keyring
+}

repository/mock_repo.go 🔗

@@ -1,6 +1,7 @@
 package repository
 
 import (
+	"bytes"
 	"crypto/sha1"
 	"fmt"
 	"strings"
@@ -10,15 +11,16 @@ import (
 	"github.com/blevesearch/bleve"
 	"github.com/go-git/go-billy/v5"
 	"github.com/go-git/go-billy/v5/memfs"
+	"golang.org/x/crypto/openpgp"
 
 	"github.com/MichaelMure/git-bug/util/lamport"
 )
 
-var _ ClockedRepo = &mockRepoForTest{}
-var _ TestedRepo = &mockRepoForTest{}
+var _ ClockedRepo = &mockRepo{}
+var _ TestedRepo = &mockRepo{}
 
-// mockRepoForTest defines an instance of Repo that can be used for testing.
-type mockRepoForTest struct {
+// mockRepo defines an instance of Repo that can be used for testing.
+type mockRepo struct {
 	*mockRepoConfig
 	*mockRepoKeyring
 	*mockRepoCommon
@@ -26,12 +28,13 @@ type mockRepoForTest struct {
 	*mockRepoBleve
 	*mockRepoData
 	*mockRepoClock
+	*mockRepoTest
 }
 
-func (m *mockRepoForTest) Close() error { return nil }
+func (m *mockRepo) Close() error { return nil }
 
-func NewMockRepoForTest() *mockRepoForTest {
-	return &mockRepoForTest{
+func NewMockRepo() *mockRepo {
+	return &mockRepo{
 		mockRepoConfig:  NewMockRepoConfig(),
 		mockRepoKeyring: NewMockRepoKeyring(),
 		mockRepoCommon:  NewMockRepoCommon(),
@@ -39,6 +42,7 @@ func NewMockRepoForTest() *mockRepoForTest {
 		mockRepoBleve:   newMockRepoBleve(),
 		mockRepoData:    NewMockRepoData(),
 		mockRepoClock:   NewMockRepoClock(),
+		mockRepoTest:    NewMockRepoTest(),
 	}
 }
 
@@ -177,7 +181,8 @@ var _ RepoData = &mockRepoData{}
 
 type commit struct {
 	treeHash Hash
-	parent   Hash
+	parents  []Hash
+	sig      string
 }
 
 type mockRepoData struct {
@@ -196,13 +201,13 @@ func NewMockRepoData() *mockRepoData {
 	}
 }
 
-// PushRefs push git refs to a remote
-func (r *mockRepoData) PushRefs(remote string, refSpec string) (string, error) {
-	return "", nil
+func (r *mockRepoData) FetchRefs(remote string, prefix string) (string, error) {
+	panic("implement me")
 }
 
-func (r *mockRepoData) FetchRefs(remote string, refSpec string) (string, error) {
-	return "", nil
+// PushRefs push git refs to a remote
+func (r *mockRepoData) PushRefs(remote string, prefix string) (string, error) {
+	panic("implement me")
 }
 
 func (r *mockRepoData) StoreData(data []byte) (Hash, error) {
@@ -214,7 +219,6 @@ func (r *mockRepoData) StoreData(data []byte) (Hash, error) {
 
 func (r *mockRepoData) ReadData(hash Hash) ([]byte, error) {
 	data, ok := r.blobs[hash]
-
 	if !ok {
 		return nil, fmt.Errorf("unknown hash")
 	}
@@ -231,48 +235,103 @@ func (r *mockRepoData) StoreTree(entries []TreeEntry) (Hash, error) {
 	return hash, nil
 }
 
-func (r *mockRepoData) StoreCommit(treeHash Hash) (Hash, error) {
-	rawHash := sha1.Sum([]byte(treeHash))
-	hash := Hash(fmt.Sprintf("%x", rawHash))
-	r.commits[hash] = commit{
-		treeHash: treeHash,
+func (r *mockRepoData) ReadTree(hash Hash) ([]TreeEntry, error) {
+	var data string
+
+	data, ok := r.trees[hash]
+
+	if !ok {
+		// Git will understand a commit hash to reach a tree
+		commit, ok := r.commits[hash]
+
+		if !ok {
+			return nil, fmt.Errorf("unknown hash")
+		}
+
+		data, ok = r.trees[commit.treeHash]
+
+		if !ok {
+			return nil, fmt.Errorf("unknown hash")
+		}
 	}
-	return hash, nil
+
+	return readTreeEntries(data)
+}
+
+func (r *mockRepoData) StoreCommit(treeHash Hash, parents ...Hash) (Hash, error) {
+	return r.StoreSignedCommit(treeHash, nil, parents...)
 }
 
-func (r *mockRepoData) StoreCommitWithParent(treeHash Hash, parent Hash) (Hash, error) {
-	rawHash := sha1.Sum([]byte(treeHash + parent))
+func (r *mockRepoData) StoreSignedCommit(treeHash Hash, signKey *openpgp.Entity, parents ...Hash) (Hash, error) {
+	hasher := sha1.New()
+	hasher.Write([]byte(treeHash))
+	for _, parent := range parents {
+		hasher.Write([]byte(parent))
+	}
+	rawHash := hasher.Sum(nil)
 	hash := Hash(fmt.Sprintf("%x", rawHash))
-	r.commits[hash] = commit{
+	c := commit{
 		treeHash: treeHash,
-		parent:   parent,
+		parents:  parents,
+	}
+	if signKey != nil {
+		// unlike go-git, we only sign the tree hash for simplicity instead of all the fields (parents ...)
+		var sig bytes.Buffer
+		if err := openpgp.DetachSign(&sig, signKey, strings.NewReader(string(treeHash)), nil); err != nil {
+			return "", err
+		}
+		c.sig = sig.String()
 	}
+	r.commits[hash] = c
 	return hash, nil
 }
 
-func (r *mockRepoData) UpdateRef(ref string, hash Hash) error {
-	r.refs[ref] = hash
-	return nil
-}
+func (r *mockRepoData) ReadCommit(hash Hash) (Commit, error) {
+	c, ok := r.commits[hash]
+	if !ok {
+		return Commit{}, fmt.Errorf("unknown commit")
+	}
 
-func (r *mockRepoData) RemoveRef(ref string) error {
-	delete(r.refs, ref)
-	return nil
-}
+	result := Commit{
+		Hash:     hash,
+		Parents:  c.parents,
+		TreeHash: c.treeHash,
+	}
 
-func (r *mockRepoData) RefExist(ref string) (bool, error) {
-	_, exist := r.refs[ref]
-	return exist, nil
+	if c.sig != "" {
+		// Note: this is actually incorrect as the signed data should be the full commit (+comment, +date ...)
+		// but only the tree hash works for our purpose here.
+		result.SignedData = strings.NewReader(string(c.treeHash))
+		result.Signature = strings.NewReader(c.sig)
+	}
+
+	return result, nil
 }
 
-func (r *mockRepoData) CopyRef(source string, dest string) error {
-	hash, exist := r.refs[source]
+func (r *mockRepoData) GetTreeHash(commit Hash) (Hash, error) {
+	c, ok := r.commits[commit]
+	if !ok {
+		return "", fmt.Errorf("unknown commit")
+	}
 
-	if !exist {
-		return fmt.Errorf("Unknown ref")
+	return c.treeHash, nil
+}
+
+func (r *mockRepoData) ResolveRef(ref string) (Hash, error) {
+	h, ok := r.refs[ref]
+	if !ok {
+		return "", fmt.Errorf("unknown ref")
 	}
+	return h, nil
+}
 
-	r.refs[dest] = hash
+func (r *mockRepoData) UpdateRef(ref string, hash Hash) error {
+	r.refs[ref] = hash
+	return nil
+}
+
+func (r *mockRepoData) RemoveRef(ref string) error {
+	delete(r.refs, ref)
 	return nil
 }
 
@@ -288,46 +347,20 @@ func (r *mockRepoData) ListRefs(refPrefix string) ([]string, error) {
 	return keys, nil
 }
 
-func (r *mockRepoData) ListCommits(ref string) ([]Hash, error) {
-	var hashes []Hash
-
-	hash := r.refs[ref]
-
-	for {
-		commit, ok := r.commits[hash]
-
-		if !ok {
-			break
-		}
-
-		hashes = append([]Hash{hash}, hashes...)
-		hash = commit.parent
-	}
-
-	return hashes, nil
+func (r *mockRepoData) RefExist(ref string) (bool, error) {
+	_, exist := r.refs[ref]
+	return exist, nil
 }
 
-func (r *mockRepoData) ReadTree(hash Hash) ([]TreeEntry, error) {
-	var data string
-
-	data, ok := r.trees[hash]
-
-	if !ok {
-		// Git will understand a commit hash to reach a tree
-		commit, ok := r.commits[hash]
-
-		if !ok {
-			return nil, fmt.Errorf("unknown hash")
-		}
-
-		data, ok = r.trees[commit.treeHash]
+func (r *mockRepoData) CopyRef(source string, dest string) error {
+	hash, exist := r.refs[source]
 
-		if !ok {
-			return nil, fmt.Errorf("unknown hash")
-		}
+	if !exist {
+		return fmt.Errorf("Unknown ref")
 	}
 
-	return readTreeEntries(data)
+	r.refs[dest] = hash
+	return nil
 }
 
 func (r *mockRepoData) FindCommonAncestor(hash1 Hash, hash2 Hash) (Hash, error) {
@@ -338,8 +371,11 @@ func (r *mockRepoData) FindCommonAncestor(hash1 Hash, hash2 Hash) (Hash, error)
 		if !ok {
 			return "", fmt.Errorf("unknown commit %v", hash1)
 		}
-		ancestor1 = append(ancestor1, c.parent)
-		hash1 = c.parent
+		if len(c.parents) == 0 {
+			break
+		}
+		ancestor1 = append(ancestor1, c.parents[0])
+		hash1 = c.parents[0]
 	}
 
 	for {
@@ -354,35 +390,19 @@ func (r *mockRepoData) FindCommonAncestor(hash1 Hash, hash2 Hash) (Hash, error)
 			return "", fmt.Errorf("unknown commit %v", hash1)
 		}
 
-		if c.parent == "" {
+		if c.parents[0] == "" {
 			return "", fmt.Errorf("no ancestor found")
 		}
 
-		hash2 = c.parent
-	}
-}
-
-func (r *mockRepoData) GetTreeHash(commit Hash) (Hash, error) {
-	c, ok := r.commits[commit]
-	if !ok {
-		return "", fmt.Errorf("unknown commit")
+		hash2 = c.parents[0]
 	}
-
-	return c.treeHash, nil
 }
 
-func (r *mockRepoData) AddRemote(name string, url string) error {
-	panic("implement me")
-}
-
-func (m mockRepoForTest) GetLocalRemote() string {
-	panic("implement me")
+func (r *mockRepoData) ListCommits(ref string) ([]Hash, error) {
+	return nonNativeListCommits(r, ref)
 }
 
-func (m mockRepoForTest) EraseFromDisk() error {
-	// nothing to do
-	return nil
-}
+var _ RepoClock = &mockRepoClock{}
 
 type mockRepoClock struct {
 	mu     sync.Mutex
@@ -395,6 +415,10 @@ func NewMockRepoClock() *mockRepoClock {
 	}
 }
 
+func (r *mockRepoClock) AllClocks() (map[string]lamport.Clock, error) {
+	return r.clocks, nil
+}
+
 func (r *mockRepoClock) GetOrCreateClock(name string) (lamport.Clock, error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
@@ -407,3 +431,40 @@ func (r *mockRepoClock) GetOrCreateClock(name string) (lamport.Clock, error) {
 	r.clocks[name] = c
 	return c, nil
 }
+
+func (r *mockRepoClock) Increment(name string) (lamport.Time, error) {
+	c, err := r.GetOrCreateClock(name)
+	if err != nil {
+		return lamport.Time(0), err
+	}
+	return c.Increment()
+}
+
+func (r *mockRepoClock) Witness(name string, time lamport.Time) error {
+	c, err := r.GetOrCreateClock(name)
+	if err != nil {
+		return err
+	}
+	return c.Witness(time)
+}
+
+var _ repoTest = &mockRepoTest{}
+
+type mockRepoTest struct{}
+
+func NewMockRepoTest() *mockRepoTest {
+	return &mockRepoTest{}
+}
+
+func (r *mockRepoTest) AddRemote(name string, url string) error {
+	panic("implement me")
+}
+
+func (r mockRepoTest) GetLocalRemote() string {
+	panic("implement me")
+}
+
+func (r mockRepoTest) EraseFromDisk() error {
+	// nothing to do
+	return nil
+}

repository/mock_repo_test.go 🔗

@@ -1,9 +1,11 @@
 package repository
 
-import "testing"
+import (
+	"testing"
+)
 
 func TestMockRepo(t *testing.T) {
-	creator := func(bare bool) TestedRepo { return NewMockRepoForTest() }
+	creator := func(bare bool) TestedRepo { return NewMockRepo() }
 	cleaner := func(repos ...Repo) {}
 
 	RepoTest(t, creator, cleaner)

repository/repo.go 🔗

@@ -3,9 +3,11 @@ package repository
 
 import (
 	"errors"
+	"io"
 
 	"github.com/blevesearch/bleve"
 	"github.com/go-git/go-billy/v5"
+	"golang.org/x/crypto/openpgp"
 
 	"github.com/MichaelMure/git-bug/util/lamport"
 )
@@ -22,9 +24,9 @@ type Repo interface {
 	RepoConfig
 	RepoKeyring
 	RepoCommon
-	RepoData
 	RepoStorage
 	RepoBleve
+	RepoData
 
 	Close() error
 }
@@ -88,13 +90,28 @@ type RepoBleve interface {
 	ClearBleveIndex(name string) error
 }
 
+type Commit struct {
+	Hash       Hash
+	Parents    []Hash    // hashes of the parents, if any
+	TreeHash   Hash      // hash of the git Tree
+	SignedData io.Reader // if signed, reader for the signed data (likely, the serialized commit)
+	Signature  io.Reader // if signed, reader for the (non-armored) signature
+}
+
 // RepoData give access to the git data storage
 type RepoData interface {
-	// FetchRefs fetch git refs from a remote
-	FetchRefs(remote string, refSpec string) (string, error)
-
-	// PushRefs push git refs to a remote
-	PushRefs(remote string, refSpec string) (string, error)
+	// FetchRefs fetches git refs matching a directory prefix from a remote
+	// Ex: prefix="foo" will fetch any remote refs matching "refs/foo/*" locally.
+	// The equivalent git refspec would be "refs/foo/*:refs/remotes/<remote>/foo/*"
+	FetchRefs(remote string, prefix string) (string, error)
+
+	// PushRefs pushes git refs matching a directory prefix to a remote
+	// Ex: prefix="foo" will push any local refs matching "refs/foo/*" to the remote.
+	// The equivalent git refspec would be "refs/foo/*:refs/foo/*"
+	//
+	// Additionally, PushRefs will update the local references in refs/remotes/<remote>/foo to match
+	// the remote state.
+	PushRefs(remote string, prefix string) (string, error)
 
 	// StoreData will store arbitrary data and return the corresponding hash
 	StoreData(data []byte) (Hash, error)
@@ -110,21 +127,27 @@ type RepoData interface {
 	ReadTree(hash Hash) ([]TreeEntry, error)
 
 	// StoreCommit will store a Git commit with the given Git tree
-	StoreCommit(treeHash Hash) (Hash, error)
+	StoreCommit(treeHash Hash, parents ...Hash) (Hash, error)
 
-	// StoreCommit will store a Git commit with the given Git tree
-	StoreCommitWithParent(treeHash Hash, parent Hash) (Hash, error)
+	// StoreSignedCommit will store a Git commit with the given Git tree. If signKey is not nil, the commit
+	// will be signed accordingly.
+	StoreSignedCommit(treeHash Hash, signKey *openpgp.Entity, parents ...Hash) (Hash, error)
+
+	// ReadCommit reads a Git commit and returns some of its characteristics
+	ReadCommit(hash Hash) (Commit, error)
 
 	// GetTreeHash return the git tree hash referenced in a commit
+	// Deprecated
 	GetTreeHash(commit Hash) (Hash, error)
 
-	// FindCommonAncestor will return the last common ancestor of two chain of commit
-	FindCommonAncestor(commit1 Hash, commit2 Hash) (Hash, error)
+	// ResolveRef returns the hash of the target commit of the given ref
+	ResolveRef(ref string) (Hash, error)
 
 	// UpdateRef will create or update a Git reference
 	UpdateRef(ref string, hash Hash) error
 
 	// RemoveRef will remove a Git reference
+	// RemoveRef is idempotent.
 	RemoveRef(ref string) error
 
 	// ListRefs will return a list of Git ref matching the given refspec
@@ -136,15 +159,28 @@ type RepoData interface {
 	// CopyRef will create a new reference with the same value as another one
 	CopyRef(source string, dest string) error
 
+	// FindCommonAncestor will return the last common ancestor of two chains of commits
+	// Deprecated
+	FindCommonAncestor(commit1 Hash, commit2 Hash) (Hash, error)
+
 	// ListCommits will return the list of tree hashes of a ref, in chronological order
 	ListCommits(ref string) ([]Hash, error)
 }
 
 // RepoClock give access to Lamport clocks
 type RepoClock interface {
+	// AllClocks returns all the known clocks
+	AllClocks() (map[string]lamport.Clock, error)
+
 	// GetOrCreateClock return a Lamport clock stored in the Repo.
 	// If the clock doesn't exist, it's created.
 	GetOrCreateClock(name string) (lamport.Clock, error)
+
+	// Increment is equivalent to c = GetOrCreateClock(name) + c.Increment()
+	Increment(name string) (lamport.Time, error)
+
+	// Witness is equivalent to c = GetOrCreateClock(name) + c.Witness(time)
+	Witness(name string, time lamport.Time) error
 }
 
 // ClockLoader hold which logical clock need to exist for an entity and

repository/repo_testing.go 🔗

@@ -6,10 +6,14 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
+	"golang.org/x/crypto/openpgp"
 
 	"github.com/MichaelMure/git-bug/util/lamport"
 )
 
+// TODO: add tests for RepoBleve
+// TODO: add tests for RepoStorage
+
 func CleanupTestRepos(repos ...Repo) {
 	var firstErr error
 	for _, repo := range repos {
@@ -44,6 +48,7 @@ func RepoTest(t *testing.T, creator RepoCreator, cleaner RepoCleaner) {
 
 			t.Run("Data", func(t *testing.T) {
 				RepoDataTest(t, repo)
+				RepoDataSignatureTest(t, repo)
 			})
 
 			t.Run("Config", func(t *testing.T) {
@@ -135,7 +140,8 @@ func RepoDataTest(t *testing.T, repo RepoData) {
 	require.NoError(t, err)
 	require.Equal(t, treeHash1, treeHash1Read)
 
-	commit2, err := repo.StoreCommitWithParent(treeHash2, commit1)
+	// commit with a parent
+	commit2, err := repo.StoreCommit(treeHash2, commit1)
 	require.NoError(t, err)
 	require.True(t, commit2.IsValid())
 
@@ -148,6 +154,11 @@ func RepoDataTest(t *testing.T, repo RepoData) {
 	require.NoError(t, err)
 	require.Equal(t, tree1read, tree1)
 
+	c2, err := repo.ReadCommit(commit2)
+	require.NoError(t, err)
+	c2expected := Commit{Hash: commit2, Parents: []Hash{commit1}, TreeHash: treeHash2}
+	require.Equal(t, c2expected, c2)
+
 	// Ref
 
 	exist1, err := repo.RefExist("refs/bugs/ref1")
@@ -161,6 +172,10 @@ func RepoDataTest(t *testing.T, repo RepoData) {
 	require.NoError(t, err)
 	require.True(t, exist1)
 
+	h, err := repo.ResolveRef("refs/bugs/ref1")
+	require.NoError(t, err)
+	require.Equal(t, commit2, h)
+
 	ls, err := repo.ListRefs("refs/bugs")
 	require.NoError(t, err)
 	require.ElementsMatch(t, []string{"refs/bugs/ref1"}, ls)
@@ -178,7 +193,7 @@ func RepoDataTest(t *testing.T, repo RepoData) {
 
 	// Graph
 
-	commit3, err := repo.StoreCommitWithParent(treeHash1, commit1)
+	commit3, err := repo.StoreCommit(treeHash1, commit1)
 	require.NoError(t, err)
 
 	ancestorHash, err := repo.FindCommonAncestor(commit2, commit3)
@@ -187,17 +202,73 @@ func RepoDataTest(t *testing.T, repo RepoData) {
 
 	err = repo.RemoveRef("refs/bugs/ref1")
 	require.NoError(t, err)
+
+	// RemoveRef is idempotent
+	err = repo.RemoveRef("refs/bugs/ref1")
+	require.NoError(t, err)
+}
+
+func RepoDataSignatureTest(t *testing.T, repo RepoData) {
+	data := randomData()
+
+	blobHash, err := repo.StoreData(data)
+	require.NoError(t, err)
+
+	treeHash, err := repo.StoreTree([]TreeEntry{
+		{
+			ObjectType: Blob,
+			Hash:       blobHash,
+			Name:       "blob",
+		},
+	})
+	require.NoError(t, err)
+
+	pgpEntity1, err := openpgp.NewEntity("", "", "", nil)
+	require.NoError(t, err)
+	keyring1 := openpgp.EntityList{pgpEntity1}
+
+	pgpEntity2, err := openpgp.NewEntity("", "", "", nil)
+	require.NoError(t, err)
+	keyring2 := openpgp.EntityList{pgpEntity2}
+
+	commitHash1, err := repo.StoreSignedCommit(treeHash, pgpEntity1)
+	require.NoError(t, err)
+
+	commit1, err := repo.ReadCommit(commitHash1)
+	require.NoError(t, err)
+
+	_, err = openpgp.CheckDetachedSignature(keyring1, commit1.SignedData, commit1.Signature)
+	require.NoError(t, err)
+
+	_, err = openpgp.CheckDetachedSignature(keyring2, commit1.SignedData, commit1.Signature)
+	require.Error(t, err)
+
+	commitHash2, err := repo.StoreSignedCommit(treeHash, pgpEntity1, commitHash1)
+	require.NoError(t, err)
+
+	commit2, err := repo.ReadCommit(commitHash2)
+	require.NoError(t, err)
+
+	_, err = openpgp.CheckDetachedSignature(keyring1, commit2.SignedData, commit2.Signature)
+	require.NoError(t, err)
+
+	_, err = openpgp.CheckDetachedSignature(keyring2, commit2.SignedData, commit2.Signature)
+	require.Error(t, err)
 }
 
 // helper to test a RepoClock
 func RepoClockTest(t *testing.T, repo RepoClock) {
+	allClocks, err := repo.AllClocks()
+	require.NoError(t, err)
+	require.Len(t, allClocks, 0)
+
 	clock, err := repo.GetOrCreateClock("foo")
 	require.NoError(t, err)
 	require.Equal(t, lamport.Time(1), clock.Time())
 
 	time, err := clock.Increment()
 	require.NoError(t, err)
-	require.Equal(t, lamport.Time(1), time)
+	require.Equal(t, lamport.Time(2), time)
 	require.Equal(t, lamport.Time(2), clock.Time())
 
 	clock2, err := repo.GetOrCreateClock("foo")
@@ -207,6 +278,13 @@ func RepoClockTest(t *testing.T, repo RepoClock) {
 	clock3, err := repo.GetOrCreateClock("bar")
 	require.NoError(t, err)
 	require.Equal(t, lamport.Time(1), clock3.Time())
+
+	allClocks, err = repo.AllClocks()
+	require.NoError(t, err)
+	require.Equal(t, map[string]lamport.Clock{
+		"foo": clock,
+		"bar": clock3,
+	}, allClocks)
 }
 
 func randomData() []byte {

repository/tree_entry.go 🔗

@@ -100,3 +100,13 @@ func readTreeEntries(s string) ([]TreeEntry, error) {
 
 	return casted, nil
 }
+
+// SearchTreeEntry search a TreeEntry by name from an array
+func SearchTreeEntry(entries []TreeEntry, name string) (TreeEntry, bool) {
+	for _, entry := range entries {
+		if entry.Name == name {
+			return entry, true
+		}
+	}
+	return TreeEntry{}, false
+}

tests/read_bugs_test.go 🔗

@@ -14,7 +14,7 @@ func TestReadBugs(t *testing.T) {
 
 	random_bugs.FillRepoWithSeed(repo, 15, 42)
 
-	bugs := bug.ReadAllLocal(repo)
+	bugs := bug.ReadAll(repo)
 	for b := range bugs {
 		if b.Err != nil {
 			t.Fatal(b.Err)
@@ -30,7 +30,7 @@ func benchmarkReadBugs(bugNumber int, t *testing.B) {
 	t.ResetTimer()
 
 	for n := 0; n < t.N; n++ {
-		bugs := bug.ReadAllLocal(repo)
+		bugs := bug.ReadAll(repo)
 		for b := range bugs {
 			if b.Err != nil {
 				t.Fatal(b.Err)

util/lamport/clock_testing.go 🔗

@@ -11,14 +11,14 @@ func testClock(t *testing.T, c Clock) {
 
 	val, err := c.Increment()
 	assert.NoError(t, err)
-	assert.Equal(t, Time(1), val)
+	assert.Equal(t, Time(2), val)
 	assert.Equal(t, Time(2), c.Time())
 
-	err = c.Witness(41)
+	err = c.Witness(42)
 	assert.NoError(t, err)
 	assert.Equal(t, Time(42), c.Time())
 
-	err = c.Witness(41)
+	err = c.Witness(42)
 	assert.NoError(t, err)
 	assert.Equal(t, Time(42), c.Time())
 

util/lamport/mem_clock.go 🔗

@@ -25,6 +25,14 @@
 
 */
 
+// Note: this code originally comes from Hashicorp's Serf, but has been changed since to fit git-bug's needs.
+
+// Note: this Lamport clock implementation is different from the algorithms you can usually find, notably on Wikipedia
+//       or in the original Serf implementation. The reason lies in what constitutes an event in this distributed
+//       system. Commonly, events happen when messages are sent or received, whereas in git-bug events happen when
+//       some data is written, but *not* when read. This is why Witness sets the time to the max seen value instead
+//       of the max seen value +1. See https://cs.stackexchange.com/a/133730/129795
+
 package lamport
 
 import (
@@ -62,7 +70,7 @@ func (mc *MemClock) Time() Time {
 
 // Increment is used to return the value of the lamport clock and increment it afterwards
 func (mc *MemClock) Increment() (Time, error) {
-	return Time(atomic.AddUint64(&mc.counter, 1) - 1), nil
+	return Time(atomic.AddUint64(&mc.counter, 1)), nil
 }
 
 // Witness is called to update our local clock if necessary after
@@ -72,12 +80,12 @@ WITNESS:
 	// If the other value is old, we do not need to do anything
 	cur := atomic.LoadUint64(&mc.counter)
 	other := uint64(v)
-	if other < cur {
+	if other <= cur {
 		return nil
 	}
 
 	// Ensure that our local clock is at least one ahead.
-	if !atomic.CompareAndSwapUint64(&mc.counter, cur, other+1) {
+	if !atomic.CompareAndSwapUint64(&mc.counter, cur, other) {
 		// CAS: CompareAndSwap
 		// The CAS failed, so we just retry. Eventually our CAS should
 		// succeed or a future witness will pass us by and our witness