// Package bug contains the bug data model and low-level related functions
package bug

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/pkg/errors"

	"github.com/MichaelMure/git-bug/entity"
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
	"github.com/MichaelMure/git-bug/util/lamport"
)

const bugsRefPattern = "refs/bugs/"
const bugsRemoteRefPattern = "refs/remotes/%s/bugs/"

const opsEntryName = "ops"
const rootEntryName = "root"
const mediaEntryName = "media"

const createClockEntryPrefix = "create-clock-"
const createClockEntryPattern = "create-clock-%d"
const editClockEntryPrefix = "edit-clock-"
const editClockEntryPattern = "edit-clock-%d"
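
// For example, a create Lamport time of 4 and an edit Lamport time of 21 yield
// tree entries named "create-clock-4" and "edit-clock-21"; the clock value is
// carried entirely by the entry name (see Commit below).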

const creationClockName = "bug-create"
const editClockName = "bug-edit"

var ErrBugNotExist = errors.New("bug doesn't exist")

func NewErrMultipleMatchBug(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("bug", matching)
}

func NewErrMultipleMatchOp(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("operation", matching)
}

var _ Interface = &Bug{}
var _ entity.Interface = &Bug{}

// Bug holds the data of a bug thread, organized in a way close to
// how it will be persisted inside Git. This is the data structure
// used to merge two different versions of the same Bug.
type Bug struct {

	// A Lamport clock is a logical clock that allows ordering events
	// inside a distributed system.
	// It must be the first field in this struct due to https://github.com/golang/go/issues/599
	createTime lamport.Time
	editTime   lamport.Time

	// Id used as unique identifier
	id entity.Id

	lastCommit repository.Hash
	rootPack   repository.Hash

	// all the committed operations
	packs []OperationPack

	// a temporary pack of operations used for convenience to pile up new operations
	// before a commit
	staging OperationPack
}
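
// A minimal lifecycle sketch (illustrative only), assuming an Operation value
// op built elsewhere in this package (for instance a create operation) and a
// repository.ClockedRepo repo:
//
//	b := NewBug()
//	b.Append(op)
//	if err := b.Commit(repo); err != nil {
//		// handle the error
//	}
//	id := b.Id() // the id exists only once the first commit has been made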

// NewBug creates a new Bug
func NewBug() *Bug {
	// No id yet
	// No logical clock yet
	return &Bug{}
}

// ReadLocal will read a local bug from its id
func ReadLocal(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
	ref := bugsRefPattern + id.String()
	return read(repo, identity.NewSimpleResolver(repo), ref)
}

// ReadLocalWithResolver will read a local bug from its id, using the given
// identity resolver
func ReadLocalWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, id entity.Id) (*Bug, error) {
	ref := bugsRefPattern + id.String()
	return read(repo, identityResolver, ref)
}

// ReadRemote will read a remote bug from its id
func ReadRemote(repo repository.ClockedRepo, remote string, id entity.Id) (*Bug, error) {
	ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id.String()
	return read(repo, identity.NewSimpleResolver(repo), ref)
}

// ReadRemoteWithResolver will read a remote bug from its id, using the given
// identity resolver
func ReadRemoteWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, remote string, id entity.Id) (*Bug, error) {
	ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id.String()
	return read(repo, identityResolver, ref)
}

// read will read and parse a Bug from git
func read(repo repository.ClockedRepo, identityResolver identity.Resolver, ref string) (*Bug, error) {
	refSplit := strings.Split(ref, "/")
	id := entity.Id(refSplit[len(refSplit)-1])

	if err := id.Validate(); err != nil {
		return nil, errors.Wrap(err, "invalid ref")
	}

	hashes, err := repo.ListCommits(ref)

	// TODO: this is not perfect, it might be a command invocation error
	if err != nil {
		return nil, ErrBugNotExist
	}

	bug := Bug{
		id:       id,
		editTime: 0,
	}

	// Load each OperationPack
	for _, hash := range hashes {
		entries, err := repo.ReadTree(hash)
		if err != nil {
			return nil, errors.Wrap(err, "can't list git tree entries")
		}

		bug.lastCommit = hash

		var opsEntry repository.TreeEntry
		opsFound := false
		var rootEntry repository.TreeEntry
		rootFound := false
		var createTime uint64
		var editTime uint64

		for _, entry := range entries {
			if entry.Name == opsEntryName {
				opsEntry = entry
				opsFound = true
				continue
			}
			if entry.Name == rootEntryName {
				rootEntry = entry
				rootFound = true
			}
			if strings.HasPrefix(entry.Name, createClockEntryPrefix) {
				n, err := fmt.Sscanf(entry.Name, createClockEntryPattern, &createTime)
				if err != nil {
					return nil, errors.Wrap(err, "can't read create lamport time")
				}
				if n != 1 {
					return nil, fmt.Errorf("could not parse create time lamport value")
				}
			}
			if strings.HasPrefix(entry.Name, editClockEntryPrefix) {
				n, err := fmt.Sscanf(entry.Name, editClockEntryPattern, &editTime)
				if err != nil {
					return nil, errors.Wrap(err, "can't read edit lamport time")
				}
				if n != 1 {
					return nil, fmt.Errorf("could not parse edit time lamport value")
				}
			}
		}

		if !opsFound {
			return nil, errors.New("invalid tree, missing the ops entry")
		}
		if !rootFound {
			return nil, errors.New("invalid tree, missing the root entry")
		}

		if bug.rootPack == "" {
			bug.rootPack = rootEntry.Hash
			bug.createTime = lamport.Time(createTime)
		}

		// Due to rebase, edit Lamport times are not necessarily ordered
		if editTime > uint64(bug.editTime) {
			bug.editTime = lamport.Time(editTime)
		}

		// Update the clocks
		createClock, err := repo.GetOrCreateClock(creationClockName)
		if err != nil {
			return nil, err
		}
		if err := createClock.Witness(bug.createTime); err != nil {
			return nil, errors.Wrap(err, "failed to update create lamport clock")
		}
		editClock, err := repo.GetOrCreateClock(editClockName)
		if err != nil {
			return nil, err
		}
		if err := editClock.Witness(bug.editTime); err != nil {
			return nil, errors.Wrap(err, "failed to update edit lamport clock")
		}

		data, err := repo.ReadData(opsEntry.Hash)
		if err != nil {
			return nil, errors.Wrap(err, "failed to read git blob data")
		}

		opp := &OperationPack{}
		err = json.Unmarshal(data, &opp)

		if err != nil {
			return nil, errors.Wrap(err, "failed to decode OperationPack json")
		}

		// tag the pack with the commit hash
		opp.commitHash = hash

		bug.packs = append(bug.packs, *opp)
	}

	// Make sure that the identities are properly loaded
	err = bug.EnsureIdentities(identityResolver)
	if err != nil {
		return nil, err
	}

	return &bug, nil
}

// RemoveBug will remove a local bug from its entity.Id, along with any
// remote tracking refs for that bug
func RemoveBug(repo repository.ClockedRepo, id entity.Id) error {
	var fullMatches []string

	refs, err := repo.ListRefs(bugsRefPattern + id.String())
	if err != nil {
		return err
	}
	if len(refs) > 1 {
		return NewErrMultipleMatchBug(refsToIds(refs))
	}
	if len(refs) == 1 {
		// we have the bug locally
		fullMatches = append(fullMatches, refs[0])
	}

	remotes, err := repo.GetRemotes()
	if err != nil {
		return err
	}

	for remote := range remotes {
		remotePrefix := fmt.Sprintf(bugsRemoteRefPattern+id.String(), remote)
		remoteRefs, err := repo.ListRefs(remotePrefix)
		if err != nil {
			return err
		}
		if len(remoteRefs) > 1 {
			return NewErrMultipleMatchBug(refsToIds(remoteRefs))
		}
		if len(remoteRefs) == 1 {
			// found the bug in a remote
			fullMatches = append(fullMatches, remoteRefs[0])
		}
	}

	if len(fullMatches) == 0 {
		return ErrBugNotExist
	}

	for _, ref := range fullMatches {
		err = repo.RemoveRef(ref)
		if err != nil {
			return err
		}
	}

	return nil
}

type StreamedBug struct {
	Bug *Bug
	Err error
}

// ReadAllLocal reads and parses all local bugs
func ReadAllLocal(repo repository.ClockedRepo) <-chan StreamedBug {
	return readAll(repo, identity.NewSimpleResolver(repo), bugsRefPattern)
}

// ReadAllLocalWithResolver reads and parses all local bugs, using the given
// identity resolver
func ReadAllLocalWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver) <-chan StreamedBug {
	return readAll(repo, identityResolver, bugsRefPattern)
}

// ReadAllRemote reads and parses all remote bugs for a given remote
func ReadAllRemote(repo repository.ClockedRepo, remote string) <-chan StreamedBug {
	refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
	return readAll(repo, identity.NewSimpleResolver(repo), refPrefix)
}

// ReadAllRemoteWithResolver reads and parses all remote bugs for a given
// remote, using the given identity resolver
func ReadAllRemoteWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, remote string) <-chan StreamedBug {
	refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
	return readAll(repo, identityResolver, refPrefix)
}

// readAll reads and parses all available bugs with a given ref prefix
func readAll(repo repository.ClockedRepo, identityResolver identity.Resolver, refPrefix string) <-chan StreamedBug {
	out := make(chan StreamedBug)

	go func() {
		defer close(out)

		refs, err := repo.ListRefs(refPrefix)
		if err != nil {
			out <- StreamedBug{Err: err}
			return
		}

		for _, ref := range refs {
			b, err := read(repo, identityResolver, ref)

			if err != nil {
				out <- StreamedBug{Err: err}
				return
			}

			out <- StreamedBug{Bug: b}
		}
	}()

	return out
}
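
// A minimal consumption sketch for the streamed API above (illustrative only),
// assuming a repository.ClockedRepo repo:
//
//	for streamed := range ReadAllLocal(repo) {
//		if streamed.Err != nil {
//			// the producer closes the channel right after the first error
//			break
//		}
//		fmt.Println(streamed.Bug.Id())
//	}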

// ListLocalIds lists all the available local bug ids
func ListLocalIds(repo repository.Repo) ([]entity.Id, error) {
	refs, err := repo.ListRefs(bugsRefPattern)
	if err != nil {
		return nil, err
	}

	return refsToIds(refs), nil
}

func refsToIds(refs []string) []entity.Id {
	ids := make([]entity.Id, len(refs))

	for i, ref := range refs {
		ids[i] = refToId(ref)
	}

	return ids
}

func refToId(ref string) entity.Id {
	split := strings.Split(ref, "/")
	return entity.Id(split[len(split)-1])
}

// Validate checks if the Bug data is valid
func (bug *Bug) Validate() error {
	// non-empty
	if len(bug.packs) == 0 && bug.staging.IsEmpty() {
		return fmt.Errorf("bug has no operations")
	}

	// check that each pack and its operations are valid
	for _, pack := range bug.packs {
		if err := pack.Validate(); err != nil {
			return err
		}
	}

	// check that the staging area is valid, if needed
	if !bug.staging.IsEmpty() {
		if err := bug.staging.Validate(); err != nil {
			return errors.Wrap(err, "staging")
		}
	}

	// The very first Op should be a CreateOp
	firstOp := bug.FirstOp()
	if firstOp == nil || firstOp.base().OperationType != CreateOp {
		return fmt.Errorf("first operation should be a Create op")
	}

	// The bug Id should be the hash of the first commit
	if len(bug.packs) > 0 && string(bug.packs[0].commitHash) != bug.id.String() {
		return fmt.Errorf("bug id should be the first commit hash")
	}

	// Check that there is no extra Create operation
	// Check that there are no colliding operation IDs
	it := NewOperationIterator(bug)
	createCount := 0
	ids := make(map[entity.Id]struct{})
	for it.Next() {
		if it.Value().base().OperationType == CreateOp {
			createCount++
		}
		if _, ok := ids[it.Value().Id()]; ok {
			return fmt.Errorf("id collision: %s", it.Value().Id())
		}
		ids[it.Value().Id()] = struct{}{}
	}

	if createCount != 1 {
		return fmt.Errorf("only one Create op allowed")
	}

	return nil
}

// Append an operation into the staging area, to be committed later
func (bug *Bug) Append(op Operation) {
	bug.staging.Append(op)
}

// Commit writes the staging area in Git and moves the operations to the packs
func (bug *Bug) Commit(repo repository.ClockedRepo) error {

	if !bug.NeedCommit() {
		return fmt.Errorf("can't commit a bug with no pending operation")
	}

	if err := bug.Validate(); err != nil {
		return errors.Wrap(err, "can't commit a bug with invalid data")
	}

	// Write the Ops as a Git blob containing the serialized array
	hash, err := bug.staging.Write(repo)
	if err != nil {
		return err
	}

	if bug.rootPack == "" {
		bug.rootPack = hash
	}

	// Make a Git tree referencing this blob
	tree := []repository.TreeEntry{
		// the last pack of ops
		{ObjectType: repository.Blob, Hash: hash, Name: opsEntryName},
		// always the first pack of ops (might be the same)
		{ObjectType: repository.Blob, Hash: bug.rootPack, Name: rootEntryName},
	}

	// Reference, if any, all the files required by the ops
	// Git will check that they actually exist in the storage and will make sure
	// to push/pull them as needed.
	mediaTree := makeMediaTree(bug.staging)
	if len(mediaTree) > 0 {
		mediaTreeHash, err := repo.StoreTree(mediaTree)
		if err != nil {
			return err
		}
		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Tree,
			Hash:       mediaTreeHash,
			Name:       mediaEntryName,
		})
	}

	// Store the logical clocks as well
	// --> edit clock for each OperationPack/commit
	// --> create clock only for the first OperationPack/commit
	//
	// To avoid having one blob for each clock value, clocks are serialized
	// directly into the entry name
	emptyBlobHash, err := repo.StoreData([]byte{})
	if err != nil {
		return err
	}

	editClock, err := repo.GetOrCreateClock(editClockName)
	if err != nil {
		return err
	}
	bug.editTime, err = editClock.Increment()
	if err != nil {
		return err
	}

	tree = append(tree, repository.TreeEntry{
		ObjectType: repository.Blob,
		Hash:       emptyBlobHash,
		Name:       fmt.Sprintf(editClockEntryPattern, bug.editTime),
	})
	if bug.lastCommit == "" {
		createClock, err := repo.GetOrCreateClock(creationClockName)
		if err != nil {
			return err
		}
		bug.createTime, err = createClock.Increment()
		if err != nil {
			return err
		}

		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Blob,
			Hash:       emptyBlobHash,
			Name:       fmt.Sprintf(createClockEntryPattern, bug.createTime),
		})
	}
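
	// For a bug's first commit, the resulting tree therefore contains roughly:
	//
	//	ops              -> blob with the serialized staging OperationPack
	//	root             -> blob of the first OperationPack (same as ops here)
	//	media            -> tree of the referenced files, if any
	//	edit-clock-<n>   -> empty blob, the clock value lives in the name
	//	create-clock-<m> -> empty blob, only present on the first commit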

	// Store the tree
	hash, err = repo.StoreTree(tree)
	if err != nil {
		return err
	}

	// Write a Git commit referencing the tree, with the previous commit as parent
	if bug.lastCommit != "" {
		hash, err = repo.StoreCommitWithParent(hash, bug.lastCommit)
	} else {
		hash, err = repo.StoreCommit(hash)
	}

	if err != nil {
		return err
	}

	bug.lastCommit = hash

	// if it was the first commit, use the commit hash as bug id
	if bug.id == "" {
		bug.id = entity.Id(hash)
	}

	// Create or update the Git reference for this bug
	// When pushing later, the remote will ensure that this ref update
	// is a fast-forward, that is, no data has been overwritten
	ref := fmt.Sprintf("%s%s", bugsRefPattern, bug.id)
	err = repo.UpdateRef(ref, hash)

	if err != nil {
		return err
	}

	bug.staging.commitHash = hash
	bug.packs = append(bug.packs, bug.staging)
	bug.staging = OperationPack{}

	return nil
}

// CommitAsNeeded commits the staging area if there is anything to commit
func (bug *Bug) CommitAsNeeded(repo repository.ClockedRepo) error {
	if !bug.NeedCommit() {
		return nil
	}
	return bug.Commit(repo)
}

// NeedCommit returns true if the staging area contains operations not yet committed
func (bug *Bug) NeedCommit() bool {
	return !bug.staging.IsEmpty()
}

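// makeMediaTree builds tree entries referencing every file (blob) used by the
// operations of the given pack, deduplicated by hash, so that Git keeps and
// transfers those blobs along with the bug.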
func makeMediaTree(pack OperationPack) []repository.TreeEntry {
	var tree []repository.TreeEntry
	counter := 0
	added := make(map[repository.Hash]interface{})

	for _, ops := range pack.Operations {
		for _, file := range ops.GetFiles() {
			if _, has := added[file]; !has {
				tree = append(tree, repository.TreeEntry{
					ObjectType: repository.Blob,
					Hash:       file,
					// The name is not important here, we only need to
					// reference the blob.
					Name: fmt.Sprintf("file%d", counter),
				})
				counter++
				added[file] = struct{}{}
			}
		}
	}

	return tree
}

// Merge a different version of the same bug by rebasing operations of this bug
// that are not present in the other on top of the chain of operations of the
// other version.
func (bug *Bug) Merge(repo repository.Repo, other Interface) (bool, error) {
	var otherBug = bugFromInterface(other)

	// Note: a faster merge should be possible without actually reading and parsing
	// all the operation packs of our side.
	// Reading the other side is still necessary to validate remote data, at least
	// for new operations

	if bug.id != otherBug.id {
		return false, errors.New("merging unrelated bugs is not supported")
	}

	if len(otherBug.staging.Operations) > 0 {
		return false, errors.New("merging a bug with a non-empty staging is not supported")
	}

	if bug.lastCommit == "" || otherBug.lastCommit == "" {
		return false, errors.New("can't merge a bug that has never been stored")
	}

	ancestor, err := repo.FindCommonAncestor(bug.lastCommit, otherBug.lastCommit)
	if err != nil {
		return false, errors.Wrap(err, "can't find common ancestor")
	}

	ancestorIndex := 0
	newPacks := make([]OperationPack, 0, len(bug.packs))

	// Find the root of the rebase
	for i, pack := range bug.packs {
		newPacks = append(newPacks, pack)

		if pack.commitHash == ancestor {
			ancestorIndex = i
			break
		}
	}

	if len(otherBug.packs) == ancestorIndex+1 {
		// Nothing to rebase, return early
		return false, nil
	}

	// get other bug's extra packs
	for i := ancestorIndex + 1; i < len(otherBug.packs); i++ {
		// clone is probably not necessary
		newPack := otherBug.packs[i].Clone()

		newPacks = append(newPacks, newPack)
		bug.lastCommit = newPack.commitHash
	}

	// rebase our extra packs
	for i := ancestorIndex + 1; i < len(bug.packs); i++ {
		pack := bug.packs[i]

		// get the referenced git tree
		treeHash, err := repo.GetTreeHash(pack.commitHash)

		if err != nil {
			return false, err
		}

		// create a new commit with the correct ancestor
		hash, err := repo.StoreCommitWithParent(treeHash, bug.lastCommit)

		if err != nil {
			return false, err
		}

		// replace the pack
		newPack := pack.Clone()
		newPack.commitHash = hash
		newPacks = append(newPacks, newPack)

		// update the bug
		bug.lastCommit = hash
	}

	bug.packs = newPacks

	// Update the git ref
	err = repo.UpdateRef(bugsRefPattern+bug.id.String(), bug.lastCommit)
	if err != nil {
		return false, err
	}

	return true, nil
}
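
// A merge usage sketch (illustrative only), assuming an entity.Id id and a
// repo value satisfying both repository.ClockedRepo and repository.Repo,
// with a remote named "origin" already fetched:
//
//	local, err := ReadLocal(repo, id)
//	if err != nil { /* handle */ }
//	remote, err := ReadRemote(repo, "origin", id)
//	if err != nil { /* handle */ }
//	updated, err := local.Merge(repo, remote)
//	if err != nil { /* handle */ }
//	// updated reports whether new operations were integrated from the remote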

// Id returns the Bug identifier
func (bug *Bug) Id() entity.Id {
	if bug.id == "" {
		// simply panic as it would be a coding error
		// (using the id of a bug not stored yet)
		panic("no id yet")
	}
	return bug.id
}

// CreateLamportTime returns the Lamport time of creation
func (bug *Bug) CreateLamportTime() lamport.Time {
	return bug.createTime
}

// EditLamportTime returns the Lamport time of the last edit
func (bug *Bug) EditLamportTime() lamport.Time {
	return bug.editTime
}

// FirstOp looks up the very first operation of the bug.
// For a valid Bug, this operation should be a CreateOp
func (bug *Bug) FirstOp() Operation {
	for _, pack := range bug.packs {
		for _, op := range pack.Operations {
			return op
		}
	}

	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[0]
	}

	return nil
}

// LastOp looks up the very last operation of the bug.
// For a valid Bug, it should never be nil
func (bug *Bug) LastOp() Operation {
	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[len(bug.staging.Operations)-1]
	}

	if len(bug.packs) == 0 {
		return nil
	}

	lastPack := bug.packs[len(bug.packs)-1]

	if len(lastPack.Operations) == 0 {
		return nil
	}

	return lastPack.Operations[len(lastPack.Operations)-1]
}

// Compile a bug into an easily usable snapshot
func (bug *Bug) Compile() Snapshot {
	snap := Snapshot{
		id:     bug.id,
		Status: OpenStatus,
	}

	it := NewOperationIterator(bug)

	for it.Next() {
		op := it.Value()
		op.Apply(&snap)
		snap.Operations = append(snap.Operations, op)
	}

	return snap
}