// Package bug contains the bug data model and low-level related functions
package bug

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/pkg/errors"

	"github.com/MichaelMure/git-bug/entity"
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
	"github.com/MichaelMure/git-bug/util/lamport"
)

const bugsRefPattern = "refs/bugs/"
const bugsRemoteRefPattern = "refs/remotes/%s/bugs/"

const opsEntryName = "ops"
const rootEntryName = "root"
const mediaEntryName = "media"

const createClockEntryPrefix = "create-clock-"
const createClockEntryPattern = "create-clock-%d"
const editClockEntryPrefix = "edit-clock-"
const editClockEntryPattern = "edit-clock-%d"

const creationClockName = "bug-create"
const editClockName = "bug-edit"

var ErrBugNotExist = errors.New("bug doesn't exist")

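// NewErrMultipleMatchBug builds the error returned when several bugs match a given prefix.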
func NewErrMultipleMatchBug(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("bug", matching)
}

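// NewErrMultipleMatchOp builds the error returned when several operations match a given prefix.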
func NewErrMultipleMatchOp(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("operation", matching)
}

var _ Interface = &Bug{}
var _ entity.Interface = &Bug{}

// Bug holds the data of a bug thread, organized in a way close to
// how it will be persisted inside Git. This is the data structure
// used to merge two different versions of the same Bug.
type Bug struct {

	// A Lamport clock is a logical clock that allows ordering events
	// inside a distributed system.
	// It must be the first field in this struct due to https://github.com/golang/go/issues/599
	createTime lamport.Time
	editTime   lamport.Time

	// Id used as unique identifier
	id entity.Id

	lastCommit repository.Hash
	rootPack   repository.Hash

	// all the committed operations
	packs []OperationPack

	// a temporary pack of operations used for convenience to pile up new operations
	// before a commit
	staging OperationPack
}

// NewBug creates a new Bug
func NewBug() *Bug {
	// No id yet
	// No logical clock yet
	return &Bug{}
}

// FindLocalBug finds an existing Bug matching a prefix
func FindLocalBug(repo repository.ClockedRepo, prefix string) (*Bug, error) {
	ids, err := ListLocalIds(repo)

	if err != nil {
		return nil, err
	}

	// preallocate but empty
	matching := make([]entity.Id, 0, 5)

	for _, id := range ids {
		if id.HasPrefix(prefix) {
			matching = append(matching, id)
		}
	}

	if len(matching) == 0 {
		return nil, errors.New("no matching bug found")
	}

	if len(matching) > 1 {
		return nil, NewErrMultipleMatchBug(matching)
	}

	return ReadLocalBug(repo, matching[0])
}

// ReadLocalBug will read a local bug from its id
func ReadLocalBug(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
	ref := bugsRefPattern + id.String()
	return readBug(repo, ref)
}

// ReadRemoteBug will read a remote bug from its id
func ReadRemoteBug(repo repository.ClockedRepo, remote string, id entity.Id) (*Bug, error) {
	ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id.String()
	return readBug(repo, ref)
}

// readBug will read and parse a Bug from git
func readBug(repo repository.ClockedRepo, ref string) (*Bug, error) {
	refSplit := strings.Split(ref, "/")
	id := entity.Id(refSplit[len(refSplit)-1])

	if err := id.Validate(); err != nil {
		return nil, errors.Wrap(err, "invalid ref")
	}

	hashes, err := repo.ListCommits(ref)

	// TODO: this is not perfect, it might be a command invoke error
	if err != nil {
		return nil, ErrBugNotExist
	}

	bug := Bug{
		id:       id,
		editTime: 0,
	}

	// Load each OperationPack
	for _, hash := range hashes {
		entries, err := repo.ReadTree(hash)
		if err != nil {
			return nil, errors.Wrap(err, "can't list git tree entries")
		}

		bug.lastCommit = hash

		var opsEntry repository.TreeEntry
		opsFound := false
		var rootEntry repository.TreeEntry
		rootFound := false
		var createTime uint64
		var editTime uint64

		for _, entry := range entries {
			if entry.Name == opsEntryName {
				opsEntry = entry
				opsFound = true
				continue
			}
			if entry.Name == rootEntryName {
				rootEntry = entry
				rootFound = true
			}
			if strings.HasPrefix(entry.Name, createClockEntryPrefix) {
				n, err := fmt.Sscanf(entry.Name, createClockEntryPattern, &createTime)
				if err != nil {
					return nil, errors.Wrap(err, "can't read create lamport time")
				}
				if n != 1 {
					return nil, fmt.Errorf("could not parse create time lamport value")
				}
			}
			if strings.HasPrefix(entry.Name, editClockEntryPrefix) {
				n, err := fmt.Sscanf(entry.Name, editClockEntryPattern, &editTime)
				if err != nil {
					return nil, errors.Wrap(err, "can't read edit lamport time")
				}
				if n != 1 {
					return nil, fmt.Errorf("could not parse edit time lamport value")
				}
			}
		}

		if !opsFound {
			return nil, errors.New("invalid tree, missing the ops entry")
		}
		if !rootFound {
			return nil, errors.New("invalid tree, missing the root entry")
		}

		if bug.rootPack == "" {
			bug.rootPack = rootEntry.Hash
			bug.createTime = lamport.Time(createTime)
		}

		// Due to rebase, edit Lamport times are not necessarily ordered
		if editTime > uint64(bug.editTime) {
			bug.editTime = lamport.Time(editTime)
		}

		// Update the clocks
		createClock, err := repo.GetOrCreateClock(creationClockName)
		if err != nil {
			return nil, err
		}
		if err := createClock.Witness(bug.createTime); err != nil {
			return nil, errors.Wrap(err, "failed to update create lamport clock")
		}
		editClock, err := repo.GetOrCreateClock(editClockName)
		if err != nil {
			return nil, err
		}
		if err := editClock.Witness(bug.editTime); err != nil {
			return nil, errors.Wrap(err, "failed to update edit lamport clock")
		}

		data, err := repo.ReadData(opsEntry.Hash)
		if err != nil {
			return nil, errors.Wrap(err, "failed to read git blob data")
		}

		opp := &OperationPack{}
		err = json.Unmarshal(data, &opp)

		if err != nil {
			return nil, errors.Wrap(err, "failed to decode OperationPack json")
		}

		// tag the pack with the commit hash
		opp.commitHash = hash

		bug.packs = append(bug.packs, *opp)
	}

	// Make sure that the identities are properly loaded
	resolver := identity.NewSimpleResolver(repo)
	err = bug.EnsureIdentities(resolver)
	if err != nil {
		return nil, err
	}

	return &bug, nil
}

// RemoveBug will remove a local bug, given its entity.Id
func RemoveBug(repo repository.ClockedRepo, id entity.Id) error {
	var fullMatches []string

	refs, err := repo.ListRefs(bugsRefPattern + id.String())
	if err != nil {
		return err
	}
	if len(refs) > 1 {
		return NewErrMultipleMatchBug(refsToIds(refs))
	}
	if len(refs) == 1 {
		// we have the bug locally
		fullMatches = append(fullMatches, refs[0])
	}

	remotes, err := repo.GetRemotes()
	if err != nil {
		return err
	}

	for remote := range remotes {
		remotePrefix := fmt.Sprintf(bugsRemoteRefPattern+id.String(), remote)
		remoteRefs, err := repo.ListRefs(remotePrefix)
		if err != nil {
			return err
		}
		if len(remoteRefs) > 1 {
			return NewErrMultipleMatchBug(refsToIds(remoteRefs))
		}
		if len(remoteRefs) == 1 {
			// found the bug in a remote
			fullMatches = append(fullMatches, remoteRefs[0])
		}
	}

	if len(fullMatches) == 0 {
		return ErrBugNotExist
	}

	for _, ref := range fullMatches {
		err = repo.RemoveRef(ref)
		if err != nil {
			return err
		}
	}

	return nil
}

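// StreamedBug is the result sent on a stream of bugs: either a parsed Bug or
// the error that interrupted the stream.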
type StreamedBug struct {
	Bug *Bug
	Err error
}

// ReadAllLocalBugs reads and parses all local bugs
func ReadAllLocalBugs(repo repository.ClockedRepo) <-chan StreamedBug {
	return readAllBugs(repo, bugsRefPattern)
}

// ReadAllRemoteBugs reads and parses all remote bugs for a given remote
func ReadAllRemoteBugs(repo repository.ClockedRepo, remote string) <-chan StreamedBug {
	refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
	return readAllBugs(repo, refPrefix)
}

// Read and parse all available bugs with a given ref prefix
func readAllBugs(repo repository.ClockedRepo, refPrefix string) <-chan StreamedBug {
	out := make(chan StreamedBug)

	go func() {
		defer close(out)

		refs, err := repo.ListRefs(refPrefix)
		if err != nil {
			out <- StreamedBug{Err: err}
			return
		}

		for _, ref := range refs {
			b, err := readBug(repo, ref)

			if err != nil {
				out <- StreamedBug{Err: err}
				return
			}

			out <- StreamedBug{Bug: b}
		}
	}()

	return out
}

// ListLocalIds lists all the available local bug ids
func ListLocalIds(repo repository.Repo) ([]entity.Id, error) {
	refs, err := repo.ListRefs(bugsRefPattern)
	if err != nil {
		return nil, err
	}

	return refsToIds(refs), nil
}

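// refsToIds converts a list of git references into their entity Ids.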
func refsToIds(refs []string) []entity.Id {
	ids := make([]entity.Id, len(refs))

	for i, ref := range refs {
		ids[i] = refToId(ref)
	}

	return ids
}

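// refToId extracts the entity Id from a git reference (its last path element).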
func refToId(ref string) entity.Id {
	split := strings.Split(ref, "/")
	return entity.Id(split[len(split)-1])
}

// Validate checks if the Bug data is valid
func (bug *Bug) Validate() error {
	// non-empty
	if len(bug.packs) == 0 && bug.staging.IsEmpty() {
		return fmt.Errorf("bug has no operations")
	}

	// check if each pack and its operations are valid
	for _, pack := range bug.packs {
		if err := pack.Validate(); err != nil {
			return err
		}
	}

	// check if staging is valid if needed
	if !bug.staging.IsEmpty() {
		if err := bug.staging.Validate(); err != nil {
			return errors.Wrap(err, "staging")
		}
	}

	// The very first Op should be a CreateOp
	firstOp := bug.FirstOp()
	if firstOp == nil || firstOp.base().OperationType != CreateOp {
		return fmt.Errorf("first operation should be a Create op")
	}

	// The bug Id should be the hash of the first commit
	if len(bug.packs) > 0 && string(bug.packs[0].commitHash) != bug.id.String() {
		return fmt.Errorf("bug id should be the first commit hash")
	}

	// Check that there is no more than one CreateOp
	// Check that there are no colliding operation IDs
	it := NewOperationIterator(bug)
	createCount := 0
	ids := make(map[entity.Id]struct{})
	for it.Next() {
		if it.Value().base().OperationType == CreateOp {
			createCount++
		}
		if _, ok := ids[it.Value().Id()]; ok {
			return fmt.Errorf("id collision: %s", it.Value().Id())
		}
		ids[it.Value().Id()] = struct{}{}
	}

	if createCount != 1 {
		return fmt.Errorf("only one Create op allowed")
	}

	return nil
}

// Append an operation into the staging area, to be committed later
func (bug *Bug) Append(op Operation) {
	bug.staging.Append(op)
}

// Commit writes the staging area in Git and moves the operations to the packs
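//
// A typical write path looks like the following sketch, where op stands for
// an Operation built elsewhere in this package (for a brand new Bug, the
// first operation must be a create operation, see Validate):
//
//	b := NewBug()
//	b.Append(op)
//	if err := b.Commit(repo); err != nil {
//		// handle the error
//	}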
func (bug *Bug) Commit(repo repository.ClockedRepo) error {

	if !bug.NeedCommit() {
		return fmt.Errorf("can't commit a bug with no pending operation")
	}

	if err := bug.Validate(); err != nil {
		return errors.Wrap(err, "can't commit a bug with invalid data")
	}

	// Write the Ops as a Git blob containing the serialized array
	hash, err := bug.staging.Write(repo)
	if err != nil {
		return err
	}

	if bug.rootPack == "" {
		bug.rootPack = hash
	}

	// Make a Git tree referencing this blob
	tree := []repository.TreeEntry{
		// the last pack of ops
		{ObjectType: repository.Blob, Hash: hash, Name: opsEntryName},
		// always the first pack of ops (might be the same)
		{ObjectType: repository.Blob, Hash: bug.rootPack, Name: rootEntryName},
	}

	// Reference, if any, all the files required by the ops
	// Git will check that they actually exist in the storage and will make sure
	// to push/pull them as needed.
	mediaTree := makeMediaTree(bug.staging)
	if len(mediaTree) > 0 {
		mediaTreeHash, err := repo.StoreTree(mediaTree)
		if err != nil {
			return err
		}
		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Tree,
			Hash:       mediaTreeHash,
			Name:       mediaEntryName,
		})
	}

	// Store the logical clocks as well
	// --> edit clock for each OperationPack/commit
	// --> create clock only for the first OperationPack/commit
	//
	// To avoid having one blob for each clock value, clocks are serialized
	// directly into the entry name
	emptyBlobHash, err := repo.StoreData([]byte{})
	if err != nil {
		return err
	}

	editClock, err := repo.GetOrCreateClock(editClockName)
	if err != nil {
		return err
	}
	bug.editTime, err = editClock.Increment()
	if err != nil {
		return err
	}

	tree = append(tree, repository.TreeEntry{
		ObjectType: repository.Blob,
		Hash:       emptyBlobHash,
		Name:       fmt.Sprintf(editClockEntryPattern, bug.editTime),
	})
	if bug.lastCommit == "" {
		createClock, err := repo.GetOrCreateClock(creationClockName)
		if err != nil {
			return err
		}
		bug.createTime, err = createClock.Increment()
		if err != nil {
			return err
		}

		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Blob,
			Hash:       emptyBlobHash,
			Name:       fmt.Sprintf(createClockEntryPattern, bug.createTime),
		})
	}

	// Store the tree
	hash, err = repo.StoreTree(tree)
	if err != nil {
		return err
	}

	// Write a Git commit referencing the tree, with the previous commit as parent
	if bug.lastCommit != "" {
		hash, err = repo.StoreCommitWithParent(hash, bug.lastCommit)
	} else {
		hash, err = repo.StoreCommit(hash)
	}

	if err != nil {
		return err
	}

	bug.lastCommit = hash

	// if it was the first commit, use the commit hash as bug id
	if bug.id == "" {
		bug.id = entity.Id(hash)
	}

	// Create or update the Git reference for this bug
	// When pushing later, the remote will ensure that this ref update
	// is fast-forward, that is no data has been overwritten
	ref := fmt.Sprintf("%s%s", bugsRefPattern, bug.id)
	err = repo.UpdateRef(ref, hash)

	if err != nil {
		return err
	}

	bug.staging.commitHash = hash
	bug.packs = append(bug.packs, bug.staging)
	bug.staging = OperationPack{}

	return nil
}

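// CommitAsNeeded commits the staging area if there is anything to commit and
// does nothing otherwise.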
func (bug *Bug) CommitAsNeeded(repo repository.ClockedRepo) error {
	if !bug.NeedCommit() {
		return nil
	}
	return bug.Commit(repo)
}

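// NeedCommit returns true if the staging area contains operations that have
// not been committed to git yet.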
func (bug *Bug) NeedCommit() bool {
	return !bug.staging.IsEmpty()
}

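// makeMediaTree lists the tree entries referencing the media blobs (files)
// used by the operations of a pack, so that git stores and transports them
// alongside the bug.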
func makeMediaTree(pack OperationPack) []repository.TreeEntry {
	var tree []repository.TreeEntry
	counter := 0
	added := make(map[repository.Hash]struct{})

	for _, ops := range pack.Operations {
		for _, file := range ops.GetFiles() {
			if _, has := added[file]; !has {
				tree = append(tree, repository.TreeEntry{
					ObjectType: repository.Blob,
					Hash:       file,
					// The name is not important here, we only need to
					// reference the blob.
					Name: fmt.Sprintf("file%d", counter),
				})
				counter++
				added[file] = struct{}{}
			}
		}
	}

	return tree
}

// Merge a different version of the same bug by rebasing operations of this bug
// that are not present in the other on top of the chain of operations of the
// other version.
func (bug *Bug) Merge(repo repository.Repo, other Interface) (bool, error) {
	var otherBug = bugFromInterface(other)

	// Note: a faster merge should be possible without actually reading and parsing
	// all the operation packs on our side.
	// Reading the other side is still necessary to validate remote data, at least
	// for new operations

	if bug.id != otherBug.id {
		return false, errors.New("merging unrelated bugs is not supported")
	}

	if len(otherBug.staging.Operations) > 0 {
		return false, errors.New("merging a bug with a non-empty staging is not supported")
	}

	if bug.lastCommit == "" || otherBug.lastCommit == "" {
		return false, errors.New("can't merge a bug that has never been stored")
	}

	ancestor, err := repo.FindCommonAncestor(bug.lastCommit, otherBug.lastCommit)
	if err != nil {
		return false, errors.Wrap(err, "can't find common ancestor")
	}

	ancestorIndex := 0
	newPacks := make([]OperationPack, 0, len(bug.packs))

	// Find the root of the rebase
	for i, pack := range bug.packs {
		newPacks = append(newPacks, pack)

		if pack.commitHash == ancestor {
			ancestorIndex = i
			break
		}
	}

	if len(otherBug.packs) == ancestorIndex+1 {
		// Nothing to rebase, return early
		return false, nil
	}

	// get other bug's extra packs
	for i := ancestorIndex + 1; i < len(otherBug.packs); i++ {
		// clone is probably not necessary
		newPack := otherBug.packs[i].Clone()

		newPacks = append(newPacks, newPack)
		bug.lastCommit = newPack.commitHash
	}

	// rebase our extra packs
	for i := ancestorIndex + 1; i < len(bug.packs); i++ {
		pack := bug.packs[i]

		// get the referenced git tree
		treeHash, err := repo.GetTreeHash(pack.commitHash)

		if err != nil {
			return false, err
		}

		// create a new commit with the correct ancestor
		hash, err := repo.StoreCommitWithParent(treeHash, bug.lastCommit)

		if err != nil {
			return false, err
		}

		// replace the pack
		newPack := pack.Clone()
		newPack.commitHash = hash
		newPacks = append(newPacks, newPack)

		// update the bug
		bug.lastCommit = hash
	}

	bug.packs = newPacks

	// Update the git ref
	err = repo.UpdateRef(bugsRefPattern+bug.id.String(), bug.lastCommit)
	if err != nil {
		return false, err
	}

	return true, nil
}

// Id returns the Bug identifier
func (bug *Bug) Id() entity.Id {
	if bug.id == "" {
		// simply panic as it would be a coding error
		// (using an id of a bug not stored yet)
		panic("no id yet")
	}
	return bug.id
}

// CreateLamportTime returns the Lamport time of creation
func (bug *Bug) CreateLamportTime() lamport.Time {
	return bug.createTime
}

// EditLamportTime returns the Lamport time of the last edit
func (bug *Bug) EditLamportTime() lamport.Time {
	return bug.editTime
}

// FirstOp returns the very first operation of the bug.
// For a valid Bug, this operation should be a CreateOp
func (bug *Bug) FirstOp() Operation {
	for _, pack := range bug.packs {
		for _, op := range pack.Operations {
			return op
		}
	}

	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[0]
	}

	return nil
}

// LastOp returns the very last operation of the bug.
// For a valid Bug, should never be nil
func (bug *Bug) LastOp() Operation {
	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[len(bug.staging.Operations)-1]
	}

	if len(bug.packs) == 0 {
		return nil
	}

	lastPack := bug.packs[len(bug.packs)-1]

	if len(lastPack.Operations) == 0 {
		return nil
	}

	return lastPack.Operations[len(lastPack.Operations)-1]
}

// Compile a bug into an easily usable snapshot
func (bug *Bug) Compile() Snapshot {
	snap := Snapshot{
		id:     bug.id,
		Status: OpenStatus,
	}

	it := NewOperationIterator(bug)

	for it.Next() {
		op := it.Value()
		op.Apply(&snap)
		snap.Operations = append(snap.Operations, op)
	}

	return snap
}