1// Package bug contains the bug data model and low-level related functions
2package bug
3
4import (
5 "encoding/json"
6 "fmt"
7 "strings"
8
9 "github.com/pkg/errors"
10
11 "github.com/MichaelMure/git-bug/entity"
12 "github.com/MichaelMure/git-bug/identity"
13 "github.com/MichaelMure/git-bug/repository"
14 "github.com/MichaelMure/git-bug/util/lamport"
15)
16
// bugsRefPattern is the git ref namespace where local bugs are stored.
const bugsRefPattern = "refs/bugs/"

// bugsRemoteRefPattern is the git ref namespace for bugs fetched from a
// remote; the %s placeholder is the remote name.
const bugsRemoteRefPattern = "refs/remotes/%s/bugs/"

// Names of the entries in the git tree of each bug commit.
const opsEntryName = "ops"
const rootEntryName = "root"
const mediaEntryName = "media"

// Lamport clock values are serialized directly in the tree entry names,
// following these prefixes/patterns (see Commit/readBug).
const createClockEntryPrefix = "create-clock-"
const createClockEntryPattern = "create-clock-%d"
const editClockEntryPrefix = "edit-clock-"
const editClockEntryPattern = "edit-clock-%d"

// Names of the per-repository logical clocks used by bugs.
const creationClockName = "bug-create"
const editClockName = "bug-edit"

// ErrBugNotExist is the sentinel error returned when no bug matches
// the requested ref or id.
var ErrBugNotExist = errors.New("bug doesn't exist")
33
// NewErrMultipleMatchBug returns an error reporting that several bugs
// match the same prefix, listing the colliding ids.
func NewErrMultipleMatchBug(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("bug", matching)
}
37
// NewErrMultipleMatchOp returns an error reporting that several operations
// match the same prefix, listing the colliding ids.
func NewErrMultipleMatchOp(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("operation", matching)
}
41
// Compile-time checks that *Bug satisfies both the package-local and the
// entity interfaces.
var _ Interface = &Bug{}
var _ entity.Interface = &Bug{}
44
// Bug hold the data of a bug thread, organized in a way close to
// how it will be persisted inside Git. This is the data structure
// used to merge two different version of the same Bug.
type Bug struct {

	// A Lamport clock is a logical clock that allow to order event
	// inside a distributed system.
	// It must be the first field in this struct due to https://github.com/golang/go/issues/599
	createTime lamport.Time
	editTime   lamport.Time

	// Id used as unique identifier
	id entity.Id

	// lastCommit is the hash of the most recent commit of this bug's ref;
	// empty if the bug was never committed.
	lastCommit repository.Hash
	// rootPack is the hash of the very first pack of operations, which
	// defines the bug (referenced as "root" in every commit tree).
	rootPack repository.Hash

	// all the committed operations
	packs []OperationPack

	// a temporary pack of operations used for convenience to pile up new operations
	// before a commit
	staging OperationPack
}
69
70// NewBug create a new Bug
71func NewBug() *Bug {
72 // No id yet
73 // No logical clock yet
74 return &Bug{}
75}
76
77// FindLocalBug find an existing Bug matching a prefix
78func FindLocalBug(repo repository.ClockedRepo, prefix string) (*Bug, error) {
79 ids, err := ListLocalIds(repo)
80
81 if err != nil {
82 return nil, err
83 }
84
85 // preallocate but empty
86 matching := make([]entity.Id, 0, 5)
87
88 for _, id := range ids {
89 if id.HasPrefix(prefix) {
90 matching = append(matching, id)
91 }
92 }
93
94 if len(matching) == 0 {
95 return nil, errors.New("no matching bug found.")
96 }
97
98 if len(matching) > 1 {
99 return nil, NewErrMultipleMatchBug(matching)
100 }
101
102 return ReadLocalBug(repo, matching[0])
103}
104
105// ReadLocalBug will read a local bug from its hash
106func ReadLocalBug(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
107 ref := bugsRefPattern + id.String()
108 return readBug(repo, ref)
109}
110
111// ReadRemoteBug will read a remote bug from its hash
112func ReadRemoteBug(repo repository.ClockedRepo, remote string, id entity.Id) (*Bug, error) {
113 ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id.String()
114 return readBug(repo, ref)
115}
116
117// readBug will read and parse a Bug from git
118func readBug(repo repository.ClockedRepo, ref string) (*Bug, error) {
119 refSplit := strings.Split(ref, "/")
120 id := entity.Id(refSplit[len(refSplit)-1])
121
122 if err := id.Validate(); err != nil {
123 return nil, errors.Wrap(err, "invalid ref ")
124 }
125
126 hashes, err := repo.ListCommits(ref)
127
128 // TODO: this is not perfect, it might be a command invoke error
129 if err != nil {
130 return nil, ErrBugNotExist
131 }
132
133 bug := Bug{
134 id: id,
135 editTime: 0,
136 }
137
138 // Load each OperationPack
139 for _, hash := range hashes {
140 entries, err := repo.ReadTree(hash)
141 if err != nil {
142 return nil, errors.Wrap(err, "can't list git tree entries")
143 }
144
145 bug.lastCommit = hash
146
147 var opsEntry repository.TreeEntry
148 opsFound := false
149 var rootEntry repository.TreeEntry
150 rootFound := false
151 var createTime uint64
152 var editTime uint64
153
154 for _, entry := range entries {
155 if entry.Name == opsEntryName {
156 opsEntry = entry
157 opsFound = true
158 continue
159 }
160 if entry.Name == rootEntryName {
161 rootEntry = entry
162 rootFound = true
163 }
164 if strings.HasPrefix(entry.Name, createClockEntryPrefix) {
165 n, err := fmt.Sscanf(entry.Name, createClockEntryPattern, &createTime)
166 if err != nil {
167 return nil, errors.Wrap(err, "can't read create lamport time")
168 }
169 if n != 1 {
170 return nil, fmt.Errorf("could not parse create time lamport value")
171 }
172 }
173 if strings.HasPrefix(entry.Name, editClockEntryPrefix) {
174 n, err := fmt.Sscanf(entry.Name, editClockEntryPattern, &editTime)
175 if err != nil {
176 return nil, errors.Wrap(err, "can't read edit lamport time")
177 }
178 if n != 1 {
179 return nil, fmt.Errorf("could not parse edit time lamport value")
180 }
181 }
182 }
183
184 if !opsFound {
185 return nil, errors.New("invalid tree, missing the ops entry")
186 }
187 if !rootFound {
188 return nil, errors.New("invalid tree, missing the root entry")
189 }
190
191 if bug.rootPack == "" {
192 bug.rootPack = rootEntry.Hash
193 bug.createTime = lamport.Time(createTime)
194 }
195
196 // Due to rebase, edit Lamport time are not necessarily ordered
197 if editTime > uint64(bug.editTime) {
198 bug.editTime = lamport.Time(editTime)
199 }
200
201 // Update the clocks
202 createClock, err := repo.GetOrCreateClock(creationClockName)
203 if err != nil {
204 return nil, err
205 }
206 if err := createClock.Witness(bug.createTime); err != nil {
207 return nil, errors.Wrap(err, "failed to update create lamport clock")
208 }
209 editClock, err := repo.GetOrCreateClock(editClockName)
210 if err != nil {
211 return nil, err
212 }
213 if err := editClock.Witness(bug.editTime); err != nil {
214 return nil, errors.Wrap(err, "failed to update edit lamport clock")
215 }
216
217 data, err := repo.ReadData(opsEntry.Hash)
218 if err != nil {
219 return nil, errors.Wrap(err, "failed to read git blob data")
220 }
221
222 opp := &OperationPack{}
223 err = json.Unmarshal(data, &opp)
224
225 if err != nil {
226 return nil, errors.Wrap(err, "failed to decode OperationPack json")
227 }
228
229 // tag the pack with the commit hash
230 opp.commitHash = hash
231
232 bug.packs = append(bug.packs, *opp)
233 }
234
235 // Make sure that the identities are properly loaded
236 resolver := identity.NewSimpleResolver(repo)
237 err = bug.EnsureIdentities(resolver)
238 if err != nil {
239 return nil, err
240 }
241
242 return &bug, nil
243}
244
245// RemoveBug will remove a local bug from its entity.Id
246func RemoveBug(repo repository.ClockedRepo, id entity.Id) error {
247 matching := entity.Id("")
248 fullMatches := []string{}
249
250 refs, err := repo.ListRefs(bugsRefPattern + id.String())
251 if err != nil {
252 return err
253 } else if num := len(refs); num > 1 {
254 return NewErrMultipleMatchBug(refsToIds(refs))
255 } else if num == 1 {
256 matching = refToId(refs[0])
257 fullMatches = []string{refs[0]}
258 }
259
260 remotes, err := repo.GetRemotes()
261 if err != nil {
262 return err
263 }
264
265 for remote := range remotes {
266 remotePrefix := fmt.Sprintf(bugsRemoteRefPattern+id.String(), remote)
267 remoteRefs, err := repo.ListRefs(remotePrefix)
268 if err != nil {
269 return err
270 } else if num := len(remoteRefs); num > 1 {
271 return NewErrMultipleMatchBug(refsToIds(refs))
272 } else if num == 1 {
273 id := refToId(remoteRefs[0])
274 fullMatches = append(fullMatches, remoteRefs[0])
275 if matching.String() == "" {
276 matching = id
277 } else if id != matching {
278 return NewErrMultipleMatchBug([]entity.Id{matching, id})
279 }
280 }
281 }
282
283 if matching == "" {
284 return ErrBugNotExist
285 }
286
287 for _, ref := range fullMatches {
288 err = repo.RemoveRef(ref)
289 if err != nil {
290 return err
291 }
292 }
293
294 return nil
295}
296
// StreamedBug is the result type streamed on the channels returned by
// ReadAllLocalBugs/ReadAllRemoteBugs: either a Bug or an error, never both.
type StreamedBug struct {
	Bug *Bug
	Err error
}
301
302// ReadAllLocalBugs read and parse all local bugs
303func ReadAllLocalBugs(repo repository.ClockedRepo) <-chan StreamedBug {
304 return readAllBugs(repo, bugsRefPattern)
305}
306
307// ReadAllRemoteBugs read and parse all remote bugs for a given remote
308func ReadAllRemoteBugs(repo repository.ClockedRepo, remote string) <-chan StreamedBug {
309 refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
310 return readAllBugs(repo, refPrefix)
311}
312
313// Read and parse all available bug with a given ref prefix
314func readAllBugs(repo repository.ClockedRepo, refPrefix string) <-chan StreamedBug {
315 out := make(chan StreamedBug)
316
317 go func() {
318 defer close(out)
319
320 refs, err := repo.ListRefs(refPrefix)
321 if err != nil {
322 out <- StreamedBug{Err: err}
323 return
324 }
325
326 for _, ref := range refs {
327 b, err := readBug(repo, ref)
328
329 if err != nil {
330 out <- StreamedBug{Err: err}
331 return
332 }
333
334 out <- StreamedBug{Bug: b}
335 }
336 }()
337
338 return out
339}
340
341// ListLocalIds list all the available local bug ids
342func ListLocalIds(repo repository.Repo) ([]entity.Id, error) {
343 refs, err := repo.ListRefs(bugsRefPattern)
344 if err != nil {
345 return nil, err
346 }
347
348 return refsToIds(refs), nil
349}
350
351func refsToIds(refs []string) []entity.Id {
352 ids := make([]entity.Id, len(refs))
353
354 for i, ref := range refs {
355 ids[i] = refToId(ref)
356 }
357
358 return ids
359}
360
361func refToId(ref string) entity.Id {
362 split := strings.Split(ref, "/")
363 return entity.Id(split[len(split)-1])
364}
365
366// Validate check if the Bug data is valid
367func (bug *Bug) Validate() error {
368 // non-empty
369 if len(bug.packs) == 0 && bug.staging.IsEmpty() {
370 return fmt.Errorf("bug has no operations")
371 }
372
373 // check if each pack and operations are valid
374 for _, pack := range bug.packs {
375 if err := pack.Validate(); err != nil {
376 return err
377 }
378 }
379
380 // check if staging is valid if needed
381 if !bug.staging.IsEmpty() {
382 if err := bug.staging.Validate(); err != nil {
383 return errors.Wrap(err, "staging")
384 }
385 }
386
387 // The very first Op should be a CreateOp
388 firstOp := bug.FirstOp()
389 if firstOp == nil || firstOp.base().OperationType != CreateOp {
390 return fmt.Errorf("first operation should be a Create op")
391 }
392
393 // The bug Id should be the hash of the first commit
394 if len(bug.packs) > 0 && string(bug.packs[0].commitHash) != bug.id.String() {
395 return fmt.Errorf("bug id should be the first commit hash")
396 }
397
398 // Check that there is no more CreateOp op
399 // Check that there is no colliding operation's ID
400 it := NewOperationIterator(bug)
401 createCount := 0
402 ids := make(map[entity.Id]struct{})
403 for it.Next() {
404 if it.Value().base().OperationType == CreateOp {
405 createCount++
406 }
407 if _, ok := ids[it.Value().Id()]; ok {
408 return fmt.Errorf("id collision: %s", it.Value().Id())
409 }
410 ids[it.Value().Id()] = struct{}{}
411 }
412
413 if createCount != 1 {
414 return fmt.Errorf("only one Create op allowed")
415 }
416
417 return nil
418}
419
// Append an operation into the staging area, to be committed later
// with Commit or CommitAsNeeded.
func (bug *Bug) Append(op Operation) {
	bug.staging.Append(op)
}
424
// Commit write the staging area in Git and move the operations to the packs
//
// A commit is made of: the staged ops blob, the root ops blob, an optional
// media tree, and the clock values serialized as entry names. The bug id is
// set from the commit hash on the very first commit, then the bug ref is
// updated to point to the new commit.
func (bug *Bug) Commit(repo repository.ClockedRepo) error {

	// committing an empty staging area is a programming error
	if !bug.NeedCommit() {
		return fmt.Errorf("can't commit a bug with no pending operation")
	}

	if err := bug.Validate(); err != nil {
		return errors.Wrap(err, "can't commit a bug with invalid data")
	}

	// Write the Ops as a Git blob containing the serialized array
	hash, err := bug.staging.Write(repo)
	if err != nil {
		return err
	}

	// first commit: the staged pack is also the root pack
	if bug.rootPack == "" {
		bug.rootPack = hash
	}

	// Make a Git tree referencing this blob
	tree := []repository.TreeEntry{
		// the last pack of ops
		{ObjectType: repository.Blob, Hash: hash, Name: opsEntryName},
		// always the first pack of ops (might be the same)
		{ObjectType: repository.Blob, Hash: bug.rootPack, Name: rootEntryName},
	}

	// Reference, if any, all the files required by the ops
	// Git will check that they actually exist in the storage and will make sure
	// to push/pull them as needed.
	mediaTree := makeMediaTree(bug.staging)
	if len(mediaTree) > 0 {
		mediaTreeHash, err := repo.StoreTree(mediaTree)
		if err != nil {
			return err
		}
		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Tree,
			Hash:       mediaTreeHash,
			Name:       mediaEntryName,
		})
	}

	// Store the logical clocks as well
	// --> edit clock for each OperationPack/commits
	// --> create clock only for the first OperationPack/commits
	//
	// To avoid having one blob for each clock value, clocks are serialized
	// directly into the entry name
	emptyBlobHash, err := repo.StoreData([]byte{})
	if err != nil {
		return err
	}

	editClock, err := repo.GetOrCreateClock(editClockName)
	if err != nil {
		return err
	}
	bug.editTime, err = editClock.Increment()
	if err != nil {
		return err
	}

	tree = append(tree, repository.TreeEntry{
		ObjectType: repository.Blob,
		Hash:       emptyBlobHash,
		Name:       fmt.Sprintf(editClockEntryPattern, bug.editTime),
	})
	// no previous commit means this is the creation: record the create clock
	if bug.lastCommit == "" {
		createClock, err := repo.GetOrCreateClock(creationClockName)
		if err != nil {
			return err
		}
		bug.createTime, err = createClock.Increment()
		if err != nil {
			return err
		}

		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Blob,
			Hash:       emptyBlobHash,
			Name:       fmt.Sprintf(createClockEntryPattern, bug.createTime),
		})
	}

	// Store the tree
	hash, err = repo.StoreTree(tree)
	if err != nil {
		return err
	}

	// Write a Git commit referencing the tree, with the previous commit as parent
	if bug.lastCommit != "" {
		hash, err = repo.StoreCommitWithParent(hash, bug.lastCommit)
	} else {
		hash, err = repo.StoreCommit(hash)
	}

	if err != nil {
		return err
	}

	bug.lastCommit = hash

	// if it was the first commit, use the commit hash as bug id
	if bug.id == "" {
		bug.id = entity.Id(hash)
	}

	// Create or update the Git reference for this bug
	// When pushing later, the remote will ensure that this ref update
	// is fast-forward, that is no data has been overwritten
	ref := fmt.Sprintf("%s%s", bugsRefPattern, bug.id)
	err = repo.UpdateRef(ref, hash)

	if err != nil {
		return err
	}

	// the staged pack is now committed: move it to the packs and reset staging
	bug.staging.commitHash = hash
	bug.packs = append(bug.packs, bug.staging)
	bug.staging = OperationPack{}

	return nil
}
552
553func (bug *Bug) CommitAsNeeded(repo repository.ClockedRepo) error {
554 if !bug.NeedCommit() {
555 return nil
556 }
557 return bug.Commit(repo)
558}
559
// NeedCommit indicate that the bug need to be committed: there are
// pending operations in the staging area.
func (bug *Bug) NeedCommit() bool {
	return !bug.staging.IsEmpty()
}
563
564func makeMediaTree(pack OperationPack) []repository.TreeEntry {
565 var tree []repository.TreeEntry
566 counter := 0
567 added := make(map[repository.Hash]interface{})
568
569 for _, ops := range pack.Operations {
570 for _, file := range ops.GetFiles() {
571 if _, has := added[file]; !has {
572 tree = append(tree, repository.TreeEntry{
573 ObjectType: repository.Blob,
574 Hash: file,
575 // The name is not important here, we only need to
576 // reference the blob.
577 Name: fmt.Sprintf("file%d", counter),
578 })
579 counter++
580 added[file] = struct{}{}
581 }
582 }
583 }
584
585 return tree
586}
587
// Merge a different version of the same bug by rebasing operations of this bug
// that are not present in the other on top of the chain of operations of the
// other version.
//
// Returns true when a merge actually happened (the ref was updated),
// false when there was nothing to do.
func (bug *Bug) Merge(repo repository.Repo, other Interface) (bool, error) {
	var otherBug = bugFromInterface(other)

	// Note: a faster merge should be possible without actually reading and parsing
	// all operations pack of our side.
	// Reading the other side is still necessary to validate remote data, at least
	// for new operations

	if bug.id != otherBug.id {
		return false, errors.New("merging unrelated bugs is not supported")
	}

	if len(otherBug.staging.Operations) > 0 {
		return false, errors.New("merging a bug with a non-empty staging is not supported")
	}

	if bug.lastCommit == "" || otherBug.lastCommit == "" {
		return false, errors.New("can't merge a bug that has never been stored")
	}

	ancestor, err := repo.FindCommonAncestor(bug.lastCommit, otherBug.lastCommit)
	if err != nil {
		return false, errors.Wrap(err, "can't find common ancestor")
	}

	ancestorIndex := 0
	newPacks := make([]OperationPack, 0, len(bug.packs))

	// Find the root of the rebase: keep our packs up to (and including)
	// the common ancestor commit.
	for i, pack := range bug.packs {
		newPacks = append(newPacks, pack)

		if pack.commitHash == ancestor {
			ancestorIndex = i
			break
		}
	}

	if len(otherBug.packs) == ancestorIndex+1 {
		// Nothing to rebase, return early
		return false, nil
	}

	// get other bug's extra packs
	for i := ancestorIndex + 1; i < len(otherBug.packs); i++ {
		// clone is probably not necessary
		newPack := otherBug.packs[i].Clone()

		newPacks = append(newPacks, newPack)
		bug.lastCommit = newPack.commitHash
	}

	// rebase our extra packs: re-commit each of our post-ancestor trees
	// on top of the other side's chain
	for i := ancestorIndex + 1; i < len(bug.packs); i++ {
		pack := bug.packs[i]

		// get the referenced git tree
		treeHash, err := repo.GetTreeHash(pack.commitHash)

		if err != nil {
			return false, err
		}

		// create a new commit with the correct ancestor
		hash, err := repo.StoreCommitWithParent(treeHash, bug.lastCommit)

		if err != nil {
			return false, err
		}

		// replace the pack
		newPack := pack.Clone()
		newPack.commitHash = hash
		newPacks = append(newPacks, newPack)

		// update the bug
		bug.lastCommit = hash
	}

	bug.packs = newPacks

	// Update the git ref
	err = repo.UpdateRef(bugsRefPattern+bug.id.String(), bug.lastCommit)
	if err != nil {
		return false, err
	}

	return true, nil
}
680
681// Id return the Bug identifier
682func (bug *Bug) Id() entity.Id {
683 if bug.id == "" {
684 // simply panic as it would be a coding error
685 // (using an id of a bug not stored yet)
686 panic("no id yet")
687 }
688 return bug.id
689}
690
// CreateLamportTime return the Lamport time of creation
// (set when the first commit of the bug is made).
func (bug *Bug) CreateLamportTime() lamport.Time {
	return bug.createTime
}
695
// EditLamportTime return the Lamport time of the last edit
// (updated on each commit).
func (bug *Bug) EditLamportTime() lamport.Time {
	return bug.editTime
}
700
701// Lookup for the very first operation of the bug.
702// For a valid Bug, this operation should be a CreateOp
703func (bug *Bug) FirstOp() Operation {
704 for _, pack := range bug.packs {
705 for _, op := range pack.Operations {
706 return op
707 }
708 }
709
710 if !bug.staging.IsEmpty() {
711 return bug.staging.Operations[0]
712 }
713
714 return nil
715}
716
717// Lookup for the very last operation of the bug.
718// For a valid Bug, should never be nil
719func (bug *Bug) LastOp() Operation {
720 if !bug.staging.IsEmpty() {
721 return bug.staging.Operations[len(bug.staging.Operations)-1]
722 }
723
724 if len(bug.packs) == 0 {
725 return nil
726 }
727
728 lastPack := bug.packs[len(bug.packs)-1]
729
730 if len(lastPack.Operations) == 0 {
731 return nil
732 }
733
734 return lastPack.Operations[len(lastPack.Operations)-1]
735}
736
737// Compile a bug in a easily usable snapshot
738func (bug *Bug) Compile() Snapshot {
739 snap := Snapshot{
740 id: bug.id,
741 Status: OpenStatus,
742 }
743
744 it := NewOperationIterator(bug)
745
746 for it.Next() {
747 op := it.Value()
748 op.Apply(&snap)
749 snap.Operations = append(snap.Operations, op)
750 }
751
752 return snap
753}