// Package bug contains the bug data model and low-level related functions
package bug

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/pkg/errors"

	"github.com/MichaelMure/git-bug/entity"
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
	"github.com/MichaelMure/git-bug/util/git"
	"github.com/MichaelMure/git-bug/util/lamport"
)

const bugsRefPattern = "refs/bugs/"
const bugsRemoteRefPattern = "refs/remotes/%s/bugs/"

const opsEntryName = "ops"
const rootEntryName = "root"
const mediaEntryName = "media"

const createClockEntryPrefix = "create-clock-"
const createClockEntryPattern = "create-clock-%d"
const editClockEntryPrefix = "edit-clock-"
const editClockEntryPattern = "edit-clock-%d"

const creationClockName = "bug-create"
const editClockName = "bug-edit"

var ErrBugNotExist = errors.New("bug doesn't exist")

func NewErrMultipleMatchBug(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("bug", matching)
}

func NewErrMultipleMatchOp(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("operation", matching)
}

var _ Interface = &Bug{}
var _ entity.Interface = &Bug{}

// Bug holds the data of a bug thread, organized in a way close to
// how it will be persisted inside Git. This is the data structure
// used to merge two different versions of the same Bug.
type Bug struct {

	// A Lamport clock is a logical clock that allows ordering events
	// inside a distributed system.
	// It must be the first field in this struct due to https://github.com/golang/go/issues/599
	createTime lamport.Time
	editTime   lamport.Time

	// Id used as unique identifier
	id entity.Id

	lastCommit git.Hash
	rootPack   git.Hash

	// all the committed operations
	packs []OperationPack

	// a temporary pack of operations used for convenience to pile up new operations
	// before a commit
	staging OperationPack
}

// NewBug creates a new Bug
func NewBug() *Bug {
	// No id yet
	// No logical clock yet
	return &Bug{}
}
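
// A minimal lifecycle sketch, assuming repo is a repository.ClockedRepo and op
// is an already-built create Operation (the first operation of a bug has to be
// a create operation):
//
//	b := NewBug()
//	b.Append(op)
//	if err := b.Commit(repo); err != nil {
//		// the staging area is invalid or git failed
//	}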

// FindLocalBug finds an existing Bug matching a prefix
func FindLocalBug(repo repository.ClockedRepo, prefix string) (*Bug, error) {
	ids, err := ListLocalIds(repo)

	if err != nil {
		return nil, err
	}

	// preallocate but empty
	matching := make([]entity.Id, 0, 5)

	for _, id := range ids {
		if id.HasPrefix(prefix) {
			matching = append(matching, id)
		}
	}

	if len(matching) == 0 {
		return nil, errors.New("no matching bug found")
	}

	if len(matching) > 1 {
		return nil, NewErrMultipleMatchBug(matching)
	}

	return ReadLocalBug(repo, matching[0])
}
105
106// ReadLocalBug will read a local bug from its hash
107func ReadLocalBug(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
108 ref := bugsRefPattern + id.String()
109 return readBug(repo, ref)
110}
111
112// ReadRemoteBug will read a remote bug from its hash
113func ReadRemoteBug(repo repository.ClockedRepo, remote string, id string) (*Bug, error) {
114 ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id
115 return readBug(repo, ref)
116}

// readBug will read and parse a Bug from git
func readBug(repo repository.ClockedRepo, ref string) (*Bug, error) {
	refSplit := strings.Split(ref, "/")
	id := entity.Id(refSplit[len(refSplit)-1])

	if err := id.Validate(); err != nil {
		return nil, errors.Wrap(err, "invalid ref")
	}

	hashes, err := repo.ListCommits(ref)

	// TODO: this is not perfect, it might be a command invocation error
	if err != nil {
		return nil, ErrBugNotExist
	}

	bug := Bug{
		id:       id,
		editTime: 0,
	}

	// Load each OperationPack
	for _, hash := range hashes {
		entries, err := repo.ReadTree(hash)
		if err != nil {
			return nil, errors.Wrap(err, "can't list git tree entries")
		}

		bug.lastCommit = hash

		var opsEntry repository.TreeEntry
		opsFound := false
		var rootEntry repository.TreeEntry
		rootFound := false
		var createTime uint64
		var editTime uint64

		for _, entry := range entries {
			if entry.Name == opsEntryName {
				opsEntry = entry
				opsFound = true
				continue
			}
			if entry.Name == rootEntryName {
				rootEntry = entry
				rootFound = true
			}
			if strings.HasPrefix(entry.Name, createClockEntryPrefix) {
				n, err := fmt.Sscanf(entry.Name, createClockEntryPattern, &createTime)
				if err != nil {
					return nil, errors.Wrap(err, "can't read create lamport time")
				}
				if n != 1 {
					return nil, fmt.Errorf("could not parse create time lamport value")
				}
			}
			if strings.HasPrefix(entry.Name, editClockEntryPrefix) {
				n, err := fmt.Sscanf(entry.Name, editClockEntryPattern, &editTime)
				if err != nil {
					return nil, errors.Wrap(err, "can't read edit lamport time")
				}
				if n != 1 {
					return nil, fmt.Errorf("could not parse edit time lamport value")
				}
			}
		}

		if !opsFound {
			return nil, errors.New("invalid tree, missing the ops entry")
		}
		if !rootFound {
			return nil, errors.New("invalid tree, missing the root entry")
		}

		if bug.rootPack == "" {
			bug.rootPack = rootEntry.Hash
			bug.createTime = lamport.Time(createTime)
		}

		// Due to rebase, edit Lamport times are not necessarily ordered
		if editTime > uint64(bug.editTime) {
			bug.editTime = lamport.Time(editTime)
		}

		// Update the clocks
		createClock, err := repo.GetOrCreateClock(creationClockName)
		if err != nil {
			return nil, err
		}
		if err := createClock.Witness(bug.createTime); err != nil {
			return nil, errors.Wrap(err, "failed to update create lamport clock")
		}
		editClock, err := repo.GetOrCreateClock(editClockName)
		if err != nil {
			return nil, err
		}
		if err := editClock.Witness(bug.editTime); err != nil {
			return nil, errors.Wrap(err, "failed to update edit lamport clock")
		}

		data, err := repo.ReadData(opsEntry.Hash)
		if err != nil {
			return nil, errors.Wrap(err, "failed to read git blob data")
		}

		opp := &OperationPack{}
		err = json.Unmarshal(data, &opp)

		if err != nil {
			return nil, errors.Wrap(err, "failed to decode OperationPack json")
		}

		// tag the pack with the commit hash
		opp.commitHash = hash

		bug.packs = append(bug.packs, *opp)
	}

	// Make sure that the identities are properly loaded
	resolver := identity.NewSimpleResolver(repo)
	err = bug.EnsureIdentities(resolver)
	if err != nil {
		return nil, err
	}

	return &bug, nil
}

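// StreamedBug is the outcome of reading a single bug during a streaming read:
// either a fully parsed Bug, or the error that interrupted the stream.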
type StreamedBug struct {
	Bug *Bug
	Err error
}

// ReadAllLocalBugs reads and parses all local bugs
func ReadAllLocalBugs(repo repository.ClockedRepo) <-chan StreamedBug {
	return readAllBugs(repo, bugsRefPattern)
}

// ReadAllRemoteBugs reads and parses all remote bugs for a given remote
func ReadAllRemoteBugs(repo repository.ClockedRepo, remote string) <-chan StreamedBug {
	refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
	return readAllBugs(repo, refPrefix)
}
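
// A minimal consumption sketch for the streamed API, assuming repo is a
// repository.ClockedRepo:
//
//	for streamed := range ReadAllLocalBugs(repo) {
//		if streamed.Err != nil {
//			// the channel is closed right after the first error
//			break
//		}
//		fmt.Println(streamed.Bug.Id())
//	}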

// Read and parse all available bugs with a given ref prefix
func readAllBugs(repo repository.ClockedRepo, refPrefix string) <-chan StreamedBug {
	out := make(chan StreamedBug)

	go func() {
		defer close(out)

		refs, err := repo.ListRefs(refPrefix)
		if err != nil {
			out <- StreamedBug{Err: err}
			return
		}

		for _, ref := range refs {
			b, err := readBug(repo, ref)

			if err != nil {
				out <- StreamedBug{Err: err}
				return
			}

			out <- StreamedBug{Bug: b}
		}
	}()

	return out
}

// ListLocalIds lists all the available local bug ids
func ListLocalIds(repo repository.Repo) ([]entity.Id, error) {
	refs, err := repo.ListRefs(bugsRefPattern)
	if err != nil {
		return nil, err
	}

	return refsToIds(refs), nil
}

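// refsToIds extracts the bug ids from a list of git references, the id being
// the last element of each ref path.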
func refsToIds(refs []string) []entity.Id {
	ids := make([]entity.Id, len(refs))

	for i, ref := range refs {
		split := strings.Split(ref, "/")
		ids[i] = entity.Id(split[len(split)-1])
	}

	return ids
}

// Validate checks if the Bug data is valid
func (bug *Bug) Validate() error {
	// non-empty
	if len(bug.packs) == 0 && bug.staging.IsEmpty() {
		return fmt.Errorf("bug has no operations")
	}

	// check that each pack and its operations are valid
	for _, pack := range bug.packs {
		if err := pack.Validate(); err != nil {
			return err
		}
	}

	// check that staging is valid if needed
	if !bug.staging.IsEmpty() {
		if err := bug.staging.Validate(); err != nil {
			return errors.Wrap(err, "staging")
		}
	}

	// The very first Op should be a CreateOp
	firstOp := bug.FirstOp()
	if firstOp == nil || firstOp.base().OperationType != CreateOp {
		return fmt.Errorf("first operation should be a Create op")
	}

	// The bug Id should be the hash of the first commit
	if len(bug.packs) > 0 && string(bug.packs[0].commitHash) != bug.id.String() {
		return fmt.Errorf("bug id should be the first commit hash")
	}

	// Check that there is no other CreateOp
	// Check that there are no colliding operation IDs
	it := NewOperationIterator(bug)
	createCount := 0
	ids := make(map[entity.Id]struct{})
	for it.Next() {
		if it.Value().base().OperationType == CreateOp {
			createCount++
		}
		if _, ok := ids[it.Value().Id()]; ok {
			return fmt.Errorf("id collision: %s", it.Value().Id())
		}
		ids[it.Value().Id()] = struct{}{}
	}

	if createCount != 1 {
		return fmt.Errorf("only one Create op allowed")
	}

	return nil
}

// Append an operation into the staging area, to be committed later
func (bug *Bug) Append(op Operation) {
	bug.staging.Append(op)
}

// Commit writes the staging area in Git and moves the operations to the packs
func (bug *Bug) Commit(repo repository.ClockedRepo) error {

	if !bug.NeedCommit() {
		return fmt.Errorf("can't commit a bug with no pending operation")
	}

	if err := bug.Validate(); err != nil {
		return errors.Wrap(err, "can't commit a bug with invalid data")
	}

	// Write the Ops as a Git blob containing the serialized array
	hash, err := bug.staging.Write(repo)
	if err != nil {
		return err
	}

	if bug.rootPack == "" {
		bug.rootPack = hash
	}

	// Make a Git tree referencing this blob
	tree := []repository.TreeEntry{
		// the last pack of ops
		{ObjectType: repository.Blob, Hash: hash, Name: opsEntryName},
		// always the first pack of ops (might be the same)
		{ObjectType: repository.Blob, Hash: bug.rootPack, Name: rootEntryName},
	}

	// Reference, if any, all the files required by the ops
	// Git will check that they actually exist in the storage and will make sure
	// to push/pull them as needed.
	mediaTree := makeMediaTree(bug.staging)
	if len(mediaTree) > 0 {
		mediaTreeHash, err := repo.StoreTree(mediaTree)
		if err != nil {
			return err
		}
		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Tree,
			Hash:       mediaTreeHash,
			Name:       mediaEntryName,
		})
	}

	// Store the logical clocks as well
	// --> edit clock for each OperationPack/commit
	// --> create clock only for the first OperationPack/commit
	//
	// To avoid having one blob for each clock value, clocks are serialized
	// directly into the entry name
	emptyBlobHash, err := repo.StoreData([]byte{})
	if err != nil {
		return err
	}

	editClock, err := repo.GetOrCreateClock(editClockName)
	if err != nil {
		return err
	}
	bug.editTime, err = editClock.Increment()
	if err != nil {
		return err
	}

	tree = append(tree, repository.TreeEntry{
		ObjectType: repository.Blob,
		Hash:       emptyBlobHash,
		Name:       fmt.Sprintf(editClockEntryPattern, bug.editTime),
	})
	if bug.lastCommit == "" {
		createClock, err := repo.GetOrCreateClock(creationClockName)
		if err != nil {
			return err
		}
		bug.createTime, err = createClock.Increment()
		if err != nil {
			return err
		}

		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Blob,
			Hash:       emptyBlobHash,
			Name:       fmt.Sprintf(createClockEntryPattern, bug.createTime),
		})
	}

	// Store the tree
	hash, err = repo.StoreTree(tree)
	if err != nil {
		return err
	}

	// Write a Git commit referencing the tree, with the previous commit as parent
	if bug.lastCommit != "" {
		hash, err = repo.StoreCommitWithParent(hash, bug.lastCommit)
	} else {
		hash, err = repo.StoreCommit(hash)
	}

	if err != nil {
		return err
	}

	bug.lastCommit = hash

	// if it was the first commit, use the commit hash as bug id
	if bug.id == "" {
		bug.id = entity.Id(hash)
	}

	// Create or update the Git reference for this bug
	// When pushing later, the remote will ensure that this ref update
	// is fast-forward, that is, no data has been overwritten
	ref := fmt.Sprintf("%s%s", bugsRefPattern, bug.id)
	err = repo.UpdateRef(ref, hash)

	if err != nil {
		return err
	}

	bug.staging.commitHash = hash
	bug.packs = append(bug.packs, bug.staging)
	bug.staging = OperationPack{}

	return nil
}

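// CommitAsNeeded commits the staging area only if there is something to
// commit, and is a no-op otherwise.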
func (bug *Bug) CommitAsNeeded(repo repository.ClockedRepo) error {
	if !bug.NeedCommit() {
		return nil
	}
	return bug.Commit(repo)
}

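// NeedCommit returns true if the staging area holds operations that have not
// been committed yet.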
func (bug *Bug) NeedCommit() bool {
	return !bug.staging.IsEmpty()
}

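// makeMediaTree references, once each, the file blobs used by the operations
// of the given pack, so that git stores and transports them alongside the bug.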
func makeMediaTree(pack OperationPack) []repository.TreeEntry {
	var tree []repository.TreeEntry
	counter := 0
	added := make(map[git.Hash]interface{})

	for _, ops := range pack.Operations {
		for _, file := range ops.GetFiles() {
			if _, has := added[file]; !has {
				tree = append(tree, repository.TreeEntry{
					ObjectType: repository.Blob,
					Hash:       file,
					// The name is not important here, we only need to
					// reference the blob.
					Name: fmt.Sprintf("file%d", counter),
				})
				counter++
				added[file] = struct{}{}
			}
		}
	}

	return tree
}

// Merge a different version of the same bug by rebasing operations of this bug
// that are not present in the other on top of the chain of operations of the
// other version.
func (bug *Bug) Merge(repo repository.Repo, other Interface) (bool, error) {
	var otherBug = bugFromInterface(other)

	// Note: a faster merge should be possible without actually reading and parsing
	// all the operation packs on our side.
	// Reading the other side is still necessary to validate remote data, at least
	// for new operations

	if bug.id != otherBug.id {
		return false, errors.New("merging unrelated bugs is not supported")
	}

	if len(otherBug.staging.Operations) > 0 {
		return false, errors.New("merging a bug with a non-empty staging is not supported")
	}

	if bug.lastCommit == "" || otherBug.lastCommit == "" {
		return false, errors.New("can't merge a bug that has never been stored")
	}

	ancestor, err := repo.FindCommonAncestor(bug.lastCommit, otherBug.lastCommit)
	if err != nil {
		return false, errors.Wrap(err, "can't find common ancestor")
	}

	ancestorIndex := 0
	newPacks := make([]OperationPack, 0, len(bug.packs))

	// Find the root of the rebase
	for i, pack := range bug.packs {
		newPacks = append(newPacks, pack)

		if pack.commitHash == ancestor {
			ancestorIndex = i
			break
		}
	}

	if len(otherBug.packs) == ancestorIndex+1 {
		// Nothing to rebase, return early
		return false, nil
	}

	// get other bug's extra packs
	for i := ancestorIndex + 1; i < len(otherBug.packs); i++ {
		// clone is probably not necessary
		newPack := otherBug.packs[i].Clone()

		newPacks = append(newPacks, newPack)
		bug.lastCommit = newPack.commitHash
	}

	// rebase our extra packs
	for i := ancestorIndex + 1; i < len(bug.packs); i++ {
		pack := bug.packs[i]

		// get the referenced git tree
		treeHash, err := repo.GetTreeHash(pack.commitHash)

		if err != nil {
			return false, err
		}

		// create a new commit with the correct ancestor
		hash, err := repo.StoreCommitWithParent(treeHash, bug.lastCommit)

		if err != nil {
			return false, err
		}

		// replace the pack
		newPack := pack.Clone()
		newPack.commitHash = hash
		newPacks = append(newPacks, newPack)

		// update the bug
		bug.lastCommit = hash
	}

	bug.packs = newPacks

	// Update the git ref
	err = repo.UpdateRef(bugsRefPattern+bug.id.String(), bug.lastCommit)
	if err != nil {
		return false, err
	}

	return true, nil
}
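
// A minimal merge sketch, assuming local and remote are two versions of the
// same bug, for instance obtained with ReadLocalBug and ReadRemoteBug:
//
//	updated, err := local.Merge(repo, remote)
//	if err != nil {
//		// unrelated bugs, or a git error
//	}
//	// updated is true when local gained new operations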

// Id returns the Bug identifier
func (bug *Bug) Id() entity.Id {
	if bug.id == "" {
		// simply panic as it would be a coding error
		// (using an id of a bug not stored yet)
		panic("no id yet")
	}
	return bug.id
}

// CreateLamportTime returns the Lamport time of creation
func (bug *Bug) CreateLamportTime() lamport.Time {
	return bug.createTime
}

// EditLamportTime returns the Lamport time of the last edit
func (bug *Bug) EditLamportTime() lamport.Time {
	return bug.editTime
}

// FirstOp looks up the very first operation of the bug.
// For a valid Bug, this operation should be a CreateOp
func (bug *Bug) FirstOp() Operation {
	for _, pack := range bug.packs {
		for _, op := range pack.Operations {
			return op
		}
	}

	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[0]
	}

	return nil
}

// LastOp looks up the very last operation of the bug.
// For a valid Bug, should never be nil
func (bug *Bug) LastOp() Operation {
	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[len(bug.staging.Operations)-1]
	}

	if len(bug.packs) == 0 {
		return nil
	}

	lastPack := bug.packs[len(bug.packs)-1]

	if len(lastPack.Operations) == 0 {
		return nil
	}

	return lastPack.Operations[len(lastPack.Operations)-1]
}

// Compile a bug into an easily usable snapshot
func (bug *Bug) Compile() Snapshot {
	snap := Snapshot{
		id:     bug.id,
		Status: OpenStatus,
	}

	it := NewOperationIterator(bug)

	for it.Next() {
		op := it.Value()
		op.Apply(&snap)
		snap.Operations = append(snap.Operations, op)
	}

	return snap
}
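
// A minimal read-and-compile sketch, assuming repo is a repository.ClockedRepo
// and id is a full bug id:
//
//	b, err := ReadLocalBug(repo, id)
//	if err != nil {
//		// the bug doesn't exist or can't be read
//	}
//	snap := b.Compile()
//	fmt.Println(snap.Status, len(snap.Operations))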