// Package bug contains the bug data model and low-level related functions
package bug

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/pkg/errors"

	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
	"github.com/MichaelMure/git-bug/util/git"
	"github.com/MichaelMure/git-bug/util/lamport"
)

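// Each bug is stored under its own git ref: a local bug lives under
// refs/bugs/<id>, while a bug fetched from a remote lives under
// refs/remotes/<remote>/bugs/<id> (for example refs/remotes/origin/bugs/<id>
// with the conventional "origin" remote).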
const bugsRefPattern = "refs/bugs/"
const bugsRemoteRefPattern = "refs/remotes/%s/bugs/"

const opsEntryName = "ops"
const rootEntryName = "root"
const mediaEntryName = "media"

const createClockEntryPrefix = "create-clock-"
const createClockEntryPattern = "create-clock-%d"
const editClockEntryPrefix = "edit-clock-"
const editClockEntryPattern = "edit-clock-%d"

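// A bug id is the hexadecimal hash of its first git commit (40 characters);
// the human-friendly id is its 7-character prefix, similar to git's
// abbreviated hashes.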
const idLength = 40
const humanIdLength = 7

var ErrBugNotExist = errors.New("bug doesn't exist")

type ErrMultipleMatch struct {
	Matching []string
}

func (e ErrMultipleMatch) Error() string {
	return fmt.Sprintf("Multiple matching bugs found:\n%s", strings.Join(e.Matching, "\n"))
}

var _ Interface = &Bug{}

// Bug holds the data of a bug thread, organized in a way close to
// how it will be persisted inside Git. This is the data structure
// used to merge two different versions of the same Bug.
type Bug struct {

	// A Lamport clock is a logical clock that allows ordering events
	// inside a distributed system.
	// It must be the first field in this struct due to https://github.com/golang/go/issues/599
	createTime lamport.Time
	editTime   lamport.Time

	// Id used as unique identifier
	id string

	lastCommit git.Hash
	rootPack   git.Hash

	// all the committed operations
	packs []OperationPack

	// a temporary pack of operations used for convenience to pile up new operations
	// before a commit
	staging OperationPack
}

// NewBug creates a new Bug
func NewBug() *Bug {
	// No id yet
	// No logical clock yet
	return &Bug{}
}
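
// Typical lifecycle (illustrative sketch; "op" stands for any Operation, e.g.
// the bug creation operation): create the bug, pile operations into the
// staging area, then commit them to the repository.
//
//	b := NewBug()
//	b.Append(op)
//	if err := b.Commit(repo); err != nil {
//		// handle the error
//	}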

// FindLocalBug finds an existing Bug matching a prefix
func FindLocalBug(repo repository.ClockedRepo, prefix string) (*Bug, error) {
	ids, err := ListLocalIds(repo)

	if err != nil {
		return nil, err
	}

	// preallocate but empty
	matching := make([]string, 0, 5)

	for _, id := range ids {
		if strings.HasPrefix(id, prefix) {
			matching = append(matching, id)
		}
	}

	if len(matching) == 0 {
		return nil, errors.New("no matching bug found.")
	}

	if len(matching) > 1 {
		return nil, ErrMultipleMatch{Matching: matching}
	}

	return ReadLocalBug(repo, matching[0])
}
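
// Example (illustrative): resolving a bug from a short, human-typed prefix.
//
//	b, err := FindLocalBug(repo, "28ae0c2")
//	// err is an ErrMultipleMatch if several bug ids share the prefix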

// ReadLocalBug will read a local bug from its id
func ReadLocalBug(repo repository.ClockedRepo, id string) (*Bug, error) {
	ref := bugsRefPattern + id
	return readBug(repo, ref)
}

// ReadRemoteBug will read a remote bug from its id
func ReadRemoteBug(repo repository.ClockedRepo, remote string, id string) (*Bug, error) {
	ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id
	return readBug(repo, ref)
}

// readBug will read and parse a Bug from git
func readBug(repo repository.ClockedRepo, ref string) (*Bug, error) {
	refSplit := strings.Split(ref, "/")
	id := refSplit[len(refSplit)-1]

	if len(id) != idLength {
		return nil, fmt.Errorf("invalid ref length")
	}

	hashes, err := repo.ListCommits(ref)

	// TODO: this is not perfect, it might be a command invoke error
	if err != nil {
		return nil, ErrBugNotExist
	}

	bug := Bug{
		id: id,
	}

	// Load each OperationPack
	for _, hash := range hashes {
		entries, err := repo.ListEntries(hash)
		if err != nil {
			return nil, errors.Wrap(err, "can't list git tree entries")
		}

		bug.lastCommit = hash

		var opsEntry repository.TreeEntry
		opsFound := false
		var rootEntry repository.TreeEntry
		rootFound := false
		var createTime uint64
		var editTime uint64

		for _, entry := range entries {
			if entry.Name == opsEntryName {
				opsEntry = entry
				opsFound = true
				continue
			}
			if entry.Name == rootEntryName {
				rootEntry = entry
				rootFound = true
			}
			if strings.HasPrefix(entry.Name, createClockEntryPrefix) {
				n, err := fmt.Sscanf(string(entry.Name), createClockEntryPattern, &createTime)
				if err != nil {
					return nil, errors.Wrap(err, "can't read create lamport time")
				}
				if n != 1 {
					return nil, fmt.Errorf("could not parse create time lamport value")
				}
			}
			if strings.HasPrefix(entry.Name, editClockEntryPrefix) {
				n, err := fmt.Sscanf(string(entry.Name), editClockEntryPattern, &editTime)
				if err != nil {
					return nil, errors.Wrap(err, "can't read edit lamport time")
				}
				if n != 1 {
					return nil, fmt.Errorf("could not parse edit time lamport value")
				}
			}
		}

		if !opsFound {
			return nil, errors.New("invalid tree, missing the ops entry")
		}
		if !rootFound {
			return nil, errors.New("invalid tree, missing the root entry")
		}

		if bug.rootPack == "" {
			bug.rootPack = rootEntry.Hash
			bug.createTime = lamport.Time(createTime)
		}

		bug.editTime = lamport.Time(editTime)

		// Update the clocks
		if err := repo.CreateWitness(bug.createTime); err != nil {
			return nil, errors.Wrap(err, "failed to update create lamport clock")
		}
		if err := repo.EditWitness(bug.editTime); err != nil {
			return nil, errors.Wrap(err, "failed to update edit lamport clock")
		}

		data, err := repo.ReadData(opsEntry.Hash)
		if err != nil {
			return nil, errors.Wrap(err, "failed to read git blob data")
		}

		opp := &OperationPack{}
		err = json.Unmarshal(data, &opp)

		if err != nil {
			return nil, errors.Wrap(err, "failed to decode OperationPack json")
		}

		// tag the pack with the commit hash
		opp.commitHash = hash

		bug.packs = append(bug.packs, *opp)
	}

	// Make sure that the identities are properly loaded
	resolver := identity.NewSimpleResolver(repo)
	err = bug.EnsureIdentities(resolver)
	if err != nil {
		return nil, err
	}

	return &bug, nil
}

type StreamedBug struct {
	Bug *Bug
	Err error
}

// ReadAllLocalBugs reads and parses all local bugs
func ReadAllLocalBugs(repo repository.ClockedRepo) <-chan StreamedBug {
	return readAllBugs(repo, bugsRefPattern)
}

// ReadAllRemoteBugs reads and parses all remote bugs for a given remote
func ReadAllRemoteBugs(repo repository.ClockedRepo, remote string) <-chan StreamedBug {
	refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
	return readAllBugs(repo, refPrefix)
}

// Read and parse all available bugs with a given ref prefix
func readAllBugs(repo repository.ClockedRepo, refPrefix string) <-chan StreamedBug {
	out := make(chan StreamedBug)

	go func() {
		defer close(out)

		refs, err := repo.ListRefs(refPrefix)
		if err != nil {
			out <- StreamedBug{Err: err}
			return
		}

		for _, ref := range refs {
			b, err := readBug(repo, ref)

			if err != nil {
				out <- StreamedBug{Err: err}
				return
			}

			out <- StreamedBug{Bug: b}
		}
	}()

	return out
}
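
// Example (illustrative): draining the stream of bugs. On error the goroutine
// sends a single StreamedBug carrying the error and then closes the channel.
//
//	for streamed := range ReadAllLocalBugs(repo) {
//		if streamed.Err != nil {
//			// handle the error
//			break
//		}
//		// use streamed.Bug
//	}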

// ListLocalIds lists all the available local bug ids
func ListLocalIds(repo repository.Repo) ([]string, error) {
	refs, err := repo.ListRefs(bugsRefPattern)
	if err != nil {
		return nil, err
	}

	return refsToIds(refs), nil
}

func refsToIds(refs []string) []string {
	ids := make([]string, len(refs))

	for i, ref := range refs {
		split := strings.Split(ref, "/")
		ids[i] = split[len(split)-1]
	}

	return ids
}

// Validate checks if the Bug data is valid
func (bug *Bug) Validate() error {
	// non-empty
	if len(bug.packs) == 0 && bug.staging.IsEmpty() {
		return fmt.Errorf("bug has no operations")
	}

	// check that each pack and its operations are valid
	for _, pack := range bug.packs {
		if err := pack.Validate(); err != nil {
			return err
		}
	}

	// check that the staging area is valid if needed
	if !bug.staging.IsEmpty() {
		if err := bug.staging.Validate(); err != nil {
			return errors.Wrap(err, "staging")
		}
	}

	// The very first Op should be a CreateOp
	firstOp := bug.FirstOp()
	if firstOp == nil || firstOp.base().OperationType != CreateOp {
		return fmt.Errorf("first operation should be a Create op")
	}

	// The bug ID should be the hash of the first commit
	if len(bug.packs) > 0 && string(bug.packs[0].commitHash) != bug.id {
		return fmt.Errorf("bug id should be the first commit hash")
	}

	// Check that there is exactly one CreateOp
	it := NewOperationIterator(bug)
	createCount := 0
	for it.Next() {
		if it.Value().base().OperationType == CreateOp {
			createCount++
		}
	}

	if createCount != 1 {
		return fmt.Errorf("only one Create op allowed")
	}

	return nil
}

// Append an operation into the staging area, to be committed later
func (bug *Bug) Append(op Operation) {
	bug.staging.Append(op)
}

// HasPendingOp tells if the bug needs to be committed
func (bug *Bug) HasPendingOp() bool {
	return !bug.staging.IsEmpty()
}

// Commit writes the staging area in Git and moves the operations to the packs
func (bug *Bug) Commit(repo repository.ClockedRepo) error {

	if !bug.NeedCommit() {
		return fmt.Errorf("can't commit a bug with no pending operation")
	}

	if err := bug.Validate(); err != nil {
		return errors.Wrap(err, "can't commit a bug with invalid data")
	}

	// Write the Ops as a Git blob containing the serialized array
	hash, err := bug.staging.Write(repo)
	if err != nil {
		return err
	}

	if bug.rootPack == "" {
		bug.rootPack = hash
	}

	// Make a Git tree referencing this blob
	tree := []repository.TreeEntry{
		// the last pack of ops
		{ObjectType: repository.Blob, Hash: hash, Name: opsEntryName},
		// always the first pack of ops (might be the same)
		{ObjectType: repository.Blob, Hash: bug.rootPack, Name: rootEntryName},
	}

	// Reference, if any, all the files required by the ops
	// Git will check that they actually exist in the storage and will make sure
	// to push/pull them as needed.
	mediaTree := makeMediaTree(bug.staging)
	if len(mediaTree) > 0 {
		mediaTreeHash, err := repo.StoreTree(mediaTree)
		if err != nil {
			return err
		}
		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Tree,
			Hash:       mediaTreeHash,
			Name:       mediaEntryName,
		})
	}

	// Store the logical clocks as well
	// --> an edit clock for each OperationPack/commit
	// --> a create clock only for the first OperationPack/commit
	//
	// To avoid having one blob for each clock value, clocks are serialized
	// directly into the entry name
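	//
	// For example (illustrative values), the tree of a first commit could
	// contain the entries "ops", "root", "create-clock-4" and "edit-clock-21",
	// with both clock entries pointing to the same empty blob.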
	emptyBlobHash, err := repo.StoreData([]byte{})
	if err != nil {
		return err
	}

	bug.editTime, err = repo.EditTimeIncrement()
	if err != nil {
		return err
	}

	tree = append(tree, repository.TreeEntry{
		ObjectType: repository.Blob,
		Hash:       emptyBlobHash,
		Name:       fmt.Sprintf(editClockEntryPattern, bug.editTime),
	})
	if bug.lastCommit == "" {
		bug.createTime, err = repo.CreateTimeIncrement()
		if err != nil {
			return err
		}

		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Blob,
			Hash:       emptyBlobHash,
			Name:       fmt.Sprintf(createClockEntryPattern, bug.createTime),
		})
	}

	// Store the tree
	hash, err = repo.StoreTree(tree)
	if err != nil {
		return err
	}

	// Write a Git commit referencing the tree, with the previous commit as parent
	if bug.lastCommit != "" {
		hash, err = repo.StoreCommitWithParent(hash, bug.lastCommit)
	} else {
		hash, err = repo.StoreCommit(hash)
	}

	if err != nil {
		return err
	}

	bug.lastCommit = hash

	// if it was the first commit, use the commit hash as the bug id
	if bug.id == "" {
		bug.id = string(hash)
	}

	// Create or update the Git reference for this bug
	// When pushing later, the remote will ensure that this ref update
	// is a fast-forward, that is, that no data has been overwritten
	ref := fmt.Sprintf("%s%s", bugsRefPattern, bug.id)
	err = repo.UpdateRef(ref, hash)

	if err != nil {
		return err
	}

	bug.staging.commitHash = hash
	bug.packs = append(bug.packs, bug.staging)
	bug.staging = OperationPack{}

	return nil
}

func (bug *Bug) CommitAsNeeded(repo repository.ClockedRepo) error {
	if !bug.NeedCommit() {
		return nil
	}
	return bug.Commit(repo)
}

func (bug *Bug) NeedCommit() bool {
	return !bug.staging.IsEmpty()
}

func makeMediaTree(pack OperationPack) []repository.TreeEntry {
	var tree []repository.TreeEntry
	counter := 0
	added := make(map[git.Hash]interface{})

	for _, ops := range pack.Operations {
		for _, file := range ops.GetFiles() {
			if _, has := added[file]; !has {
				tree = append(tree, repository.TreeEntry{
					ObjectType: repository.Blob,
					Hash:       file,
					// The name is not important here, we only need to
					// reference the blob.
					Name: fmt.Sprintf("file%d", counter),
				})
				counter++
				added[file] = struct{}{}
			}
		}
	}

	return tree
}

// Merge a different version of the same bug by rebasing operations of this bug
// that are not present in the other on top of the chain of operations of the
// other version.
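//
// For example (illustrative), if the local pack chain is [A B C] and the
// other version's chain is [A B D], with the common ancestor after B, the
// merged result is [A B D C'] where C' is the local pack C re-committed on
// top of D.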
func (bug *Bug) Merge(repo repository.Repo, other Interface) (bool, error) {
	var otherBug = bugFromInterface(other)

	// Note: a faster merge should be possible without actually reading and parsing
	// all the operation packs on our side.
	// Reading the other side is still necessary to validate remote data, at least
	// for new operations

	if bug.id != otherBug.id {
		return false, errors.New("merging unrelated bugs is not supported")
	}

	if len(otherBug.staging.Operations) > 0 {
		return false, errors.New("merging a bug with a non-empty staging is not supported")
	}

	if bug.lastCommit == "" || otherBug.lastCommit == "" {
		return false, errors.New("can't merge a bug that has never been stored")
	}

	ancestor, err := repo.FindCommonAncestor(bug.lastCommit, otherBug.lastCommit)
	if err != nil {
		return false, errors.Wrap(err, "can't find common ancestor")
	}

	ancestorIndex := 0
	newPacks := make([]OperationPack, 0, len(bug.packs))

	// Find the root of the rebase
	for i, pack := range bug.packs {
		newPacks = append(newPacks, pack)

		if pack.commitHash == ancestor {
			ancestorIndex = i
			break
		}
	}

	if len(otherBug.packs) == ancestorIndex+1 {
		// Nothing to rebase, return early
		return false, nil
	}

	// get the other bug's extra packs
	for i := ancestorIndex + 1; i < len(otherBug.packs); i++ {
		// clone is probably not necessary
		newPack := otherBug.packs[i].Clone()

		newPacks = append(newPacks, newPack)
		bug.lastCommit = newPack.commitHash
	}

	// rebase our extra packs
	for i := ancestorIndex + 1; i < len(bug.packs); i++ {
		pack := bug.packs[i]

		// get the referenced git tree
		treeHash, err := repo.GetTreeHash(pack.commitHash)

		if err != nil {
			return false, err
		}

		// create a new commit with the correct ancestor
		hash, err := repo.StoreCommitWithParent(treeHash, bug.lastCommit)

		if err != nil {
			return false, err
		}

		// replace the pack
		newPack := pack.Clone()
		newPack.commitHash = hash
		newPacks = append(newPacks, newPack)

		// update the bug
		bug.lastCommit = hash
	}

	// replace the pack chain with the merged one
	bug.packs = newPacks

	// Update the git ref
	err = repo.UpdateRef(bugsRefPattern+bug.id, bug.lastCommit)
	if err != nil {
		return false, err
	}

	return true, nil
}

// Id returns the Bug identifier
func (bug *Bug) Id() string {
	if bug.id == "" {
		// simply panic as it would be a coding error
		// (using an id of a bug not stored yet)
		panic("no id yet")
	}
	return bug.id
}

// HumanId returns the Bug identifier truncated for human consumption
func (bug *Bug) HumanId() string {
	return FormatHumanID(bug.Id())
}

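// FormatHumanID truncates a full bug id down to its human-readable prefix.
// With humanIdLength set to 7, a 40-character id beginning with "28ae0c2"
// (illustrative value) is shortened to "28ae0c2".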
func FormatHumanID(id string) string {
	format := fmt.Sprintf("%%.%ds", humanIdLength)
	return fmt.Sprintf(format, id)
}

// CreateLamportTime returns the Lamport time of creation
func (bug *Bug) CreateLamportTime() lamport.Time {
	return bug.createTime
}

// EditLamportTime returns the Lamport time of the last edit
func (bug *Bug) EditLamportTime() lamport.Time {
	return bug.editTime
}

// FirstOp looks up the very first operation of the bug.
// For a valid Bug, this operation should be a CreateOp
func (bug *Bug) FirstOp() Operation {
	for _, pack := range bug.packs {
		for _, op := range pack.Operations {
			return op
		}
	}

	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[0]
	}

	return nil
}

// LastOp looks up the very last operation of the bug.
// For a valid Bug, it should never be nil
func (bug *Bug) LastOp() Operation {
	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[len(bug.staging.Operations)-1]
	}

	if len(bug.packs) == 0 {
		return nil
	}

	lastPack := bug.packs[len(bug.packs)-1]

	if len(lastPack.Operations) == 0 {
		return nil
	}

	return lastPack.Operations[len(lastPack.Operations)-1]
}

// Compile a bug into an easily usable snapshot
func (bug *Bug) Compile() Snapshot {
	snap := Snapshot{
		id:     bug.id,
		Status: OpenStatus,
	}

	it := NewOperationIterator(bug)

	for it.Next() {
		op := it.Value()
		op.Apply(&snap)
		snap.Operations = append(snap.Operations, op)
	}

	return snap
}
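
// Example (illustrative): compiling a snapshot to inspect the current state.
//
//	snap := b.Compile()
//	fmt.Println(snap.Status)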