bug.go

// Package bug contains the bug data model and low-level related functions
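//
// A typical write path, in rough strokes (error handling omitted; the
// operation constructors live elsewhere in this package):
//
//	b := NewBug()
//	b.Append(op) // op is any Operation
//	_ = b.Commit(repo)
//
// Reading goes through ReadLocalBug / ReadAllLocalBugs, and Compile turns a
// Bug into an easily usable Snapshot.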
package bug

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/pkg/errors"

	"github.com/MichaelMure/git-bug/entity"
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
	"github.com/MichaelMure/git-bug/util/lamport"
)

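// Git references under which bugs are stored, for the local repository and
// for each remote respectively.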
const bugsRefPattern = "refs/bugs/"
const bugsRemoteRefPattern = "refs/remotes/%s/bugs/"

const opsEntryName = "ops"
const rootEntryName = "root"
const mediaEntryName = "media"

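// Tree entry names used to store the Lamport times. The clock value is
// serialized directly into the entry name (see Commit), so no extra blob is
// needed per value.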
const createClockEntryPrefix = "create-clock-"
const createClockEntryPattern = "create-clock-%d"
const editClockEntryPrefix = "edit-clock-"
const editClockEntryPattern = "edit-clock-%d"

const creationClockName = "bug-create"
const editClockName = "bug-edit"

var ErrBugNotExist = errors.New("bug doesn't exist")

func NewErrMultipleMatchBug(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("bug", matching)
}

func NewErrMultipleMatchOp(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("operation", matching)
}

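// compile-time checks that Bug satisfies both interfaces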
var _ Interface = &Bug{}
var _ entity.Interface = &Bug{}

// Bug holds the data of a bug thread, organized in a way close to
// how it will be persisted inside Git. This is the data structure
// used to merge two different versions of the same Bug.
type Bug struct {

	// A Lamport clock is a logical clock that allows ordering events
	// in a distributed system.
	// It must be the first field in this struct due to https://github.com/golang/go/issues/599
	createTime lamport.Time
	editTime   lamport.Time

	// Id used as unique identifier
	id entity.Id

	lastCommit repository.Hash
	rootPack   repository.Hash

	// all the committed operations
	packs []OperationPack

	// a temporary pack of operations used for convenience to pile up new operations
	// before a commit
	staging OperationPack
}

// NewBug creates a new Bug
func NewBug() *Bug {
	// No id yet
	// No logical clock yet
	return &Bug{}
}

// FindLocalBug finds an existing Bug matching a prefix
func FindLocalBug(repo repository.ClockedRepo, prefix string) (*Bug, error) {
	ids, err := ListLocalIds(repo)

	if err != nil {
		return nil, err
	}

	// preallocate but empty
	matching := make([]entity.Id, 0, 5)

	for _, id := range ids {
		if id.HasPrefix(prefix) {
			matching = append(matching, id)
		}
	}

	if len(matching) == 0 {
		return nil, errors.New("no matching bug found")
	}

	if len(matching) > 1 {
		return nil, NewErrMultipleMatchBug(matching)
	}

	return ReadLocalBug(repo, matching[0])
}

// ReadLocalBug will read a local bug from its id
func ReadLocalBug(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
	ref := bugsRefPattern + id.String()
	return readBug(repo, ref)
}

// ReadRemoteBug will read a remote bug from its id
func ReadRemoteBug(repo repository.ClockedRepo, remote string, id string) (*Bug, error) {
	ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id
	return readBug(repo, ref)
}

// readBug will read and parse a Bug from git
func readBug(repo repository.ClockedRepo, ref string) (*Bug, error) {
	refSplit := strings.Split(ref, "/")
	id := entity.Id(refSplit[len(refSplit)-1])

	if err := id.Validate(); err != nil {
		return nil, errors.Wrap(err, "invalid ref")
	}

	hashes, err := repo.ListCommits(ref)

	// TODO: this is not perfect, it might be a command invocation error
	if err != nil {
		return nil, ErrBugNotExist
	}

	bug := Bug{
		id:       id,
		editTime: 0,
	}

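	// Each commit's tree contains an "ops" entry (the pack of operations added
	// by that commit), a "root" entry (always the very first pack), optionally
	// a "media" tree, and entries whose names encode the Lamport times
	// (see Commit for how they are written).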
	// Load each OperationPack
	for _, hash := range hashes {
		entries, err := repo.ReadTree(hash)
		if err != nil {
			return nil, errors.Wrap(err, "can't list git tree entries")
		}

		bug.lastCommit = hash

		var opsEntry repository.TreeEntry
		opsFound := false
		var rootEntry repository.TreeEntry
		rootFound := false
		var createTime uint64
		var editTime uint64

		for _, entry := range entries {
			if entry.Name == opsEntryName {
				opsEntry = entry
				opsFound = true
				continue
			}
			if entry.Name == rootEntryName {
				rootEntry = entry
				rootFound = true
			}
			if strings.HasPrefix(entry.Name, createClockEntryPrefix) {
				n, err := fmt.Sscanf(entry.Name, createClockEntryPattern, &createTime)
				if err != nil {
					return nil, errors.Wrap(err, "can't read create lamport time")
				}
				if n != 1 {
					return nil, fmt.Errorf("could not parse create time lamport value")
				}
			}
			if strings.HasPrefix(entry.Name, editClockEntryPrefix) {
				n, err := fmt.Sscanf(entry.Name, editClockEntryPattern, &editTime)
				if err != nil {
					return nil, errors.Wrap(err, "can't read edit lamport time")
				}
				if n != 1 {
					return nil, fmt.Errorf("could not parse edit time lamport value")
				}
			}
		}

		if !opsFound {
			return nil, errors.New("invalid tree, missing the ops entry")
		}
		if !rootFound {
			return nil, errors.New("invalid tree, missing the root entry")
		}

		if bug.rootPack == "" {
			bug.rootPack = rootEntry.Hash
			bug.createTime = lamport.Time(createTime)
		}

		// Due to rebase, edit Lamport times are not necessarily ordered
		if editTime > uint64(bug.editTime) {
			bug.editTime = lamport.Time(editTime)
		}

		// Update the clocks
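		// (Witness makes sure the local clocks are at least as large as the
		// times just read, so that later increments are ordered after them)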
		createClock, err := repo.GetOrCreateClock(creationClockName)
		if err != nil {
			return nil, err
		}
		if err := createClock.Witness(bug.createTime); err != nil {
			return nil, errors.Wrap(err, "failed to update create lamport clock")
		}
		editClock, err := repo.GetOrCreateClock(editClockName)
		if err != nil {
			return nil, err
		}
		if err := editClock.Witness(bug.editTime); err != nil {
			return nil, errors.Wrap(err, "failed to update edit lamport clock")
		}

		data, err := repo.ReadData(opsEntry.Hash)
		if err != nil {
			return nil, errors.Wrap(err, "failed to read git blob data")
		}

		opp := &OperationPack{}
		err = json.Unmarshal(data, &opp)

		if err != nil {
			return nil, errors.Wrap(err, "failed to decode OperationPack json")
		}

		// tag the pack with the commit hash
		opp.commitHash = hash

		bug.packs = append(bug.packs, *opp)
	}

	// Make sure that the identities are properly loaded
	resolver := identity.NewSimpleResolver(repo)
	err = bug.EnsureIdentities(resolver)
	if err != nil {
		return nil, err
	}

	return &bug, nil
}

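// StreamedBug is a bug, or the error that prevented reading it, as delivered
// on the channels returned by ReadAllLocalBugs and ReadAllRemoteBugs.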
type StreamedBug struct {
	Bug *Bug
	Err error
}

// ReadAllLocalBugs reads and parses all local bugs
func ReadAllLocalBugs(repo repository.ClockedRepo) <-chan StreamedBug {
	return readAllBugs(repo, bugsRefPattern)
}

// ReadAllRemoteBugs reads and parses all remote bugs for a given remote
func ReadAllRemoteBugs(repo repository.ClockedRepo, remote string) <-chan StreamedBug {
	refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
	return readAllBugs(repo, refPrefix)
}

// Read and parse all available bugs with a given ref prefix
func readAllBugs(repo repository.ClockedRepo, refPrefix string) <-chan StreamedBug {
	out := make(chan StreamedBug)

	go func() {
		defer close(out)

		refs, err := repo.ListRefs(refPrefix)
		if err != nil {
			out <- StreamedBug{Err: err}
			return
		}

		for _, ref := range refs {
			b, err := readBug(repo, ref)

			if err != nil {
				out <- StreamedBug{Err: err}
				return
			}

			out <- StreamedBug{Bug: b}
		}
	}()

	return out
}

// ListLocalIds lists all the available local bug ids
func ListLocalIds(repo repository.Repo) ([]entity.Id, error) {
	refs, err := repo.ListRefs(bugsRefPattern)
	if err != nil {
		return nil, err
	}

	return refsToIds(refs), nil
}

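// refsToIds extracts the bug ids from a list of git references; the id is the
// last element of the ref path.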
func refsToIds(refs []string) []entity.Id {
	ids := make([]entity.Id, len(refs))

	for i, ref := range refs {
		split := strings.Split(ref, "/")
		ids[i] = entity.Id(split[len(split)-1])
	}

	return ids
}

// Validate checks if the Bug data is valid
func (bug *Bug) Validate() error {
	// non-empty
	if len(bug.packs) == 0 && bug.staging.IsEmpty() {
		return fmt.Errorf("bug has no operations")
	}

	// check that each pack and its operations are valid
	for _, pack := range bug.packs {
		if err := pack.Validate(); err != nil {
			return err
		}
	}

	// check that the staging area is valid, if not empty
	if !bug.staging.IsEmpty() {
		if err := bug.staging.Validate(); err != nil {
			return errors.Wrap(err, "staging")
		}
	}

	// The very first Op should be a CreateOp
	firstOp := bug.FirstOp()
	if firstOp == nil || firstOp.base().OperationType != CreateOp {
		return fmt.Errorf("first operation should be a Create op")
	}

	// The bug Id should be the hash of the first commit
	if len(bug.packs) > 0 && string(bug.packs[0].commitHash) != bug.id.String() {
		return fmt.Errorf("bug id should be the first commit hash")
	}

	// Check that there is no other CreateOp
	// Check that there are no colliding operation IDs
	it := NewOperationIterator(bug)
	createCount := 0
	ids := make(map[entity.Id]struct{})
	for it.Next() {
		if it.Value().base().OperationType == CreateOp {
			createCount++
		}
		if _, ok := ids[it.Value().Id()]; ok {
			return fmt.Errorf("id collision: %s", it.Value().Id())
		}
		ids[it.Value().Id()] = struct{}{}
	}

	if createCount != 1 {
		return fmt.Errorf("only one Create op allowed")
	}

	return nil
}

// Append an operation into the staging area, to be committed later
func (bug *Bug) Append(op Operation) {
	bug.staging.Append(op)
}

// Commit writes the staging area in Git and moves the operations to the packs
func (bug *Bug) Commit(repo repository.ClockedRepo) error {

	if !bug.NeedCommit() {
		return fmt.Errorf("can't commit a bug with no pending operation")
	}

	if err := bug.Validate(); err != nil {
		return errors.Wrap(err, "can't commit a bug with invalid data")
	}

	// Write the Ops as a Git blob containing the serialized array
	hash, err := bug.staging.Write(repo)
	if err != nil {
		return err
	}

	if bug.rootPack == "" {
		bug.rootPack = hash
	}

	// Make a Git tree referencing this blob
	tree := []repository.TreeEntry{
		// the last pack of ops
		{ObjectType: repository.Blob, Hash: hash, Name: opsEntryName},
		// always the first pack of ops (might be the same)
		{ObjectType: repository.Blob, Hash: bug.rootPack, Name: rootEntryName},
	}

	// Reference, if any, all the files required by the ops.
	// Git will check that they actually exist in the storage and will make sure
	// to push/pull them as needed.
	mediaTree := makeMediaTree(bug.staging)
	if len(mediaTree) > 0 {
		mediaTreeHash, err := repo.StoreTree(mediaTree)
		if err != nil {
			return err
		}
		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Tree,
			Hash:       mediaTreeHash,
			Name:       mediaEntryName,
		})
	}

	// Store the logical clocks as well
	// --> edit clock for each OperationPack/commit
	// --> create clock only for the first OperationPack/commit
	//
	// To avoid having one blob for each clock value, clocks are serialized
	// directly into the entry name
	emptyBlobHash, err := repo.StoreData([]byte{})
	if err != nil {
		return err
	}

	editClock, err := repo.GetOrCreateClock(editClockName)
	if err != nil {
		return err
	}
	bug.editTime, err = editClock.Increment()
	if err != nil {
		return err
	}

	tree = append(tree, repository.TreeEntry{
		ObjectType: repository.Blob,
		Hash:       emptyBlobHash,
		Name:       fmt.Sprintf(editClockEntryPattern, bug.editTime),
	})
	if bug.lastCommit == "" {
		createClock, err := repo.GetOrCreateClock(creationClockName)
		if err != nil {
			return err
		}
		bug.createTime, err = createClock.Increment()
		if err != nil {
			return err
		}

		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Blob,
			Hash:       emptyBlobHash,
			Name:       fmt.Sprintf(createClockEntryPattern, bug.createTime),
		})
	}

	// Store the tree
	hash, err = repo.StoreTree(tree)
	if err != nil {
		return err
	}

	// Write a Git commit referencing the tree, with the previous commit as parent
	if bug.lastCommit != "" {
		hash, err = repo.StoreCommitWithParent(hash, bug.lastCommit)
	} else {
		hash, err = repo.StoreCommit(hash)
	}

	if err != nil {
		return err
	}

	bug.lastCommit = hash

	// if it was the first commit, use the commit hash as bug id
	if bug.id == "" {
		bug.id = entity.Id(hash)
	}

	// Create or update the Git reference for this bug
	// When pushing later, the remote will ensure that this ref update
	// is a fast-forward, that is, no data has been overwritten
	ref := fmt.Sprintf("%s%s", bugsRefPattern, bug.id)
	err = repo.UpdateRef(ref, hash)

	if err != nil {
		return err
	}

	bug.staging.commitHash = hash
	bug.packs = append(bug.packs, bug.staging)
	bug.staging = OperationPack{}

	return nil
}

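// CommitAsNeeded commits the staging area if it holds pending operations,
// and does nothing otherwise.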
func (bug *Bug) CommitAsNeeded(repo repository.ClockedRepo) error {
	if !bug.NeedCommit() {
		return nil
	}
	return bug.Commit(repo)
}

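// NeedCommit returns true if the staging area holds operations that have not
// been committed yet.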
func (bug *Bug) NeedCommit() bool {
	return !bug.staging.IsEmpty()
}

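// makeMediaTree builds the tree entries referencing every file used by the
// operations of the given pack, each file appearing only once.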
func makeMediaTree(pack OperationPack) []repository.TreeEntry {
	var tree []repository.TreeEntry
	counter := 0
	added := make(map[repository.Hash]struct{})

	for _, ops := range pack.Operations {
		for _, file := range ops.GetFiles() {
			if _, has := added[file]; !has {
				tree = append(tree, repository.TreeEntry{
					ObjectType: repository.Blob,
					Hash:       file,
					// The name is not important here, we only need to
					// reference the blob.
					Name: fmt.Sprintf("file%d", counter),
				})
				counter++
				added[file] = struct{}{}
			}
		}
	}

	return tree
}

// Merge a different version of the same bug by rebasing operations of this bug
// that are not present in the other on top of the chain of operations of the
// other version.
func (bug *Bug) Merge(repo repository.Repo, other Interface) (bool, error) {
	var otherBug = bugFromInterface(other)

	// Note: a faster merge should be possible without actually reading and parsing
	// all operation packs on our side.
	// Reading the other side is still necessary to validate remote data, at least
	// for new operations

	if bug.id != otherBug.id {
		return false, errors.New("merging unrelated bugs is not supported")
	}

	if len(otherBug.staging.Operations) > 0 {
		return false, errors.New("merging a bug with a non-empty staging is not supported")
	}

	if bug.lastCommit == "" || otherBug.lastCommit == "" {
		return false, errors.New("can't merge a bug that has never been stored")
	}

	ancestor, err := repo.FindCommonAncestor(bug.lastCommit, otherBug.lastCommit)
	if err != nil {
		return false, errors.Wrap(err, "can't find common ancestor")
	}

	ancestorIndex := 0
	newPacks := make([]OperationPack, 0, len(bug.packs))

	// Find the root of the rebase
	for i, pack := range bug.packs {
		newPacks = append(newPacks, pack)

		if pack.commitHash == ancestor {
			ancestorIndex = i
			break
		}
	}

	if len(otherBug.packs) == ancestorIndex+1 {
		// the other bug brings no new pack, nothing to merge
		return false, nil
	}

	// get the other bug's extra packs
	for i := ancestorIndex + 1; i < len(otherBug.packs); i++ {
		// clone is probably not necessary
		newPack := otherBug.packs[i].Clone()

		newPacks = append(newPacks, newPack)
		bug.lastCommit = newPack.commitHash
	}

	// rebase our extra packs
	for i := ancestorIndex + 1; i < len(bug.packs); i++ {
		pack := bug.packs[i]

		// get the referenced git tree
		treeHash, err := repo.GetTreeHash(pack.commitHash)

		if err != nil {
			return false, err
		}

		// create a new commit with the correct ancestor
		hash, err := repo.StoreCommitWithParent(treeHash, bug.lastCommit)

		if err != nil {
			return false, err
		}

		// replace the pack
		newPack := pack.Clone()
		newPack.commitHash = hash
		newPacks = append(newPacks, newPack)

		// update the bug
		bug.lastCommit = hash
	}

	bug.packs = newPacks

	// Update the git ref
	err = repo.UpdateRef(bugsRefPattern+bug.id.String(), bug.lastCommit)
	if err != nil {
		return false, err
	}

	return true, nil
}

// Id returns the Bug identifier
func (bug *Bug) Id() entity.Id {
	if bug.id == "" {
		// simply panic as it would be a coding error
		// (using the id of a bug not stored yet)
		panic("no id yet")
	}
	return bug.id
}

// CreateLamportTime returns the Lamport time of creation
func (bug *Bug) CreateLamportTime() lamport.Time {
	return bug.createTime
}

// EditLamportTime returns the Lamport time of the last edit
func (bug *Bug) EditLamportTime() lamport.Time {
	return bug.editTime
}

// FirstOp looks up the very first operation of the bug.
// For a valid Bug, this operation should be a CreateOp
func (bug *Bug) FirstOp() Operation {
	for _, pack := range bug.packs {
		for _, op := range pack.Operations {
			return op
		}
	}

	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[0]
	}

	return nil
}

// LastOp looks up the very last operation of the bug.
// For a valid Bug, it should never be nil
func (bug *Bug) LastOp() Operation {
	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[len(bug.staging.Operations)-1]
	}

	if len(bug.packs) == 0 {
		return nil
	}

	lastPack := bug.packs[len(bug.packs)-1]

	if len(lastPack.Operations) == 0 {
		return nil
	}

	return lastPack.Operations[len(lastPack.Operations)-1]
}

// Compile a bug into an easily usable snapshot
func (bug *Bug) Compile() Snapshot {
	snap := Snapshot{
		id:     bug.id,
		Status: OpenStatus,
	}

	it := NewOperationIterator(bug)

	for it.Next() {
		op := it.Value()
		op.Apply(&snap)
		snap.Operations = append(snap.Operations, op)
	}

	return snap
}