package dag

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"

	"github.com/pkg/errors"
	"golang.org/x/crypto/openpgp"

	"github.com/MichaelMure/git-bug/entity"
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
	"github.com/MichaelMure/git-bug/util/lamport"
)

// TODO: extra data tree
const extraEntryName = "extra"

const opsEntryName = "ops"
const versionEntryPrefix = "version-"
const createClockEntryPrefix = "create-clock-"
const editClockEntryPrefix = "edit-clock-"

// operationPack is a wrapper structure to store multiple operations in a single git blob.
// Additionally, it holds and stores the metadata for those operations.
type operationPack struct {
	// An identifier, taken from a hash of the serialized Operations.
	id entity.Id

	// The author of the Operations. Must be the same author for all the Operations.
	Author identity.Interface
	// The list of Operations stored in the operationPack.
	Operations []Operation
	// Encodes the entity's logical time of creation across all entities of the same type.
	// Only exists on the root operationPack.
	CreateTime lamport.Time
	// Encodes the entity's logical time of last edit across all entities of the same type.
	// Exists on all operationPacks.
	EditTime lamport.Time
}

func (opp *operationPack) Id() entity.Id {
	if opp.id == "" || opp.id == entity.UnsetId {
		// This means we are trying to get the opp's Id *before* it has been stored.
		// As the Id is computed based on the actual bytes written to disk, we predict
		// those bytes and derive the Id from them. This is safe as the exact same code
		// will do the writing to disk later.

		data, err := json.Marshal(opp)
		if err != nil {
			panic(err)
		}
		opp.id = entity.DeriveId(data)
	}

	return opp.id
}

func (opp *operationPack) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Author     identity.Interface `json:"author"`
		Operations []Operation        `json:"ops"`
	}{
		Author:     opp.Author,
		Operations: opp.Operations,
	})
}
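
// For reference, the JSON produced above looks roughly like this (an illustrative
// sketch, not an exact dump; the author serializes to a lightweight form carrying its id):
//
//	{
//		"author": { "id": "<author id>" },
//		"ops": [ { <operation 1> }, { <operation 2> } ]
//	}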

func (opp *operationPack) Validate() error {
	if opp.Author == nil {
		return fmt.Errorf("missing author")
	}
	for _, op := range opp.Operations {
		if op.Author().Id() != opp.Author.Id() {
			return fmt.Errorf("operation has different author than the operationPack's")
		}
	}
	if opp.EditTime == 0 {
		return fmt.Errorf("lamport edit time is zero")
	}
	return nil
}

// Write writes the operationPack in git, with zero, one or more parent commits.
// If the repository has a keypair able to sign (that is, with a private key), the resulting commit is signed with that key.
// Returns the hash of the created commit.
func (opp *operationPack) Write(def Definition, repo repository.Repo, parentCommit ...repository.Hash) (repository.Hash, error) {
	if err := opp.Validate(); err != nil {
		return "", err
	}

	// For different reasons, we store the clocks and format version directly in the git tree.
	// The format version has to be accessible before any attempt to decode the data, so we can return early with a dedicated error.
	// The clocks could possibly be stored in the git blob, but it's nice to separate data and metadata, and
	// we are storing something directly in the tree already anyway.
	//
	// To have a valid tree, we always point the "fake" entries to the same value: the empty blob.
	emptyBlobHash, err := repo.StoreData([]byte{})
	if err != nil {
		return "", err
	}

	// Write the Ops as a Git blob containing the serialized array of operations
	data, err := json.Marshal(opp)
	if err != nil {
		return "", err
	}

	// compute the Id while we have the serialized data
	opp.id = entity.DeriveId(data)

	hash, err := repo.StoreData(data)
	if err != nil {
		return "", err
	}

	// Make a Git tree referencing this blob and encoding the other values:
	// - format version
	// - clocks
	tree := []repository.TreeEntry{
		{ObjectType: repository.Blob, Hash: emptyBlobHash,
			Name: fmt.Sprintf(versionEntryPrefix+"%d", def.FormatVersion)},
		{ObjectType: repository.Blob, Hash: hash,
			Name: opsEntryName},
		{ObjectType: repository.Blob, Hash: emptyBlobHash,
			Name: fmt.Sprintf(editClockEntryPrefix+"%d", opp.EditTime)},
	}
	if opp.CreateTime > 0 {
		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Blob,
			Hash:       emptyBlobHash,
			Name:       fmt.Sprintf(createClockEntryPrefix+"%d", opp.CreateTime),
		})
	}

	// Store the tree
	treeHash, err := repo.StoreTree(tree)
	if err != nil {
		return "", err
	}

	// Write a Git commit referencing the tree, with the previous commit(s) as parent.
	var commitHash repository.Hash

	// Sign the commit if we have a key
	signingKey, err := opp.Author.SigningKey(repo)
	if err != nil {
		return "", err
	}

	if signingKey != nil {
		commitHash, err = repo.StoreSignedCommit(treeHash, signingKey.PGPEntity(), parentCommit...)
	} else {
		commitHash, err = repo.StoreCommit(treeHash, parentCommit...)
	}

	if err != nil {
		return "", err
	}

	return commitHash, nil
}
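
// For reference, the tree written above for a root operationPack looks roughly
// like this ("e69de29..." is the empty blob; the ops blob hash is illustrative):
//
//	blob e69de29...  create-clock-1
//	blob e69de29...  edit-clock-1
//	blob 9f4c8d2...  ops
//	blob e69de29...  version-1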

// readOperationPack reads the operationPack encoded in git at the given commit's tree.
//
// Validity of the Lamport clocks is left for the caller to decide.
func readOperationPack(def Definition, repo repository.RepoData, resolver identity.Resolver, commit repository.Commit) (*operationPack, error) {
	entries, err := repo.ReadTree(commit.TreeHash)
	if err != nil {
		return nil, err
	}

	// check the format version first, fail early instead of trying to read something
	var version uint
	for _, entry := range entries {
		if strings.HasPrefix(entry.Name, versionEntryPrefix) {
			v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, versionEntryPrefix), 10, 64)
			if err != nil {
				return nil, errors.Wrap(err, "can't read format version")
			}
			if v > 1<<12 {
				return nil, fmt.Errorf("format version too big")
			}
			version = uint(v)
			break
		}
	}
	if version == 0 {
		return nil, entity.NewErrUnknowFormat(def.FormatVersion)
	}
	if version != def.FormatVersion {
		return nil, entity.NewErrInvalidFormat(version, def.FormatVersion)
	}

	var id entity.Id
	var author identity.Interface
	var ops []Operation
	var createTime lamport.Time
	var editTime lamport.Time

	for _, entry := range entries {
		switch {
		case entry.Name == opsEntryName:
			data, err := repo.ReadData(entry.Hash)
			if err != nil {
				return nil, errors.Wrap(err, "failed to read git blob data")
			}
			ops, author, err = unmarshallPack(def, resolver, data)
			if err != nil {
				return nil, err
			}
			id = entity.DeriveId(data)

		case strings.HasPrefix(entry.Name, createClockEntryPrefix):
			v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, createClockEntryPrefix), 10, 64)
			if err != nil {
				return nil, errors.Wrap(err, "can't read creation lamport time")
			}
			createTime = lamport.Time(v)

		case strings.HasPrefix(entry.Name, editClockEntryPrefix):
			v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, editClockEntryPrefix), 10, 64)
			if err != nil {
				return nil, errors.Wrap(err, "can't read edit lamport time")
			}
			editTime = lamport.Time(v)
		}
	}

	// Verify signature if we expect one
	keys := author.ValidKeysAtTime(fmt.Sprintf(editClockPattern, def.Namespace), editTime)
	if len(keys) > 0 {
		keyring := PGPKeyring(keys)
		_, err = openpgp.CheckDetachedSignature(keyring, commit.SignedData, commit.Signature)
		if err != nil {
			return nil, fmt.Errorf("signature failure: %v", err)
		}
	}

	return &operationPack{
		id:         id,
		Author:     author,
		Operations: ops,
		CreateTime: createTime,
		EditTime:   editTime,
	}, nil
}
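
// A rough round-trip sketch (illustrative only; it assumes the repository at hand
// exposes a ReadCommit accessor returning a repository.Commit):
//
//	commitHash, _ := opp.Write(def, repo)
//	commit, _ := repo.ReadCommit(commitHash)
//	readOpp, _ := readOperationPack(def, repo, resolver, commit)
//	// readOpp.Id() == opp.Id()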

// unmarshallPack delegates the unmarshalling of the Operation's JSON to the decoding
// function provided by the concrete entity. This gives access to the concrete type of each
// Operation.
func unmarshallPack(def Definition, resolver identity.Resolver, data []byte) ([]Operation, identity.Interface, error) {
	aux := struct {
		Author     identity.IdentityStub `json:"author"`
		Operations []json.RawMessage     `json:"ops"`
	}{}

	if err := json.Unmarshal(data, &aux); err != nil {
		return nil, nil, err
	}

	if aux.Author.Id() == "" || aux.Author.Id() == entity.UnsetId {
		return nil, nil, fmt.Errorf("missing author")
	}

	author, err := resolver.ResolveIdentity(aux.Author.Id())
	if err != nil {
		return nil, nil, err
	}

	ops := make([]Operation, 0, len(aux.Operations))

	for _, raw := range aux.Operations {
		// delegate to specialized unmarshal function
		op, err := def.OperationUnmarshaler(author, raw)
		if err != nil {
			return nil, nil, err
		}
		ops = append(ops, op)
	}

	return ops, author, nil
}
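
// A concrete entity typically supplies def.OperationUnmarshaler along these lines
// (a minimal sketch; the "type" discriminator field is an assumption about the
// concrete entity's JSON, not something imposed by this package):
//
//	func(author identity.Interface, raw json.RawMessage) (Operation, error) {
//		var t struct {
//			Type int `json:"type"`
//		}
//		if err := json.Unmarshal(raw, &t); err != nil {
//			return nil, err
//		}
//		// switch on t.Type, unmarshal raw into the matching concrete Operation,
//		// attach the resolved author, then return it
//	}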

var _ openpgp.KeyRing = &PGPKeyring{}

// PGPKeyring implements an openpgp.KeyRing from a slice of Key
type PGPKeyring []*identity.Key

func (pk PGPKeyring) KeysById(id uint64) []openpgp.Key {
	var result []openpgp.Key
	for _, key := range pk {
		if key.Public().KeyId == id {
			result = append(result, openpgp.Key{
				PublicKey:  key.Public(),
				PrivateKey: key.Private(),
			})
		}
	}
	return result
}

func (pk PGPKeyring) KeysByIdUsage(id uint64, requiredUsage byte) []openpgp.Key {
	// the only usage we care about is the ability to sign, which all keys should already be capable of
	return pk.KeysById(id)
}

func (pk PGPKeyring) DecryptionKeys() []openpgp.Key {
	result := make([]openpgp.Key, len(pk))
	for i, key := range pk {
		result[i] = openpgp.Key{
			PublicKey:  key.Public(),
			PrivateKey: key.Private(),
		}
	}
	return result
}