1
1
use std:: { cmp:: Ordering , marker:: PhantomData } ;
2
2
3
+ use bytemuck:: { Pod , Zeroable } ;
3
4
use hash:: compute_root;
4
5
use light_hasher:: { errors:: HasherError , Hasher } ;
5
6
@@ -23,6 +24,7 @@ use crate::{changelog::ChangelogEntry, hash::compute_parent_node};
23
24
/// Due to ability to make a decent number of concurrent update requests to be
24
25
/// valid, no lock is necessary.
25
26
#[ repr( C ) ]
27
+ #[ derive( Copy , Clone ) ]
26
28
pub struct ConcurrentMerkleTree <
27
29
H ,
28
30
const HEIGHT : usize ,
@@ -31,18 +33,18 @@ pub struct ConcurrentMerkleTree<
31
33
> where
32
34
H : Hasher ,
33
35
{
36
+ /// Index of the newest non-empty leaf.
37
+ pub next_index : u64 ,
38
+ /// History of roots.
39
+ pub roots : [ [ u8 ; 32 ] ; MAX_ROOTS ] ,
34
40
/// History of Merkle proofs.
35
41
pub changelog : [ ChangelogEntry < HEIGHT > ; MAX_CHANGELOG ] ,
36
42
/// Index of the newest changelog.
37
43
pub current_changelog_index : u64 ,
38
- /// History of roots.
39
- pub roots : [ [ u8 ; 32 ] ; MAX_ROOTS ] ,
40
44
/// Index of the newest root.
41
45
pub current_root_index : u64 ,
42
46
/// The newest Merkle proof.
43
47
pub rightmost_proof : [ [ u8 ; 32 ] ; HEIGHT ] ,
44
- /// Index of the newest non-empty leaf.
45
- pub rightmost_index : u64 ,
46
48
/// The newest non-empty leaf.
47
49
pub rightmost_leaf : [ u8 ; 32 ] ,
48
50
@@ -61,13 +63,54 @@ where
61
63
roots : [ [ 0u8 ; 32 ] ; MAX_ROOTS ] ,
62
64
current_root_index : 0 ,
63
65
rightmost_proof : [ [ 0u8 ; 32 ] ; HEIGHT ] ,
64
- rightmost_index : 0 ,
66
+ next_index : 0 ,
65
67
rightmost_leaf : [ 0u8 ; 32 ] ,
66
68
_hasher : PhantomData ,
67
69
}
68
70
}
69
71
}
70
72
73
/// Mark `ConcurrentMerkleTree` as `Zeroable`, providing Anchor a guarantee
/// that it can be always initialized with zeros.
///
/// # Safety
///
/// [`bytemuck`](bytemuck) is not able to ensure that our custom types (`Hasher`
/// and `ConcurrentMerkleTree`) can be a subject of initializing with zeros. It
/// also doesn't support structs with const generics (it would need to ensure
/// alignment).
///
/// Therefore, it's our responsibility to guarantee that `ConcurrentMerkleTree`
/// doesn't contain any fields which are not zeroable. The visible fields are
/// `u64` counters, `[u8; 32]` arrays (and arrays thereof), `ChangelogEntry`
/// arrays, and a `PhantomData` marker — all-zero bit patterns are valid for
/// the primitive fields and for `PhantomData`.
///
/// NOTE(review): this guarantee also relies on `ChangelogEntry<HEIGHT>` being
/// validly all-zeros — confirm at its definition in the `changelog` module.
unsafe impl<H, const HEIGHT: usize, const MAX_CHANGELOG: usize, const MAX_ROOTS: usize> Zeroable
    for ConcurrentMerkleTree<H, HEIGHT, MAX_CHANGELOG, MAX_ROOTS>
where
    H: Hasher,
{
}
91
+
92
/// Mark `ConcurrentMerkleTree` as `Pod` (Plain Old Data), providing Anchor a
/// guarantee that it can be used in a zero-copy account.
///
/// # Safety
///
/// [`bytemuck`](bytemuck) is not able to ensure that our custom types (`Hasher`
/// and `ConcurrentMerkleTree`) can be a subject of byte serialization. It also
/// doesn't support structs with const generics (it would need to ensure
/// alignment).
///
/// Therefore, it's our responsibility to guarantee that:
///
/// * `Hasher` and `ConcurrentMerkleTree` with given const generics are aligned.
/// * They don't contain any fields which are not implementing `Copy` or are
///   not an easy subject for byte serialization.
///
/// NOTE(review): bytemuck's `Pod` contract additionally requires that the
/// `#[repr(C)]` layout contains no padding bytes (padding is uninitialized
/// memory and must not be read during serialization). The visible fields are
/// `u64`s and `[u8; 32]` arrays, which pack without padding, but confirm that
/// `ChangelogEntry<HEIGHT>` is itself padding-free.
unsafe impl<H, const HEIGHT: usize, const MAX_CHANGELOG: usize, const MAX_ROOTS: usize> Pod
    for ConcurrentMerkleTree<H, HEIGHT, MAX_CHANGELOG, MAX_ROOTS>
where
    H: Hasher + Copy + 'static,
{
}
113
+
71
114
impl < H , const HEIGHT : usize , const MAX_CHANGELOG : usize , const MAX_ROOTS : usize >
72
115
ConcurrentMerkleTree < H , HEIGHT , MAX_CHANGELOG , MAX_ROOTS >
73
116
where
@@ -236,14 +279,14 @@ where
236
279
. ok_or ( HasherError :: RootsZero ) ? = node;
237
280
238
281
// Update the rightmost proof. It has to be done only if tree is not full.
239
- if self . rightmost_index < ( 1 << HEIGHT ) {
240
- if self . rightmost_index > 0 && leaf_index < self . rightmost_index as usize - 1 {
282
+ if self . next_index < ( 1 << HEIGHT ) {
283
+ if self . next_index > 0 && leaf_index < self . next_index as usize - 1 {
241
284
// Update the rightmost proof with the current changelog entry when:
242
285
//
243
286
// * `rightmost_index` is greater than 0 (tree is non-empty).
244
287
// * The updated leaf is non-rightmost.
245
288
if let Some ( proof) = changelog_entry
246
- . update_proof ( self . rightmost_index as usize - 1 , & self . rightmost_proof )
289
+ . update_proof ( self . next_index as usize - 1 , & self . rightmost_proof )
247
290
{
248
291
self . rightmost_proof = proof;
249
292
}
@@ -272,7 +315,7 @@ where
272
315
leaf_index : usize ,
273
316
proof : & [ [ u8 ; 32 ] ; HEIGHT ] ,
274
317
) -> Result < ( ) , HasherError > {
275
- let updated_proof = if self . rightmost_index > 0 && MAX_CHANGELOG > 0 {
318
+ let updated_proof = if self . next_index > 0 && MAX_CHANGELOG > 0 {
276
319
match self . update_proof_or_leaf ( changelog_index, leaf_index, proof) {
277
320
Some ( proof) => proof,
278
321
// This case means that the leaf we are trying to update was
@@ -284,7 +327,7 @@ where
284
327
}
285
328
}
286
329
} else {
287
- if leaf_index != self . rightmost_index as usize {
330
+ if leaf_index != self . next_index as usize {
288
331
return Err ( HasherError :: AppendOnly ) ;
289
332
}
290
333
proof. to_owned ( )
@@ -296,11 +339,11 @@ where
296
339
297
340
/// Appends a new leaf to the tree.
298
341
pub fn append ( & mut self , leaf : & [ u8 ; 32 ] ) -> Result < ( ) , HasherError > {
299
- if self . rightmost_index >= 1 << HEIGHT {
342
+ if self . next_index >= 1 << HEIGHT {
300
343
return Err ( HasherError :: TreeFull ) ;
301
344
}
302
345
303
- if self . rightmost_index == 0 {
346
+ if self . next_index == 0 {
304
347
// NOTE(vadorovsky): This is not mentioned in the whitepaper, but
305
348
// appending to an empty Merkle tree is a special case, where
306
349
// `computer_parent_node` can't be called, because the usual
@@ -321,7 +364,7 @@ where
321
364
} else {
322
365
let mut current_node = * leaf;
323
366
let mut intersection_node = self . rightmost_leaf ;
324
- let intersection_index = self . rightmost_index . trailing_zeros ( ) as usize ;
367
+ let intersection_index = self . next_index . trailing_zeros ( ) as usize ;
325
368
let mut changelog_path = [ [ 0u8 ; 32 ] ; HEIGHT ] ;
326
369
327
370
for ( i, item) in changelog_path. iter_mut ( ) . enumerate ( ) {
@@ -334,7 +377,7 @@ where
334
377
intersection_node = compute_parent_node :: < H > (
335
378
& intersection_node,
336
379
& self . rightmost_proof [ i] ,
337
- self . rightmost_index as usize - 1 ,
380
+ self . next_index as usize - 1 ,
338
381
i,
339
382
) ?;
340
383
self . rightmost_proof [ i] = empty_node;
@@ -347,7 +390,7 @@ where
347
390
current_node = compute_parent_node :: < H > (
348
391
& current_node,
349
392
& self . rightmost_proof [ i] ,
350
- self . rightmost_index as usize - 1 ,
393
+ self . next_index as usize - 1 ,
351
394
i,
352
395
) ?;
353
396
}
@@ -360,7 +403,7 @@ where
360
403
. get_mut ( self . current_changelog_index as usize )
361
404
{
362
405
* changelog_element =
363
- ChangelogEntry :: new ( current_node, changelog_path, self . rightmost_index as usize )
406
+ ChangelogEntry :: new ( current_node, changelog_path, self . next_index as usize )
364
407
}
365
408
self . inc_current_root_index ( ) ;
366
409
* self
@@ -369,9 +412,21 @@ where
369
412
. ok_or ( HasherError :: RootsZero ) ? = current_node;
370
413
}
371
414
372
- self . rightmost_index += 1 ;
415
+ self . next_index += 1 ;
373
416
self . rightmost_leaf = * leaf;
374
417
375
418
Ok ( ( ) )
376
419
}
420
+
421
+ /// Appends a new pair of leaves to the tree.
422
+ pub fn append_two (
423
+ & mut self ,
424
+ leaf_left : & [ u8 ; 32 ] ,
425
+ leaf_right : & [ u8 ; 32 ] ,
426
+ ) -> Result < ( ) , HasherError > {
427
+ // TODO(vadorovsky): Instead of this naive double append, implement an
428
+ // optimized insertion of two leaves.
429
+ self . append ( leaf_left) ?;
430
+ self . append ( leaf_right)
431
+ }
377
432
}
0 commit comments