|
use std::{
    collections::HashMap,
    fs::File,
    io::{BufReader, BufWriter, Write},
};
| 6 | + |
| 7 | +use lambdaworks_crypto::merkle_tree::{merkle::MerkleTree, traits::IsMerkleTreeBackend}; |
| 8 | +use primitive_types::{H160, U256}; |
| 9 | +use rand::Rng; |
| 10 | +use serde::{Deserialize, Serialize}; |
| 11 | +use sha3::{Digest, Keccak256}; |
| 12 | + |
/// A single account in the rollup state: an address plus its balance and nonce.
///
/// Doubles as the persisted JSON record (serde derives) and as the Merkle-tree
/// leaf type (`IsMerkleTreeBackend` is implemented for it in this file).
#[derive(Clone, Default, Serialize, Deserialize)]
pub struct UserState {
    // 20-byte account address; also the lookup key in `DB::user_states`.
    pub address: H160,
    // Account balance as a 256-bit unsigned integer.
    pub balance: U256,
    // Per-account transaction counter.
    pub nonce: U256,
}
| 19 | + |
/// Merkle-tree backend: leaves are `UserState` values, nodes are 32-byte
/// Keccak256 digests.
impl IsMerkleTreeBackend for UserState {
    type Node = [u8; 32];
    type Data = UserState;

    /// Leaf hash: `keccak256(address || balance_le || nonce_le)`.
    fn hash_data(leaf: &Self::Data) -> Self::Node {
        let mut hasher = Keccak256::new();

        // Fixed-width 32-byte little-endian encoding of both U256 fields, so
        // every leaf hashes a constant-length payload.
        // NOTE(review): Ethereum tooling conventionally hashes big-endian
        // words — confirm little-endian is intended before anchoring these
        // roots on-chain.
        let mut balance_bytes: [u8; 32] = [0u8; 32];
        let mut nonce_bytes: [u8; 32] = [0u8; 32];
        leaf.balance.to_little_endian(&mut balance_bytes);
        leaf.nonce.to_little_endian(&mut nonce_bytes);

        // H160 feeds its raw 20 address bytes into the hasher.
        hasher.update(leaf.address);
        hasher.update(&balance_bytes);
        hasher.update(&nonce_bytes);
        hasher.finalize().into()
    }

    /// Inner-node hash: `keccak256(left_child || right_child)`.
    fn hash_new_parent(child_1: &Self::Node, child_2: &Self::Node) -> Self::Node {
        let mut hasher = Keccak256::new();
        hasher.update(child_1);
        hasher.update(child_2);
        hasher.finalize().into()
    }
}
| 45 | + |
/// In-memory account store backed by a JSON file, with a cached Merkle root.
struct DB {
    // All account states, keyed by account address.
    pub user_states: HashMap<H160, UserState>,
    // Merkle root computed once at load time in `DB::new`; NOT refreshed by
    // `upsert` — use `commitment()` for a root over the current states.
    pub root: [u8; 32],
    // Path of the JSON file this DB was loaded from and is saved to.
    pub file_path: String,
}
| 51 | + |
/// Errors produced by `DB` operations.
#[derive(Debug)]
pub enum DBError {
    /// Underlying I/O or (de)serialization failure, carrying the source
    /// error's message.
    IO(String),
}

// Implement the standard error contract so callers can use `?`, `Box<dyn
// Error>`, and error-reporting crates with this type.
impl std::fmt::Display for DBError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            DBError::IO(msg) => write!(f, "IO error: {}", msg),
        }
    }
}

impl std::error::Error for DBError {}
| 56 | + |
| 57 | +impl DB { |
| 58 | + pub fn new(file_path: String) -> Result<Self, DBError> { |
| 59 | + let file = File::open(&file_path).map_err(|e| DBError::IO(e.to_string()))?; |
| 60 | + let reader = BufReader::new(file); |
| 61 | + let user_states: Vec<UserState> = |
| 62 | + serde_json::from_reader(reader).map_err(|e| DBError::IO(e.to_string()))?; |
| 63 | + let root = MerkleTree::<UserState>::build(&user_states).unwrap().root; |
| 64 | + |
| 65 | + let mut user_states_map: HashMap<H160, UserState> = HashMap::new(); |
| 66 | + for state in user_states { |
| 67 | + user_states_map.insert(state.address, state); |
| 68 | + } |
| 69 | + |
| 70 | + let db = Self { |
| 71 | + user_states: user_states_map, |
| 72 | + root, |
| 73 | + file_path, |
| 74 | + }; |
| 75 | + |
| 76 | + Ok(db) |
| 77 | + } |
| 78 | + |
| 79 | + pub fn save(&self) -> Result<(), DBError> { |
| 80 | + let file = File::create(&self.file_path).map_err(|e| DBError::IO(e.to_string()))?; |
| 81 | + let writer = BufWriter::new(file); |
| 82 | + let values: Vec<UserState> = self.user_states.clone().into_values().collect(); |
| 83 | + serde_json::to_writer(writer, &values).map_err(|e| DBError::IO(e.to_string()))?; |
| 84 | + |
| 85 | + Ok(()) |
| 86 | + } |
| 87 | + |
| 88 | + pub fn commitment(&self) -> [u8; 32] { |
| 89 | + let values: Vec<UserState> = self.user_states.clone().into_values().collect(); |
| 90 | + let root = MerkleTree::<UserState>::build(&values).unwrap().root; |
| 91 | + root |
| 92 | + } |
| 93 | + |
| 94 | + fn initial_state() -> Vec<UserState> { |
| 95 | + vec![] |
| 96 | + } |
| 97 | + |
| 98 | + pub fn upsert(&mut self, address: H160, newState: UserState) { |
| 99 | + self.user_states.insert(address, newState); |
| 100 | + } |
| 101 | +} |
| 102 | + |
/// A single balance transfer between two accounts.
struct Transfer {
    // Sender address.
    pub from: H160,
    // Receiver address (the generator below may pick the same account as the
    // sender).
    pub to: H160,
    // Amount to move from `from` to `to`.
    pub amount: U256,
}
| 108 | + |
| 109 | +fn generate_random_transfers(db: &DB, num_to_generate: usize) -> Vec<Transfer> { |
| 110 | + let mut transfers = vec![]; |
| 111 | + let mut rng = rand::thread_rng(); |
| 112 | + |
| 113 | + for _ in 0..num_to_generate { |
| 114 | + let accounts: Vec<&UserState> = db.user_states.values().collect(); |
| 115 | + |
| 116 | + let sender = accounts |
| 117 | + .get(rng.gen_range(0..db.user_states.len())) |
| 118 | + .cloned() |
| 119 | + .unwrap(); |
| 120 | + |
| 121 | + let receiver = accounts |
| 122 | + .get(rng.gen_range(0..db.user_states.len())) |
| 123 | + .cloned() |
| 124 | + .unwrap(); |
| 125 | + |
| 126 | + let transfer = Transfer { |
| 127 | + amount: sender.balance / 2, |
| 128 | + from: sender.address, |
| 129 | + to: receiver.address, |
| 130 | + }; |
| 131 | + |
| 132 | + transfers.push(transfer); |
| 133 | + } |
| 134 | + |
| 135 | + transfers |
| 136 | +} |
| 137 | + |
| 138 | +fn prove_state_transition(db: &mut DB, transfers: Vec<Transfer>) {} |
| 139 | + |
| 140 | +fn main() { |
| 141 | + // 0. Load merkle tree file, if not created, create initial state |
| 142 | + let mut db = DB::new("./db".to_string()).expect("create db"); |
| 143 | + |
| 144 | + // 1. Create random transfers |
| 145 | + let account_updates = generate_random_transfers(&db, 10); |
| 146 | + |
| 147 | + // 2. Call zkvm and pass (MerkleTree, Updates to perform) |
| 148 | + let proof = prove_state_transition(&mut db, account_updates); |
| 149 | + |
| 150 | + // Fow now, in order for a proof to be aggregated, we first need to submit it via the fast mode or verification layer |
| 151 | + // Let's suppose that our L2 would run the prover once every 24hs and submit it on aligned |
| 152 | + // Once aligned aggregates the proof we will be notified and we'll send the new state commitment on chain |
| 153 | + |
| 154 | + // 4. Send the proof to aligned and wait for verification |
| 155 | + // let response = send_proof_to_be_verified_on_aligned(proof); |
| 156 | + // 5. Wait until proof is aggregated |
| 157 | + // ... |
| 158 | + // 6. Send updateState transaction to Ethereum |
| 159 | + // let receipt = update_state_on_chain(); |
| 160 | +} |
0 commit comments