|
1 | | -use crate::{ |
2 | | - beacon::{BeaconClient, BeaconClientError}, |
3 | | - core::types::Network, |
4 | | -}; |
5 | | -use ethers::{ |
6 | | - providers::{Http, Middleware, Provider}, |
7 | | - types::Filter, |
8 | | -}; |
9 | | -use sha3::{Digest, Keccak256}; |
10 | 1 |
|
/// How much to go back from current block if `from_block` is not provided.
/// 7500 blocks = 25hr (at the ~12 s Ethereum mainnet block time — NOTE(review): confirm for other networks)
const FROM_BLOCKS_AGO_DEFAULT: u64 = 7500;
14 | | - |
/// Proof material used to compute the commitment looked up inside an
/// aggregated-proof blob.
#[derive(Debug)]
pub enum ProofData {
    /// An SP1 proof, identified by its verification key and public inputs.
    SP1 {
        /// 32-byte SP1 verification key.
        vk: [u8; 32],
        /// Serialized public inputs of the proof.
        public_inputs: Vec<u8>,
    },
}
22 | | - |
23 | | -impl ProofData { |
24 | | - fn commitment(&self) -> [u8; 32] { |
25 | | - match self { |
26 | | - ProofData::SP1 { vk, public_inputs } => { |
27 | | - let mut hasher = Keccak256::new(); |
28 | | - hasher.update(vk); |
29 | | - hasher.update(public_inputs); |
30 | | - hasher.finalize().into() |
31 | | - } |
32 | | - } |
33 | | - } |
34 | | -} |
35 | | - |
/// Errors that can occur while checking whether a proof was verified in
/// aggregation mode.
#[derive(Debug)]
pub enum ProofVerificationAggModeError {
    /// The proving system of the supplied proof is not supported in aggregation mode.
    ProvingSystemNotSupportedInAggMode,
    /// The Ethereum RPC provider failed; carries the provider's error message.
    EthereumProviderError(String),
    /// A request to the beacon node failed.
    BeaconClient(BeaconClientError),
    /// The Merkle root recomputed from the blob does not match the root
    /// emitted in the contract event.
    UnmatchedBlobAndEventMerkleRoot,
    /// No scanned `AggregatedProofVerified` event contained the proof commitment.
    ProofNotFoundInLogs,
    /// An event log (or related payload) could not be decoded.
    EventDecoding,
}
45 | | - |
46 | | -/// Given the [`ProofData`], this function checks whether a proof was included |
47 | | -/// in the most recent aggregated proof and verifies the corresponding Merkle root commitment. |
48 | | -/// |
49 | | -/// Note: This functionality is currently in Beta. As a result, we cannot determine with certainty |
50 | | -/// which specific aggregation a proof belongs to. Instead, we optimistically check the from the specified `from_block`. |
51 | | -/// |
52 | | -/// Note: The `from_block` must not be older than 18 days, |
53 | | -/// as blobs expire after that period and will no longer be retrievable. |
54 | | -/// If not provided, it defaults to fetch logs from [`FROM_BLOCKS_AGO_DEFAULT`] |
55 | | -/// |
56 | | -/// The step-by-step verification process includes: |
57 | | -/// 1. Querying the blob versioned hash from the events emitted by the aligned proof aggregation service contract since `from_block` |
58 | | -/// 2. Retrieving the corresponding beacon block using the block’s parent beacon root |
59 | | -/// 3. Fetching the blobs associated with that slot |
60 | | -/// 4. Filtering the blob that matches the queried blob versioned hash |
61 | | -/// 5. Decoding the blob to extract the proofs commitments |
62 | | -/// 6. Checking if the given proof commitment exists within the blob’s proofs |
63 | | -/// 7. Reconstructing the Merkle root and verifying it against the root stored in the contract |
64 | | -pub async fn is_proof_verified_in_aggregation_mode( |
65 | | - proof_data: ProofData, |
66 | | - network: Network, |
67 | | - eth_rpc_url: String, |
68 | | - beacon_client_url: String, |
69 | | - from_block: Option<u64>, |
70 | | -) -> Result<[u8; 32], ProofVerificationAggModeError> { |
71 | | - let eth_rpc_provider = Provider::<Http>::try_from(eth_rpc_url) |
72 | | - .map_err(|e| ProofVerificationAggModeError::EthereumProviderError(e.to_string()))?; |
73 | | - let beacon_client = BeaconClient::new(beacon_client_url); |
74 | | - |
75 | | - let from_block = match from_block { |
76 | | - Some(from_block) => from_block, |
77 | | - None => { |
78 | | - let block_number = eth_rpc_provider |
79 | | - .get_block_number() |
80 | | - .await |
81 | | - .map_err(|e| ProofVerificationAggModeError::EthereumProviderError(e.to_string()))?; |
82 | | - |
83 | | - block_number |
84 | | - .as_u64() |
85 | | - .saturating_sub(FROM_BLOCKS_AGO_DEFAULT) |
86 | | - } |
87 | | - }; |
88 | | - |
89 | | - let filter = Filter::new() |
90 | | - .address(network.get_aligned_proof_agg_service_address()) |
91 | | - .event("AggregatedProofVerified(bytes32,bytes32)") |
92 | | - .from_block(from_block); |
93 | | - |
94 | | - let logs = eth_rpc_provider.get_logs(&filter).await.unwrap(); |
95 | | - for log in logs { |
96 | | - let blob_versioned_hash: [u8; 32] = log.data[0..32] |
97 | | - .try_into() |
98 | | - .map_err(|_| ProofVerificationAggModeError::EventDecoding)?; |
99 | | - let merkle_root = log.topics[1].0; |
100 | | - let Some(block_number) = log.block_number else { |
101 | | - continue; |
102 | | - }; |
103 | | - |
104 | | - let Some(block) = eth_rpc_provider |
105 | | - .get_block(block_number.as_u64()) |
106 | | - .await |
107 | | - .map_err(|e| ProofVerificationAggModeError::EthereumProviderError(e.to_string()))? |
108 | | - else { |
109 | | - continue; |
110 | | - }; |
111 | | - |
112 | | - let Some(beacon_parent_root) = block.parent_beacon_block_root else { |
113 | | - continue; |
114 | | - }; |
115 | | - |
116 | | - let Some(beacon_block) = beacon_client |
117 | | - .get_block_header_from_parent_hash(beacon_parent_root.0) |
118 | | - .await |
119 | | - .map_err(ProofVerificationAggModeError::BeaconClient)? |
120 | | - else { |
121 | | - continue; |
122 | | - }; |
123 | | - |
124 | | - let slot: u64 = beacon_block |
125 | | - .header |
126 | | - .message |
127 | | - .slot |
128 | | - .parse() |
129 | | - .expect("Slot to be parsable number"); |
130 | | - |
131 | | - let Some(blob_data) = beacon_client |
132 | | - .get_blob_by_versioned_hash(slot, blob_versioned_hash) |
133 | | - .await |
134 | | - .map_err(ProofVerificationAggModeError::BeaconClient)? |
135 | | - else { |
136 | | - continue; |
137 | | - }; |
138 | | - |
139 | | - let blob_bytes = |
140 | | - hex::decode(blob_data.blob.replace("0x", "")).expect("A valid hex encoded data"); |
141 | | - let proof_commitments = decoded_blob(blob_bytes); |
142 | | - |
143 | | - if proof_commitments.contains(&proof_data.commitment()) { |
144 | | - if verify_blob_merkle_root(proof_commitments, merkle_root) { |
145 | | - return Ok(merkle_root); |
146 | | - } else { |
147 | | - return Err(ProofVerificationAggModeError::UnmatchedBlobAndEventMerkleRoot); |
148 | | - } |
149 | | - } else { |
150 | | - continue; |
151 | | - } |
152 | | - } |
153 | | - |
154 | | - Err(ProofVerificationAggModeError::ProofNotFoundInLogs) |
155 | | -} |
156 | | - |
/// Extracts the 32-byte proof commitments packed into a blob.
///
/// Every 32nd byte of the raw blob (offsets 0, 32, 64, ...) is a 0x00 padding
/// byte and is skipped; the remaining bytes are concatenated and split into
/// 32-byte hashes. Decoding stops at the first all-zero hash (end marker),
/// and a trailing partial hash is discarded.
fn decoded_blob(blob_data: Vec<u8>) -> Vec<[u8; 32]> {
    // Drop the padding byte that leads every 32-byte group of the raw blob.
    let payload: Vec<u8> = blob_data
        .iter()
        .enumerate()
        .filter_map(|(offset, &byte)| (offset % 32 != 0).then_some(byte))
        .collect();

    let mut proof_hashes = Vec::new();
    // `chunks_exact` silently drops an incomplete trailing hash, matching the
    // byte-by-byte accumulator of the previous implementation.
    for chunk in payload.chunks_exact(32) {
        let hash: [u8; 32] = chunk.try_into().expect("chunk is exactly 32 bytes");
        // An all-zero hash marks the end of the packed proofs.
        if hash == [0u8; 32] {
            break;
        }
        proof_hashes.push(hash);
    }

    proof_hashes
}
191 | | - |
192 | | -pub fn combine_hashes(hash_a: &[u8; 32], hash_b: &[u8; 32]) -> [u8; 32] { |
193 | | - let mut hasher = Keccak256::new(); |
194 | | - hasher.update(hash_a); |
195 | | - hasher.update(hash_b); |
196 | | - hasher.finalize().into() |
197 | | -} |
198 | | - |
199 | | -fn verify_blob_merkle_root(mut commitments: Vec<[u8; 32]>, merkle_root: [u8; 32]) -> bool { |
200 | | - while commitments.len() > 1 { |
201 | | - commitments = commitments |
202 | | - .chunks(2) |
203 | | - .map(|chunk| match chunk { |
204 | | - [a, b] => combine_hashes(a, b), |
205 | | - [a] => combine_hashes(a, a), |
206 | | - _ => panic!("Unexpected chunk size in leaves"), |
207 | | - }) |
208 | | - .collect() |
209 | | - } |
210 | | - |
211 | | - commitments[0] == merkle_root |
212 | | -} |
0 commit comments