diff --git a/accounts/Cargo.toml b/accounts/Cargo.toml index beb4cfb4b..4ac89cda1 100644 --- a/accounts/Cargo.toml +++ b/accounts/Cargo.toml @@ -27,6 +27,9 @@ path = "../musig" [dependencies.zkvm] path = "../zkvm" +[dev-dependencies.blockchain] +path = "../blockchain" + [dev-dependencies] rand_chacha = "0.2" hex = "^0.3" diff --git a/accounts/src/tests.rs b/accounts/src/tests.rs index ccfaa05f3..8ee6532e7 100644 --- a/accounts/src/tests.rs +++ b/accounts/src/tests.rs @@ -7,8 +7,7 @@ use curve25519_dalek::scalar::Scalar; use keytree::Xprv; use musig::{Multisignature, Signature}; -use zkvm::blockchain::{BlockHeader, BlockchainState, Mempool, MempoolItem}; -use zkvm::utreexo; +use blockchain::{utreexo, BlockHeader, BlockchainState, Mempool, MempoolItem}; use zkvm::{ Anchor, ClearValue, Contract, ContractID, Program, Prover, TxEntry, TxHeader, VerifiedTx, }; diff --git a/blockchain/.gitignore b/blockchain/.gitignore new file mode 100644 index 000000000..693699042 --- /dev/null +++ b/blockchain/.gitignore @@ -0,0 +1,3 @@ +/target +**/*.rs.bk +Cargo.lock diff --git a/blockchain/Cargo.toml b/blockchain/Cargo.toml new file mode 100644 index 000000000..395aa52b6 --- /dev/null +++ b/blockchain/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "blockchain" +version = "0.1.0" +authors = ["Oleg Andreev "] +edition = "2018" +readme = "README.md" +license = "Apache-2.0" +repository = "https://github.com/stellar/slingshot" +categories = ["cryptography", "blockchain"] +keywords = ["cryptography", "blockchain", "zero-knowledge", "bulletproofs"] +description = "A blockchain protocol for ZkVM transactions" + +[dependencies] +failure = "0.1" +byteorder = "1" +merlin = "2" +rand = "0.7" +subtle = "2" +curve25519-dalek = { version = "2", features = ["serde"] } +serde = { version = "1.0", features=["derive"] } +subtle-encoding = "0.3" +hex = "^0.3" +async-trait = "0.1.24" +siphasher = "0.3.1" + +[dependencies.zkvm] +path = "../zkvm" + +[dependencies.starsig] +path = "../starsig" + 
+[dev-dependencies] +criterion = "0.2" +serde_json = "1.0" diff --git a/blockchain/README.md b/blockchain/README.md new file mode 100644 index 000000000..05012e363 --- /dev/null +++ b/blockchain/README.md @@ -0,0 +1,6 @@ +# Slingshot Blockchain Protocol + +This is an implementation of a blockchain synchronization protocol for ZkVM transactions. + +This does not include p2p networking or persistent data storage. +Only abstract interfaces are provided, to be implemented by a concrete application. diff --git a/blockchain/rust-toolchain b/blockchain/rust-toolchain new file mode 100644 index 000000000..1afbfba08 --- /dev/null +++ b/blockchain/rust-toolchain @@ -0,0 +1 @@ +nightly-2019-11-02 diff --git a/zkvm/src/blockchain/block.rs b/blockchain/src/block.rs similarity index 98% rename from zkvm/src/blockchain/block.rs rename to blockchain/src/block.rs index c00ccd529..b6ccd550d 100644 --- a/zkvm/src/blockchain/block.rs +++ b/blockchain/src/block.rs @@ -1,7 +1,7 @@ use merlin::Transcript; use serde::{Deserialize, Serialize}; -use crate::{Hash, MerkleTree}; +use zkvm::{Hash, MerkleTree}; /// Identifier of the block, computed as a hash of the `BlockHeader`. #[derive(Clone, Copy, PartialEq, Default, Debug)]
+#[macro_use] +extern crate failure; +extern crate serde; + +#[macro_use] +extern crate zkvm; + +extern crate starsig; + mod block; mod errors; +mod protocol; +mod shortid; mod state; +pub mod utreexo; #[cfg(test)] mod tests; pub use self::block::*; pub use self::errors::*; +pub use self::protocol::*; pub use self::state::*; diff --git a/blockchain/src/protocol.rs b/blockchain/src/protocol.rs new file mode 100644 index 000000000..90d84fce2 --- /dev/null +++ b/blockchain/src/protocol.rs @@ -0,0 +1,337 @@ +//! Blockchain protocol implementation. +//! This is an implementation of a p2p protocol to synchronize +//! mempool transactions and blocks. + +use async_trait::async_trait; +use core::convert::AsRef; +use core::hash::Hash; +use rand::{thread_rng, Rng}; +use serde::{Deserialize, Serialize}; +use starsig::{Signature, SigningKey, VerificationKey}; +use std::collections::HashMap; +use std::time::{Duration, Instant}; + +use super::block::{BlockHeader, BlockID}; +use super::shortid::{self, ShortID}; +use super::state::Mempool; +use super::utreexo; +use merlin::Transcript; +use zkvm::Tx; + +const CURRENT_VERSION: u64 = 0; +const SHORTID_NONCE_TTL: usize = 50; + +#[async_trait] +pub trait Network { + type PeerIdentifier: Clone + AsRef<[u8]> + Eq + Hash; + + /// ID of our node. + fn self_id(&self) -> Self::PeerIdentifier; + + /// Send a message to a given peer. 
+ async fn send(&mut self, peer: Self::PeerIdentifier, message: Message); +} + +pub trait Storage { + /// Returns the signed tip of the blockchain + fn tip(&self) -> (BlockHeader, Signature); + + /// Returns a block at a given height + fn block_at_height(&self, height: u64) -> Option; +} + +pub enum ProtocolError { + IncompatibleVersion, + BlockNotFound(u64), + InvalidBlockSignature, +} + +pub struct Node { + network_pubkey: VerificationKey, + network: N, + storage: S, + target_tip: BlockHeader, + peers: HashMap, + shortid_nonce: u64, + shortid_nonce_ttl: usize, + // TBD: add mempool in here +} + +impl Node { + /// Create a new node. + pub fn new(network_pubkey: VerificationKey, network: N, storage: S) -> Self { + let tip = storage.tip().0; + Node { + network_pubkey, + network, + storage, + target_tip: tip, + peers: HashMap::new(), + shortid_nonce: thread_rng().gen::(), + shortid_nonce_ttl: SHORTID_NONCE_TTL, + } + } + + /// Called when a node receives a message from the peer. + pub async fn process_message( + &mut self, + pid: N::PeerIdentifier, + message: Message, + ) -> Result<(), ProtocolError> { + match message { + Message::GetInventory(msg) => self.process_inventory_request(pid, msg).await?, + Message::Inventory(msg) => self.receive_inventory(pid, msg).await, + Message::GetBlock(msg) => self.send_block(pid, msg).await?, + Message::Block(msg) => self.receive_block(pid, msg).await?, + Message::GetMempoolTxs(msg) => self.send_txs(pid, msg).await, + Message::MempoolTxs(msg) => self.receive_txs(pid, msg).await, + } + Ok(()) + } + + /// Called periodically (every 1-2 seconds). 
+ pub async fn synchronize(&mut self) { + self.rotate_shortid_nonce_if_needed(); + + let (tip_header, tip_signature) = self.storage.tip(); + + for (pid, peer) in self.peers.iter().filter(|(_, p)| p.needs_our_inventory) { + self.network + .send( + pid.clone(), + Message::Inventory(Inventory { + version: CURRENT_VERSION, + tip: tip_header.clone(), + tip_signature: tip_signature.clone(), + shortid_nonce: peer.their_short_id_nonce, + shortid_list: self + .mempool_inventory_for_peer(pid, peer.their_short_id_nonce), + }), + ) + .await; + } + + for (pid, peer) in self.peers.iter_mut() { + peer.needs_our_inventory = false; + } + + if self.target_tip.id() != self.storage.tip().0.id() { + self.synchronize_chain().await; + } else { + self.synchronize_mempool().await; + } + + // For peers who have not sent inventory for over a minute, we request inventory again. + let now = Instant::now(); + let interval_secs = 60; + let invpids: Vec<_> = self + .peers + .iter() + .filter(|(_, peer)| { + now.duration_since(peer.last_inventory_received).as_secs() > interval_secs + }) + .map(|(pid, _)| pid.clone()) + .collect(); + for pid in invpids.into_iter() { + self.request_inventory(pid).await; + } + } + + /// Called when a peer connects. + pub async fn peer_connected(&mut self, pid: N::PeerIdentifier) { + self.peers.insert( + pid.clone(), + PeerInfo { + tip: None, + needs_our_inventory: false, + their_short_id_nonce: 0, + missing_shortids: Vec::new(), + shortid_nonce: self.shortid_nonce, + last_inventory_received: Instant::now(), + }, + ); + + self.request_inventory(pid).await; + } + + /// Called when a peer disconnects. + pub async fn peer_diconnected(&mut self, pid: N::PeerIdentifier) { + self.peers.remove(&pid); + } +} + +impl Node { + async fn synchronize_chain(&mut self) {} + + async fn synchronize_mempool(&mut self) { + // 3. 
**If the target tip is the latest**, the node walks all peers in round-robin and constructs lists of [short IDs](#short-id) to request from each peer, + // keeping track of already used IDs. Once all requests are constructed, the [`GetMempoolTxs`](#getmempooltxs) messages are sent out to respective peers. + } + + async fn process_inventory_request( + &mut self, + pid: N::PeerIdentifier, + request: GetInventory, + ) -> Result<(), ProtocolError> { + // FIXME: check the version across all messages + if request.version != CURRENT_VERSION { + return Err(ProtocolError::IncompatibleVersion); + } + self.peers.get_mut(&pid).map(|peer| { + peer.needs_our_inventory = true; + peer.their_short_id_nonce = request.shortid_nonce; + }); + Ok(()) + } + + async fn request_inventory(&mut self, pid: N::PeerIdentifier) { + self.network + .send( + pid, + Message::GetInventory(GetInventory { + version: CURRENT_VERSION, + shortid_nonce: self.shortid_nonce, + }), + ) + .await; + } + + async fn receive_inventory(&mut self, pid: N::PeerIdentifier, inventory: Inventory) {} + + async fn receive_block( + &mut self, + pid: N::PeerIdentifier, + block_msg: Block, + ) -> Result<(), ProtocolError> { + // Check the block signature. + if !verify_block_signature(&block_msg.header, &block_msg.signature, self.network_pubkey) { + return Err(ProtocolError::InvalidBlockSignature); + } + + // Check the block self-consistency, then verify transactions, then apply changes. 
+ + // If block applied well, remove conflicting transactions from mempool. + + Ok(()) + } + + async fn receive_txs(&mut self, pid: N::PeerIdentifier, request: MempoolTxs) {} + + async fn send_block( + &mut self, + pid: N::PeerIdentifier, + request: GetBlock, + ) -> Result<(), ProtocolError> { + let block = self + .storage + .block_at_height(request.height) + .ok_or(ProtocolError::BlockNotFound(request.height))?; + self.network.send(pid, Message::Block(block)).await; + Ok(()) + } + + async fn send_txs(&mut self, pid: N::PeerIdentifier, request: GetMempoolTxs) {} + + fn rotate_shortid_nonce_if_needed(&mut self) { + self.shortid_nonce_ttl -= 1; + if self.shortid_nonce_ttl == 0 { + self.shortid_nonce_ttl = SHORTID_NONCE_TTL; + let new_nonce = thread_rng().gen::<u64>(); + self.shortid_nonce = new_nonce; + for (pid, peer) in self.peers.iter_mut() { + peer.shortid_nonce = new_nonce; + peer.missing_shortids.clear(); + } + } + } + + fn mempool_inventory_for_peer(&self, pid: &N::PeerIdentifier, nonce: u64) -> Vec<ShortID> { + // TBD: list txs in mempool and convert them into short ids. + unimplemented!() + } +} + +/// Signs a block.
+fn create_block_signature(header: &BlockHeader, privkey: SigningKey) -> Signature { + let mut t = Transcript::new(b"ZkVM.stubnet1"); + t.append_message(b"block_id", &header.id()); + Signature::sign(&mut t, privkey) +} + +fn verify_block_signature( + header: &BlockHeader, + signature: &Signature, + pubkey: VerificationKey, +) -> bool { + let mut t = Transcript::new(b"ZkVM.stubnet1"); + t.append_message(b"block_id", &header.id()); + signature.verify(&mut t, pubkey).is_ok() +} + +/// Enumeration of all protocol messages +#[derive(Clone, Serialize, Deserialize)] +pub enum Message { + GetInventory(GetInventory), + Inventory(Inventory), + GetBlock(GetBlock), + Block(Block), + GetMempoolTxs(GetMempoolTxs), + MempoolTxs(MempoolTxs), +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct GetInventory { + version: u64, + shortid_nonce: u64, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct Inventory { + version: u64, + tip: BlockHeader, + tip_signature: Signature, + shortid_nonce: u64, + shortid_list: Vec, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct GetBlock { + height: u64, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct Block { + header: BlockHeader, + signature: Signature, + txs: Vec, +} + +/// Transaction annotated with Utreexo proofs. +#[derive(Clone, Serialize, Deserialize)] +pub struct BlockTx { + /// Utreexo proofs. + pub proofs: Vec, + /// ZkVM transaction. 
+ pub tx: Tx, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct GetMempoolTxs { + shortid_nonce: u64, + shortids: Vec, +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct MempoolTxs { + tip: BlockID, + txs: Vec, +} + +struct PeerInfo { + tip: Option, + needs_our_inventory: bool, + their_short_id_nonce: u64, + missing_shortids: Vec, + shortid_nonce: u64, + last_inventory_received: Instant, +} diff --git a/blockchain/src/shortid.rs b/blockchain/src/shortid.rs new file mode 100644 index 000000000..1c8992139 --- /dev/null +++ b/blockchain/src/shortid.rs @@ -0,0 +1,104 @@ +//! Short ID implementation. +//! A 6-byte transaction ID, specified for a given nonce and a context (u64-sized slice). +//! +//! 1. Initialize [SipHash-2-4](https://131002.net/siphash/) with k0 set to nonce, k1 set to the little-endian u64 read from the context string. +//! 2. Feed transaction ID as an input to SipHash. +//! 3. Read u64 output, drop two most significant bytes. +//! +//! Based on [BIP-152](https://github.com/bitcoin/bips/blob/master/bip-0152.mediawiki). + +use core::hash::Hasher; +use serde::{Deserialize, Serialize}; +use siphasher::sip::SipHasher; + +/// Short ID definition +#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct ShortID { + inner: u64, +} + +/// Hasher that produces `ID`s +#[derive(Copy, Clone, Debug)] +pub struct Transform { + sip: SipHasher, +} + +impl ShortID { + /// Reads Short ID from a slice of bytes. + /// Returns None if the slice is shorter than 6 bytes. 
+ pub fn from_bytes(slice: &[u8]) -> Option { + if slice.len() == 6 { + Some(ShortID { + inner: slice[0] as u64 + + ((slice[1] as u64) << 8) + + ((slice[2] as u64) << 16) + + ((slice[3] as u64) << 24) + + ((slice[4] as u64) << 32) + + ((slice[5] as u64) << 40), + }) + } else { + None + } + } + + pub fn to_bytes(self) -> [u8; 6] { + [ + (self.inner & 0xff) as u8, + ((self.inner >> 8) & 0xff) as u8, + ((self.inner >> 16) & 0xff) as u8, + ((self.inner >> 24) & 0xff) as u8, + ((self.inner >> 32) & 0xff) as u8, + ((self.inner >> 40) & 0xff) as u8, + ] + } + + fn from_u64(int: u64) -> Self { + ShortID { + inner: int & 0xffff_ffff_ffff, + } + } +} + +impl Transform { + /// Creates a new Short ID hasher from a nonce and a context string. + pub fn new(nonce: u64, context: &[u8]) -> Self { + Self { + sip: SipHasher::new_with_keys(nonce, read_le64(context)), + } + } + + /// Transforms a long identifier into a `ShortID`. + pub fn apply(&self, longid: impl AsRef<[u8]>) -> ShortID { + let mut h = self.sip.clone(); + h.write(longid.as_ref()); + ShortID::from_u64(h.finish()) + } +} + +/// Reads little-endian u64 from a slice. +/// Treats missing higher-order bits as zeroes. +fn read_le64(slice: &[u8]) -> u64 { + slice + .iter() + .enumerate() + .fold(0u64, |r, (i, b)| r + ((*b as u64) << (i * 8))) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn invalid_proofs() { + let t = Transform::new(0u64, &[42u8]); + let id_foo = t.apply(b"foo"); + let id_bar = t.apply(b"bar"); + assert_eq!(id_foo.to_bytes(), [0x50, 0x74, 0x5c, 0xd8, 0x7d, 0xd7]); + assert_eq!(id_bar.to_bytes(), [0x5a, 0x48, 0x9e, 0xb8, 0x6e, 0x61]); + let id_foo2 = ShortID::from_bytes(&[0x50, 0x74, 0x5c, 0xd8, 0x7d, 0xd7]).unwrap(); + assert_eq!(id_foo2.to_bytes(), [0x50, 0x74, 0x5c, 0xd8, 0x7d, 0xd7]); + let id_foo3 = ShortID::from_u64(0xdead_d77d_d85c_7450); // top 2 bytes are zeroed. 
+ assert_eq!(id_foo3.to_bytes(), [0x50, 0x74, 0x5c, 0xd8, 0x7d, 0xd7]); + } +} diff --git a/zkvm/src/blockchain/state.rs b/blockchain/src/state.rs similarity index 98% rename from zkvm/src/blockchain/state.rs rename to blockchain/src/state.rs index 833e30376..9a10d9016 100644 --- a/zkvm/src/blockchain/state.rs +++ b/blockchain/src/state.rs @@ -4,9 +4,8 @@ use serde::{Deserialize, Serialize}; use super::block::{BlockHeader, BlockID}; use super::errors::BlockchainError; -use crate::merkle::{Hasher, MerkleTree}; use crate::utreexo::{self, utreexo_hasher, Catchup, Forest, WorkForest}; -use crate::{ContractID, TxEntry, TxHeader, TxLog, VerifiedTx}; +use zkvm::{ContractID, Hasher, MerkleTree, TxEntry, TxHeader, TxLog, VerifiedTx}; /// State of the blockchain node. #[derive(Clone, Serialize, Deserialize)] diff --git a/zkvm/src/blockchain/tests.rs b/blockchain/src/tests.rs similarity index 92% rename from zkvm/src/blockchain/tests.rs rename to blockchain/src/tests.rs index a6409139f..2c62ac637 100644 --- a/zkvm/src/blockchain/tests.rs +++ b/blockchain/src/tests.rs @@ -1,13 +1,12 @@ -use bulletproofs::BulletproofGens; use curve25519_dalek::scalar::Scalar; use merlin::Transcript; -use musig::{Multisignature, Signature}; use rand::RngCore; +use zkvm::bulletproofs::BulletproofGens; use super::*; -use crate::{ - utreexo, Anchor, Commitment, Contract, ContractID, PortableItem, Predicate, Program, Prover, - String, TxHeader, Value, VerificationKey, VerifiedTx, +use zkvm::{ + Anchor, Commitment, Contract, ContractID, Multisignature, PortableItem, Predicate, Program, + Prover, Signature, String, TxHeader, Value, VerificationKey, VerifiedTx, }; fn make_predicate(privkey: u64) -> Predicate { diff --git a/zkvm/src/utreexo/forest.rs b/blockchain/src/utreexo/forest.rs similarity index 89% rename from zkvm/src/utreexo/forest.rs rename to blockchain/src/utreexo/forest.rs index 3c375b5b9..aa1e9f889 100644 --- a/zkvm/src/utreexo/forest.rs +++ b/blockchain/src/utreexo/forest.rs @@ -5,12 
+5,12 @@ use std::fmt; use std::mem; use super::heap::{Heap, HeapIndex}; -use crate::merkle::{Directions, Hash, Hasher, MerkleItem, MerkleTree, Path, Position}; +use zkvm::merkle::{Directions, Hash, Hasher, MerkleItem, MerkleTree, Path, Position}; /// Forest consists of a number of roots of merkle binary trees. #[derive(Clone, Serialize, Deserialize)] pub struct Forest { - #[serde(with = "crate::serialization::array64")] + #[serde(with = "array64")] pub(super) roots: [Option; 64], // roots of the trees for levels 0 to 63 } @@ -480,10 +480,7 @@ impl Catchup { // Construct a new directions object. // We cannot take it from path because it does not have all neighbors yet. - let directions = Directions { - position: path.position, - depth: self.forest.heap.get_ref(root_index).level, - }; + let directions = Directions::new(path.position, self.forest.heap.get_ref(root_index).level); path.neighbors = directions .rev() @@ -636,3 +633,100 @@ impl Node { Ok(()) } } + +/// Serde adaptor for 64-item array +mod array64 { + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + pub fn serialize(value: &[T; 64], serializer: S) -> Result + where + T: Serialize + Clone, + S: Serializer, + { + value.to_vec().serialize(serializer) + } + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result<[T; 64], D::Error> + where + D: Deserializer<'de>, + T: Deserialize<'de> + Default, + { + let mut vec = Vec::::deserialize(deserializer)?; + if vec.len() != 64 { + return Err(serde::de::Error::invalid_length( + vec.len(), + &"a 64-item array", + )); + } + let mut buf: [T; 64] = [ + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + 
T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + T::default(), + ]; + for i in 0..64 { + buf[63 - i] = vec.pop().unwrap(); + } + Ok(buf) + } +} diff --git a/zkvm/src/utreexo/heap.rs b/blockchain/src/utreexo/heap.rs similarity index 100% rename from zkvm/src/utreexo/heap.rs rename to blockchain/src/utreexo/heap.rs diff --git a/zkvm/src/utreexo/mod.rs b/blockchain/src/utreexo/mod.rs similarity index 78% rename from zkvm/src/utreexo/mod.rs rename to blockchain/src/utreexo/mod.rs index 579049937..8bd93c774 100644 --- a/zkvm/src/utreexo/mod.rs +++ b/blockchain/src/utreexo/mod.rs @@ -8,9 +8,9 @@ mod tests; // Public API pub use self::forest::{Catchup, Forest, Proof, UtreexoError, WorkForest}; -pub use super::merkle::Hasher; +pub use zkvm::Hasher; /// Utreexo-labeled hasher for the merkle tree nodes. 
-pub fn utreexo_hasher() -> Hasher { +pub fn utreexo_hasher() -> Hasher { Hasher::new(b"ZkVM.utreexo") } diff --git a/zkvm/src/utreexo/tests.rs b/blockchain/src/utreexo/tests.rs similarity index 71% rename from zkvm/src/utreexo/tests.rs rename to blockchain/src/utreexo/tests.rs index 8518f941e..6048803bf 100644 --- a/zkvm/src/utreexo/tests.rs +++ b/blockchain/src/utreexo/tests.rs @@ -1,17 +1,25 @@ use merlin::Transcript; use super::*; -use crate::merkle::*; +use zkvm::merkle::*; -impl MerkleItem for u64 { +struct Item(u64); + +impl MerkleItem for Item { fn commit(&self, t: &mut Transcript) { - t.append_u64(b"test_item", *self); + t.append_u64(b"test_item", self.0); + } +} + +impl From for Item { + fn from(x: u64) -> Item { + Item(x) } } #[test] fn empty_utreexo() { - let hasher = utreexo_hasher::(); + let hasher = utreexo_hasher::(); let forest0 = Forest::new(); assert_eq!( forest0.root(&hasher), @@ -27,23 +35,23 @@ fn transient_items_utreexo() { let (_forest1, _catchup) = forest0 .work_forest() .batch::<_, ()>(|forest| { - forest.insert(&0, &hasher); - forest.insert(&1, &hasher); + forest.insert(&Item(0), &hasher); + forest.insert(&Item(1), &hasher); forest - .delete(&1, Proof::Transient, &hasher) + .delete(&Item(1), Proof::Transient, &hasher) .expect("just received proof should not fail"); forest - .delete(&0, Proof::Transient, &hasher) + .delete(&Item(0), Proof::Transient, &hasher) .expect("just received proof should not fail"); // double spends are not allowed assert_eq!( - forest.delete(&1, Proof::Transient, &hasher), + forest.delete(&Item(1), Proof::Transient, &hasher), Err(UtreexoError::InvalidProof) ); assert_eq!( - forest.delete(&0, Proof::Transient, &hasher), + forest.delete(&Item(0), Proof::Transient, &hasher), Err(UtreexoError::InvalidProof) ); @@ -61,7 +69,7 @@ fn insert_to_utreexo() { .work_forest() .batch::<_, ()>(|forest| { for i in 0..6 { - forest.insert(&i, &hasher); + forest.insert(&Item(i), &hasher); } Ok(()) }) @@ -70,14 +78,14 @@ fn 
insert_to_utreexo() { assert_eq!( forest1.root(&hasher), - MerkleTree::root(b"ZkVM.utreexo", 0..6) + MerkleTree::root(b"ZkVM.utreexo", (0..6).map(Item)) ); // update the proofs let proofs1 = (0..6) .map(|i| { catchup1 - .update_proof(&(i as u64), Proof::Transient, &hasher) + .update_proof(&Item(i), Proof::Transient, &hasher) .unwrap() }) .collect::>(); @@ -86,8 +94,8 @@ fn insert_to_utreexo() { let _ = forest1 .work_forest() .batch::<_, UtreexoError>(|forest| { - for i in 0..6u64 { - forest.delete(&i, &proofs1[i as usize], &hasher)?; + for i in 0..6 { + forest.delete(&Item(i), &proofs1[i as usize], &hasher)?; } Ok(()) }) @@ -102,7 +110,7 @@ fn transaction_success() { .work_forest() .batch::<_, ()>(|forest| { for i in 0..6 { - forest.insert(&i, &hasher); + forest.insert(&Item(i), &hasher); } Ok(()) }) @@ -113,7 +121,7 @@ fn transaction_success() { let proofs1 = (0..6) .map(|i| { catchup1 - .update_proof(&(i as u64), Proof::Transient, &hasher) + .update_proof(&Item(i), Proof::Transient, &hasher) .unwrap() }) .collect::>(); @@ -121,7 +129,7 @@ fn transaction_success() { let proofs1 = proofs1 .into_iter() .enumerate() - .map(|(i, p)| catchup1.update_proof(&(i as u64), p, &hasher).unwrap()) + .map(|(i, p)| catchup1.update_proof(&Item(i as u64), p, &hasher).unwrap()) .collect::>(); // d @@ -134,8 +142,8 @@ fn transaction_success() { // and check that all pre-transaction changes were respected. 
let mut wf = forest1.work_forest(); - wf.insert(&6, &hasher); - wf.delete(&0, &proofs1[0], &hasher) + wf.insert(&Item(6), &hasher); + wf.delete(&Item(0), &proofs1[0], &hasher) .expect("Should not fail."); // d @@ -145,11 +153,11 @@ fn transaction_success() { // x 1 2 3 4 5 6 match wf.batch::<_, ()>(|wf| { - wf.insert(&7, &hasher); - wf.insert(&8, &hasher); - wf.delete(&7, &Proof::Transient, &hasher) + wf.insert(&Item(7), &hasher); + wf.insert(&Item(8), &hasher); + wf.delete(&Item(7), &Proof::Transient, &hasher) .expect("Should not fail."); - wf.delete(&1, &proofs1[1], &hasher) + wf.delete(&Item(1), &proofs1[1], &hasher) .expect("Should not fail."); Ok(()) }) { @@ -166,7 +174,10 @@ fn transaction_success() { // x x 2 3 4 5 6 8 assert_eq!( new_forest.root(&hasher), - MerkleTree::root(b"ZkVM.utreexo", &[2, 3, 4, 5, 6, 8]) + MerkleTree::root( + b"ZkVM.utreexo", + vec![2, 3, 4, 5, 6, 8].into_iter().map(Item) + ) ); } @@ -178,7 +189,7 @@ fn transaction_fail() { .work_forest() .batch::<_, ()>(|forest| { for i in 0..6 { - forest.insert(&i, &hasher); + forest.insert(&Item(i), &hasher); } Ok(()) }) @@ -189,7 +200,7 @@ fn transaction_fail() { let proofs1 = (0..6) .map(|i| { catchup1 - .update_proof(&(i as u64), Proof::Transient, &hasher) + .update_proof(&Item(i), Proof::Transient, &hasher) .unwrap() }) .collect::>(); @@ -204,8 +215,8 @@ fn transaction_fail() { // and check that all pre-transaction changes were respected. 
let mut wf = forest1.work_forest(); - wf.insert(&6, &hasher); - wf.delete(&0, &proofs1[0], &hasher) + wf.insert(&Item(6), &hasher); + wf.delete(&Item(0), &proofs1[0], &hasher) .expect("Should not fail."); // d @@ -215,11 +226,11 @@ fn transaction_fail() { // x 1 2 3 4 5 6 match wf.batch(|wf| { - wf.insert(&7, &hasher); - wf.insert(&8, &hasher); - wf.delete(&7, &Proof::Transient, &hasher) + wf.insert(&Item(7), &hasher); + wf.insert(&Item(8), &hasher); + wf.delete(&Item(7), &Proof::Transient, &hasher) .expect("Should not fail."); - wf.delete(&1, &proofs1[1], &hasher) + wf.delete(&Item(1), &proofs1[1], &hasher) .expect("Should not fail."); Err(UtreexoError::InvalidProof) // dummy error to fail the update batch }) { @@ -237,7 +248,10 @@ fn transaction_fail() { // x 1 2 3 4 5 6 x 1 2 3 4 5 6 2 3 4 5 1 6 assert_eq!( new_forest.root(&hasher), - MerkleTree::root(b"ZkVM.utreexo", &[2, 3, 4, 5, 1, 6]) + MerkleTree::root( + b"ZkVM.utreexo", + vec![2, 3, 4, 5, 1, 6].into_iter().map(Item) + ) ); } @@ -250,7 +264,7 @@ fn insert_and_delete_utreexo() { .work_forest() .batch::<_, ()>(|forest| { for i in 0..n { - forest.insert(&i, &hasher); + forest.insert(&Item(i), &hasher); } Ok(()) }) @@ -261,7 +275,7 @@ fn insert_and_delete_utreexo() { let proofs1 = (0..n) .map(|i| { catchup1 - .update_proof(&(i as u64), Proof::Transient, &hasher) + .update_proof(&Item(i), Proof::Transient, &hasher) .unwrap() }) .collect::>(); @@ -270,19 +284,24 @@ fn insert_and_delete_utreexo() { forest1 .work_forest() - .delete(&0u64, &proofs1[0], &hasher) + .delete(&Item(0), &proofs1[0], &hasher) .expect("proof should be valid"); forest1 .work_forest() - .delete(&5u64, &proofs1[5], &hasher) + .delete(&Item(5), &proofs1[5], &hasher) .expect("proof should be valid"); - fn verify_update( + fn verify_update( forest: &Forest, - new_set: &[M], + new_set: I, upd: impl FnOnce(&mut WorkForest), - ) -> (Forest, Catchup) { - let hasher = utreexo_hasher::(); + ) -> (Forest, Catchup) + where + I: IntoIterator, + I::Item: 
core::borrow::Borrow, + { + use core::borrow::Borrow; + let hasher = utreexo_hasher::(); let (forest2, catchup2) = forest .work_forest() .batch::<_, ()>(|forest| { @@ -294,7 +313,10 @@ fn insert_and_delete_utreexo() { assert_eq!( forest2.root(&hasher), - MerkleTree::root(b"ZkVM.utreexo", new_set) + MerkleTree::root( + b"ZkVM.utreexo", + new_set.into_iter().map(|x| Item(*x.borrow())) + ) ); (forest2, catchup2) @@ -308,10 +330,10 @@ fn insert_and_delete_utreexo() { // 0 1 2 3 4 5 x 1 2 3 4 5 2 3 4 5 1 forest1 .work_forest() - .delete(&0u64, &proofs1[0], &hasher) + .delete(&Item(0), &proofs1[0], &hasher) .unwrap(); let (_, _) = verify_update(&forest1, &[2, 3, 4, 5, 1], |forest| { - forest.delete(&0u64, &proofs1[0], &hasher).unwrap(); + forest.delete(&Item(0), &proofs1[0], &hasher).unwrap(); }); // delete 1: @@ -322,10 +344,10 @@ fn insert_and_delete_utreexo() { // 0 1 2 3 4 5 0 x 2 3 4 5 2 3 4 5 0 forest1 .work_forest() - .delete(&1u64, &proofs1[1], &hasher) + .delete(&Item(1), &proofs1[1], &hasher) .unwrap(); let (_, _) = verify_update(&forest1, &[2, 3, 4, 5, 0], |forest| { - forest.delete(&1u64, &proofs1[1], &hasher).unwrap(); + forest.delete(&Item(1), &proofs1[1], &hasher).unwrap(); }); // delete 2: @@ -335,7 +357,7 @@ fn insert_and_delete_utreexo() { // |\ |\ |\ |\ |\ |\ |\ // 0 1 2 3 4 5 0 1 x 3 4 5 0 1 4 5 3 let (_, _) = verify_update(&forest1, &[0, 1, 4, 5, 3], |forest| { - forest.delete(&2u64, &proofs1[2], &hasher).unwrap(); + forest.delete(&Item(2), &proofs1[2], &hasher).unwrap(); }); // delete 5: @@ -345,7 +367,7 @@ fn insert_and_delete_utreexo() { // |\ |\ |\ |\ |\ |\ |\ // 0 1 2 3 4 5 0 1 2 3 4 x 0 1 2 3 4 let (_, _) = verify_update(&forest1, &[0, 1, 2, 3, 4], |forest| { - forest.delete(&5u64, &proofs1[5], &hasher).unwrap(); + forest.delete(&Item(5), &proofs1[5], &hasher).unwrap(); }); // delete 2,3: @@ -355,14 +377,14 @@ fn insert_and_delete_utreexo() { // |\ |\ |\ |\ |\ |\ |\ // 0 1 2 3 4 5 0 1 x x 4 5 0 1 4 5 let (_, _) = verify_update(&forest1, &[0, 1, 
4, 5], |forest| { - forest.delete(&2u64, &proofs1[2], &hasher).unwrap(); - forest.delete(&3u64, &proofs1[3], &hasher).unwrap(); + forest.delete(&Item(2), &proofs1[2], &hasher).unwrap(); + forest.delete(&Item(3), &proofs1[3], &hasher).unwrap(); }); // delete in another order let (_, _) = verify_update(&forest1, &[0, 1, 4, 5], |forest| { - forest.delete(&3u64, &proofs1[3], &hasher).unwrap(); - forest.delete(&2u64, &proofs1[2], &hasher).unwrap(); + forest.delete(&Item(3), &proofs1[3], &hasher).unwrap(); + forest.delete(&Item(2), &proofs1[2], &hasher).unwrap(); }); // delete 0,3: @@ -372,8 +394,8 @@ fn insert_and_delete_utreexo() { // |\ |\ |\ |\ |\ |\ // 0 1 2 3 4 5 x 1 2 x 4 5 1 2 4 5 let (_, _) = verify_update(&forest1, &[1, 2, 4, 5], |forest| { - forest.delete(&0u64, &proofs1[0], &hasher).unwrap(); - forest.delete(&3u64, &proofs1[3], &hasher).unwrap(); + forest.delete(&Item(0), &proofs1[0], &hasher).unwrap(); + forest.delete(&Item(3), &proofs1[3], &hasher).unwrap(); }); // delete 0, insert 6, 7: @@ -383,16 +405,16 @@ fn insert_and_delete_utreexo() { // |\ |\ |\ |\ |\ |\ |\ |\ // 0 1 2 3 4 5 x 1 2 3 4 5 6 7 2 3 4 5 1 6 7 let (forest2, catchup) = verify_update(&forest1, &[2, 3, 4, 5, 1, 6, 7], |forest| { - forest.delete(&0u64, &proofs1[0], &hasher).unwrap(); - forest.insert(&6u64, &hasher); - forest.insert(&7u64, &hasher); + forest.delete(&Item(0), &proofs1[0], &hasher).unwrap(); + forest.insert(&Item(6), &hasher); + forest.insert(&Item(7), &hasher); }); let proof7 = catchup - .update_proof(&7u64, Proof::Transient, &hasher) + .update_proof(&Item(7), Proof::Transient, &hasher) .unwrap(); let proof2 = catchup - .update_proof(&2u64, proofs1[2].clone(), &hasher) + .update_proof(&Item(2), proofs1[2].clone(), &hasher) .unwrap(); // delete 2, 7: @@ -403,7 +425,7 @@ fn insert_and_delete_utreexo() { // 2 3 4 5 1 6 7 x 3 4 5 1 6 x 4 5 1 6 3 // let (_forest2, _catchup) = verify_update(&forest2, &[4, 5, 1, 6, 3], |forest| { - forest.delete(&2u64, &proof2, &hasher).unwrap(); - 
forest.delete(&7u64, &proof7, &hasher).unwrap(); + forest.delete(&Item(2), &proof2, &hasher).unwrap(); + forest.delete(&Item(7), &proof7, &hasher).unwrap(); }); } diff --git a/demo/Cargo.toml b/demo/Cargo.toml index e4e3d017f..4cb1a82cc 100644 --- a/demo/Cargo.toml +++ b/demo/Cargo.toml @@ -32,6 +32,9 @@ path = "../musig" [dependencies.zkvm] path = "../zkvm" +[dependencies.blockchain] +path = "../blockchain" + [dependencies.accounts] path = "../accounts" diff --git a/demo/src/account.rs b/demo/src/account.rs index 891bbb9f4..045959eac 100644 --- a/demo/src/account.rs +++ b/demo/src/account.rs @@ -9,7 +9,7 @@ use accounts::{Account, Receiver, ReceiverWitness}; use keytree::Xprv; use musig::Multisignature; -use zkvm::utreexo; +use blockchain::utreexo; use zkvm::{Anchor, ClearValue, Contract, ContractID, Tx, TxEntry, TxLog, VerifiedTx}; use crate::asset::AssetRecord; @@ -321,7 +321,7 @@ impl Wallet { ( zkvm::Tx, zkvm::TxID, - Vec, + Vec, accounts::ReceiverReply, ), &'static str, @@ -475,7 +475,7 @@ impl Wallet { ( zkvm::Tx, zkvm::TxID, - Vec, + Vec, accounts::ReceiverReply, ), &'static str, diff --git a/demo/src/blockchain.rs b/demo/src/blockchain.rs index 08bb6ec3a..ba6417681 100644 --- a/demo/src/blockchain.rs +++ b/demo/src/blockchain.rs @@ -3,8 +3,8 @@ use super::schema::*; use super::util; -use zkvm::blockchain::{BlockHeader, BlockchainState}; -use zkvm::utreexo; +use blockchain::utreexo; +use blockchain::{BlockHeader, BlockchainState}; use zkvm::{Tx, TxEntry}; use serde_json::Value as JsonValue; diff --git a/demo/src/db.rs b/demo/src/db.rs index f3b81ecad..4f024ffbb 100644 --- a/demo/src/db.rs +++ b/demo/src/db.rs @@ -4,7 +4,8 @@ use std::env; use diesel::prelude::*; use diesel::sqlite::SqliteConnection; -use zkvm::{Anchor, BlockchainState}; +use blockchain::BlockchainState; +use zkvm::Anchor; use crate::account::{AccountRecord, Wallet}; use crate::asset::{self, AssetRecord}; diff --git a/demo/src/handlers.rs b/demo/src/handlers.rs index d5a518cd0..e2b463bb9 
100644 --- a/demo/src/handlers.rs +++ b/demo/src/handlers.rs @@ -11,8 +11,8 @@ use rocket::{Request, State}; use rocket_contrib::serve::StaticFiles; use rocket_contrib::templates::Template; +use blockchain::utreexo; use bulletproofs::BulletproofGens; -use zkvm::utreexo; use p2p::Direction; diff --git a/demo/src/mempool.rs b/demo/src/mempool.rs index 051d6e306..ce9124030 100644 --- a/demo/src/mempool.rs +++ b/demo/src/mempool.rs @@ -1,5 +1,5 @@ -use zkvm::blockchain::MempoolItem; -use zkvm::utreexo; +use blockchain::utreexo; +use blockchain::{self, MempoolItem}; use zkvm::{Encodable, Tx, VerifiedTx}; /// Mempool item @@ -20,7 +20,7 @@ impl MempoolItem for MempoolTx { } /// Our concrete instance of mempool -pub type Mempool = zkvm::blockchain::Mempool; +pub type Mempool = blockchain::Mempool; // Estimated cost of a memory occupied by transactions in the mempool. pub fn estimated_memory_cost(mempool: &Mempool) -> usize { diff --git a/demo/templates/network/status.html.tera b/demo/templates/network/status.html.tera index 39d273cdc..4f5e14757 100644 --- a/demo/templates/network/status.html.tera +++ b/demo/templates/network/status.html.tera @@ -62,7 +62,7 @@

-

Connect a peer

+

Add connection

diff --git a/starsig/src/key.rs b/starsig/src/key.rs index b0f59dd99..17d943964 100644 --- a/starsig/src/key.rs +++ b/starsig/src/key.rs @@ -3,6 +3,9 @@ use curve25519_dalek::ristretto::{CompressedRistretto, RistrettoPoint}; use curve25519_dalek::scalar::Scalar; use serde::{Deserialize, Serialize}; +/// Signing key (aka "privkey") is a type alias for the scalar in Ristretto255 group. +pub type SigningKey = Scalar; + /// Verification key (aka "pubkey") is a wrapper type around a Ristretto point /// that lets the verifier to check the signature. #[derive(Copy, Clone, PartialEq, Eq, Default, Debug, Serialize, Deserialize)] diff --git a/starsig/src/lib.rs b/starsig/src/lib.rs index 920d4bdf4..5b808cf93 100644 --- a/starsig/src/lib.rs +++ b/starsig/src/lib.rs @@ -17,6 +17,6 @@ mod tests; pub use self::batch::{BatchVerification, BatchVerifier, SingleVerifier}; pub use self::errors::StarsigError; -pub use self::key::VerificationKey; +pub use self::key::{SigningKey,VerificationKey}; pub use self::signature::Signature; pub use self::transcript::TranscriptProtocol; diff --git a/zkvm/docs/zkvm-stubnet.md b/zkvm/docs/zkvm-stubnet.md new file mode 100644 index 000000000..cef9f8963 --- /dev/null +++ b/zkvm/docs/zkvm-stubnet.md @@ -0,0 +1,184 @@ +# ZkVM stubnet protocol + +This document describes the "stubnet" p2p communication protocol for ZkVM blockchain. + +## Stubnet goal + +It uses proper p2p transaction and block broadcast, but uses a single pre-determinate party to announce blocks (centralized block signer). + +However, to make transition to decentralized consensus easier, nothing else in the protocol assumes the central party. +All peers are equal and signed block can originate from any node. + +## Definitions + +### Node + +A member of the network that maintains a blockchain state and sends/receives [messages](#messages) to/from its [peers](#peer). + +### Peer + +Another [node](#node) that’s connected to your node. 
+ +### Inbound peer + +A connection initiated by a [peer](#peer) to your [node](#node). + +### Outbound peer + +A connection initiated by your [node](#node) to a [peer](#peer). + +### BlockchainTx + +A transaction envelope format that contains pure ZkVM transaction and a list of Utreexo proofs. + +### Block + +A block envelope format that contains a BlockID and a list of [BlockchainTx](#blockchaintx) objects. + + +### Short ID + +A 6-byte transaction ID, specified for a given _nonce_ (little-endian u64). + +1. Initialize [SipHash-2-4](https://131002.net/siphash/) with k0 set to nonce, k1 set to the first 8 bytes as little-endian u64 of the recipient’s Peer ID. +2. Feed transaction ID as an input to SipHash. +3. Read u64 output, drop two most significant bytes. + +See also [BIP-152](https://github.com/bitcoin/bips/blob/master/bip-0152.mediawiki). + + +## Protocol + +The node maintains the following state: + +1. Blockchain state and mempool. +2. Target tip. +3. Current nonce for [short IDs](#short-id) +4. States of connected peers. +5. Configuration parameter `max_msg_size` that limits amount of data to be sent or received. + +Each peer has the following state: + +1. Peer's tip. +2. Flag: `needs_inventory`. +3. List of short IDs that are missing in the mempool, along with their nonce. +4. Timestamp of the last inventory received. + +Upon receiving an inbound connection, or making an outbound connection, a node sends [`GetInventory`](#getinventory) to the peer +with the same random nonce across all peers (so responses contain comparable [short IDs](#short-id)). The random nonce is rotated every minute. + +When receiving a [`GetInventory`](#getinventory) message, the peer is marked as `needs_inventory`. +Required delay allows avoiding resource exhaustion with repeated request and probing the state of the node. + +When receiving an [`Inventory`](#inventory) message: + +1. Peer's tip is remembered per-peer. +2. 
If the tip block header is higher than the current target one, it is verified and remembered as a new target one. +3. If the tip matches, the list of mempool transactions is remembered per-peer and filtered down against already present transactions, so it only contains transactions that the node does not have, but the peer does have. +4. Bump the timestamp of the inventory for the peer. + +Periodically, every 2 seconds: + +1. The peers who have `needs_inventory=true` are sent a new [`Inventory`](#inventory) message. +2. **If the target tip does not match the current state,** the node requests the next block using [`GetBlock`](#getblock) from the random peer. +3. **If the target tip is the latest**, the node walks all peers in round-robin and constructs lists of [short IDs](#short-id) to request from each peer, keeping track of already used IDs. Once all requests are constructed, the [`GetMempoolTxs`](#getmempooltxs) messages are sent out to respective peers. +4. For peers who have not sent inventory for over a minute, we send [`GetInventory`](#getinventory) again. + +Periodically, every 60 seconds: + +1. Set a new random [short ID](#short-id) nonce. +2. Clear all the short IDs stored per peer. + +When [`GetBlock`](#getblock) message is received, +we reply immediately with the block requested using [`Block`](#block) message. + +When [`Blocks`](#blocks) message is received: +1. If the block is a direct descendant: + 1. It is verified and advances the state. + 2. Orphan blocks from other peers are tried to be applied. + 3. Duplicates or conflicting transactions are removed from mempool. + 4. Missing block is sent unsolicited to the peers who have `tip` set to one less than the current block and latest message timestamp less than 10 seconds ago. + This ensures that blocks propagate quickly among live nodes while not spending bandwidth too aggressively. Lagging nodes would request missing blocks at their pace. +2. Earlier blocks are discarded. +3. 
Orphan blocks are stored in a LRU buffer per peer. + +When [`MempoolTxs`](#mempooltxs) message is received: + +1. If the tip matches the current state, transactions are applied to the mempool. +2. Otherwise, the message is discarded as stale. + + +## Messages + +### `GetInventory` + +"Get inventory". Requests the state of the node: its blockchain state and transactions in the mempool. + +``` +struct GetInventory { + version: u64, + shortid_nonce: u64 +} +``` + +### `Inventory` + +Sends the inventory of a node back to the peer who requested it with [`GetInventory`](#getinventory) message. +Contains the block tip and the contents of mempool as a list of [short IDs](#short-id). + +``` +struct Inventory { + version: u64, + tip: BlockHeader, + tip_signature: starsig::Signature, + shortid_nonce: u64, + shortid_list: Vec, +} +``` + +### `GetBlock` + +Requests a block at a given height. + +``` +struct GetBlock { + height: u64, +} +``` + +### `Block` + +Sends a block requested with [`GetBlock`](#getblock) message. + +``` +struct Block { + header: BlockHeader, + signature: starsig::Signature, + txs: Vec, +} +``` + +### `GetMempoolTxs` + +Requests a subset of mempool transactions with the given [short IDs](#short-id) after receiving the [`Inventory`](#inventory) message. + +``` +struct GetMempoolTxs { + shortid_nonce: u64, + shortids: Vec +} +``` + +### `MempoolTxs` + +Sends a subset of mempool transactions in response to [`GetMempoolTxs`](#getmempooltxs) message. + +The node sends a list of [blockchain transaction](#blockchaintx) packages matching the [short IDs](#short-id) requested. + +``` +struct MempoolTxs { + tip: BlockID, + txs: Vec +} +``` + diff --git a/zkvm/src/fees.rs b/zkvm/src/fees.rs index 43cba31ac..f064934e2 100644 --- a/zkvm/src/fees.rs +++ b/zkvm/src/fees.rs @@ -6,6 +6,7 @@ use serde::{Deserialize, Serialize}; /// Maximum amount of fee, which allows overflow-safe size-by-fee multiplication. 
pub const MAX_FEE: u64 = 1 << 24; +/// Fee checked to be less or equal to `MAX_FEE`. #[derive(Copy, Clone, Debug)] pub struct CheckedFee { inner: u64, @@ -57,14 +58,17 @@ impl FeeRate { } impl CheckedFee { + /// Creates a zero fee. pub const fn zero() -> Self { CheckedFee { inner: 0 } } + /// Creates a fee checked to be ≤ `MAX_FEE`. pub fn new(fee: u64) -> Option { CheckedFee::zero().add(fee) } + /// Adds a fee and checks the result for being within `MAX_FEE`. pub fn add(mut self, fee: u64) -> Option { if fee > MAX_FEE { return None; diff --git a/zkvm/src/lib.rs b/zkvm/src/lib.rs index 87eb0e86e..d3e9bf906 100644 --- a/zkvm/src/lib.rs +++ b/zkvm/src/lib.rs @@ -8,18 +8,18 @@ #[macro_use] extern crate failure; +pub extern crate bulletproofs; extern crate serde; #[macro_use] mod serialization; -pub mod blockchain; mod constraints; mod contract; mod debug; mod encoding; mod errors; mod fees; -mod merkle; +pub mod merkle; mod ops; mod predicate; mod program; @@ -28,7 +28,6 @@ mod scalar_witness; mod transcript; mod tx; mod types; -pub mod utreexo; mod verifier; mod vm; @@ -36,7 +35,8 @@ pub use self::constraints::{Commitment, CommitmentWitness, Constraint, Expressio pub use self::contract::{Anchor, Contract, ContractID, PortableItem}; pub use self::encoding::Encodable; pub use self::errors::VMError; -pub use self::merkle::{Hash, MerkleItem, MerkleTree}; +pub use self::fees::{fee_flavor, CheckedFee, FeeRate, MAX_FEE}; +pub use self::merkle::{Hash, Hasher, MerkleItem, MerkleTree}; pub use self::ops::{Instruction, Opcode}; pub use self::predicate::{Predicate, PredicateTree}; pub use self::program::{Program, ProgramItem}; @@ -47,6 +47,4 @@ pub use self::tx::{Tx, TxEntry, TxHeader, TxID, TxLog, UnsignedTx, VerifiedTx}; pub use self::types::{ClearValue, Item, String, Value, WideValue}; pub use self::verifier::Verifier; -pub use self::blockchain::*; - pub use musig::{Multikey, Multisignature, Signature, VerificationKey}; diff --git a/zkvm/src/merkle.rs b/zkvm/src/merkle.rs 
index 2a45459a7..6da679f44 100644 --- a/zkvm/src/merkle.rs +++ b/zkvm/src/merkle.rs @@ -1,3 +1,4 @@ +//! API for operations on merkle binary trees. use crate::encoding::{self, Encodable, SliceReader}; use crate::errors::VMError; use core::marker::PhantomData; @@ -246,6 +247,7 @@ pub struct Path { pub neighbors: Vec, } +/// Side of the neighbour produced by the `Directions` iterator. #[derive(Copy, Clone, PartialEq, Debug)] pub enum Side { /// Indicates that the item is to the left of its neighbor. @@ -359,6 +361,9 @@ impl Path { // zkvm-specific impl impl Path { + /// Decodes Path from a byte slice. First 8 bytes is a LE64-encoded position, + /// followed by 4 bytes of LE32-encoded number of neighbours, + /// than the 32-byte neighbour hashes. pub fn decode<'a>(reader: &mut SliceReader<'a>) -> Result { let position = reader.read_u64()?; let neighbors_len = reader.read_u32()? as usize; @@ -392,12 +397,19 @@ impl Encodable for Path { } } -/// Simialr to Path, but does not contain neighbors - only left/right directions +/// Similar to `Path`, but does not contain neighbors - only left/right directions /// as indicated by the bits in the `position`. #[derive(Copy, Clone, PartialEq, Debug)] pub struct Directions { - pub position: Position, - pub depth: usize, + position: Position, + depth: usize, +} + +impl Directions { + /// Creates a new directions object for a specified item’s position and depth. + pub fn new(position: Position, depth: usize) -> Self { + Self { position, depth } + } } impl ExactSizeIterator for Directions { diff --git a/zkvm/src/serialization.rs b/zkvm/src/serialization.rs index 32328eb25..bc0e540ae 100644 --- a/zkvm/src/serialization.rs +++ b/zkvm/src/serialization.rs @@ -1,5 +1,7 @@ //! Utilities to support serialization needs +/// Implements `serde::Serialize` and `serde::Deserialize` for a tuple-struct that wraps `[u8;32]`. +#[macro_export] macro_rules! 
serialize_bytes32 { ($type_name:ident) => { impl serde::Serialize for $type_name { @@ -47,100 +49,3 @@ macro_rules! serialize_bytes32 { } }; } - -/// Serde adaptor for 64-item array -pub mod array64 { - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - - pub fn serialize(value: &[T; 64], serializer: S) -> Result - where - T: Serialize + Clone, - S: Serializer, - { - value.to_vec().serialize(serializer) - } - - pub fn deserialize<'de, D, T>(deserializer: D) -> Result<[T; 64], D::Error> - where - D: Deserializer<'de>, - T: Deserialize<'de> + Default, - { - let mut vec = Vec::::deserialize(deserializer)?; - if vec.len() != 64 { - return Err(serde::de::Error::invalid_length( - vec.len(), - &"a 64-item array", - )); - } - let mut buf: [T; 64] = [ - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - T::default(), - ]; - for i in 0..64 { - buf[63 - i] = vec.pop().unwrap(); - } - Ok(buf) - } -}