use std::collections::{VecDeque, HashSet, HashMap};
use std::sync::Arc;
use std::hash::Hash as StdHash;
use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard};
use linked_hash_map::{LinkedHashMap, Entry};
use hash_db::Hasher;
use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor};
use sp_core::hexdisplay::HexDisplay;
use sp_core::storage::ChildInfo;
use sp_state_machine::{
backend::Backend as StateBackend, TrieBackend, StorageKey, StorageValue,
StorageCollection, ChildStorageCollection,
};
use log::trace;
use crate::{utils::Meta, stats::StateUsageStats};
/// Maximum number of recently-committed blocks whose modification sets are
/// tracked in `Cache::modifications`.
const STATE_CACHE_BLOCKS: usize = 12;
/// A child-trie storage key: `(storage key of the child trie, key inside it)`.
type ChildStorageKey = (Vec<u8>, Vec<u8>);
/// Shared canonical state cache.
pub struct Cache<B: BlockT> {
	/// Storage cache. `Some(None)` as a cached value means the key is known to be absent.
	lru_storage: LRUMap<StorageKey, Option<StorageValue>>,
	/// Storage-hash cache; the inner `Option` likewise records known-absent keys.
	lru_hashes: LRUMap<StorageKey, OptionHOut<B::Hash>>,
	/// Child-trie storage cache, same `None` convention as `lru_storage`.
	lru_child_storage: LRUMap<ChildStorageKey, Option<StorageValue>>,
	/// Which keys each recently committed block modified, ordered by block
	/// number descending (see the insertion logic in `sync_cache`).
	modifications: VecDeque<BlockChanges<B::Header>>,
}
struct LRUMap<K, V>(LinkedHashMap<K, V>, usize, usize);
/// Rough in-memory byte footprint of a value, used for LRU size accounting.
trait EstimateSize {
	/// Return the estimated size of this value, in bytes.
	fn estimate_size(&self) -> usize;
}
impl EstimateSize for Vec<u8> {
	/// A byte vector is charged its allocated capacity, not just its length.
	fn estimate_size(&self) -> usize {
		Vec::capacity(self)
	}
}
impl EstimateSize for Option<Vec<u8>> {
	/// `None` occupies no heap; `Some` is charged its vector's capacity.
	fn estimate_size(&self) -> usize {
		match self {
			Some(bytes) => bytes.capacity(),
			None => 0,
		}
	}
}
struct OptionHOut<T: AsRef<[u8]>>(Option<T>);
impl<T: AsRef<[u8]>> EstimateSize for OptionHOut<T> {
	/// Charge the byte length of the contained hash, or nothing for `None`.
	fn estimate_size(&self) -> usize {
		match &self.0 {
			Some(hash) => hash.as_ref().len(),
			None => 0,
		}
	}
}
impl<T: EstimateSize> EstimateSize for (T, T) {
	/// A pair is charged the sum of its two components.
	fn estimate_size(&self) -> usize {
		let (first, second) = self;
		first.estimate_size() + second.estimate_size()
	}
}
impl<K: EstimateSize + Eq + StdHash, V: EstimateSize> LRUMap<K, V> {
	/// Drop `k` from the map, refunding the estimated size of both the key
	/// and its value from the running total.
	fn remove(&mut self, k: &K) {
		if let Some(removed) = self.0.remove(k) {
			self.1 -= k.estimate_size();
			self.1 -= removed.estimate_size();
		}
	}

	/// Insert `v` under `k`, updating the size estimate, then evict
	/// least-recently-used entries until the estimate fits the limit again.
	fn add(&mut self, k: K, v: V) {
		let limit = self.2;
		let key_size = k.estimate_size();
		// The new value is always accounted; an occupied slot refunds the
		// replaced value below, and a vacant slot additionally pays for the key.
		self.1 += v.estimate_size();
		match self.0.entry(k) {
			Entry::Occupied(mut occupied) => {
				self.1 -= occupied.get().estimate_size();
				occupied.insert(v);
			},
			Entry::Vacant(vacant) => {
				self.1 += key_size;
				vacant.insert(v);
			},
		}

		// Evict from the front (least recently used) while over budget.
		while self.1 > limit {
			match self.0.pop_front() {
				Some((old_key, old_value)) => {
					self.1 -= old_key.estimate_size();
					self.1 -= old_value.estimate_size();
				},
				None => break,
			}
		}
	}

	/// Look up `k`, marking the entry as most recently used.
	fn get<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
	where
		K: std::borrow::Borrow<Q>,
		Q: StdHash + Eq,
	{
		self.0.get_refresh(k)
	}

	/// Current estimated size, in bytes, of all stored keys and values.
	fn used_size(&self) -> usize {
		self.1
	}

	/// Remove everything and reset the size estimate to zero.
	fn clear(&mut self) {
		self.0.clear();
		self.1 = 0;
	}
}
impl<B: BlockT> Cache<B> {
	/// Total estimated bytes held by the top-level and child storage LRUs
	/// (the fixed-size hash cache is not included).
	pub fn used_storage_cache_size(&self) -> usize {
		self.lru_storage.used_size() + self.lru_child_storage.used_size()
	}

	/// Mark a newly enacted `block` as canonical and invalidate every cached
	/// entry it modified. Returns `false` when the block is unknown to the
	/// modification history.
	fn revert_enacted(&mut self, block: &B::Hash) -> bool {
		let modification = match self.modifications.iter_mut().find(|m| &m.hash == block) {
			Some(m) => m,
			None => return false,
		};
		trace!("Reverting enacted block {:?}", block);
		modification.is_canon = true;
		for key in &modification.storage {
			trace!("Reverting enacted key {:?}", HexDisplay::from(key));
			self.lru_storage.remove(key);
		}
		for key in &modification.child_storage {
			trace!("Reverting enacted child key {:?}", key);
			self.lru_child_storage.remove(key);
		}
		true
	}

	/// Mark a retracted `block` as non-canonical and invalidate every cached
	/// entry it modified. Returns `false` when the block is unknown.
	fn retract_block(&mut self, block: &B::Hash) -> bool {
		let modification = match self.modifications.iter_mut().find(|m| &m.hash == block) {
			Some(m) => m,
			None => return false,
		};
		trace!("Retracting block {:?}", block);
		modification.is_canon = false;
		for key in &modification.storage {
			trace!("Retracted key {:?}", HexDisplay::from(key));
			self.lru_storage.remove(key);
		}
		for key in &modification.child_storage {
			trace!("Retracted child key {:?}", key);
			self.lru_child_storage.remove(key);
		}
		true
	}

	/// Synchronize the cache with a chain re-organization: `enacted` blocks
	/// became canonical, `retracted` blocks dropped off the canonical chain.
	/// If any of them is unknown to the modification history we can no longer
	/// tell which cached entries are stale, so the whole cache is wiped.
	pub fn sync(&mut self, enacted: &[B::Hash], retracted: &[B::Hash]) {
		trace!("Syncing shared cache, enacted = {:?}, retracted = {:?}", enacted, retracted);
		let mut clear = false;
		// Once an unknown block is seen, stop processing (matches the
		// original short-circuit); everything is cleared below anyway.
		for block in enacted {
			if !clear {
				clear = !self.revert_enacted(block);
			}
		}
		for block in retracted {
			if !clear {
				clear = !self.retract_block(block);
			}
		}
		if clear {
			trace!("Wiping cache");
			self.lru_storage.clear();
			self.lru_child_storage.clear();
			self.lru_hashes.clear();
			self.modifications.clear();
		}
	}
}
/// The shared canonical state cache, wrapped for cross-thread use.
pub type SharedCache<B> = Arc<Mutex<Cache<B>>>;
/// Fixed byte budget for the hash LRU (64 KiB); it is not carved out of the
/// configurable shared cache size.
const FIX_LRU_HASH_SIZE: usize = 65_536;
/// Create a new shared cache instance.
///
/// `shared_cache_size` is the total byte budget split between the top-level
/// and child storage caches; `child_ratio` is `(child part, denominator)`,
/// i.e. the fraction of the budget reserved for child storage.
///
/// # Panics
/// Panics when the denominator of `child_ratio` is zero (previously this
/// failed with an opaque divide-by-zero; now the precondition is explicit).
pub fn new_shared_cache<B: BlockT>(
	shared_cache_size: usize,
	child_ratio: (usize, usize),
) -> SharedCache<B> {
	let (child_part, total_parts) = child_ratio;
	assert!(total_parts > 0, "the child-ratio denominator must be non-zero");
	// `saturating_sub` keeps a child part larger than the denominator from
	// underflowing; the top-level cache then simply gets a zero budget.
	let top = total_parts.saturating_sub(child_part);
	Arc::new(Mutex::new(Cache {
		lru_storage: LRUMap(
			LinkedHashMap::new(), 0, shared_cache_size * top / total_parts,
		),
		// The hash cache has its own small fixed budget.
		lru_hashes: LRUMap(LinkedHashMap::new(), 0, FIX_LRU_HASH_SIZE),
		lru_child_storage: LRUMap(
			LinkedHashMap::new(), 0, shared_cache_size * child_part / total_parts,
		),
		modifications: VecDeque::new(),
	}))
}
/// Per-block summary of which keys a committed block modified; used to decide
/// whether a shared cache entry is still valid for a state on a given fork.
#[derive(Debug)]
struct BlockChanges<B: Header> {
	/// Block number.
	number: B::Number,
	/// Block hash.
	hash: B::Hash,
	/// Parent block hash.
	parent: B::Hash,
	/// Set of modified top-level storage keys.
	storage: HashSet<StorageKey>,
	/// Set of modified child storage keys.
	child_storage: HashSet<ChildStorageKey>,
	/// Whether the block is part of the canonical chain.
	is_canon: bool,
}
/// Values cached for one specific state; merged into the shared cache when
/// the corresponding block is committed as best (see `sync_cache`).
struct LocalCache<H: Hasher> {
	/// Storage cache. `Some(None)` records a key known to be absent.
	storage: HashMap<StorageKey, Option<StorageValue>>,
	/// Storage-hash cache, same `None` convention.
	hashes: HashMap<StorageKey, Option<H::Out>>,
	/// Child storage cache, same `None` convention.
	child_storage: HashMap<ChildStorageKey, Option<StorageValue>>,
}
/// Cache data for one state: a handle to the shared cache plus the local,
/// not-yet-committed cache.
pub struct CacheChanges<B: BlockT> {
	/// Shared canonical state cache.
	shared_cache: SharedCache<B>,
	/// Local cache of values read through this state.
	local_cache: RwLock<LocalCache<HashFor<B>>>,
	/// Hash of the block this state was opened on top of. `None` disables
	/// shared-cache lookups (see `is_allowed`).
	pub parent_hash: Option<B::Hash>,
}
/// State backend wrapper that serves reads from a shared + local cache before
/// falling back to the inner backend.
pub struct CachingState<S, B: BlockT> {
	/// Usage statistics for reads through this state.
	usage: StateUsageStats,
	/// Overlay statistics accumulated via `register_overlay_stats`.
	overlay_stats: sp_state_machine::StateMachineStats,
	/// The wrapped backing state.
	state: S,
	/// Cache data for this state.
	cache: CacheChanges<B>,
}
impl<S, B: BlockT> std::fmt::Debug for CachingState<S, B> {
	/// Identifies the state by the parent hash it was opened against.
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		let parent = &self.cache.parent_hash;
		write!(f, "Block {:?}", parent)
	}
}
impl<B: BlockT> CacheChanges<B> {
	/// Propagate the local cache into the shared cache and synchronize the
	/// shared cache with a chain re-organization.
	///
	/// `enacted`/`retracted` describe the re-org; `changes`/`child_changes`
	/// are the storage writes of the committed block; `commit_hash` and
	/// `commit_number` identify it; `is_best` says whether it becomes the
	/// new best block.
	pub fn sync_cache(
		&mut self,
		enacted: &[B::Hash],
		retracted: &[B::Hash],
		changes: StorageCollection,
		child_changes: ChildStorageCollection,
		commit_hash: Option<B::Hash>,
		commit_number: Option<NumberFor<B>>,
		is_best: bool,
	) {
		let mut cache = self.shared_cache.lock();
		trace!(
			"Syncing cache, id = (#{:?}, {:?}), parent={:?}, best={}",
			commit_number,
			commit_hash,
			self.parent_hash,
			is_best,
		);
		let cache = &mut *cache;
		// The committed block itself is not treated as "enacted": its changes
		// are applied to the shared LRUs directly further below.
		let mut enacted: Vec<_> = enacted
			.iter()
			.filter(|h| commit_hash.as_ref().map_or(true, |p| *h != p))
			.cloned()
			.collect();
		let mut retracted = std::borrow::Cow::Borrowed(retracted);
		// If the committed block is already tracked but its canonicality flag
		// flips, reclassify it as enacted or retracted accordingly.
		if let Some(commit_hash) = &commit_hash {
			if let Some(m) = cache.modifications.iter_mut().find(|m| &m.hash == commit_hash) {
				if m.is_canon != is_best {
					if is_best {
						enacted.push(commit_hash.clone());
					} else {
						retracted.to_mut().push(commit_hash.clone());
					}
				}
			}
		}
		cache.sync(&enacted, &retracted);
		// Merge the local cache into the shared cache, but only when the
		// parent is known and the committed block becomes best.
		if let Some(_) = self.parent_hash {
			let mut local_cache = self.local_cache.write();
			if is_best {
				trace!(
					"Committing {} local, {} hashes, {} modified root entries, {} modified child entries",
					local_cache.storage.len(),
					local_cache.hashes.len(),
					changes.len(),
					child_changes.iter().map(|v|v.1.len()).sum::<usize>(),
				);
				for (k, v) in local_cache.storage.drain() {
					cache.lru_storage.add(k, v);
				}
				for (k, v) in local_cache.child_storage.drain() {
					cache.lru_child_storage.add(k, v);
				}
				for (k, v) in local_cache.hashes.drain() {
					cache.lru_hashes.add(k, OptionHOut(v));
				}
			}
		}
		// Record the committed block's modification set (requires number,
		// hash and parent to all be known).
		if let (
			Some(ref number), Some(ref hash), Some(ref parent))
			= (commit_number, commit_hash, self.parent_hash)
		{
			// Bound the modification history.
			// NOTE(review): trims only when the length is exactly at the
			// limit; if the deque could ever exceed it, `>=` would be safer —
			// confirm the invariant.
			if cache.modifications.len() == STATE_CACHE_BLOCKS {
				cache.modifications.pop_back();
			}
			let mut modifications = HashSet::new();
			let mut child_modifications = HashSet::new();
			// Apply committed writes to the shared LRUs (best block only) and
			// record every touched key either way.
			child_changes.into_iter().for_each(|(sk, changes)|
				for (k, v) in changes.into_iter() {
					let k = (sk.clone(), k);
					if is_best {
						cache.lru_child_storage.add(k.clone(), v);
					}
					child_modifications.insert(k);
				}
			);
			for (k, v) in changes.into_iter() {
				if is_best {
					// A changed value invalidates any cached hash for the key.
					cache.lru_hashes.remove(&k);
					cache.lru_storage.add(k.clone(), v);
				}
				modifications.insert(k);
			}
			let block_changes = BlockChanges {
				storage: modifications,
				child_storage: child_modifications,
				number: *number,
				hash: hash.clone(),
				is_canon: is_best,
				parent: parent.clone(),
			};
			// Keep `modifications` sorted by block number, descending.
			let insert_at = cache.modifications.iter()
				.enumerate()
				.find(|(_, m)| m.number < *number)
				.map(|(i, _)| i);
			trace!("Inserting modifications at {:?}", insert_at);
			if let Some(insert_at) = insert_at {
				cache.modifications.insert(insert_at, block_changes);
			} else {
				cache.modifications.push_back(block_changes);
			}
		}
	}
}
impl<S: StateBackend<HashFor<B>>, B: BlockT> CachingState<S, B> {
	/// Create a new instance wrapping the given backend and shared cache,
	/// with an empty local cache.
	pub(crate) fn new(
		state: S,
		shared_cache: SharedCache<B>,
		parent_hash: Option<B::Hash>,
	) -> Self {
		CachingState {
			usage: StateUsageStats::new(),
			overlay_stats: sp_state_machine::StateMachineStats::default(),
			state,
			cache: CacheChanges {
				shared_cache,
				local_cache: RwLock::new(LocalCache {
					storage: Default::default(),
					hashes: Default::default(),
					child_storage: Default::default(),
				}),
				parent_hash,
			},
		}
	}
	/// Decide whether a shared-cache entry may be served for this state:
	/// walk the modification history (newest first), following our ancestry
	/// from `parent_hash`; succeed only once a canonical ancestor is reached
	/// without the key having been modified in any block seen along the way.
	fn is_allowed(
		key: Option<&[u8]>,
		child_key: Option<&ChildStorageKey>,
		parent_hash: &Option<B::Hash>,
		modifications: &VecDeque<BlockChanges<B::Header>>
	) -> bool {
		// No parent hash means cache lookups are disabled for this state.
		let mut parent = match *parent_hash {
			None => {
				trace!("Cache lookup skipped for {:?}: no parent hash", key.as_ref().map(HexDisplay::from));
				return false;
			}
			Some(ref parent) => parent,
		};
		for m in modifications {
			if &m.hash == parent {
				// Reached a canonical ancestor with no conflicting writes in
				// between: the shared (canonical) value is valid for us.
				if m.is_canon {
					return true;
				}
				// Non-canonical ancestor: keep walking up our chain.
				parent = &m.parent;
			}
			// Any block inspected so far having modified the key makes the
			// shared cache entry unusable for this state.
			if let Some(key) = key {
				if m.storage.contains(key) {
					trace!("Cache lookup skipped for {:?}: modified in a later block", HexDisplay::from(&key));
					return false;
				}
			}
			if let Some(child_key) = child_key {
				if m.child_storage.contains(child_key) {
					trace!("Cache lookup skipped for {:?}: modified in a later block", child_key);
					return false;
				}
			}
		}
		// Ran out of history without meeting the parent chain.
		trace!(
			"Cache lookup skipped for {:?}: parent hash is unknown",
			key.as_ref().map(HexDisplay::from),
		);
		false
	}
}
/// `StateBackend` implementation that consults the local cache, then the
/// shared cache (when `is_allowed` permits), and only then the inner backend.
/// All non-point queries (iteration, roots, keys) bypass the cache entirely.
impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for CachingState<S, B> {
	type Error = S::Error;
	type Transaction = S::Transaction;
	type TrieBackendStorage = S::TrieBackendStorage;
	fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		// Upgradable read: only upgraded to a write lock on a cache miss.
		let local_cache = self.cache.local_cache.upgradable_read();
		if let Some(entry) = local_cache.storage.get(key).cloned() {
			trace!("Found in local cache: {:?}", HexDisplay::from(&key));
			self.usage.tally_key_read(key, entry.as_ref(), true);
			return Ok(entry)
		}
		let mut cache = self.cache.shared_cache.lock();
		if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) {
			if let Some(entry) = cache.lru_storage.get(key).map(|a| a.clone()) {
				trace!("Found in shared cache: {:?}", HexDisplay::from(&key));
				self.usage.tally_key_read(key, entry.as_ref(), true);
				return Ok(entry)
			}
		}
		trace!("Cache miss: {:?}", HexDisplay::from(&key));
		let value = self.state.storage(key)?;
		// Remember the backend answer (including `None`) in the local cache.
		RwLockUpgradableReadGuard::upgrade(local_cache).storage.insert(key.to_vec(), value.clone());
		self.usage.tally_key_read(key, value.as_ref(), false);
		Ok(value)
	}
	fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
		// Same lookup cascade as `storage`, but against the hash caches.
		let local_cache = self.cache.local_cache.upgradable_read();
		if let Some(entry) = local_cache.hashes.get(key).cloned() {
			trace!("Found hash in local cache: {:?}", HexDisplay::from(&key));
			return Ok(entry)
		}
		let mut cache = self.cache.shared_cache.lock();
		if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) {
			if let Some(entry) = cache.lru_hashes.get(key).map(|a| a.0.clone()) {
				trace!("Found hash in shared cache: {:?}", HexDisplay::from(&key));
				return Ok(entry)
			}
		}
		trace!("Cache hash miss: {:?}", HexDisplay::from(&key));
		let hash = self.state.storage_hash(key)?;
		RwLockUpgradableReadGuard::upgrade(local_cache).hashes.insert(key.to_vec(), hash);
		Ok(hash)
	}
	fn child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		// Child lookups are keyed by (child trie storage key, key).
		let key = (child_info.storage_key().to_vec(), key.to_vec());
		let local_cache = self.cache.local_cache.upgradable_read();
		if let Some(entry) = local_cache.child_storage.get(&key).cloned() {
			trace!("Found in local cache: {:?}", key);
			return Ok(
				self.usage.tally_child_key_read(&key, entry, true)
			)
		}
		let mut cache = self.cache.shared_cache.lock();
		if Self::is_allowed(None, Some(&key), &self.cache.parent_hash, &cache.modifications) {
			if let Some(entry) = cache.lru_child_storage.get(&key).map(|a| a.clone()) {
				trace!("Found in shared cache: {:?}", key);
				return Ok(
					self.usage.tally_child_key_read(&key, entry, true)
				)
			}
		}
		trace!("Cache miss: {:?}", key);
		let value = self.state.child_storage(child_info, &key.1[..])?;
		let value = self.usage.tally_child_key_read(&key, value, false);
		RwLockUpgradableReadGuard::upgrade(local_cache).child_storage.insert(key, value.clone());
		Ok(value)
	}
	// Existence check reuses the cached `storage` path.
	fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
		Ok(self.storage(key)?.is_some())
	}
	// The remaining methods delegate straight to the inner backend.
	fn exists_child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<bool, Self::Error> {
		self.state.exists_child_storage(child_info, key)
	}
	fn apply_to_child_keys_while<F: FnMut(&[u8]) -> bool>(
		&self,
		child_info: &ChildInfo,
		f: F,
	) {
		self.state.apply_to_child_keys_while(child_info, f)
	}
	fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_storage_key(key)
	}
	fn next_child_storage_key(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.state.next_child_storage_key(child_info, key)
	}
	fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
		self.state.for_keys_with_prefix(prefix, f)
	}
	fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
		self.state.for_key_values_with_prefix(prefix, f)
	}
	fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
		&self,
		child_info: &ChildInfo,
		prefix: &[u8],
		f: F,
	) {
		self.state.for_child_keys_with_prefix(child_info, prefix, f)
	}
	fn storage_root<'a>(
		&self,
		delta: impl Iterator<Item=(&'a [u8], Option<&'a [u8]>)>,
	) -> (B::Hash, Self::Transaction) where B::Hash: Ord {
		self.state.storage_root(delta)
	}
	fn child_storage_root<'a>(
		&self,
		child_info: &ChildInfo,
		delta: impl Iterator<Item=(&'a [u8], Option<&'a [u8]>)>,
	) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord {
		self.state.child_storage_root(child_info, delta)
	}
	fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
		self.state.pairs()
	}
	fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
		self.state.keys(prefix)
	}
	fn child_keys(
		&self,
		child_info: &ChildInfo,
		prefix: &[u8],
	) -> Vec<Vec<u8>> {
		self.state.child_keys(child_info, prefix)
	}
	fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, HashFor<B>>> {
		self.state.as_trie_backend()
	}
	fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) {
		self.overlay_stats.add(stats);
	}
	fn usage_info(&self) -> sp_state_machine::UsageInfo {
		let mut info = self.usage.take();
		info.include_state_machine_states(&self.overlay_stats);
		info
	}
}
/// Wrapper around `CachingState` that flushes the cache back to the shared
/// cache when dropped (see the `Drop` implementation).
pub struct SyncingCachingState<S, Block: BlockT> {
	/// Usage statistics accumulator shared with the owner of this state.
	state_usage: Arc<StateUsageStats>,
	/// Shared chain metadata; read on drop to learn the current best hash.
	meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
	/// Lock taken (read) while syncing the cache on drop.
	lock: Arc<RwLock<()>>,
	/// The inner caching state; `Some` until consumed or dropped.
	caching_state: Option<CachingState<S, Block>>,
	/// When `true`, the cache is not synchronized on drop.
	disable_syncing: bool,
}
impl<S, B: BlockT> SyncingCachingState<S, B> {
	/// Create a new instance wrapping the given caching state; syncing on
	/// drop is enabled by default.
	pub fn new(
		caching_state: CachingState<S, B>,
		state_usage: Arc<StateUsageStats>,
		meta: Arc<RwLock<Meta<NumberFor<B>, B::Hash>>>,
		lock: Arc<RwLock<()>>,
	) -> Self {
		Self {
			state_usage,
			meta,
			lock,
			caching_state: Some(caching_state),
			disable_syncing: false,
		}
	}

	/// Borrow the wrapped caching state.
	fn caching_state(&self) -> &CachingState<S, B> {
		self.caching_state
			.as_ref()
			.expect("`caching_state` is always valid for the lifetime of the object; qed")
	}

	/// Consume `self`, returning the accumulated cache changes.
	pub fn into_cache_changes(mut self) -> CacheChanges<B> {
		let state = self
			.caching_state
			.take()
			.expect("`caching_state` is always valid for the lifetime of the object; qed");
		state.cache
	}

	/// Prevent the cache from being written back when this value is dropped.
	pub fn disable_syncing(&mut self) {
		self.disable_syncing = true;
	}
}
impl<S, B: BlockT> std::fmt::Debug for SyncingCachingState<S, B> {
	/// Forwards to the inner caching state's `Debug` output.
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		std::fmt::Debug::fmt(self.caching_state(), f)
	}
}
/// `StateBackend` implementation that forwards every call to the wrapped
/// `CachingState` (`as_trie_backend` needs `&mut`, hence the direct access).
impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for SyncingCachingState<S, B> {
	type Error = S::Error;
	type Transaction = S::Transaction;
	type TrieBackendStorage = S::TrieBackendStorage;
	fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.caching_state().storage(key)
	}
	fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
		self.caching_state().storage_hash(key)
	}
	fn child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.caching_state().child_storage(child_info, key)
	}
	fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
		self.caching_state().exists_storage(key)
	}
	fn exists_child_storage(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<bool, Self::Error> {
		self.caching_state().exists_child_storage(child_info, key)
	}
	fn apply_to_child_keys_while<F: FnMut(&[u8]) -> bool>(
		&self,
		child_info: &ChildInfo,
		f: F,
	) {
		self.caching_state().apply_to_child_keys_while(child_info, f)
	}
	fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
		self.caching_state().next_storage_key(key)
	}
	fn next_child_storage_key(
		&self,
		child_info: &ChildInfo,
		key: &[u8],
	) -> Result<Option<Vec<u8>>, Self::Error> {
		self.caching_state().next_child_storage_key(child_info, key)
	}
	fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
		self.caching_state().for_keys_with_prefix(prefix, f)
	}
	fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
		self.caching_state().for_key_values_with_prefix(prefix, f)
	}
	fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
		&self,
		child_info: &ChildInfo,
		prefix: &[u8],
		f: F,
	) {
		self.caching_state().for_child_keys_with_prefix(child_info, prefix, f)
	}
	fn storage_root<'a>(
		&self,
		delta: impl Iterator<Item=(&'a [u8], Option<&'a [u8]>)>,
	) -> (B::Hash, Self::Transaction) where B::Hash: Ord {
		self.caching_state().storage_root(delta)
	}
	fn child_storage_root<'a>(
		&self,
		child_info: &ChildInfo,
		delta: impl Iterator<Item=(&'a [u8], Option<&'a [u8]>)>,
	) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord {
		self.caching_state().child_storage_root(child_info, delta)
	}
	fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
		self.caching_state().pairs()
	}
	fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
		self.caching_state().keys(prefix)
	}
	fn child_keys(
		&self,
		child_info: &ChildInfo,
		prefix: &[u8],
	) -> Vec<Vec<u8>> {
		self.caching_state().child_keys(child_info, prefix)
	}
	fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, HashFor<B>>> {
		self.caching_state
			.as_mut()
			.expect("`caching_state` is valid for the lifetime of the object; qed")
			.as_trie_backend()
	}
	fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) {
		self.caching_state().register_overlay_stats(stats);
	}
	fn usage_info(&self) -> sp_state_machine::UsageInfo {
		self.caching_state().usage_info()
	}
}
impl<S, B: BlockT> Drop for SyncingCachingState<S, B> {
	/// On drop (unless disabled), merge usage statistics into the shared
	/// accumulator and flush the local cache into the shared cache.
	fn drop(&mut self) {
		if self.disable_syncing {
			return;
		}
		if let Some(mut caching_state) = self.caching_state.take() {
			// Hold the shared lock (read) for the duration of the flush.
			let _lock = self.lock.read();
			self.state_usage.merge_sm(caching_state.usage.take());
			// Only states opened with a known parent participate in the cache.
			if let Some(hash) = caching_state.cache.parent_hash.clone() {
				let is_best = self.meta.read().best_hash == hash;
				caching_state.cache.sync_cache(&[], &[], vec![], vec![], None, None, is_best);
			}
		}
	}
}
#[cfg(test)]
mod tests {
	use super::*;
	use sp_runtime::{
		traits::BlakeTwo256,
		testing::{H256, Block as RawBlock, ExtrinsicWrapper},
	};
	use sp_state_machine::InMemoryBackend;
	type Block = RawBlock<ExtrinsicWrapper<u32>>;
	// Builds a two-branch block tree and checks that shared-cache lookups are
	// only served on the canonical branch, and that a re-org flips which
	// branch may use the cache.
	#[test]
	fn smoke() {
		let root_parent = H256::random();
		let key = H256::random()[..].to_vec();
		let h0 = H256::random();
		let h1a = H256::random();
		let h1b = H256::random();
		let h2a = H256::random();
		let h2b = H256::random();
		let h3a = H256::random();
		let h3b = H256::random();
		let shared = new_shared_cache::<Block>(256 * 1024, (0, 1));
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(root_parent),
		);
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![2]))],
			vec![],
			Some(h0),
			Some(0),
			true,
		);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h0),
		);
		s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h0),
		);
		// Fork block h1b (not best) changes the key.
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![3]))],
			vec![],
			Some(h1b),
			Some(1),
			false,
		);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1b),
		);
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![4]))],
			vec![],
			Some(h2b),
			Some(2),
			false,
		);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1a),
		);
		// Canonical branch writes value 5 at h2a.
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![5]))],
			vec![],
			Some(h2a),
			Some(2),
			true,
		);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h2a),
		);
		s.cache.sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true);
		// On top of the canonical head the cached value is served.
		let s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h3a),
		);
		assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]);
		// Off-head / fork states must not see the cached value.
		let s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1a),
		);
		assert!(s.storage(&key).unwrap().is_none());
		let s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h2b),
		);
		assert!(s.storage(&key).unwrap().is_none());
		let s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1b),
		);
		assert!(s.storage(&key).unwrap().is_none());
		// Re-org to the b-branch; the a-branch loses cache access.
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h2b),
		);
		s.cache.sync_cache(
			&[h1b, h2b, h3b],
			&[h1a, h2a, h3a],
			vec![],
			vec![],
			Some(h3b),
			Some(3),
			true,
		);
		let s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h3a),
		);
		assert!(s.storage(&key).unwrap().is_none());
	}
	// A value written on the canonical branch stays usable there even after a
	// non-canonical fork writes the same key.
	#[test]
	fn simple_fork() {
		sp_tracing::try_init_simple();
		let root_parent = H256::random();
		let key = H256::random()[..].to_vec();
		let h1 = H256::random();
		let h2a = H256::random();
		let h2b = H256::random();
		let h3b = H256::random();
		let shared = new_shared_cache::<Block>(256*1024, (0,1));
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(root_parent),
		);
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![2]))],
			vec![],
			Some(h1),
			Some(1),
			true,
		);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1),
		);
		s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1),
		);
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![3]))],
			vec![],
			Some(h2b),
			Some(2),
			false,
		);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h2b),
		);
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![3]))],
			vec![],
			Some(h3b),
			Some(2),
			false,
		);
		// The canonical state on top of h2a still serves the cached value 2.
		let s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h2a),
		);
		assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]);
	}
	// Two forks off the same canonical chain; the canonical write must still
	// be visible on the canonical head.
	#[test]
	fn double_fork() {
		let root_parent = H256::random();
		let key = H256::random()[..].to_vec();
		let h1 = H256::random();
		let h2a = H256::random();
		let h2b = H256::random();
		let h3a = H256::random();
		let h3b = H256::random();
		let shared = new_shared_cache::<Block>(256*1024, (0,1));
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(root_parent),
		);
		s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1),
		);
		s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h2a),
		);
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![2]))],
			vec![],
			Some(h3a),
			Some(3),
			true,
		);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1),
		);
		s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h2b),
		);
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![3]))],
			vec![],
			Some(h3b),
			Some(3),
			false,
		);
		let s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h3a),
		);
		assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]);
	}
	// Verifies the byte accounting of the top-level and child LRU maps after
	// committing storage and child-storage changes.
	#[test]
	fn should_track_used_size_correctly() {
		let root_parent = H256::random();
		let shared = new_shared_cache::<Block>(109, ((109-36), 109));
		let h0 = H256::random();
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(), shared.clone(), Some(root_parent.clone()),
		);
		let key = H256::random()[..].to_vec();
		let s_key = H256::random()[..].to_vec();
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![1, 2, 3]))],
			vec![],
			Some(h0),
			Some(0),
			true,
		);
		// 32-byte key + 3 value bytes.
		assert_eq!(shared.lock().used_storage_cache_size(), 35 );
		let key = H256::random()[..].to_vec();
		s.cache.sync_cache(
			&[],
			&[],
			vec![],
			vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))])],
			Some(h0),
			Some(0),
			true,
		);
		// Plus (32 + 32)-byte child key + 2 value bytes.
		assert_eq!(shared.lock().used_storage_cache_size(), 101 );
	}
	// When the configured limit is exceeded, the oldest entry is evicted and
	// the accounting reflects only the surviving entry.
	#[test]
	fn should_remove_lru_items_based_on_tracking_used_size() {
		let root_parent = H256::random();
		let shared = new_shared_cache::<Block>(36*3, (2,3));
		let h0 = H256::random();
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(root_parent),
		);
		let key = H256::random()[..].to_vec();
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![1, 2, 3, 4]))],
			vec![],
			Some(h0),
			Some(0),
			true,
		);
		// 32-byte key + 4 value bytes.
		assert_eq!(shared.lock().used_storage_cache_size(), 36 );
		let key = H256::random()[..].to_vec();
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![1, 2]))],
			vec![],
			Some(h0),
			Some(0),
			true,
		);
		// First entry evicted; 32-byte key + 2 value bytes remain.
		assert_eq!(shared.lock().used_storage_cache_size(), 34 );
	}
	// A local-cache value must not leak into the shared cache when synced
	// without a commit hash/number (regression test for a mismatch bug).
	#[test]
	fn fix_storage_mismatch_issue() {
		sp_tracing::try_init_simple();
		let root_parent = H256::random();
		let key = H256::random()[..].to_vec();
		let h0 = H256::random();
		let h1 = H256::random();
		let shared = new_shared_cache::<Block>(256 * 1024, (0, 1));
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(root_parent.clone()),
		);
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![2]))],
			vec![],
			Some(h0.clone()),
			Some(0),
			true,
		);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h0),
		);
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![3]))],
			vec![],
			Some(h1),
			Some(1),
			true,
		);
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1),
		);
		assert_eq!(s.storage(&key).unwrap(), Some(vec![3]));
		// Simulate an empty shared cache (e.g. a fresh node).
		{
			let mut cache = s.cache.shared_cache.lock();
			let cache = &mut *cache;
			cache.lru_storage.clear();
			cache.lru_hashes.clear();
			cache.lru_child_storage.clear();
			cache.modifications.clear();
		}
		// Inject a stale local value, then sync without a commit id.
		s.cache.local_cache.write().storage.insert(key.clone(), Some(vec![42]));
		s.cache.sync_cache(&[], &[], vec![], vec![], None, None, true);
		let s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1),
		);
		assert_eq!(s.storage(&key).unwrap(), None);
	}
	// Committing the same block twice (first as non-best with changes, then
	// as best without) must invalidate the stale cached value.
	#[test]
	fn same_block_no_changes() {
		sp_tracing::try_init_simple();
		let root_parent = H256::random();
		let key = H256::random()[..].to_vec();
		let h1 = H256::random();
		let h2 = H256::random();
		let shared = new_shared_cache::<Block>(256*1024, (0,1));
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(root_parent),
		);
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![1]))],
			vec![],
			Some(h1),
			Some(1),
			true,
		);
		assert_eq!(shared.lock().lru_storage.get(&key).unwrap(), &Some(vec![1]));
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1),
		);
		// h2 committed as non-best: the shared cache keeps the old value.
		s.cache.sync_cache(
			&[],
			&[],
			vec![(key.clone(), Some(vec![2]))],
			vec![],
			Some(h2),
			Some(2),
			false,
		);
		assert_eq!(shared.lock().lru_storage.get(&key).unwrap(), &Some(vec![1]));
		let mut s = CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			shared.clone(),
			Some(h1),
		);
		// Same h2 now committed as best, with no changes reported.
		s.cache.sync_cache(
			&[],
			&[],
			vec![],
			vec![],
			Some(h2),
			Some(2),
			true,
		);
		assert_eq!(s.storage(&key).unwrap(), None);
	}
}
#[cfg(test)]
mod qc {
use std::collections::{HashMap, hash_map::Entry};
use quickcheck::{quickcheck, TestResult, Arbitrary};
use super::*;
use sp_runtime::{
traits::BlakeTwo256,
testing::{H256, Block as RawBlock, ExtrinsicWrapper},
};
use sp_state_machine::InMemoryBackend;
type Block = RawBlock<ExtrinsicWrapper<u32>>;
type KeySet = Vec<(Vec<u8>, Option<Vec<u8>>)>;
type KeyMap = HashMap<Vec<u8>, Option<Vec<u8>>>;
#[derive(Debug, Clone)]
struct Node {
hash: H256,
parent: H256,
state: KeyMap,
changes: KeySet,
}
impl Node {
fn new_next(&self, hash: H256, changes: KeySet) -> Self {
let mut state = self.state.clone();
for (k, v) in self.state.iter() { state.insert(k.clone(), v.clone()); }
for (k, v) in changes.clone().into_iter() { state.insert(k, v); }
Self {
hash,
parent: self.hash,
changes,
state,
}
}
fn new(hash: H256, parent: H256, changes: KeySet) -> Self {
let mut state = KeyMap::new();
for (k, v) in changes.clone().into_iter() { state.insert(k, v); }
Self {
hash,
parent,
state,
changes,
}
}
fn purge(&mut self, other_changes: &KeySet) {
for (k, _) in other_changes.iter() {
self.state.remove(k);
}
}
}
#[derive(Debug, Clone)]
enum Action {
Next { hash: H256, changes: KeySet },
Fork { depth: usize, hash: H256, changes: KeySet },
ReorgWithImport { depth: usize, hash: H256 },
FinalizationReorg { fork_depth: usize, depth: usize },
}
impl Arbitrary for Action {
fn arbitrary(gen: &mut quickcheck::Gen) -> Self {
let path = u8::arbitrary(gen);
let buf = (0..32).map(|_| u8::arbitrary(gen)).collect::<Vec<_>>();
match path {
0..=175 => {
Action::Next {
hash: H256::from_slice(&buf[..]),
changes: {
let mut set = Vec::new();
for _ in 0..<u32>::arbitrary(gen)/(64*256*256*256) {
set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)])));
}
set
}
}
},
176..=220 => {
Action::Fork {
hash: H256::from_slice(&buf[..]),
depth: ((u8::arbitrary(gen)) / 32) as usize,
changes: {
let mut set = Vec::new();
for _ in 0..<u32>::arbitrary(gen)/(64*256*256*256) {
set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)])));
}
set
}
}
},
221..=240 => {
Action::ReorgWithImport {
hash: H256::from_slice(&buf[..]),
depth: ((u8::arbitrary(gen)) / 32) as usize,
}
},
_ => {
Action::FinalizationReorg {
fork_depth: ((u8::arbitrary(gen)) / 32) as usize,
depth: ((u8::arbitrary(gen)) / 64) as usize,
}
},
}
}
}
struct Mutator {
shared: SharedCache<Block>,
canon: Vec<Node>,
forks: HashMap<H256, Vec<Node>>,
}
impl Mutator {
	/// Creates a mutator with a fresh 256 KiB shared cache and no blocks.
	fn new_empty() -> Self {
		let shared = new_shared_cache::<Block>(256*1024, (0,1));
		Self {
			shared,
			canon: vec![],
			forks: HashMap::new(),
		}
	}

	/// Returns a caching state view anchored at block `hash`, backed by an
	/// empty in-memory backend so every hit must come from the shared cache.
	fn head_state(&self, hash: H256) -> CachingState<InMemoryBackend<BlakeTwo256>, Block> {
		CachingState::new(
			InMemoryBackend::<BlakeTwo256>::default(),
			self.shared.clone(),
			Some(hash),
		)
	}

	/// Returns a caching state view anchored at the canonical chain head.
	///
	/// Panics if no block has been committed yet.
	fn canon_head_state(&self) -> CachingState<InMemoryBackend<BlakeTwo256>, Block> {
		self.head_state(self.canon.last().expect("Expected to be one commit").hash)
	}

	/// Applies `action`, panicking if it is invalid for the current chain
	/// shape. For tests that construct known-valid actions by hand.
	fn mutate_static(
		&mut self,
		action: Action,
	) -> CachingState<InMemoryBackend<BlakeTwo256>, Block> {
		self.mutate(action).expect("Expected to provide only valid actions to the mutate_static")
	}

	/// Number of blocks in the canonical chain model.
	fn canon_len(&self) -> usize {
		self.canon.len()
	}

	/// Expected storage contents at the canonical head, per the reference model.
	///
	/// Panics if no block has been committed yet.
	fn head_storage_ref(&self) -> &KeyMap {
		&self.canon.last().expect("Expected to be one commit").state
	}

	/// Single-byte keys probed by the matcher helpers.
	// NOTE(review): `0u8..255` is half-open, so key `[255]` is never probed.
	// Presumably all 256 single-byte keys were intended — confirm before changing.
	fn key_permutations() -> Vec<Vec<u8>> {
		(0u8..255).map(|x| vec![x]).collect()
	}

	/// Applies `action` to both the shared cache under test and the reference
	/// model, returning a state view for the newly imported block.
	///
	/// Returns `Err(())` when the action does not fit the current chain shape
	/// (e.g. forking deeper than the chain); quickcheck drivers discard those.
	fn mutate(
		&mut self,
		action: Action,
	) -> Result<CachingState<InMemoryBackend<BlakeTwo256>, Block>, ()> {
		let state = match action {
			Action::Fork { depth, hash, changes } => {
				let pos = self.canon.len() as isize - depth as isize;
				// Reject forks off the head itself, off an empty chain, or
				// deeper than the chain.
				if pos < 0 || self.canon.len() == 0 || pos >= (self.canon.len()-1) as isize
				{
					return Err(());
				}
				let pos = pos as usize;
				let fork_at = self.canon[pos].hash;
				let (total_h, parent) = match self.forks.entry(fork_at) {
					Entry::Occupied(occupied) => {
						// Extend an existing fork branch from its tip.
						let chain = occupied.into_mut();
						let parent = chain.last().expect("No empty forks are ever created").clone();
						let mut node = parent.new_next(hash, changes.clone());
						// Drop keys already written by earlier fork blocks so the
						// model tracks only this block's own effective writes.
						for earlier in chain.iter() {
							node.purge(&earlier.changes.clone());
						}
						chain.push(node);
						(pos + chain.len(), parent.hash)
					},
					Entry::Vacant(vacant) => {
						// First fork branching off this canonical block.
						let canon_parent = &self.canon[pos];
						vacant.insert(vec![canon_parent.new_next(hash, changes.clone())]);
						(pos + 1, fork_at)
					}
				};
				let mut state = CachingState::new(
					InMemoryBackend::<BlakeTwo256>::default(),
					self.shared.clone(),
					Some(parent),
				);
				// Final `false` presumably imports the fork block as non-best,
				// so it must not update the cache's canonical view — confirm
				// against `sync_cache`'s signature.
				state.cache.sync_cache(
					&[],
					&[],
					changes,
					vec![],
					Some(hash),
					Some(total_h as u64),
					false,
				);
				state
			},
			Action::Next { hash, changes } => {
				// Parent is the current head, or the zero hash for the first block.
				let (next, parent_hash) = match self.canon.last() {
					None => {
						let parent_hash = H256::from(&[0u8; 32]);
						(Node::new(hash, parent_hash, changes.clone()), parent_hash)
					},
					Some(parent) => {
						(parent.new_next(hash, changes.clone()), parent.hash)
					}
				};
				// The new block's writes supersede earlier writes to the same
				// keys throughout the model, canonical and forks alike.
				for node in self.canon.iter_mut() {
					node.purge(&next.changes);
					if let Some(fork) = self.forks.get_mut(&node.hash) {
						for node in fork.iter_mut() {
							node.purge(&next.changes);
						}
					}
				}
				let mut state = CachingState::new(
					InMemoryBackend::<BlakeTwo256>::default(),
					self.shared.clone(),
					Some(parent_hash),
				);
				state.cache.sync_cache(
					&[],
					&[],
					next.changes.clone(),
					vec![],
					Some(hash),
					Some(self.canon.len() as u64 + 1),
					true,
				);
				self.canon.push(next);
				state
			},
			Action::ReorgWithImport { depth, hash } => {
				let pos = self.canon.len() as isize - depth as isize;
				if pos < 0 || pos+1 >= self.canon.len() as isize { return Err(()); }
				let fork_at = self.canon[pos as usize].hash;
				let pos = pos as usize;
				match self.forks.get_mut(&fork_at) {
					Some(chain) => {
						// Swap the canonical tail with the fork branch: after the
						// swap, `chain` holds the retracted blocks and `new_fork`
						// the previously-forked blocks that become canonical.
						let mut new_fork = self.canon.drain(pos+1..).collect::<Vec<Node>>();
						let retracted: Vec<H256> = new_fork.iter().map(|node| node.hash).collect();
						let enacted: Vec<H256> = chain.iter().map(|node| node.hash).collect();
						std::mem::swap(chain, &mut new_fork);
						// Import one new block on top of the newly canonical branch.
						let mut node = new_fork.last().map(
							|node| node.new_next(hash, vec![])
						).expect("No empty fork ever created!");
						for invalidators in chain.iter().chain(new_fork.iter()) {
							node.purge(&invalidators.changes);
						}
						self.canon.extend(new_fork.into_iter());
						self.canon.push(node);
						let mut state = CachingState::new(
							InMemoryBackend::<BlakeTwo256>::default(),
							self.shared.clone(),
							Some(fork_at),
						);
						let height = pos as u64 + enacted.len() as u64 + 2;
						state.cache.sync_cache(
							&enacted[..],
							&retracted[..],
							vec![],
							vec![],
							Some(hash),
							Some(height),
							true,
						);
						state
					}
					None => {
						// No fork exists at this block to reorg onto.
						return Err(());
					},
				}
			},
			Action::FinalizationReorg { fork_depth, depth } => {
				let pos = self.canon.len() as isize - fork_depth as isize;
				if pos < 0 || pos+1 >= self.canon.len() as isize { return Err(()); }
				let fork_at = self.canon[pos as usize].hash;
				let pos = pos as usize;
				match self.forks.get_mut(&fork_at) {
					Some(fork_chain) => {
						// NOTE(review): the first two terms cancel, so `sync_pos`
						// is always `-depth` and only `depth == 0` survives the
						// bounds check below. One term presumably should use a
						// different length — confirm intent before changing.
						let sync_pos = fork_chain.len() as isize - fork_chain.len() as isize - depth as isize;
						if sync_pos < 0 || sync_pos >= fork_chain.len() as isize { return Err(()); }
						let sync_pos = sync_pos as usize;
						// Swap the canonical tail with the fork prefix and tell the
						// shared cache directly which blocks were enacted/retracted.
						let mut new_fork = self.canon.drain(pos+1..).collect::<Vec<Node>>();
						let retracted: Vec<H256> = new_fork.iter().map(|node| node.hash).collect();
						let enacted: Vec<H256> = fork_chain.iter().take(sync_pos+1).map(|node| node.hash).collect();
						std::mem::swap(fork_chain, &mut new_fork);
						self.shared.lock().sync(&retracted, &enacted);
						self.head_state(
							self.canon.last()
								.expect("wasn't forking to emptiness so there should be one!")
								.hash
						)
					},
					None => {
						return Err(());
					}
				}
			},
		};
		Ok(state)
	}
}
#[test]
fn smoke() {
	// One random storage key probed across the whole chain.
	let probe = H256::random()[..].to_vec();
	// b0 <- b1a <- b2a <- b3a is the canonical chain; b1b and b2b fork off,
	// and b3b is imported by the final reorg.
	let b0 = H256::random();
	let b1a = H256::random();
	let b1b = H256::random();
	let b2a = H256::random();
	let b2b = H256::random();
	let b3a = H256::random();
	let b3b = H256::random();

	let mut m = Mutator::new_empty();
	m.mutate_static(Action::Next { hash: b0, changes: vec![(probe.clone(), Some(vec![2]))] });
	m.mutate_static(Action::Next { hash: b1a, changes: vec![] });
	m.mutate_static(Action::Fork { depth: 2, hash: b1b, changes: vec![(probe.clone(), Some(vec![3]))] });
	m.mutate_static(Action::Fork { depth: 2, hash: b2b, changes: vec![(probe.clone(), Some(vec![4]))] });
	m.mutate_static(Action::Next { hash: b2a, changes: vec![(probe.clone(), Some(vec![5]))] });
	m.mutate_static(Action::Next { hash: b3a, changes: vec![] });

	// The canonical head sees the latest canonical write...
	assert_eq!(m.head_state(b3a).storage(&probe).unwrap().expect("there should be a value"), vec![5]);
	// ...while older blocks and fork heads must not leak it from the cache.
	assert!(m.head_state(b1a).storage(&probe).unwrap().is_none());
	assert!(m.head_state(b2b).storage(&probe).unwrap().is_none());
	assert!(m.head_state(b1b).storage(&probe).unwrap().is_none());

	// After reorging the whole chain away, the old head value is gone too.
	m.mutate_static(Action::ReorgWithImport { depth: 4, hash: b3b });
	assert!(m.head_state(b3a).storage(&probe).unwrap().is_none());
}
/// Checks that the cached storage visible at the canonical head agrees with
/// the mutator's reference model for every probed single-byte key.
fn is_head_match(mutator: &Mutator) -> bool {
	let head_state = mutator.canon_head_state();
	for key in Mutator::key_permutations() {
		let cached = head_state.storage(&key).unwrap();
		let modeled = mutator.head_storage_ref().get(&key);
		match (cached, modeled) {
			// A cached value disagreeing with the model is a failure.
			(Some(x), Some(y)) if Some(&x) != y.as_ref() => {
				eprintln!("{:?} != {:?}", x, y);
				return false;
			},
			// A cached value the model doesn't know about is a failure.
			(Some(x), None) => {
				eprintln!("{:?} != <missing>", x);
				return false;
			},
			// Cache misses and agreeing values are fine.
			_ => {},
		}
	}
	true
}
/// Checks that every canonical block's cached storage agrees with that
/// block's reference-model state for every probed single-byte key.
fn is_canon_match(mutator: &Mutator) -> bool {
	for node in mutator.canon.iter() {
		let head_state = mutator.head_state(node.hash);
		for key in Mutator::key_permutations() {
			let cached = head_state.storage(&key).unwrap();
			let modeled = node.state.get(&key);
			match (cached, modeled) {
				// A cached value disagreeing with the model is a failure.
				(Some(x), Some(y)) if Some(&x) != y.as_ref() => {
					eprintln!("at [{}]: {:?} != {:?}", node.hash, x, y);
					return false;
				},
				// A cached value the model doesn't know about is a failure.
				(Some(x), None) => {
					eprintln!("at [{}]: {:?} != <missing>", node.hash, x);
					return false;
				},
				// Cache misses and agreeing values are fine.
				_ => {},
			}
		}
	}
	true
}
#[test]
fn reorg() {
	// A single random key written on the canonical chain and on the fork.
	let probe = H256::random()[..].to_vec();
	let b0 = H256::random();
	let b1 = H256::random();
	let b2 = H256::random();
	let b1b = H256::random();
	let b2b = H256::random();

	let mut m = Mutator::new_empty();
	m.mutate_static(Action::Next { hash: b0, changes: vec![] });
	m.mutate_static(Action::Next { hash: b1, changes: vec![] });
	m.mutate_static(Action::Next { hash: b2, changes: vec![(probe.clone(), Some(vec![2]))] });
	m.mutate_static(Action::Fork { depth: 2, hash: b1b, changes: vec![(probe.clone(), Some(vec![3]))] });
	// Reorg onto the fork; the head cache must then match the model.
	m.mutate_static(Action::ReorgWithImport { depth: 2, hash: b2b });
	assert!(is_head_match(&m))
}
/// Builds a single-byte storage key.
fn key(k: u8) -> Vec<u8> {
	std::iter::once(k).collect()
}
/// Wraps a single byte as a present storage value.
fn val(v: u8) -> Option<Vec<u8>> {
	Some([v].to_vec())
}
/// One-entry change set mapping single-byte key `k` to single-byte value `v`.
fn keyval(k: u8, v: u8) -> KeySet {
	let entry = (key(k), val(v));
	vec![entry]
}
#[test]
fn reorg2() {
	// Canonical chain b0 <- b1a <- b2a <- b3a with distinct keys per block;
	// b1b forks off early and h2b is imported via a deep reorg.
	let b0 = H256::random();
	let b1a = H256::random();
	let b1b = H256::random();
	let b2b = H256::random();
	let b2a = H256::random();
	let b3a = H256::random();

	let mut m = Mutator::new_empty();
	m.mutate_static(Action::Next { hash: b0, changes: keyval(1, 1) });
	m.mutate_static(Action::Next { hash: b1a, changes: keyval(1, 1) });
	m.mutate_static(Action::Fork { depth: 2, hash: b1b, changes: keyval(2, 2 ) });
	m.mutate_static(Action::Next { hash: b2a, changes: keyval(3, 3) });
	m.mutate_static(Action::Next { hash: b3a, changes: keyval(4, 4) });
	// Reorg the whole chain onto the fork; head cache must match the model.
	m.mutate_static(Action::ReorgWithImport { depth: 4, hash: b2b });
	assert!(is_head_match(&m))
}
#[test]
fn fork2() {
	// Two consecutive fork blocks at the same depth, the second overwriting
	// a key written on the canonical head.
	let b1 = H256::random();
	let b2a = H256::random();
	let b2b = H256::random();
	let b3a = H256::random();
	let b3b = H256::random();

	let mut m = Mutator::new_empty();
	m.mutate_static(Action::Next { hash: b1, changes: vec![] });
	m.mutate_static(Action::Next { hash: b2a, changes: vec![] });
	m.mutate_static(Action::Next { hash: b3a, changes: keyval(1, 1) });
	m.mutate_static(Action::Fork { depth: 2, hash: b2b, changes: vec![] });
	m.mutate_static(Action::Fork { depth: 2, hash: b3b, changes: keyval(1, 2) });
	// The fork writes must not leak into the canonical head cache.
	assert!(is_head_match(&m))
}
#[test]
fn fork3() {
	// A fork that rewrites a key written at the canonical genesis; every
	// canonical block's cache must still match its own model state.
	let b1 = H256::random();
	let b2a = H256::random();
	let b2b = H256::random();
	let b3a = H256::random();

	let mut m = Mutator::new_empty();
	m.mutate_static(Action::Next { hash: b1, changes: keyval(1, 1) });
	m.mutate_static(Action::Next { hash: b2a, changes: keyval(2, 2) });
	m.mutate_static(Action::Next { hash: b3a, changes: keyval(3, 3) });
	m.mutate_static(Action::Fork { depth: 2, hash: b2b, changes: keyval(1, 3) });
	assert!(is_canon_match(&m))
}
// Property tests: apply arbitrary action sequences and verify cache/model
// agreement. `//` comments only — the `quickcheck!` macro pattern does not
// accept attributes (incl. doc comments) on the property fns.
quickcheck! {
	// The canonical head's cached storage matches the reference model after
	// any sequence of valid actions.
	fn head_complete(actions: Vec<Action>) -> TestResult {
		let mut mutator = Mutator::new_empty();
		for action in actions.into_iter() {
			// Discard generated sequences containing invalid actions.
			if mutator.mutate(action).is_err() {
				return TestResult::discard();
			}
		}
		if mutator.canon_len() == 0 {
			return TestResult::discard();
		}
		TestResult::from_bool(is_head_match(&mutator))
	}
	// Every canonical block's cached storage matches its model state after
	// any sequence of valid actions.
	fn canon_complete(actions: Vec<Action>) -> TestResult {
		let mut mutator = Mutator::new_empty();
		for action in actions.into_iter() {
			// Discard generated sequences containing invalid actions.
			if mutator.mutate(action).is_err() {
				return TestResult::discard();
			}
		}
		if mutator.canon_len() == 0 {
			return TestResult::discard();
		}
		TestResult::from_bool(is_canon_match(&mutator))
	}
}
}