From 7b4c6d8a5000a50c4f01e44b880715339c42f441 Mon Sep 17 00:00:00 2001 From: democ98 <1239865849@qq.com> Date: Thu, 22 May 2025 15:13:37 +0800 Subject: [PATCH] feat:the proof of the Pois algorithm is partially completed --- Cargo.lock | 92 +- Cargo.toml | 1 + crates/ces-pois/Cargo.toml | 11 +- crates/ces-pois/src/acc/file_manager.rs | 84 ++ crates/ces-pois/src/acc/mod.rs | 149 ++- crates/ces-pois/src/acc/multi_level_acc.rs | 714 ++++++++++- .../src/expanders/generate_expanders.rs | 66 +- .../src/expanders/generate_idle_file.rs | 221 +++- crates/ces-pois/src/expanders/mod.rs | 42 +- crates/ces-pois/src/pois/challenge.rs | 2 +- crates/ces-pois/src/pois/mod.rs | 2 +- crates/ces-pois/src/pois/prove.rs | 1115 ++++++++++++++++- crates/ces-pois/src/tree/mod.rs | 137 +- crates/ces-pois/src/util/mod.rs | 150 ++- crates/cestory/Cargo.toml | 13 +- crates/cestory/src/lib.rs | 23 +- crates/cestory/src/types.rs | 4 +- scripts/docker/env/gramine-rust.Dockerfile | 7 +- standalone/teeworker/ceseal/Cargo.lock | 6 +- 19 files changed, 2710 insertions(+), 129 deletions(-) create mode 100644 crates/ces-pois/src/acc/file_manager.rs diff --git a/Cargo.lock b/Cargo.lock index d07e38b7..d8306fe3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1918,7 +1918,9 @@ name = "ces-pois" version = "0.4.5" dependencies = [ "anyhow", + "async-trait", "bigdecimal", + "byteorder", "dashmap", "hex", "lazy_static", @@ -1930,7 +1932,10 @@ dependencies = [ "rand 0.8.5", "rsa", "serde", + "serde_json", "sha2 0.10.8", + "sysinfo", + "tokio", ] [[package]] @@ -6730,7 +6735,7 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows", + "windows 0.53.0", ] [[package]] @@ -7537,9 +7542,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.169" +version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libloading" @@ -8954,6 +8959,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -9108,7 +9122,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate 3.2.0", "proc-macro2", "quote", "syn 2.0.94", @@ -9120,6 +9134,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" +[[package]] +name = "objc2-core-foundation" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daeaf60f25471d26948a1c2f840e3f7d86f4109e3af4e8e4b5cd70c39690d925" +dependencies = [ + "bitflags 2.6.0", +] + [[package]] name = "object" version = "0.30.4" @@ -11624,7 +11647,7 @@ checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes", "rand 0.8.5", - "rand_core 0.4.2", + "rand_core 0.6.4", "serde", 
"unicode-normalization", ] @@ -20035,6 +20058,19 @@ dependencies = [ "syn 2.0.94", ] +[[package]] +name = "sysinfo" +version = "0.34.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4b93974b3d3aeaa036504b8eefd4c039dced109171c1ae973f1dc63b2c7e4b2" +dependencies = [ + "libc", + "memchr", + "ntapi", + "objc2-core-foundation", + "windows 0.57.0", +] + [[package]] name = "system-configuration" version = "0.6.1" @@ -20844,7 +20880,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.3.23", + "rand 0.8.5", "static_assertions", ] @@ -21800,6 +21836,16 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" +dependencies = [ + "windows-core 0.57.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.52.0" @@ -21819,6 +21865,40 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result 0.1.2", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.94", +] + +[[package]] +name = "windows-interface" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.94", +] + [[package]] name = "windows-registry" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 9d9c666f..db48b2a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -394,6 +394,7 @@ spin = { version = "0.9", default-features = false, features = [ static_assertions = "1.1.0" subxt = { git = "https://github.com/CESSProject/subxt", branch = "polkadot-stable2412", default-features = false } syn = "2.0" +sysinfo = { version = "0.34.2" } tempfile = "3.10" thiserror = "1.0" threadpool = "1.8.1" diff --git a/crates/ces-pois/Cargo.toml b/crates/ces-pois/Cargo.toml index 955a5238..b6698b6a 100644 --- a/crates/ces-pois/Cargo.toml +++ b/crates/ces-pois/Cargo.toml @@ -3,6 +3,10 @@ name = "ces-pois" version = "0.4.5" edition = "2021" +[features] +default = [] +use-sysinfo = [] + # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -19,4 +23,9 @@ prost = { workspace = true } rand = { workspace = true } rsa = { workspace = true, features = ["std"] } serde = { workspace = true, features = ["derive"] } -sha2 = { workspace = true } \ No newline at end of file +serde_json = { workspace = true, features = ["std"] } +sha2 = { workspace = true } +tokio = { workspace = true, features = ["full"] } +sysinfo = { workspace = true, optional = true } +async-trait = { workspace = true } +byteorder = { workspace = true } diff --git a/crates/ces-pois/src/acc/file_manager.rs b/crates/ces-pois/src/acc/file_manager.rs new file mode 100644 index 00000000..123e4dc1 --- /dev/null +++ b/crates/ces-pois/src/acc/file_manager.rs @@ 
-0,0 +1,84 @@ +use super::multi_level_acc::{AccData, DEFAULT_BACKUP_NAME, DEFAULT_NAME}; +use crate::util; +use anyhow::{Context, Result}; +use std::fs::{self, File}; +use std::io::Read; +use std::path::Path; + +pub fn save_acc_data(dir: &str, index: i64, elems: Vec>, wits: Vec>) -> Result<()> { + let data = AccData { values: elems, wits }; + + let jbytes = serde_json::to_vec(&data)?; + + let fpath = Path::new(dir).join(format!("{}-{}", DEFAULT_NAME, index)); + + util::save_file(&fpath, &jbytes) +} + +pub fn read_acc_data(dir: &str, index: i64) -> Result { + let fpath = format!("{}/{}-{}", dir, DEFAULT_NAME, index); + read_data(&fpath) +} +pub fn read_backup(dir: &str, index: i64) -> Result { + let fpath = format!("{}/{}-{}", dir, DEFAULT_BACKUP_NAME, index); + read_data(&fpath) +} + +pub fn read_data(fpath: &str) -> Result { + let mut file = File::open(fpath).context("read element data error")?; + let mut data = Vec::new(); + file.read_to_end(&mut data).context("read element data error")?; + serde_json::from_slice(&data).context("read element data error") +} + +// deleteAccData delete from the given index +pub fn delete_acc_data(dir: &str, last: i32) -> Result<()> { + let fs = fs::read_dir(dir).context("delete element data error")?; + for entry in fs { + let entry = entry.context("delete element data error")?; + let path = entry.path(); + if let Some(file_name) = path.file_name() { + if let Some(file_name_str) = file_name.to_str() { + if let Some(index_str) = file_name_str.rsplit('-').next() { + if let Ok(index) = index_str.parse::() { + if index <= last { + fs::remove_file(path).context("delete element data error")?; + } + } + } + } + } + } + Ok(()) +} + +pub fn clean_backup(dir: &str, index: i64) -> Result<()> { + let backup = format!("{}/{}-{}", dir, DEFAULT_BACKUP_NAME, index); + fs::remove_file(backup).context("clean backup error") +} + +pub fn backup_acc_data(dir: &str, index: i64) -> Result<()> { + let fpath = format!("{}/{}-{}", dir, DEFAULT_NAME, index); + let backup = format!("{}/{}-{}", dir, DEFAULT_BACKUP_NAME, index); + util::copy_file(&fpath, &backup).context("backup element data error") +} + +pub fn backup_acc_data_for_chall(src: &str, des: &str, index: i64) -> Result<()> { + let fpath = Path::new(src).join(format!("{}-{}", DEFAULT_NAME, index)); + let backup = Path::new(des).join(format!("{}-{}", DEFAULT_NAME, index)); + util::copy_file(fpath.to_str().unwrap(), backup.to_str().unwrap()) + .context("backup acc data for challenge error")?; + Ok(()) +} + +pub fn recovery_acc_data(dir: &str, index: i64) -> Result<()> { + let backup = Path::join(Path::new(dir), &format!("{}-{}", DEFAULT_BACKUP_NAME, index)); + let fpath = Path::join(Path::new(dir), &format!("{}-{}", DEFAULT_NAME, index)); + + if !backup.exists() { + return Ok(()); + } + + fs::rename(&backup, &fpath).context("recovery acc data error")?; + Ok(()) +} diff --git a/crates/ces-pois/src/acc/mod.rs b/crates/ces-pois/src/acc/mod.rs index b8cdcca5..80bdc2fa 100644 --- a/crates/ces-pois/src/acc/mod.rs +++ b/crates/ces-pois/src/acc/mod.rs @@ -1,10 +1,17 @@ +pub mod file_manager; pub mod hash_2_prime; pub mod multi_level_acc; +use byteorder::{BigEndian, ByteOrder}; +use std::sync::Arc; +use tokio::task; + +use multi_level_acc::AccNode; use num_bigint_dig::{BigUint, RandBigInt}; use num_integer::Integer; use num_traits::One; use rsa::{PublicKeyParts, RsaPrivateKey}; +use tokio::sync::RwLock; use hash_2_prime::h_prime; @@ -13,12 +20,16 @@ pub struct RsaKey { pub n: BigUint, //Z/nZ pub g: BigUint, // Generator } + impl RsaKey 
{ pub(crate) fn new(n: BigUint, g: BigUint) -> RsaKey { Self { n, g } } } +// Generate N and G +// lamda is the bit size of N(preferably 2048 bit) +// Note that the primes factors of N are not exposed for security reason pub fn rsa_keygen(lambda: usize) -> RsaKey { let mut rng = rand::thread_rng(); let pk = RsaPrivateKey::new(&mut rng, lambda).expect("Failed to generate RSA key"); @@ -36,6 +47,26 @@ pub fn rsa_keygen(lambda: usize) -> RsaKey { RsaKey { n: n.clone(), g } } +pub fn get_key_from_bytes(data: Vec) -> RsaKey { + if data.len() < 16 { + return rsa_keygen(2048); + } + let nl = BigEndian::read_u64(&data[0..8]); + let gl = BigEndian::read_u64(&data[8..16]); + + if nl == 0 || gl == 0 || data.len() - 16 != (nl + gl) as usize { + return rsa_keygen(2048); + } + + let n_bytes = &data[16..16 + nl as usize]; + let g_bytes = &data[16 + nl as usize..]; + + let n = BigUint::from_bytes_be(n_bytes); + let g = BigUint::from_bytes_be(g_bytes); + + RsaKey { n, g } +} + pub fn generate_acc(key: &RsaKey, acc: &[u8], elems: Vec>) -> Option> { if acc.is_empty() { return None; @@ -48,4 +79,120 @@ pub fn generate_acc(key: &RsaKey, acc: &[u8], elems: Vec>) -> Option>) -> Vec> { + if us.is_empty() { + return vec![]; + } + if us.len() == 1 { + return vec![g.to_bytes_be()]; + } + let (left, right) = us.split_at(us.len() / 2); + + let left_cloned = left.to_vec(); + let right_cloned = right.to_vec(); + + let g1 = Arc::new(tokio::sync::RwLock::new(g.clone())); + let g2 = Arc::new(tokio::sync::RwLock::new(g.clone())); + + let g1_clone = Arc::clone(&g1); + let g2_clone = Arc::clone(&g2); + + let n_clone = n.clone(); + let right_task = task::spawn(async move { + for u in right_cloned { + let e = h_prime(&BigUint::from_bytes_be(&u)); + let mut g1_locked = g1_clone.write().await; + *g1_locked = g1_locked.modpow(&e, &n_clone); + } + }); + + let n_clone = n.clone(); + let left_task = task::spawn(async move { + for u in left_cloned { + let e = h_prime(&BigUint::from_bytes_be(&u)); + let mut g2_locked = g2_clone.write().await; + *g2_locked = g2_locked.modpow(&e, &n_clone); + } + }); + + // Wait for both tasks to complete + let _ = tokio::try_join!(right_task, left_task); + + let u1 = Box::pin(generate_witness(g1.read().await.clone(), n.clone(), left.to_vec())).await; + let u2 = Box::pin(generate_witness(g2.read().await.clone(), n.clone(), right.to_vec())).await; + + let mut result = u1; + result.extend(u2); + result +} + +pub async fn gen_wits_for_acc_nodes(g: &BigUint, n: &BigUint, elems: &mut Vec>>) { + let lens = elems.len(); + if lens == 0 { + return; + } + if lens == 1 { + elems[0].write().await.wit = g.to_bytes_be(); + return; + } + let left = &mut elems[0..lens / 2].to_vec(); + let right = &mut elems[lens / 2..].to_vec(); + let g1 = g; + let g2 = g; + + //todo:use tokio::spawn to speed up + for u in right.clone() { + let e = h_prime(&BigUint::from_bytes_be(&u.read().await.value)); + g1.modpow(&e, &n); + } + + for u in left.clone() { + let e = h_prime(&BigUint::from_bytes_be(&u.read().await.value)); + g2.modpow(&e, &n); + } + Box::pin(gen_wits_for_acc_nodes(g1, n, left)).await; + Box::pin(gen_wits_for_acc_nodes(g2, n, right)).await; +} +// pub async fn gen_wits_for_acc_nodes(g: &BigUint, n: &BigUint, elems: &mut Vec>>) { +// let lens = elems.len(); +// if lens == 0 { +// return; +// } +// if lens == 1 { +// elems[0].write().await.wit = g.to_bytes_be(); +// return; +// } + +// let (left, right) = elems.split_at_mut(lens / 2); + +// let g1 = g.clone(); // Clone the g value for use in the right part +// let g2 = 
g.clone(); // Clone the g value for use in the left part + +// let n_clone = n.clone(); +// // Spawn tasks to process the right and left parts concurrently +// let right_task = task::spawn(async move { +// for u in right { +// let e = h_prime(&BigUint::from_bytes_be(&u.read().await.value)); +// let mut g1_locked = g1.clone(); // Clone the value of g1 for use in the operation +// g1_locked.modpow(&e, &n_clone); // Perform the modpow operation for the right part +// } +// }); + +// let n_clone = n.clone(); +// let left_task = task::spawn(async move { +// for u in left { +// let e = h_prime(&BigUint::from_bytes_be(&u.read().await.value)); +// let mut g2_locked = g2.clone(); // Clone the value of g2 for use in the operation +// g2_locked.modpow(&e, &n_clone); // Perform the modpow operation for the left part +// } +// }); + +// // Wait for both tasks to complete +// let _ = tokio::try_join!(right_task, left_task); + +// // Recursively generate witnesses for the left and right parts +// gen_wits_for_acc_nodes(&g1, n, &mut left.to_vec()).await; +// gen_wits_for_acc_nodes(&g2, n, &mut right.to_vec()).await; +// } diff --git a/crates/ces-pois/src/acc/multi_level_acc.rs b/crates/ces-pois/src/acc/multi_level_acc.rs index 48dfb107..cdd3b526 100644 --- a/crates/ces-pois/src/acc/multi_level_acc.rs +++ b/crates/ces-pois/src/acc/multi_level_acc.rs @@ -1,11 +1,54 @@ +use std::sync::Arc; + +use tokio::{fs, sync::RwLock}; + +use crate::acc::file_manager::{recovery_acc_data, save_acc_data}; + +use super::{file_manager::*, gen_wits_for_acc_nodes, generate_acc, generate_witness, hash_2_prime::h_prime, RsaKey}; +use anyhow::{bail, Context, Result}; +use async_trait::async_trait; use num_bigint_dig::BigUint; use rand::Rng; use serde::{Deserialize, Serialize}; -use super::{generate_acc, hash_2_prime::h_prime, RsaKey}; +pub const DEFAULT_PATH: &str = "./acc/"; +pub const DEFAULT_ELEMS_NUM: i32 = 256; +pub const DEFAULT_LEVEL: i32 = 3; +pub const DEFAULT_NAME: &str = "sub-acc"; +pub const DEFAULT_BACKUP_NAME: &str = "sub-acc"; + +#[async_trait] +pub trait AccHandle { + async fn get_snapshot(&mut self) -> Arc>; + + async fn add_elements_and_proof(&mut self, elems: Vec>) -> Result<(WitnessNode, Vec>)>; + + async fn delete_elements_and_proof(&mut self, num: i64) -> Result<(WitnessNode, Vec>)>; -const DEFAULT_LEVEL: i32 = 3; -const DEFAULT_ELEMS_NUM: i32 = 256; + async fn get_witness_chains(&mut self, indexes: Vec) -> Result>; + + async fn update_snapshot(&mut self) -> bool; + + async fn rollback(&mut self) -> bool; + + async fn restore_sub_acc_file(&self, index: i64, elems: Vec>) -> Result<()>; + + async fn get_file_path(&self) -> &str; +} + +#[derive(Clone, Debug, Default)] +pub struct AccNode { + pub value: Vec, + pub children: Vec>>, + pub len: i64, + pub wit: Vec, +} + +#[derive(Clone, Debug, Default, Deserialize, Serialize)] +pub struct AccData { + pub values: Vec>, + pub wits: Vec>, +} #[derive(Clone, Debug, Default, Deserialize, Serialize)] pub struct WitnessNode { @@ -14,6 +57,653 @@ pub struct WitnessNode { pub acc: Option>, } +#[derive(Clone, Debug)] +pub struct MutiLevelAcc { + pub accs: Option>>, + pub key: RsaKey, + pub elem_nums: i64, + pub deleted: i64, + pub curr_count: i64, + pub curr: Option>>, + pub parent: Option>>, + pub snapshot: Option>>, + pub file_path: String, +} + +impl Default for MutiLevelAcc { + fn default() -> Self { + Self { + accs: Default::default(), + key: Default::default(), + elem_nums: 0, + deleted: 0, + curr_count: 0, + curr: Default::default(), + parent: Default::default(), + 
snapshot: None, + file_path: "".to_string(), + } + } +} + +pub async fn recovery(acc_path: &str, key: RsaKey, front: i64, rear: i64) -> Result { + let acc_path = if acc_path.is_empty() { DEFAULT_PATH } else { acc_path }; + + if !std::path::Path::new(&acc_path).exists() { + fs::create_dir_all(&acc_path) + .await + .context("recovery muti-acc error:Failed to create directory for the accumulator")?; + } + let acc = AccNode { value: key.g.to_bytes_be(), ..Default::default() }; + let mut acc_manager = MutiLevelAcc { + accs: Some(Arc::new(RwLock::new(acc))), + key, + file_path: acc_path.to_string(), + deleted: front, + elem_nums: 0, + curr_count: 0, + curr: None, + parent: None, + snapshot: None, + }; + acc_manager.construct_muti_acc(rear).await.context("recovery muti-acc error")?; + Ok(acc_manager) +} + +pub async fn new_muti_level_acc(path: &str, key: RsaKey) -> Result { + let path = if path == "" { DEFAULT_PATH.to_string() } else { path.to_string() }; + + if !std::path::Path::new(&path).exists() { + fs::create_dir_all(&path).await?; + } + let mut acc = AccNode::default(); + acc.value = key.g.to_bytes_be(); + + let mut acc_manager = MutiLevelAcc::default(); + + acc_manager.accs = Some(Arc::new(RwLock::new(acc))); + acc_manager.key = key; + acc_manager.file_path = path; + + Ok(acc_manager) +} + +#[async_trait] +impl AccHandle for MutiLevelAcc { + async fn get_snapshot(&mut self) -> Arc> { + if self.snapshot.is_none() { + self.create_snap_shot().await; + }; + self.snapshot.clone().unwrap() + } + + // AddElementsAndProof adds elements to muti-level acc and create proof of added elements + async fn add_elements_and_proof(&mut self, elems: Vec>) -> Result<(WitnessNode, Vec>)> { + let snapshot = self.get_snapshot().await; + let mut exist = WitnessNode { + elem: snapshot.read().await.accs.clone().unwrap().read().await.value.clone(), + ..Default::default() + }; + + self.add_elements(elems).await.context("prove acc insertion proof")?; + //the proof of adding elements consists of two parts, + //the first part is the witness chain of the bottom accumulator where the element is located, + //witness chain node is a special structure(Elem(acc value) is G,Wit is parent node's Elem) + //when inserting an element needs to trigger the generation of a new accumulator, + //the second part is an accumulator list, which contains the accumulator value + //recalculated from the bottom to the top after inserting elements + let mut count = 1; + let mut p = self.accs.clone(); + let mut q = self.snapshot.clone().unwrap().read().await.accs.clone(); + while p.is_some() && q.is_some() && count < DEFAULT_LEVEL as usize { + if p.clone().unwrap().read().await.len > q.clone().unwrap().read().await.len { + for _ in count..DEFAULT_LEVEL as usize { + exist = WitnessNode { acc: Some(Box::new(exist)), ..Default::default() }; + exist.elem = self.key.g.to_bytes_be(); + exist.wit = exist.acc.as_ref().unwrap().elem.clone(); + } + break; + } + count += 1; + let p_len = p.clone().unwrap().read().await.len as usize; + let q_len = q.clone().unwrap().read().await.len as usize; + p = Some(p.clone().unwrap().read().await.children[p_len - 1].clone()); + q = Some(q.clone().unwrap().read().await.children[q_len - 1].clone()); + + exist = WitnessNode { acc: Some(Box::new(exist)), ..Default::default() }; + exist.elem = q.clone().unwrap().read().await.value.clone(); + exist.wit = q.clone().unwrap().read().await.wit.clone(); + } + + p = self.accs.clone(); + let mut accs = vec![vec![]; DEFAULT_LEVEL as usize]; + for i in 0..DEFAULT_LEVEL { + 
accs[(DEFAULT_LEVEL - i - 1) as usize] = p.clone().unwrap().read().await.value.clone(); + if p.clone().unwrap().read().await.children.len() > 0 { + let p_len = p.clone().unwrap().read().await.len as usize; + p = Some(p.clone().clone().unwrap().read().await.children[p_len - 1].clone()); + }; + } + + Ok((exist, accs)) + } + + // DeleteElementsAndProof deletes elements from muti-level acc and create proof of deleted elements + async fn delete_elements_and_proof(&mut self, num: i64) -> Result<(WitnessNode, Vec>)> { + if self.elem_nums == 0 { + bail!("delete null set:prove acc deletion proof error") + } + + //Before deleting elements, get their chain of witness + let exist = WitnessNode { + elem: self.accs.clone().unwrap().read().await.children[0].read().await.children[0] + .read() + .await + .value + .clone(), + wit: self.accs.clone().unwrap().read().await.children[0].read().await.children[0] + .read() + .await + .wit + .clone(), + acc: Some(Box::new(WitnessNode { + elem: self.accs.clone().unwrap().read().await.children[0].read().await.value.clone(), + wit: self.accs.clone().unwrap().read().await.children[0].read().await.wit.clone(), + acc: Some(Box::new(WitnessNode { + elem: self.accs.clone().unwrap().read().await.value.clone(), + wit: Vec::new(), + acc: None, + })), + })), + }; + + let snapshot = self.get_snapshot().await; + + self.delete_elements(num).await.context("prove acc deletion proof error")?; + + let mut accs = vec![vec![]; DEFAULT_LEVEL as usize]; + accs[DEFAULT_LEVEL as usize - 1] = self.accs.clone().unwrap().read().await.value.clone(); + let mut count = 1; + let mut p = self.accs.clone(); + let mut q = snapshot.read().await.accs.clone(); + while p.is_some() && q.is_some() && count < DEFAULT_LEVEL as usize { + if p.clone().unwrap().read().await.len < q.clone().unwrap().read().await.len { + for i in (DEFAULT_LEVEL as usize - count - 1)..=0 { + accs[i] = self.key.g.to_bytes_be(); + } + break; + } + count += 1; + p = Some(p.clone().unwrap().read().await.children[0].clone()); + q = Some(q.clone().unwrap().read().await.children[0].clone()); + accs[DEFAULT_LEVEL as usize - count] = p.clone().unwrap().read().await.value.clone(); + } + + Ok((exist, accs)) + } + + // get witness chains for prove space challenge + async fn get_witness_chains(&mut self, indexes: Vec) -> Result> { + let mut data = AccData::default(); + let snapshot = self.get_snapshot().await; + let mut chains = Vec::new(); + let mut fidx = -1; + + for i in 0..indexes.len() { + if indexes[i] <= self.deleted || indexes[i] > self.deleted + self.elem_nums { + bail!("bad index") + } + + if (indexes[i] - 1) / DEFAULT_ELEMS_NUM as i64 > fidx { + fidx = (indexes[i] - 1) / DEFAULT_ELEMS_NUM as i64; + data = read_acc_data(&self.file_path, fidx as i64)?; + } + + let chain = snapshot + .clone() + .write() + .await + .get_witness_chain(indexes[i], &data) + .await + .context("get witness chains error")?; + chains.push(chain); + } + Ok(chains) + } + + //UpdateSnapshot will update acc's snapshot,this method should be called after acc updated + async fn update_snapshot(&mut self) -> bool { + self.create_snap_shot().await; + true + } + + //RollBack will roll back acc to snapshot version,please use with caution + async fn rollback(&mut self) -> bool { + if self.snapshot.is_none() { + return false; + } + if self.deleted != self.snapshot.clone().unwrap().read().await.deleted { + if recovery_acc_data(&self.file_path, self.deleted as i64 / DEFAULT_ELEMS_NUM as i64).is_err() { + return false; + }; + }; + let other = self.snapshot.clone().unwrap(); 
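+        // Restore state from the snapshot: the accumulator tree is deep-copied via copy_acc_node below, then key, element counts, curr/parent pointers, deleted counter and file path are taken over from the snapshot.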
+ let other_guard = other.read().await; + let mut accs = AccNode::default(); + copy_acc_node(&other_guard.accs.clone().unwrap().read().await.clone(), &mut accs).await; + self.accs = Some(Arc::new(RwLock::new(accs))); + self.key = other_guard.key.clone(); + self.elem_nums = other_guard.elem_nums; + self.curr_count = other_guard.curr_count; + + if self.accs.clone().unwrap().read().await.len > 0 { + let index = self.accs.clone().unwrap().read().await.len as usize - 1; + self.parent = Some(self.accs.clone().unwrap().read().await.children[index].clone()); + } + + if let Some(ref parent) = self.parent { + if parent.read().await.len > 0 { + let index = self.parent.clone().unwrap().read().await.len as usize - 1; + self.curr = Some(self.parent.clone().unwrap().read().await.children[index].clone()); + } + } + self.deleted = other_guard.deleted; + self.file_path = other_guard.file_path.clone(); + true + } + + async fn restore_sub_acc_file(&self, index: i64, elems: Vec>) -> Result<()> { + if elems.len() != DEFAULT_ELEMS_NUM as usize { + bail!("wrong number of elements") + } + let mut data = AccData { values: elems, wits: Vec::new() }; + data.wits = generate_witness(self.key.g.clone(), self.key.n.clone(), data.values.clone()).await; + + save_acc_data(self.file_path.as_str(), index, data.values, data.wits) + } + + async fn get_file_path(&self) -> &str { + &self.file_path + } +} + +impl MutiLevelAcc { + pub async fn create_snap_shot(&mut self) { + // self.snapshot = Some(Arc::new(RwLock::new(MutiLevelAcc::default()))); + let mut new_snapshot = MutiLevelAcc::default(); + let mut accs = AccNode::default(); + copy_acc_node(&self.accs.clone().unwrap().read().await.clone(), &mut accs).await; + new_snapshot.accs = Some(Arc::new(RwLock::new(accs))); + new_snapshot.key = self.key.clone(); + new_snapshot.elem_nums = self.elem_nums; + new_snapshot.curr_count = self.curr_count; + + if new_snapshot.accs.clone().unwrap().read().await.len > 0 { + let index = new_snapshot.accs.clone().unwrap().read().await.len as usize - 1; + new_snapshot.parent = Some(new_snapshot.accs.clone().unwrap().read().await.children[index].clone()); + } + if let Some(ref parent) = new_snapshot.parent { + if parent.read().await.len > 0 { + let index = new_snapshot.parent.clone().unwrap().read().await.len as usize - 1; + new_snapshot.curr = Some(new_snapshot.parent.clone().unwrap().read().await.children[index].clone()); + } + } + new_snapshot.deleted = self.deleted; + new_snapshot.file_path = self.file_path.clone(); + self.snapshot = Some(Arc::new(RwLock::new(new_snapshot))); + } + + // pub async fn set_update(&mut self, yes: bool) {} + + pub async fn add_elements(&mut self, elems: Vec>) -> Result<()> { + let lens = elems.len(); + // the range of length of elems to insert is [0, 1024] + if lens == 0 + || (self.curr_count < DEFAULT_ELEMS_NUM as i64 && lens as i64 + self.curr_count > DEFAULT_ELEMS_NUM as i64) + { + bail!("add elements error:illegal number of elements") + } + let new_acc = self.add_elements_internal(elems).await.context("add elements error")?; + self.add_sub_acc(new_acc).await; + Ok(()) + } + + async fn add_elements_internal(&mut self, elems: Vec>) -> Result { + let mut node = AccNode::default(); + + let mut data = if self.curr_count > 0 && self.curr_count < DEFAULT_ELEMS_NUM as i64 { + let index = (self.deleted + self.elem_nums - 1) / DEFAULT_ELEMS_NUM as i64; + let mut data = read_acc_data(&self.file_path, index).context("add elements to sub acc error")?; + data.values.extend_from_slice(&elems); + data + } else { + let mut data = 
AccData::default(); + data.values = elems.clone(); + data + }; + + data.wits = generate_witness(self.key.g.clone(), self.key.n.clone(), data.values.clone()).await; + node.len = data.values.len() as i64; + node.value = generate_acc( + &self.key, + &data.wits[(node.len - 1) as usize], + vec![data.values[(node.len - 1) as usize].clone()], + ) + .unwrap(); + + let index = ((self.deleted + self.elem_nums + elems.len() as i64) - 1) / DEFAULT_ELEMS_NUM as i64; + + save_acc_data(&self.file_path, index, data.values, data.wits).context("add elements to sub acc error")?; + + Ok(node) + } + + // addSubAccs inserts the sub acc built with new elements into the multilevel accumulator + pub async fn add_sub_acc(&mut self, mut sub_acc: AccNode) { + // acc.CurrCount will be equal to zero when the accumulator is empty + if self.curr_count == 0 { + sub_acc.wit = self.key.g.to_bytes_be(); + self.curr_count = sub_acc.len; + self.curr = Some(Arc::new(RwLock::new(sub_acc))); + self.parent = Some(Arc::new(RwLock::new(AccNode { + value: generate_acc( + &self.key, + &self.key.g.to_bytes_be(), + vec![self.curr.clone().unwrap().read().await.value.clone()], + ) + .unwrap(), + wit: self.key.g.to_bytes_be(), + children: vec![self.curr.clone().unwrap()], + len: 1, + }))); + self.accs = Some(Arc::new(RwLock::new(AccNode { + value: generate_acc( + &self.key, + &self.key.g.to_bytes_be(), + vec![self.parent.clone().unwrap().read().await.value.clone()], + ) + .unwrap(), + children: vec![self.parent.clone().unwrap()], + len: 1, + wit: Vec::new(), + }))); + self.elem_nums += self.curr_count; + return; + } + + let sub_acc_pointer = Arc::new(RwLock::new(sub_acc.clone())); + // The upper function has judged that acc.CurrCount + elemNums is less than or equal DEFAULT_ELEMS_NUM + if self.curr_count > 0 && self.curr_count < DEFAULT_ELEMS_NUM as i64 { + self.elem_nums += sub_acc.len - self.curr_count; + let lens = self.parent.clone().unwrap().read().await.children.len(); + self.parent.clone().unwrap().write().await.children[lens - 1] = sub_acc_pointer.clone(); + } else if self.parent.clone().unwrap().read().await.children.len() + 1 <= DEFAULT_ELEMS_NUM as usize { + self.elem_nums += sub_acc.len; + self.parent + .clone() + .unwrap() + .write() + .await + .children + .push(sub_acc_pointer.clone()); + } else { + self.elem_nums += sub_acc.len; + let node = Arc::new(RwLock::new(AccNode { + value: Vec::new(), + children: vec![sub_acc_pointer.clone()], + len: 1, + wit: self.key.g.to_bytes_be(), + })); + self.accs.clone().unwrap().write().await.children.push(node.clone()); + self.parent = Some(node.clone()) + } + + self.curr = Some(sub_acc_pointer.clone()); + self.curr_count = self.curr.clone().unwrap().read().await.len as i64; + // Update sibling witness and parent acc + self.parent.clone().unwrap().write().await.update_acc(&self.key).await; + // Update parents and top acc + self.accs.clone().unwrap().write().await.update_acc(&self.key).await; + } + + // addSubAccBybatch inserts the sub acc built with new elements into the multilevel accumulator, + // However, the lazy update mechanism is adopted, and the final update is performed after the accumulator is built. 
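+    // Unlike add_sub_acc, this method does not refresh witnesses or parent accumulator values here; construct_muti_acc calls update_acc on every mid-level node and on the top accumulator once all sub accs have been inserted.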
+ pub async fn add_sub_acc_by_batch(&mut self, sub_acc: AccNode) { + let sub_acc_pointer = Arc::new(RwLock::new(sub_acc.clone())); + // acc.CurrCount will be equal to zero when the accumulator is empty + if self.curr_count == 0 { + self.curr = Some(sub_acc_pointer.clone()); + self.curr_count = self.curr.as_ref().unwrap().read().await.len; + let parent_node = AccNode { children: vec![self.curr.clone().unwrap()], len: 1, ..Default::default() }; + self.parent = Some(Arc::new(RwLock::new(parent_node))); + + let acc_node = AccNode { children: vec![self.parent.clone().unwrap()], len: 1, ..Default::default() }; + self.accs = Some(Arc::new(RwLock::new(acc_node))); + self.elem_nums += self.curr_count; + return; + } + + // The upper function has judged that acc.CurrCount + elemNums is less than or equal DEFAULT_ELEMS_NUM + if self.curr_count > 0 && self.curr_count < DEFAULT_ELEMS_NUM as i64 { + self.elem_nums += sub_acc.len - self.curr_count; + let lens = self.parent.clone().unwrap().read().await.children.len(); + self.parent.clone().unwrap().write().await.children[lens - 1] = sub_acc_pointer.clone(); + } else if self.parent.clone().unwrap().read().await.children.len() + 1 <= DEFAULT_ELEMS_NUM as usize { + self.elem_nums += sub_acc.len; + self.parent + .clone() + .unwrap() + .write() + .await + .children + .push(sub_acc_pointer.clone()); + } else { + self.elem_nums += sub_acc.len; + let node = AccNode { children: vec![sub_acc_pointer.clone()], len: 1, ..Default::default() }; + let node_pointer = Arc::new(RwLock::new(node)); + self.accs.clone().unwrap().write().await.children.push(node_pointer.clone()); + self.parent = Some(node_pointer); + } + + self.curr = Some(sub_acc_pointer.clone()); + self.curr_count = self.curr.clone().unwrap().read().await.len; + } + + pub async fn delete_elements(&mut self, num: i64) -> Result<()> { + let index = self.deleted / DEFAULT_ELEMS_NUM as i64; + let offset = self.deleted % DEFAULT_ELEMS_NUM as i64; + + if num <= 0 || num > self.elem_nums || num + offset > DEFAULT_ELEMS_NUM as i64 { + bail!("illegal number of elements") + } + + // Read data from disk + let mut data = read_acc_data(&self.file_path, index).context("delet elements error")?; + // Backup file + backup_acc_data(&self.file_path, index).context("delet elements error")?; + + // Delete elements from acc and update acc + if num < data.values.len() as i64 { + data.values = data.values[num as usize..].to_vec(); + data.wits = generate_witness(self.key.g.clone(), self.key.n.clone(), data.values.clone()).await; + save_acc_data(&self.file_path, index, data.values.clone(), data.wits.clone())?; + self.accs.clone().unwrap().write().await.children[0].write().await.children[0] + .write() + .await + .len -= num; + let len = self.accs.clone().unwrap().read().await.children[0].read().await.children[0] + .read() + .await + .len; + self.accs.clone().unwrap().write().await.children[0].write().await.children[0] + .write() + .await + .value = + generate_acc(&self.key, &data.wits[(len - 1) as usize], vec![data.values[(len - 1) as usize].clone()]) + .unwrap(); + } else { + delete_acc_data(&self.file_path, index as i32).context("delet elements error")?; + + // Update mid-level acc + self.accs.clone().unwrap().write().await.children[0].write().await.children = + self.accs.clone().unwrap().read().await.children[0].read().await.children[1..].to_vec(); + self.accs.clone().unwrap().write().await.children[0].write().await.len -= 1; + if self.accs.clone().unwrap().read().await.children[0].read().await.len == 0 + && 
self.accs.as_ref().unwrap().read().await.len >= 1 + { + self.accs.clone().unwrap().write().await.children = + self.accs.clone().unwrap().read().await.children[1..].to_vec(); + self.accs.clone().unwrap().write().await.len -= 1; + } + + // Update top-level acc + if self.accs.clone().unwrap().read().await.len == 0 { + self.parent = None; + self.curr = None; + self.curr_count = 0; + } + } + + self.elem_nums -= num; + // Update sibling witness and parent acc + self.accs.clone().unwrap().write().await.children[0] + .clone() + .write() + .await + .update_acc(&self.key) + .await; + // Update parents and top acc + self.accs.clone().unwrap().write().await.update_acc(&self.key).await; + self.deleted += num; + Ok(()) + } + + pub async fn get_witness_chain(&mut self, index: i64, data: &AccData) -> Result { + let idx = (index - (DEFAULT_ELEMS_NUM as i64 - data.values.len() as i64) - 1) % DEFAULT_ELEMS_NUM as i64; + let index = index - (self.deleted - self.deleted % DEFAULT_ELEMS_NUM as i64); + let mut p = self.accs.clone().unwrap(); + let mut wit = WitnessNode::default(); + let mut i = 0; + + for _ in 0..DEFAULT_LEVEL { + if i == 0 { + wit = WitnessNode { elem: p.read().await.value.clone(), wit: p.read().await.wit.clone(), acc: None }; + } else { + wit = WitnessNode { + elem: p.read().await.value.clone(), + wit: p.read().await.wit.clone(), + acc: Some(Box::new(wit)), + }; + } + + let size = (DEFAULT_ELEMS_NUM as f64).powf((DEFAULT_LEVEL - i - 1) as f64) as i64; + let mut idx = (index - 1) / size; + idx = idx % size; + + if p.read().await.children.len() < idx as usize + 1 || p.read().await.children.is_empty() { + i += 1; + continue; + } + let p_child = p.read().await.children[idx as usize].clone(); + + p = p_child.clone(); + i += 1; + } + + if i < DEFAULT_LEVEL { + return Err(anyhow::anyhow!("get witness node error").into()); + } + + let wit_node = WitnessNode { + elem: data.values[idx as usize].clone(), + wit: data.wits[idx as usize].clone(), + acc: Some(Box::new(wit)), + }; + + Ok(wit_node) + } + + pub async fn construct_muti_acc(&mut self, rear: i64) -> Result<()> { + if rear == self.deleted { + return Ok(()); + } + let num = (rear - self.deleted - 1) / DEFAULT_ELEMS_NUM as i64; + let offset = self.deleted % DEFAULT_ELEMS_NUM as i64; + for i in 0..=num { + let index = self.deleted / DEFAULT_ELEMS_NUM as i64 + i; + let mut backup = read_backup(&self.file_path, index)?; + if backup.values.len() + offset as usize != DEFAULT_ELEMS_NUM as usize { + backup = read_acc_data(&self.file_path, index)?; + } else { + recovery_acc_data(&self.file_path, index)?; + } + + let mut node = AccNode::default(); + let right = backup.values.len(); + if i == 0 && (DEFAULT_ELEMS_NUM as i64 - offset as i64) < (right as i64) { + let left = self.deleted % DEFAULT_ELEMS_NUM as i64 - (DEFAULT_ELEMS_NUM as i64 - right as i64); //sub real file offset + backup.values = backup.values[left as usize..right].to_vec(); + backup.wits = generate_witness(self.key.g.clone(), self.key.n.clone(), backup.values.clone()).await; + + save_acc_data(&self.file_path, index, backup.values.clone(), backup.wits.clone())?; + } + + node.len = backup.values.len() as i64; + node.value = generate_acc( + &self.key, + &backup.wits[(node.len - 1) as usize], + vec![backup.values[(node.len - 1) as usize].clone()], + ) + .unwrap(); + self.add_sub_acc_by_batch(node).await; + if i == 0 && offset > 0 { + self.curr_count += self.deleted % DEFAULT_ELEMS_NUM as i64; + } + } + + // Update the upper accumulator and its evidence + for acc in 
self.accs.clone().unwrap().write().await.children.iter_mut() { + acc.write().await.update_acc(&self.key).await; + } + self.accs.clone().unwrap().write().await.update_acc(&self.key).await; + + Ok(()) + } +} + +impl AccNode { + pub async fn update_acc(&mut self, key: &RsaKey) { + let lens = self.children.len(); + self.len = lens as i64; + if lens == 0 { + self.value = key.g.to_bytes_be(); + self.wit = Vec::new(); + return; + } + gen_wits_for_acc_nodes(&key.g, &key.n, &mut self.children).await; + let last = self.children[lens - 1].clone(); + self.value = generate_acc(key, &last.read().await.wit, vec![last.read().await.value.clone()]).unwrap(); + } +} + +async fn copy_acc_node(src: &AccNode, target: &mut AccNode) { + target.value = src.value.clone(); + target.children = src.children.clone(); + target.len = src.len; + target.wit = src.wit.clone(); + for child in &src.children { + let child_guard = child.read().await.clone(); + let mut new_child = AccNode { + value: child_guard.value.clone(), + children: child_guard.children.clone(), + len: child_guard.len, + wit: child_guard.wit.clone(), + }; + Box::pin(copy_acc_node(&child_guard, &mut new_child)).await; + target.children.push(Arc::new(RwLock::new(new_child))); + } +} + pub fn verify_insert_update( key: RsaKey, exist: Option>, @@ -81,12 +771,7 @@ pub fn verify_mutilevel_acc(key: &RsaKey, wits: Option<&mut WitnessNode>, acc: & current_wit.elem.eq(acc) } -pub fn verify_mutilevel_acc_for_batch( - key: &RsaKey, - base_idx: i64, - wits: Vec, - acc: &[u8], -) -> bool { +pub fn verify_mutilevel_acc_for_batch(key: &RsaKey, base_idx: i64, wits: Vec, acc: &[u8]) -> bool { let mut sub_acc: Option> = None; let default_elems_num = DEFAULT_ELEMS_NUM as i64; for (i, witness) in wits.iter().enumerate() { @@ -106,12 +791,7 @@ pub fn verify_mutilevel_acc_for_batch( let mut rng = rand::thread_rng(); if rng.gen_range(0..100) < 25 - && !verify_acc( - key, - &witness.acc.clone().unwrap().elem, - &witness.elem, - &witness.wit, - ) + && !verify_acc(key, &witness.acc.clone().unwrap().elem, &witness.elem, &witness.wit) { return false; } @@ -142,7 +822,7 @@ pub fn verify_delete_update( let mut p = exist; let mut count = 1; while p.acc.is_some() { - if accs[count - 1].eq(&key.g.to_bytes_be()) { + if !accs[count - 1].eq(&key.g.to_bytes_be()) { sub_acc = generate_acc(&key, &p.wit, vec![accs[count - 1].clone()]); } else { sub_acc = Some(p.wit.clone()); @@ -155,4 +835,4 @@ pub fn verify_delete_update( } true -} \ No newline at end of file +} diff --git a/crates/ces-pois/src/expanders/generate_expanders.rs b/crates/ces-pois/src/expanders/generate_expanders.rs index 3eb486cc..29ad735e 100644 --- a/crates/ces-pois/src/expanders/generate_expanders.rs +++ b/crates/ces-pois/src/expanders/generate_expanders.rs @@ -6,13 +6,7 @@ pub fn construct_stacked_expanders(k: i64, n: i64, d: i64) -> Expanders { Expanders::new(k, n, d) } -pub fn calc_parents( - expanders: &Expanders, - node: &mut Node, - miner_id: &[u8], - count: i64, - rlayer: i64, -) { +pub fn calc_parents(expanders: &Expanders, node: &mut Node, miner_id: &[u8], count: i64, rlayer: i64) { if node.parents.capacity() != (expanders.d + 1) as usize { return; } @@ -25,38 +19,38 @@ pub fn calc_parents( let group_size = expanders.n / expanders.d; let offset = group_size / 256; - let mut hash = Sha512::new(); - hash.update(miner_id); - hash.update(count.to_be_bytes()); - hash.update(rlayer.to_be_bytes()); // add real layer - hash.update((node.index as i64).to_be_bytes()); - let res = hash.clone().finalize(); - let mut res = 
res.to_vec(); + let mut hasher = Sha512::new(); + hasher.update(miner_id); + hasher.update(&count.to_be_bytes()); + hasher.update(&rlayer.to_be_bytes()); + hasher.update((node.index as i64).to_be_bytes()); + + let mut res = hasher.finalize().to_vec(); + if expanders.d > 64 { - hash.reset(); - hash.update(res.clone()); - let result = hash.finalize(); - res.append(&mut result.to_vec()); + let mut hasher2 = Sha512::new(); + hasher2.update(&res); + let extra = hasher2.finalize().to_vec(); + res.extend_from_slice(&extra); } - res = res[..expanders.d as usize].to_vec(); - let parent = node.index - expanders.n as NodeType; + res.truncate(expanders.d as usize); + + let parent = (node.index - expanders.n as i32) as NodeType; node.add_parent(parent); - for i in 0..res.len() as i64 { - let index = (layer - 1) * expanders.n - + i * group_size - + res[i as usize] as i64 * offset - + res[i as usize] as i64 % offset; - match index { - i if i == parent as i64 => { - node.add_parent((i + 1) as i32); - } - i if i < parent as i64 => { - node.add_parent((i + expanders.n) as i32); - } - _ => { - node.add_parent(index as i32); - } + + for (i, &byte_val) in res.iter().enumerate() { + let byte_i64 = byte_val as i64; + let calc_index = (layer - 1) * expanders.n + i as i64 * group_size + byte_i64 * offset + (byte_i64 % offset); + + let index = calc_index as NodeType; + + if index == parent { + node.add_parent(index + 1); + } else if index < parent { + node.add_parent(index + expanders.n as i32); + } else { + node.add_parent(index); } } -} \ No newline at end of file +} diff --git a/crates/ces-pois/src/expanders/generate_idle_file.rs b/crates/ces-pois/src/expanders/generate_idle_file.rs index cc9f990b..0d788f99 100644 --- a/crates/ces-pois/src/expanders/generate_idle_file.rs +++ b/crates/ces-pois/src/expanders/generate_idle_file.rs @@ -1,4 +1,25 @@ +use std::{ + path::{self, Path}, + vec, +}; + +use super::{generate_expanders::calc_parents, get_bytes, Expanders, Node, NodeType}; +use crate::{ + tree::{self}, + util, +}; +use anyhow::{Context, Result}; use sha2::{Digest, Sha256, Sha512}; +use tokio::{fs, io::AsyncReadExt}; + +pub const DEFAULT_IDLE_FILES_PATH: &str = "./proofs"; +pub const FILE_NAME: &str = "sub-file"; +pub const COMMIT_FILE: &str = "file-roots"; +pub const CLUSTER_DIR_NAME: &str = "file-cluster"; +pub const SET_DIR_NAME: &str = "idle-files"; +pub const AUX_FILE: &str = "aux-file"; +pub const DEFAULT_AUX_SIZE: i64 = 64; +pub const DEFAULT_NODES_CACHE: i64 = 1024; pub const HASH_SIZE: i32 = 64; @@ -7,6 +28,16 @@ pub enum Hasher { SHA512(Sha512), } +pub async fn make_proof_dir(dir: &str) -> Result<()> { + if fs::metadata(dir).await.is_err() { + fs::DirBuilder::new().recursive(true).create(dir).await?; + } else { + fs::remove_dir_all(dir).await?; + fs::DirBuilder::new().recursive(true).create(dir).await?; + } + Ok(()) +} + pub fn new_hash() -> Hasher { match HASH_SIZE { 32 => Hasher::SHA256(Sha256::new()), @@ -28,12 +59,198 @@ pub fn get_hash(data: &[u8]) -> Vec { hash.update(data); let result = hash.finalize(); result.to_vec() - } + }, Hasher::SHA512(hash) => { let mut hash = hash; hash.update(data); let result = hash.finalize(); result.to_vec() + }, + } +} + +impl Expanders { + pub async fn generate_idle_file_set( + &mut self, + miner_id: Vec, + start: i64, + size: i64, + root_dir: &str, + ) -> Result<()> { + let mut clusters = vec![0_i64; size as usize]; + let set_dir = format!("{}/{}-{}", root_dir, SET_DIR_NAME, (start + size) / size); + + for i in start..start + size { + let dir = 
format!("{}/{}-{}", set_dir, CLUSTER_DIR_NAME, i); + make_proof_dir(&dir).await.context("generate idle file error")?; + clusters[(i - start) as usize] = i; } + + // Number of idle files in each file cluster + let file_num = self.k; + //create aux slices + let mut roots = vec![vec![0, 0]; (self.k + file_num) as usize * size as usize + 1]; + let mut elders = self.file_pool.clone(); + let mut labels = self.file_pool.clone(); + let mut mht = tree::get_light_mht(self.n); + let mut aux = vec![0u8; (DEFAULT_AUX_SIZE * tree::DEFAULT_HASH_SIZE as i64) as usize]; + + //calc node labels + let front_size = miner_id.len() + std::mem::size_of::() + 8 + 8; + let mut label = vec![0u8; front_size + 2 * HASH_SIZE as usize]; + util::copy_data(&mut label, &[&miner_id]); + // let node = Node::default(); + + for i in 0..(self.k + file_num) { + let mut logical_layer = i; + for j in 0..size { + let mut parents = Vec::new(); + util::copy_data( + &mut label[miner_id.len()..], + &[&get_bytes(clusters[j as usize]), &get_bytes(0 as i64)], + ); + //calc nodes relationship + //read parents' label of file j, and fill elder node labels to add files relationship + if i >= self.k { + logical_layer = self.k; + //When the last level is reached, join the file index + util::copy_data( + &mut label[miner_id.len() + 8..], + &[&get_bytes((clusters[j as usize] - 1) * file_num + i - self.k + 1)], + ); + self.read_elders_data(&set_dir, i, j, &mut elders, &clusters).await?; + } + + if i > 0 { + fs::File::open( + &path::Path::new(&set_dir) + .join(format!("{}-{}", CLUSTER_DIR_NAME, clusters[j as usize])) + .join(format!("{}-{}", FILE_NAME, logical_layer - 1)), + ) + .await? + .read_to_end(&mut parents) + .await + .context("generate idle file error")?; + } + for k in 0..self.n { + let mut hasher = Sha512::new(); + util::copy_data( + &mut label[miner_id.len() + 8 + 8..], + &[&get_bytes((logical_layer * self.n + k) as NodeType)], + ); + util::clear_data(&mut label[front_size..]); + let node = self.calc_nodes_parents(i, &miner_id, clusters[j as usize], k); + if i > 0 && !node.no_parents() { + for p in node.parents.iter() { + let idx = *p as i64 % self.n; + let l = idx * HASH_SIZE as i64; + let r = (idx + 1) * HASH_SIZE as i64; + if (*p as i64) < logical_layer * self.n { + util::add_data( + &mut label[front_size..front_size + HASH_SIZE as usize], + &[&parents[l as usize..r as usize]], + ); + } else { + // let label_tmp = label.clone()[l as usize..r as usize].to_vec(); + util::add_data( + &mut label[front_size..front_size + HASH_SIZE as usize], + &[&labels[l as usize..r as usize]], + ); + } + } + // //add files relationship + if i >= self.k { + util::add_data( + &mut label[front_size + HASH_SIZE as usize..front_size + 2 * HASH_SIZE as usize], + &[&elders[(k * HASH_SIZE as i64) as usize..((k + 1) * HASH_SIZE as i64) as usize]], + ); + } + } + hasher.update(&label); + if i + j > 0 { + //add same layer dependency relationship + hasher.update(&labels[(k * HASH_SIZE as i64) as usize..((k + 1) * HASH_SIZE as i64) as usize]); + }; + labels[(k * HASH_SIZE as i64) as usize..((k + 1) * HASH_SIZE as i64) as usize] + .copy_from_slice(&hasher.finalize_reset()); + } + + //calc merkel tree root hash + tree::calc_light_mht_with_bytes(&mut mht, &labels, HASH_SIZE as i64); + roots[(i * size + j) as usize] = tree::get_root(&mht); + aux.copy_from_slice( + &mht[DEFAULT_AUX_SIZE as usize * tree::DEFAULT_HASH_SIZE as usize + ..2 * DEFAULT_AUX_SIZE as usize * tree::DEFAULT_HASH_SIZE as usize], + ); + + //save aux data + util::save_file( + 
&Path::new(set_dir.as_str()) + .join(format!("{}-{}", CLUSTER_DIR_NAME, clusters[j as usize])) + .join(format!("{}-{}", AUX_FILE, i)), + &aux, + )?; + + //save one layer labels of one file + util::save_file( + &Path::new(set_dir.as_str()) + .join(format!("{}-{}", CLUSTER_DIR_NAME, clusters[j as usize])) + .join(format!("{}-{}", FILE_NAME, i)), + &labels, + )?; + } + } + //return memory space + drop(labels); + drop(elders); + drop(mht); + //calculate new dir name + let mut hasher = Sha256::new(); + for i in 0..roots.len() - 1 { + hasher.update(&roots[i]) + } + roots[((self.k + file_num) * size) as usize] = hasher.finalize().to_vec(); + + util::save_proof_file(&Path::new(set_dir.as_str()).join(COMMIT_FILE), &roots)?; + + Ok(()) } -} \ No newline at end of file + + pub fn calc_nodes_parents(&self, layer: i64, miner_id: &[u8], count: i64, j: i64) -> Node { + let logical_layer = if layer >= self.k { self.k } else { layer }; + + let mut node = self.nodes_pool.clone(); + node.index = (j + logical_layer * self.n) as NodeType; + node.parents = Vec::with_capacity(self.d as usize + 1); + + calc_parents(self, &mut node, miner_id, count, layer); + node + } + + pub async fn read_elders_data( + &self, + set_dir: &str, + layer: i64, + cidx: i64, + elders: &mut Vec, + clusters: &[i64], + ) -> Result<()> { + let base_layer = ((layer - self.k / 2) / self.k) as usize; + util::clear_data(elders); + for l in 0..(self.k / 2) as usize { + let mut temp = Vec::new(); + fs::File::open( + &path::Path::new(set_dir) + .join(format!("{}-{}", CLUSTER_DIR_NAME, clusters[cidx as usize])) + .join(format!("{}-{}", FILE_NAME, base_layer + 2 * l)), + ) + .await? + .read_to_end(&mut temp) + .await?; + + util::add_data(elders, &[&temp]); + } + + Ok(()) + } +} diff --git a/crates/ces-pois/src/expanders/mod.rs b/crates/ces-pois/src/expanders/mod.rs index 0df7c642..8abd7ec5 100644 --- a/crates/ces-pois/src/expanders/mod.rs +++ b/crates/ces-pois/src/expanders/mod.rs @@ -2,10 +2,10 @@ pub mod generate_expanders; pub mod generate_idle_file; use std::mem; +use generate_idle_file::HASH_SIZE; use num_bigint_dig::BigInt; use num_traits::{Signed, ToPrimitive}; -pub use generate_idle_file::new_hash; pub type NodeType = i32; #[derive(Clone, Debug)] @@ -15,8 +15,11 @@ pub struct Expanders { pub d: i64, pub size: i64, pub hash_size: i64, + pub file_pool: Vec, + pub nodes_pool: Node, } +#[derive(Clone, Debug, Default)] pub struct Node { pub index: NodeType, pub parents: Vec, @@ -30,40 +33,41 @@ impl Expanders { d, size: (k + 1) * n, hash_size: 64, + file_pool: vec![0u8; (n * HASH_SIZE as i64) as usize], + nodes_pool: Node { index: 0, parents: Vec::with_capacity(d as usize + 1) }, } } } impl Node { pub fn new(idx: NodeType) -> Self { - Self { - index: idx, - parents: Vec::new(), - } + Self { index: idx, parents: Vec::new() } } pub fn add_parent(&mut self, parent: NodeType) -> bool { if self.index == parent { return false; } - if self.parents.is_empty() || self.parents.len() >= self.parents.capacity() { + + if self.parents.len() >= self.parents.capacity() { return false; } - let (i, ok) = self.parent_in_list(parent); - if ok { + let (insert_pos, found) = self.parent_in_list(parent); + if found { return false; } + self.parents.push(0); let lens = self.parents.len(); - if lens == 1 || i == lens as i32 - 1 { - self.parents[i as usize] = parent; + + if lens == 1 || insert_pos as usize == lens - 1 { + self.parents[insert_pos as usize] = parent; return true; } - self.parents - .copy_within(i as usize + 1..lens - 1, i as usize); - self.parents[i as 
usize] = parent; + self.parents.copy_within(insert_pos as usize..lens - 1, insert_pos as usize + 1); + self.parents[insert_pos as usize] = parent; true } @@ -127,13 +131,9 @@ pub fn bytes_to_node_value(data: &[u8], max: i64) -> NodeType { let big_max = BigInt::from(max); let value = value % &big_max; - let i_value = value.to_i64().unwrap_or_else(|| { - if value.is_negative() { - i64::min_value() - } else { - i64::max_value() - } - }); + let i_value = value + .to_i64() + .unwrap_or_else(|| if value.is_negative() { i64::min_value() } else { i64::max_value() }); ((i_value + max) % max) as NodeType -} \ No newline at end of file +} diff --git a/crates/ces-pois/src/pois/challenge.rs b/crates/ces-pois/src/pois/challenge.rs index 1e04a272..36f80676 100644 --- a/crates/ces-pois/src/pois/challenge.rs +++ b/crates/ces-pois/src/pois/challenge.rs @@ -48,4 +48,4 @@ pub fn new_challenge_handle( } true }) -} \ No newline at end of file +} diff --git a/crates/ces-pois/src/pois/mod.rs b/crates/ces-pois/src/pois/mod.rs index 8fd14324..e0dc761f 100644 --- a/crates/ces-pois/src/pois/mod.rs +++ b/crates/ces-pois/src/pois/mod.rs @@ -1,3 +1,3 @@ pub mod challenge; pub mod prove; -pub mod verify; \ No newline at end of file +pub mod verify; diff --git a/crates/ces-pois/src/pois/prove.rs b/crates/ces-pois/src/pois/prove.rs index ad29f619..cf893974 100644 --- a/crates/ces-pois/src/pois/prove.rs +++ b/crates/ces-pois/src/pois/prove.rs @@ -1,22 +1,107 @@ +use crate::{ + acc::{ + file_manager::*, + multi_level_acc::{ + new_muti_level_acc, recovery, AccHandle, MutiLevelAcc, WitnessNode, DEFAULT_ELEMS_NUM, DEFAULT_PATH, + }, + RsaKey, + }, + expanders::{ + self, + generate_expanders::{self, construct_stacked_expanders}, + generate_idle_file::{ + AUX_FILE, CLUSTER_DIR_NAME, COMMIT_FILE, DEFAULT_AUX_SIZE, FILE_NAME, HASH_SIZE, SET_DIR_NAME, + }, + Node, NodeType, + }, + tree::{self, get_path_proof, DEFAULT_HASH_SIZE}, + util, +}; +use anyhow::{anyhow, bail, Context as AnyhowContext, Result}; +use num_bigint_dig::BigUint; use serde::{Deserialize, Serialize}; +use std::{path::Path, sync::Arc, vec}; +use tokio::{fs, sync::RwLock}; -use crate::{acc::multi_level_acc::WitnessNode, expanders::NodeType}; +const FILE_SIZE: i64 = HASH_SIZE as i64; +const ACC_PATH: &str = DEFAULT_PATH; +const CHALL_ACC_PATH: &str = "./chall_acc/"; +const IDLE_FILE_PATH: &str = "./proofs"; +const MAXPROOFTHREAD: i64 = 4; +const MINIFILESIZE: i64 = 1024 * 1024; -#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct Commits { - pub file_indexs: Vec, - pub roots: Vec>, +pub struct Prover { + pub rw: Arc>>, +} + +#[derive(Clone)] +pub struct ProverBody { + pub expanders: expanders::Expanders, + pub rear: i64, + pub front: i64, + pub space: i64, + pub set_len: i64, + pub cluster_size: i64, + context: Context, + pub id: Vec, + pub chain_state: ChainState, + pub acc_manager: Option, + pub config: Config, +} + +#[derive(Clone)] +pub struct Config { + pub file_size: i64, + pub acc_path: String, + pub chall_acc_path: String, + pub idle_file_path: String, + pub max_proof_thread: i64, +} + +impl Default for Config { + fn default() -> Self { + Self { + file_size: FILE_SIZE, + acc_path: ACC_PATH.to_string(), + chall_acc_path: CHALL_ACC_PATH.to_string(), + idle_file_path: IDLE_FILE_PATH.to_string(), + max_proof_thread: MAXPROOFTHREAD, + } + } +} + +#[derive(Clone)] +struct Context { + pub commited: i64, + pub added: i64, + pub generated: i64, + pub proofed: i64, +} + +#[derive(Clone)] +pub struct ChainState { + pub acc: Option, + pub 
challenging: bool, + // pub del_ch :chan struct{}, + pub rear: i64, + pub front: i64, } -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize, Default)] pub struct MhtProof { - pub index: NodeType, + pub index: expanders::NodeType, pub label: Vec, pub paths: Vec>, pub locs: Vec, } -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct Commits { + pub file_indexs: Vec, + pub roots: Vec>, +} + +#[derive(Clone, Debug, Deserialize, Serialize, Default)] pub struct CommitProof { pub node: MhtProof, pub parents: Vec, @@ -45,4 +130,1016 @@ pub struct DeletionProof { pub roots: Vec>, pub wit_chain: WitnessNode, pub acc_path: Vec>, -} \ No newline at end of file +} + +pub async fn new_prover( + k: i64, + n: i64, + d: i64, + id: Vec, + space: i64, + set_len: i64, +) -> Result> { + if k <= 0 || n <= 0 || d <= 0 || space <= 0 || id.len() == 0 { + return Err(anyhow!("bad params")); + } + + let prover = ProverBody { + expanders: construct_stacked_expanders(k, n, d), + rear: 0, + front: 0, + space, + set_len, + cluster_size: k, + id, + context: Context { commited: 0, added: 0, generated: 0, proofed: 0 }, + chain_state: ChainState { acc: None, challenging: false, rear: 0, front: 0 }, + acc_manager: None, + config: Config::default(), + }; + + Ok(Prover { rw: Arc::new(RwLock::new(prover)) }) +} + +impl Prover { + pub async fn init(&mut self, key: RsaKey, config: Config) -> Result<()> { + if key.g.to_bytes_be().len() == 0 || key.n.to_bytes_be().len() == 0 { + return Err(anyhow!("bad init params")); + } + let mut prover_guard = self.rw.write().await; + + prover_guard.config = config; + + prover_guard.acc_manager = Some(new_muti_level_acc(&prover_guard.config.acc_path, key.clone()).await?); + let _ = new_muti_level_acc(&prover_guard.config.chall_acc_path, key).await?; + Ok(()) + } + + pub async fn recovery(&mut self, key: RsaKey, front: i64, rear: i64, config: Config) -> Result<()> { + { + let mut prover_guard = self.rw.write().await; + if key.g.to_bytes_be().len() == 0 + || key.n.to_bytes_be().len() == 0 + || front < 0 + || rear < 0 + || front > rear + || rear % (prover_guard.set_len * prover_guard.cluster_size) != 0 + { + bail!("bad recovery params"); + } + prover_guard.config = config.clone(); + + //recovery front and rear + prover_guard.front = front; + prover_guard.rear = rear; + + //recovery acc + prover_guard.acc_manager = Some(recovery(&prover_guard.config.acc_path, key.clone(), front, rear).await?); + } + { + //recovery context + let mut generated = self.calc_generated_file(&config.idle_file_path).await?; + let mut prover_guard = self.rw.write().await; + if generated % (prover_guard.set_len * prover_guard.cluster_size) != 0 { + //restores must be performed in units of the number of files in a set + generated -= generated % (prover_guard.set_len * prover_guard.cluster_size) + }; + prover_guard.context.generated = rear + generated; //generated files do not need to be generated again + prover_guard.context.added = rear + generated; //the file index to be generated should be consistent with the generated file index firstly + prover_guard.context.commited = rear; + prover_guard.space -= (prover_guard.rear - prover_guard.front) * prover_guard.config.file_size; //calc proved space + prover_guard.space -= generated / prover_guard.cluster_size + * (prover_guard.cluster_size + prover_guard.expanders.k) + * prover_guard.config.file_size; //calc generated space + } + //backup acc file for challenge 
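+        // The copy kept under chall_acc_path is what set_challenge_state later rebuilds the challenge-time accumulator from.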
+ util::copy_files(&config.acc_path, &config.chall_acc_path)?; + + Ok(()) + } + + pub async fn set_challenge_state(&mut self, key: RsaKey, acc_snp: Vec, front: i64, rear: i64) -> Result<()> { + let mut prover_guard = self.rw.write().await; + + prover_guard.chain_state.rear = rear; + prover_guard.chain_state.front = front; + + let chain_acc = BigUint::from_bytes_be(&acc_snp); + + prover_guard.chain_state.acc = Some( + recovery(CHALL_ACC_PATH, key, front, rear) + .await + .context("recovery chain state error")?, + ); + + let local_acc = BigUint::from_bytes_be( + &prover_guard + .chain_state + .acc + .as_mut() + .ok_or_else(|| anyhow!("Accumulator manager is not initialized."))? + .get_snapshot() + .await + .read() + .await + .accs + .as_ref() + .unwrap() + .read() + .await + .value + .clone(), + ); + + if chain_acc != local_acc { + bail!("recovery chain state error:The restored accumulator value is not equal to the snapshot value"); + } + + prover_guard.chain_state.challenging = true; + // prover_guard.chain_state.del_ch = None; + + Ok(()) + } + + pub async fn set_challenge_state_for_test(&mut self, front: i64, rear: i64) { + self.rw.write().await.chain_state = ChainState { acc: None, challenging: false, rear, front }; + } + + // GenerateIdleFileSet generate num=(p.setLen*p.clusterSize(==k)) idle files, num must be consistent with the data given by CESS, otherwise it cannot pass the verification + // This method is not thread-safe, please do not use it concurrently! + pub async fn generate_idle_file_set(&mut self) -> Result<()> { + let mut prover_guard = self.rw.write().await; + + let file_num = prover_guard.set_len * prover_guard.cluster_size; + let idle_file_path = prover_guard.config.idle_file_path.clone(); + let free_space = util::get_dir_free_space(&idle_file_path).context("get free space error")? / 1024 * 1024; + let reserved = 256_i64; + + if prover_guard.space == file_num * prover_guard.config.file_size + && free_space > (prover_guard.expanders.k * prover_guard.config.file_size + reserved) as u64 + { + prover_guard.space += prover_guard.expanders.k * prover_guard.config.file_size; + } + + if prover_guard.space + < (file_num + prover_guard.set_len * prover_guard.expanders.k) * prover_guard.config.file_size + { + bail!("generate idle file set error: not enough space") + } + + prover_guard.context.added += file_num; + prover_guard.space -= + (file_num + prover_guard.set_len * prover_guard.expanders.k) * prover_guard.config.file_size; + let start = (prover_guard.context.added - file_num) / prover_guard.cluster_size + 1; + + let id = prover_guard.id.clone(); + let set_len = prover_guard.set_len; + prover_guard + .expanders + .generate_idle_file_set(id, start, set_len, &idle_file_path) + .await + .map_err(|e| { + prover_guard.context.added -= file_num; + prover_guard.space += + (file_num + prover_guard.set_len * prover_guard.expanders.k) * prover_guard.config.file_size; + e + })?; + prover_guard.context.generated += file_num; + + Ok(()) + } + + pub async fn generate_idle_file_sets(&mut self, t_num: i64) -> Result<()> { + let mut prover_guard = self.rw.write().await; + let mut t_num = t_num; + + if t_num <= 0 { + bail!("generate idle file sets error: bad thread number"); + } + + // Get available space + let free_space = util::get_dir_free_space(IDLE_FILE_PATH)? 
/ (1024 * 1024); // MB + let reserved = 256_i64; + + let file_num = prover_guard.set_len * prover_guard.cluster_size; + + if prover_guard.space == file_num * FILE_SIZE + && free_space > (prover_guard.expanders.k * FILE_SIZE + reserved) as u64 + { + prover_guard.space += prover_guard.expanders.k * FILE_SIZE; + } + + if prover_guard.space < (file_num + prover_guard.set_len * prover_guard.expanders.k) * FILE_SIZE * t_num { + if prover_guard.space >= (file_num + prover_guard.set_len * prover_guard.expanders.k) * FILE_SIZE { + t_num = 1; + } else { + bail!("generate idle file sets error: space is full"); + } + }; + + let curr_index = prover_guard.context.added / prover_guard.cluster_size + 1; + prover_guard.context.added += file_num * t_num; + prover_guard.space -= (file_num + prover_guard.set_len * prover_guard.expanders.k) * FILE_SIZE * t_num; + + let mut tasks = Vec::new(); + + for i in 0..t_num { + let start = curr_index + i * prover_guard.set_len; + let mut prover_guard = prover_guard.clone(); + + let task = tokio::task::spawn(async move { + prover_guard + .expanders + .generate_idle_file_set(prover_guard.id.clone(), start, prover_guard.set_len, IDLE_FILE_PATH) + .await + }); + tasks.push(task); + } + + // Wait for all tasks to finish + for (_, task) in tasks.into_iter().enumerate() { + task.await.context("spawn task failed")??; + } + + prover_guard.context.generated += file_num * t_num; + Ok(()) + } + + // CommitRollback needs to be invoked when submitting commits to the verifier fails + pub async fn commit_roll_back(&mut self) -> bool { + let mut prover_guard = self.rw.write().await; + prover_guard.context.commited -= prover_guard.set_len * prover_guard.cluster_size; + true + } + + // AccRollback needs to be invoked when submitting or verifying the acc proof fails; + // the update of the accumulator is serial and blocking, so you need to update or roll back in time. + pub async fn acc_roll_back(&mut self, is_del: bool) -> bool { + let mut prover_guard = self.rw.write().await; + + if !is_del { + prover_guard.context.commited -= prover_guard.set_len * prover_guard.cluster_size; + }; + + prover_guard.acc_manager.as_mut().unwrap().rollback().await + } + + pub async fn sync_chain_pois_status(&mut self, front: i64, rear: i64) -> Result<()> { + let mut prover_guard = self.rw.write().await; + if prover_guard.front == front && prover_guard.rear == rear { + return Ok(()); + }; + + prover_guard.acc_manager = Some( + recovery( + ACC_PATH, + prover_guard + .acc_manager + .as_mut() + .unwrap() + .get_snapshot() + .await + .read() + .await + .key + .clone(), + front, + rear, + ) + .await + .context("refresh acc error")?, + ); + //recovery front and rear + prover_guard.front = front; + prover_guard.rear = rear; + prover_guard.context.commited = rear; + + Ok(()) + } + + // UpdateStatus needs to be invoked after the commit proof and acc proof are verified successfully; + // the update of the accumulator is serial and blocking, so you need to update or roll back in time.
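+ // Illustrative call sequence (a sketch only; `prover`, `num` and the verifier round are assumptions, not part of this patch): + // let commits = prover.get_idle_file_set_commits().await?; // submit the set's commits to the verifier + // let (commit_proofs, acc_proof) = prover.prove_commit_and_acc(challenges).await?; // answer its challenges + // prover.update_status(num, false).await?; // only after both proofs verify on chain + // prover.acc_roll_back(false).await; // or roll back if submission/verification fails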
+ pub async fn update_status(&mut self, num: i64, is_delete: bool) -> Result<()> { + if num < 0 { + bail!("bad files number"); + } + + let mut prover_guard = self.rw.write().await; + + if is_delete { + prover_guard.front += num; + prover_guard.acc_manager.as_mut().unwrap().update_snapshot().await; + + let index = (prover_guard.front - 1) / DEFAULT_ELEMS_NUM as i64; + if let Err(err) = clean_backup(ACC_PATH, index) { + bail!("delete idle files error: {}", err); + } + return Ok(()); + } + + prover_guard.rear += num; + prover_guard.acc_manager.as_mut().unwrap().update_snapshot().await; + let rear = prover_guard.rear; + let front = prover_guard.front; + drop(prover_guard); + + if let Err(err) = self.organize_files(rear - num, num).await { + bail!("update prover status error: {}", err); + } + + for i in ((front + num - 1) / num) * num..rear - num { + if let Err(err) = self.organize_files(i, num).await { + bail!("update prover status error: {}", err); + } + } + Ok(()) + } + + pub async fn get_space(&self) -> i64 { + self.rw.read().await.space + } + + pub async fn return_space(&mut self, size: i64) { + self.rw.write().await.space += size; + } + + pub async fn get_acc_value(&self) -> Vec { + let key_len = 256; + + let acc = self + .rw + .write() + .await + .acc_manager + .clone() + .unwrap() + .get_snapshot() + .await + .read() + .await + .accs + .clone() + .unwrap() + .read() + .await + .value + .clone(); + let mut res = vec![0_u8; key_len]; + if acc.len() > key_len { + res.copy_from_slice(&acc[acc.len() - 256..]); + return res; + } + res[256 - acc.len()..].copy_from_slice(&acc); + return res; + } + + // GetCount get Count Safely + pub async fn get_rear(&self) -> i64 { + self.rw.read().await.rear + } + + pub async fn get_front(&self) -> i64 { + self.rw.read().await.front + } + + pub async fn get_num_of_file_in_set(&self) -> i64 { + self.rw.read().await.set_len * self.rw.read().await.cluster_size + } + + pub async fn commit_data_is_ready(&self) -> bool { + let file_num = self.rw.read().await.context.generated; + let commited = self.rw.read().await.context.commited; + return file_num - commited >= self.rw.read().await.set_len * self.rw.read().await.cluster_size; + } + + pub async fn get_chain_state(&self) -> ChainState { + let prover_guard = self.rw.read().await; + ChainState { + rear: prover_guard.chain_state.rear, + front: prover_guard.chain_state.front, + acc: prover_guard.chain_state.acc.clone(), + challenging: false, + } + } + + // RestChallengeState must be called when space proof is finished + pub async fn rest_challenge_state(&mut self) { + let mut prover_guard = self.rw.write().await; + + prover_guard.context.proofed = 0; + // prover_guard.chain_state.del_ch = None; + prover_guard.chain_state.challenging = false; + + if prover_guard.chain_state.front >= DEFAULT_ELEMS_NUM as i64 { + let index = (prover_guard.chain_state.front - DEFAULT_ELEMS_NUM as i64) / DEFAULT_ELEMS_NUM as i64; + let _ = delete_acc_data(CHALL_ACC_PATH, index as i32); + }; + } + + // GetIdleFileSetCommits can not run concurrently! And num must be consistent with the data given by CESS. 
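+ // Sketch of the intended flow (illustrative only): the Commits returned here are submitted to the verifier, + // which responds with one challenge vector per set; those challenges are then passed to `prove_commit_and_acc` + // to produce the per-file commit proofs and the accumulator proof for the same set.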
+ pub async fn get_idle_file_set_commits(&mut self) -> Result { + let mut commits = Commits::default(); + let mut prover_guard = self.rw.write().await; + + let file_num = prover_guard.context.generated; + let commited = prover_guard.context.commited; + let commit_num = prover_guard.set_len * prover_guard.cluster_size; + + if file_num - commited < commit_num { + bail!("get commits error:bad commit data"); + } + //read commit file of idle file set + let name = Path::new(IDLE_FILE_PATH) + .join(format!( + "{}-{}", + expanders::generate_idle_file::SET_DIR_NAME, + (commited) / (prover_guard.set_len * prover_guard.cluster_size) + 1 + )) + .join(expanders::generate_idle_file::COMMIT_FILE); + let root_num = commit_num + prover_guard.expanders.k * prover_guard.set_len + 1; + commits.roots = + util::read_proof_file(&name, root_num as usize, tree::DEFAULT_HASH_SIZE as usize).map_err(|e| e)?; + commits.file_indexs = vec![0_i64; commit_num as usize]; + for i in 0..commit_num { + commits.file_indexs[i as usize] = commited + i + 1; + } + prover_guard.context.commited += commit_num; + + Ok(commits) + } + + pub async fn prove_commit_and_acc( + &mut self, + challenges: Vec>, + ) -> Result<(Option>>, Option)> { + let commit_proofs = self.prove_commits(challenges.clone()).await?; + let acc_proof = self.prove_acc(challenges).await?; + + //copy new acc data to challenging acc path + let prover_guard = self.rw.read().await; + let index = prover_guard.rear / DEFAULT_ELEMS_NUM as i64; + backup_acc_data_for_chall(ACC_PATH, CHALL_ACC_PATH, index)?; + + Ok((commit_proofs, acc_proof)) + } + + pub async fn prove_acc(&mut self, challenges: Vec>) -> Result> { + let mut prover_guard = self.rw.write().await; + if challenges.len() != prover_guard.set_len as usize { + bail!("update acc error:bad challenges data") + } + let file_num = prover_guard.set_len * prover_guard.cluster_size; + let mut labels: Vec> = vec![Vec::new(); file_num as usize]; + let mut proof = AccProof::default(); + proof.indexs = vec![0_i64; file_num as usize]; + //read commit roots file + let fname = Path::new(IDLE_FILE_PATH) + .join(format!("{}-{}", SET_DIR_NAME, (challenges[0][0] - 1) / prover_guard.set_len + 1)) + .join(COMMIT_FILE); + + let roots = util::read_proof_file( + &fname, + ((prover_guard.expanders.k + prover_guard.cluster_size) * prover_guard.set_len + 1) as usize, + DEFAULT_HASH_SIZE as usize, + ) + .context("update acc error")?; + + for i in 0..prover_guard.set_len as usize { + for j in 0..prover_guard.cluster_size as usize { + let index = (challenges[i][0] - 1) * prover_guard.cluster_size + j as i64 + 1; + proof.indexs[i * prover_guard.cluster_size as usize + j] = index; + let root = roots[(prover_guard.expanders.k as usize + j) * prover_guard.set_len as usize + i].clone(); + let mut label = prover_guard.id.clone(); + label.extend_from_slice(&expanders::get_bytes(index)); + label.extend_from_slice(&root); + labels[i * prover_guard.cluster_size as usize + j] = expanders::generate_idle_file::get_hash(&label); + } + } + let (wit_chains, acc_path) = prover_guard + .acc_manager + .as_mut() + .ok_or_else(|| anyhow!("acc manager is none"))? 
+ .add_elements_and_proof(labels.clone()) + .await?; + proof.wit_chains = Some(Box::new(wit_chains)); + proof.acc_path = acc_path; + + proof.labels = labels; + + Ok(Some(proof)) + } + + pub async fn read_file_labels(&self, cluster: i64, fidx: i64, buf: &mut Vec) -> Result<()> { + let file_name = Path::new(IDLE_FILE_PATH) + .join(format!("{}-{}", SET_DIR_NAME, (cluster - 1) / self.rw.read().await.set_len + 1)) + .join(format!("{}-{}", CLUSTER_DIR_NAME, cluster)) + .join(format!("{}-{}", FILE_NAME, fidx + self.rw.read().await.expanders.k)); + + util::read_file_to_buf(&file_name, buf).context("read file labels error") + } + + pub async fn read_aux_data(&self, cluster: i64, fidx: i64, buf: &mut Vec) -> Result<()> { + let file_name = Path::new(IDLE_FILE_PATH) + .join(format!("{}-{}", SET_DIR_NAME, (cluster - 1) / self.rw.read().await.set_len + 1)) + .join(format!("{}-{}", CLUSTER_DIR_NAME, cluster)) + .join(format!("{}-{}", AUX_FILE, fidx + self.rw.read().await.expanders.k)); + + util::read_file_to_buf(&file_name, buf).context("read aux data error") + } + + // ProveCommit prove commits no more than MaxCommitProofThread + pub async fn prove_commits(&self, challenges: Vec>) -> Result>>> { + let lens = challenges.len(); + let mut proof_set: Vec> = vec![Vec::new(); lens]; + let prover_guard = self.rw.read().await; + for i in 0..lens { + let mut proofs: Vec = vec![CommitProof::default(); challenges[i].len() - 1]; + let fdir = Path::new(IDLE_FILE_PATH) + .join(format!( + "{}-{}", + expanders::generate_idle_file::SET_DIR_NAME, + (challenges[i][0] - 1) / prover_guard.set_len + 1 + )) + .join(format!("{}-{}", expanders::generate_idle_file::CLUSTER_DIR_NAME, challenges[i][0])); + for j in 1..(proofs.len() + 1) as i64 { + let mut index = challenges[i][j as usize]; + if j > prover_guard.cluster_size + 1 { + index = proofs[j as usize - 2].parents[challenges[i][j as usize] as usize].index as i64; + } + let mut layer = index / prover_guard.expanders.n; + if j < prover_guard.cluster_size + 1 { + layer = prover_guard.expanders.k + j - 1; + } + let neighbor = if layer != 0 || i != 0 { + let mut cid = challenges[i][0] - 1; + if cid % prover_guard.set_len == 0 { + cid += prover_guard.set_len; + } + let path_buf = Path::new(IDLE_FILE_PATH) + .join(format!( + "{}-{}", + expanders::generate_idle_file::SET_DIR_NAME, + (challenges[i][0] - 1) / prover_guard.set_len + 1, + )) + .join(format!("{}-{}", expanders::generate_idle_file::CLUSTER_DIR_NAME, cid)) + .join(format!( + "{}-{}", + expanders::generate_idle_file::FILE_NAME, + layer - (prover_guard.set_len - i as i64) / prover_guard.set_len + )); + path_buf + } else { + Path::new(IDLE_FILE_PATH).to_path_buf() + }; + proofs[j as usize - 1] = self + .generate_commit_proof(&fdir, neighbor.as_path(), challenges[i][0], index, layer) + .await?; + } + proof_set[i] = proofs; + } + Ok(Some(proof_set)) + } + + pub async fn generate_path_proof( + &self, + mht: &mut tree::LightMHT, + data: &mut [u8], + index: i64, + node_idx: i64, + ) -> Result { + tree::calc_light_mht_with_bytes(mht, data, HASH_SIZE as i64); + let path_proof = + tree::get_path_proof(&mht, data, index, HASH_SIZE as i64, false).context("generate path proof error")?; + + let mut label: Vec = vec![0u8; HASH_SIZE as usize]; + label.copy_from_slice(&data[index as usize * HASH_SIZE as usize..(index + 1) as usize * HASH_SIZE as usize]); + + Ok(MhtProof { index: node_idx as NodeType, label, paths: path_proof.path, locs: path_proof.locs }) + } + + pub async fn get_path_proof_with_aux( + &self, + aux: &mut Vec, + data: &mut 
Vec, + index: i64, + node_idx: i64, + ) -> Result { + let path_proof = tree::get_path_proof_with_aux(data, aux, index as usize, HASH_SIZE as usize)?; + + let mut label = vec![0u8; HASH_SIZE as usize]; + label.copy_from_slice(&data[index as usize * HASH_SIZE as usize..(index + 1) as usize * HASH_SIZE as usize]); + + Ok(MhtProof { index: node_idx as NodeType, label, paths: path_proof.path, locs: path_proof.locs }) + } + + pub async fn generate_commit_proof( + &self, + fdir: &Path, + neighbor: &Path, + count: i64, + c: i64, + mut subfile: i64, + ) -> Result { + let prover_guard = self.rw.read().await; + if subfile < 0 || subfile > prover_guard.cluster_size + prover_guard.expanders.k - 1 { + bail!("generate commit proof error: bad node index") + } + let mut data = prover_guard.expanders.file_pool.clone(); + let fpath = fdir.join(format!("{}-{}", expanders::generate_idle_file::FILE_NAME, subfile)); + + util::read_file_to_buf(&fpath, &mut data).context("generate commit proof error")?; + + let mut node_tree = tree::get_light_mht(prover_guard.expanders.n); + let mut parent_tree = tree::get_light_mht(prover_guard.expanders.n); + let index = c % prover_guard.expanders.n; + + let path_proof = self + .generate_path_proof(&mut node_tree, &mut data, index, c) + .await + .context("generate commit proof error")?; + + let mut proof = CommitProof::default(); + proof.node = path_proof; + + let mut pdata = prover_guard.expanders.file_pool.clone(); + + let mut aux: Vec = vec![0u8; DEFAULT_AUX_SIZE as usize * DEFAULT_HASH_SIZE as usize]; + + //add neighbor node dependency + proof.elders = + vec![MhtProof::default(); (subfile / prover_guard.expanders.k * prover_guard.expanders.k / 2) as usize + 1]; + + if !neighbor.eq(Path::new("")) { + util::read_file_to_buf(&neighbor, &mut pdata).context("generate commit proof error")?; + + util::read_file_to_buf( + Path::new(&neighbor.to_str().unwrap_or("").replacen(FILE_NAME, AUX_FILE, 1)), + &mut aux, + ) + .context("generate commit proof error")?; + proof.elders[0] = self.get_path_proof_with_aux(&mut aux, &mut pdata, index, index).await?; + } + + if subfile == 0 { + return Ok(proof); + } + + //file remapping + let layer = subfile; + if subfile >= prover_guard.expanders.k { + let base_layer = (subfile - prover_guard.expanders.k / 2) / prover_guard.expanders.k; + subfile = prover_guard.expanders.k; + + //add elder node dependency + for i in 0..prover_guard.expanders.k / 2 { + let f_path = fdir.join(format!("{}-{}", FILE_NAME, base_layer + i * 2)); + let a_path = fdir.join(format!("{}-{}", AUX_FILE, base_layer + i * 2)); + + util::read_file_to_buf(&f_path, &mut pdata).context("generate commit proof error")?; + util::read_file_to_buf(&a_path, &mut aux).context("generate commit proof error")?; + proof.elders[i as usize + 1] = self + .get_path_proof_with_aux( + &mut aux, + &mut pdata, + index, + index + (base_layer + i * 2) * prover_guard.expanders.n, + ) + .await + .context("generate commit proof error")?; + } + } + + let mut node = Node::new(c as NodeType); + node.parents = Vec::with_capacity(prover_guard.expanders.d as usize + 1); + generate_expanders::calc_parents(&prover_guard.expanders, &mut node, &prover_guard.id, count, layer); + + let fpath = fdir.join(format!("{}-{}", FILE_NAME, subfile - 1)); + + util::read_file_to_buf(&fpath, &mut pdata).context("generate commit proof error")?; + + tree::calc_light_mht_with_bytes(&mut parent_tree, &mut pdata, HASH_SIZE as i64); + let lens = node.parents.len(); + let mut parent_proofs = vec![MhtProof::default(); lens]; + + for i in 
0..lens { + let index = node.parents[i] as usize % prover_guard.expanders.n as usize; + let mut label = vec![0u8; HASH_SIZE as usize]; + + let mut path_proof = if node.parents[i] as i64 >= subfile * prover_guard.expanders.n { + label.copy_from_slice(&data[index * HASH_SIZE as usize..(index + 1) * HASH_SIZE as usize]); + get_path_proof(&node_tree, &data, index as i64, HASH_SIZE as i64, false)? + } else { + label.copy_from_slice(&pdata[index * HASH_SIZE as usize..(index + 1) * HASH_SIZE as usize]); + get_path_proof(&parent_tree, &pdata, index as i64, HASH_SIZE as i64, false)? + }; + if node.parents[i] % 6 != 0 { + path_proof.path = Vec::new(); + path_proof.locs = Vec::new(); + } + parent_proofs[i] = MhtProof { index: node.parents[i], label, paths: path_proof.path, locs: path_proof.locs } + } + + proof.parents = parent_proofs; + Ok(proof) + } + + pub async fn prove_space(&self, challenges: Vec, left: i64, right: i64) -> Result>> { + if challenges.is_empty() + || right - left <= 0 + || left <= self.rw.read().await.chain_state.front + || right > self.rw.read().await.chain_state.rear + 1 + { + bail!("prove space error:bad challenge range"); + } + + let proof = Arc::new(RwLock::new(SpaceProof { + proofs: vec![vec![]; (right - left) as usize], + roots: vec![vec![]; (right - left) as usize], + wit_chains: vec![WitnessNode::default(); (right - left) as usize], + left, + right, + })); + + let indexs = Arc::new(RwLock::new(vec![0; (right - left) as usize])); + let mut threads = MAXPROOFTHREAD; + if right - left < threads { + threads = right - left; + } + if threads <= 0 { + threads = 2; + } + let block = ((right - left) / threads) * threads; + + let mut tasks = Vec::new(); + for i in 0..threads { + let gl = left + i * block; + let mut gr = left + (i + 1) * block; + if gr > right { + gr = right; + } + + let p = self.rw.clone(); + let proof_clone = proof.clone(); + let challenges = challenges.clone(); + let indexs = indexs.clone(); + + let task = tokio::spawn(async move { + let p = Prover { rw: p }; + for fidx in gl..gr { + let mut data = p.rw.read().await.expanders.file_pool.clone(); + + if let Err(e) = p + .read_file_labels( + ((fidx - 1) / p.rw.read().await.cluster_size) + 1, + (fidx - 1) % p.rw.read().await.cluster_size, + &mut data, + ) + .await + { + return Err(e); + } + + let mut aux = vec![0u8; DEFAULT_AUX_SIZE as usize * tree::DEFAULT_HASH_SIZE as usize]; + if let Err(e) = p + .read_aux_data( + ((fidx - 1) / p.rw.read().await.cluster_size) + 1, + (fidx - 1) % p.rw.read().await.cluster_size, + &mut aux, + ) + .await + { + return Err(e); + } + + let mut mht = tree::get_light_mht(DEFAULT_AUX_SIZE); + tree::calc_light_mht_with_aux(&mut mht, &aux); + + indexs.write().await[(fidx - left) as usize] = fidx; + + let mut proof_guard = proof_clone.write().await; + proof_guard.roots[(fidx - left) as usize] = tree::get_root(&mht); + proof_guard.proofs[(fidx - left) as usize] = vec![]; + + for (_, challenge) in challenges.clone().into_iter().enumerate() { + let idx = (challenge % p.rw.read().await.expanders.n) as usize; + + let path_proof = tree::get_path_proof_with_aux(&data, &mut aux, idx, HASH_SIZE as usize)?; + let mut label = vec![0u8; HASH_SIZE as usize]; + label.copy_from_slice(&data[idx * HASH_SIZE as usize..(idx + 1) * HASH_SIZE as usize]); + proof_guard.proofs[(fidx - left) as usize].push(MhtProof { + paths: path_proof.path, + locs: path_proof.locs, + index: challenge as expanders::NodeType, + label, + }); + } + } + Ok(()) + }); + tasks.push(task); + } + for (i, task) in 
tasks.into_iter().enumerate() { + task.await + .context("prove space error") + .map(|e| anyhow!("prove space task index {} error: {:?}", i, e))?; + } + + proof.write().await.wit_chains = self + .rw + .write() + .await + .chain_state + .acc + .as_mut() + .unwrap() + .get_witness_chains(indexs.read().await.clone()) + .await + .context("prove space error")?; + self.rw.write().await.context.proofed = right - 1; + + Ok(proof) + } + + // ProveDeletion sort out num*IdleFileSize(unit MiB) available space, + // you need to update prover status with this value rather than num after the verification is successful. + pub async fn prove_deletion(&mut self, num: i64) -> Result { + if num <= 0 { + bail!("prove deletion error: bad file number"); + } + + if self.rw.read().await.rear - self.rw.read().await.front < num { + bail!("prove deletion error: insufficient operating space"); + } + let mut data = self.rw.read().await.expanders.file_pool.clone(); + let mut roots: Vec> = vec![vec![]; num as usize]; + let mut aux = vec![0u8; DEFAULT_AUX_SIZE as usize * tree::DEFAULT_HASH_SIZE as usize]; + let mut mht = tree::get_light_mht(DEFAULT_AUX_SIZE); + for i in 1..num { + let cluster = (self.rw.read().await.front + i - 1) / self.rw.read().await.cluster_size + 1; + let subfile = (self.rw.read().await.front + i - 1) % self.rw.read().await.cluster_size; + self.read_file_labels(cluster, subfile, &mut data) + .await + .context("prove deletion error")?; + self.read_aux_data(cluster, subfile, &mut aux) + .await + .context("prove deletion error")?; + tree::calc_light_mht_with_aux(&mut mht, &aux); + roots[i as usize - 1] = tree::get_root(&mht); + } + let (wit_chain, acc_path) = self + .rw + .write() + .await + .acc_manager + .as_mut() + .unwrap() + .delete_elements_and_proof(num) + .await + .context("prove deletion error")?; + + let proof = DeletionProof { roots, wit_chain, acc_path }; + + Ok(proof) + } + + pub async fn organize_files(&mut self, idx: i64, num: i64) -> Result<()> { + let cluster_size = self.rw.read().await.cluster_size; + let set_len = self.rw.read().await.set_len; + let k = self.rw.read().await.expanders.k; + + let dir = Path::new(IDLE_FILE_PATH).join(format!("{}-{}", SET_DIR_NAME, idx / (cluster_size * set_len) + 1)); + + let mut i = idx + 1; + while i <= idx + num { + for j in 0..k { + // delete idle file + let name = dir + .join(format!("{}-{}", CLUSTER_DIR_NAME, (i - 1) / cluster_size + 1)) + .join(format!("{}-{}", FILE_NAME, j)); + util::delete_file(name.to_str().unwrap())?; + + // delete aux file + let name = dir + .join(format!("{}-{}", CLUSTER_DIR_NAME, (i - 1) / cluster_size + 1)) + .join(format!("{}-{}", AUX_FILE, j)); + util::delete_file(name.to_str().unwrap())?; + } + i += 8; + } + let name = dir.join(COMMIT_FILE); + util::delete_file(name.to_str().unwrap())?; + + self.rw.write().await.space += num / cluster_size * k * FILE_SIZE; + + Ok(()) + } + + pub async fn delete_files(&mut self) -> Result<()> { + // // delete all files before front + let prove_guard = self.rw.read().await; + let mut indexs: Vec = vec![]; + indexs.push((prove_guard.front - 1) / (prove_guard.set_len * prove_guard.cluster_size) + 1); //idle-files-i + indexs.push((prove_guard.front - 1) / prove_guard.cluster_size + 1); //file-cluster-i + indexs.push((prove_guard.front - 1) % prove_guard.cluster_size + prove_guard.expanders.k); //sub-file-i + + deleter(IDLE_FILE_PATH, indexs).context("delete idle files error") + } + + pub async fn calc_generated_file(&mut self, dir: &str) -> Result { + let mut count = 0_i64; + let prover_guard 
= self.rw.read().await; + let file_total_size = + prover_guard.config.file_size * (prover_guard.expanders.k + prover_guard.cluster_size) * 1024 * 1024; + let root_size = (prover_guard.set_len * (prover_guard.expanders.k + prover_guard.cluster_size) + 1) + * (DEFAULT_HASH_SIZE as i64); + let mut next = 1_i64; + + let mut files = fs::read_dir(dir).await?; + while let Some(file) = files.next_entry().await? { + let file_name = file + .file_name() + .into_string() + .map_err(|_| anyhow!("failed to convert file name to string"))?; + let sidxs = file_name.split("-").collect::<Vec<&str>>(); + if sidxs.len() < 3 { + continue; + } + let number: i64 = sidxs[2].parse()?; + if number != prover_guard.rear / (prover_guard.set_len * prover_guard.cluster_size) + next { + continue; + } + if !file.file_type().await?.is_dir() { + continue; + } + let roots_file = file.path().join(COMMIT_FILE); + match fs::metadata(roots_file).await { + Ok(metadata) => { + if metadata.len() != root_size as u64 { + continue; + } + }, + Err(_) => continue, + } + + let mut clusters = fs::read_dir(file.path()).await?; + let mut i = 0; + while let Some(cluster) = clusters.next_entry().await? { + if !cluster.metadata().await?.is_dir() { + continue; + } + + let mut size = 0; + let mut files = fs::read_dir(cluster.path()).await?; + + while let Some(file) = files.next_entry().await? { + if !file.metadata().await?.is_dir() && file.metadata().await?.len() >= MINIFILESIZE as u64 { + size += file.metadata().await?.len() as i64; + } + } + if size == file_total_size { + count += prover_guard.cluster_size; + i += 1; + } + } + if i == prover_guard.set_len as usize { + next += 1; + } + } + Ok(count) + } +} + +pub fn deleter(root_dir: &str, indexs: Vec<i64>) -> Result<()> { + if indexs.is_empty() { + return Ok(()); + } + + let entries = std::fs::read_dir(root_dir)?; + + for entry in entries { + let entry = entry?; + let file_name = entry.file_name(); + let names: Vec<&str> = file_name.to_str().unwrap().split('-').collect(); + + let idx: i64 = match names[names.len() - 1].parse() { + Ok(idx) => idx, + Err(_) => continue, + }; + + if idx < indexs[0] || (idx == indexs[0] && !entry.path().is_dir()) { + util::delete_file(Path::new(root_dir).join(entry.path().to_str().unwrap()).to_str().unwrap())?; + continue; + } + + if idx != indexs[0] { + continue; + } + + // Recursive call + if let Err(e) = deleter(entry.path().to_str().unwrap(), indexs[1..].to_vec()) { + return Err(e); + } + } + + Ok(()) +} diff --git a/crates/ces-pois/src/tree/mod.rs b/crates/ces-pois/src/tree/mod.rs index a729003a..2b4ccd00 100644 --- a/crates/ces-pois/src/tree/mod.rs +++ b/crates/ces-pois/src/tree/mod.rs @@ -1,6 +1,11 @@ +use std::vec; + +use anyhow::Result; use sha2::{Digest, Sha256, Sha512}; -#[derive(Debug)] +pub type LightMHT = Vec<u8>; + +#[derive(Debug, Default)] pub struct PathProof { pub locs: Vec<u8>, pub path: Vec<Vec<u8>>, @@ -8,6 +13,126 @@ pub struct PathProof { pub const DEFAULT_HASH_SIZE: u32 = 32; +pub fn get_light_mht(e_len: i64) -> LightMHT { + vec![0u8; (e_len as u32 * DEFAULT_HASH_SIZE) as usize] +} + +// CalcLightMhtWithBytes calculates a lightweight MHT with fixed-size element data +pub fn calc_light_mht_with_bytes(mht: &mut LightMHT, data: &[u8], size: i64) { + let mut hasher = Sha256::new(); + for i in 0..(data.len() as i64 / size) as usize { + hasher.update(&data[i * size as usize..(i + 1) * size as usize]); + mht[i * DEFAULT_HASH_SIZE as usize..(i + 1) * DEFAULT_HASH_SIZE as usize] + .copy_from_slice(&hasher.finalize_reset()); + } + calc_light_mht(mht); +} + +pub fn 
calc_light_mht_with_aux(mht: &mut LightMHT, aux: &[u8]) { + mht.copy_from_slice(aux); + calc_light_mht(mht); +} +pub fn calc_light_mht(mht: &mut LightMHT) { + let lens = mht.len() as i64; + let mut p = lens / 2; + let mut src = mht.clone(); + let size = DEFAULT_HASH_SIZE as i64; + + for i in 0..((lens as f64) / (size as f64)).log2() as i64 + 1 { + let num = lens / (1 << (i + 1)); + let target = &mut mht[p as usize..p as usize + num as usize]; + + let mut j = (num / size) - 1; + let mut k = num * 2 / size - 2; + while j >= 0 && k >= 0 { + let mut hash = Sha256::new(); + hash.update(&src[(k * size) as usize..((k + 2) * size) as usize]); + + target[(j * size) as usize..((j + 1) * size) as usize].copy_from_slice(&hash.finalize()); + j = j - 1; + k = k - 2; + } + + p /= 2; + src.truncate(target.len()); + src.as_mut_slice().clone_from_slice(&target); + } +} + +pub fn get_root(mht: &LightMHT) -> Vec { + if mht.len() < (DEFAULT_HASH_SIZE * 2) as usize { + return Vec::new(); + } + let mut root = vec![0u8; DEFAULT_HASH_SIZE as usize]; + root.copy_from_slice(&mht[DEFAULT_HASH_SIZE as usize..(DEFAULT_HASH_SIZE * 2) as usize]); + + root +} + +pub fn get_path_proof(mht: &LightMHT, data: &[u8], index: i64, size: i64, hashed: bool) -> Result { + let mut size = size; + let mut index = index; + let deep = f64::log2(data.len() as f64 / size as f64) as i64; + let mut proof = PathProof { locs: vec![0u8; deep as usize], path: vec![Vec::new(); deep as usize] }; + let mut num = mht.len(); + let mut p = mht.len(); + let mut data = data.to_vec().clone(); + + for i in 0..deep { + let (d, loc) = if (index + 1) % 2 == 0 { + (data[((index - 1) * size) as usize..(index * size) as usize].to_vec(), 0) + } else { + (data[((index + 1) * size) as usize..((index + 2) * size) as usize].to_vec(), 1) + }; + if i == 0 && (size != DEFAULT_HASH_SIZE as i64 || !hashed) { + let mut hasher = Sha256::new(); + hasher.update(&d); + proof.path[i as usize] = hasher.finalize().to_vec(); + size = DEFAULT_HASH_SIZE as i64; + } else { + proof.path[i as usize] = vec![0u8; size as usize]; + proof.path[i as usize].copy_from_slice(&d); + } + proof.locs[i as usize] = loc; + num = num / 2; + index = index / 2; + p -= num; + data = mht[p..p + num].to_vec(); + } + Ok(proof) +} + +pub fn get_path_proof_with_aux(data: &Vec, aux: &mut Vec, index: usize, size: usize) -> Result { + let mut proof = PathProof::default(); + let aux_size = aux.len() / (DEFAULT_HASH_SIZE as usize); + let plate_size = data.len() / size / aux_size; + let mut mht = vec![0u8; plate_size * DEFAULT_HASH_SIZE as usize]; + let left = index / plate_size; + let data = &mut data[left * plate_size * size..(left + 1) * plate_size * size].to_vec().clone(); + + for i in 0..plate_size { + let mut hasher = Sha256::new(); + hasher.update(&data[i * size..(i + 1) * size]); + mht[i * DEFAULT_HASH_SIZE as usize..(i + 1) * DEFAULT_HASH_SIZE as usize] + .copy_from_slice(hasher.finalize().as_slice()); + } + + calc_light_mht(&mut mht); + let mut sub_proof = get_path_proof(&mht, data, (index % plate_size) as i64, size as i64, false)?; + + mht = vec![0u8; aux.len()]; + mht.copy_from_slice(&aux); + calc_light_mht(&mut mht); + let top_proof = get_path_proof(&mht, aux, left as i64, DEFAULT_HASH_SIZE as i64, true)?; + + sub_proof.locs.extend_from_slice(&top_proof.locs); + sub_proof.path.extend_from_slice(&top_proof.path); + proof.locs = sub_proof.locs; + proof.path = sub_proof.path; + + Ok(proof) +} + pub fn verify_path_proof(root: &[u8], data: &[u8], proof: PathProof) -> bool { if proof.locs.len() != 
proof.path.len() { return false; @@ -22,14 +147,14 @@ pub fn verify_path_proof(root: &[u8], data: &[u8], proof: PathProof) -> bool { let result = hash.finalize(); result.to_vec() - } + }, Hasher::SHA512(hash) => { let mut hash = hash; hash.update(data); let result = hash.finalize(); result.to_vec() - } + }, }; if data.len() != root.len() { @@ -54,7 +179,7 @@ pub fn verify_path_proof(root: &[u8], data: &[u8], proof: PathProof) -> bool { } let result = hash.finalize(); result.to_vec() - } + }, Hasher::SHA512(hash) => { let mut hash = hash; if proof.locs[i] == 0 { @@ -69,7 +194,7 @@ pub fn verify_path_proof(root: &[u8], data: &[u8], proof: PathProof) -> bool { } let result = hash.finalize(); result.to_vec() - } + }, }; } root.eq(&data) @@ -93,4 +218,4 @@ pub fn check_index_path(index: i64, locs: &[u8]) -> bool { pub enum Hasher { SHA256(Sha256), SHA512(Sha512), -} \ No newline at end of file +} diff --git a/crates/ces-pois/src/util/mod.rs b/crates/ces-pois/src/util/mod.rs index 702fa3f4..b0492354 100644 --- a/crates/ces-pois/src/util/mod.rs +++ b/crates/ces-pois/src/util/mod.rs @@ -1,9 +1,111 @@ -use std::fs; +use std::{ + env::current_dir, + fs, + io::{BufReader, BufWriter, Read, Write}, + path::Path, +}; use crate::acc::{self, RsaKey}; -use anyhow::Result; +use anyhow::{anyhow, bail, Context, Result}; use num_bigint_dig::BigUint; +#[cfg(feature = "use-sysinfo")] +use sysinfo::Disks; + +pub fn save_proof_file(path: &Path, data: &[Vec]) -> Result<()> { + let f = fs::File::create(path).context("save proof file error")?; + let mut writer = BufWriter::new(f); + + for d in data { + let n = writer.write(d)?; + if n != d.len() { + bail!("write proof file error:write label error") + } + } + + writer.flush()?; + Ok(()) +} + +#[cfg(feature = "use-sysinfo")] +pub fn get_dir_free_space(dir: &str) -> Result { + let current_dir = current_dir()?; + let mut dir = Path::new(dir); + let joined_dir = current_dir.join(dir); + + dir = if dir.is_absolute() { dir } else { &joined_dir }; + + let disks = Disks::new_with_refreshed_list(); + let mut available_space = 0; + + for disk in disks.list() { + if dir.starts_with(disk.mount_point().to_path_buf()) { + available_space = disk.available_space(); + break; + } + } + Ok(available_space) +} + +#[cfg(not(feature = "use-sysinfo"))] +pub fn get_dir_free_space(_dir: &str) -> Result { + Err(anyhow!("get_dir_free_space is not available without 'use-sysinfo' feature")) +} + +pub fn read_proof_file(path: &Path, num: usize, len: usize) -> Result>> { + if num <= 0 { + bail!("illegal label number") + } + + let file = fs::File::open(path)?; + let mut reader = BufReader::new(file); + let mut data = Vec::with_capacity(num); + + for _ in 0..num { + let mut label = vec![0; len]; + let n = reader.read(&mut label)?; + if n != len { + bail!("read label error: expected {} bytes, got {}", len, n) + } + data.push(label); + } + + Ok(data) +} + +pub fn delete_dir(dir: &str) -> Result<()> { + fs::remove_dir_all(dir).context("delete dir error") +} + +pub fn save_file(path: &Path, data: &[u8]) -> Result<()> { + let f = fs::File::create(path)?; + let mut writer = BufWriter::new(f); + + writer.write_all(data)?; + writer.flush()?; + + Ok(()) +} + +pub fn delete_file(path: &str) -> Result<()> { + if Path::new(path).exists() { + fs::remove_file(path)?; + } + Ok(()) +} + +pub fn read_file_to_buf(path: &Path, buf: &mut [u8]) -> Result<()> { + if buf.is_empty() { + return Ok(()); + } + let mut file = fs::File::open(path)?; + let bytes_read = file.read(buf)?; + if bytes_read != buf.len() { + bail!("byte 
number read does not match") + } + Ok(()) +} + pub fn copy_data(target: &mut [u8], src: &[&[u8]]) { let mut count = 0; let lens = target.len(); @@ -53,4 +155,46 @@ pub fn clear_data(target: &mut [u8]) { for element in target.iter_mut() { *element = 0; } -} \ No newline at end of file +} + +pub fn copy_files(src: &str, dst: &str) -> Result<()> { + if !fs::read_dir(dst).is_err() { + fs::remove_dir_all(dst)?; + } + + fs::create_dir_all(dst)?; + + let files = fs::read_dir(src)?; + + //check file in src directory is folder or not , if is folder then continue, otherwise open the file and copy on into det directory + for file in files { + let file_path = file?.path(); + if file_path.is_dir() { + continue; + } else { + fs::copy( + &file_path, + Path::new(dst).join( + file_path + .file_name() + .ok_or_else(|| anyhow!("Invalid file name"))? + .to_str() + .unwrap(), + ), + )?; + } + } + + // fs::copy(src, dst)?; + + Ok(()) +} + +pub fn copy_file(src: &str, des: &str) -> Result<()> { + let mut df = fs::File::create(des)?; + let mut sf = fs::File::open(src)?; + + std::io::copy(&mut sf, &mut df)?; + df.flush()?; + Ok(()) +} diff --git a/crates/cestory/Cargo.toml b/crates/cestory/Cargo.toml index fef7952f..509e3082 100644 --- a/crates/cestory/Cargo.toml +++ b/crates/cestory/Cargo.toml @@ -60,10 +60,7 @@ tonic = { workspace = true } rsa = { workspace = true } # CESS specific -ces-crypto = { workspace = true, features = [ - "stream", - "full_crypto", -] } +ces-crypto = { workspace = true, features = ["stream", "full_crypto"] } ces-mq = { workspace = true, features = [ "dispatcher", "queue", @@ -103,9 +100,5 @@ ciborium = "0.2.0" default = ["sp-io/disable_panic_handler", "sp-io/disable_oom"] shadow-gk = [] -only-attestation = [ - "runtime/only-attestation" -] -verify-cesealbin = [ - "runtime/verify-cesealbin" -] \ No newline at end of file +only-attestation = ["runtime/only-attestation"] +verify-cesealbin = ["runtime/verify-cesealbin"] diff --git a/crates/cestory/src/lib.rs b/crates/cestory/src/lib.rs index 5afdfad5..a2cac13c 100644 --- a/crates/cestory/src/lib.rs +++ b/crates/cestory/src/lib.rs @@ -178,13 +178,14 @@ fn glob_checkpoint_files_sorted(basedir: &str) -> Result error!("Error globbing checkpoints: {:?}", err), - Ok(iter) => + Ok(iter) => { for filename in iter { info!("Removing {}", filename.display()); if let Err(e) = std::fs::remove_file(&filename) { error!("Failed to remove {}: {}", filename.display(), e); } - }, + } + }, } } @@ -194,7 +195,7 @@ fn remove_outdated_checkpoints(basedir: &str, max_kept: u32, current_block: chai glob_checkpoint_files_sorted(basedir).map_err(|e| anyhow!("error in glob_checkpoint_files_sorted(): {e}"))?; for (block, filename) in checkpoints { if block > current_block { - continue + continue; } kept += 1; if kept > max_kept { @@ -288,7 +289,7 @@ pub struct Ceseal { #[serde(skip)] #[serde(default = "Instant::now")] last_checkpoint: Instant, - + #[codec(skip)] #[serde(skip)] trusted_sk: bool, @@ -365,10 +366,10 @@ impl Ceseal { if let Some(ref keyfariy) = system.keyfairy { Ok(keyfariy.master_key().clone()) } else { - return Err(types::Error::KeyfairyNotReady) + return Err(types::Error::KeyfairyNotReady); } } else { - return Err(types::Error::SystemNotReady) + return Err(types::Error::SystemNotReady); } } @@ -444,7 +445,7 @@ impl Ceseal { let filepath = PathBuf::from(sealing_path).join(RUNTIME_SEALED_DATA_FILE); let data = platform .unseal_data(filepath) - .map_err(|_| Error::UnsealOnLoad)? + .map_err(|e| Error::UnsealOnLoad(e.into()))? 
.ok_or(Error::PersistentRuntimeNotFound)?; let data: RuntimeDataSeal = Decode::decode(&mut &data[..]).map_err(Error::DecodeError)?; match data { @@ -587,7 +588,7 @@ impl Ceseal { let files = glob_checkpoint_files_sorted(&args.storage_path) .map_err(|e| anyhow!("Glob checkpoint files failed: {e}"))?; if files.is_empty() { - return Ok(None) + return Ok(None); } let (_block, ckpt_filename) = &files[0]; @@ -669,7 +670,7 @@ impl Ceseal { .next_element()? .ok_or_else(|| de::Error::custom("Checkpoint version missing"))?; if version > CHECKPOINT_VERSION { - return Err(de::Error::custom(format!("Checkpoint version {version} is not supported"))) + return Err(de::Error::custom(format!("Checkpoint version {version} is not supported"))); } let mut factory: Self::Value = @@ -788,10 +789,10 @@ impl CesealSafeBox { let guard = self.0.lock().map_err(|e| CesealLockError::Poison(e.to_string()))?; trace!(target: "cestory::lock", "Locked cestory"); if !allow_rcu && guard.rcu_dispatching { - return Err(CesealLockError::Rcu) + return Err(CesealLockError::Rcu); } if !allow_safemode && guard.args.safe_mode_level > 0 { - return Err(CesealLockError::SafeMode) + return Err(CesealLockError::SafeMode); } Ok(LogOnDrop { inner: guard, msg: "Unlocked cestory" }) } diff --git a/crates/cestory/src/types.rs b/crates/cestory/src/types.rs index 4a4e735c..28ab1a15 100644 --- a/crates/cestory/src/types.rs +++ b/crates/cestory/src/types.rs @@ -76,8 +76,8 @@ pub enum Error { #[error("external server already closed")] ExternalServerAlreadyClosed, - #[error("unseal error on load_runtime_data()")] - UnsealOnLoad, + #[error("unseal error on load_runtime_data() because : {0}")] + UnsealOnLoad(#[from] anyhow::Error), #[error("{0}")] Anyhow(anyhow::Error), diff --git a/scripts/docker/env/gramine-rust.Dockerfile b/scripts/docker/env/gramine-rust.Dockerfile index 9c971556..46cf893b 100644 --- a/scripts/docker/env/gramine-rust.Dockerfile +++ b/scripts/docker/env/gramine-rust.Dockerfile @@ -29,4 +29,9 @@ RUN curl -fsSL 'https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gn echo 'source /root/.cargo/env' >> .bashrc && \ .cargo/bin/rustup component add rust-src rust-analysis clippy && \ .cargo/bin/rustup target add wasm32-unknown-unknown && \ - rm rustup-init && rm -rf .cargo/registry && rm -rf .cargo/git \ No newline at end of file + rm rustup-init && rm -rf .cargo/registry && rm -rf .cargo/git + +RUN .cargo/bin/rustup install 1.82.0 && \ + .cargo/bin/rustup default 1.82.0 && \ + .cargo/bin/rustup component add cargo clippy rust-analyzer rust-src rust-std rustc-dev rustc rustfmt && \ + .cargo/bin/rustup target add wasm32-unknown-unknown \ No newline at end of file diff --git a/standalone/teeworker/ceseal/Cargo.lock b/standalone/teeworker/ceseal/Cargo.lock index a4d7920f..f8133dfd 100644 --- a/standalone/teeworker/ceseal/Cargo.lock +++ b/standalone/teeworker/ceseal/Cargo.lock @@ -1403,7 +1403,9 @@ name = "ces-pois" version = "0.4.5" dependencies = [ "anyhow", + "async-trait", "bigdecimal", + "byteorder", "dashmap", "hex", "lazy_static", @@ -1415,7 +1417,9 @@ dependencies = [ "rand 0.8.5", "rsa", "serde", + "serde_json", "sha2 0.10.8", + "tokio", ] [[package]] @@ -1519,7 +1523,7 @@ dependencies = [ [[package]] name = "cess-node-runtime" -version = "0.10.0" +version = "0.10.1" dependencies = [ "ces-pallet-mq", "ces-pallet-mq-runtime-api",