commit 36cb0687cb
parent 8368ca461a

    record work
@@ -1,38 +0,0 @@
-use super::*;
-use crate::storage_manager::ValueDetail;
-
-pub fn encode_value_detail(
-    value_detail: &ValueDetail,
-    builder: &mut veilid_capnp::value_detail::Builder,
-) -> Result<(), RPCError> {
-    let mut svdb = builder.reborrow().init_signed_value_data();
-    encode_signed_value_data(value_detail.signed_value_data(), &mut svdb)?;
-    if let Some(descriptor) = value_detail.descriptor() {
-        let mut db = builder.reborrow().init_descriptor();
-        encode_signed_value_descriptor(descriptor, &mut db)?;
-    }
-    Ok(())
-}
-
-pub fn decode_value_detail(
-    reader: &veilid_capnp::value_detail::Reader,
-) -> Result<ValueDetail, RPCError> {
-    let svdr = reader.get_signed_value_data().map_err(RPCError::protocol)?;
-    let signed_value_data = decode_signed_value_data(&svdr)?;
-
-    let descriptor = if reader.has_descriptor() {
-        let dr = reader
-            .reborrow()
-            .get_descriptor()
-            .map_err(RPCError::protocol)?;
-        let descriptor = decode_signed_value_descriptor(&dr)?;
-        Some(descriptor)
-    } else {
-        None
-    };
-
-    Ok(ValueDetail {
-        signed_value_data,
-        descriptor,
-    })
-}

@@ -1,14 +1,10 @@
 mod keys;
-mod record;
-mod record_data;
 mod record_store;
 mod record_store_limits;
 mod tasks;
 mod types;
 
 use keys::*;
-use record::*;
-use record_data::*;
 use record_store::*;
 use record_store_limits::*;
 
@@ -28,10 +24,12 @@ const FLUSH_RECORD_STORES_INTERVAL_SECS: u32 = 1;
 struct StorageManagerInner {
     /// If we are started up
     initialized: bool,
-    /// Records that have been 'created' or 'opened' by this node
-    local_record_store: Option<RecordStore>,
-    /// Records that have been pushed to this node for distribution by other nodes
-    remote_record_store: Option<RecordStore>,
+    /// Records that have been 'opened' and are not yet closed
+    opened_records: HashMap<TypedKey, OpenedRecord>,
+    /// Records that have ever been 'created' or 'opened' by this node, things we care about that we must republish to keep alive
+    local_record_store: Option<RecordStore<LocalRecordDetail>>,
+    /// Records that have been pushed to this node for distribution by other nodes, that we make an effort to republish
+    remote_record_store: Option<RecordStore<RemoteRecordDetail>>,
     /// RPC processor if it is available
     rpc_processor: Option<RPCProcessor>,
     /// Background processing task (not part of attachment manager tick tree so it happens when detached too)
@@ -75,6 +73,7 @@ impl StorageManager {
     fn new_inner() -> StorageManagerInner {
         StorageManagerInner {
             initialized: false,
+            opened_records: HashMap::new(),
             local_record_store: None,
             remote_record_store: None,
             rpc_processor: None,
@@ -201,7 +200,7 @@ impl StorageManager {
     }
 
     /// # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
-    fn get_key(&self, vcrypto: CryptoSystemVersion, record: &Record) -> TypedKey {
+    fn get_key<D>(vcrypto: CryptoSystemVersion, record: &Record<D>) -> TypedKey {
         let compiled = record.descriptor().schema_data();
         let mut hash_data = Vec::<u8>::with_capacity(PUBLIC_KEY_LENGTH + 4 + compiled.len());
         hash_data.extend_from_slice(&vcrypto.kind().0);
@@ -211,20 +210,12 @@ impl StorageManager {
         TypedKey::new(vcrypto.kind(), hash)
     }
 
-    async fn new_local_record(
-        &self,
-        vcrypto: CryptoSystemVersion,
-        record: Record,
-    ) -> Result<TypedKey, VeilidAPIError> {
-        // add value record to record store
-        let mut inner = self.inner.lock().await;
+    async fn lock(&self) -> Result<AsyncMutexGuardArc<StorageManagerInner>, VeilidAPIError> {
+        let inner = asyncmutex_lock_arc!(&self.inner);
         if !inner.initialized {
             apibail_generic!("not initialized");
         }
-        let local_record_store = inner.local_record_store.as_mut().unwrap();
-        let key = self.get_key(vcrypto.clone(), &record);
-        local_record_store.new_record(key, record).await?;
-        Ok(key)
+        Ok(inner)
     }
 
     pub async fn create_record(
@@ -232,7 +223,9 @@ impl StorageManager {
         kind: CryptoKind,
         schema: DHTSchema,
         safety_selection: SafetySelection,
-    ) -> Result<TypedKey, VeilidAPIError> {
+    ) -> Result<DHTRecordDescriptor, VeilidAPIError> {
+        let mut inner = self.lock().await?;
+
         // Get cryptosystem
         let Some(vcrypto) = self.unlocked_inner.crypto.get(kind) else {
             apibail_generic!("unsupported cryptosystem");
@@ -254,34 +247,113 @@ impl StorageManager {
 
         // Add new local value record
        let cur_ts = get_aligned_timestamp();
-        let record = Record::new(
-            cur_ts,
-            signed_value_descriptor,
-            Some(owner.secret),
-            safety_selection,
-        )?;
-        let dht_key = self
-            .new_local_record(vcrypto, record)
-            .await
-            .map_err(VeilidAPIError::internal)?;
+        let local_record_detail = LocalRecordDetail { safety_selection };
+        let record =
+            Record::<LocalRecordDetail>::new(cur_ts, signed_value_descriptor, local_record_detail)?;
 
-        Ok(dht_key)
+        let local_record_store = inner.local_record_store.as_mut().unwrap();
+        let dht_key = Self::get_key(vcrypto.clone(), &record);
+        local_record_store.new_record(dht_key, record).await?;
+
+        // Open the record
+        self.open_record_inner(inner, dht_key, Some(owner), safety_selection)
+            .await
+    }
+
+    async fn open_record_inner(
+        &self,
+        mut inner: AsyncMutexGuardArc<StorageManagerInner>,
+        key: TypedKey,
+        writer: Option<KeyPair>,
+        safety_selection: SafetySelection,
+    ) -> Result<DHTRecordDescriptor, VeilidAPIError> {
+        // Get cryptosystem
+        let Some(vcrypto) = self.unlocked_inner.crypto.get(key.kind) else {
+            apibail_generic!("unsupported cryptosystem");
+        };
+
+        // See if we have a local record already or not
+        let cb = |r: &Record<LocalRecordDetail>| {
+            // Process local record
+            (r.owner().clone(), r.schema())
+        };
+        if let Some((owner, schema)) = inner.local_record_store.unwrap().with_record(key, cb) {
+            // Had local record
+
+            // If the writer we chose is also the owner, we have the owner secret
+            // Otherwise this is just another subkey writer
+            let owner_secret = if let Some(writer) = writer {
+                if writer.key == owner {
+                    Some(writer.secret)
+                } else {
+                    None
+                }
+            } else {
+                None
+            };
+
+            // Write open record
+            inner.opened_records.insert(key, OpenedRecord { writer });
+
+            // Make DHT Record Descriptor to return
+            let descriptor = DHTRecordDescriptor {
+                key,
+                owner,
+                owner_secret,
+                schema,
+            };
+            Ok(descriptor)
+        } else {
+            // No record yet
+
+            // Make DHT Record Descriptor to return
+            let descriptor = DHTRecordDescriptor {
+                key,
+                owner,
+                owner_secret,
+                schema,
+            };
+            Ok(descriptor)
+        }
     }
 
     pub async fn open_record(
         &self,
         key: TypedKey,
-        secret: Option<SecretKey>,
+        writer: Option<KeyPair>,
         safety_selection: SafetySelection,
     ) -> Result<DHTRecordDescriptor, VeilidAPIError> {
-        unimplemented!();
+        let inner = self.lock().await?;
+        self.open_record_inner(inner, key, writer, safety_selection)
+            .await
+    }
+
+    async fn close_record_inner(
+        &self,
+        mut inner: AsyncMutexGuardArc<StorageManagerInner>,
+        key: TypedKey,
+    ) -> Result<(), VeilidAPIError> {
+        let Some(opened_record) = inner.opened_records.remove(&key) else {
+            apibail_generic!("record not open");
+        };
+        Ok(())
     }
 
     pub async fn close_record(&self, key: TypedKey) -> Result<(), VeilidAPIError> {
-        unimplemented!();
+        let inner = self.lock().await?;
+        self.close_record_inner(inner, key).await
     }
 
     pub async fn delete_record(&self, key: TypedKey) -> Result<(), VeilidAPIError> {
+        let inner = self.lock().await?;
+
+        // Ensure the record is closed
+        if inner.opened_records.contains_key(&key) {
+            self.close_record_inner(inner, key).await?;
+        }
+
+        // Remove
+
         unimplemented!();
     }
 
@@ -291,6 +363,7 @@ impl StorageManager {
         subkey: ValueSubkey,
         force_refresh: bool,
     ) -> Result<Option<ValueData>, VeilidAPIError> {
+        let inner = self.lock().await?;
         unimplemented!();
     }
 
@@ -300,6 +373,7 @@ impl StorageManager {
         subkey: ValueSubkey,
         data: Vec<u8>,
     ) -> Result<Option<ValueData>, VeilidAPIError> {
+        let inner = self.lock().await?;
         unimplemented!();
     }
 
@@ -310,6 +384,7 @@ impl StorageManager {
         expiration: Timestamp,
         count: u32,
     ) -> Result<Timestamp, VeilidAPIError> {
+        let inner = self.lock().await?;
         unimplemented!();
     }
 
@@ -318,6 +393,7 @@ impl StorageManager {
         key: TypedKey,
         subkeys: &[ValueSubkeyRange],
     ) -> Result<bool, VeilidAPIError> {
+        let inner = self.lock().await?;
         unimplemented!();
     }
 }

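
Note: the hunks above replace direct locking of the inner state with a lock() helper that returns an owned async-mutex guard, which the public record methods then hand to *_inner helpers so the state stays locked for the whole operation. Below is a minimal standalone sketch of that guard-passing pattern, assuming tokio; Manager, Inner, and the u64 keys are illustrative stand-ins, not Veilid code.

    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::{Mutex, OwnedMutexGuard};

    struct Inner {
        initialized: bool,
        opened: HashMap<u64, String>, // stand-in for opened_records
    }

    struct Manager {
        inner: Arc<Mutex<Inner>>,
    }

    impl Manager {
        // Mirrors the lock() helper: take an owned guard and validate shared state once.
        async fn lock(&self) -> Result<OwnedMutexGuard<Inner>, String> {
            let inner = self.inner.clone().lock_owned().await;
            if !inner.initialized {
                return Err("not initialized".to_owned());
            }
            Ok(inner)
        }

        // The guard is passed in, so the helper never re-locks (and cannot deadlock on itself).
        async fn close_record_inner(
            &self,
            mut inner: OwnedMutexGuard<Inner>,
            key: u64,
        ) -> Result<(), String> {
            inner.opened.remove(&key).ok_or("record not open")?;
            Ok(())
        }

        async fn close_record(&self, key: u64) -> Result<(), String> {
            let inner = self.lock().await?;
            self.close_record_inner(inner, key).await
        }
    }

    #[tokio::main]
    async fn main() {
        let mgr = Manager {
            inner: Arc::new(Mutex::new(Inner {
                initialized: true,
                opened: HashMap::from([(1u64, "record".to_owned())]),
            })),
        };
        assert!(mgr.close_record(1).await.is_ok());
        assert!(mgr.close_record(1).await.is_err()); // already closed
    }

Because the guard is owned rather than borrowed, it can be returned from lock() and carried across await points, which is what the asyncmutex_lock_arc! macro later in this commit provides for both runtimes.
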
@@ -7,25 +7,25 @@
 use super::*;
 use hashlink::LruCache;
 
-pub struct RecordStore {
+pub struct RecordStore<D> {
     table_store: TableStore,
     name: String,
     limits: RecordStoreLimits,
 
     record_table: Option<TableDB>,
     subkey_table: Option<TableDB>,
-    record_index: LruCache<RecordTableKey, Record>,
+    record_index: LruCache<RecordTableKey, Record<D>>,
     subkey_cache: LruCache<SubkeyTableKey, RecordData>,
     subkey_cache_total_size: usize,
     total_storage_space: usize,
 
-    dead_records: Vec<(RecordTableKey, Record)>,
+    dead_records: Vec<(RecordTableKey, Record<D>)>,
     changed_records: HashSet<RecordTableKey>,
 
     purge_dead_records_mutex: Arc<AsyncMutex<()>>,
 }
 
-impl RecordStore {
+impl<D> RecordStore<D> {
     pub fn new(table_store: TableStore, name: &str, limits: RecordStoreLimits) -> Self {
         let subkey_cache_size = limits.subkey_cache_size as usize;
         Self {
@@ -92,7 +92,7 @@ impl RecordStore {
         Ok(())
     }
 
-    fn add_dead_record(&mut self, key: RecordTableKey, record: Record) {
+    fn add_dead_record(&mut self, key: RecordTableKey, record: Record<D>) {
         self.dead_records.push((key, record));
     }
 
@@ -135,7 +135,7 @@ impl RecordStore {
     async fn purge_dead_records(&mut self, lazy: bool) {
         let purge_dead_records_mutex = self.purge_dead_records_mutex.clone();
         let _lock = if lazy {
-            match mutex_try_lock!(purge_dead_records_mutex) {
+            match asyncmutex_try_lock!(purge_dead_records_mutex) {
                 Some(v) => v,
                 None => {
                     // If not ready now, just skip it if we're lazy
@@ -221,7 +221,7 @@ impl RecordStore {
     pub async fn new_record(
         &mut self,
         key: TypedKey,
-        record: Record,
+        record: Record<D>,
     ) -> Result<(), VeilidAPIError> {
         let rtk = RecordTableKey { key };
         if self.record_index.contains_key(&rtk) {
@@ -269,7 +269,7 @@ impl RecordStore {
 
     pub fn with_record<R, F>(&mut self, key: TypedKey, f: F) -> Option<R>
     where
-        F: FnOnce(&Record) -> R,
+        F: FnOnce(&Record<D>) -> R,
     {
         // Get record from index
         let mut out = None;

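
RecordStore is now generic over the record's detail type D, and callers read records through the with_record closure instead of taking references into the LRU index. A standalone sketch of that shape, with std HashMap standing in for the hashlink::LruCache used above (illustrative only, not Veilid code):

    use std::collections::HashMap;

    struct Record<D> {
        subkey_count: usize,
        detail: D,
    }

    struct RecordStore<D> {
        record_index: HashMap<u64, Record<D>>, // stand-in for LruCache<RecordTableKey, Record<D>>
    }

    impl<D> RecordStore<D> {
        // Expose the record to a closure; the caller copies out what it needs,
        // so no reference into the index ever escapes the store.
        fn with_record<R, F>(&mut self, key: u64, f: F) -> Option<R>
        where
            F: FnOnce(&Record<D>) -> R,
        {
            self.record_index.get(&key).map(f)
        }
    }

    fn main() {
        let mut store = RecordStore {
            record_index: HashMap::from([(1u64, Record { subkey_count: 8, detail: "local" })]),
        };
        let out = store.with_record(1, |r| (r.subkey_count, r.detail));
        assert_eq!(out, Some((8, "local")));
    }

This mirrors how open_record_inner above pulls (owner, schema) out of the store through a closure rather than holding a borrow of the record.
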
veilid-core/src/storage_manager/types/local_record_detail.rs (new file)
@@ -0,0 +1,15 @@
+use super::*;
+
+use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
+use serde::*;
+
+/// Information required to handle locally opened records
+#[derive(
+    Clone, Debug, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+)]
+#[archive_attr(repr(C), derive(CheckBytes))]
+pub struct LocalRecordDetail {
+    /// The last 'safety selection' used when creating/opening this record.
+    /// Even when closed, this safety selection applies to republication attempts by the system.
+    safety_selection: SafetySelection,
+}

@@ -1,7 +1,17 @@
+mod local_record_detail;
+mod opened_record;
+mod record;
+mod record_data;
+mod remote_record_detail;
 mod signed_value_data;
 mod signed_value_descriptor;
 
 use super::*;
 
+pub use local_record_detail::*;
+pub use opened_record::*;
+pub use record::*;
+pub use record_data::*;
+pub use remote_record_detail::*;
 pub use signed_value_data::*;
 pub use signed_value_descriptor::*;

veilid-core/src/storage_manager/types/opened_record.rs (new file)
@@ -0,0 +1,21 @@
+use super::*;
+
+/// The state associated with a local record when it is opened
+/// This is not serialized to storage as it is ephemeral for the lifetime of the opened record
+#[derive(Clone, Debug, Default)]
+pub struct OpenedRecord {
+    /// The key pair used to perform writes to subkey on this opened record
+    /// Without this, set_value() will fail regardless of which key or subkey is being written to
+    /// as all writes are signed
+    writer: Option<KeyPair>,
+}
+
+impl OpenedRecord {
+    pub fn new(writer: Option<KeyPair>) -> Self {
+        Self { writer }
+    }
+
+    pub fn writer(&self) -> Option<&KeyPair> {
+        self.writer.as_ref()
+    }
+}

@@ -6,32 +6,28 @@ use serde::*;
     Clone, Debug, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
-pub struct Record {
-    last_touched_ts: Timestamp,
+pub struct Record<D> {
     descriptor: SignedValueDescriptor,
     subkey_count: usize,
-    owner_secret: Option<SecretKey>,
-    safety_selection: SafetySelection,
+    last_touched_ts: Timestamp,
     record_data_size: usize,
+    detail: D,
 }
 
-impl Record {
+impl<D> Record<D> {
     pub fn new(
         cur_ts: Timestamp,
         descriptor: SignedValueDescriptor,
-        owner_secret: Option<SecretKey>,
-        safety_selection: SafetySelection,
+        detail: D,
     ) -> Result<Self, VeilidAPIError> {
         let schema = descriptor.schema()?;
         let subkey_count = schema.subkey_count();
         Ok(Self {
-            last_touched_ts: cur_ts,
             descriptor,
             subkey_count,
-            owner_secret,
-            safety_selection,
+            last_touched_ts: cur_ts,
             record_data_size: 0,
+            detail,
         })
     }
 
@@ -68,6 +64,13 @@ impl Record {
     }
 
     pub fn total_size(&self) -> usize {
-        mem::size_of::<Record>() + self.descriptor.total_size() + self.record_data_size
+        mem::size_of::<Record<D>>() + self.descriptor.total_size() + self.record_data_size
+    }
+
+    pub fn detail(&self) -> &D {
+        &self.detail
+    }
+    pub fn detail_mut(&mut self) -> &mut D {
+        &mut self.detail
     }
 }

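
Record now carries a caller-supplied detail value in place of the old owner_secret/safety_selection fields, so the same struct serves local records (which remember a safety selection) and remote records (which currently need nothing extra). A minimal sketch of that parameterization with placeholder types (not the real Veilid types):

    // Stand-ins for LocalRecordDetail / RemoteRecordDetail.
    struct LocalDetail {
        safety_selection: u8,
    }
    struct RemoteDetail {}

    struct Record<D> {
        subkey_count: usize,
        detail: D,
    }

    impl<D> Record<D> {
        fn new(subkey_count: usize, detail: D) -> Self {
            Self { subkey_count, detail }
        }
        fn detail(&self) -> &D {
            &self.detail
        }
    }

    fn main() {
        // The store-side code stays generic; only the detail payload differs.
        let local = Record::new(4, LocalDetail { safety_selection: 0 });
        let remote = Record::new(4, RemoteDetail {});
        println!("{} {}", local.detail().safety_selection, remote.subkey_count);
    }
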
@@ -0,0 +1,10 @@
+use super::*;
+
+use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
+use serde::*;
+
+#[derive(
+    Clone, Debug, PartialEq, Eq, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
+)]
+#[archive_attr(repr(C), derive(CheckBytes))]
+pub struct RemoteRecordDetail {}

@@ -1,77 +0,0 @@
-use super::*;
-use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
-use serde::*;
-
-/////////////////////////////////////////////////////////////////////////////////////////////////////
-///
-
-#[derive(
-    Clone,
-    Debug,
-    PartialOrd,
-    PartialEq,
-    Eq,
-    Ord,
-    Serialize,
-    Deserialize,
-    RkyvArchive,
-    RkyvSerialize,
-    RkyvDeserialize,
-)]
-#[archive_attr(repr(C), derive(CheckBytes))]
-pub struct ValueDetail {
-    signed_value_data: SignedValueData,
-    descriptor: Option<SignedValueDescriptor>,
-}
-
-impl ValueDetail {
-    pub fn new(
-        signed_value_data: SignedValueData,
-        descriptor: Option<SignedValueDescriptor>,
-    ) -> Self {
-        Self {
-            signed_value_data,
-            descriptor,
-        }
-    }
-
-    pub fn validate(
-        &self,
-        last_descriptor: Option<&SignedValueDescriptor>,
-        subkey: ValueSubkey,
-        vcrypto: CryptoSystemVersion,
-    ) -> Result<(), VeilidAPIError> {
-        // Get descriptor to validate with
-        let descriptor = if let Some(descriptor) = &self.descriptor {
-            if let Some(last_descriptor) = last_descriptor {
-                if descriptor.cmp_no_sig(&last_descriptor) != cmp::Ordering::Equal {
-                    return Err(VeilidAPIError::generic(
-                        "value detail descriptor does not match last descriptor",
-                    ));
-                }
-            }
-            descriptor
-        } else {
-            let Some(descriptor) = last_descriptor else {
-                return Err(VeilidAPIError::generic(
-                    "no last descriptor, requires a descriptor",
-                ));
-            };
-            descriptor
-        };
-
-        // Ensure the descriptor itself validates
-        descriptor.validate(vcrypto.clone())?;
-
-        // And the signed value data
-        self.signed_value_data
-            .validate(descriptor.owner(), subkey, vcrypto)
-    }
-
-    pub fn signed_value_data(&self) -> &SignedValueData {
-        &self.signed_value_data
-    }
-    pub fn descriptor(&self) -> Option<&SignedValueDescriptor> {
-        self.descriptor.as_ref()
-    }
-}

@@ -203,7 +203,7 @@ impl RoutingContext {
         &self,
         kind: CryptoKind,
         schema: DHTSchema,
-    ) -> Result<TypedKey, VeilidAPIError> {
+    ) -> Result<DHTRecordDescriptor, VeilidAPIError> {
         let storage_manager = self.api.storage_manager()?;
         storage_manager
             .create_record(kind, schema, self.unlocked_inner.safety_selection)
@@ -216,7 +216,7 @@ impl RoutingContext {
     pub async fn open_dht_record(
         &self,
         key: TypedKey,
-        secret: Option<SecretKey>,
+        writer: Option<KeyPair>,
     ) -> Result<DHTRecordDescriptor, VeilidAPIError> {
         let storage_manager = self.api.storage_manager()?;
         storage_manager
@@ -232,7 +232,7 @@ impl RoutingContext {
     }
 
     /// Deletes a DHT record at a specific key. If the record is opened, it must be closed before it is deleted.
-    /// Deleting a record does not delete it from the network immediately, but will remove the storage of the record
+    /// Deleting a record does not delete it from the network, but will remove the storage of the record
     /// locally, and will prevent its value from being refreshed on the network by this node.
     pub async fn delete_dht_record(&self, key: TypedKey) -> Result<(), VeilidAPIError> {
         let storage_manager = self.api.storage_manager()?;

@@ -16,19 +16,40 @@ use super::*;
 )]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct DHTRecordDescriptor {
+    /// DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
+    key: TypedKey,
+    /// The public key of the owner
     owner: PublicKey,
+    /// If this key is being created: Some(the secret key of the owner)
+    /// If this key is just being opened: None
+    owner_secret: Option<SecretKey>,
+    /// The schema in use associated with the key
     schema: DHTSchema,
 }
 
 impl DHTRecordDescriptor {
-    pub fn new(owner: PublicKey, schema: DHTSchema) -> Self {
-        Self { owner, schema }
+    pub fn new(
+        key: TypedKey,
+        owner: PublicKey,
+        owner_secret: Option<SecretKey>,
+        schema: DHTSchema,
+    ) -> Self {
+        Self {
+            key,
+            owner,
+            owner_secret,
+            schema,
+        }
     }
 
     pub fn owner(&self) -> &PublicKey {
         &self.owner
     }
 
+    pub fn owner_secret(&self) -> Option<&SecretKey> {
+        self.owner_secret.as_ref()
+    }
+
     pub fn schema(&self) -> &DHTSchema {
         &self.schema
     }

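
The new owner_secret field is Some only when this node holds the owner key pair — at creation time, or when open_record_inner above is given the owner as the writer — and None for a plain open. A tiny standalone illustration of that contract with stand-in key types (not Veilid code):

    struct RecordDescriptor {
        owner: [u8; 32],                // stand-in for PublicKey
        owner_secret: Option<[u8; 32]>, // stand-in for Option<SecretKey>
    }

    // writer is an optional (public, secret) pair; keep the secret only if it is the owner's.
    fn descriptor_for(owner: [u8; 32], writer: Option<([u8; 32], [u8; 32])>) -> RecordDescriptor {
        let owner_secret = match writer {
            Some((public, secret)) if public == owner => Some(secret),
            _ => None,
        };
        RecordDescriptor { owner, owner_secret }
    }

    fn main() {
        let owner = [1u8; 32];
        let created = descriptor_for(owner, Some((owner, [2u8; 32]))); // creator knows the secret
        let opened = descriptor_for(owner, None);                      // plain open: no secret
        assert!(created.owner_secret.is_some());
        assert!(opened.owner_secret.is_none());
        println!("owner {:?}", &created.owner[..4]);
    }
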
@@ -35,18 +35,31 @@ macro_rules! bail_io_error_other {
 cfg_if::cfg_if! {
     if #[cfg(feature="rt-tokio")] {
         #[macro_export]
-        macro_rules! mutex_try_lock {
+        macro_rules! asyncmutex_try_lock {
             ($x:expr) => {
                 $x.try_lock().ok()
             };
         }
+
+        #[macro_export]
+        macro_rules! asyncmutex_lock_arc {
+            ($x:expr) => {
+                $x.clone().lock_owned().await
+            };
+        }
     } else {
         #[macro_export]
-        macro_rules! mutex_try_lock {
+        macro_rules! asyncmutex_try_lock {
             ($x:expr) => {
                 $x.try_lock()
             };
         }
+        #[macro_export]
+        macro_rules! asyncmutex_lock_arc {
+            ($x:expr) => {
+                $x.lock_arc().await
+            };
+        }
     }
 }
 
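
These renamed macros wrap the runtime-specific ways of taking an async mutex: a non-blocking try_lock and an owned ("arc") guard that is not tied to a borrow of the Arc. A standalone usage sketch for the tokio flavor only (the real code selects the expansion with cfg_if on the rt-tokio feature):

    use std::sync::Arc;
    use tokio::sync::Mutex;

    macro_rules! asyncmutex_try_lock {
        ($x:expr) => {
            $x.try_lock().ok()
        };
    }
    macro_rules! asyncmutex_lock_arc {
        ($x:expr) => {
            $x.clone().lock_owned().await
        };
    }

    #[tokio::main]
    async fn main() {
        let m = Arc::new(Mutex::new(0u32));

        // Owned guard: can be held across awaits or passed into helper functions.
        let mut guard = asyncmutex_lock_arc!(&m);
        *guard += 1;

        // Non-blocking attempt: None while the owned guard above is still alive.
        assert!(asyncmutex_try_lock!(m).is_none());
        drop(guard);
        assert!(asyncmutex_try_lock!(m).is_some());
    }

The owned guard is what lets the StorageManager lock() helper earlier in this commit return the guard from a method and pass it into the *_inner helpers.
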