| use std::{ |
| cell::SyncUnsafeCell, |
| fs::File, |
| io::Write, |
| mem::{replace, take}, |
| path::PathBuf, |
| sync::atomic::{AtomicU32, AtomicU64, Ordering}, |
| }; |
|
|
| use anyhow::{Context, Result}; |
| use byteorder::{BE, WriteBytesExt}; |
| use lzzzz::lz4::{self, ACC_LEVEL_DEFAULT}; |
| use parking_lot::Mutex; |
| use rayon::{ |
| iter::{Either, IndexedParallelIterator, IntoParallelIterator, ParallelIterator}, |
| scope, |
| }; |
| use smallvec::SmallVec; |
| use thread_local::ThreadLocal; |
| use tracing::Span; |
|
|
| use crate::{ |
| ValueBuffer, |
| collector::Collector, |
| collector_entry::CollectorEntry, |
| constants::{MAX_MEDIUM_VALUE_SIZE, THREAD_LOCAL_SIZE_SHIFT}, |
| key::StoreKey, |
| meta_file_builder::MetaFileBuilder, |
| static_sorted_file_builder::{StaticSortedFileBuilder, StaticSortedFileBuilderMeta}, |
| }; |
|
|
| |
| |
| |
| |
| |
/// Per-thread state of a [`WriteBatch`]. It is accessed without
/// synchronization (via `SyncUnsafeCell`), so it must only be touched by its
/// owning thread — see `WriteBatch::thread_local_state` and the `unsafe`
/// contract of `WriteBatch::flush`.
struct ThreadLocalState<K: StoreKey + Send, const FAMILIES: usize> {
    /// One small collector per family, lazily created on first write to that
    /// family (`None` until then).
    collectors: [Option<Collector<K, THREAD_LOCAL_SIZE_SHIFT>>; FAMILIES],

    /// Blob files created by this thread so far, as
    /// (sequence number, file) pairs. Drained by `WriteBatch::finish`.
    new_blob_files: Vec<(u32, File)>,
}
|
|
/// Number of shards a family's global collector is split into once it
/// overflows (see `GlobalCollectorState::Sharded`).
const COLLECTOR_SHARDS: usize = 4;
/// Right shift applied to the 64-bit key hash to select a shard; this keeps
/// the top `trailing_zeros(COLLECTOR_SHARDS)` bits (= log2 when
/// COLLECTOR_SHARDS is a power of two) of the hash.
const COLLECTOR_SHARD_SHIFT: usize =
    u64::BITS as usize - COLLECTOR_SHARDS.trailing_zeros() as usize;
|
|
| |
/// The result of `WriteBatch::finish`: all files produced by the batch plus
/// bookkeeping for the caller.
pub(crate) struct FinishResult {
    /// The sequence number the database is at after this batch.
    pub(crate) sequence_number: u32,
    /// New meta files as (sequence number, file) pairs, one per family that
    /// produced SST files.
    pub(crate) new_meta_files: Vec<(u32, File)>,
    /// New static sorted files as (sequence number, file) pairs.
    pub(crate) new_sst_files: Vec<(u32, File)>,
    /// New blob files (large values) as (sequence number, file) pairs.
    pub(crate) new_blob_files: Vec<(u32, File)>,
    /// Total number of keys written by this batch.
    pub(crate) keys_written: u64,
}
|
|
/// The global (cross-thread) collector state of a single family.
enum GlobalCollectorState<K: StoreKey + Send> {
    /// A single collector; used while the amount of buffered data is small.
    Unsharded(Collector<K>),
    /// The collector was split into [`COLLECTOR_SHARDS`] shards, selected by
    /// the top bits of the key hash, so that full shards can be flushed to
    /// SST files independently of each other.
    Sharded([Collector<K>; COLLECTOR_SHARDS]),
}
|
|
| |
/// A write batch: buffers `put`/`delete` operations per thread and per family
/// and turns them into SST, meta and blob files when finished.
pub struct WriteBatch<K: StoreKey + Send, const FAMILIES: usize> {
    /// Directory all new files are written into.
    db_path: PathBuf,
    /// The last sequence number handed out; incremented for every new file
    /// created by this batch.
    current_sequence_number: AtomicU32,
    /// Per-thread collectors and blob-file lists.
    thread_locals: ThreadLocal<SyncUnsafeCell<ThreadLocalState<K, FAMILIES>>>,
    /// Global collector state, one per family.
    collectors: [Mutex<GlobalCollectorState<K>>; FAMILIES],
    /// Metadata of SST files written so far, one list per family; consumed by
    /// `finish` to build the per-family meta files.
    meta_collectors: [Mutex<Vec<(u32, StaticSortedFileBuilderMeta<'static>)>>; FAMILIES],

    /// SST files finished so far, as (sequence number, file) pairs.
    new_sst_files: Mutex<Vec<(u32, File)>>,
    /// Pool of idle global collectors kept for reuse.
    idle_collectors: Mutex<Vec<Collector<K>>>,
    /// Pool of idle thread-local collectors kept for reuse.
    idle_thread_local_collectors: Mutex<Vec<Collector<K, THREAD_LOCAL_SIZE_SHIFT>>>,
}
|
|
| impl<K: StoreKey + Send + Sync, const FAMILIES: usize> WriteBatch<K, FAMILIES> { |
| |
| pub(crate) fn new(path: PathBuf, current: u32) -> Self { |
| const { |
| assert!(FAMILIES <= usize_from_u32(u32::MAX)); |
| }; |
| Self { |
| db_path: path, |
| current_sequence_number: AtomicU32::new(current), |
| thread_locals: ThreadLocal::new(), |
| collectors: [(); FAMILIES] |
| .map(|_| Mutex::new(GlobalCollectorState::Unsharded(Collector::new()))), |
| meta_collectors: [(); FAMILIES].map(|_| Mutex::new(Vec::new())), |
| new_sst_files: Mutex::new(Vec::new()), |
| idle_collectors: Mutex::new(Vec::new()), |
| idle_thread_local_collectors: Mutex::new(Vec::new()), |
| } |
| } |
|
|
| |
| |
    /// Resets the write batch so it can be reused for a new batch, starting
    /// from the given current sequence number. Requires `&mut self`, so no
    /// concurrent writes can be in flight.
    pub(crate) fn reset(&mut self, current: u32) {
        self.current_sequence_number
            .store(current, Ordering::SeqCst);
    }
|
|
| |
    /// Returns the thread-local state of the current thread, creating it on
    /// first access.
    // `mut_from_ref` is intentional: the state is per-thread, see SAFETY below.
    #[allow(clippy::mut_from_ref)]
    fn thread_local_state(&self) -> &mut ThreadLocalState<K, FAMILIES> {
        let cell = self.thread_locals.get_or(|| {
            SyncUnsafeCell::new(ThreadLocalState {
                collectors: [const { None }; FAMILIES],
                new_blob_files: Vec::new(),
            })
        });
        // SAFETY: the cell lives in a `ThreadLocal`, so `get_or` returns the
        // current thread's cell and no other thread mutates it concurrently.
        // `flush` also dereferences these cells, which is why it is `unsafe`
        // and requires the caller to rule out concurrent writers.
        unsafe { &mut *cell.get() }
    }
|
|
| |
| fn thread_local_collector_mut<'l>( |
| &self, |
| state: &'l mut ThreadLocalState<K, FAMILIES>, |
| family: u32, |
| ) -> Result<&'l mut Collector<K, THREAD_LOCAL_SIZE_SHIFT>> { |
| debug_assert!(usize_from_u32(family) < FAMILIES); |
| let collector = state.collectors[usize_from_u32(family)].get_or_insert_with(|| { |
| self.idle_thread_local_collectors |
| .lock() |
| .pop() |
| .unwrap_or_else(|| Collector::new()) |
| }); |
| if collector.is_full() { |
| self.flush_thread_local_collector(family, collector)?; |
| } |
| Ok(collector) |
| } |
|
|
| #[tracing::instrument(level = "trace", skip(self, collector))] |
| fn flush_thread_local_collector( |
| &self, |
| family: u32, |
| collector: &mut Collector<K, THREAD_LOCAL_SIZE_SHIFT>, |
| ) -> Result<()> { |
| let mut full_collectors = SmallVec::<[_; 2]>::new(); |
| { |
| let mut global_collector_state = self.collectors[usize_from_u32(family)].lock(); |
| for entry in collector.drain() { |
| match &mut *global_collector_state { |
| GlobalCollectorState::Unsharded(collector) => { |
| collector.add_entry(entry); |
| if collector.is_full() { |
| |
| let mut shards: [Collector<K>; 4] = |
| [(); COLLECTOR_SHARDS].map(|_| Collector::new()); |
| for entry in collector.drain() { |
| let shard = (entry.key.hash >> COLLECTOR_SHARD_SHIFT) as usize; |
| shards[shard].add_entry(entry); |
| } |
| |
| |
| for collector in shards.iter_mut() { |
| if collector.is_full() { |
| full_collectors |
| .push(replace(&mut *collector, self.get_new_collector())); |
| } |
| } |
| *global_collector_state = GlobalCollectorState::Sharded(shards); |
| } |
| } |
| GlobalCollectorState::Sharded(shards) => { |
| let shard = (entry.key.hash >> COLLECTOR_SHARD_SHIFT) as usize; |
| let collector = &mut shards[shard]; |
| collector.add_entry(entry); |
| if collector.is_full() { |
| full_collectors |
| .push(replace(&mut *collector, self.get_new_collector())); |
| } |
| } |
| } |
| } |
| } |
| for mut global_collector in full_collectors { |
| |
| let sst = self.create_sst_file(family, global_collector.sorted())?; |
| global_collector.clear(); |
| self.new_sst_files.lock().push(sst); |
| self.dispose_collector(global_collector); |
| } |
| Ok(()) |
| } |
|
|
| fn get_new_collector(&self) -> Collector<K> { |
| self.idle_collectors |
| .lock() |
| .pop() |
| .unwrap_or_else(|| Collector::new()) |
| } |
|
|
    /// Returns a global collector to the idle pool for later reuse.
    fn dispose_collector(&self, collector: Collector<K>) {
        self.idle_collectors.lock().push(collector);
    }
|
|
    /// Returns a thread-local collector to the idle pool for later reuse.
    fn dispose_thread_local_collector(&self, collector: Collector<K, THREAD_LOCAL_SIZE_SHIFT>) {
        self.idle_thread_local_collectors.lock().push(collector);
    }
|
|
| |
| pub fn put(&self, family: u32, key: K, value: ValueBuffer<'_>) -> Result<()> { |
| let state = self.thread_local_state(); |
| let collector = self.thread_local_collector_mut(state, family)?; |
| if value.len() <= MAX_MEDIUM_VALUE_SIZE { |
| collector.put(key, value); |
| } else { |
| let (blob, file) = self.create_blob(&value)?; |
| collector.put_blob(key, blob); |
| state.new_blob_files.push((blob, file)); |
| } |
| Ok(()) |
| } |
|
|
| |
    /// Adds a delete operation for `key` in `family` to the write batch.
    pub fn delete(&self, family: u32, key: K) -> Result<()> {
        let state = self.thread_local_state();
        let collector = self.thread_local_collector_mut(state, family)?;
        collector.delete(key);
        Ok(())
    }
|
|
| |
| |
| |
| |
| |
| |
| |
    /// Flushes all pending operations of `family` to SST files: drains every
    /// thread's thread-local collector into the global collector, then writes
    /// the global collector(s) out.
    ///
    /// # Safety
    ///
    /// The caller must ensure no concurrent `put`/`delete` (or any other use
    /// of the thread-local state) is in progress, since this dereferences
    /// every thread's `SyncUnsafeCell<ThreadLocalState>` mutably.
    #[tracing::instrument(level = "trace", skip(self))]
    pub unsafe fn flush(&self, family: u32) -> Result<()> {
        // Take the non-empty thread-local collectors of this family from all
        // threads.
        let mut collectors = Vec::new();
        for cell in self.thread_locals.iter() {
            // SAFETY: guaranteed by the caller (see `# Safety` above).
            let state = unsafe { &mut *cell.get() };
            if let Some(collector) = state.collectors[usize_from_u32(family)].take()
                && !collector.is_empty()
            {
                collectors.push(collector);
            }
        }

        // Drain them into the global collector in parallel, keeping the
        // current tracing span attached to each rayon task.
        let span = Span::current();
        collectors.into_par_iter().try_for_each(|mut collector| {
            let _span = span.clone().entered();
            self.flush_thread_local_collector(family, &mut collector)?;
            self.dispose_thread_local_collector(collector);
            anyhow::Ok(())
        })?;

        // Write the global collector(s) to SST files.
        let mut collector_state = self.collectors[usize_from_u32(family)].lock();
        match &mut *collector_state {
            GlobalCollectorState::Unsharded(collector) => {
                if !collector.is_empty() {
                    let sst = self.create_sst_file(family, collector.sorted())?;
                    collector.clear();
                    self.new_sst_files.lock().push(sst);
                }
            }
            GlobalCollectorState::Sharded(_) => {
                // Swap the sharded state for a fresh unsharded collector so the
                // shards can be consumed by value in parallel below.
                let GlobalCollectorState::Sharded(shards) = replace(
                    &mut *collector_state,
                    GlobalCollectorState::Unsharded(self.get_new_collector()),
                ) else {
                    unreachable!();
                };
                shards.into_par_iter().try_for_each(|mut collector| {
                    let _span = span.clone().entered();
                    if !collector.is_empty() {
                        let sst = self.create_sst_file(family, collector.sorted())?;
                        collector.clear();
                        self.new_sst_files.lock().push(sst);
                        self.dispose_collector(collector);
                    }
                    anyhow::Ok(())
                })?;
            }
        }

        Ok(())
    }
|
|
| |
| |
    /// Finishes the write batch: flushes all thread-local and global
    /// collectors, writes the per-family meta files, and returns every file
    /// the batch produced together with the final sequence number.
    #[tracing::instrument(level = "trace", skip(self))]
    pub(crate) fn finish(&mut self) -> Result<FinishResult> {
        let mut new_blob_files = Vec::new();
        // First error from any spawned flush task; checked after the collector
        // flush phase below.
        let shared_error = Mutex::new(Ok(()));

        // Phase 1: move every thread's collectors and blob files out of the
        // thread-local state (safe here: `&mut self` + `iter_mut`), then flush
        // them into the global collectors on background tasks.
        scope(|scope| {
            let _span = tracing::trace_span!("flush thread local collectors").entered();
            let mut collectors = [const { Vec::new() }; FAMILIES];
            for cell in self.thread_locals.iter_mut() {
                let state = cell.get_mut();
                new_blob_files.append(&mut state.new_blob_files);
                for (family, thread_local_collector) in state.collectors.iter_mut().enumerate() {
                    if let Some(collector) = thread_local_collector.take()
                        && !collector.is_empty()
                    {
                        collectors[family].push(collector);
                    }
                }
            }
            for (family, thread_local_collectors) in collectors.into_iter().enumerate() {
                for mut collector in thread_local_collectors {
                    let this = &self;
                    let shared_error = &shared_error;
                    let span = Span::current();
                    scope.spawn(move |_| {
                        let _span = span.entered();
                        if let Err(err) =
                            this.flush_thread_local_collector(family as u32, &mut collector)
                        {
                            *shared_error.lock() = Err(err);
                        }
                        this.dispose_thread_local_collector(collector);
                    });
                }
            }
        });

        let _span = tracing::trace_span!("flush collectors").entered();

        // Phase 2: take all SST files produced so far and write out the global
        // collectors (one task per collector / shard) in parallel.
        let mut new_sst_files = take(self.new_sst_files.get_mut());
        let shared_new_sst_files = Mutex::new(&mut new_sst_files);

        // Swap in fresh global collectors so the batch stays usable after
        // `finish`.
        let new_collectors = [(); FAMILIES]
            .map(|_| Mutex::new(GlobalCollectorState::Unsharded(self.get_new_collector())));
        let collectors = replace(&mut self.collectors, new_collectors);
        let span = Span::current();
        collectors
            .into_par_iter()
            .enumerate()
            .flat_map(|(family, state)| {
                // Flatten the sharded case into (family, collector) pairs.
                let collector = state.into_inner();
                match collector {
                    GlobalCollectorState::Unsharded(collector) => {
                        Either::Left([(family, collector)].into_par_iter())
                    }
                    GlobalCollectorState::Sharded(shards) => Either::Right(
                        shards
                            .into_par_iter()
                            .map(move |collector| (family, collector)),
                    ),
                }
            })
            .try_for_each(|(family, mut collector)| {
                let _span = span.clone().entered();
                let family = family as u32;
                if !collector.is_empty() {
                    let sst = self.create_sst_file(family, collector.sorted())?;
                    collector.clear();
                    self.dispose_collector(collector);
                    shared_new_sst_files.lock().push(sst);
                }
                anyhow::Ok(())
            })?;

        // Propagate any error from the phase-1 background tasks.
        shared_error.into_inner()?;

        // Phase 3: build one meta file per family that produced SST files,
        // and count the total number of keys written.
        let new_meta_collectors = [(); FAMILIES].map(|_| Mutex::new(Vec::new()));
        let meta_collectors = replace(&mut self.meta_collectors, new_meta_collectors);
        let keys_written = AtomicU64::new(0);
        let new_meta_files = meta_collectors
            .into_par_iter()
            .map(|mutex| mutex.into_inner())
            .enumerate()
            .filter(|(_, sst_files)| !sst_files.is_empty())
            .map(|(family, sst_files)| {
                let family = family as u32;
                let mut entries = 0;
                let mut builder = MetaFileBuilder::new(family);
                for (seq, sst) in sst_files {
                    entries += sst.entries;
                    builder.add(seq, sst);
                }
                keys_written.fetch_add(entries, Ordering::Relaxed);
                // fetch_add returns the previous value, so `+ 1` is the
                // sequence number this meta file gets.
                let seq = self.current_sequence_number.fetch_add(1, Ordering::SeqCst) + 1;
                let file = builder.write(&self.db_path, seq)?;
                Ok((seq, file))
            })
            .collect::<Result<Vec<_>>>()?;

        let seq = self.current_sequence_number.load(Ordering::SeqCst);
        Ok(FinishResult {
            sequence_number: seq,
            new_meta_files,
            new_sst_files,
            new_blob_files,
            keys_written: keys_written.into_inner(),
        })
    }
|
|
| |
| |
| #[tracing::instrument(level = "trace", skip(self, value), fields(value_len = value.len()))] |
| fn create_blob(&self, value: &[u8]) -> Result<(u32, File)> { |
| let seq = self.current_sequence_number.fetch_add(1, Ordering::SeqCst) + 1; |
| let mut buffer = Vec::new(); |
| buffer.write_u32::<BE>(value.len() as u32)?; |
| lz4::compress_to_vec(value, &mut buffer, ACC_LEVEL_DEFAULT) |
| .context("Compression of value for blob file failed")?; |
|
|
| let file = self.db_path.join(format!("{seq:08}.blob")); |
| let mut file = File::create(&file).context("Unable to create blob file")?; |
| file.write_all(&buffer) |
| .context("Unable to write blob file")?; |
| file.flush().context("Unable to flush blob file")?; |
| Ok((seq, file)) |
| } |
|
|
| |
| |
    /// Writes the given sorted entries to a new SST file (`{seq:08}.sst`) in
    /// the database directory and records its metadata for the family's meta
    /// file. Returns the SST's sequence number and the open file handle.
    ///
    /// `collector_data` is `(sorted entries, total key size, total value
    /// size)` as produced by `Collector::sorted`.
    #[tracing::instrument(level = "trace", skip(self, collector_data))]
    fn create_sst_file(
        &self,
        family: u32,
        collector_data: (&[CollectorEntry<K>], usize, usize),
    ) -> Result<(u32, File)> {
        let (entries, total_key_size, total_value_size) = collector_data;
        // fetch_add returns the previous value, so `+ 1` is this file's
        // sequence number.
        let seq = self.current_sequence_number.fetch_add(1, Ordering::SeqCst) + 1;

        let builder = StaticSortedFileBuilder::new(entries, total_key_size, total_value_size)?;

        let path = self.db_path.join(format!("{seq:08}.sst"));
        let (meta, file) = builder
            .write(&path)
            .with_context(|| format!("Unable to write SST file {seq:08}.sst"))?;

        // Optional debug verification: re-open the freshly written SST file
        // and check that every entry can be looked up with its expected value.
        #[cfg(feature = "verify_sst_content")]
        {
            use core::panic;

            use crate::{
                collector_entry::CollectorEntryValue,
                key::hash_key,
                lookup_entry::LookupValue,
                static_sorted_file::{
                    BlockCache, SstLookupResult, StaticSortedFile, StaticSortedFileMetaData,
                },
                static_sorted_file_builder::Entry,
            };

            file.sync_all()?;
            let sst = StaticSortedFile::open(
                &self.db_path,
                StaticSortedFileMetaData {
                    sequence_number: seq,
                    key_compression_dictionary_length: meta.key_compression_dictionary_length,
                    value_compression_dictionary_length: meta.value_compression_dictionary_length,
                    block_count: meta.block_count,
                },
            )?;
            let cache2 = BlockCache::with(
                10,
                u64::MAX,
                Default::default(),
                Default::default(),
                Default::default(),
            );
            let cache3 = BlockCache::with(
                10,
                u64::MAX,
                Default::default(),
                Default::default(),
                Default::default(),
            );
            let mut key_buf = Vec::new();
            for entry in entries {
                entry.write_key_to(&mut key_buf);
                let result = sst
                    .lookup(hash_key(&key_buf), &key_buf, &cache2, &cache3)
                    .expect("key found");
                key_buf.clear();
                match result {
                    SstLookupResult::Found(LookupValue::Deleted) => {}
                    SstLookupResult::Found(LookupValue::Slice {
                        value: lookup_value,
                    }) => {
                        let expected_value_slice = match &entry.value {
                            CollectorEntryValue::Small { value } => &**value,
                            CollectorEntryValue::Medium { value } => &**value,
                            _ => panic!("Unexpected value"),
                        };
                        assert_eq!(*lookup_value, *expected_value_slice);
                    }
                    // Blob values are verified only by presence, not content.
                    SstLookupResult::Found(LookupValue::Blob { sequence_number: _ }) => {}
                    SstLookupResult::NotFound => panic!("All keys must exist"),
                }
            }
        }

        // Record the SST metadata so `finish` can build the family's meta
        // file.
        self.meta_collectors[usize_from_u32(family)]
            .lock()
            .push((seq, meta));

        Ok((seq, file))
    }
| } |
|
|
/// Losslessly converts a `u32` into a `usize`.
///
/// Usable in `const` contexts (unlike `usize::try_from(..).unwrap()`).
#[inline(always)]
const fn usize_from_u32(value: u32) -> usize {
    // Guard the cast at compile time: `u32` must fit in `usize`. This must be
    // `<=`, not `<` — with `<` the assert fails on 32-bit targets, where both
    // types are exactly 32 bits wide and the cast is still lossless. Only
    // sub-32-bit `usize` targets should be rejected.
    const {
        assert!(u32::BITS <= usize::BITS);
    };
    value as usize
}
|
|