Several more corrections from clippy + formatting.
MathieuDutSik committed Nov 14, 2024
1 parent 09767a3 commit b399617
Showing 4 changed files with 84 additions and 26 deletions.
82 changes: 65 additions & 17 deletions linera-views/src/backends/scylla_db.rs
@@ -165,7 +165,10 @@ impl ScyllaDbClient {
root_key: &[u8],
key: Vec<u8>,
) -> Result<Option<Vec<u8>>, ScyllaDbStoreInternalError> {
ensure!(key.len() <= MAX_KEY_SIZE, ScyllaDbStoreInternalError::KeyTooLong);
ensure!(
key.len() <= MAX_KEY_SIZE,
ScyllaDbStoreInternalError::KeyTooLong
);
let session = &self.session;
// Read the value of a key
let values = (root_key.to_vec(), key);
@@ -191,7 +194,10 @@ impl ScyllaDbClient {
let mut inputs = Vec::new();
inputs.push(root_key.to_vec());
for (i_key, key) in keys.into_iter().enumerate() {
ensure!(key.len() <= MAX_KEY_SIZE, ScyllaDbStoreInternalError::KeyTooLong);
ensure!(
key.len() <= MAX_KEY_SIZE,
ScyllaDbStoreInternalError::KeyTooLong
);
match map.entry(key.clone()) {
Entry::Occupied(entry) => {
let entry = entry.into_mut();
@@ -237,7 +243,10 @@ impl ScyllaDbClient {
let mut inputs = Vec::new();
inputs.push(root_key.to_vec());
for (i_key, key) in keys.into_iter().enumerate() {
ensure!(key.len() <= MAX_KEY_SIZE, ScyllaDbStoreInternalError::KeyTooLong);
ensure!(
key.len() <= MAX_KEY_SIZE,
ScyllaDbStoreInternalError::KeyTooLong
);
match map.entry(key.clone()) {
Entry::Occupied(entry) => {
let entry = entry.into_mut();
@@ -273,7 +282,10 @@ impl ScyllaDbClient {
root_key: &[u8],
key: Vec<u8>,
) -> Result<bool, ScyllaDbStoreInternalError> {
ensure!(key.len() <= MAX_KEY_SIZE, ScyllaDbStoreInternalError::KeyTooLong);
ensure!(
key.len() <= MAX_KEY_SIZE,
ScyllaDbStoreInternalError::KeyTooLong
);
let session = &self.session;
// Read the value of a key
let values = (root_key.to_vec(), key);
@@ -293,8 +305,14 @@ impl ScyllaDbClient {
let query1 = &self.write_batch_delete_prefix_unbounded;
let query2 = &self.write_batch_delete_prefix_bounded;
println!("|batch|={}", batch.len());
ensure!(batch.len() <= MAX_BATCH_SIZE, ScyllaDbStoreInternalError::TooLargeBatch);
println!("|key_prefix_deletions|={}", batch.key_prefix_deletions.len());
ensure!(
batch.len() <= MAX_BATCH_SIZE,
ScyllaDbStoreInternalError::TooLargeBatch
);
println!(
"|key_prefix_deletions|={}",
batch.key_prefix_deletions.len()
);
for key_prefix in batch.key_prefix_deletions {
ensure!(
key_prefix.len() <= MAX_KEY_SIZE,
@@ -314,7 +332,10 @@ impl ScyllaDbClient {
}
}
let query3 = &self.write_batch_deletion;
println!("|deletions|={}", batch.simple_unordered_batch.deletions.len());
println!(
"|deletions|={}",
batch.simple_unordered_batch.deletions.len()
);
for key in batch.simple_unordered_batch.deletions {
ensure!(
key.len() <= MAX_KEY_SIZE,
@@ -325,12 +346,18 @@ impl ScyllaDbClient {
batch_query.append_statement(query3.clone());
}
let query4 = &self.write_batch_insertion;
println!("|insertions|={}", batch.simple_unordered_batch.insertions.len());
println!(
"|insertions|={}",
batch.simple_unordered_batch.insertions.len()
);
let mut total_size = 0;
for (key, value) in batch.simple_unordered_batch.insertions {
println!("|key|={} |value|={}", key.len(), value.len());
total_size += key.len() + value.len();
ensure!(key.len() <= MAX_KEY_SIZE, ScyllaDbStoreInternalError::KeyTooLong);
ensure!(
key.len() <= MAX_KEY_SIZE,
ScyllaDbStoreInternalError::KeyTooLong
);
ensure!(
value.len() <= RAW_MAX_VALUE_SIZE,
ScyllaDbStoreInternalError::ValueTooLong
@@ -485,7 +512,10 @@ impl ReadableKeyValueStore for ScyllaDbStoreInternal {
self.max_stream_queries
}

async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, ScyllaDbStoreInternalError> {
async fn read_value_bytes(
&self,
key: &[u8],
) -> Result<Option<Vec<u8>>, ScyllaDbStoreInternalError> {
let store = self.store.deref();
let _guard = self.acquire().await;
store
@@ -501,7 +531,10 @@ impl ReadableKeyValueStore for ScyllaDbStoreInternal {
.await
}

async fn contains_keys(&self, keys: Vec<Vec<u8>>) -> Result<Vec<bool>, ScyllaDbStoreInternalError> {
async fn contains_keys(
&self,
keys: Vec<Vec<u8>>,
) -> Result<Vec<bool>, ScyllaDbStoreInternalError> {
if keys.is_empty() {
return Ok(Vec::new());
}
@@ -702,7 +735,10 @@ impl AdminKeyValueStore for ScyllaDbStoreInternal {
Ok(())
}

async fn exists(config: &Self::Config, namespace: &str) -> Result<bool, ScyllaDbStoreInternalError> {
async fn exists(
config: &Self::Config,
namespace: &str,
) -> Result<bool, ScyllaDbStoreInternalError> {
Self::check_namespace(namespace)?;
let session = SessionBuilder::new()
.known_node(config.uri.as_str())
@@ -746,7 +782,10 @@ impl AdminKeyValueStore for ScyllaDbStoreInternal {
}
}

async fn create(config: &Self::Config, namespace: &str) -> Result<(), ScyllaDbStoreInternalError> {
async fn create(
config: &Self::Config,
namespace: &str,
) -> Result<(), ScyllaDbStoreInternalError> {
Self::check_namespace(namespace)?;
let session = SessionBuilder::new()
.known_node(config.uri.as_str())
@@ -771,7 +810,10 @@ impl AdminKeyValueStore for ScyllaDbStoreInternal {
Ok(())
}

async fn delete(config: &Self::Config, namespace: &str) -> Result<(), ScyllaDbStoreInternalError> {
async fn delete(
config: &Self::Config,
namespace: &str,
) -> Result<(), ScyllaDbStoreInternalError> {
Self::check_namespace(namespace)?;
let session = SessionBuilder::new()
.known_node(config.uri.as_str())
@@ -828,12 +870,18 @@ impl TestKeyValueStore for JournalingKeyValueStore<ScyllaDbStoreInternal> {

/// The `ScyllaDbStore` composed type with metrics
#[cfg(with_metrics)]
pub type ScyllaDbStore =
MeteredStore<LruCachingStore<MeteredStore<ValueSplittingStore<MeteredStore<JournalingKeyValueStore<ScyllaDbStoreInternal>>>>>>;
pub type ScyllaDbStore = MeteredStore<
LruCachingStore<
MeteredStore<
ValueSplittingStore<MeteredStore<JournalingKeyValueStore<ScyllaDbStoreInternal>>>,
>,
>,
>;

/// The `ScyllaDbStore` composed type
#[cfg(not(with_metrics))]
pub type ScyllaDbStore = LruCachingStore<ValueSplittingStore<JournalingKeyValueStore<ScyllaDbStoreInternal>>>;
pub type ScyllaDbStore =
LruCachingStore<ValueSplittingStore<JournalingKeyValueStore<ScyllaDbStoreInternal>>>;

/// The `ScyllaDbStoreConfig` input type
pub type ScyllaDbStoreConfig = LruSplittingConfig<ScyllaDbStoreInternalConfig>;
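The size checks reformatted in the hunks above all follow one guard pattern: validate the key, value, or batch length against a constant bound and return a dedicated error before any ScyllaDB query is issued. Below is a minimal, self-contained sketch of that pattern; the constants, the error enum, and the local `ensure!` macro are simplified stand-ins for illustration, not the crate's actual definitions.

// Simplified stand-ins for the crate's constants and error type (illustration only).
const MAX_KEY_SIZE: usize = 1024;
const RAW_MAX_VALUE_SIZE: usize = 1024 * 1024;

#[derive(Debug)]
enum StoreError {
    KeyTooLong,
    ValueTooLong,
}

// Local sketch of an `ensure!`-style macro: bail out with the given error
// when the condition does not hold.
macro_rules! ensure {
    ($cond:expr, $err:expr) => {
        if !($cond) {
            return Err($err);
        }
    };
}

// The guard pattern from the hunks above: reject oversized keys and values up front.
fn check_entry(key: &[u8], value: &[u8]) -> Result<(), StoreError> {
    ensure!(key.len() <= MAX_KEY_SIZE, StoreError::KeyTooLong);
    ensure!(value.len() <= RAW_MAX_VALUE_SIZE, StoreError::ValueTooLong);
    Ok(())
}

fn main() {
    assert!(check_entry(&[0u8; 16], &[0u8; 16]).is_ok());
    assert!(check_entry(&vec![0u8; 4096], &[0u8; 16]).is_err());
}
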
5 changes: 5 additions & 0 deletions linera-views/src/batch.rs
@@ -153,6 +153,11 @@ impl UnorderedBatch {
pub fn len(&self) -> usize {
self.key_prefix_deletions.len() + self.simple_unordered_batch.len()
}

/// Tests whether the batch is empty or not
pub fn is_empty(&self) -> bool {
self.key_prefix_deletions.len() == 0 && self.simple_unordered_batch.len() == 0
}
}

/// Checks if `key` is matched by any prefix in `key_prefix_set`.
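The `is_empty` added above is the standard companion to `len` that clippy's `len_without_is_empty` lint asks for: a type exposing `len` should also say cheaply whether it holds nothing. The following stand-in sketch (not the crate's `UnorderedBatch`, whose fields are only partially visible in this hunk) shows the pairing and how a caller would use it.

// Toy stand-in for a batch type with two internal collections, mirroring the
// `len`/`is_empty` pairing added in the hunk above (illustration only).
#[derive(Default)]
struct ToyBatch {
    key_prefix_deletions: Vec<Vec<u8>>,
    insertions: Vec<(Vec<u8>, Vec<u8>)>,
}

impl ToyBatch {
    /// Number of operations recorded in the batch.
    fn len(&self) -> usize {
        self.key_prefix_deletions.len() + self.insertions.len()
    }

    /// True when no operation has been recorded; this is the companion that
    /// clippy's `len_without_is_empty` lint expects next to `len`.
    fn is_empty(&self) -> bool {
        self.key_prefix_deletions.is_empty() && self.insertions.is_empty()
    }
}

fn main() {
    let mut batch = ToyBatch::default();
    assert!(batch.is_empty());
    batch.insertions.push((vec![42], vec![7]));
    assert_eq!(batch.len(), 1);
    assert!(!batch.is_empty());
}
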
15 changes: 12 additions & 3 deletions linera-views/src/test_utils/mod.rs
@@ -419,12 +419,18 @@ pub async fn run_writes_from_blank<C: LocalRestrictedKeyValueStore>(key_value_st
}

/// Doing a big read of many keys could trigger some error. That needs to be tested.
pub async fn big_read_multi_values<C: LocalKeyValueStore>(config: C::Config, value_size: usize, n_entries: usize) {
pub async fn big_read_multi_values<C: LocalKeyValueStore>(
config: C::Config,
value_size: usize,
n_entries: usize,
) {
let mut rng = make_deterministic_rng();
let namespace = generate_test_namespace();
let root_key = &[];
//
let store = C::recreate_and_connect(&config, &namespace, root_key).await.unwrap();
let store = C::recreate_and_connect(&config, &namespace, root_key)
.await
.unwrap();
let key_prefix = vec![42, 54];
let mut batch = Batch::new();
let mut keys = Vec::new();
@@ -497,7 +503,10 @@ pub async fn run_big_write_read<C: LocalRestrictedKeyValueStore>(
let mut rng = make_deterministic_rng();
for (pos, value_size) in value_sizes.into_iter().enumerate() {
let n_entry: usize = target_size / value_size;
println!("n_entry={} target_size={} value_size={}", n_entry, target_size, value_size);
println!(
"n_entry={} target_size={} value_size={}",
n_entry, target_size, value_size
);
let mut batch = Batch::new();
let key_prefix = vec![0, pos as u8];
for i in 0..n_entry {
8 changes: 2 additions & 6 deletions linera-views/tests/store_tests.rs
@@ -32,9 +32,7 @@ async fn test_read_multi_values_memory() {
#[tokio::test]
async fn test_read_multi_values_dynamo_db() {
use linera_views::dynamo_db::DynamoDbStore;
let config = DynamoDbStore::new_test_config()
.await
.unwrap();
let config = DynamoDbStore::new_test_config().await.unwrap();
big_read_multi_values::<DynamoDbStore>(config, 22000000, 1000).await;
}

@@ -43,9 +41,7 @@ async fn test_read_multi_values_dynamo_db() {
#[tokio::test]
async fn test_read_multi_values_scylla_db() {
use linera_views::scylla_db::ScyllaDbStore;
let config = ScyllaDbStore::new_test_config()
.await
.unwrap();
let config = ScyllaDbStore::new_test_config().await.unwrap();
big_read_multi_values::<ScyllaDbStore>(config, 22200000, 200).await;
}

