clippy refactoring
dermesser committed Jun 15, 2024
1 parent deba74f · commit 0d24811
Showing 21 changed files with 263 additions and 318 deletions.
8 changes: 4 additions & 4 deletions examples/mcpe/src/main.rs
@@ -5,8 +5,8 @@ use rusty_leveldb::{Compressor, CompressorList, Options, DB};
use std::rc::Rc;

/// A zlib compressor that with zlib wrapper
- ///
- /// This is use for old world format
+ ///
+ /// This is use for old world format
struct ZlibCompressor(u8);

impl ZlibCompressor {
@@ -67,10 +67,10 @@ pub fn mcpe_options(compression_level: u8) -> Options {

// Mojang create a custom [compressor list](https://github.com/reedacartwright/rbedrock/blob/fb32a899da4e15c1aaa0d6de2b459e914e183516/src/leveldb-mcpe/include/leveldb/options.h#L123)
// Sample config for compressor list can be find in [here](https://github.com/reedacartwright/rbedrock/blob/fb32a899da4e15c1aaa0d6de2b459e914e183516/src/leveldb-mcpe/mcpe_sample_setup.cpp#L24-L28)
- //
+ //
// Their compression id can be find in [here](https://github.com/reedacartwright/rbedrock/blob/fb32a899da4e15c1aaa0d6de2b459e914e183516/src/leveldb-mcpe/include/leveldb/zlib_compressor.h#L38)
// and [here](https://github.com/reedacartwright/rbedrock/blob/fb32a899da4e15c1aaa0d6de2b459e914e183516/src/leveldb-mcpe/include/leveldb/zlib_compressor.h#L48)
- //
+ //
// Compression id will be use in [here](https://github.com/reedacartwright/rbedrock/blob/fb32a899da4e15c1aaa0d6de2b459e914e183516/src/leveldb-mcpe/table/format.cc#L125-L150)
let mut list = CompressorList::new();
list.set_with_id(0, NoneCompressor::default());
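Editorial aside, not part of the commit: a minimal sketch of how the ids described in the comments above could be registered, assuming the tuple-struct ZlibCompressor(u8) defined earlier in this example, a NoneCompressor type exported by rusty_leveldb, and the id assignments (0 = none, 2 = zlib with header, 4 = raw zlib) taken from the linked Mojang headers.

    // Sketch only, under the assumptions stated above; a raw-zlib wrapper for
    // id 4 would be registered the same way as the header variant shown here.
    fn sketch_compressor_list(compression_level: u8) -> CompressorList {
        let mut list = CompressorList::new();
        list.set_with_id(0, NoneCompressor::default()); // id 0: stored uncompressed
        list.set_with_id(2, ZlibCompressor(compression_level)); // id 2: zlib with header (old worlds)
        list
    }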
2 changes: 1 addition & 1 deletion src/asyncdb.rs
@@ -234,7 +234,7 @@ impl AsyncDB {
Request::GetAt { snapshot, key } => {
let snapshot_id = snapshot.0;
if let Some(snapshot) = snapshots.get(&snapshot_id) {
- let ok = db.get_at(&snapshot, &key);
+ let ok = db.get_at(snapshot, &key);
match ok {
Err(e) => {
message.resp_channel.send(Response::Error(e)).ok();
4 changes: 2 additions & 2 deletions src/block_builder.rs
@@ -73,7 +73,7 @@ impl BlockBuilder {
}
} else {
self.restarts.push(self.buffer.len() as u32);
- self.last_key.resize(0, 0);
+ self.last_key.clear();
self.restart_counter = 0;
}

@@ -105,7 +105,7 @@ impl BlockBuilder {
// 1. Append RESTARTS
for r in self.restarts.iter() {
self.buffer
- .write_fixedint(*r as u32)
+ .write_fixedint(*r)
.expect("write to buffer failed");
}

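Editorial aside, not part of the commit: the restart bookkeeping above follows the standard LevelDB block layout, in which the entry data is followed by every restart offset and then the restart count, each stored as a fixed-width u32. A minimal sketch of that trailer, assuming write_fixedint comes from the integer-encoding crate's FixedIntWriter trait as used above:

    use integer_encoding::FixedIntWriter;

    // Appends the assumed block trailer: all restart offsets, then their count,
    // each as a fixed-width u32.
    fn sketch_block_trailer(buffer: &mut Vec<u8>, restarts: &[u32]) {
        for &r in restarts {
            buffer.write_fixedint(r).expect("write to buffer failed");
        }
        buffer
            .write_fixedint(restarts.len() as u32)
            .expect("write to buffer failed");
    }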
22 changes: 11 additions & 11 deletions src/cache.rs
@@ -77,7 +77,7 @@ impl<T> LRUList<T> {
assert!(self.head.prev.is_some());
self.head.prev = last.prev;
self.count -= 1;
- (*last).data.take()
+ last.data.take()
} else {
None
}
@@ -214,7 +214,7 @@ impl<T> Cache<T> {
pub fn get<'a>(&'a mut self, key: &CacheKey) -> Option<&'a T> {
match self.map.get(key) {
None => None,
- Some(&(ref elem, ref lru_handle)) => {
+ Some((elem, lru_handle)) => {
self.list.reinsert_front(*lru_handle);
Some(elem)
}
@@ -347,19 +347,19 @@ mod tests {
let handle2 = lru.insert(22);
let handle3 = lru.insert(244);

- assert_eq!(lru._testing_head_ref().map(|r| (*r)).unwrap(), 244);
+ assert_eq!(lru._testing_head_ref().copied().unwrap(), 244);

lru.reinsert_front(handle1);

- assert_eq!(lru._testing_head_ref().map(|r| (*r)).unwrap(), 56);
+ assert_eq!(lru._testing_head_ref().copied().unwrap(), 56);

lru.reinsert_front(handle3);

- assert_eq!(lru._testing_head_ref().map(|r| (*r)).unwrap(), 244);
+ assert_eq!(lru._testing_head_ref().copied().unwrap(), 244);

lru.reinsert_front(handle2);

- assert_eq!(lru._testing_head_ref().map(|r| (*r)).unwrap(), 22);
+ assert_eq!(lru._testing_head_ref().copied().unwrap(), 22);

assert_eq!(lru.remove_last(), Some(56));
assert_eq!(lru.remove_last(), Some(244));
@@ -370,7 +370,7 @@
fn test_blockcache_lru_reinsert_2() {
let mut lru = LRUList::<usize>::new();

- let handles = vec![
+ let handles = [
lru.insert(0),
lru.insert(1),
lru.insert(2),
@@ -382,10 +382,10 @@
lru.insert(8),
];

- for i in 0..9 {
+ (0..9).for_each(|i| {
lru.reinsert_front(handles[i]);
- assert_eq!(lru._testing_head_ref().map(|x| *x), Some(i));
- }
+ assert_eq!(lru._testing_head_ref().copied(), Some(i));
+ });
}

#[test]
@@ -395,7 +395,7 @@
let handle = lru.insert(3);

lru.reinsert_front(handle);
- assert_eq!(lru._testing_head_ref().map(|x| *x), Some(3));
+ assert_eq!(lru._testing_head_ref().copied(), Some(3));
assert_eq!(lru.remove_last(), Some(3));
assert_eq!(lru.remove_last(), None);
assert_eq!(lru.remove_last(), None);
1 change: 0 additions & 1 deletion src/cmp.rs
@@ -180,7 +180,6 @@ impl Cmp for MemtableKeyCmp {
mod tests {
use super::*;
use key_types::LookupKey;
- use types;

#[test]
fn test_cmp_defaultcmp_shortest_sep() {
22 changes: 8 additions & 14 deletions src/db_iter.rs
@@ -9,8 +9,6 @@ use std::cmp::Ordering;
use std::mem;
use std::rc::Rc;

- use rand;
-
const READ_BYTES_PERIOD: isize = 1048576;

/// DBIterator is an iterator over the contents of a database.
@@ -464,18 +462,14 @@ mod tests {
// xx5 should not be visible.
db.put(b"xx5", b"223").unwrap();

- let expected: HashMap<Vec<u8>, Vec<u8>> = HashMap::from_iter(
- vec![
- (b"xx1".to_vec(), b"111".to_vec()),
- (b"xx4".to_vec(), b"222".to_vec()),
- (b"aaa".to_vec(), b"val1".to_vec()),
- (b"cab".to_vec(), b"val2".to_vec()),
- ]
- .into_iter(),
- );
- let non_existing: HashSet<Vec<u8>> = HashSet::from_iter(
- vec![b"gca".to_vec(), b"xx2".to_vec(), b"xx5".to_vec()].into_iter(),
- );
+ let expected: HashMap<Vec<u8>, Vec<u8>> = HashMap::from_iter(vec![
+ (b"xx1".to_vec(), b"111".to_vec()),
+ (b"xx4".to_vec(), b"222".to_vec()),
+ (b"aaa".to_vec(), b"val1".to_vec()),
+ (b"cab".to_vec(), b"val2".to_vec()),
+ ]);
+ let non_existing: HashSet<Vec<u8>> =
+ HashSet::from_iter(vec![b"gca".to_vec(), b"xx2".to_vec(), b"xx5".to_vec()]);

let mut iter = db.new_iter_at(ss.clone()).unwrap();
for (k, v) in LdbIteratorIter::wrap(&mut iter) {
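Editorial aside, not part of the commit: a generic illustration of why the .into_iter() calls above could be dropped. FromIterator::from_iter accepts any IntoIterator, so passing the Vec directly is equivalent; the redundant adapter is what clippy::useless_conversion flags.

    use std::collections::HashMap;

    // Both forms build the same map; the second simply omits the redundant adapter.
    fn sketch_from_iter() -> (HashMap<&'static str, i32>, HashMap<&'static str, i32>) {
        let verbose = HashMap::from_iter(vec![("a", 1), ("b", 2)].into_iter());
        let direct = HashMap::from_iter(vec![("a", 1), ("b", 2)]);
        (verbose, direct)
    }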
31 changes: 20 additions & 11 deletions src/disk_env.rs
@@ -17,6 +17,12 @@ pub struct PosixDiskEnv {
locks: Arc<Mutex<HashMap<String, File>>>,
}

+ impl Default for PosixDiskEnv {
+ fn default() -> Self {
+ Self::new()
+ }
+ }
+
impl PosixDiskEnv {
pub fn new() -> PosixDiskEnv {
PosixDiskEnv {
@@ -44,19 +50,20 @@ impl Env for PosixDiskEnv {
))
}
fn open_random_access_file(&self, p: &Path) -> Result<Box<dyn RandomAccess>> {
- Ok(fs::OpenOptions::new()
+ fs::OpenOptions::new()
.read(true)
.open(p)
.map(|f| {
let b: Box<dyn RandomAccess> = Box::new(f);
b
})
- .map_err(|e| map_err_with_name("open (randomaccess)", p, e))?)
+ .map_err(|e| map_err_with_name("open (randomaccess)", p, e))
}
fn open_writable_file(&self, p: &Path) -> Result<Box<dyn Write>> {
Ok(Box::new(
fs::OpenOptions::new()
.create(true)
+ .truncate(true)
.write(true)
.append(false)
.open(p)
@@ -67,7 +74,6 @@
Ok(Box::new(
fs::OpenOptions::new()
.create(true)
- .write(true)
.append(true)
.open(p)
.map_err(|e| map_err_with_name("open (append)", p, e))?,
@@ -96,27 +102,28 @@
}

fn delete(&self, p: &Path) -> Result<()> {
- Ok(fs::remove_file(p).map_err(|e| map_err_with_name("delete", p, e))?)
+ fs::remove_file(p).map_err(|e| map_err_with_name("delete", p, e))
}
fn mkdir(&self, p: &Path) -> Result<()> {
- Ok(fs::create_dir_all(p).map_err(|e| map_err_with_name("mkdir", p, e))?)
+ fs::create_dir_all(p).map_err(|e| map_err_with_name("mkdir", p, e))
}
fn rmdir(&self, p: &Path) -> Result<()> {
- Ok(fs::remove_dir_all(p).map_err(|e| map_err_with_name("rmdir", p, e))?)
+ fs::remove_dir_all(p).map_err(|e| map_err_with_name("rmdir", p, e))
}
fn rename(&self, old: &Path, new: &Path) -> Result<()> {
- Ok(fs::rename(old, new).map_err(|e| map_err_with_name("rename", old, e))?)
+ fs::rename(old, new).map_err(|e| map_err_with_name("rename", old, e))
}

fn lock(&self, p: &Path) -> Result<FileLock> {
let mut locks = self.locks.lock().unwrap();

- if locks.contains_key(&p.to_str().unwrap().to_string()) {
- Err(Status::new(StatusCode::AlreadyExists, "Lock is held"))
- } else {
+ if let std::collections::hash_map::Entry::Vacant(e) =
+ locks.entry(p.to_str().unwrap().to_string())
+ {
let f = fs::OpenOptions::new()
.write(true)
.create(true)
+ .truncate(true)
.open(p)
.map_err(|e| map_err_with_name("lock", p, e))?;

@@ -136,11 +143,13 @@
_ => (),
};

- locks.insert(p.to_str().unwrap().to_string(), f);
+ e.insert(f);
let lock = FileLock {
id: p.to_str().unwrap().to_string(),
};
Ok(lock)
+ } else {
+ Err(Status::new(StatusCode::AlreadyExists, "Lock is held"))
}
}
fn unlock(&self, l: FileLock) -> Result<()> {
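Editorial aside, not part of the commit: a generic sketch of the std::collections pattern the lock() change above adopts. HashMap::entry() finds or reserves the slot in a single lookup, replacing the contains_key()-then-insert() sequence that clippy flags as clippy::map_entry.

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;

    // Returns true if the lock was newly claimed, false if it was already held.
    fn sketch_try_claim(locks: &mut HashMap<String, u32>, name: &str, token: u32) -> bool {
        match locks.entry(name.to_string()) {
            Entry::Vacant(slot) => {
                slot.insert(token); // slot is known to be free; no second hash lookup
                true
            }
            Entry::Occupied(_) => false,
        }
    }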
15 changes: 4 additions & 11 deletions src/error.rs
@@ -5,11 +5,6 @@ use std::io;
use std::result;
use std::sync;

- #[cfg(feature = "fs")]
- use errno;
-
- use snap;
-
/// StatusCode describes various failure modes of database operations.
#[derive(Clone, Debug, PartialEq)]
#[allow(dead_code)]
@@ -63,12 +58,10 @@ impl Error for Status {

impl Status {
pub fn new(code: StatusCode, msg: &str) -> Status {
- let err;
- if msg.is_empty() {
- err = format!("{:?}", code)
- } else {
- err = format!("{:?}: {}", code, msg);
- }
+ let err = match msg.is_empty() {
+ true => format!("{:?}", code),
+ false => format!("{:?}: {}", code, msg),
+ };
Status { code, err }
}
pub fn annotate<S: AsRef<str>>(self, msg: S) -> Status {
8 changes: 2 additions & 6 deletions src/filter.rs
@@ -104,11 +104,8 @@ impl BloomPolicy {
assert!(limit - ix < 4);

if limit - ix > 0 {
- let mut i = 0;
-
- for b in data[ix..].iter() {
+ for (i, b) in data[ix..].iter().enumerate() {
h = h.overflowing_add((*b as u32) << (8 * i)).0;
- i += 1;
}

h = (h as u64 * m as u64) as u32;
@@ -276,9 +273,8 @@ mod tests {
intoffs.push(intdata.len());
intdata.extend_from_slice(ikey.internal_key());
});
- let filter = fpol.create_filter(&intdata, &intoffs);

- filter
+ fpol.create_filter(&intdata, &intoffs)
}

#[test]
25 changes: 7 additions & 18 deletions src/key_types.rs
@@ -111,11 +111,8 @@ pub fn build_memtable_key(key: &[u8], value: &[u8], t: ValueType, seq: SequenceN

let keysize = key.len() + U64_SPACE;
let valsize = value.len();
- let mut buf = Vec::new();
- buf.resize(
- keysize + valsize + keysize.required_space() + valsize.required_space(),
- 0,
- );
+ let mut buf =
+ vec![0_u8; keysize + valsize + keysize.required_space() + valsize.required_space()];

{
let mut writer = buf.as_mut_slice();
@@ -135,7 +132,7 @@
/// If the key only contains (keylen, key, tag), the vallen and val offset return values will be
/// meaningless.
pub fn parse_memtable_key(mkey: MemtableKey) -> (usize, usize, u64, usize, usize) {
- let (keylen, mut i): (usize, usize) = VarInt::decode_var(&mkey).unwrap();
+ let (keylen, mut i): (usize, usize) = VarInt::decode_var(mkey).unwrap();
let keyoff = i;
i += keylen - 8;

@@ -152,13 +149,9 @@
}

/// cmp_memtable_key efficiently compares two memtable keys by only parsing what's actually needed.
- pub fn cmp_memtable_key<'a, 'b>(
- ucmp: &dyn Cmp,
- a: MemtableKey<'a>,
- b: MemtableKey<'b>,
- ) -> Ordering {
- let (alen, aoff): (usize, usize) = VarInt::decode_var(&a).unwrap();
- let (blen, boff): (usize, usize) = VarInt::decode_var(&b).unwrap();
+ pub fn cmp_memtable_key(ucmp: &dyn Cmp, a: MemtableKey<'_>, b: MemtableKey<'_>) -> Ordering {
+ let (alen, aoff): (usize, usize) = VarInt::decode_var(a).unwrap();
+ let (blen, boff): (usize, usize) = VarInt::decode_var(b).unwrap();
let userkey_a = &a[aoff..aoff + alen - 8];
let userkey_b = &b[boff..boff + blen - 8];

@@ -189,11 +182,7 @@ pub fn parse_internal_key(ikey: InternalKey) -> (ValueType, SequenceNumber, User

/// cmp_internal_key efficiently compares keys in InternalKey format by only parsing the parts that
/// are actually needed for a comparison.
- pub fn cmp_internal_key<'a, 'b>(
- ucmp: &dyn Cmp,
- a: InternalKey<'a>,
- b: InternalKey<'b>,
- ) -> Ordering {
+ pub fn cmp_internal_key(ucmp: &dyn Cmp, a: InternalKey<'_>, b: InternalKey<'_>) -> Ordering {
match ucmp.cmp(&a[0..a.len() - 8], &b[0..b.len() - 8]) {
Ordering::Less => Ordering::Less,
Ordering::Greater => Ordering::Greater,
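Editorial aside, not part of the commit: the buffer layout implied by build_memtable_key and parse_memtable_key above is varint(user key length + 8), then the user key bytes, an 8-byte tag, varint(value length), and the value bytes. The tag encoding sketched below, (sequence << 8) | value type as in LevelDB, is an assumption rather than something shown in this diff.

    // Sketch only: packs a sequence number and a value-type byte into the
    // 8-byte tag that follows the user key (assumed LevelDB convention).
    fn sketch_tag(seq: u64, value_type: u8) -> u64 {
        (seq << 8) | value_type as u64
    }
    // e.g. seq = 7 with value_type = 1 yields 0x701.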