changeset 528:0c3b736b2781

cargo clippy
author João Oliveira <hello@jxs.pt>
date Tue, 19 May 2020 18:02:55 +0100
parents deb79bf7f240
children 5072ed1ffabd
files src/benches/maps_bench.rs src/block.rs src/block_builder.rs src/blockhandle.rs src/cache.rs src/cmp.rs src/db_impl.rs src/db_iter.rs src/disk_env.rs src/env.rs src/env_common.rs src/error.rs src/filter.rs src/key_types.rs src/log.rs src/memtable.rs src/merging_iter.rs src/skipmap.rs src/snapshot.rs src/table_builder.rs src/table_reader.rs src/test_util.rs src/version.rs src/version_edit.rs src/version_set.rs src/write_batch.rs
diffstat 26 files changed, 138 insertions(+), 143 deletions(-) [+]
line wrap: on
line diff
--- a/src/benches/maps_bench.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/benches/maps_bench.rs	Tue May 19 18:02:55 2020 +0100
@@ -21,10 +21,10 @@
     let mut val = Vec::with_capacity(vallen);
 
     for _i in 0..keylen {
-        key.push(gen.gen_range('a' as u8, 'z' as u8));
+        key.push(gen.gen_range(b'a', b'z'));
     }
     for _i in 0..vallen {
-        val.push(gen.gen_range('a' as u8, 'z' as u8));
+        val.push(gen.gen_range(b'a', b'z'));
     }
     (key, val)
 }
--- a/src/block.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/block.rs	Tue May 19 18:02:55 2020 +0100
@@ -67,7 +67,7 @@
         assert!(contents.len() > 4);
         Block {
             block: Rc::new(contents),
-            opt: opt,
+            opt,
         }
     }
 }
--- a/src/block_builder.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/block_builder.rs	Tue May 19 18:02:55 2020 +0100
@@ -25,7 +25,7 @@
         BlockBuilder {
             buffer: Vec::with_capacity(o.block_size),
             opt: o,
-            restarts: restarts,
+            restarts,
             last_key: Vec::new(),
             restart_counter: 0,
             counter: 0,
@@ -36,7 +36,7 @@
         self.counter
     }
 
-    pub fn last_key<'a>(&'a self) -> &'a [u8] {
+    pub fn last_key(&self) -> &[u8] {
         &self.last_key
     }
 
--- a/src/blockhandle.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/blockhandle.rs	Tue May 19 18:02:55 2020 +0100
@@ -28,8 +28,8 @@
 
     pub fn new(offset: usize, size: usize) -> BlockHandle {
         BlockHandle {
-            offset: offset,
-            size: size,
+            offset,
+            size,
         }
     }
 
--- a/src/cache.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/cache.rs	Tue May 19 18:02:55 2020 +0100
@@ -82,7 +82,7 @@
             assert!(self.head.prev.is_some());
             self.head.prev = last.prev;
             self.count -= 1;
-            return replace(&mut (*last).data, None);
+            replace(&mut (*last).data, None)
         } else {
             None
         }
@@ -185,17 +185,17 @@
     /// among several users.
     pub fn new_cache_id(&mut self) -> CacheID {
         self.id += 1;
-        return self.id;
+        self.id
     }
 
     /// How many the cache currently contains
     pub fn count(&self) -> usize {
-        return self.list.count();
+        self.list.count()
     }
 
     /// The capacity of this cache
     pub fn cap(&self) -> usize {
-        return self.cap;
+        self.cap
     }
 
     /// Insert a new element into the cache. The returned `CacheHandle` can be used for further
@@ -211,8 +211,8 @@
             }
         }
 
-        let lru_handle = self.list.insert(key.clone());
-        self.map.insert(key.clone(), (elem, lru_handle));
+        let lru_handle = self.list.insert(*key);
+        self.map.insert(*key, (elem, lru_handle));
     }
 
     /// Retrieve an element from the cache.
--- a/src/cmp.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/cmp.rs	Tue May 19 18:02:55 2020 +0100
@@ -85,7 +85,7 @@
         // "abc\0", which is greater than abc and lesser than abd.
         // Append a 0 byte; by making it longer than a, it will compare greater to it.
         sep.extend_from_slice(&[0]);
-        return sep;
+        sep
     }
 
     fn find_short_succ(&self, a: &[u8]) -> Vec<u8> {
@@ -99,7 +99,7 @@
         }
         // Rare path
         result.push(255);
-        return result;
+        result
     }
 }
 
@@ -131,13 +131,13 @@
                 .internal_key()
                 .to_vec();
         }
-        return LookupKey::new(&sep, seqa).internal_key().to_vec();
+        LookupKey::new(&sep, seqa).internal_key().to_vec()
     }
 
     fn find_short_succ(&self, a: &[u8]) -> Vec<u8> {
         let (_, seq, key) = key_types::parse_internal_key(a);
         let succ: Vec<u8> = self.0.find_short_succ(key);
-        return LookupKey::new(&succ, seq).internal_key().to_vec();
+        LookupKey::new(&succ, seq).internal_key().to_vec()
     }
 }
 
--- a/src/db_impl.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/db_impl.rs	Tue May 19 18:02:55 2020 +0100
@@ -79,7 +79,7 @@
 
         DB {
             name: name.to_owned(),
-            path: path,
+            path,
             lock: None,
             internal_cmp: Rc::new(Box::new(InternalKeyCmp(opt.cmp.clone()))),
             fpol: InternalFilterPolicy::new(opt.filter_policy.clone()),
@@ -87,11 +87,11 @@
             mem: MemTable::new(opt.cmp.clone()),
             imm: None,
 
-            opt: opt,
+            opt,
 
             log: None,
             log_num: None,
-            cache: cache,
+            cache,
             vset: share(vset),
             snaps: SnapshotList::new(),
 
@@ -1028,15 +1028,18 @@
     }
 
     let mut md = FileMetaData::default();
-    if firstkey.is_none() {
-        let _ = opt.env.delete(Path::new(&filename));
-    } else {
-        md.num = num;
-        md.size = opt.env.size_of(Path::new(&filename))?;
-        md.smallest = firstkey.unwrap();
-        md.largest = kbuf;
+    match firstkey {
+        None => {
+            let _ = opt.env.delete(Path::new(&filename));
+        },
+        Some(key) => {
+            md.num = num;
+            md.size = opt.env.size_of(Path::new(&filename))?;
+            md.smallest = key;
+            md.largest = kbuf;
+        }
     }
-    Ok(md)
+    Ok(md)
 }
 
 fn log_file_name(db: &Path, num: FileNum) -> PathBuf {
--- a/src/db_iter.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/db_iter.rs	Tue May 19 18:02:55 2020 +0100
@@ -42,10 +42,10 @@
         ss: Snapshot,
     ) -> DBIterator {
         DBIterator {
-            cmp: cmp,
-            vset: vset,
-            iter: iter,
-            ss: ss,
+            cmp,
+            vset,
+            iter,
+            ss,
             dir: Direction::Forward,
             byte_count: random_period(),
 
@@ -59,7 +59,7 @@
 
     /// record_read_sample records a read sample using the current contents of self.keybuf, which
     /// should be an InternalKey.
-    fn record_read_sample<'a>(&mut self, len: usize) {
+    fn record_read_sample(&mut self, len: usize) {
         self.byte_count -= len as isize;
         if self.byte_count < 0 {
             let v = self.vset.borrow().current();
--- a/src/disk_env.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/disk_env.rs	Tue May 19 18:02:55 2020 +0100
@@ -81,11 +81,12 @@
         let dir_reader = fs::read_dir(p).map_err(|e| map_err_with_name("children", p, e))?;
         let filenames = dir_reader
             .map(|r| {
-                if !r.is_ok() {
-                    Path::new("").to_owned()
-                } else {
-                    let direntry = r.unwrap();
-                    Path::new(&direntry.file_name()).to_owned()
+                match r {
+                    Ok(_) => {
+                        let direntry = r.unwrap();
+                        Path::new(&direntry.file_name()).to_owned()
+                    },
+                    Err(_) => Path::new("").to_owned(),
                 }
             })
             .filter(|s| !s.as_os_str().is_empty());
@@ -143,13 +144,13 @@
     fn unlock(&self, l: FileLock) -> Result<()> {
         let mut locks = self.locks.lock().unwrap();
         if !locks.contains_key(&l.id) {
-            return err(
+            err(
                 StatusCode::LockError,
                 &format!("unlocking a file that is not locked: {}", l.id),
-            );
+            )
         } else {
             let f = locks.remove(&l.id).unwrap();
-            if let Err(_) = f.unlock() {
+            if f.unlock().is_err() {
                 return err(StatusCode::LockError, &format!("unlock failed: {}", l.id));
             }
             Ok(())
--- a/src/env.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/env.rs	Tue May 19 18:02:55 2020 +0100
@@ -66,7 +66,7 @@
         Logger { dst: w }
     }
 
-    pub fn log(&mut self, message: &String) {
+    pub fn log(&mut self, message: &str) {
         let _ = self.dst.write(message.as_bytes());
         let _ = self.dst.write("\n".as_bytes());
     }
--- a/src/env_common.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/env_common.rs	Tue May 19 18:02:55 2020 +0100
@@ -7,7 +7,7 @@
 
         match now {
             Err(_) => continue,
-            Ok(dur) => return dur.as_secs() * 1000000 + (dur.subsec_nanos() / 1000) as u64,
+            Ok(dur) => return dur.as_secs() * 1000000 + dur.subsec_micros() as u64,
         }
     }
 }
--- a/src/error.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/error.rs	Tue May 19 18:02:55 2020 +0100
@@ -65,10 +65,10 @@
         } else {
             err = format!("{:?}: {}", code, msg);
         }
-        return Status {
-            code: code,
-            err: err,
-        };
+        Status {
+            code,
+            err,
+        }
     }
 }
 
--- a/src/filter.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/filter.rs	Tue May 19 18:02:55 2020 +0100
@@ -80,8 +80,8 @@
         }
 
         BloomPolicy {
-            bits_per_key: bits_per_key,
-            k: k,
+            bits_per_key,
+            k,
         }
     }
 
@@ -156,7 +156,7 @@
         filter
     }
     fn key_may_match(&self, key: &[u8], filter: &[u8]) -> bool {
-        if filter.len() == 0 {
+        if filter.is_empty() {
             return true;
         }
 
--- a/src/key_types.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/key_types.rs	Tue May 19 18:02:55 2020 +0100
@@ -44,11 +44,11 @@
 const U64_SPACE: usize = 8;
 
 impl LookupKey {
-    pub fn new<'a>(k: UserKey<'a>, s: SequenceNumber) -> LookupKey {
+    pub fn new(k: UserKey, s: SequenceNumber) -> LookupKey {
         LookupKey::new_full(k, s, ValueType::TypeValue)
     }
 
-    pub fn new_full<'a>(k: UserKey<'a>, s: SequenceNumber, t: ValueType) -> LookupKey {
+    pub fn new_full(k: UserKey, s: SequenceNumber, t: ValueType) -> LookupKey {
         let mut key = Vec::new();
         let internal_keylen = k.len() + U64_SPACE;
         key.resize(k.len() + internal_keylen.required_space() + U64_SPACE, 0);
@@ -58,30 +58,30 @@
             writer
                 .write_varint(internal_keylen)
                 .expect("write to slice failed");
-            writer.write(k).expect("write to slice failed");
+            writer.write_all(k).expect("write to slice failed");
             writer
                 .write_fixedint(s << 8 | t as u64)
                 .expect("write to slice failed");
         }
 
         LookupKey {
-            key: key,
+            key,
             key_offset: internal_keylen.required_space(),
         }
     }
 
     /// Returns the full memtable-formatted key.
-    pub fn memtable_key<'a>(&'a self) -> MemtableKey<'a> {
+    pub fn memtable_key(&self) -> MemtableKey {
         self.key.as_slice()
     }
 
     /// Returns only the user key portion.
-    pub fn user_key<'a>(&'a self) -> UserKey<'a> {
+    pub fn user_key(&self) -> UserKey {
         &self.key[self.key_offset..self.key.len() - 8]
     }
 
     /// Returns key and tag.
-    pub fn internal_key<'a>(&'a self) -> InternalKey<'a> {
+    pub fn internal_key(&self) -> InternalKey {
         &self.key[self.key_offset..]
     }
 }
@@ -120,12 +120,12 @@
     {
         let mut writer = buf.as_mut_slice();
         writer.write_varint(keysize).expect("write to slice failed");
-        writer.write(key).expect("write to slice failed");
+        writer.write_all(key).expect("write to slice failed");
         writer
             .write_fixedint((t as u64) | (seq << 8))
             .expect("write to slice failed");
         writer.write_varint(valsize).expect("write to slice failed");
-        writer.write(value).expect("write to slice failed");
+        writer.write_all(value).expect("write to slice failed");
         assert_eq!(writer.len(), 0);
     }
     buf
@@ -134,7 +134,7 @@
 /// Parses a memtable key and returns  (keylen, key offset, tag, vallen, val offset).
 /// If the key only contains (keylen, key, tag), the vallen and val offset return values will be
 /// meaningless.
-pub fn parse_memtable_key<'a>(mkey: MemtableKey<'a>) -> (usize, usize, u64, usize, usize) {
+pub fn parse_memtable_key(mkey: MemtableKey) -> (usize, usize, u64, usize, usize) {
     let (keylen, mut i): (usize, usize) = VarInt::decode_var(&mkey);
     let keyoff = i;
     i += keylen - 8;
@@ -145,9 +145,9 @@
         let (vallen, j): (usize, usize) = VarInt::decode_var(&mkey[i..]);
         i += j;
         let valoff = i;
-        return (keylen - 8, keyoff, tag, vallen, valoff);
+        (keylen - 8, keyoff, tag, vallen, valoff)
     } else {
-        return (keylen - 8, keyoff, 0, 0, 0);
+        (keylen - 8, keyoff, 0, 0, 0)
     }
 }
 
@@ -178,13 +178,13 @@
 }
 
 /// Parse a key in InternalKey format.
-pub fn parse_internal_key<'a>(ikey: InternalKey<'a>) -> (ValueType, SequenceNumber, UserKey<'a>) {
+pub fn parse_internal_key(ikey: InternalKey) -> (ValueType, SequenceNumber, UserKey) {
     if ikey.is_empty() {
         return (ValueType::TypeDeletion, 0, &ikey[0..0]);
     }
     assert!(ikey.len() >= 8);
     let (typ, seq) = parse_tag(FixedInt::decode_fixed(&ikey[ikey.len() - 8..]));
-    return (typ, seq, &ikey[0..ikey.len() - 8]);
+    (typ, seq, &ikey[0..ikey.len() - 8])
 }
 
 /// cmp_internal_key efficiently compares keys in InternalKey format by only parsing the parts that
--- a/src/log.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/log.rs	Tue May 19 18:02:55 2020 +0100
@@ -37,7 +37,7 @@
             dst: writer,
             current_block_offset: 0,
             block_size: BLOCK_SIZE,
-            digest: digest,
+            digest,
         }
     }
 
@@ -53,14 +53,14 @@
         let mut record = &r[..];
         let mut first_frag = true;
         let mut result = Ok(0);
-        while result.is_ok() && record.len() > 0 {
+        while result.is_ok() && !record.is_empty() {
             assert!(self.block_size > HEADER_SIZE);
 
             let space_left = self.block_size - self.current_block_offset;
 
             // Fill up block; go to next block.
             if space_left < HEADER_SIZE {
-                self.dst.write(&vec![0, 0, 0, 0, 0, 0][0..space_left])?;
+                self.dst.write_all(&vec![0, 0, 0, 0, 0, 0][0..space_left])?;
                 self.current_block_offset = 0;
             }
 
@@ -129,7 +129,7 @@
 impl<R: Read> LogReader<R> {
     pub fn new(src: R, chksum: bool) -> LogReader<R> {
         LogReader {
-            src: src,
+            src,
             blk_off: 0,
             blocksize: BLOCK_SIZE,
             checksums: chksum,
@@ -151,7 +151,7 @@
             if self.blocksize - self.blk_off < HEADER_SIZE {
                 // skip to next block
                 self.src
-                    .read(&mut self.head_scratch[0..self.blocksize - self.blk_off])?;
+                    .read_exact(&mut self.head_scratch[0..self.blocksize - self.blk_off])?;
                 self.blk_off = 0;
             }
 
--- a/src/memtable.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/memtable.rs	Tue May 19 18:02:55 2020 +0100
@@ -127,7 +127,7 @@
             shift_left(key, keyoff);
             // Truncate key to key+tag.
             key.truncate(keylen + u64::required_space());
-            return true;
+            true
         } else {
             panic!("should not happen");
         }
--- a/src/merging_iter.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/merging_iter.rs	Tue May 19 18:02:55 2020 +0100
@@ -26,13 +26,12 @@
 impl MergingIter {
     /// Construct a new merging iterator.
     pub fn new(cmp: Rc<Box<dyn Cmp>>, iters: Vec<Box<dyn LdbIterator>>) -> MergingIter {
-        let mi = MergingIter {
-            iters: iters,
+        MergingIter {
+            iters,
             current: None,
             direction: Direction::Forward,
-            cmp: cmp,
-        };
-        mi
+            cmp,
+        }
     }
 
     fn init(&mut self) {
@@ -68,10 +67,8 @@
                                 // This doesn't work if two iterators are returning the exact same
                                 // keys. However, in reality, two entries will always have differing
                                 // sequence numbers.
-                                if self.iters[i].current(&mut keybuf, &mut valbuf) {
-                                    if self.cmp.cmp(&keybuf, &key) == Ordering::Equal {
-                                        self.iters[i].advance();
-                                    }
+                                if self.iters[i].current(&mut keybuf, &mut valbuf) && self.cmp.cmp(&keybuf, &key) == Ordering::Equal {
+                                    self.iters[i].advance();
                                 }
                             }
                         }
--- a/src/skipmap.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/skipmap.rs	Tue May 19 18:02:55 2020 +0100
@@ -59,7 +59,7 @@
                 rand: StdRng::seed_from_u64(0xdeadbeef),
                 len: 0,
                 approx_mem: size_of::<Self>() + MAX_HEIGHT * size_of::<Option<*mut Node>>(),
-                cmp: cmp,
+                cmp,
             })),
         }
     }
@@ -141,11 +141,11 @@
 
         unsafe {
             if current.is_null() || current == self.head.as_ref() {
-                return None;
+                None
             } else if self.cmp.cmp(&(*current).key, key) == Ordering::Less {
-                return None;
+                None
             } else {
-                return Some(&(*current));
+                Some(&(*current))
             }
         }
     }
@@ -180,11 +180,11 @@
         unsafe {
             if current.is_null() || current == self.head.as_ref() {
                 // If we're past the end for some reason or at the head
-                return None;
+                None
             } else if self.cmp.cmp(&(*current).key, key) != Ordering::Less {
-                return None;
+                None
             } else {
-                return Some(&(*current));
+                Some(&(*current))
             }
         }
     }
@@ -238,7 +238,7 @@
         let mut new = Box::new(Node {
             skips: new_skips,
             next: None,
-            key: key,
+            key,
             value: val,
         });
         let newp = new.as_mut() as *mut Node;
--- a/src/snapshot.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/snapshot.rs	Tue May 19 18:02:55 2020 +0100
@@ -69,7 +69,7 @@
         Snapshot {
             inner: Rc::new(InnerSnapshot {
                 id: sl.newest,
-                seq: seq,
+                seq,
                 sl: inner,
             }),
         }
--- a/src/table_builder.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/table_builder.rs	Tue May 19 18:02:55 2020 +0100
@@ -41,7 +41,7 @@
     pub fn new(metaix: BlockHandle, index: BlockHandle) -> Footer {
         Footer {
             meta_index: metaix,
-            index: index,
+            index,
         }
     }
 
@@ -117,7 +117,7 @@
     pub fn new_raw(opt: Options, dst: Dst) -> TableBuilder<Dst> {
         TableBuilder {
             opt: opt.clone(),
-            dst: dst,
+            dst,
             offset: 0,
             prev_block_last_key: vec![],
             num_entries: 0,
--- a/src/table_reader.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/table_reader.rs	Tue May 19 18:02:55 2020 +0100
@@ -51,13 +51,13 @@
         let cache_id = opt.block_cache.borrow_mut().new_cache_id();
 
         Ok(Table {
-            file: file,
+            file,
             file_size: size,
-            cache_id: cache_id,
-            opt: opt,
-            footer: footer,
+            cache_id,
+            opt,
+            footer,
             filters: filter_block_reader,
-            indexblock: indexblock,
+            indexblock,
         })
     }
 
@@ -143,18 +143,17 @@
             return location.offset();
         }
 
-        return self.footer.meta_index.offset();
+        self.footer.meta_index.offset()
     }
 
     /// Iterators read from the file; thus only one iterator can be borrowed (mutably) per scope
     pub fn iter(&self) -> TableIterator {
-        let iter = TableIterator {
+        TableIterator {
             current_block: None,
             current_block_off: 0,
             index_block: self.indexblock.iter(),
             table: self.clone(),
-        };
-        iter
+        }
     }
 
     /// Retrieve next-biggest entry for key from table. This function uses the attached filters, so
--- a/src/test_util.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/test_util.rs	Tue May 19 18:02:55 2020 +0100
@@ -12,11 +12,11 @@
 
 impl<'a> TestLdbIter<'a> {
     pub fn new(c: Vec<(&'a [u8], &'a [u8])>) -> TestLdbIter<'a> {
-        return TestLdbIter {
+        TestLdbIter {
             v: c,
             ix: 0,
             init: false,
-        };
+        }
     }
 }
 
--- a/src/version.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/version.rs	Tue May 19 18:02:55 2020 +0100
@@ -189,7 +189,7 @@
                 level += 1;
             }
         }
-        return level;
+        level
     }
 
     /// record_read_sample returns true if there is a new file to be compacted. It counts the
@@ -202,11 +202,9 @@
         let mut first_file = None;
         let mut first_file_level = None;
         for level in &levels {
-            if !level.is_empty() {
-                if first_file.is_none() && first_file_level.is_none() {
-                    first_file = Some(level[0].clone());
-                    first_file_level = Some(i);
-                }
+            if !level.is_empty() && first_file.is_none() && first_file_level.is_none() {
+                first_file = Some(level[0].clone());
+                first_file_level = Some(i);
             }
             contained_in += level.len();
             i += 1;
@@ -227,7 +225,7 @@
     pub fn update_stats(&mut self, stats: GetStats) -> bool {
         if let Some(file) = stats.file {
             if file.borrow().allowed_seeks <= 1 && self.file_to_compact.is_none() {
-                self.file_to_compact = Some(file.clone());
+                self.file_to_compact = Some(file);
                 self.file_to_compact_lvl = stats.level;
                 return true;
             } else if file.borrow().allowed_seeks > 0 {
@@ -256,7 +254,7 @@
 
     /// overlap_in_level returns true if the specified level's files overlap the range [smallest;
     /// largest].
-    pub fn overlap_in_level<'a, 'b>(
+    pub fn overlap_in_level<'a>(
         &self,
         level: usize,
         smallest: UserKey<'a>,
@@ -389,8 +387,8 @@
     ucmp: Rc<Box<dyn Cmp>>,
 ) -> VersionIter {
     VersionIter {
-        files: files,
-        cache: cache,
+        files,
+        cache,
         cmp: InternalKeyCmp(ucmp),
         current: None,
         current_ix: 0,
@@ -593,7 +591,7 @@
         share(FileMetaData {
             allowed_seeks: 10,
             size: 163840,
-            num: num,
+            num,
             smallest: LookupKey::new(smallest, smallestix).internal_key().to_vec(),
             largest: LookupKey::new(largest, largestix).internal_key().to_vec(),
         })
--- a/src/version_edit.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/version_edit.rs	Tue May 19 18:02:55 2020 +0100
@@ -48,12 +48,12 @@
             if l != klen {
                 return err(StatusCode::IOError, "Couldn't read full key");
             }
-            return Ok(keybuf);
+            Ok(keybuf)
         } else {
-            return err(StatusCode::IOError, "Couldn't read key");
+            err(StatusCode::IOError, "Couldn't read key")
         }
     } else {
-        return err(StatusCode::IOError, "Couldn't read key length");
+        err(StatusCode::IOError, "Couldn't read key length")
     }
 }
 
@@ -89,7 +89,7 @@
     }
 
     pub fn add_file(&mut self, level: usize, file: FileMetaData) {
-        self.new_files.push((level, file.clone()))
+        self.new_files.push((level, file))
     }
 
     pub fn delete_file(&mut self, level: usize, file_num: FileNum) {
@@ -118,7 +118,7 @@
 
     pub fn set_compact_pointer(&mut self, level: usize, key: InternalKey) {
         self.compaction_ptrs.push(CompactionPointer {
-            level: level,
+            level,
             key: Vec::from(key),
         })
     }
@@ -132,7 +132,7 @@
             buf.write_varint(EditTag::Comparator as u32).unwrap();
             // data is prefixed by a varint32 describing the length of the following chunk
             buf.write_varint(cmp.len()).unwrap();
-            buf.write(cmp.as_bytes()).unwrap();
+            buf.write_all(cmp.as_bytes()).unwrap();
         }
 
         if let Some(lognum) = self.log_number {
@@ -159,7 +159,7 @@
             buf.write_varint(EditTag::CompactPointer as u32).unwrap();
             buf.write_varint(cptr.level).unwrap();
             buf.write_varint(cptr.key.len()).unwrap();
-            buf.write(cptr.key.as_ref()).unwrap();
+            buf.write_all(cptr.key.as_ref()).unwrap();
         }
 
         for df in self.deleted.iter() {
@@ -175,9 +175,9 @@
             buf.write_varint(nf.1.size).unwrap();
 
             buf.write_varint(nf.1.smallest.len()).unwrap();
-            buf.write(nf.1.smallest.as_ref()).unwrap();
+            buf.write_all(nf.1.smallest.as_ref()).unwrap();
             buf.write_varint(nf.1.largest.len()).unwrap();
-            buf.write(nf.1.largest.as_ref()).unwrap();
+            buf.write_all(nf.1.largest.as_ref()).unwrap();
         }
 
         buf
@@ -238,7 +238,7 @@
 
                             ve.compaction_ptrs.push(CompactionPointer {
                                 level: lvl,
-                                key: key,
+                                key,
                             });
                         } else {
                             return err(StatusCode::IOError, "Couldn't read level");
@@ -266,10 +266,10 @@
                                     ve.new_files.push((
                                         lvl,
                                         FileMetaData {
-                                            num: num,
-                                            size: size,
-                                            smallest: smallest,
-                                            largest: largest,
+                                            num,
+                                            size,
+                                            smallest,
+                                            largest,
                                             allowed_seeks: 0,
                                         },
                                     ))
--- a/src/version_set.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/version_set.rs	Tue May 19 18:02:55 2020 +0100
@@ -42,7 +42,7 @@
     // Note: opt.cmp should be the user-supplied or default comparator (not an InternalKeyCmp).
     pub fn new(opt: &Options, level: usize, input: Option<Shared<Version>>) -> Compaction {
         Compaction {
-            level: level,
+            level,
             max_file_size: opt.max_file_size,
             input_version: input,
             level_ixs: Default::default(),
@@ -190,8 +190,8 @@
         VersionSet {
             dbname: db.as_ref().to_owned(),
             cmp: InternalKeyCmp(opt.cmp.clone()),
-            opt: opt,
-            cache: cache,
+            opt,
+            cache,
 
             next_file_num: 2,
             manifest_num: 0,
@@ -269,10 +269,8 @@
                     if level > 0 {
                         break;
                     }
-                } else {
-                    if let Ok(tbl) = self.cache.borrow_mut().get_table(f.borrow().num) {
-                        offset += tbl.approx_offset_of(key);
-                    }
+                } else if let Ok(tbl) = self.cache.borrow_mut().get_table(f.borrow().num) {
+                    offset += tbl.approx_offset_of(key);
                 }
             }
         }
@@ -849,14 +847,13 @@
 }
 
 pub fn manifest_file_name<P: AsRef<Path>>(dbname: P, file_num: FileNum) -> PathBuf {
-    dbname.as_ref().join(manifest_name(file_num)).to_owned()
+    dbname.as_ref().join(manifest_name(file_num))
 }
 
 fn temp_file_name<P: AsRef<Path>>(dbname: P, file_num: FileNum) -> PathBuf {
     dbname
         .as_ref()
         .join(format!("{:06}.dbtmp", file_num))
-        .to_owned()
 }
 
 fn current_file_name<P: AsRef<Path>>(dbname: P) -> PathBuf {
--- a/src/write_batch.rs	Mon May 18 23:50:41 2020 +0100
+++ b/src/write_batch.rs	Tue May 19 18:02:55 2020 +0100
@@ -35,11 +35,11 @@
     /// Adds an entry to a WriteBatch, to be added to the database.
     #[allow(unused_assignments)]
     pub fn put(&mut self, k: &[u8], v: &[u8]) {
-        self.entries.write(&[ValueType::TypeValue as u8]).unwrap();
+        self.entries.write_all(&[ValueType::TypeValue as u8]).unwrap();
         self.entries.write_varint(k.len()).unwrap();
-        self.entries.write(k).unwrap();
+        self.entries.write_all(k).unwrap();
         self.entries.write_varint(v.len()).unwrap();
-        self.entries.write(v).unwrap();
+        self.entries.write_all(v).unwrap();
 
         let c = self.count();
         self.set_count(c + 1);
@@ -52,7 +52,7 @@
             .write(&[ValueType::TypeDeletion as u8])
             .unwrap();
         self.entries.write_varint(k.len()).unwrap();
-        self.entries.write(k).unwrap();
+        self.entries.write_all(k).unwrap();
 
         let c = self.count();
         self.set_count(c + 1);
@@ -84,7 +84,7 @@
         u64::decode_fixed(&self.entries[SEQNUM_OFFSET..SEQNUM_OFFSET + 8])
     }
 
-    pub fn iter<'a>(&'a self) -> WriteBatchIter<'a> {
+    pub fn iter(&self) -> WriteBatchIter {
         WriteBatchIter {
             batch: self,
             ix: HEADER_SIZE,
@@ -95,7 +95,7 @@
         for (k, v) in self.iter() {
             match v {
                 Some(v_) => mt.add(seq, ValueType::TypeValue, k, v_),
-                None => mt.add(seq, ValueType::TypeDeletion, k, "".as_bytes()),
+                None => mt.add(seq, ValueType::TypeDeletion, k, b""),
             }
             seq += 1;
         }
@@ -134,9 +134,9 @@
             let v = &self.batch.entries[self.ix..self.ix + vlen];
             self.ix += vlen;
 
-            return Some((k, Some(v)));
+            Some((k, Some(v)))
         } else {
-            return Some((k, None));
+            Some((k, None))
         }
     }
 }