changeset 449:9f01a2fbcda2

chore(fmt): Run rustfmt. (rustfmt 0.3.4-nightly)
author Lewin Bormann <lbo@spheniscida.de>
date Sat, 03 Mar 2018 11:53:18 +0100
parents 18671ad3d4b4
children c3e6a52115da
files examples/leveldb-tool/src/main.rs examples/write-a-lot/src/main.rs src/block.rs src/block_builder.rs src/blockhandle.rs src/cache.rs src/cmp.rs src/db_impl.rs src/db_iter.rs src/disk_env.rs src/env_common.rs src/filter.rs src/filter_block.rs src/infolog.rs src/key_types.rs src/log.rs src/mem_env.rs src/memtable.rs src/merging_iter.rs src/options.rs src/skipmap.rs src/snapshot.rs src/table_block.rs src/table_builder.rs src/table_cache.rs src/table_reader.rs src/test_util.rs src/types.rs src/version.rs src/version_edit.rs src/version_set.rs src/write_batch.rs
diffstat 32 files changed, 1766 insertions(+), 993 deletions(-)
--- a/examples/leveldb-tool/src/main.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/examples/leveldb-tool/src/main.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,6 +1,6 @@
 extern crate rusty_leveldb;
 
-use rusty_leveldb::{DB, LdbIterator, Options};
+use rusty_leveldb::{LdbIterator, Options, DB};
 
 use std::env::args;
 use std::io::{self, Write};
@@ -44,8 +44,10 @@
     let args = Vec::from_iter(args());
 
     if args.len() < 2 {
-        panic!("Usage: {} [get|put|delete|iter|compact] [key|from] [val|to]",
-               args[0]);
+        panic!(
+            "Usage: {} [get|put|delete|iter|compact] [key|from] [val|to]",
+            args[0]
+        );
     }
 
     let mut opt = Options::default();
--- a/examples/write-a-lot/src/main.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/examples/write-a-lot/src/main.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,5 +1,5 @@
+extern crate rand;
 extern crate rusty_leveldb;
-extern crate rand;
 
 use rusty_leveldb::CompressionType;
 use rusty_leveldb::DB;
--- a/src/block.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/block.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -157,7 +157,8 @@
     /// Only self.key is mutated.
     fn assemble_key(&mut self, off: usize, shared: usize, non_shared: usize) {
         self.key.truncate(shared);
-        self.key.extend_from_slice(&self.block[off..off + non_shared]);
+        self.key
+            .extend_from_slice(&self.block[off..off + non_shared]);
     }
 
     pub fn seek_to_last(&mut self) {
@@ -195,8 +196,9 @@
 
         // Adjust current_restart_ix
         let num_restarts = self.number_restarts();
-        while self.current_restart_ix + 1 < num_restarts &&
-              self.get_restart_point(self.current_restart_ix + 1) < self.current_entry_offset {
+        while self.current_restart_ix + 1 < num_restarts
+            && self.get_restart_point(self.current_restart_ix + 1) < self.current_entry_offset
+        {
             self.current_restart_ix += 1;
         }
         true
@@ -307,12 +309,17 @@
     use types::{current_key_val, LdbIterator};
 
     fn get_data() -> Vec<(&'static [u8], &'static [u8])> {
-        vec![("key1".as_bytes(), "value1".as_bytes()),
-             ("loooooooooooooooooooooooooooooooooongerkey1".as_bytes(), "shrtvl1".as_bytes()),
-             ("medium length key 1".as_bytes(), "some value 2".as_bytes()),
-             ("prefix_key1".as_bytes(), "value".as_bytes()),
-             ("prefix_key2".as_bytes(), "value".as_bytes()),
-             ("prefix_key3".as_bytes(), "value".as_bytes())]
+        vec![
+            ("key1".as_bytes(), "value1".as_bytes()),
+            (
+                "loooooooooooooooooooooooooooooooooongerkey1".as_bytes(),
+                "shrtvl1".as_bytes(),
+            ),
+            ("medium length key 1".as_bytes(), "some value 2".as_bytes()),
+            ("prefix_key1".as_bytes(), "value".as_bytes()),
+            ("prefix_key2".as_bytes(), "value".as_bytes()),
+            ("prefix_key3".as_bytes(), "value".as_bytes()),
+        ]
     }
 
     #[test]
@@ -385,27 +392,35 @@
         let mut block = Block::new(o.clone(), block_contents).iter();
 
         assert!(!block.valid());
-        assert_eq!(block.next(),
-                   Some(("key1".as_bytes().to_vec(), "value1".as_bytes().to_vec())));
+        assert_eq!(
+            block.next(),
+            Some(("key1".as_bytes().to_vec(), "value1".as_bytes().to_vec()))
+        );
         assert!(block.valid());
         block.next();
         assert!(block.valid());
         block.prev();
         assert!(block.valid());
-        assert_eq!(current_key_val(&block),
-                   Some(("key1".as_bytes().to_vec(), "value1".as_bytes().to_vec())));
+        assert_eq!(
+            current_key_val(&block),
+            Some(("key1".as_bytes().to_vec(), "value1".as_bytes().to_vec()))
+        );
         block.prev();
         assert!(!block.valid());
 
         // Verify that prev() from the last entry goes to the prev-to-last entry
         // (essentially, that next() returning None doesn't advance anything)
-        while let Some(_) = block.next() {
-        }
+        while let Some(_) = block.next() {}
 
         block.prev();
         assert!(block.valid());
-        assert_eq!(current_key_val(&block),
-                   Some(("prefix_key2".as_bytes().to_vec(), "value".as_bytes().to_vec())));
+        assert_eq!(
+            current_key_val(&block),
+            Some((
+                "prefix_key2".as_bytes().to_vec(),
+                "value".as_bytes().to_vec()
+            ))
+        );
     }
 
     #[test]
@@ -426,23 +441,40 @@
 
         block.seek(&"prefix_key2".as_bytes());
         assert!(block.valid());
-        assert_eq!(current_key_val(&block),
-                   Some(("prefix_key2".as_bytes().to_vec(), "value".as_bytes().to_vec())));
+        assert_eq!(
+            current_key_val(&block),
+            Some((
+                "prefix_key2".as_bytes().to_vec(),
+                "value".as_bytes().to_vec()
+            ))
+        );
 
         block.seek(&"prefix_key0".as_bytes());
         assert!(block.valid());
-        assert_eq!(current_key_val(&block),
-                   Some(("prefix_key1".as_bytes().to_vec(), "value".as_bytes().to_vec())));
+        assert_eq!(
+            current_key_val(&block),
+            Some((
+                "prefix_key1".as_bytes().to_vec(),
+                "value".as_bytes().to_vec()
+            ))
+        );
 
         block.seek(&"key1".as_bytes());
         assert!(block.valid());
-        assert_eq!(current_key_val(&block),
-                   Some(("key1".as_bytes().to_vec(), "value1".as_bytes().to_vec())));
+        assert_eq!(
+            current_key_val(&block),
+            Some(("key1".as_bytes().to_vec(), "value1".as_bytes().to_vec()))
+        );
 
         block.seek(&"prefix_key3".as_bytes());
         assert!(block.valid());
-        assert_eq!(current_key_val(&block),
-                   Some(("prefix_key3".as_bytes().to_vec(), "value".as_bytes().to_vec())));
+        assert_eq!(
+            current_key_val(&block),
+            Some((
+                "prefix_key3".as_bytes().to_vec(),
+                "value".as_bytes().to_vec()
+            ))
+        );
 
         block.seek(&"prefix_key8".as_bytes());
         assert!(!block.valid());
@@ -470,8 +502,13 @@
 
             block.seek_to_last();
             assert!(block.valid());
-            assert_eq!(current_key_val(&block),
-                       Some(("prefix_key3".as_bytes().to_vec(), "value".as_bytes().to_vec())));
+            assert_eq!(
+                current_key_val(&block),
+                Some((
+                    "prefix_key3".as_bytes().to_vec(),
+                    "value".as_bytes().to_vec()
+                ))
+            );
         }
     }
 }
--- a/src/block_builder.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/block_builder.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -54,8 +54,10 @@
 
     pub fn add(&mut self, key: &[u8], val: &[u8]) {
         assert!(self.restart_counter <= self.opt.block_restart_interval);
-        assert!(self.buffer.is_empty() ||
-                self.opt.cmp.cmp(self.last_key.as_slice(), key) == Ordering::Less);
+        assert!(
+            self.buffer.is_empty()
+                || self.opt.cmp.cmp(self.last_key.as_slice(), key) == Ordering::Less
+        );
 
         let mut shared = 0;
 
@@ -77,9 +79,15 @@
 
         let non_shared = key.len() - shared;
 
-        self.buffer.write_varint(shared).expect("write to buffer failed");
-        self.buffer.write_varint(non_shared).expect("write to buffer failed");
-        self.buffer.write_varint(val.len()).expect("write to buffer failed");
+        self.buffer
+            .write_varint(shared)
+            .expect("write to buffer failed");
+        self.buffer
+            .write_varint(non_shared)
+            .expect("write to buffer failed");
+        self.buffer
+            .write_varint(val.len())
+            .expect("write to buffer failed");
         self.buffer.extend_from_slice(&key[shared..]);
         self.buffer.extend_from_slice(val);
 
@@ -96,11 +104,15 @@
 
         // 1. Append RESTARTS
         for r in self.restarts.iter() {
-            self.buffer.write_fixedint(*r as u32).expect("write to buffer failed");
+            self.buffer
+                .write_fixedint(*r as u32)
+                .expect("write to buffer failed");
         }
 
         // 2. Append N_RESTARTS
-        self.buffer.write_fixedint(self.restarts.len() as u32).expect("write to buffer failed");
+        self.buffer
+            .write_fixedint(self.restarts.len() as u32)
+            .expect("write to buffer failed");
 
         // done
         self.buffer
@@ -113,12 +125,17 @@
     use options;
 
     fn get_data() -> Vec<(&'static [u8], &'static [u8])> {
-        vec![("key1".as_bytes(), "value1".as_bytes()),
-             ("loooooooooooooooooooooooooooooooooongerkey1".as_bytes(), "shrtvl1".as_bytes()),
-             ("medium length key 1".as_bytes(), "some value 2".as_bytes()),
-             ("prefix_key1".as_bytes(), "value".as_bytes()),
-             ("prefix_key2".as_bytes(), "value".as_bytes()),
-             ("prefix_key3".as_bytes(), "value".as_bytes())]
+        vec![
+            ("key1".as_bytes(), "value1".as_bytes()),
+            (
+                "loooooooooooooooooooooooooooooooooongerkey1".as_bytes(),
+                "shrtvl1".as_bytes(),
+            ),
+            ("medium length key 1".as_bytes(), "some value 2".as_bytes()),
+            ("prefix_key1".as_bytes(), "value".as_bytes()),
+            ("prefix_key2".as_bytes(), "value".as_bytes()),
+            ("prefix_key3".as_bytes(), "value".as_bytes()),
+        ]
     }
 
     #[test]
--- a/src/blockhandle.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/blockhandle.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -17,11 +17,13 @@
         let (off, offsize) = usize::decode_var(from);
         let (sz, szsize) = usize::decode_var(&from[offsize..]);
 
-        (BlockHandle {
-            offset: off,
-            size: sz,
-        },
-         offsize + szsize)
+        (
+            BlockHandle {
+                offset: off,
+                size: sz,
+            },
+            offsize + szsize,
+        )
     }
 
     pub fn new(offset: usize, size: usize) -> BlockHandle {
--- a/src/cache.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/cache.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,5 +1,5 @@
 use std::collections::HashMap;
-use std::mem::{swap, replace};
+use std::mem::{replace, swap};
 
 // No clone, no copy! That asserts that an LRUHandle exists only once.
 type LRUHandle<T> = *mut LRUNode<T>;
@@ -68,8 +68,10 @@
     fn remove_last(&mut self) -> Option<T> {
         if self.head.prev.is_some() {
             let mut lasto = unsafe {
-                replace(&mut (*((*self.head.prev.unwrap()).prev.unwrap())).next,
-                        None)
+                replace(
+                    &mut (*((*self.head.prev.unwrap()).prev.unwrap())).next,
+                    None,
+                )
             };
 
             if let Some(ref mut last) = lasto {
--- a/src/cmp.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/cmp.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -41,7 +41,11 @@
             return a.to_vec();
         }
 
-        let min = if a.len() < b.len() { a.len() } else { b.len() };
+        let min = if a.len() < b.len() {
+            a.len()
+        } else {
+            b.len()
+        };
         let mut diff_at = 0;
 
         while diff_at < min && a[diff_at] == b[diff_at] {
@@ -102,7 +106,9 @@
         let sep: Vec<u8> = self.0.find_shortest_sep(keya, keyb);
 
         if sep.len() < keya.len() && self.0.cmp(keya, &sep) == Ordering::Less {
-            return LookupKey::new(&sep, types::MAX_SEQUENCE_NUMBER).internal_key().to_vec();
+            return LookupKey::new(&sep, types::MAX_SEQUENCE_NUMBER)
+                .internal_key()
+                .to_vec();
         }
 
         return LookupKey::new(&sep, seqa).internal_key().to_vec();
@@ -179,56 +185,102 @@
 
     #[test]
     fn test_cmp_defaultcmp_shortest_sep() {
-        assert_eq!(DefaultCmp.find_shortest_sep("abcd".as_bytes(), "abcf".as_bytes()),
-                   "abce".as_bytes());
-        assert_eq!(DefaultCmp.find_shortest_sep("abc".as_bytes(), "acd".as_bytes()),
-                   "abd".as_bytes());
-        assert_eq!(DefaultCmp.find_shortest_sep("abcdefghi".as_bytes(), "abcffghi".as_bytes()),
-                   "abce".as_bytes());
-        assert_eq!(DefaultCmp.find_shortest_sep("a".as_bytes(), "a".as_bytes()),
-                   "a".as_bytes());
-        assert_eq!(DefaultCmp.find_shortest_sep("a".as_bytes(), "b".as_bytes()),
-                   "b".as_bytes());
-        assert_eq!(DefaultCmp.find_shortest_sep("abc".as_bytes(), "zzz".as_bytes()),
-                   "b".as_bytes());
-        assert_eq!(DefaultCmp.find_shortest_sep("yyy".as_bytes(), "z".as_bytes()),
-                   "yyz".as_bytes());
-        assert_eq!(DefaultCmp.find_shortest_sep("".as_bytes(), "".as_bytes()),
-                   "".as_bytes());
+        assert_eq!(
+            DefaultCmp.find_shortest_sep("abcd".as_bytes(), "abcf".as_bytes()),
+            "abce".as_bytes()
+        );
+        assert_eq!(
+            DefaultCmp.find_shortest_sep("abc".as_bytes(), "acd".as_bytes()),
+            "abd".as_bytes()
+        );
+        assert_eq!(
+            DefaultCmp.find_shortest_sep("abcdefghi".as_bytes(), "abcffghi".as_bytes()),
+            "abce".as_bytes()
+        );
+        assert_eq!(
+            DefaultCmp.find_shortest_sep("a".as_bytes(), "a".as_bytes()),
+            "a".as_bytes()
+        );
+        assert_eq!(
+            DefaultCmp.find_shortest_sep("a".as_bytes(), "b".as_bytes()),
+            "b".as_bytes()
+        );
+        assert_eq!(
+            DefaultCmp.find_shortest_sep("abc".as_bytes(), "zzz".as_bytes()),
+            "b".as_bytes()
+        );
+        assert_eq!(
+            DefaultCmp.find_shortest_sep("yyy".as_bytes(), "z".as_bytes()),
+            "yyz".as_bytes()
+        );
+        assert_eq!(
+            DefaultCmp.find_shortest_sep("".as_bytes(), "".as_bytes()),
+            "".as_bytes()
+        );
     }
 
     #[test]
     fn test_cmp_defaultcmp_short_succ() {
-        assert_eq!(DefaultCmp.find_short_succ("abcd".as_bytes()),
-                   "b".as_bytes());
-        assert_eq!(DefaultCmp.find_short_succ("zzzz".as_bytes()),
-                   "{".as_bytes());
+        assert_eq!(
+            DefaultCmp.find_short_succ("abcd".as_bytes()),
+            "b".as_bytes()
+        );
+        assert_eq!(
+            DefaultCmp.find_short_succ("zzzz".as_bytes()),
+            "{".as_bytes()
+        );
         assert_eq!(DefaultCmp.find_short_succ(&[]), &[0xff]);
-        assert_eq!(DefaultCmp.find_short_succ(&[0xff, 0xff, 0xff]),
-                   &[0xff, 0xff, 0xff, 0xff]);
+        assert_eq!(
+            DefaultCmp.find_short_succ(&[0xff, 0xff, 0xff]),
+            &[0xff, 0xff, 0xff, 0xff]
+        );
     }
 
     #[test]
     fn test_cmp_internalkeycmp_shortest_sep() {
         let cmp = InternalKeyCmp(Rc::new(Box::new(DefaultCmp)));
-        assert_eq!(cmp.find_shortest_sep(LookupKey::new("abcd".as_bytes(), 1).internal_key(),
-                                         LookupKey::new("abcf".as_bytes(), 2).internal_key()),
-                   LookupKey::new("abce".as_bytes(), 1).internal_key());
-        assert_eq!(cmp.find_shortest_sep(LookupKey::new("abc".as_bytes(), 1).internal_key(),
-                                         LookupKey::new("zzz".as_bytes(), 2).internal_key()),
-                   LookupKey::new("b".as_bytes(), types::MAX_SEQUENCE_NUMBER).internal_key());
-        assert_eq!(cmp.find_shortest_sep(LookupKey::new("abc".as_bytes(), 1).internal_key(),
-                                         LookupKey::new("acd".as_bytes(), 2).internal_key()),
-                   LookupKey::new("abd".as_bytes(), 1).internal_key());
-        assert_eq!(cmp.find_shortest_sep(LookupKey::new("abc".as_bytes(), 1).internal_key(),
-                                         LookupKey::new("abe".as_bytes(), 2).internal_key()),
-                   LookupKey::new("abd".as_bytes(), 1).internal_key());
-        assert_eq!(cmp.find_shortest_sep(LookupKey::new("".as_bytes(), 1).internal_key(),
-                                         LookupKey::new("".as_bytes(), 2).internal_key()),
-                   LookupKey::new("".as_bytes(), 1).internal_key());
-        assert_eq!(cmp.find_shortest_sep(LookupKey::new("abc".as_bytes(), 2).internal_key(),
-                                         LookupKey::new("abc".as_bytes(), 2).internal_key()),
-                   LookupKey::new("abc".as_bytes(), 2).internal_key());
+        assert_eq!(
+            cmp.find_shortest_sep(
+                LookupKey::new("abcd".as_bytes(), 1).internal_key(),
+                LookupKey::new("abcf".as_bytes(), 2).internal_key()
+            ),
+            LookupKey::new("abce".as_bytes(), 1).internal_key()
+        );
+        assert_eq!(
+            cmp.find_shortest_sep(
+                LookupKey::new("abc".as_bytes(), 1).internal_key(),
+                LookupKey::new("zzz".as_bytes(), 2).internal_key()
+            ),
+            LookupKey::new("b".as_bytes(), types::MAX_SEQUENCE_NUMBER).internal_key()
+        );
+        assert_eq!(
+            cmp.find_shortest_sep(
+                LookupKey::new("abc".as_bytes(), 1).internal_key(),
+                LookupKey::new("acd".as_bytes(), 2).internal_key()
+            ),
+            LookupKey::new("abd".as_bytes(), 1).internal_key()
+        );
+        assert_eq!(
+            cmp.find_shortest_sep(
+                LookupKey::new("abc".as_bytes(), 1).internal_key(),
+                LookupKey::new("abe".as_bytes(), 2).internal_key()
+            ),
+            LookupKey::new("abd".as_bytes(), 1).internal_key()
+        );
+        assert_eq!(
+            cmp.find_shortest_sep(
+                LookupKey::new("".as_bytes(), 1).internal_key(),
+                LookupKey::new("".as_bytes(), 2).internal_key()
+            ),
+            LookupKey::new("".as_bytes(), 1).internal_key()
+        );
+        assert_eq!(
+            cmp.find_shortest_sep(
+                LookupKey::new("abc".as_bytes(), 2).internal_key(),
+                LookupKey::new("abc".as_bytes(), 2).internal_key()
+            ),
+            LookupKey::new("abc".as_bytes(), 2).internal_key()
+        );
     }
 
     #[test]
--- a/src/db_impl.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/db_impl.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -7,7 +7,7 @@
 
 use cmp::{Cmp, InternalKeyCmp};
 use env::{Env, FileLock};
-use error::{err, StatusCode, Result};
+use error::{err, Result, StatusCode};
 use filter::{BoxedFilterPolicy, InternalFilterPolicy};
 use infolog::Logger;
 use log::{LogReader, LogWriter};
@@ -18,8 +18,8 @@
 use snapshot::{Snapshot, SnapshotList};
 use table_builder::TableBuilder;
 use table_cache::{table_file_name, TableCache};
-use types::{parse_file_name, share, FileMetaData, FileNum, FileType, LdbIterator,
-            MAX_SEQUENCE_NUMBER, NUM_LEVELS, SequenceNumber, Shared};
+use types::{parse_file_name, share, FileMetaData, FileNum, FileType, LdbIterator, SequenceNumber,
+            Shared, MAX_SEQUENCE_NUMBER, NUM_LEVELS};
 use version_edit::VersionEdit;
 use version_set::{manifest_file_name, read_current_file, set_current_file, Compaction, VersionSet};
 use version::Version;
@@ -105,8 +105,9 @@
         // Create log file if an old one is not being reused.
         if db.log.is_none() {
             let lognum = db.vset.borrow_mut().new_file_number();
-            let logfile =
-                db.opt.env.open_writable_file(Path::new(&log_file_name(&db.name, lognum)))?;
+            let logfile = db.opt
+                .env
+                .open_writable_file(Path::new(&log_file_name(&db.name, lognum)))?;
             ve.set_log_num(lognum);
             db.log = Some(LogWriter::new(BufWriter::new(logfile)));
             db.log_num = Some(lognum);
@@ -154,8 +155,10 @@
             if e.code == StatusCode::NotFound && self.opt.create_if_missing {
                 self.initialize_db()?;
             } else {
-                return err(StatusCode::InvalidArgument,
-                           "database does not exist and create_if_missing is false");
+                return err(
+                    StatusCode::InvalidArgument,
+                    "database does not exist and create_if_missing is false",
+                );
             }
         }
 
@@ -172,8 +175,9 @@
         for file in &filenames {
             if let Ok((num, typ)) = parse_file_name(&file) {
                 expected.remove(&num);
-                if typ == FileType::Log &&
-                   (num >= self.vset.borrow().log_num || num == self.vset.borrow().prev_log_num) {
+                if typ == FileType::Log
+                    && (num >= self.vset.borrow().log_num || num == self.vset.borrow().prev_log_num)
+                {
                     log_files.push(num);
                 }
             }
@@ -206,19 +210,22 @@
     /// recover_log_file reads a single log file into a memtable, writing new L0 tables if
     /// necessary. If is_last is true, it checks whether the log file can be reused, and sets up
     /// the database's logging handles appropriately if that's the case.
-    fn recover_log_file(&mut self,
-                        log_num: FileNum,
-                        is_last: bool,
-                        ve: &mut VersionEdit)
-                        -> Result<(bool, SequenceNumber)> {
+    fn recover_log_file(
+        &mut self,
+        log_num: FileNum,
+        is_last: bool,
+        ve: &mut VersionEdit,
+    ) -> Result<(bool, SequenceNumber)> {
         let filename = log_file_name(&self.name, log_num);
         let logfile = self.opt.env.open_sequential_file(Path::new(&filename))?;
         // Use the user-supplied comparator; it will be wrapped inside a MemtableKeyCmp.
         let cmp: Rc<Box<Cmp>> = self.opt.cmp.clone();
 
-        let mut logreader = LogReader::new(logfile,
-                                           // checksum=
-                                           true);
+        let mut logreader = LogReader::new(
+            logfile,
+            // checksum=
+            true,
+        );
         log!(self.opt.log, "Recovering log file {}", filename);
         let mut scratch = vec![];
         let mut mem = MemTable::new(cmp.clone());
@@ -233,9 +240,11 @@
                 break;
             }
             if len < 12 {
-                log!(self.opt.log,
-                     "corruption in log file {:06}: record shorter than 12B",
-                     log_num);
+                log!(
+                    self.opt.log,
+                    "corruption in log file {:06}: record shorter than 12B",
+                    log_num
+                );
                 continue;
             }
 
@@ -314,7 +323,8 @@
                 log!(self.opt.log, "Deleting file type={:?} num={}", typ, num);
                 if let Err(e) = self.opt
                     .env
-                    .delete(Path::new(&format!("{}/{}", &self.name, &name))) {
+                    .delete(Path::new(&format!("{}/{}", &self.name, &name)))
+                {
                     log!(self.opt.log, "Deleting file num={} failed: {}", num, e);
                 }
             }
@@ -330,10 +340,10 @@
                 self.lock = Some(lockfile);
                 Ok(())
             }
-            Err(ref e) if e.code == StatusCode::LockError => {
-                err(StatusCode::LockError,
-                    "database lock is held by another instance")
-            }
+            Err(ref e) if e.code == StatusCode::LockError => err(
+                StatusCode::LockError,
+                "database lock is held by another instance",
+            ),
             Err(e) => Err(e),
         }
     }
@@ -463,10 +473,12 @@
 
     /// new_iter_at returns a DBIterator at the supplied snapshot.
     pub fn new_iter_at(&mut self, ss: Snapshot) -> Result<DBIterator> {
-        Ok(DBIterator::new(self.opt.cmp.clone(),
-                           self.vset.clone(),
-                           self.merge_iterators()?,
-                           ss))
+        Ok(DBIterator::new(
+            self.opt.cmp.clone(),
+            self.vset.clone(),
+            self.merge_iterators()?,
+            ss,
+        ))
     }
 
     /// merge_iterators produces a MergingIter merging the entries in the memtable, the immutable
@@ -532,7 +544,9 @@
         } else {
             // Create new memtable.
             let logn = self.vset.borrow_mut().new_file_number();
-            let logf = self.opt.env.open_writable_file(Path::new(&log_file_name(&self.name, logn)));
+            let logf = self.opt
+                .env
+                .open_writable_file(Path::new(&log_file_name(&self.name, logn)));
             if logf.is_err() {
                 self.vset.borrow_mut().reuse_file_number(logn);
                 Err(logf.err().unwrap())
@@ -584,12 +598,16 @@
         // Compact memtable.
         self.make_room_for_write(true)?;
 
-        let mut ifrom = LookupKey::new(from, MAX_SEQUENCE_NUMBER).internal_key().to_vec();
+        let mut ifrom = LookupKey::new(from, MAX_SEQUENCE_NUMBER)
+            .internal_key()
+            .to_vec();
         let iend = LookupKey::new_full(to, 0, ValueType::TypeDeletion);
 
         for l in 0..max_level + 1 {
             loop {
-                let c_ = self.vset.borrow_mut().compact_range(l, &ifrom, iend.internal_key());
+                let c_ = self.vset
+                    .borrow_mut()
+                    .compact_range(l, &ifrom, iend.internal_key());
                 if let Some(c) = c_ {
                     // Update ifrom to the largest key of the last file in this compaction.
                     let ix = c.num_inputs(0) - 1;
@@ -621,15 +639,19 @@
                 log!(self.opt.log, "trivial move failed: {}", e);
                 Err(e)
             } else {
-                log!(self.opt.log,
-                     "Moved num={} bytes={} from L{} to L{}",
-                     num,
-                     size,
-                     level,
-                     level + 1);
-                log!(self.opt.log,
-                     "Summary: {}",
-                     self.vset.borrow().current_summary());
+                log!(
+                    self.opt.log,
+                    "Moved num={} bytes={} from L{} to L{}",
+                    num,
+                    size,
+                    level,
+                    level + 1
+                );
+                log!(
+                    self.opt.log,
+                    "Summary: {}",
+                    self.vset.borrow().current_summary()
+                );
                 Ok(())
             }
         } else {
@@ -644,9 +666,11 @@
                 log!(self.opt.log, "Compaction work failed: {}", e);
             }
             self.install_compaction_results(state)?;
-            log!(self.opt.log,
-                 "Compaction finished: {}",
-                 self.vset.borrow().current_summary());
+            log!(
+                self.opt.log,
+                "Compaction finished: {}",
+                self.vset.borrow().current_summary()
+            );
 
             self.delete_obsolete_files()
         }
@@ -672,11 +696,12 @@
     }
 
     /// write_l0_table writes the given memtable to a table file.
-    fn write_l0_table(&mut self,
-                      memt: &MemTable,
-                      ve: &mut VersionEdit,
-                      base: Option<&Version>)
-                      -> Result<()> {
+    fn write_l0_table(
+        &mut self,
+        memt: &MemTable,
+        ve: &mut VersionEdit,
+        base: Option<&Version>,
+    ) -> Result<()> {
         let start_ts = self.opt.env.micros();
         let num = self.vset.borrow_mut().new_file_number();
         log!(self.opt.log, "Start write of L0 table {:06}", num);
@@ -691,11 +716,16 @@
 
         let cache_result = self.cache.borrow_mut().get_table(num);
         if let Err(e) = cache_result {
-            log!(self.opt.log,
-                 "L0 table {:06} not returned by cache: {}",
-                 num,
-                 e);
-            self.opt.env.delete(Path::new(&table_file_name(&self.name, num))).is_ok();
+            log!(
+                self.opt.log,
+                "L0 table {:06} not returned by cache: {}",
+                num,
+                e
+            );
+            self.opt
+                .env
+                .delete(Path::new(&table_file_name(&self.name, num)))
+                .is_ok();
             return Err(e);
         }
 
@@ -705,8 +735,10 @@
 
         let mut level = 0;
         if let Some(b) = base {
-            level = b.pick_memtable_output_level(parse_internal_key(&fmd.smallest).2,
-                                                 parse_internal_key(&fmd.largest).2);
+            level = b.pick_memtable_output_level(
+                parse_internal_key(&fmd.smallest).2,
+                parse_internal_key(&fmd.largest).2,
+            );
         }
 
         self.add_stats(level, stats);
@@ -722,12 +754,14 @@
             assert!(cs.builder.is_none());
         }
         let start_ts = self.opt.env.micros();
-        log!(self.opt.log,
-             "Compacting {} files at L{} and {} files at L{}",
-             cs.compaction.num_inputs(0),
-             cs.compaction.level(),
-             cs.compaction.num_inputs(1),
-             cs.compaction.level() + 1);
+        log!(
+            self.opt.log,
+            "Compacting {} files at L{} and {} files at L{}",
+            cs.compaction.num_inputs(0),
+            cs.compaction.level(),
+            cs.compaction.num_inputs(1),
+            cs.compaction.level() + 1
+        );
 
         let mut input = self.vset.borrow().make_input_iterator(&cs.compaction);
         input.seek_to_first();
@@ -772,8 +806,9 @@
             }
             // Entry is deletion; no older version is observable by any snapshot; and all entries
             // in compacted levels with smaller sequence numbers will
-            if ktyp == ValueType::TypeDeletion && seq <= cs.smallest_seq &&
-               cs.compaction.is_base_level_for(ukey) {
+            if ktyp == ValueType::TypeDeletion && seq <= cs.smallest_seq
+                && cs.compaction.is_base_level_for(ukey)
+            {
                 last_seq_for_key = seq;
                 input.advance();
                 continue;
@@ -822,10 +857,11 @@
         Ok(())
     }
 
-    fn finish_compaction_output(&mut self,
-                                cs: &mut CompactionState,
-                                largest: Vec<u8>)
-                                -> Result<()> {
+    fn finish_compaction_output(
+        &mut self,
+        cs: &mut CompactionState,
+        largest: Vec<u8>,
+    ) -> Result<()> {
         assert!(cs.builder.is_some());
         let output_num = cs.current_output().num;
         assert!(output_num > 0);
@@ -849,29 +885,35 @@
                 log!(self.opt.log, "New table can't be read: {}", e);
                 return Err(e);
             }
-            log!(self.opt.log,
-                 "New table num={}: keys={} size={}",
-                 output_num,
-                 entries,
-                 bytes);
+            log!(
+                self.opt.log,
+                "New table num={}: keys={} size={}",
+                output_num,
+                entries,
+                bytes
+            );
         }
         Ok(())
     }
 
     fn install_compaction_results(&mut self, mut cs: CompactionState) -> Result<()> {
-        log!(self.opt.log,
-             "Compacted {} L{} files + {} L{} files => {}B",
-             cs.compaction.num_inputs(0),
-             cs.compaction.level(),
-             cs.compaction.num_inputs(1),
-             cs.compaction.level() + 1,
-             cs.total_bytes);
+        log!(
+            self.opt.log,
+            "Compacted {} L{} files + {} L{} files => {}B",
+            cs.compaction.num_inputs(0),
+            cs.compaction.level(),
+            cs.compaction.num_inputs(1),
+            cs.compaction.level() + 1,
+            cs.total_bytes
+        );
         cs.compaction.add_input_deletions();
         let level = cs.compaction.level();
         for output in &cs.outputs {
             cs.compaction.edit().add_file(level + 1, output.clone());
         }
-        self.vset.borrow_mut().log_and_apply(cs.compaction.into_edit())
+        self.vset
+            .borrow_mut()
+            .log_and_apply(cs.compaction.into_edit())
     }
 }
 
@@ -929,11 +971,12 @@
     }
 }
 
-pub fn build_table<I: LdbIterator>(dbname: &str,
-                                   opt: &Options,
-                                   mut from: I,
-                                   num: FileNum)
-                                   -> Result<FileMetaData> {
+pub fn build_table<I: LdbIterator>(
+    dbname: &str,
+    opt: &Options,
+    mut from: I,
+    num: FileNum,
+) -> Result<FileMetaData> {
     from.reset();
     let filename = table_file_name(dbname, num);
 
@@ -992,7 +1035,8 @@
     env.mkdir(Path::new(db)).is_ok();
     if let Ok(e) = env.exists(Path::new(&logfilename)) {
         if e {
-            env.rename(Path::new(&logfilename), Path::new(&oldlogfilename)).is_ok();
+            env.rename(Path::new(&logfilename), Path::new(&oldlogfilename))
+                .is_ok();
         }
     }
     if let Ok(w) = env.open_writable_file(Path::new(&logfilename)) {
@@ -1101,10 +1145,12 @@
         let mut mt = MemTable::new(options::for_test().cmp);
         let mut i = 1;
         for k in ["abc", "def", "ghi", "jkl", "mno", "aabc", "test123"].iter() {
-            mt.add(i,
-                   ValueType::TypeValue,
-                   k.as_bytes(),
-                   "looooongval".as_bytes());
+            mt.add(
+                i,
+                ValueType::TypeValue,
+                k.as_bytes(),
+                "looooongval".as_bytes(),
+            );
             i += 1;
         }
         mt
@@ -1124,8 +1170,10 @@
             opt.reuse_manifest = false;
             let _ = DB::open("otherdb", opt.clone()).unwrap();
 
-            println!("children after: {:?}",
-                     env.children(Path::new("otherdb/")).unwrap());
+            println!(
+                "children after: {:?}",
+                env.children(Path::new("otherdb/")).unwrap()
+            );
             assert!(env.exists(Path::new("otherdb/CURRENT")).unwrap());
             // Database is initialized and initial manifest reused.
             assert!(!env.exists(Path::new("otherdb/MANIFEST-000001")).unwrap());
@@ -1138,8 +1186,10 @@
             opt.reuse_manifest = true;
             let mut db = DB::open("db", opt.clone()).unwrap();
 
-            println!("children after: {:?}",
-                     env.children(Path::new("db/")).unwrap());
+            println!(
+                "children after: {:?}",
+                env.children(Path::new("db/")).unwrap()
+            );
             assert!(env.exists(Path::new("db/CURRENT")).unwrap());
             // Database is initialized and initial manifest reused.
             assert!(env.exists(Path::new("db/MANIFEST-000001")).unwrap());
@@ -1151,15 +1201,19 @@
         }
 
         {
-            println!("children before: {:?}",
-                     env.children(Path::new("db/")).unwrap());
+            println!(
+                "children before: {:?}",
+                env.children(Path::new("db/")).unwrap()
+            );
             let mut opt = opt.clone();
             opt.reuse_manifest = false;
             opt.reuse_logs = false;
             let mut db = DB::open("db", opt.clone()).unwrap();
 
-            println!("children after: {:?}",
-                     env.children(Path::new("db/")).unwrap());
+            println!(
+                "children after: {:?}",
+                env.children(Path::new("db/")).unwrap()
+            );
             // Obsolete manifest is deleted.
             assert!(!env.exists(Path::new("db/MANIFEST-000001")).unwrap());
             // New manifest is created.
@@ -1172,27 +1226,34 @@
             // Check that entry exists and is correct. Phew, long call chain!
             let current = db.current();
             log!(opt.log, "files: {:?}", current.borrow().files);
-            assert_eq!("def".as_bytes(),
-                       current.borrow_mut()
-                           .get(LookupKey::new("abc".as_bytes(), 1).internal_key())
-                           .unwrap()
-                           .unwrap()
-                           .0
-                           .as_slice());
+            assert_eq!(
+                "def".as_bytes(),
+                current
+                    .borrow_mut()
+                    .get(LookupKey::new("abc".as_bytes(), 1).internal_key())
+                    .unwrap()
+                    .unwrap()
+                    .0
+                    .as_slice()
+            );
             db.put("abe".as_bytes(), "def".as_bytes()).unwrap();
         }
 
         {
-            println!("children before: {:?}",
-                     env.children(Path::new("db/")).unwrap());
+            println!(
+                "children before: {:?}",
+                env.children(Path::new("db/")).unwrap()
+            );
             // reuse_manifest above causes the old manifest to be deleted as obsolete, but no new
             // manifest is written. CURRENT becomes stale.
             let mut opt = opt.clone();
             opt.reuse_logs = true;
             let db = DB::open("db", opt).unwrap();
 
-            println!("children after: {:?}",
-                     env.children(Path::new("db/")).unwrap());
+            println!(
+                "children after: {:?}",
+                env.children(Path::new("db/")).unwrap()
+            );
             assert!(!env.exists(Path::new("db/MANIFEST-000001")).unwrap());
             assert!(env.exists(Path::new("db/MANIFEST-000002")).unwrap());
             assert!(!env.exists(Path::new("db/MANIFEST-000005")).unwrap());
@@ -1201,12 +1262,14 @@
             assert!(!env.exists(Path::new("db/000006.log")).unwrap());
             // Log is reused, so memtable should contain last written entry from above.
             assert_eq!(1, db.mem.len());
-            assert_eq!("def".as_bytes(),
-                       db.mem
-                           .get(&LookupKey::new("abe".as_bytes(), 3))
-                           .0
-                           .unwrap()
-                           .as_slice());
+            assert_eq!(
+                "def".as_bytes(),
+                db.mem
+                    .get(&LookupKey::new("abe".as_bytes(), 3))
+                    .0
+                    .unwrap()
+                    .as_slice()
+            );
         }
     }
 
@@ -1215,11 +1278,15 @@
         let (mut db, opt) = build_db();
         let env = &opt.env;
 
-        println!("children before: {:?}",
-                 env.children(Path::new("db/")).unwrap());
+        println!(
+            "children before: {:?}",
+            env.children(Path::new("db/")).unwrap()
+        );
         db.compact_range(b"aaa", b"dba").unwrap();
-        println!("children after: {:?}",
-                 env.children(Path::new("db/")).unwrap());
+        println!(
+            "children after: {:?}",
+            env.children(Path::new("db/")).unwrap()
+        );
 
         assert_eq!(250, opt.env.size_of(Path::new("db/000007.ldb")).unwrap());
         assert_eq!(200, opt.env.size_of(Path::new("db/000008.ldb")).unwrap());
@@ -1247,11 +1314,15 @@
 
         db.put(b"xxx", b"123").unwrap();
 
-        println!("children before: {:?}",
-                 env.children(Path::new("db/")).unwrap());
+        println!(
+            "children before: {:?}",
+            env.children(Path::new("db/")).unwrap()
+        );
         db.compact_range(b"aaa", b"dba").unwrap();
-        println!("children after: {:?}",
-                 env.children(Path::new("db/")).unwrap());
+        println!(
+            "children after: {:?}",
+            env.children(Path::new("db/")).unwrap()
+        );
 
         assert_eq!(250, opt.env.size_of(Path::new("db/000007.ldb")).unwrap());
         assert_eq!(200, opt.env.size_of(Path::new("db/000008.ldb")).unwrap());
@@ -1280,8 +1351,10 @@
     fn test_db_impl_locking() {
         let opt = options::for_test();
         let db = DB::open("db", opt.clone()).unwrap();
-        let want_err = Status::new(StatusCode::LockError,
-                                   "database lock is held by another instance");
+        let want_err = Status::new(
+            StatusCode::LockError,
+            "database lock is held by another instance",
+        );
         assert_eq!(want_err, DB::open("db", opt.clone()).err().unwrap());
     }
 
@@ -1294,10 +1367,14 @@
         let f = build_table("db", &opt, mt.iter(), 123).unwrap();
         let path = Path::new("db/000123.ldb");
 
-        assert_eq!(LookupKey::new("aabc".as_bytes(), 6).internal_key(),
-                   f.smallest.as_slice());
-        assert_eq!(LookupKey::new("test123".as_bytes(), 7).internal_key(),
-                   f.largest.as_slice());
+        assert_eq!(
+            LookupKey::new("aabc".as_bytes(), 6).internal_key(),
+            f.smallest.as_slice()
+        );
+        assert_eq!(
+            LookupKey::new("test123".as_bytes(), 7).internal_key(),
+            f.largest.as_slice()
+        );
         assert_eq!(379, f.size);
         assert_eq!(123, f.num);
         assert!(opt.env.exists(path).unwrap());
@@ -1312,15 +1389,27 @@
         {
             // Corrupt table; make sure it doesn't load fully.
             let mut buf = vec![];
-            opt.env.open_sequential_file(path).unwrap().read_to_end(&mut buf).unwrap();
+            opt.env
+                .open_sequential_file(path)
+                .unwrap()
+                .read_to_end(&mut buf)
+                .unwrap();
             buf[150] += 1;
-            opt.env.open_writable_file(path).unwrap().write_all(&buf).unwrap();
+            opt.env
+                .open_writable_file(path)
+                .unwrap()
+                .write_all(&buf)
+                .unwrap();
 
             let mut tc = TableCache::new("db", opt.clone(), 100);
             let tbl = tc.get_table(123).unwrap();
             // The last two entries are skipped due to the corruption above.
-            assert_eq!(5,
-                       LdbIteratorIter::wrap(&mut tbl.iter()).map(|v| println!("{:?}", v)).count());
+            assert_eq!(
+                5,
+                LdbIteratorIter::wrap(&mut tbl.iter())
+                    .map(|v| println!("{:?}", v))
+                    .count()
+            );
         }
     }
 
@@ -1350,8 +1439,10 @@
         assert!(db.get_at(&old_ss, "xyz".as_bytes()).unwrap().is_none());
 
         // memtable get
-        assert_eq!("123".as_bytes(),
-                   db.get("xyz".as_bytes()).unwrap().as_slice());
+        assert_eq!(
+            "123".as_bytes(),
+            db.get("xyz".as_bytes()).unwrap().as_slice()
+        );
         assert!(db.get_internal(31, "xyy".as_bytes()).unwrap().is_some());
         assert!(db.get_internal(32, "xyy".as_bytes()).unwrap().is_some());
 
@@ -1359,20 +1450,29 @@
         assert!(db.get_internal(32, "xyz".as_bytes()).unwrap().is_some());
 
         // table get
-        assert_eq!("val2".as_bytes(),
-                   db.get("eab".as_bytes()).unwrap().as_slice());
+        assert_eq!(
+            "val2".as_bytes(),
+            db.get("eab".as_bytes()).unwrap().as_slice()
+        );
         assert!(db.get_internal(3, "eab".as_bytes()).unwrap().is_none());
         assert!(db.get_internal(32, "eab".as_bytes()).unwrap().is_some());
 
         {
             let ss = db.get_snapshot();
-            assert_eq!("val2".as_bytes(),
-                       db.get_at(&ss, "eab".as_bytes()).unwrap().unwrap().as_slice());
+            assert_eq!(
+                "val2".as_bytes(),
+                db.get_at(&ss, "eab".as_bytes())
+                    .unwrap()
+                    .unwrap()
+                    .as_slice()
+            );
         }
 
         // from table.
-        assert_eq!("val2".as_bytes(),
-                   db.get("cab".as_bytes()).unwrap().as_slice());
+        assert_eq!(
+            "val2".as_bytes(),
+            db.get("cab".as_bytes()).unwrap().as_slice()
+        );
     }
 
     #[test]
@@ -1408,7 +1508,6 @@
         assert!(env.exists(Path::new(&table_file_name(name, 13))).unwrap());
     }
 
-
     #[test]
     fn test_db_impl_compaction_trivial_move() {
         let mut db = DB::open("db", options::for_test()).unwrap();
@@ -1423,8 +1522,10 @@
         db.imm = Some(imm);
         db.compact_memtable().unwrap();
 
-        println!("children after: {:?}",
-                 db.opt.env.children(Path::new("db/")).unwrap());
+        println!(
+            "children after: {:?}",
+            db.opt.env.children(Path::new("db/")).unwrap()
+        );
         assert!(db.opt.env.exists(Path::new("db/000004.ldb")).unwrap());
 
         {
@@ -1458,9 +1559,10 @@
         assert!(db.opt.env.exists(Path::new("db/000002.log")).unwrap());
         assert!(db.opt.env.exists(Path::new("db/000003.ldb")).unwrap());
         assert_eq!(351, db.opt.env.size_of(Path::new("db/000003.ldb")).unwrap());
-        assert_eq!(7,
-                   LdbIteratorIter::wrap(&mut db.cache.borrow_mut().get_table(3).unwrap().iter())
-                       .count());
+        assert_eq!(
+            7,
+            LdbIteratorIter::wrap(&mut db.cache.borrow_mut().get_table(3).unwrap().iter()).count()
+        );
     }
 
     #[test]
@@ -1511,7 +1613,10 @@
         let name = "db";
 
         let stuff = "abcdefghijkl".as_bytes();
-        env.open_writable_file(Path::new("db/000001.ldb")).unwrap().write_all(stuff).unwrap();
+        env.open_writable_file(Path::new("db/000001.ldb"))
+            .unwrap()
+            .write_all(stuff)
+            .unwrap();
         let mut fmd = FileMetaData::default();
         fmd.num = 1;
 
--- a/src/db_iter.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/db_iter.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,4 +1,3 @@
-
 use cmp::Cmp;
 use key_types::{parse_internal_key, truncate_to_userkey, LookupKey, ValueType};
 use merging_iter::MergingIter;
@@ -36,11 +35,12 @@
 }
 
 impl DBIterator {
-    pub fn new(cmp: Rc<Box<Cmp>>,
-               vset: Shared<VersionSet>,
-               iter: MergingIter,
-               ss: Snapshot)
-               -> DBIterator {
+    pub fn new(
+        cmp: Rc<Box<Cmp>>,
+        vset: Shared<VersionSet>,
+        iter: MergingIter,
+        ss: Snapshot,
+    ) -> DBIterator {
         DBIterator {
             cmp: cmp,
             vset: vset,
@@ -130,8 +130,9 @@
             let (typ, seq, ukey) = parse_internal_key(&self.keybuf);
 
             if seq > 0 && seq <= self.ss.sequence() {
-                if value_type != ValueType::TypeDeletion &&
-                   self.cmp.cmp(ukey, &self.savedkey) == Ordering::Less {
+                if value_type != ValueType::TypeDeletion
+                    && self.cmp.cmp(ukey, &self.savedkey) == Ordering::Less
+                {
                     // We found a non-deleted entry for a previous key (in the previous iteration)
                     break;
                 }
@@ -185,8 +186,10 @@
             assert!(self.iter.current(&mut self.savedkey, &mut self.savedval));
             truncate_to_userkey(&mut self.savedkey);
         }
-        self.find_next_user_entry(// skipping=
-                                  true)
+        self.find_next_user_entry(
+            // skipping=
+            true,
+        )
     }
     fn current(&self, key: &mut Vec<u8>, val: &mut Vec<u8>) -> bool {
         if !self.valid() {
@@ -243,11 +246,14 @@
         self.dir = Direction::Forward;
         self.savedkey.clear();
         self.savedval.clear();
-        self.savedkey.extend_from_slice(LookupKey::new(to, self.ss.sequence()).internal_key());
+        self.savedkey
+            .extend_from_slice(LookupKey::new(to, self.ss.sequence()).internal_key());
         self.iter.seek(&self.savedkey);
         if self.iter.valid() {
-            self.find_next_user_entry(// skipping=
-                                      false);
+            self.find_next_user_entry(
+                // skipping=
+                false,
+            );
         } else {
             self.valid = false;
         }
@@ -257,8 +263,10 @@
         self.savedval.clear();
         self.iter.seek_to_first();
         if self.iter.valid() {
-            self.find_next_user_entry(// skipping=
-                                      false);
+            self.find_next_user_entry(
+                // skipping=
+                false,
+            );
         } else {
             self.valid = false;
         }
@@ -294,9 +302,12 @@
         let mut iter = db.new_iter().unwrap();
 
         // keys and values come from make_version(); they are each the latest entry.
-        let keys: &[&[u8]] = &[b"aaa", b"aab", b"aax", b"aba", b"bab", b"bba", b"cab", b"cba"];
-        let vals: &[&[u8]] = &[b"val1", b"val2", b"val2", b"val3", b"val4", b"val5", b"val2",
-                               b"val3"];
+        let keys: &[&[u8]] = &[
+            b"aaa", b"aab", b"aax", b"aba", b"bab", b"bba", b"cab", b"cba"
+        ];
+        let vals: &[&[u8]] = &[
+            b"val1", b"val2", b"val2", b"val3", b"val4", b"val5", b"val2", b"val3"
+        ];
 
         for (k, v) in keys.iter().zip(vals.iter()) {
             assert!(iter.advance());
@@ -323,31 +334,38 @@
         let mut iter = db.new_iter().unwrap();
 
         // keys and values come from make_version(); they are each the latest entry.
-        let keys: &[&[u8]] = &[b"aaa", b"aab", b"aax", b"aba", b"bab", b"bba", b"cab", b"cba"];
-        let vals: &[&[u8]] = &[b"val1", b"val2", b"val2", b"val3", b"val4", b"val5", b"val2",
-                               b"val3"];
+        let keys: &[&[u8]] = &[
+            b"aaa", b"aab", b"aax", b"aba", b"bab", b"bba", b"cab", b"cba"
+        ];
+        let vals: &[&[u8]] = &[
+            b"val1", b"val2", b"val2", b"val3", b"val4", b"val5", b"val2", b"val3"
+        ];
 
         // This specifies the direction that the iterator should move to. Based on this, an index
         // into keys/vals is incremented/decremented so that we get a nice test checking iterator
         // move correctness.
-        let dirs: &[Direction] = &[Direction::Forward,
-                                   Direction::Forward,
-                                   Direction::Forward,
-                                   Direction::Reverse,
-                                   Direction::Reverse,
-                                   Direction::Reverse,
-                                   Direction::Forward,
-                                   Direction::Forward,
-                                   Direction::Reverse,
-                                   Direction::Forward,
-                                   Direction::Forward,
-                                   Direction::Forward,
-                                   Direction::Forward];
+        let dirs: &[Direction] = &[
+            Direction::Forward,
+            Direction::Forward,
+            Direction::Forward,
+            Direction::Reverse,
+            Direction::Reverse,
+            Direction::Reverse,
+            Direction::Forward,
+            Direction::Forward,
+            Direction::Reverse,
+            Direction::Forward,
+            Direction::Forward,
+            Direction::Forward,
+            Direction::Forward,
+        ];
         let mut i = 0;
         iter.advance();
         for d in dirs {
-            assert_eq!((keys[i].to_vec(), vals[i].to_vec()),
-                       current_key_val(&iter).unwrap());
+            assert_eq!(
+                (keys[i].to_vec(), vals[i].to_vec()),
+                current_key_val(&iter).unwrap()
+            );
             match *d {
                 Direction::Forward => {
                     assert!(iter.advance());
@@ -368,7 +386,9 @@
 
         // gca is the deleted entry.
         let keys: &[&[u8]] = &[b"aab", b"aaa", b"cab", b"eaa", b"aaa", b"iba", b"fba"];
-        let vals: &[&[u8]] = &[b"val2", b"val1", b"val2", b"val1", b"val1", b"val2", b"val3"];
+        let vals: &[&[u8]] = &[
+            b"val2", b"val1", b"val2", b"val1", b"val1", b"val2", b"val3"
+        ];
 
         for (k, v) in keys.iter().zip(vals.iter()) {
             println!("{:?}", String::from_utf8(k.to_vec()).unwrap());
@@ -385,8 +405,10 @@
         // Seek skips over deleted entry.
         iter.seek(b"gca");
         assert!(iter.valid());
-        assert_eq!((b"gda".to_vec(), b"val5".to_vec()),
-                   current_key_val(&iter).unwrap());
+        assert_eq!(
+            (b"gda".to_vec(), b"val5".to_vec()),
+            current_key_val(&iter).unwrap()
+        );
     }
 
     #[test]
@@ -429,7 +451,6 @@
             db.delete(b"xx2").unwrap();
         }
 
-
         {
             let mut db = DB::open("db", opt.clone()).unwrap();
             db.put(b"xx4", b"222").unwrap();
@@ -442,19 +463,17 @@
             // xx5 should not be visible.
             db.put(b"xx5", b"223").unwrap();
 
-            let expected: HashMap<Vec<u8>, Vec<u8>> = HashMap::from_iter(vec![
-                                                  (b"xx1".to_vec(), b"111".to_vec()),
-                                                  (b"xx4".to_vec(), b"222".to_vec()),
-                                                  (b"aaa".to_vec(), b"val1".to_vec()),
-                                                  (b"cab".to_vec(), b"val2".to_vec()),
-            ]
-                .into_iter());
-            let non_existing: HashSet<Vec<u8>> = HashSet::from_iter(vec![
-                                                      b"gca".to_vec(),
-                                                      b"xx2".to_vec(),
-                                                      b"xx5".to_vec(),
-            ]
-                .into_iter());
+            let expected: HashMap<Vec<u8>, Vec<u8>> = HashMap::from_iter(
+                vec![
+                    (b"xx1".to_vec(), b"111".to_vec()),
+                    (b"xx4".to_vec(), b"222".to_vec()),
+                    (b"aaa".to_vec(), b"val1".to_vec()),
+                    (b"cab".to_vec(), b"val2".to_vec()),
+                ].into_iter(),
+            );
+            let non_existing: HashSet<Vec<u8>> = HashSet::from_iter(
+                vec![b"gca".to_vec(), b"xx2".to_vec(), b"xx5".to_vec()].into_iter(),
+            );
 
             let mut iter = db.new_iter_at(ss.clone()).unwrap();
             for (k, v) in LdbIteratorIter::wrap(&mut iter) {
--- a/src/disk_env.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/disk_env.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,6 +1,6 @@
 use env::{path_to_str, Env, FileLock, Logger, RandomAccess};
 use env_common::{micros, sleep_for};
-use error::{err, Status, StatusCode, Result};
+use error::{err, Result, Status, StatusCode};
 
 use std::collections::HashMap;
 use std::fs;
@@ -26,7 +26,9 @@
 
 impl PosixDiskEnv {
     pub fn new() -> PosixDiskEnv {
-        PosixDiskEnv { locks: Arc::new(Mutex::new(HashMap::new())) }
+        PosixDiskEnv {
+            locks: Arc::new(Mutex::new(HashMap::new())),
+        }
     }
 }
 
@@ -41,12 +43,14 @@
 // error conversion using std::convert::From.
 impl Env for PosixDiskEnv {
     fn open_sequential_file(&self, p: &Path) -> Result<Box<Read>> {
-        Ok(Box::new(fs::OpenOptions::new().read(true)
+        Ok(Box::new(fs::OpenOptions::new()
+            .read(true)
             .open(p)
             .map_err(|e| map_err_with_name("open (seq)", p, e))?))
     }
     fn open_random_access_file(&self, p: &Path) -> Result<Box<RandomAccess>> {
-        Ok(fs::OpenOptions::new().read(true)
+        Ok(fs::OpenOptions::new()
+            .read(true)
             .open(p)
             .map(|f| {
                 let b: Box<RandomAccess> = Box::new(f);
@@ -55,14 +59,16 @@
             .map_err(|e| map_err_with_name("open (randomaccess)", p, e))?)
     }
     fn open_writable_file(&self, p: &Path) -> Result<Box<Write>> {
-        Ok(Box::new(fs::OpenOptions::new().create(true)
+        Ok(Box::new(fs::OpenOptions::new()
+            .create(true)
             .write(true)
             .append(false)
             .open(p)
             .map_err(|e| map_err_with_name("open (write)", p, e))?))
     }
     fn open_appendable_file(&self, p: &Path) -> Result<Box<Write>> {
-        Ok(Box::new(fs::OpenOptions::new().create(true)
+        Ok(Box::new(fs::OpenOptions::new()
+            .create(true)
             .write(true)
             .append(true)
             .open(p)
@@ -74,7 +80,8 @@
     }
     fn children(&self, p: &Path) -> Result<Vec<String>> {
         let dir_reader = fs::read_dir(p).map_err(|e| map_err_with_name("children", p, e))?;
-        let filenames = dir_reader.map(|r| {
+        let filenames = dir_reader
+            .map(|r| {
                 if !r.is_ok() {
                     "".to_string()
                 } else {
@@ -109,7 +116,8 @@
         if locks.contains_key(&p.to_str().unwrap().to_string()) {
             Err(Status::new(StatusCode::AlreadyExists, "Lock is held"))
         } else {
-            let f = fs::OpenOptions::new().write(true)
+            let f = fs::OpenOptions::new()
+                .write(true)
                 .create(true)
                 .open(p)
                 .map_err(|e| map_err_with_name("lock", p, e))?;
@@ -123,17 +131,24 @@
             };
             let fd = f.into_raw_fd();
             let result = unsafe {
-                libc::fcntl(fd,
-                            libc::F_SETLK,
-                            mem::transmute::<&libc::flock, *const libc::flock>(&&flock_arg))
+                libc::fcntl(
+                    fd,
+                    libc::F_SETLK,
+                    mem::transmute::<&libc::flock, *const libc::flock>(&&flock_arg),
+                )
             };
 
             if result < 0 {
-                return Err(Status::new(StatusCode::AlreadyExists, "Lock is held (fcntl)"));
+                return Err(Status::new(
+                    StatusCode::AlreadyExists,
+                    "Lock is held (fcntl)",
+                ));
             }
 
             locks.insert(p.to_str().unwrap().to_string(), fd);
-            let lock = FileLock { id: p.to_str().unwrap().to_string() };
+            let lock = FileLock {
+                id: p.to_str().unwrap().to_string(),
+            };
             Ok(lock)
         }
     }
@@ -141,8 +156,10 @@
         let mut locks = self.locks.lock().unwrap();
 
         if !locks.contains_key(&l.id) {
-            return err(StatusCode::LockError,
-                       &format!("unlocking a file that is not locked: {}", l.id));
+            return err(
+                StatusCode::LockError,
+                &format!("unlocking a file that is not locked: {}", l.id),
+            );
         } else {
             let fd = locks.remove(&l.id).unwrap();
             let flock_arg = libc::flock {
@@ -153,9 +170,11 @@
                 l_pid: 0,
             };
             let result = unsafe {
-                libc::fcntl(fd,
-                            libc::F_SETLK,
-                            mem::transmute::<&libc::flock, *const libc::flock>(&&flock_arg))
+                libc::fcntl(
+                    fd,
+                    libc::F_SETLK,
+                    mem::transmute::<&libc::flock, *const libc::flock>(&&flock_arg),
+                )
             };
             if result < 0 {
                 return err(StatusCode::LockError, &format!("unlock failed: {}", l.id));
@@ -165,7 +184,8 @@
     }
 
     fn new_logger(&self, p: &Path) -> Result<Logger> {
-        self.open_appendable_file(p).map(|dst| Logger::new(Box::new(dst)))
+        self.open_appendable_file(p)
+            .map(|dst| Logger::new(Box::new(dst)))
     }
 
     fn micros(&self) -> u64 {
@@ -259,10 +279,9 @@
         let env = PosixDiskEnv::new();
 
         assert!(env.mkdir(dirname).is_ok());
-        assert!(env.open_writable_file(String::from_iter(vec![d.to_string(), "f1.txt".to_string()]
-                    .into_iter())
-                .as_ref())
-            .is_ok());
+        assert!(env.open_writable_file(
+            String::from_iter(vec![d.to_string(), "f1.txt".to_string()].into_iter()).as_ref()
+        ).is_ok());
         assert_eq!(env.children(dirname).unwrap().len(), 1);
         assert!(env.rmdir(dirname).is_ok());
     }
--- a/src/env_common.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/env_common.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,7 +1,6 @@
 use std::thread;
 use std::time;
 
-
 pub fn micros() -> u64 {
     loop {
         let now = time::SystemTime::now().duration_since(time::UNIX_EPOCH);
--- a/src/filter.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/filter.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,4 +1,3 @@
-
 use std::rc::Rc;
 
 use integer_encoding::FixedInt;
@@ -243,11 +242,13 @@
         let mut concat = vec![];
         let mut offs = vec![];
 
-        for d in ["abc123def456".as_bytes(),
-                  "xxx111xxx222".as_bytes(),
-                  "ab00cd00ab".as_bytes(),
-                  "908070605040302010".as_bytes()]
-            .iter() {
+        for d in [
+            "abc123def456".as_bytes(),
+            "xxx111xxx222".as_bytes(),
+            "ab00cd00ab".as_bytes(),
+            "908070605040302010".as_bytes(),
+        ].iter()
+        {
             offs.push(concat.len());
             concat.extend_from_slice(d);
         }
@@ -266,7 +267,9 @@
 
     /// Creates a filter using the keys from input_data() but converted to InternalKey format.
     fn create_internalkey_filter() -> Vec<u8> {
-        let fpol = Rc::new(Box::new(InternalFilterPolicy::new(BloomPolicy::new(_BITS_PER_KEY))));
+        let fpol = Rc::new(Box::new(InternalFilterPolicy::new(BloomPolicy::new(
+            _BITS_PER_KEY,
+        ))));
         let (data, offs) = input_data();
         let (mut intdata, mut intoffs) = (vec![], vec![]);
 
--- a/src/filter_block.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/filter_block.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -158,7 +158,8 @@
         assert!(filter_begin < filter_end);
         assert!(filter_end <= self.offsets_offset);
 
-        self.policy.key_may_match(key, &self.block[filter_begin..filter_end])
+        self.policy
+            .key_may_match(key, &self.block[filter_begin..filter_end])
     }
 }
 
@@ -176,7 +177,12 @@
     }
 
     fn get_keys() -> Vec<&'static [u8]> {
-        vec!["abcd".as_bytes(), "efgh".as_bytes(), "ijkl".as_bytes(), "mnopqrstuvwxyz".as_bytes()]
+        vec![
+            "abcd".as_bytes(),
+            "efgh".as_bytes(),
+            "ijkl".as_bytes(),
+            "mnopqrstuvwxyz".as_bytes(),
+        ]
     }
 
     fn produce_filter_block() -> Vec<u8> {
@@ -205,10 +211,14 @@
         // 2 blocks of 4 filters of 4 bytes plus 1B for `k`; plus three filter offsets (because of
         //   the block offsets of 0 and 5000); plus footer
         assert_eq!(result.len(), 2 * (get_keys().len() * 4 + 1) + (3 * 4) + 5);
-        assert_eq!(result,
-                   vec![234, 195, 25, 155, 61, 141, 173, 140, 221, 28, 222, 92, 220, 112, 234,
-                        227, 22, 234, 195, 25, 155, 61, 141, 173, 140, 221, 28, 222, 92, 220,
-                        112, 234, 227, 22, 0, 0, 0, 0, 17, 0, 0, 0, 17, 0, 0, 0, 34, 0, 0, 0, 11]);
+        assert_eq!(
+            result,
+            vec![
+                234, 195, 25, 155, 61, 141, 173, 140, 221, 28, 222, 92, 220, 112, 234, 227, 22,
+                234, 195, 25, 155, 61, 141, 173, 140, 221, 28, 222, 92, 220, 112, 234, 227, 22, 0,
+                0, 0, 0, 17, 0, 0, 0, 17, 0, 0, 0, 34, 0, 0, 0, 11,
+            ]
+        );
     }
 
     #[test]
@@ -216,15 +226,23 @@
         let result = produce_filter_block();
         let reader = FilterBlockReader::new_owned(Rc::new(Box::new(BloomPolicy::new(32))), result);
 
-        assert_eq!(reader.offset_of(get_filter_index(5121, FILTER_BASE_LOG2)),
-                   17); // third block in third filter
+        assert_eq!(
+            reader.offset_of(get_filter_index(5121, FILTER_BASE_LOG2)),
+            17
+        ); // third block in third filter
 
-        let unknown_keys = vec!["xsb".as_bytes(), "9sad".as_bytes(), "assssaaaass".as_bytes()];
+        let unknown_keys = vec![
+            "xsb".as_bytes(),
+            "9sad".as_bytes(),
+            "assssaaaass".as_bytes(),
+        ];
 
         for block_offset in vec![0, 1024, 5000, 6025].into_iter() {
             for key in get_keys().iter() {
-                assert!(reader.key_may_match(block_offset, key),
-                        format!("{} {:?} ", block_offset, key));
+                assert!(
+                    reader.key_may_match(block_offset, key),
+                    format!("{} {:?} ", block_offset, key)
+                );
             }
             for key in unknown_keys.iter() {
                 assert!(!reader.key_may_match(block_offset, key));
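
// A sketch of the offset-to-filter mapping the reader test above relies on,
// assuming the LevelDB convention of one filter per 2 KiB of data-block
// offsets (i.e. FILTER_BASE_LOG2 == 11; the real constant lives in this
// module).
fn filter_index(block_offset: usize, base_log2: usize) -> usize {
    block_offset >> base_log2
}

fn main() {
    let base_log2 = 11; // 2^11 = 2048 bytes of block offsets per filter
    assert_eq!(filter_index(0, base_log2), 0);
    assert_eq!(filter_index(1024, base_log2), 0);
    assert_eq!(filter_index(5000, base_log2), 2);
    // Matches the assertion above: block offset 5121 falls into the third
    // filter, whose offset entry is the one at 17.
    assert_eq!(filter_index(5121, base_log2), 2);
}
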
--- a/src/infolog.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/infolog.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,4 +1,3 @@
-
 use std::io::{self, Write};
 
 pub struct Logger(pub Box<Write>);
--- a/src/key_types.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/key_types.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,11 +1,10 @@
-
 use cmp::Cmp;
 use types::SequenceNumber;
 
 use std::cmp::Ordering;
 use std::io::Write;
 
-use integer_encoding::{FixedInt, VarInt, VarIntWriter, FixedIntWriter};
+use integer_encoding::{FixedInt, FixedIntWriter, VarInt, VarIntWriter};
 
 // The following typedefs are used to distinguish between the different key formats used internally
 // by different modules.
@@ -56,9 +55,12 @@
 
         {
             let mut writer = key.as_mut_slice();
-            writer.write_varint(internal_keylen).expect("write to slice failed");
+            writer
+                .write_varint(internal_keylen)
+                .expect("write to slice failed");
             writer.write(k).expect("write to slice failed");
-            writer.write_fixedint(s << 8 | t as u64)
+            writer
+                .write_fixedint(s << 8 | t as u64)
                 .expect("write to slice failed");
         }
 
@@ -110,14 +112,18 @@
     let keysize = key.len() + U64_SPACE;
     let valsize = value.len();
     let mut buf = Vec::new();
-    buf.resize(keysize + valsize + keysize.required_space() + valsize.required_space(),
-               0);
+    buf.resize(
+        keysize + valsize + keysize.required_space() + valsize.required_space(),
+        0,
+    );
 
     {
         let mut writer = buf.as_mut_slice();
         writer.write_varint(keysize).expect("write to slice failed");
         writer.write(key).expect("write to slice failed");
-        writer.write_fixedint((t as u64) | (seq << 8)).expect("write to slice failed");
+        writer
+            .write_fixedint((t as u64) | (seq << 8))
+            .expect("write to slice failed");
         writer.write_varint(valsize).expect("write to slice failed");
         writer.write(value).expect("write to slice failed");
         assert_eq!(writer.len(), 0);
@@ -216,28 +222,44 @@
 
         assert_eq!(lk1.user_key(), "abcde".as_bytes());
         assert_eq!(u32::decode_var(lk1.memtable_key()), (13, 1));
-        assert_eq!(lk2.internal_key(),
-                   vec![120, 121, 97, 98, 120, 121, 1, 97, 0, 0, 0, 0, 0, 0].as_slice());
+        assert_eq!(
+            lk2.internal_key(),
+            vec![120, 121, 97, 98, 120, 121, 1, 97, 0, 0, 0, 0, 0, 0].as_slice()
+        );
     }
 
     #[test]
     fn test_build_memtable_key() {
-        assert_eq!(build_memtable_key("abc".as_bytes(),
-                                      "123".as_bytes(),
-                                      ValueType::TypeValue,
-                                      231),
-                   vec![11, 97, 98, 99, 1, 231, 0, 0, 0, 0, 0, 0, 3, 49, 50, 51]);
-        assert_eq!(build_memtable_key("".as_bytes(), "123".as_bytes(), ValueType::TypeValue, 231),
-                   vec![8, 1, 231, 0, 0, 0, 0, 0, 0, 3, 49, 50, 51]);
-        assert_eq!(build_memtable_key("abc".as_bytes(),
-                                      "123".as_bytes(),
-                                      ValueType::TypeDeletion,
-                                      231),
-                   vec![11, 97, 98, 99, 0, 231, 0, 0, 0, 0, 0, 0, 3, 49, 50, 51]);
-        assert_eq!(build_memtable_key("abc".as_bytes(),
-                                      "".as_bytes(),
-                                      ValueType::TypeDeletion,
-                                      231),
-                   vec![11, 97, 98, 99, 0, 231, 0, 0, 0, 0, 0, 0, 0]);
+        assert_eq!(
+            build_memtable_key(
+                "abc".as_bytes(),
+                "123".as_bytes(),
+                ValueType::TypeValue,
+                231
+            ),
+            vec![11, 97, 98, 99, 1, 231, 0, 0, 0, 0, 0, 0, 3, 49, 50, 51]
+        );
+        assert_eq!(
+            build_memtable_key("".as_bytes(), "123".as_bytes(), ValueType::TypeValue, 231),
+            vec![8, 1, 231, 0, 0, 0, 0, 0, 0, 3, 49, 50, 51]
+        );
+        assert_eq!(
+            build_memtable_key(
+                "abc".as_bytes(),
+                "123".as_bytes(),
+                ValueType::TypeDeletion,
+                231
+            ),
+            vec![11, 97, 98, 99, 0, 231, 0, 0, 0, 0, 0, 0, 3, 49, 50, 51]
+        );
+        assert_eq!(
+            build_memtable_key(
+                "abc".as_bytes(),
+                "".as_bytes(),
+                ValueType::TypeDeletion,
+                231
+            ),
+            vec![11, 97, 98, 99, 0, 231, 0, 0, 0, 0, 0, 0, 0]
+        );
     }
 }
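
// A worked example of the memtable key layout checked by
// test_build_memtable_key above: varint(keylen + 8) | key | fixed64
// little-endian of (seq << 8 | tag) | varint(vallen) | value. The helper
// below is illustrative only and handles just single-byte varints.
fn fixed64_le(x: u64) -> [u8; 8] {
    let mut b = [0u8; 8];
    for i in 0..8 {
        b[i] = (x >> (8 * i)) as u8;
    }
    b
}

fn toy_memtable_key(key: &[u8], value: &[u8], tag: u64, seq: u64) -> Vec<u8> {
    let keysize = key.len() + 8; // user key plus the 8-byte tag/sequence word
    assert!(keysize < 128 && value.len() < 128); // single-byte varints only
    let mut buf = vec![keysize as u8];
    buf.extend_from_slice(key);
    buf.extend_from_slice(&fixed64_le(seq << 8 | tag));
    buf.push(value.len() as u8);
    buf.extend_from_slice(value);
    buf
}

fn main() {
    // Same input as the first case above; tag 1 marks a regular value.
    assert_eq!(
        toy_memtable_key(b"abc", b"123", 1, 231),
        vec![11, 97, 98, 99, 1, 231, 0, 0, 0, 0, 0, 0, 3, 49, 50, 51]
    );
}
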
--- a/src/log.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/log.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -3,7 +3,7 @@
 //! A record is a bytestring: [checksum: uint32, length: uint16, type: uint8, data: [u8]]
 //! checksum is the crc32 sum of type and data; type is one of RecordType::{Full/First/Middle/Last}
 
-use error::{err, StatusCode, Result};
+use error::{err, Result, StatusCode};
 
 use std::io::{Read, Write};
 
@@ -116,7 +116,6 @@
     }
 }
 
-
 pub struct LogReader<R: Read> {
     // TODO: Wrap src in a buffer to enhance read performance.
     src: R,
@@ -151,7 +150,10 @@
         loop {
             if self.blocksize - self.blk_off < HEADER_SIZE {
                 // skip to next block
-                try!(self.src.read(&mut self.head_scratch[0..self.blocksize - self.blk_off]));
+                try!(
+                    self.src
+                        .read(&mut self.head_scratch[0..self.blocksize - self.blk_off])
+                );
                 self.blk_off = 0;
             }
 
@@ -169,12 +171,15 @@
             typ = self.head_scratch[6];
 
             dst.resize(dst_offset + length as usize, 0);
-            bytes_read = try!(self.src
-                .read(&mut dst[dst_offset..dst_offset + length as usize]));
+            bytes_read = try!(
+                self.src
+                    .read(&mut dst[dst_offset..dst_offset + length as usize])
+            );
             self.blk_off += bytes_read;
 
-            if self.checksums &&
-               !self.check_integrity(typ, &dst[dst_offset..dst_offset + bytes_read], checksum) {
+            if self.checksums
+                && !self.check_integrity(typ, &dst[dst_offset..dst_offset + bytes_read], checksum)
+            {
                 return err(StatusCode::Corruption, "Invalid Checksum");
             }
 
@@ -231,7 +236,11 @@
 
     #[test]
     fn test_writer() {
-        let data = &["hello world. My first log entry.", "and my second", "and my third"];
+        let data = &[
+            "hello world. My first log entry.",
+            "and my second",
+            "and my third",
+        ];
         let mut lw = LogWriter::new(Vec::new());
         let total_len = data.iter().fold(0, |l, d| l + d.len());
 
@@ -244,7 +253,11 @@
 
     #[test]
     fn test_writer_append() {
-        let data = &["hello world. My first log entry.", "and my second", "and my third"];
+        let data = &[
+            "hello world. My first log entry.",
+            "and my second",
+            "and my third",
+        ];
 
         let mut dst = Vec::new();
         dst.resize(1024, 0 as u8);
@@ -262,21 +275,22 @@
         // cursors and stuff is required.
         {
             let offset = data[0].len() + super::HEADER_SIZE;
-            let mut lw = LogWriter::new_with_off(Cursor::new(&mut dst.as_mut_slice()[offset..]),
-                                                 offset);
+            let mut lw =
+                LogWriter::new_with_off(Cursor::new(&mut dst.as_mut_slice()[offset..]), offset);
             for d in &data[1..] {
                 let _ = lw.add_record(d.as_bytes());
             }
         }
         assert_eq!(old, dst);
-
     }
 
     #[test]
     fn test_reader() {
-        let data = vec!["abcdefghi".as_bytes().to_vec(), // fits one block of 17
-                        "123456789012".as_bytes().to_vec(), // spans two blocks of 17
-                        "0101010101010101010101".as_bytes().to_vec()]; // spans three blocks of 17
+        let data = vec![
+            "abcdefghi".as_bytes().to_vec(),    // fits one block of 17
+            "123456789012".as_bytes().to_vec(), // spans two blocks of 17
+            "0101010101010101010101".as_bytes().to_vec(),
+        ]; // spans three blocks of 17
         let mut lw = LogWriter::new(Vec::new());
         lw.block_size = super::HEADER_SIZE + 10;
 
@@ -293,8 +307,10 @@
         let mut dst = Vec::with_capacity(128);
 
         // First record is corrupted.
-        assert_eq!(err(StatusCode::Corruption, "Invalid Checksum"),
-                   lr.read(&mut dst));
+        assert_eq!(
+            err(StatusCode::Corruption, "Invalid Checksum"),
+            lr.read(&mut dst)
+        );
 
         let mut i = 1;
         loop {
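
// A sketch of the record header documented at the top of this module
// (checksum: u32, length: u16, type: u8 => 7 bytes) and of how many fragments
// a record needs with the block size used in test_reader (HEADER_SIZE + 10 =
// 17). Simplification: every record is assumed to start at a block boundary.
const HEADER_SIZE: usize = 4 + 2 + 1; // checksum + length + type

fn fragments_needed(record_len: usize, block_size: usize) -> usize {
    // Each fragment carries its own 7-byte header, so a block holds at most
    // block_size - HEADER_SIZE bytes of payload.
    let payload_per_block = block_size - HEADER_SIZE;
    (record_len + payload_per_block - 1) / payload_per_block
}

fn main() {
    let block_size = HEADER_SIZE + 10;
    assert_eq!(fragments_needed("abcdefghi".len(), block_size), 1); // Full
    assert_eq!(fragments_needed("123456789012".len(), block_size), 2); // First+Last
    assert_eq!(fragments_needed("0101010101010101010101".len(), block_size), 3); // First+Middle+Last
}
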
--- a/src/mem_env.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/mem_env.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -130,7 +130,9 @@
 
 impl MemFS {
     fn new() -> MemFS {
-        MemFS { store: Arc::new(Mutex::new(HashMap::new())) }
+        MemFS {
+            store: Arc::new(Mutex::new(HashMap::new())),
+        }
     }
 
     /// Open a file. The caller can use the MemFile either inside a MemFileReader or as
@@ -141,8 +143,10 @@
             Entry::Occupied(o) => Ok(o.get().f.clone()),
             Entry::Vacant(v) => {
                 if !create {
-                    return err(StatusCode::NotFound,
-                               &format!("open: file not found: {}", path_to_str(p)));
+                    return err(
+                        StatusCode::NotFound,
+                        &format!("open: file not found: {}", path_to_str(p)),
+                    );
                 }
                 let f = MemFile::new();
                 v.insert(MemFSEntry {
@@ -183,10 +187,10 @@
         let mut fs = self.store.lock()?;
         match fs.entry(path_to_string(p)) {
             Entry::Occupied(o) => Ok(o.get().f.0.lock()?.len()),
-            _ => {
-                err(StatusCode::NotFound,
-                    &format!("size_of: file not found: {}", path_to_str(p)))
-            }
+            _ => err(
+                StatusCode::NotFound,
+                &format!("size_of: file not found: {}", path_to_str(p)),
+            ),
         }
     }
     fn delete_(&self, p: &Path) -> Result<()> {
@@ -196,10 +200,10 @@
                 o.remove_entry();
                 Ok(())
             }
-            _ => {
-                err(StatusCode::NotFound,
-                    &format!("delete: file not found: {}", path_to_str(p)))
-            }
+            _ => err(
+                StatusCode::NotFound,
+                &format!("delete: file not found: {}", path_to_str(p)),
+            ),
         }
     }
     fn rename_(&self, from: &Path, to: &Path) -> Result<()> {
@@ -209,10 +213,10 @@
                 fs.insert(path_to_string(to), v);
                 Ok(())
             }
-            _ => {
-                err(StatusCode::NotFound,
-                    &format!("rename: file not found: {}", path_to_str(from)))
-            }
+            _ => err(
+                StatusCode::NotFound,
+                &format!("rename: file not found: {}", path_to_str(from)),
+            ),
         }
     }
     fn lock_(&self, p: &Path) -> Result<FileLock> {
@@ -220,11 +224,15 @@
         match fs.entry(path_to_string(p)) {
             Entry::Occupied(mut o) => {
                 if o.get().locked {
-                    err(StatusCode::LockError,
-                        &format!("already locked: {}", path_to_str(p)))
+                    err(
+                        StatusCode::LockError,
+                        &format!("already locked: {}", path_to_str(p)),
+                    )
                 } else {
                     o.get_mut().locked = true;
-                    Ok(FileLock { id: path_to_string(p) })
+                    Ok(FileLock {
+                        id: path_to_string(p),
+                    })
                 }
             }
             Entry::Vacant(v) => {
@@ -233,7 +241,9 @@
                     f: f.clone(),
                     locked: true,
                 });
-                Ok(FileLock { id: path_to_string(p) })
+                Ok(FileLock {
+                    id: path_to_string(p),
+                })
             }
         }
     }
@@ -243,17 +253,19 @@
         match fs.entry(l.id) {
             Entry::Occupied(mut o) => {
                 if !o.get().locked {
-                    err(StatusCode::LockError,
-                        &format!("unlocking unlocked file: {}", id))
+                    err(
+                        StatusCode::LockError,
+                        &format!("unlocking unlocked file: {}", id),
+                    )
                 } else {
                     o.get_mut().locked = false;
                     Ok(())
                 }
             }
-            _ => {
-                err(StatusCode::NotFound,
-                    &format!("unlock: file not found: {}", id))
-            }
+            _ => err(
+                StatusCode::NotFound,
+                &format!("unlock: file not found: {}", id),
+            ),
         }
     }
 }
@@ -274,7 +286,9 @@
         Ok(Box::new(MemFileReader::new(f, 0)))
     }
     fn open_random_access_file(&self, p: &Path) -> Result<Box<RandomAccess>> {
-        self.0.open(p, false).map(|m| Box::new(m) as Box<RandomAccess>)
+        self.0
+            .open(p, false)
+            .map(|m| Box::new(m) as Box<RandomAccess>)
     }
     fn open_writable_file(&self, p: &Path) -> Result<Box<Write>> {
         self.0.open_w(p, true, true)
@@ -329,7 +343,8 @@
     }
 
     fn new_logger(&self, p: &Path) -> Result<Logger> {
-        self.open_appendable_file(p).map(|dst| Logger::new(Box::new(dst)))
+        self.open_appendable_file(p)
+            .map(|dst| Logger::new(Box::new(dst)))
     }
 }
 
@@ -364,8 +379,10 @@
         assert_eq!(w1.write(&[1, 7, 8, 9]).unwrap(), 4);
         assert_eq!(w2.write(&[4, 5, 6]).unwrap(), 3);
 
-        assert_eq!((w1.0).0.lock().unwrap().as_ref() as &Vec<u8>,
-                   &[1, 2, 3, 4, 5, 6, 9]);
+        assert_eq!(
+            (w1.0).0.lock().unwrap().as_ref() as &Vec<u8>,
+            &[1, 2, 3, 4, 5, 6, 9]
+        );
     }
 
     #[test]
@@ -411,7 +428,6 @@
             s.clear();
             assert_eq!(r2.read_to_string(&mut s).unwrap(), 8);
             assert_eq!(s, "lloWorld");
-
         }
         assert_eq!(fs.size_of_(&path).unwrap(), 10);
         assert!(fs.exists_(&path).unwrap());
@@ -442,7 +458,6 @@
             let mut s = String::new();
             assert_eq!(r.read_to_string(&mut s).unwrap(), 32);
             assert_eq!(s, "OveXyzitingEverythingWithGarbage");
-
         }
         assert!(fs.exists_(&path).unwrap());
         assert_eq!(fs.size_of_(&path).unwrap(), 32);
@@ -490,8 +505,11 @@
     #[test]
     fn test_mem_fs_children() {
         let fs = MemFS::new();
-        let (path1, path2, path3) =
-            (Path::new("/a/1.txt"), Path::new("/a/2.txt"), Path::new("/b/1.txt"));
+        let (path1, path2, path3) = (
+            Path::new("/a/1.txt"),
+            Path::new("/a/2.txt"),
+            Path::new("/b/1.txt"),
+        );
 
         for p in &[&path1, &path2, &path3] {
             fs.open_w(*p, false, false).unwrap();
@@ -525,12 +543,16 @@
         assert!(fs.unlock_(lock).is_ok());
 
         // Rogue operation.
-        assert!(fs.unlock_(env::FileLock { id: "/a/lock".to_string() }).is_err());
+        assert!(fs.unlock_(env::FileLock {
+            id: "/a/lock".to_string(),
+        }).is_err());
 
         // Non-existent files.
         let p2 = Path::new("/a/lock2");
         assert!(fs.lock_(p2).is_ok());
-        assert!(fs.unlock_(env::FileLock { id: "/a/lock2".to_string() }).is_ok());
+        assert!(fs.unlock_(env::FileLock {
+            id: "/a/lock2".to_string(),
+        }).is_ok());
     }
 
     #[test]
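
// The sharing pattern MemFS relies on, in miniature: several handles hold an
// Arc<Mutex<Vec<u8>>> to the same in-memory "file", so appends from any
// handle land in one shared buffer. Purely an illustration of the idea, not
// the MemFile/MemFileWriter implementation above.
use std::io::{self, Write};
use std::sync::{Arc, Mutex};

#[derive(Clone)]
struct SharedFile(Arc<Mutex<Vec<u8>>>);

impl Write for SharedFile {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Append-only, like a file opened via open_appendable_file().
        self.0.lock().unwrap().extend_from_slice(buf);
        Ok(buf.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

fn main() {
    let file = SharedFile(Arc::new(Mutex::new(Vec::new())));
    let mut w1 = file.clone();
    let mut w2 = file.clone();
    w1.write_all(b"Hello").unwrap();
    w2.write_all(b"World").unwrap();
    assert_eq!(file.0.lock().unwrap().as_slice(), &b"HelloWorld"[..]);
}
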
--- a/src/memtable.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/memtable.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,6 +1,6 @@
 use key_types::{LookupKey, UserKey};
 use cmp::{Cmp, MemtableKeyCmp};
-use key_types::{parse_internal_key, parse_memtable_key, build_memtable_key, ValueType};
+use key_types::{build_memtable_key, parse_internal_key, parse_memtable_key, ValueType};
 use types::{current_key_val, LdbIterator, SequenceNumber};
 use skipmap::{SkipMap, SkipMapIter};
 
@@ -23,7 +23,9 @@
 
     /// Doesn't wrap the comparator in a MemtableKeyCmp.
     fn new_raw(cmp: Rc<Box<Cmp>>) -> MemTable {
-        MemTable { map: SkipMap::new(cmp) }
+        MemTable {
+            map: SkipMap::new(cmp),
+        }
     }
 
     pub fn len(&self) -> usize {
@@ -35,7 +37,8 @@
     }
 
     pub fn add<'a>(&mut self, seq: SequenceNumber, t: ValueType, key: UserKey<'a>, value: &[u8]) {
-        self.map.insert(build_memtable_key(key, value, t, seq), Vec::new())
+        self.map
+            .insert(build_memtable_key(key, value, t, seq), Vec::new())
     }
 
     /// get returns the value for the given entry and whether the entry is marked as deleted. This
@@ -62,7 +65,9 @@
     }
 
     pub fn iter(&self) -> MemtableIterator {
-        MemtableIterator { skipmapiter: self.map.iter() }
+        MemtableIterator {
+            skipmapiter: self.map.iter(),
+        }
     }
 }
 
@@ -131,7 +136,8 @@
     fn seek(&mut self, to: &[u8]) {
         // Assemble the correct memtable key from the supplied InternalKey.
         let (_, seq, ukey) = parse_internal_key(to);
-        self.skipmapiter.seek(LookupKey::new(ukey, seq).memtable_key());
+        self.skipmapiter
+            .seek(LookupKey::new(ukey, seq).memtable_key());
     }
 }
 
@@ -165,11 +171,13 @@
 
     fn get_memtable() -> MemTable {
         let mut mt = MemTable::new(options::for_test().cmp);
-        let entries = vec![(ValueType::TypeValue, 115, "abc", "122"),
-                           (ValueType::TypeValue, 120, "abc", "123"),
-                           (ValueType::TypeValue, 121, "abd", "124"),
-                           (ValueType::TypeDeletion, 122, "abe", "125"),
-                           (ValueType::TypeValue, 123, "abf", "126")];
+        let entries = vec![
+            (ValueType::TypeValue, 115, "abc", "122"),
+            (ValueType::TypeValue, 120, "abc", "123"),
+            (ValueType::TypeValue, 121, "abd", "124"),
+            (ValueType::TypeDeletion, 122, "abe", "125"),
+            (ValueType::TypeValue, 123, "abf", "126"),
+        ];
 
         for e in entries.iter() {
             mt.add(e.1, e.0, e.2.as_bytes(), e.3.as_bytes());
@@ -186,15 +194,21 @@
     #[test]
     fn test_memtable_add() {
         let mut mt = MemTable::new(options::for_test().cmp);
-        mt.add(123,
-               ValueType::TypeValue,
-               "abc".as_bytes(),
-               "123".as_bytes());
+        mt.add(
+            123,
+            ValueType::TypeValue,
+            "abc".as_bytes(),
+            "123".as_bytes(),
+        );
 
-        assert_eq!(mt.map.iter().next().unwrap().0,
-                   &[11, 97, 98, 99, 1, 123, 0, 0, 0, 0, 0, 0, 3, 49, 50, 51]);
-        assert_eq!(mt.iter().next().unwrap().0,
-                   &[97, 98, 99, 1, 123, 0, 0, 0, 0, 0, 0]);
+        assert_eq!(
+            mt.map.iter().next().unwrap().0,
+            &[11, 97, 98, 99, 1, 123, 0, 0, 0, 0, 0, 0, 3, 49, 50, 51]
+        );
+        assert_eq!(
+            mt.iter().next().unwrap().0,
+            &[97, 98, 99, 1, 123, 0, 0, 0, 0, 0, 0]
+        );
     }
 
     #[test]
@@ -248,8 +262,10 @@
         assert!(!iter.valid());
         iter.next();
         assert!(iter.valid());
-        assert_eq!(current_key_val(&iter).unwrap().0,
-                   vec![97, 98, 99, 1, 120, 0, 0, 0, 0, 0, 0].as_slice());
+        assert_eq!(
+            current_key_val(&iter).unwrap().0,
+            vec![97, 98, 99, 1, 120, 0, 0, 0, 0, 0, 0].as_slice()
+        );
         iter.reset();
         assert!(!iter.valid());
     }
@@ -264,8 +280,10 @@
         iter.seek(LookupKey::new("abc".as_bytes(), 400).internal_key());
         let (mut gotkey, gotval) = current_key_val(&iter).unwrap();
         truncate_to_userkey(&mut gotkey);
-        assert_eq!(("abc".as_bytes(), "123".as_bytes()),
-                   (gotkey.as_slice(), gotval.as_slice()));
+        assert_eq!(
+            ("abc".as_bytes(), "123".as_bytes()),
+            (gotkey.as_slice(), gotval.as_slice())
+        );
 
         iter.seek(LookupKey::new("xxx".as_bytes(), 400).internal_key());
         assert!(!iter.valid());
@@ -273,8 +291,10 @@
         iter.seek(LookupKey::new("abd".as_bytes(), 400).internal_key());
         let (mut gotkey, gotval) = current_key_val(&iter).unwrap();
         truncate_to_userkey(&mut gotkey);
-        assert_eq!(("abd".as_bytes(), "124".as_bytes()),
-                   (gotkey.as_slice(), gotval.as_slice()));
+        assert_eq!(
+            ("abd".as_bytes(), "124".as_bytes()),
+            (gotkey.as_slice(), gotval.as_slice())
+        );
     }
 
     #[test]
@@ -282,13 +302,15 @@
         let mt = get_memtable();
         let mut iter = mt.iter();
 
-        let expected = vec!["123".as_bytes(), /* i.e., the abc entry with
-                                               * higher sequence number comes first */
-                            "122".as_bytes(),
-                            "124".as_bytes(),
-                            // deleted entry:
-                            "125".as_bytes(),
-                            "126".as_bytes()];
+        let expected = vec![
+            "123".as_bytes(), /* i.e., the abc entry with
+                               * higher sequence number comes first */
+            "122".as_bytes(),
+            "124".as_bytes(),
+            // deleted entry:
+            "125".as_bytes(),
+            "126".as_bytes(),
+        ];
         let mut i = 0;
 
         for (k, v) in LdbIteratorIter::wrap(&mut iter) {
@@ -305,28 +327,38 @@
         // Bigger sequence number comes first
         iter.next();
         assert!(iter.valid());
-        assert_eq!(current_key_val(&iter).unwrap().0,
-                   vec![97, 98, 99, 1, 120, 0, 0, 0, 0, 0, 0].as_slice());
+        assert_eq!(
+            current_key_val(&iter).unwrap().0,
+            vec![97, 98, 99, 1, 120, 0, 0, 0, 0, 0, 0].as_slice()
+        );
 
         iter.next();
         assert!(iter.valid());
-        assert_eq!(current_key_val(&iter).unwrap().0,
-                   vec![97, 98, 99, 1, 115, 0, 0, 0, 0, 0, 0].as_slice());
+        assert_eq!(
+            current_key_val(&iter).unwrap().0,
+            vec![97, 98, 99, 1, 115, 0, 0, 0, 0, 0, 0].as_slice()
+        );
 
         iter.next();
         assert!(iter.valid());
-        assert_eq!(current_key_val(&iter).unwrap().0,
-                   vec![97, 98, 100, 1, 121, 0, 0, 0, 0, 0, 0].as_slice());
+        assert_eq!(
+            current_key_val(&iter).unwrap().0,
+            vec![97, 98, 100, 1, 121, 0, 0, 0, 0, 0, 0].as_slice()
+        );
 
         iter.prev();
         assert!(iter.valid());
-        assert_eq!(current_key_val(&iter).unwrap().0,
-                   vec![97, 98, 99, 1, 115, 0, 0, 0, 0, 0, 0].as_slice());
+        assert_eq!(
+            current_key_val(&iter).unwrap().0,
+            vec![97, 98, 99, 1, 115, 0, 0, 0, 0, 0, 0].as_slice()
+        );
 
         iter.prev();
         assert!(iter.valid());
-        assert_eq!(current_key_val(&iter).unwrap().0,
-                   vec![97, 98, 99, 1, 120, 0, 0, 0, 0, 0, 0].as_slice());
+        assert_eq!(
+            current_key_val(&iter).unwrap().0,
+            vec![97, 98, 99, 1, 120, 0, 0, 0, 0, 0, 0].as_slice()
+        );
 
         iter.prev();
         assert!(!iter.valid());
@@ -346,10 +378,12 @@
     #[test]
     fn test_memtable_iterator_behavior() {
         let mut mt = MemTable::new(options::for_test().cmp);
-        let entries = vec![(115, "abc", "122"),
-                           (120, "abd", "123"),
-                           (121, "abe", "124"),
-                           (123, "abf", "126")];
+        let entries = vec![
+            (115, "abc", "122"),
+            (120, "abd", "123"),
+            (121, "abe", "124"),
+            (123, "abf", "126"),
+        ];
 
         for e in entries.iter() {
             mt.add(e.0, ValueType::TypeValue, e.1.as_bytes(), e.2.as_bytes());
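
// The ordering the iterator tests above depend on: memtable entries sort by
// user key ascending and, for equal user keys, by sequence number descending,
// so the freshest write is seen first. A minimal sketch of that rule (not the
// crate's MemtableKeyCmp):
use std::cmp::Reverse;

fn main() {
    let mut entries = vec![("abc", 115u64), ("abd", 121), ("abc", 120)];
    // Sort key: (user key ascending, sequence number descending).
    entries.sort_by_key(|&(k, seq)| (k, Reverse(seq)));
    assert_eq!(entries, vec![("abc", 120), ("abc", 115), ("abd", 121)]);
}
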
--- a/src/merging_iter.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/merging_iter.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -231,8 +231,10 @@
         let iter = skm.iter();
         let iter2 = skm.iter();
 
-        let mut miter = MergingIter::new(Rc::new(Box::new(DefaultCmp)),
-                                         vec![Box::new(iter), Box::new(iter2)]);
+        let mut miter = MergingIter::new(
+            Rc::new(Box::new(DefaultCmp)),
+            vec![Box::new(iter), Box::new(iter2)],
+        );
 
         loop {
             if let Some((k, v)) = miter.next() {
@@ -259,8 +261,10 @@
         let val = "def".as_bytes();
         let iter = TestLdbIter::new(vec![(b("aba"), val), (b("abc"), val)]);
         let iter2 = TestLdbIter::new(vec![(b("abb"), val), (b("abd"), val)]);
-        let miter = MergingIter::new(Rc::new(Box::new(DefaultCmp)),
-                                     vec![Box::new(iter), Box::new(iter2)]);
+        let miter = MergingIter::new(
+            Rc::new(Box::new(DefaultCmp)),
+            vec![Box::new(iter), Box::new(iter2)],
+        );
         test_iterator_properties(miter);
     }
 
@@ -270,8 +274,10 @@
         let iter = TestLdbIter::new(vec![(b("aba"), val), (b("abc"), val), (b("abe"), val)]);
         let iter2 = TestLdbIter::new(vec![(b("abb"), val), (b("abd"), val)]);
 
-        let mut miter = MergingIter::new(Rc::new(Box::new(DefaultCmp)),
-                                         vec![Box::new(iter), Box::new(iter2)]);
+        let mut miter = MergingIter::new(
+            Rc::new(Box::new(DefaultCmp)),
+            vec![Box::new(iter), Box::new(iter2)],
+        );
 
         // miter should return the following sequence: [aba, abb, abc, abd, abe]
 
@@ -298,8 +304,10 @@
         assert_eq!(third, current_key_val(&miter));
         // -> abd
         assert!(miter.advance());
-        assert_eq!(Some((b("abd").to_vec(), val.to_vec())),
-                   current_key_val(&miter));
+        assert_eq!(
+            Some((b("abd").to_vec(), val.to_vec())),
+            current_key_val(&miter)
+        );
     }
 
     fn b(s: &'static str) -> &'static [u8] {
@@ -314,8 +322,10 @@
         let it2 = TestLdbIter::new(vec![(&b("abb"), val), (&b("abd"), val)]);
         let expected = vec![b("aba"), b("abb"), b("abc"), b("abd"), b("abe")];
 
-        let mut iter = MergingIter::new(Rc::new(Box::new(DefaultCmp)),
-                                        vec![Box::new(it1), Box::new(it2)]);
+        let mut iter = MergingIter::new(
+            Rc::new(Box::new(DefaultCmp)),
+            vec![Box::new(it1), Box::new(it2)],
+        );
 
         let mut i = 0;
         for (k, _) in LdbIteratorIter::wrap(&mut iter) {
@@ -331,8 +341,10 @@
         let it1 = TestLdbIter::new(vec![(b("aba"), val), (b("abc"), val), (b("abe"), val)]);
         let it2 = TestLdbIter::new(vec![(b("abb"), val), (b("abd"), val)]);
 
-        let mut iter = MergingIter::new(Rc::new(Box::new(DefaultCmp)),
-                                        vec![Box::new(it1), Box::new(it2)]);
+        let mut iter = MergingIter::new(
+            Rc::new(Box::new(DefaultCmp)),
+            vec![Box::new(it1), Box::new(it2)],
+        );
 
         assert!(!iter.valid());
         iter.advance();
@@ -340,18 +352,24 @@
         assert!(current_key_val(&iter).is_some());
 
         iter.seek("abc".as_bytes());
-        assert_eq!(current_key_val(&iter),
-                   Some((b("abc").to_vec(), val.to_vec())));
+        assert_eq!(
+            current_key_val(&iter),
+            Some((b("abc").to_vec(), val.to_vec()))
+        );
         iter.seek("ab0".as_bytes());
-        assert_eq!(current_key_val(&iter),
-                   Some((b("aba").to_vec(), val.to_vec())));
+        assert_eq!(
+            current_key_val(&iter),
+            Some((b("aba").to_vec(), val.to_vec()))
+        );
         iter.seek("abx".as_bytes());
         assert_eq!(current_key_val(&iter), None);
 
         iter.reset();
         assert!(!iter.valid());
         iter.next();
-        assert_eq!(current_key_val(&iter),
-                   Some((b("aba").to_vec(), val.to_vec())));
+        assert_eq!(
+            current_key_val(&iter),
+            Some((b("aba").to_vec(), val.to_vec()))
+        );
     }
 }
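
// What MergingIter is asserted to do above, sketched with plain std
// iterators: repeatedly emit the smallest current key across the child
// iterators. This toy version merges two pre-sorted Vecs.
fn merge<'a>(a: Vec<&'a str>, b: Vec<&'a str>) -> Vec<&'a str> {
    let mut a = a.into_iter().peekable();
    let mut b = b.into_iter().peekable();
    let mut out = Vec::new();
    loop {
        // Decide which child currently holds the smallest key.
        let take_a = match (a.peek(), b.peek()) {
            (Some(x), Some(y)) => x <= y,
            (Some(_), None) => true,
            (None, Some(_)) => false,
            (None, None) => return out,
        };
        if take_a {
            out.push(a.next().unwrap());
        } else {
            out.push(b.next().unwrap());
        }
    }
}

fn main() {
    // Same key sets as test_merging_fwd_bckwd above.
    let merged = merge(vec!["aba", "abc", "abe"], vec!["abb", "abd"]);
    assert_eq!(merged, vec!["aba", "abb", "abc", "abd", "abe"]);
}
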
--- a/src/options.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/options.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,4 +1,3 @@
-
 use block::Block;
 use cache::Cache;
 use cmp::{Cmp, DefaultCmp};
--- a/src/skipmap.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/skipmap.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,4 +1,3 @@
-
 use cmp::{Cmp, MemtableKeyCmp};
 use types::LdbIterator;
 use rand::{Rng, SeedableRng, StdRng};
@@ -252,8 +251,8 @@
             }
         }
 
-        let added_mem = size_of::<Node>() + size_of::<Option<*mut Node>>() * new.skips.len() +
-                        new.key.len() + new.value.len();
+        let added_mem = size_of::<Node>() + size_of::<Option<*mut Node>>() * new.skips.len()
+            + new.key.len() + new.value.len();
         self.approx_mem += added_mem;
         self.len += 1;
 
@@ -268,11 +267,13 @@
         let mut current = self.head.as_ref() as *const Node;
         loop {
             unsafe {
-                println!("{:?} {:?}/{:?} - {:?}",
-                         current,
-                         (*current).key,
-                         (*current).value,
-                         (*current).skips);
+                println!(
+                    "{:?} {:?}/{:?} - {:?}",
+                    current,
+                    (*current).key,
+                    (*current).value,
+                    (*current).skips
+                );
                 if let Some(next) = (*current).skips[0].clone() {
                     current = next;
                 } else {
@@ -337,7 +338,8 @@
         if self.valid() {
             if let Some(prev) = self.map
                 .borrow()
-                .get_next_smaller(unsafe { &(*self.current).key }) {
+                .get_next_smaller(unsafe { &(*self.current).key })
+            {
                 self.current = prev as *const Node;
                 if !prev.key.is_empty() {
                     return true;
@@ -359,9 +361,11 @@
 
     pub fn make_skipmap() -> SkipMap {
         let mut skm = SkipMap::new(options::for_test().cmp);
-        let keys = vec!["aba", "abb", "abc", "abd", "abe", "abf", "abg", "abh", "abi", "abj",
-                        "abk", "abl", "abm", "abn", "abo", "abp", "abq", "abr", "abs", "abt",
-                        "abu", "abv", "abw", "abx", "aby", "abz"];
+        let keys = vec![
+            "aba", "abb", "abc", "abd", "abe", "abf", "abg", "abh", "abi", "abj", "abk", "abl",
+            "abm", "abn", "abo", "abp", "abq", "abr", "abs", "abt", "abu", "abv", "abw", "abx",
+            "aby", "abz",
+        ];
 
         for k in keys {
             skm.insert(k.as_bytes().to_vec(), "def".as_bytes().to_vec());
@@ -400,25 +404,70 @@
     #[test]
     fn test_find() {
         let skm = make_skipmap();
-        assert_eq!(skm.map.borrow().get_greater_or_equal(&"abf".as_bytes().to_vec()).unwrap().key,
-                   "abf".as_bytes().to_vec());
-        assert!(skm.map.borrow().get_greater_or_equal(&"ab{".as_bytes().to_vec()).is_none());
-        assert_eq!(skm.map.borrow().get_greater_or_equal(&"aaa".as_bytes().to_vec()).unwrap().key,
-                   "aba".as_bytes().to_vec());
-        assert_eq!(skm.map.borrow().get_greater_or_equal(&"ab".as_bytes()).unwrap().key.as_slice(),
-                   "aba".as_bytes());
-        assert_eq!(skm.map
-                       .borrow()
-                       .get_greater_or_equal(&"abc".as_bytes())
-                       .unwrap()
-                       .key
-                       .as_slice(),
-                   "abc".as_bytes());
-        assert!(skm.map.borrow().get_next_smaller(&"ab0".as_bytes()).is_none());
-        assert_eq!(skm.map.borrow().get_next_smaller(&"abd".as_bytes()).unwrap().key.as_slice(),
-                   "abc".as_bytes());
-        assert_eq!(skm.map.borrow().get_next_smaller(&"ab{".as_bytes()).unwrap().key.as_slice(),
-                   "abz".as_bytes());
+        assert_eq!(
+            skm.map
+                .borrow()
+                .get_greater_or_equal(&"abf".as_bytes().to_vec())
+                .unwrap()
+                .key,
+            "abf".as_bytes().to_vec()
+        );
+        assert!(
+            skm.map
+                .borrow()
+                .get_greater_or_equal(&"ab{".as_bytes().to_vec())
+                .is_none()
+        );
+        assert_eq!(
+            skm.map
+                .borrow()
+                .get_greater_or_equal(&"aaa".as_bytes().to_vec())
+                .unwrap()
+                .key,
+            "aba".as_bytes().to_vec()
+        );
+        assert_eq!(
+            skm.map
+                .borrow()
+                .get_greater_or_equal(&"ab".as_bytes())
+                .unwrap()
+                .key
+                .as_slice(),
+            "aba".as_bytes()
+        );
+        assert_eq!(
+            skm.map
+                .borrow()
+                .get_greater_or_equal(&"abc".as_bytes())
+                .unwrap()
+                .key
+                .as_slice(),
+            "abc".as_bytes()
+        );
+        assert!(
+            skm.map
+                .borrow()
+                .get_next_smaller(&"ab0".as_bytes())
+                .is_none()
+        );
+        assert_eq!(
+            skm.map
+                .borrow()
+                .get_next_smaller(&"abd".as_bytes())
+                .unwrap()
+                .key
+                .as_slice(),
+            "abc".as_bytes()
+        );
+        assert_eq!(
+            skm.map
+                .borrow()
+                .get_next_smaller(&"ab{".as_bytes())
+                .unwrap()
+                .key
+                .as_slice(),
+            "abz".as_bytes()
+        );
     }
 
     #[test]
@@ -484,12 +533,16 @@
         assert!(iter.valid());
         assert_eq!(current_key_val(&iter).unwrap().0, "aba".as_bytes().to_vec());
         iter.seek(&"abz".as_bytes().to_vec());
-        assert_eq!(current_key_val(&iter).unwrap(),
-                   ("abz".as_bytes().to_vec(), "def".as_bytes().to_vec()));
+        assert_eq!(
+            current_key_val(&iter).unwrap(),
+            ("abz".as_bytes().to_vec(), "def".as_bytes().to_vec())
+        );
         // go back to beginning
         iter.seek(&"aba".as_bytes().to_vec());
-        assert_eq!(current_key_val(&iter).unwrap(),
-                   ("aba".as_bytes().to_vec(), "def".as_bytes().to_vec()));
+        assert_eq!(
+            current_key_val(&iter).unwrap(),
+            ("aba".as_bytes().to_vec(), "def".as_bytes().to_vec())
+        );
 
         iter.seek(&"".as_bytes().to_vec());
         assert!(iter.valid());
@@ -523,8 +576,10 @@
         assert!(!iter.valid());
         iter.seek(&"abc".as_bytes());
         iter.prev();
-        assert_eq!(current_key_val(&iter).unwrap(),
-                   ("abb".as_bytes().to_vec(), "def".as_bytes().to_vec()));
+        assert_eq!(
+            current_key_val(&iter).unwrap(),
+            ("abb".as_bytes().to_vec(), "def".as_bytes().to_vec())
+        );
     }
 
     #[test]
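
// The two lookup primitives exercised by test_find, illustrated on a plain
// std BTreeMap instead of the SkipMap: "greater or equal" returns the first
// entry at or after the probe key, "next smaller" the last entry strictly
// before it. Only the query semantics carry over; the skip list itself works
// very differently.
use std::collections::BTreeMap;

fn main() {
    let mut m = BTreeMap::new();
    for k in &["aba", "abc", "abf", "abz"] {
        m.insert(k.to_string(), "def".to_string());
    }

    let geq = |probe: &str| m.range(probe.to_string()..).next().map(|(k, _)| k.clone());
    let smaller = |probe: &str| m.range(..probe.to_string()).next_back().map(|(k, _)| k.clone());

    assert_eq!(geq("abf"), Some("abf".to_string()));
    assert_eq!(geq("ab"), Some("aba".to_string()));
    assert_eq!(geq("ab{"), None); // '{' sorts after 'z'
    assert_eq!(smaller("abd"), Some("abc".to_string()));
    assert_eq!(smaller("ab0"), None);
    assert_eq!(smaller("ab{"), Some("abz".to_string()));
}
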
--- a/src/snapshot.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/snapshot.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,5 +1,5 @@
 use std::collections::HashMap;
-use types::{share, MAX_SEQUENCE_NUMBER, SequenceNumber, Shared};
+use types::{share, SequenceNumber, Shared, MAX_SEQUENCE_NUMBER};
 
 use std::rc::Rc;
 
@@ -78,9 +78,14 @@
     /// oldest returns the lowest sequence number of all snapshots. It returns 0 if no snapshots
     /// are present.
     pub fn oldest(&self) -> SequenceNumber {
-        let oldest =
-            self.inner.borrow().map.iter().fold(MAX_SEQUENCE_NUMBER,
-                                                |s, (seq, _)| if *seq < s { *seq } else { s });
+        let oldest = self.inner
+            .borrow()
+            .map
+            .iter()
+            .fold(
+                MAX_SEQUENCE_NUMBER,
+                |s, (seq, _)| if *seq < s { *seq } else { s },
+            );
         if oldest == MAX_SEQUENCE_NUMBER {
             0
         } else {
@@ -91,7 +96,11 @@
     /// newest returns the newest sequence number of all snapshots. If no snapshots are present, it
     /// returns 0.
     pub fn newest(&self) -> SequenceNumber {
-        self.inner.borrow().map.iter().fold(0, |s, (seq, _)| if *seq > s { *seq } else { s })
+        self.inner
+            .borrow()
+            .map
+            .iter()
+            .fold(0, |s, (seq, _)| if *seq > s { *seq } else { s })
     }
 
     pub fn empty(&self) -> bool {
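
// oldest()/newest() in miniature: the oldest snapshot is the smallest tracked
// sequence number (0 if none), the newest the largest (also 0 if none). A
// sentinel larger than any real sequence number plays the role of
// MAX_SEQUENCE_NUMBER here.
use std::collections::HashMap;

fn oldest(snapshots: &HashMap<u64, ()>) -> u64 {
    let min = snapshots.keys().fold(u64::max_value(), |s, &seq| s.min(seq));
    if min == u64::max_value() {
        0
    } else {
        min
    }
}

fn newest(snapshots: &HashMap<u64, ()>) -> u64 {
    snapshots.keys().fold(0, |s, &seq| s.max(seq))
}

fn main() {
    let mut snaps: HashMap<u64, ()> = HashMap::new();
    assert_eq!((oldest(&snaps), newest(&snaps)), (0, 0));
    for seq in &[7, 3, 12] {
        snaps.insert(*seq, ());
    }
    assert_eq!((oldest(&snaps), newest(&snaps)), (3, 12));
}
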
--- a/src/table_block.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/table_block.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,7 +1,7 @@
 use block::Block;
 use blockhandle::BlockHandle;
 use env::RandomAccess;
-use error::{err, StatusCode, Result};
+use error::{err, Result, StatusCode};
 use filter;
 use filter_block::FilterBlockReader;
 use log::unmask_crc;
@@ -19,13 +19,16 @@
 }
 
 /// Reads a serialized filter block from a file and returns a FilterBlockReader.
-pub fn read_filter_block(src: &RandomAccess,
-                         location: &BlockHandle,
-                         policy: filter::BoxedFilterPolicy)
-                         -> Result<FilterBlockReader> {
+pub fn read_filter_block(
+    src: &RandomAccess,
+    location: &BlockHandle,
+    policy: filter::BoxedFilterPolicy,
+) -> Result<FilterBlockReader> {
     if location.size() == 0 {
-        return err(StatusCode::InvalidArgument,
-                   "no filter block in empty location");
+        return err(
+            StatusCode::InvalidArgument,
+            "no filter block in empty location",
+        );
     }
     let buf = read_bytes(src, location)?;
     Ok(FilterBlockReader::new_owned(policy, buf))
@@ -39,18 +42,29 @@
     // table is followed by 1B compression type and 4B checksum.
     // The checksum refers to the compressed contents.
     let buf = try!(read_bytes(f, location));
-    let compress = try!(read_bytes(f,
-                                   &BlockHandle::new(location.offset() + location.size(),
-                                                     table_builder::TABLE_BLOCK_COMPRESS_LEN)));
-    let cksum = try!(read_bytes(f,
-                                &BlockHandle::new(location.offset() + location.size() +
-                                                  table_builder::TABLE_BLOCK_COMPRESS_LEN,
-                                                  table_builder::TABLE_BLOCK_CKSUM_LEN)));
+    let compress = try!(read_bytes(
+        f,
+        &BlockHandle::new(
+            location.offset() + location.size(),
+            table_builder::TABLE_BLOCK_COMPRESS_LEN
+        )
+    ));
+    let cksum = try!(read_bytes(
+        f,
+        &BlockHandle::new(
+            location.offset() + location.size() + table_builder::TABLE_BLOCK_COMPRESS_LEN,
+            table_builder::TABLE_BLOCK_CKSUM_LEN
+        )
+    ));
 
     if !verify_table_block(&buf, compress[0], unmask_crc(u32::decode_fixed(&cksum))) {
-        return err(StatusCode::Corruption,
-                   &format!("checksum verification failed for block at {}",
-                            location.offset()));
+        return err(
+            StatusCode::Corruption,
+            &format!(
+                "checksum verification failed for block at {}",
+                location.offset()
+            ),
+        );
     }
 
     if let Some(ctype) = options::int_to_compressiontype(compress[0] as u32) {
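
// A worked example of the block trailer layout read above: the block data at
// [offset, offset + size) is followed by a 1-byte compression type and a
// 4-byte masked CRC over the compressed contents. The offset and size below
// are arbitrary.
const TABLE_BLOCK_COMPRESS_LEN: usize = 1;
const TABLE_BLOCK_CKSUM_LEN: usize = 4;

fn main() {
    let (offset, size) = (1000, 250); // hypothetical BlockHandle
    let data = offset..offset + size;
    let compress_type = data.end..data.end + TABLE_BLOCK_COMPRESS_LEN;
    let checksum = compress_type.end..compress_type.end + TABLE_BLOCK_CKSUM_LEN;
    assert_eq!((data.end, compress_type.end, checksum.end), (1250, 1251, 1255));
    println!("data {:?}, compression byte {:?}, checksum {:?}", data, compress_type, checksum);
}
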
--- a/src/table_builder.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/table_builder.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -185,7 +185,10 @@
         let mut handle_enc = [0 as u8; 16];
         let enc_len = handle.encode_to(&mut handle_enc);
 
-        self.index_block.as_mut().unwrap().add(&sep, &handle_enc[0..enc_len]);
+        self.index_block
+            .as_mut()
+            .unwrap()
+            .add(&sep, &handle_enc[0..enc_len]);
         self.data_block = Some(BlockBuilder::new(self.opt.clone()));
 
         if let Some(ref mut fblock) = self.filter_block {
@@ -225,8 +228,9 @@
         // If there's a pending data block, write it
         if self.data_block.as_ref().unwrap().entries() > 0 {
             // Find a key reliably past the last key
-            let key_past_last =
-                self.opt.cmp.find_short_succ(self.data_block.as_ref().unwrap().last_key());
+            let key_past_last = self.opt
+                .cmp
+                .find_short_succ(self.data_block.as_ref().unwrap().last_key());
             self.write_data_block(&key_past_last)?;
         }
 
@@ -282,7 +286,6 @@
         assert_eq!(f2.meta_index.size(), 4);
         assert_eq!(f2.index.offset(), 55);
         assert_eq!(f2.index.size(), 5);
-
     }
 
     #[test]
@@ -293,12 +296,23 @@
         opt.compression_type = CompressionType::CompressionSnappy;
         let mut b = TableBuilder::new_raw(opt, &mut d);
 
-        let data = vec![("abc", "def"), ("abe", "dee"), ("bcd", "asa"), ("dcc", "a00")];
-        let data2 = vec![("abd", "def"), ("abf", "dee"), ("ccd", "asa"), ("dcd", "a00")];
+        let data = vec![
+            ("abc", "def"),
+            ("abe", "dee"),
+            ("bcd", "asa"),
+            ("dcc", "a00"),
+        ];
+        let data2 = vec![
+            ("abd", "def"),
+            ("abf", "dee"),
+            ("ccd", "asa"),
+            ("dcd", "a00"),
+        ];
 
         for i in 0..data.len() {
             b.add(&data[i].0.as_bytes(), &data[i].1.as_bytes()).unwrap();
-            b.add(&data2[i].0.as_bytes(), &data2[i].1.as_bytes()).unwrap();
+            b.add(&data2[i].0.as_bytes(), &data2[i].1.as_bytes())
+                .unwrap();
         }
 
         let estimate = b.size_estimate();
@@ -319,7 +333,12 @@
         let mut b = TableBuilder::new_raw(opt, &mut d);
 
         // Test two equal consecutive keys
-        let data = vec![("abc", "def"), ("abc", "dee"), ("bcd", "asa"), ("bsr", "a00")];
+        let data = vec![
+            ("abc", "def"),
+            ("abc", "dee"),
+            ("bcd", "asa"),
+            ("bsr", "a00"),
+        ];
 
         for &(k, v) in data.iter() {
             b.add(k.as_bytes(), v.as_bytes()).unwrap();
--- a/src/table_cache.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/table_cache.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -3,7 +3,7 @@
 //! returned.
 
 use cache::{self, Cache};
-use error::{err, StatusCode, Result};
+use error::{err, Result, StatusCode};
 use key_types::InternalKey;
 use options::Options;
 use table_reader::Table;
@@ -43,10 +43,11 @@
         }
     }
 
-    pub fn get<'a>(&mut self,
-                   file_num: FileNum,
-                   key: InternalKey<'a>)
-                   -> Result<Option<(Vec<u8>, Vec<u8>)>> {
+    pub fn get<'a>(
+        &mut self,
+        file_num: FileNum,
+        key: InternalKey<'a>,
+    ) -> Result<Option<(Vec<u8>, Vec<u8>)>> {
         let tbl = self.get_table(file_num)?;
         tbl.get(key)
     }
@@ -114,7 +115,12 @@
         let w = o.env.open_writable_file(p).unwrap();
         let mut b = TableBuilder::new_raw(o, w);
 
-        let data = vec![("abc", "def"), ("abd", "dee"), ("bcd", "asa"), ("bsr", "a00")];
+        let data = vec![
+            ("abc", "def"),
+            ("abd", "dee"),
+            ("bcd", "asa"),
+            ("bsr", "a00"),
+        ];
 
         for &(k, v) in data.iter() {
             b.add(k.as_bytes(), v.as_bytes()).unwrap();
@@ -138,11 +144,15 @@
 
         let mut cache = TableCache::new(dbname, opt.clone(), 10);
         assert!(cache.cache.get(&filenum_to_key(123)).is_none());
-        assert_eq!(LdbIteratorIter::wrap(&mut cache.get_table(123).unwrap().iter()).count(),
-                   4);
+        assert_eq!(
+            LdbIteratorIter::wrap(&mut cache.get_table(123).unwrap().iter()).count(),
+            4
+        );
         // Test cached table.
-        assert_eq!(LdbIteratorIter::wrap(&mut cache.get_table(123).unwrap().iter()).count(),
-                   4);
+        assert_eq!(
+            LdbIteratorIter::wrap(&mut cache.get_table(123).unwrap().iter()).count(),
+            4
+        );
 
         assert!(cache.cache.get(&filenum_to_key(123)).is_some());
         assert!(cache.evict(123).is_ok());
--- a/src/table_reader.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/table_reader.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -41,11 +41,16 @@
     /// Creates a new table reader operating on unformatted keys (i.e., UserKey).
     fn new_raw(opt: Options, file: Rc<Box<RandomAccess>>, size: usize) -> Result<Table> {
         let footer = try!(read_footer(file.as_ref().as_ref(), size));
-        let indexblock =
-            try!(table_block::read_table_block(opt.clone(), file.as_ref().as_ref(), &footer.index));
-        let metaindexblock = try!(table_block::read_table_block(opt.clone(),
-                                                                file.as_ref().as_ref(),
-                                                                &footer.meta_index));
+        let indexblock = try!(table_block::read_table_block(
+            opt.clone(),
+            file.as_ref().as_ref(),
+            &footer.index
+        ));
+        let metaindexblock = try!(table_block::read_table_block(
+            opt.clone(),
+            file.as_ref().as_ref(),
+            &footer.meta_index
+        ));
 
         let filter_block_reader =
             Table::read_filter_block(&metaindexblock, file.as_ref().as_ref(), &opt)?;
@@ -62,12 +67,15 @@
         })
     }
 
-    fn read_filter_block(metaix: &Block,
-                         file: &RandomAccess,
-                         options: &Options)
-                         -> Result<Option<FilterBlockReader>> {
+    fn read_filter_block(
+        metaix: &Block,
+        file: &RandomAccess,
+        options: &Options,
+    ) -> Result<Option<FilterBlockReader>> {
         // Open filter block for reading
-        let filter_name = format!("filter.{}", options.filter_policy.name()).as_bytes().to_vec();
+        let filter_name = format!("filter.{}", options.filter_policy.name())
+            .as_bytes()
+            .to_vec();
 
         let mut metaindexiter = metaix.iter();
         metaindexiter.seek(&filter_name);
@@ -75,9 +83,11 @@
         if let Some((_key, val)) = current_key_val(&metaindexiter) {
             let filter_block_location = BlockHandle::decode(&val).0;
             if filter_block_location.size() > 0 {
-                return Ok(Some(table_block::read_filter_block(file,
-                                                              &filter_block_location,
-                                                              options.filter_policy.clone())?));
+                return Ok(Some(table_block::read_filter_block(
+                    file,
+                    &filter_block_location,
+                    options.filter_policy.clone(),
+                )?));
             }
         }
         Ok(None)
@@ -88,7 +98,9 @@
     /// (InternalFilterPolicy) are used.
     pub fn new(mut opt: Options, file: Rc<Box<RandomAccess>>, size: usize) -> Result<Table> {
         opt.cmp = Rc::new(Box::new(InternalKeyCmp(opt.cmp.clone())));
-        opt.filter_policy = Rc::new(Box::new(filter::InternalFilterPolicy::new(opt.filter_policy)));
+        opt.filter_policy = Rc::new(Box::new(filter::InternalFilterPolicy::new(
+            opt.filter_policy,
+        )));
         Table::new_raw(opt, file, size)
     }
 
@@ -96,8 +108,12 @@
     /// block cache.
     fn block_cache_handle(&self, block_off: usize) -> cache::CacheKey {
         let mut dst = [0; 2 * 8];
-        (&mut dst[..8]).write_fixedint(self.cache_id).expect("error writing to vec");
-        (&mut dst[8..]).write_fixedint(block_off as u64).expect("error writing to vec");
+        (&mut dst[..8])
+            .write_fixedint(self.cache_id)
+            .expect("error writing to vec");
+        (&mut dst[8..])
+            .write_fixedint(block_off as u64)
+            .expect("error writing to vec");
         dst
     }
 
@@ -110,12 +126,17 @@
         }
 
         // as_ref() is needed twice: first to get a reference out of the Rc<>, then one out of the Box<>.
-        let b = try!(table_block::read_table_block(self.opt.clone(),
-                                                   self.file.as_ref().as_ref(),
-                                                   location));
+        let b = try!(table_block::read_table_block(
+            self.opt.clone(),
+            self.file.as_ref().as_ref(),
+            location
+        ));
 
         // insert a cheap copy (Rc).
-        self.opt.block_cache.borrow_mut().insert(&cachekey, b.clone());
+        self.opt
+            .block_cache
+            .borrow_mut()
+            .insert(&cachekey, b.clone());
 
         Ok(b)
     }
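
For context on the hunks above: block_cache_handle packs the table's cache id and a block offset into a fixed 16-byte key, and read_block uses that key to look up or insert blocks in the shared block cache. The sketch below shows only the key-packing idea in isolation; it is a stand-in under stated assumptions (std's to_le_bytes instead of the integer_encoding helpers the crate uses, and a hypothetical block_cache_key function), not the crate's code. The exact byte order is irrelevant as long as it is applied consistently.

// Pack (cache id, block offset) into a 16-byte cache key.
fn block_cache_key(cache_id: u64, block_off: u64) -> [u8; 16] {
    let mut key = [0u8; 16];
    key[..8].copy_from_slice(&cache_id.to_le_bytes());
    key[8..].copy_from_slice(&block_off.to_le_bytes());
    key
}

fn main() {
    // Same table, different offsets -> different keys; different tables never collide either.
    assert_ne!(block_cache_key(1, 4096), block_cache_key(1, 8192));
    assert_ne!(block_cache_key(1, 4096), block_cache_key(2, 4096));
}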
@@ -350,16 +371,18 @@
     use super::*;
 
     fn build_data() -> Vec<(&'static str, &'static str)> {
-        vec![// block 1
-             ("abc", "def"),
-             ("abd", "dee"),
-             ("bcd", "asa"),
-             // block 2
-             ("bsr", "a00"),
-             ("xyz", "xxx"),
-             ("xzz", "yyy"),
-             // block 3
-             ("zzz", "111")]
+        vec![
+            // block 1
+            ("abc", "def"),
+            ("abd", "dee"),
+            ("bcd", "asa"),
+            // block 2
+            ("bsr", "a00"),
+            ("xyz", "xxx"),
+            ("xzz", "yyy"),
+            // block 3
+            ("zzz", "111"),
+        ]
     }
 
     // Build a table containing raw keys (no format). It returns (vector, length) for convenience
@@ -473,8 +496,10 @@
         let mut i = 0;
 
         while let Some((k, v)) = iter.next() {
-            assert_eq!((data[i].0.as_bytes(), data[i].1.as_bytes()),
-                       (k.as_ref(), v.as_ref()));
+            assert_eq!(
+                (data[i].0.as_bytes(), data[i].1.as_bytes()),
+                (k.as_ref(), v.as_ref())
+            );
             i += 1;
         }
 
@@ -495,9 +520,13 @@
         while iter.prev() {
             if let Some((k, v)) = current_key_val(&iter) {
                 j += 1;
-                assert_eq!((data[data.len() - 1 - j].0.as_bytes(),
-                            data[data.len() - 1 - j].1.as_bytes()),
-                           (k.as_ref(), v.as_ref()));
+                assert_eq!(
+                    (
+                        data[data.len() - 1 - j].0.as_bytes(),
+                        data[data.len() - 1 - j].1.as_bytes()
+                    ),
+                    (k.as_ref(), v.as_ref())
+                );
             } else {
                 break;
             }
@@ -583,8 +612,10 @@
             iter.prev();
 
             if let Some((k, v)) = current_key_val(&iter) {
-                assert_eq!((data[i].0.as_bytes(), data[i].1.as_bytes()),
-                           (k.as_ref(), v.as_ref()));
+                assert_eq!(
+                    (data[i].0.as_bytes(), data[i].1.as_bytes()),
+                    (k.as_ref(), v.as_ref())
+                );
             } else {
                 break;
             }
@@ -608,12 +639,16 @@
 
         iter.seek(b"bcd");
         assert!(iter.valid());
-        assert_eq!(current_key_val(&iter),
-                   Some((b"bcd".to_vec(), b"asa".to_vec())));
+        assert_eq!(
+            current_key_val(&iter),
+            Some((b"bcd".to_vec(), b"asa".to_vec()))
+        );
         iter.seek(b"abc");
         assert!(iter.valid());
-        assert_eq!(current_key_val(&iter),
-                   Some((b"abc".to_vec(), b"def".to_vec())));
+        assert_eq!(
+            current_key_val(&iter),
+            Some((b"abc".to_vec(), b"def".to_vec()))
+        );
 
         // Seek-past-last invalidates.
         iter.seek("{{{".as_bytes());
@@ -670,7 +705,12 @@
             assert_eq!((k.to_vec(), v.to_vec()), table.get(k).unwrap().unwrap());
         }
 
-        assert!(table.get(LookupKey::new(b"abc", 1000).internal_key()).unwrap().is_some());
+        assert!(
+            table
+                .get(LookupKey::new(b"abc", 1000).internal_key())
+                .unwrap()
+                .is_some()
+        );
 
         let mut iter = table.iter();
 
--- a/src/test_util.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/test_util.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -130,17 +130,21 @@
     assert!(!it.valid());
 }
 
-
 #[cfg(test)]
 mod tests {
     use super::*;
 
     #[test]
     fn test_test_util_basic() {
-        let v = vec![("abc".as_bytes(), "def".as_bytes()), ("abd".as_bytes(), "deg".as_bytes())];
+        let v = vec![
+            ("abc".as_bytes(), "def".as_bytes()),
+            ("abd".as_bytes(), "deg".as_bytes()),
+        ];
         let mut iter = TestLdbIter::new(v);
-        assert_eq!(iter.next(),
-                   Some((Vec::from("abc".as_bytes()), Vec::from("def".as_bytes()))));
+        assert_eq!(
+            iter.next(),
+            Some((Vec::from("abc".as_bytes()), Vec::from("def".as_bytes())))
+        );
     }
 
     #[test]
@@ -149,10 +153,12 @@
         let v;
         {
             time_test!("init");
-            v = vec![("abc".as_bytes(), "def".as_bytes()),
-                     ("abd".as_bytes(), "deg".as_bytes()),
-                     ("abe".as_bytes(), "deg".as_bytes()),
-                     ("abf".as_bytes(), "deg".as_bytes())];
+            v = vec![
+                ("abc".as_bytes(), "def".as_bytes()),
+                ("abd".as_bytes(), "deg".as_bytes()),
+                ("abe".as_bytes(), "deg".as_bytes()),
+                ("abf".as_bytes(), "deg".as_bytes()),
+            ];
         }
         test_iterator_properties(TestLdbIter::new(v));
     }
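
The reformatted tests above drive TestLdbIter, an in-memory iterator over (key, value) pairs used throughout the test suite. As a rough illustration only, here is a standalone iterator with the same shape, built on std's Iterator trait rather than the crate's LdbIterator trait; PairIter and its fields are made-up names.

// Walks a vector of (key, value) byte-slice pairs and yields owned copies.
struct PairIter<'a> {
    data: Vec<(&'a [u8], &'a [u8])>,
    ix: usize,
}

impl<'a> Iterator for PairIter<'a> {
    type Item = (Vec<u8>, Vec<u8>);
    fn next(&mut self) -> Option<Self::Item> {
        if self.ix >= self.data.len() {
            return None;
        }
        let (k, v) = self.data[self.ix];
        self.ix += 1;
        Some((k.to_vec(), v.to_vec()))
    }
}

fn main() {
    let mut it = PairIter {
        data: vec![(&b"abc"[..], &b"def"[..]), (&b"abd"[..], &b"deg"[..])],
        ix: 0,
    };
    assert_eq!(it.next(), Some((b"abc".to_vec(), b"def".to_vec())));
    assert_eq!(it.count(), 1); // one entry left after the first next()
}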
--- a/src/types.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/types.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -151,8 +151,10 @@
             if let Ok(num) = FileNum::from_str_radix(&f[ix + 1..], 10) {
                 return Ok((num, FileType::Descriptor));
             }
-            return err(StatusCode::InvalidArgument,
-                       "manifest file number is invalid");
+            return err(
+                StatusCode::InvalidArgument,
+                "manifest file number is invalid",
+            );
         }
         return err(StatusCode::InvalidArgument, "manifest file has no dash");
     } else if let Some(ix) = f.find('.') {
@@ -163,14 +165,18 @@
                 "sst" | "ldb" => FileType::Table,
                 "dbtmp" => FileType::Temp,
                 _ => {
-                    return err(StatusCode::InvalidArgument,
-                               "unknown numbered file extension")
+                    return err(
+                        StatusCode::InvalidArgument,
+                        "unknown numbered file extension",
+                    )
                 }
             };
             return Ok((num, typ));
         }
-        return err(StatusCode::InvalidArgument,
-                   "invalid file number for table or temp file");
+        return err(
+            StatusCode::InvalidArgument,
+            "invalid file number for table or temp file",
+        );
     }
     err(StatusCode::InvalidArgument, "unknown file type")
 }
@@ -181,14 +187,16 @@
 
     #[test]
     fn test_types_parse_file_name() {
-        for c in &[("CURRENT", (0, FileType::Current)),
-                   ("LOCK", (0, FileType::DBLock)),
-                   ("LOG", (0, FileType::InfoLog)),
-                   ("LOG.old", (0, FileType::InfoLog)),
-                   ("MANIFEST-01234", (1234, FileType::Descriptor)),
-                   ("001122.sst", (1122, FileType::Table)),
-                   ("001122.ldb", (1122, FileType::Table)),
-                   ("001122.dbtmp", (1122, FileType::Temp))] {
+        for c in &[
+            ("CURRENT", (0, FileType::Current)),
+            ("LOCK", (0, FileType::DBLock)),
+            ("LOG", (0, FileType::InfoLog)),
+            ("LOG.old", (0, FileType::InfoLog)),
+            ("MANIFEST-01234", (1234, FileType::Descriptor)),
+            ("001122.sst", (1122, FileType::Table)),
+            ("001122.ldb", (1122, FileType::Table)),
+            ("001122.dbtmp", (1122, FileType::Temp)),
+        ] {
             assert_eq!(parse_file_name(c.0).unwrap(), c.1);
         }
         assert!(parse_file_name("xyz.LOCK").is_err());
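
The test table above documents the file-name scheme that parse_file_name understands. The sketch below reimplements just those visible cases as a standalone function (Kind and parse are hypothetical names); the real function returns typed errors instead of None and may handle more extensions than appear in this hunk.

// Simplified file-name classification, covering only the cases in the test table above.
#[derive(Debug, PartialEq)]
enum Kind {
    Current,
    DbLock,
    InfoLog,
    Descriptor(u64),
    Table(u64),
    Temp(u64),
}

fn parse(name: &str) -> Option<Kind> {
    match name {
        "CURRENT" => return Some(Kind::Current),
        "LOCK" => return Some(Kind::DbLock),
        "LOG" | "LOG.old" => return Some(Kind::InfoLog),
        _ => {}
    }
    if name.starts_with("MANIFEST-") {
        return name["MANIFEST-".len()..].parse().ok().map(Kind::Descriptor);
    }
    let dot = name.find('.')?;
    let num: u64 = name[..dot].parse().ok()?;
    match &name[dot + 1..] {
        "sst" | "ldb" => Some(Kind::Table(num)),
        "dbtmp" => Some(Kind::Temp(num)),
        _ => None,
    }
}

fn main() {
    assert_eq!(parse("MANIFEST-01234"), Some(Kind::Descriptor(1234)));
    assert_eq!(parse("001122.ldb"), Some(Kind::Table(1122)));
    assert_eq!(parse("xyz.LOCK"), None); // invalid number, like the is_err() case above
}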
--- a/src/version.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/version.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -3,7 +3,7 @@
 use key_types::{parse_internal_key, InternalKey, LookupKey, UserKey, ValueType};
 use table_cache::TableCache;
 use table_reader::TableIterator;
-use types::{MAX_SEQUENCE_NUMBER, NUM_LEVELS, FileMetaData, FileNum, LdbIterator, Shared};
+use types::{FileMetaData, FileNum, LdbIterator, Shared, MAX_SEQUENCE_NUMBER, NUM_LEVELS};
 
 use std::cmp::Ordering;
 use std::default::Default;
@@ -54,7 +54,6 @@
         self.files[l].len()
     }
 
-
     /// get returns the value for the specified key using the persistent tables contained in this
     /// Version.
     #[allow(unused_assignments)]
@@ -87,8 +86,9 @@
                     // We don't need to check the sequence number; get() will not return an entry
                     // with a higher sequence number than the one in the supplied key.
                     let (typ, _, foundkey) = parse_internal_key(&k);
-                    if typ == ValueType::TypeValue &&
-                       self.user_cmp.cmp(foundkey, ukey) == Ordering::Equal {
+                    if typ == ValueType::TypeValue
+                        && self.user_cmp.cmp(foundkey, ukey) == Ordering::Equal
+                    {
                         return Ok(Some((v, stats)));
                     } else if typ == ValueType::TypeDeletion {
                         // Skip looking once we have found a deletion.
@@ -110,10 +110,13 @@
         levels[0].reserve(files.len());
         for f_ in files {
             let f = f_.borrow();
-            let (fsmallest, flargest) = (parse_internal_key(&f.smallest).2,
-                                         parse_internal_key(&f.largest).2);
-            if self.user_cmp.cmp(ukey, fsmallest) >= Ordering::Equal &&
-               self.user_cmp.cmp(ukey, flargest) <= Ordering::Equal {
+            let (fsmallest, flargest) = (
+                parse_internal_key(&f.smallest).2,
+                parse_internal_key(&f.largest).2,
+            );
+            if self.user_cmp.cmp(ukey, fsmallest) >= Ordering::Equal
+                && self.user_cmp.cmp(ukey, flargest) <= Ordering::Equal
+            {
                 levels[0].push(f_.clone());
             }
         }
@@ -146,11 +149,13 @@
             let filedesc: Vec<(FileNum, usize)> = fs.iter()
                 .map(|f| (f.borrow().num, f.borrow().size))
                 .collect();
-            let desc = format!("level {}: {} files, {} bytes ({:?}); ",
-                               level,
-                               fs.len(),
-                               total_size(fs.iter()),
-                               filedesc);
+            let desc = format!(
+                "level {}: {} files, {} bytes ({:?}); ",
+                level,
+                fs.len(),
+                total_size(fs.iter()),
+                filedesc
+            );
             acc.push_str(&desc);
         }
         acc
@@ -170,10 +175,11 @@
                     break;
                 }
                 if level + 2 < NUM_LEVELS {
-                    let overlaps =
-                        self.overlapping_inputs(level + 2,
-                                                start.internal_key(),
-                                                &limit.internal_key());
+                    let overlaps = self.overlapping_inputs(
+                        level + 2,
+                        start.internal_key(),
+                        &limit.internal_key(),
+                    );
                     let size = total_size(overlaps.iter());
                     if size > 10 * (2 << 20) {
                         break;
@@ -249,34 +255,42 @@
 
     /// overlap_in_level returns true if the specified level's files overlap the range [smallest;
     /// largest].
-    pub fn overlap_in_level<'a, 'b>(&self,
-                                    level: usize,
-                                    smallest: UserKey<'a>,
-                                    largest: UserKey<'a>)
-                                    -> bool {
+    pub fn overlap_in_level<'a, 'b>(
+        &self,
+        level: usize,
+        smallest: UserKey<'a>,
+        largest: UserKey<'a>,
+    ) -> bool {
         assert!(level < NUM_LEVELS);
         if level == 0 {
-            some_file_overlaps_range_disjoint(&InternalKeyCmp(self.user_cmp.clone()),
-                                              &self.files[level],
-                                              smallest,
-                                              largest)
+            some_file_overlaps_range_disjoint(
+                &InternalKeyCmp(self.user_cmp.clone()),
+                &self.files[level],
+                smallest,
+                largest,
+            )
         } else {
-            some_file_overlaps_range(&InternalKeyCmp(self.user_cmp.clone()),
-                                     &self.files[level],
-                                     smallest,
-                                     largest)
+            some_file_overlaps_range(
+                &InternalKeyCmp(self.user_cmp.clone()),
+                &self.files[level],
+                smallest,
+                largest,
+            )
         }
     }
 
     /// overlapping_inputs returns all files that may contain keys between begin and end.
-    pub fn overlapping_inputs<'a, 'b>(&self,
-                                      level: usize,
-                                      begin: InternalKey<'a>,
-                                      end: InternalKey<'b>)
-                                      -> Vec<FileMetaHandle> {
+    pub fn overlapping_inputs<'a, 'b>(
+        &self,
+        level: usize,
+        begin: InternalKey<'a>,
+        end: InternalKey<'b>,
+    ) -> Vec<FileMetaHandle> {
         assert!(level < NUM_LEVELS);
-        let (mut ubegin, mut uend) = (parse_internal_key(begin).2.to_vec(),
-                                      parse_internal_key(end).2.to_vec());
+        let (mut ubegin, mut uend) = (
+            parse_internal_key(begin).2.to_vec(),
+            parse_internal_key(end).2.to_vec(),
+        );
 
         loop {
             match do_search(self, level, ubegin, uend) {
@@ -291,21 +305,25 @@
         // the actual search happens in this inner function; it is factored out to keep the control
         // flow simple. It takes the smallest and largest user keys and returns a new pair of user keys if
         // the search range should be expanded, or a list of overlapping files.
-        fn do_search(myself: &Version,
-                     level: usize,
-                     ubegin: Vec<u8>,
-                     uend: Vec<u8>)
-                     -> (Option<(Vec<u8>, Vec<u8>)>, Vec<FileMetaHandle>) {
+        fn do_search(
+            myself: &Version,
+            level: usize,
+            ubegin: Vec<u8>,
+            uend: Vec<u8>,
+        ) -> (Option<(Vec<u8>, Vec<u8>)>, Vec<FileMetaHandle>) {
             let mut inputs = vec![];
             for f_ in myself.files[level].iter() {
                 let f = f_.borrow();
-                let (fsmallest, flargest) = (parse_internal_key(&f.smallest).2,
-                                             parse_internal_key(&f.largest).2);
+                let (fsmallest, flargest) = (
+                    parse_internal_key(&f.smallest).2,
+                    parse_internal_key(&f.largest).2,
+                );
                 // Skip files that are not overlapping.
                 if !ubegin.is_empty() && myself.user_cmp.cmp(flargest, &ubegin) == Ordering::Less {
                     continue;
-                } else if !uend.is_empty() &&
-                          myself.user_cmp.cmp(fsmallest, &uend) == Ordering::Greater {
+                } else if !uend.is_empty()
+                    && myself.user_cmp.cmp(fsmallest, &uend) == Ordering::Greater
+                {
                     continue;
                 } else {
                     inputs.push(f_.clone());
@@ -313,11 +331,13 @@
                     // before ubegin or ends after uend, and expand the range, if so. Then, restart
                     // the search.
                     if level == 0 {
-                        if !ubegin.is_empty() &&
-                           myself.user_cmp.cmp(fsmallest, &ubegin) == Ordering::Less {
+                        if !ubegin.is_empty()
+                            && myself.user_cmp.cmp(fsmallest, &ubegin) == Ordering::Less
+                        {
                             return (Some((fsmallest.to_vec(), uend)), inputs);
-                        } else if !uend.is_empty() &&
-                                  myself.user_cmp.cmp(flargest, &uend) == Ordering::Greater {
+                        } else if !uend.is_empty()
+                            && myself.user_cmp.cmp(flargest, &uend) == Ordering::Greater
+                        {
                             return (Some((ubegin, flargest.to_vec())), inputs);
                         }
                     }
@@ -330,9 +350,11 @@
     /// new_concat_iter returns an iterator that iterates over the files in a level. Note that this
     /// only really makes sense for levels > 0.
     fn new_concat_iter(&self, level: usize) -> VersionIter {
-        new_version_iter(self.files[level].clone(),
-                         self.table_cache.clone(),
-                         self.user_cmp.clone())
+        new_version_iter(
+            self.files[level].clone(),
+            self.table_cache.clone(),
+            self.user_cmp.clone(),
+        )
     }
 
     /// new_iters returns a set of iterators that can be merged to yield all entries in this
@@ -340,7 +362,12 @@
     pub fn new_iters(&self) -> Result<Vec<Box<LdbIterator>>> {
         let mut iters: Vec<Box<LdbIterator>> = vec![];
         for f in &self.files[0] {
-            iters.push(Box::new(self.table_cache.borrow_mut().get_table(f.borrow().num)?.iter()));
+            iters.push(Box::new(
+                self.table_cache
+                    .borrow_mut()
+                    .get_table(f.borrow().num)?
+                    .iter(),
+            ));
         }
 
         for l in 1..NUM_LEVELS {
@@ -355,10 +382,11 @@
 
 /// new_version_iter returns an iterator over the entries in the specified ordered list of table
 /// files.
-pub fn new_version_iter(files: Vec<FileMetaHandle>,
-                        cache: Shared<TableCache>,
-                        ucmp: Rc<Box<Cmp>>)
-                        -> VersionIter {
+pub fn new_version_iter(
+    files: Vec<FileMetaHandle>,
+    cache: Shared<TableCache>,
+    ucmp: Rc<Box<Cmp>>,
+) -> VersionIter {
     VersionIter {
         files: files,
         cache: cache,
@@ -402,7 +430,8 @@
         // Initialize iterator or load next table.
         if let Ok(tbl) = self.cache
             .borrow_mut()
-            .get_table(self.files[self.current_ix].borrow().num) {
+            .get_table(self.files[self.current_ix].borrow().num)
+        {
             self.current = Some(tbl.iter());
         } else {
             return false;
@@ -418,7 +447,10 @@
     }
     fn seek(&mut self, key: &[u8]) {
         if let Some(ix) = find_file(&self.cmp, &self.files, key) {
-            if let Ok(tbl) = self.cache.borrow_mut().get_table(self.files[ix].borrow().num) {
+            if let Ok(tbl) = self.cache
+                .borrow_mut()
+                .get_table(self.files[ix].borrow().num)
+            {
                 let mut iter = tbl.iter();
                 iter.seek(key);
                 if iter.valid() {
@@ -482,10 +514,11 @@
 /// find_file returns the index of the file in files that potentially contains the internal key
 /// key. The files must not overlap and must be sorted in ascending order. If no file can
 /// contain the key, None is returned.
-fn find_file<'a>(cmp: &InternalKeyCmp,
-                 files: &[FileMetaHandle],
-                 key: InternalKey<'a>)
-                 -> Option<usize> {
+fn find_file<'a>(
+    cmp: &InternalKeyCmp,
+    files: &[FileMetaHandle],
+    key: InternalKey<'a>,
+) -> Option<usize> {
     let (mut left, mut right) = (0, files.len());
     while left < right {
         let mid = (left + right) / 2;
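
The hunk above covers the start of find_file's binary search: for a level above 0 the files hold disjoint, ascending key ranges, so the first file whose largest key is not less than the lookup key is the only candidate. Below is a self-contained sketch of that search, using plain byte strings instead of internal keys and the hypothetical name find_file_sketch; the real code compares via InternalKeyCmp.

// files: disjoint, ascending (smallest, largest) key ranges. Returns the index of the first
// file whose largest key is >= key, or None if the key lies past every file.
fn find_file_sketch(files: &[(&[u8], &[u8])], key: &[u8]) -> Option<usize> {
    let (mut left, mut right) = (0, files.len());
    while left < right {
        let mid = (left + right) / 2;
        if files[mid].1 < key {
            left = mid + 1; // everything in files[mid] lies before key
        } else {
            right = mid;
        }
    }
    if left < files.len() {
        Some(left)
    } else {
        None
    }
}

fn main() {
    let files = [
        (&b"aaa"[..], &b"bzz"[..]),
        (&b"caa"[..], &b"dzz"[..]),
        (&b"eaa"[..], &b"fzz"[..]),
    ];
    assert_eq!(find_file_sketch(&files, b"cde"), Some(1));
    assert_eq!(find_file_sketch(&files, b"zzz"), None);
}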
@@ -504,11 +537,12 @@
 
 /// some_file_overlaps_range_disjoint returns true if any of the given disjoint files (i.e. level >
 /// 1) contain keys in the range defined by the user keys [smallest; largest].
-fn some_file_overlaps_range_disjoint<'a, 'b>(cmp: &InternalKeyCmp,
-                                             files: &[FileMetaHandle],
-                                             smallest: UserKey<'a>,
-                                             largest: UserKey<'b>)
-                                             -> bool {
+fn some_file_overlaps_range_disjoint<'a, 'b>(
+    cmp: &InternalKeyCmp,
+    files: &[FileMetaHandle],
+    smallest: UserKey<'a>,
+    largest: UserKey<'b>,
+) -> bool {
     let ikey = LookupKey::new(smallest, MAX_SEQUENCE_NUMBER);
     if let Some(ix) = find_file(cmp, files, ikey.internal_key()) {
         !key_is_before_file(cmp, largest, &files[ix])
@@ -519,11 +553,12 @@
 
 /// some_file_overlaps_range returns true if any of the given possibly overlapping files contains
 /// keys in the range [smallest; largest].
-fn some_file_overlaps_range<'a, 'b>(cmp: &InternalKeyCmp,
-                                    files: &[FileMetaHandle],
-                                    smallest: UserKey<'a>,
-                                    largest: UserKey<'b>)
-                                    -> bool {
+fn some_file_overlaps_range<'a, 'b>(
+    cmp: &InternalKeyCmp,
+    files: &[FileMetaHandle],
+    smallest: UserKey<'a>,
+    largest: UserKey<'b>,
+) -> bool {
     for f in files {
         if !(key_is_after_file(cmp, smallest, f) || key_is_before_file(cmp, largest, f)) {
             return true;
@@ -545,12 +580,13 @@
 
     use std::path::Path;
 
-    pub fn new_file(num: u64,
-                    smallest: &[u8],
-                    smallestix: u64,
-                    largest: &[u8],
-                    largestix: u64)
-                    -> FileMetaHandle {
+    pub fn new_file(
+        num: u64,
+        smallest: &[u8],
+        smallestix: u64,
+        largest: &[u8],
+        largestix: u64,
+    ) -> FileMetaHandle {
         share(FileMetaData {
             allowed_seeks: 10,
             size: 163840,
@@ -562,14 +598,17 @@
 
     /// write_table creates a table with the given number and contents (must be sorted!) in the
     /// memenv. The sequence numbers given to keys start with startseq.
-    pub fn write_table(me: &Box<Env>,
-                       contents: &[(&[u8], &[u8], ValueType)],
-                       startseq: u64,
-                       num: FileNum)
-                       -> FileMetaHandle {
-        let dst = me.open_writable_file(Path::new(&table_file_name("db", num))).unwrap();
+    pub fn write_table(
+        me: &Box<Env>,
+        contents: &[(&[u8], &[u8], ValueType)],
+        startseq: u64,
+        num: FileNum,
+    ) -> FileMetaHandle {
+        let dst = me.open_writable_file(Path::new(&table_file_name("db", num)))
+            .unwrap();
         let mut seq = startseq;
-        let keys: Vec<Vec<u8>> = contents.iter()
+        let keys: Vec<Vec<u8>> = contents
+            .iter()
             .map(|&(k, _, typ)| {
                 seq += 1;
                 LookupKey::new_full(k, seq - 1, typ).internal_key().to_vec()
@@ -582,11 +621,13 @@
             seq += 1;
         }
 
-        let f = new_file(num,
-                         contents[0].0,
-                         startseq,
-                         contents[contents.len() - 1].0,
-                         startseq + (contents.len() - 1) as u64);
+        let f = new_file(
+            num,
+            contents[0].0,
+            startseq,
+            contents[contents.len() - 1].0,
+            startseq + (contents.len() - 1) as u64,
+        );
         f.borrow_mut().size = tbl.finish().unwrap();
         f
     }
@@ -601,59 +642,67 @@
         // numbers.
 
         // Level 0 (overlapping)
-        let f2: &[(&[u8], &[u8], ValueType)] =
-            &[("aac".as_bytes(), "val1".as_bytes(), ValueType::TypeDeletion),
-              ("aax".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
-              ("aba".as_bytes(), "val3".as_bytes(), ValueType::TypeValue),
-              ("bab".as_bytes(), "val4".as_bytes(), ValueType::TypeValue),
-              ("bba".as_bytes(), "val5".as_bytes(), ValueType::TypeValue)];
+        let f2: &[(&[u8], &[u8], ValueType)] = &[
+            ("aac".as_bytes(), "val1".as_bytes(), ValueType::TypeDeletion),
+            ("aax".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
+            ("aba".as_bytes(), "val3".as_bytes(), ValueType::TypeValue),
+            ("bab".as_bytes(), "val4".as_bytes(), ValueType::TypeValue),
+            ("bba".as_bytes(), "val5".as_bytes(), ValueType::TypeValue),
+        ];
         let t2 = write_table(&env, f2, 26, 2);
-        let f1: &[(&[u8], &[u8], ValueType)] =
-            &[("aaa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
-              ("aab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
-              ("aac".as_bytes(), "val3".as_bytes(), ValueType::TypeValue),
-              ("aba".as_bytes(), "val4".as_bytes(), ValueType::TypeValue)];
+        let f1: &[(&[u8], &[u8], ValueType)] = &[
+            ("aaa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
+            ("aab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
+            ("aac".as_bytes(), "val3".as_bytes(), ValueType::TypeValue),
+            ("aba".as_bytes(), "val4".as_bytes(), ValueType::TypeValue),
+        ];
         let t1 = write_table(&env, f1, 22, 1);
         // Level 1
-        let f3: &[(&[u8], &[u8], ValueType)] =
-            &[("aaa".as_bytes(), "val0".as_bytes(), ValueType::TypeValue),
-              ("cab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
-              ("cba".as_bytes(), "val3".as_bytes(), ValueType::TypeValue)];
+        let f3: &[(&[u8], &[u8], ValueType)] = &[
+            ("aaa".as_bytes(), "val0".as_bytes(), ValueType::TypeValue),
+            ("cab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
+            ("cba".as_bytes(), "val3".as_bytes(), ValueType::TypeValue),
+        ];
         let t3 = write_table(&env, f3, 19, 3);
-        let f4: &[(&[u8], &[u8], ValueType)] =
-            &[("daa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
-              ("dab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
-              ("dba".as_bytes(), "val3".as_bytes(), ValueType::TypeValue)];
+        let f4: &[(&[u8], &[u8], ValueType)] = &[
+            ("daa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
+            ("dab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
+            ("dba".as_bytes(), "val3".as_bytes(), ValueType::TypeValue),
+        ];
         let t4 = write_table(&env, f4, 16, 4);
-        let f5: &[(&[u8], &[u8], ValueType)] =
-            &[("eaa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
-              ("eab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
-              ("fab".as_bytes(), "val3".as_bytes(), ValueType::TypeValue)];
+        let f5: &[(&[u8], &[u8], ValueType)] = &[
+            ("eaa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
+            ("eab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
+            ("fab".as_bytes(), "val3".as_bytes(), ValueType::TypeValue),
+        ];
         let t5 = write_table(&env, f5, 13, 5);
         // Level 2
-        let f6: &[(&[u8], &[u8], ValueType)] =
-            &[("cab".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
-              ("fab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
-              ("fba".as_bytes(), "val3".as_bytes(), ValueType::TypeValue)];
+        let f6: &[(&[u8], &[u8], ValueType)] = &[
+            ("cab".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
+            ("fab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
+            ("fba".as_bytes(), "val3".as_bytes(), ValueType::TypeValue),
+        ];
         let t6 = write_table(&env, f6, 10, 6);
-        let f7: &[(&[u8], &[u8], ValueType)] =
-            &[("gaa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
-              ("gab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
-              ("gba".as_bytes(), "val3".as_bytes(), ValueType::TypeValue),
-              ("gca".as_bytes(), "val4".as_bytes(), ValueType::TypeDeletion),
-              ("gda".as_bytes(), "val5".as_bytes(), ValueType::TypeValue)];
+        let f7: &[(&[u8], &[u8], ValueType)] = &[
+            ("gaa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
+            ("gab".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
+            ("gba".as_bytes(), "val3".as_bytes(), ValueType::TypeValue),
+            ("gca".as_bytes(), "val4".as_bytes(), ValueType::TypeDeletion),
+            ("gda".as_bytes(), "val5".as_bytes(), ValueType::TypeValue),
+        ];
         let t7 = write_table(&env, f7, 5, 7);
         // Level 3 (2 * 2 entries, for iterator behavior).
-        let f8: &[(&[u8], &[u8], ValueType)] =
-            &[("haa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
-              ("hba".as_bytes(), "val2".as_bytes(), ValueType::TypeValue)];
+        let f8: &[(&[u8], &[u8], ValueType)] = &[
+            ("haa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
+            ("hba".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
+        ];
         let t8 = write_table(&env, f8, 3, 8);
-        let f9: &[(&[u8], &[u8], ValueType)] =
-            &[("iaa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
-              ("iba".as_bytes(), "val2".as_bytes(), ValueType::TypeValue)];
+        let f9: &[(&[u8], &[u8], ValueType)] = &[
+            ("iaa".as_bytes(), "val1".as_bytes(), ValueType::TypeValue),
+            ("iba".as_bytes(), "val2".as_bytes(), ValueType::TypeValue),
+        ];
         let t9 = write_table(&env, f9, 1, 9);
 
-
         let cache = TableCache::new("db", opts.clone(), 100);
         let mut v = Version::new(share(cache), Rc::new(Box::new(DefaultCmp)));
         v.files[0] = vec![t1, t2];
@@ -731,23 +780,24 @@
     #[test]
     fn test_version_get_simple() {
         let v = make_version().0;
-        let cases: &[(&[u8], u64, Result<Option<Vec<u8>>>)] =
-            &[("aaa".as_bytes(), 1, Ok(None)),
-              ("aaa".as_bytes(), 100, Ok(Some("val1".as_bytes().to_vec()))),
-              ("aaa".as_bytes(), 21, Ok(Some("val0".as_bytes().to_vec()))),
-              ("aab".as_bytes(), 0, Ok(None)),
-              ("aab".as_bytes(), 100, Ok(Some("val2".as_bytes().to_vec()))),
-              ("aac".as_bytes(), 100, Ok(None)),
-              ("aac".as_bytes(), 25, Ok(Some("val3".as_bytes().to_vec()))),
-              ("aba".as_bytes(), 100, Ok(Some("val3".as_bytes().to_vec()))),
-              ("aba".as_bytes(), 25, Ok(Some("val4".as_bytes().to_vec()))),
-              ("daa".as_bytes(), 100, Ok(Some("val1".as_bytes().to_vec()))),
-              ("dab".as_bytes(), 1, Ok(None)),
-              ("dac".as_bytes(), 100, Ok(None)),
-              ("gba".as_bytes(), 100, Ok(Some("val3".as_bytes().to_vec()))),
-              // deleted key
-              ("gca".as_bytes(), 100, Ok(None)),
-              ("gbb".as_bytes(), 100, Ok(None))];
+        let cases: &[(&[u8], u64, Result<Option<Vec<u8>>>)] = &[
+            ("aaa".as_bytes(), 1, Ok(None)),
+            ("aaa".as_bytes(), 100, Ok(Some("val1".as_bytes().to_vec()))),
+            ("aaa".as_bytes(), 21, Ok(Some("val0".as_bytes().to_vec()))),
+            ("aab".as_bytes(), 0, Ok(None)),
+            ("aab".as_bytes(), 100, Ok(Some("val2".as_bytes().to_vec()))),
+            ("aac".as_bytes(), 100, Ok(None)),
+            ("aac".as_bytes(), 25, Ok(Some("val3".as_bytes().to_vec()))),
+            ("aba".as_bytes(), 100, Ok(Some("val3".as_bytes().to_vec()))),
+            ("aba".as_bytes(), 25, Ok(Some("val4".as_bytes().to_vec()))),
+            ("daa".as_bytes(), 100, Ok(Some("val1".as_bytes().to_vec()))),
+            ("dab".as_bytes(), 1, Ok(None)),
+            ("dac".as_bytes(), 100, Ok(None)),
+            ("gba".as_bytes(), 100, Ok(Some("val3".as_bytes().to_vec()))),
+            // deleted key
+            ("gca".as_bytes(), 100, Ok(None)),
+            ("gbb".as_bytes(), 100, Ok(None)),
+        ];
 
         for ref c in cases {
             match v.get(LookupKey::new(c.0, c.1).internal_key()) {
@@ -783,12 +833,14 @@
     fn test_version_overlap_in_level() {
         let v = make_version().0;
 
-        for &(level, (k1, k2), want) in &[(0, ("000".as_bytes(), "003".as_bytes()), false),
-                                          (0, ("aa0".as_bytes(), "abx".as_bytes()), true),
-                                          (1, ("012".as_bytes(), "013".as_bytes()), false),
-                                          (1, ("abc".as_bytes(), "def".as_bytes()), true),
-                                          (2, ("xxx".as_bytes(), "xyz".as_bytes()), false),
-                                          (2, ("gac".as_bytes(), "gaz".as_bytes()), true)] {
+        for &(level, (k1, k2), want) in &[
+            (0, ("000".as_bytes(), "003".as_bytes()), false),
+            (0, ("aa0".as_bytes(), "abx".as_bytes()), true),
+            (1, ("012".as_bytes(), "013".as_bytes()), false),
+            (1, ("abc".as_bytes(), "def".as_bytes()), true),
+            (2, ("xxx".as_bytes(), "xyz".as_bytes()), false),
+            (2, ("gac".as_bytes(), "gaz".as_bytes()), true),
+        ] {
             if want {
                 assert!(v.overlap_in_level(level, k1, k2));
             } else {
@@ -801,10 +853,12 @@
     fn test_version_pick_memtable_output_level() {
         let v = make_version().0;
 
-        for c in [("000".as_bytes(), "abc".as_bytes(), 0),
-                  ("gab".as_bytes(), "hhh".as_bytes(), 1),
-                  ("000".as_bytes(), "111".as_bytes(), 2)]
-            .iter() {
+        for c in [
+            ("000".as_bytes(), "abc".as_bytes(), 0),
+            ("gab".as_bytes(), "hhh".as_bytes(), 1),
+            ("000".as_bytes(), "111".as_bytes(), 2),
+        ].iter()
+        {
             assert_eq!(c.2, v.pick_memtable_output_level(c.0, c.1));
         }
     }
@@ -880,7 +934,13 @@
             assert!(!key_is_after_file(&cmp, k, &fmh));
         }
         // Keys in file.
-        for k in &[&[1, 0, 0][..], &[1, 0, 1], &[1, 2, 3, 4], &[1, 9, 9], &[2, 0, 0]] {
+        for k in &[
+            &[1, 0, 0][..],
+            &[1, 0, 1],
+            &[1, 2, 3, 4],
+            &[1, 9, 9],
+            &[2, 0, 0],
+        ] {
             assert!(!key_is_before_file(&cmp, k, &fmh));
             assert!(!key_is_after_file(&cmp, k, &fmh));
         }
@@ -895,27 +955,96 @@
     fn test_version_file_overlaps() {
         time_test!();
 
-        let files_disjoint = [new_file(1, &[2, 0, 0], 0, &[3, 0, 0], 1),
-                              new_file(2, &[3, 0, 1], 0, &[4, 0, 0], 1),
-                              new_file(3, &[4, 0, 1], 0, &[5, 0, 0], 1)];
-        let files_joint = [new_file(1, &[2, 0, 0], 0, &[3, 0, 0], 1),
-                           new_file(2, &[2, 5, 0], 0, &[4, 0, 0], 1),
-                           new_file(3, &[3, 5, 1], 0, &[5, 0, 0], 1)];
+        let files_disjoint = [
+            new_file(1, &[2, 0, 0], 0, &[3, 0, 0], 1),
+            new_file(2, &[3, 0, 1], 0, &[4, 0, 0], 1),
+            new_file(3, &[4, 0, 1], 0, &[5, 0, 0], 1),
+        ];
+        let files_joint = [
+            new_file(1, &[2, 0, 0], 0, &[3, 0, 0], 1),
+            new_file(2, &[2, 5, 0], 0, &[4, 0, 0], 1),
+            new_file(3, &[3, 5, 1], 0, &[5, 0, 0], 1),
+        ];
         let cmp = InternalKeyCmp(Rc::new(Box::new(DefaultCmp)));
 
-        assert!(some_file_overlaps_range(&cmp, &files_joint, &[2, 5, 0], &[3, 1, 0]));
-        assert!(some_file_overlaps_range(&cmp, &files_joint, &[2, 5, 0], &[7, 0, 0]));
-        assert!(some_file_overlaps_range(&cmp, &files_joint, &[0, 0], &[2, 0, 0]));
-        assert!(some_file_overlaps_range(&cmp, &files_joint, &[0, 0], &[7, 0, 0]));
-        assert!(!some_file_overlaps_range(&cmp, &files_joint, &[0, 0], &[0, 5]));
-        assert!(!some_file_overlaps_range(&cmp, &files_joint, &[6, 0], &[7, 5]));
+        assert!(some_file_overlaps_range(
+            &cmp,
+            &files_joint,
+            &[2, 5, 0],
+            &[3, 1, 0]
+        ));
+        assert!(some_file_overlaps_range(
+            &cmp,
+            &files_joint,
+            &[2, 5, 0],
+            &[7, 0, 0]
+        ));
+        assert!(some_file_overlaps_range(
+            &cmp,
+            &files_joint,
+            &[0, 0],
+            &[2, 0, 0]
+        ));
+        assert!(some_file_overlaps_range(
+            &cmp,
+            &files_joint,
+            &[0, 0],
+            &[7, 0, 0]
+        ));
+        assert!(!some_file_overlaps_range(
+            &cmp,
+            &files_joint,
+            &[0, 0],
+            &[0, 5]
+        ));
+        assert!(!some_file_overlaps_range(
+            &cmp,
+            &files_joint,
+            &[6, 0],
+            &[7, 5]
+        ));
 
-        assert!(some_file_overlaps_range_disjoint(&cmp, &files_disjoint, &[2, 0, 1], &[2, 5, 0]));
-        assert!(some_file_overlaps_range_disjoint(&cmp, &files_disjoint, &[3, 0, 1], &[4, 9, 0]));
-        assert!(some_file_overlaps_range_disjoint(&cmp, &files_disjoint, &[2, 0, 1], &[6, 5, 0]));
-        assert!(some_file_overlaps_range_disjoint(&cmp, &files_disjoint, &[0, 0, 1], &[2, 5, 0]));
-        assert!(some_file_overlaps_range_disjoint(&cmp, &files_disjoint, &[0, 0, 1], &[6, 5, 0]));
-        assert!(!some_file_overlaps_range_disjoint(&cmp, &files_disjoint, &[0, 0, 1], &[0, 1]));
-        assert!(!some_file_overlaps_range_disjoint(&cmp, &files_disjoint, &[6, 0, 1], &[7, 0, 1]));
+        assert!(some_file_overlaps_range_disjoint(
+            &cmp,
+            &files_disjoint,
+            &[2, 0, 1],
+            &[2, 5, 0]
+        ));
+        assert!(some_file_overlaps_range_disjoint(
+            &cmp,
+            &files_disjoint,
+            &[3, 0, 1],
+            &[4, 9, 0]
+        ));
+        assert!(some_file_overlaps_range_disjoint(
+            &cmp,
+            &files_disjoint,
+            &[2, 0, 1],
+            &[6, 5, 0]
+        ));
+        assert!(some_file_overlaps_range_disjoint(
+            &cmp,
+            &files_disjoint,
+            &[0, 0, 1],
+            &[2, 5, 0]
+        ));
+        assert!(some_file_overlaps_range_disjoint(
+            &cmp,
+            &files_disjoint,
+            &[0, 0, 1],
+            &[6, 5, 0]
+        ));
+        assert!(!some_file_overlaps_range_disjoint(
+            &cmp,
+            &files_disjoint,
+            &[0, 0, 1],
+            &[0, 1]
+        ));
+        assert!(!some_file_overlaps_range_disjoint(
+            &cmp,
+            &files_disjoint,
+            &[6, 0, 1],
+            &[7, 0, 1]
+        ));
     }
 }
--- a/src/version_edit.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/version_edit.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -263,14 +263,16 @@
                                 if let Ok(size) = reader.read_varint() {
                                     let smallest = try!(read_length_prefixed(&mut reader));
                                     let largest = try!(read_length_prefixed(&mut reader));
-                                    ve.new_files.push((lvl,
-                                                       FileMetaData {
-                                        num: num,
-                                        size: size,
-                                        smallest: smallest,
-                                        largest: largest,
-                                        allowed_seeks: 0,
-                                    }))
+                                    ve.new_files.push((
+                                        lvl,
+                                        FileMetaData {
+                                            num: num,
+                                            size: size,
+                                            smallest: smallest,
+                                            largest: largest,
+                                            allowed_seeks: 0,
+                                        },
+                                    ))
                                 } else {
                                     return err(StatusCode::IOError, "Couldn't read file size");
                                 }
@@ -283,8 +285,10 @@
                     }
                 }
             } else {
-                return err(StatusCode::Corruption,
-                           &format!("Invalid tag number {}", tag));
+                return err(
+                    StatusCode::Corruption,
+                    &format!("Invalid tag number {}", tag),
+                );
             }
         }
 
@@ -310,14 +314,16 @@
         ve.set_compact_pointer(0, &[0, 1, 2]);
         ve.set_compact_pointer(1, &[3, 4, 5]);
         ve.set_compact_pointer(2, &[6, 7, 8]);
-        ve.add_file(0,
-                    FileMetaData {
-                        allowed_seeks: 12345,
-                        num: 901,
-                        size: 234,
-                        smallest: vec![5, 6, 7],
-                        largest: vec![8, 9, 0],
-                    });
+        ve.add_file(
+            0,
+            FileMetaData {
+                allowed_seeks: 12345,
+                num: 901,
+                size: 234,
+                smallest: vec![5, 6, 7],
+                largest: vec![8, 9, 0],
+            },
+        );
         ve.delete_file(1, 132);
 
         let encoded = ve.encode();
@@ -328,31 +334,41 @@
         assert_eq!(decoded.log_number, Some(123));
         assert_eq!(decoded.next_file_number, Some(456));
         assert_eq!(decoded.compaction_ptrs.len(), 3);
-        assert_eq!(decoded.compaction_ptrs[0],
-                   CompactionPointer {
-                       level: 0,
-                       key: vec![0, 1, 2],
-                   });
-        assert_eq!(decoded.compaction_ptrs[1],
-                   CompactionPointer {
-                       level: 1,
-                       key: vec![3, 4, 5],
-                   });
-        assert_eq!(decoded.compaction_ptrs[2],
-                   CompactionPointer {
-                       level: 2,
-                       key: vec![6, 7, 8],
-                   });
+        assert_eq!(
+            decoded.compaction_ptrs[0],
+            CompactionPointer {
+                level: 0,
+                key: vec![0, 1, 2],
+            }
+        );
+        assert_eq!(
+            decoded.compaction_ptrs[1],
+            CompactionPointer {
+                level: 1,
+                key: vec![3, 4, 5],
+            }
+        );
+        assert_eq!(
+            decoded.compaction_ptrs[2],
+            CompactionPointer {
+                level: 2,
+                key: vec![6, 7, 8],
+            }
+        );
         assert_eq!(decoded.new_files.len(), 1);
-        assert_eq!(decoded.new_files[0],
-                   (0,
-                    FileMetaData {
-                       allowed_seeks: 0,
-                       num: 901,
-                       size: 234,
-                       smallest: vec![5, 6, 7],
-                       largest: vec![8, 9, 0],
-                   }));
+        assert_eq!(
+            decoded.new_files[0],
+            (
+                0,
+                FileMetaData {
+                    allowed_seeks: 0,
+                    num: 901,
+                    size: 234,
+                    smallest: vec![5, 6, 7],
+                    largest: vec![8, 9, 0],
+                }
+            )
+        );
         assert_eq!(decoded.deleted.len(), 1);
         assert!(decoded.deleted.contains(&(1, 132)));
     }
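
The round-trip test above encodes a VersionEdit and decodes it again; each key field in the record is written with a length prefix, which read_length_prefixed strips on the way back. The sketch below shows only that prefixing idea, using a single length byte to stay self-contained (the real encoding uses varint tags and lengths via the integer_encoding crate); write_prefixed and read_prefixed are made-up names.

// Append data to dst, preceded by a one-byte length.
fn write_prefixed(dst: &mut Vec<u8>, data: &[u8]) {
    assert!(data.len() < 256);
    dst.push(data.len() as u8);
    dst.extend_from_slice(data);
}

// Split off one length-prefixed field, returning (field, remainder).
fn read_prefixed(src: &[u8]) -> (&[u8], &[u8]) {
    let len = src[0] as usize;
    (&src[1..1 + len], &src[1 + len..])
}

fn main() {
    let mut buf = Vec::new();
    write_prefixed(&mut buf, &[5, 6, 7]); // e.g. a smallest key
    write_prefixed(&mut buf, &[8, 9, 0]); // e.g. a largest key
    let (smallest, rest) = read_prefixed(&buf);
    let (largest, rest) = read_prefixed(rest);
    assert_eq!(smallest, &[5u8, 6, 7][..]);
    assert_eq!(largest, &[8u8, 9, 0][..]);
    assert!(rest.is_empty());
}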
--- a/src/version_set.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/version_set.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,14 +1,13 @@
-
 use cmp::{Cmp, InternalKeyCmp};
 use env::Env;
-use error::{err, Status, StatusCode, Result};
+use error::{err, Result, Status, StatusCode};
 use key_types::{parse_internal_key, InternalKey, UserKey};
-use log::{LogWriter, LogReader};
+use log::{LogReader, LogWriter};
 use merging_iter::MergingIter;
 use options::Options;
 use table_cache::TableCache;
-use types::{parse_file_name, share, NUM_LEVELS, FileMetaData, FileNum, FileType, LdbIterator,
-            Shared};
+use types::{parse_file_name, share, FileMetaData, FileNum, FileType, LdbIterator, Shared,
+            NUM_LEVELS};
 use version::{new_version_iter, total_size, FileMetaHandle, Version};
 use version_edit::VersionEdit;
 
@@ -140,9 +139,11 @@
             return false;
         }
         let grandparents = self.grandparents.as_ref().unwrap();
-        while self.grandparent_ix < grandparents.len() &&
-              self.icmp.cmp(k, &grandparents[self.grandparent_ix].borrow().largest) ==
-              Ordering::Greater {
+        while self.grandparent_ix < grandparents.len()
+            && self.icmp
+                .cmp(k, &grandparents[self.grandparent_ix].borrow().largest)
+                == Ordering::Greater
+        {
             if self.seen_key {
                 self.overlapped_bytes += grandparents[self.grandparent_ix].borrow().size;
             }
@@ -290,9 +291,11 @@
             assert!(level < NUM_LEVELS - 1);
 
             for f in &current.files[level] {
-                if self.compaction_ptrs[level].is_empty() ||
-                   self.cmp.cmp(&f.borrow().largest, &self.compaction_ptrs[level]) ==
-                   Ordering::Greater {
+                if self.compaction_ptrs[level].is_empty()
+                    || self.cmp
+                        .cmp(&f.borrow().largest, &self.compaction_ptrs[level])
+                        == Ordering::Greater
+                {
                     c.add_input(0, f.clone());
                     break;
                 }
@@ -324,14 +327,18 @@
         Some(c)
     }
 
-    pub fn compact_range<'a, 'b>(&mut self,
-                                 level: usize,
-                                 from: InternalKey<'a>,
-                                 to: InternalKey<'b>)
-                                 -> Option<Compaction> {
+    pub fn compact_range<'a, 'b>(
+        &mut self,
+        level: usize,
+        from: InternalKey<'a>,
+        to: InternalKey<'b>,
+    ) -> Option<Compaction> {
         assert!(self.current.is_some());
-        let mut inputs =
-            self.current.as_ref().unwrap().borrow().overlapping_inputs(level, from, to);
+        let mut inputs = self.current.as_ref().unwrap().borrow().overlapping_inputs(
+            level,
+            from,
+            to,
+        );
         if inputs.is_empty() {
             return None;
         }
@@ -365,9 +372,12 @@
         // Set up level+1 inputs.
         compaction.inputs[1] = current.overlapping_inputs(level + 1, &smallest, &largest);
 
-        let (mut allstart, mut alllimit) =
-            get_range(&self.cmp,
-                      compaction.inputs[0].iter().chain(compaction.inputs[1].iter()));
+        let (mut allstart, mut alllimit) = get_range(
+            &self.cmp,
+            compaction.inputs[0]
+                .iter()
+                .chain(compaction.inputs[1].iter()),
+        );
 
         // Check if we can add more inputs in the current level without having to compact more
         // inputs from level+1.
@@ -376,30 +386,36 @@
             let inputs1_size = total_size(compaction.inputs[1].iter());
             let expanded0_size = total_size(expanded0.iter());
             // ...if we picked up more files in the current level, and the total size is acceptable
-            if expanded0.len() > compaction.num_inputs(0) &&
-               (inputs1_size + expanded0_size) < 25 * self.opt.max_file_size {
+            if expanded0.len() > compaction.num_inputs(0)
+                && (inputs1_size + expanded0_size) < 25 * self.opt.max_file_size
+            {
                 let (new_start, new_limit) = get_range(&self.cmp, expanded0.iter());
                 let expanded1 = current.overlapping_inputs(level + 1, &new_start, &new_limit);
                 if expanded1.len() == compaction.num_inputs(1) {
-                    log!(self.opt.log,
-                         "Expanding inputs@{} {}+{} ({}+{} bytes) to {}+{} ({}+{} bytes)",
-                         level,
-                         compaction.inputs[0].len(),
-                         compaction.inputs[1].len(),
-                         total_size(compaction.inputs[0].iter()),
-                         total_size(compaction.inputs[1].iter()),
-                         expanded0.len(),
-                         expanded1.len(),
-                         total_size(expanded0.iter()),
-                         total_size(expanded1.iter()));
+                    log!(
+                        self.opt.log,
+                        "Expanding inputs@{} {}+{} ({}+{} bytes) to {}+{} ({}+{} bytes)",
+                        level,
+                        compaction.inputs[0].len(),
+                        compaction.inputs[1].len(),
+                        total_size(compaction.inputs[0].iter()),
+                        total_size(compaction.inputs[1].iter()),
+                        expanded0.len(),
+                        expanded1.len(),
+                        total_size(expanded0.iter()),
+                        total_size(expanded1.iter())
+                    );
 
                     smallest = new_start;
                     largest = new_limit;
                     compaction.inputs[0] = expanded0;
                     compaction.inputs[1] = expanded1;
-                    let (newallstart, newalllimit) =
-                        get_range(&self.cmp,
-                                  compaction.inputs[0].iter().chain(compaction.inputs[1].iter()));
+                    let (newallstart, newalllimit) = get_range(
+                        &self.cmp,
+                        compaction.inputs[0]
+                            .iter()
+                            .chain(compaction.inputs[1].iter()),
+                    );
                     allstart = newallstart;
                     alllimit = newalllimit;
                 }
@@ -409,19 +425,21 @@
         // Set the list of grandparent (l+2) inputs to the files overlapped by the current overall
         // range.
         if level + 2 < NUM_LEVELS {
-            let grandparents = self.current
-                .as_ref()
-                .unwrap()
-                .borrow()
-                .overlapping_inputs(level + 2, &allstart, &alllimit);
+            let grandparents = self.current.as_ref().unwrap().borrow().overlapping_inputs(
+                level + 2,
+                &allstart,
+                &alllimit,
+            );
             compaction.grandparents = Some(grandparents);
         }
 
-        log!(self.opt.log,
-             "Compacting @{} {:?} .. {:?}",
-             level,
-             smallest,
-             largest);
+        log!(
+            self.opt.log,
+            "Compacting @{} {:?} .. {:?}",
+            level,
+            smallest,
+            largest
+        );
 
         compaction.edit().set_compact_pointer(level, &largest);
         self.compaction_ptrs[level] = largest;
@@ -449,7 +467,10 @@
                 edit.add_file(level, f.borrow().clone());
             }
         }
-        self.descriptor_log.as_mut().unwrap().add_record(&edit.encode())
+        self.descriptor_log
+            .as_mut()
+            .unwrap()
+            .add_record(&edit.encode())
     }
 
     /// log_and_apply merges the given edit with the current state and generates a new version. It
@@ -480,8 +501,9 @@
         if self.descriptor_log.is_none() {
             let descname = manifest_file_name(&self.dbname, self.manifest_num);
             edit.set_next_file(self.next_file_num);
-            self.descriptor_log =
-                Some(LogWriter::new(self.opt.env.open_writable_file(Path::new(&descname))?));
+            self.descriptor_log = Some(LogWriter::new(self.opt
+                .env
+                .open_writable_file(Path::new(&descname))?));
             self.write_snapshot()?;
         }
 
@@ -542,9 +564,11 @@
         let mut builder = Builder::new();
         {
             let mut descfile = self.opt.env.open_sequential_file(Path::new(&descfilename))?;
-            let mut logreader = LogReader::new(&mut descfile,
-                                               // checksum=
-                                               true);
+            let mut logreader = LogReader::new(
+                &mut descfile,
+                // checksum=
+                true,
+            );
 
             let mut log_number = None;
             let mut prev_log_number = None;
@@ -576,20 +600,26 @@
                 self.log_num = ln;
                 self.mark_file_number_used(ln);
             } else {
-                return err(StatusCode::Corruption,
-                           "no meta-lognumber entry in descriptor");
+                return err(
+                    StatusCode::Corruption,
+                    "no meta-lognumber entry in descriptor",
+                );
             }
             if let Some(nfn) = next_file_number {
                 self.next_file_num = nfn + 1;
             } else {
-                return err(StatusCode::Corruption,
-                           "no meta-next-file entry in descriptor");
+                return err(
+                    StatusCode::Corruption,
+                    "no meta-next-file entry in descriptor",
+                );
             }
             if let Some(ls) = last_seq {
                 self.last_seq = ls;
             } else {
-                return err(StatusCode::Corruption,
-                           "no last-sequence entry in descriptor");
+                return err(
+                    StatusCode::Corruption,
+                    "no last-sequence entry in descriptor",
+                );
             }
             if let Some(pln) = prev_log_number {
                 self.prev_log_num = pln;
@@ -604,14 +634,16 @@
         self.finalize(&mut v);
         self.add_version(v);
         self.manifest_num = self.next_file_num - 1;
-        log!(self.opt.log,
-             "Recovered manifest with next_file={} manifest_num={} log_num={} prev_log_num={} \
-              last_seq={}",
-             self.next_file_num,
-             self.manifest_num,
-             self.log_num,
-             self.prev_log_num,
-             self.last_seq);
+        log!(
+            self.opt.log,
+            "Recovered manifest with next_file={} manifest_num={} log_num={} prev_log_num={} \
+             last_seq={}",
+            self.next_file_num,
+            self.manifest_num,
+            self.log_num,
+            self.prev_log_num,
+            self.last_seq
+        );
 
         // A new manifest needs to be written only if we don't reuse the existing one.
         Ok(!self.reuse_manifest(&descfilename, &current))
@@ -642,7 +674,9 @@
             }
 
             assert!(self.descriptor_log.is_none());
-            let s = self.opt.env.open_appendable_file(Path::new(current_manifest_path));
+            let s = self.opt
+                .env
+                .open_appendable_file(Path::new(current_manifest_path));
             if let Ok(f) = s {
                 log!(self.opt.log, "reusing manifest {}", current_manifest_path);
                 self.descriptor_log = Some(LogWriter::new(f));
@@ -671,17 +705,21 @@
                     if let Ok(tbl) = s {
                         iters.push(Box::new(tbl.iter()));
                     } else {
-                        log!(self.opt.log,
-                             "error opening table {}: {}",
-                             f.borrow().num,
-                             s.err().unwrap());
+                        log!(
+                            self.opt.log,
+                            "error opening table {}: {}",
+                            f.borrow().num,
+                            s.err().unwrap()
+                        );
                     }
                 }
             } else {
                 // Create a concatenating iterator for higher levels.
-                iters.push(Box::new(new_version_iter(c.inputs[i].clone(),
-                                                     self.cache.clone(),
-                                                     self.opt.cmp.clone())));
+                iters.push(Box::new(new_version_iter(
+                    c.inputs[i].clone(),
+                    self.cache.clone(),
+                    self.opt.cmp.clone(),
+                )));
             }
         }
         assert!(iters.len() <= cap);
@@ -730,11 +768,13 @@
 
     /// maybe_add_file adds a file f at the given level to version v, unless it is already marked
     /// as deleted in this edit. It also asserts that the ordering of files is preserved.
-    fn maybe_add_file(&mut self,
-                      cmp: &InternalKeyCmp,
-                      v: &mut Version,
-                      level: usize,
-                      f: FileMetaHandle) {
+    fn maybe_add_file(
+        &mut self,
+        cmp: &InternalKeyCmp,
+        v: &mut Version,
+        level: usize,
+        f: FileMetaHandle,
+    ) {
         // Only add file if it's not already deleted.
         if self.deleted[level].iter().any(|d| *d == f.borrow().num) {
             return;
@@ -743,9 +783,13 @@
             let files = &v.files[level];
             if level > 0 && !files.is_empty() {
                 // File must be after last file in level.
-                assert_eq!(cmp.cmp(&files[files.len() - 1].borrow().largest,
-                                   &f.borrow().smallest),
-                           Ordering::Less);
+                assert_eq!(
+                    cmp.cmp(
+                        &files[files.len() - 1].borrow().largest,
+                        &f.borrow().smallest
+                    ),
+                    Ordering::Less
+                );
             }
         }
         v.files[level].push(f);
@@ -765,9 +809,9 @@
 
             let iadded = added.into_iter();
             let ibasefiles = basefiles.into_iter();
-            let merged = merge_iters(iadded,
-                                     ibasefiles,
-                                     |a, b| cmp.cmp(&a.borrow().smallest, &b.borrow().smallest));
+            let merged = merge_iters(iadded, ibasefiles, |a, b| {
+                cmp.cmp(&a.borrow().smallest, &b.borrow().smallest)
+            });
             for m in merged {
                 self.maybe_add_file(cmp, v, level, m);
             }
@@ -777,8 +821,10 @@
                 continue;
             }
             for i in 1..v.files[level].len() {
-                let (prev_end, this_begin) = (&v.files[level][i - 1].borrow().largest,
-                                              &v.files[level][i].borrow().smallest);
+                let (prev_end, this_begin) = (
+                    &v.files[level][i - 1].borrow().largest,
+                    &v.files[level][i].borrow().smallest,
+                );
                 assert!(cmp.cmp(prev_end, this_begin) < Ordering::Equal);
             }
         }
@@ -806,8 +852,10 @@
     let mut f = env.open_sequential_file(Path::new(&current_file_name(dbname)))?;
     f.read_to_string(&mut current)?;
     if current.is_empty() || !current.ends_with('\n') {
-        return err(StatusCode::Corruption,
-                   "current file is empty or has no newline");
+        return err(
+            StatusCode::Corruption,
+            "current file is empty or has no newline",
+        );
     }
     Ok(current)
 }
@@ -835,14 +883,16 @@
 }
 
 /// merge_iters merges and collects the items from two sorted iterators.
-fn merge_iters<Item,
-               C: Fn(&Item, &Item) -> Ordering,
-               I: Iterator<Item = Item>,
-               J: Iterator<Item = Item>>
-    (mut iter_a: I,
-     mut iter_b: J,
-     cmp: C)
-     -> Vec<Item> {
+fn merge_iters<
+    Item,
+    C: Fn(&Item, &Item) -> Ordering,
+    I: Iterator<Item = Item>,
+    J: Iterator<Item = Item>,
+>(
+    mut iter_a: I,
+    mut iter_b: J,
+    cmp: C,
+) -> Vec<Item> {
     let mut a = iter_a.next();
     let mut b = iter_b.next();
     let mut out = vec![];
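// For illustration — a minimal sketch of merge_iters: it zips two already
// sorted iterators into one sorted Vec using the supplied comparator. This
// mirrors test_version_set_merge_iters below and, since merge_iters is
// private, would have to run inside this module.
let evens = vec![2, 4, 6, 8, 10];
let odds = vec![1, 3, 5, 7];
let merged = merge_iters(evens.into_iter(), odds.into_iter(), |a, b| a.cmp(b));
assert_eq!(vec![1, 2, 3, 4, 5, 6, 7, 8, 10], merged);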
@@ -877,9 +927,10 @@
 
 /// get_range returns the smallest lower-bound key and the largest upper-bound key
 /// found among the given files.
-fn get_range<'a, C: Cmp, I: Iterator<Item = &'a FileMetaHandle>>(c: &C,
-                                                                 files: I)
-                                                                 -> (Vec<u8>, Vec<u8>) {
+fn get_range<'a, C: Cmp, I: Iterator<Item = &'a FileMetaHandle>>(
+    c: &C,
+    files: I,
+) -> (Vec<u8>, Vec<u8>) {
     let mut smallest = None;
     let mut largest = None;
     for f in files {
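// For illustration — a minimal sketch of get_range: it scans a set of
// FileMetaHandles and returns the overall smallest and largest keys. This
// mirrors test_version_set_get_range below; example_files() is a helper from
// the test module, not shown in this hunk.
let cmp = DefaultCmp;
let files = example_files();
let (smallest, largest) = get_range(&cmp, files.iter());
assert_eq!("a".as_bytes().to_vec(), smallest);
assert_eq!("z".as_bytes().to_vec(), largest);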
@@ -937,9 +988,10 @@
     fn test_version_set_merge_iters() {
         let v1 = vec![2, 4, 6, 8, 10];
         let v2 = vec![1, 3, 5, 7];
-        assert_eq!(vec![1, 2, 3, 4, 5, 6, 7, 8, 10],
-                   merge_iters(v1.into_iter(), v2.into_iter(), |a, b| a.cmp(&b)));
-
+        assert_eq!(
+            vec![1, 2, 3, 4, 5, 6, 7, 8, 10],
+            merge_iters(v1.into_iter(), v2.into_iter(), |a, b| a.cmp(&b))
+        );
     }
 
     #[test]
@@ -951,8 +1003,10 @@
     fn test_version_set_get_range() {
         let cmp = DefaultCmp;
         let fs = example_files();
-        assert_eq!(("a".as_bytes().to_vec(), "z".as_bytes().to_vec()),
-                   get_range(&cmp, fs.iter()));
+        assert_eq!(
+            ("a".as_bytes().to_vec(), "z".as_bytes().to_vec()),
+            get_range(&cmp, fs.iter())
+        );
     }
 
     #[test]
@@ -963,8 +1017,12 @@
         let mut fmd = FileMetaData::default();
         fmd.num = 21;
         fmd.size = 123;
-        fmd.smallest = LookupKey::new("klm".as_bytes(), 777).internal_key().to_vec();
-        fmd.largest = LookupKey::new("kop".as_bytes(), 700).internal_key().to_vec();
+        fmd.smallest = LookupKey::new("klm".as_bytes(), 777)
+            .internal_key()
+            .to_vec();
+        fmd.largest = LookupKey::new("kop".as_bytes(), 700)
+            .internal_key()
+            .to_vec();
 
         let mut ve = VersionEdit::new();
         ve.add_file(1, fmd);
@@ -977,13 +1035,17 @@
         let mut ptrs: [Vec<u8>; NUM_LEVELS] = Default::default();
         b.apply(&ve, &mut ptrs);
 
-        assert_eq!(&[120 as u8, 120, 120, 1, 123, 0, 0, 0, 0, 0, 0],
-                   ptrs[2].as_slice());
+        assert_eq!(
+            &[120 as u8, 120, 120, 1, 123, 0, 0, 0, 0, 0, 0],
+            ptrs[2].as_slice()
+        );
         assert_eq!(2, b.deleted[0][0]);
         assert_eq!(1, b.added[1].len());
 
-        let mut v2 = Version::new(share(TableCache::new("db", opt.clone(), 100)),
-                                  opt.cmp.clone());
+        let mut v2 = Version::new(
+            share(TableCache::new("db", opt.clone(), 100)),
+            opt.cmp.clone(),
+        );
         b.save_to(&InternalKeyCmp(opt.cmp.clone()), &v, &mut v2);
         // Second file in L0 was removed.
         assert_eq!(1, v2.files[0].len());
@@ -995,9 +1057,11 @@
     #[test]
     fn test_version_set_log_and_apply() {
         let (_, opt) = make_version();
-        let mut vs = VersionSet::new("db",
-                                     opt.clone(),
-                                     share(TableCache::new("db", opt.clone(), 100)));
+        let mut vs = VersionSet::new(
+            "db",
+            opt.clone(),
+            share(TableCache::new("db", opt.clone(), 100)),
+        );
 
         assert_eq!(2, vs.new_file_number());
         // Simulate NewDB
@@ -1035,8 +1099,12 @@
             let mut fmd = FileMetaData::default();
             fmd.num = 21;
             fmd.size = 123;
-            fmd.smallest = LookupKey::new("abc".as_bytes(), 777).internal_key().to_vec();
-            fmd.largest = LookupKey::new("def".as_bytes(), 700).internal_key().to_vec();
+            fmd.smallest = LookupKey::new("abc".as_bytes(), 777)
+                .internal_key()
+                .to_vec();
+            fmd.largest = LookupKey::new("def".as_bytes(), 700)
+                .internal_key()
+                .to_vec();
             ve.add_file(1, fmd);
             vs.log_and_apply(ve).unwrap();
 
@@ -1140,18 +1208,18 @@
         {
             // approximate_offset()
             let v = vs.current();
-            assert_eq!(0,
-                       vs.approximate_offset(&v,
-                                             LookupKey::new("aaa".as_bytes(), 9000)
-                                                 .internal_key()));
-            assert_eq!(232,
-                       vs.approximate_offset(&v,
-                                             LookupKey::new("bab".as_bytes(), 9000)
-                                                 .internal_key()));
-            assert_eq!(1134,
-                       vs.approximate_offset(&v,
-                                             LookupKey::new("fab".as_bytes(), 9000)
-                                                 .internal_key()));
+            assert_eq!(
+                0,
+                vs.approximate_offset(&v, LookupKey::new("aaa".as_bytes(), 9000).internal_key())
+            );
+            assert_eq!(
+                232,
+                vs.approximate_offset(&v, LookupKey::new("bab".as_bytes(), 9000).internal_key())
+            );
+            assert_eq!(
+                1134,
+                vs.approximate_offset(&v, LookupKey::new("fab".as_bytes(), 9000).internal_key())
+            );
         }
         // The following tests reuse the same version set and verify that various compactions work
         // like they should.
@@ -1160,7 +1228,8 @@
             // compact level 0 with a partial range.
             let from = LookupKey::new("000".as_bytes(), 1000);
             let to = LookupKey::new("ab".as_bytes(), 1010);
-            let c = vs.compact_range(0, from.internal_key(), to.internal_key()).unwrap();
+            let c = vs.compact_range(0, from.internal_key(), to.internal_key())
+                .unwrap();
             assert_eq!(2, c.inputs[0].len());
             assert_eq!(1, c.inputs[1].len());
             assert_eq!(1, c.grandparents.unwrap().len());
@@ -1168,29 +1237,36 @@
             // compact level 0, but entire range of keys in version.
             let from = LookupKey::new("000".as_bytes(), 1000);
             let to = LookupKey::new("zzz".as_bytes(), 1010);
-            let c = vs.compact_range(0, from.internal_key(), to.internal_key()).unwrap();
+            let c = vs.compact_range(0, from.internal_key(), to.internal_key())
+                .unwrap();
             assert_eq!(2, c.inputs[0].len());
             assert_eq!(1, c.inputs[1].len());
             assert_eq!(1, c.grandparents.as_ref().unwrap().len());
-            iterator_properties(vs.make_input_iterator(&c),
-                                12,
-                                Rc::new(Box::new(vs.cmp.clone())));
+            iterator_properties(
+                vs.make_input_iterator(&c),
+                12,
+                Rc::new(Box::new(vs.cmp.clone())),
+            );
 
             // Expand input range on higher level.
             let from = LookupKey::new("dab".as_bytes(), 1000);
             let to = LookupKey::new("eab".as_bytes(), 1010);
-            let c = vs.compact_range(1, from.internal_key(), to.internal_key()).unwrap();
+            let c = vs.compact_range(1, from.internal_key(), to.internal_key())
+                .unwrap();
             assert_eq!(3, c.inputs[0].len());
             assert_eq!(1, c.inputs[1].len());
             assert_eq!(0, c.grandparents.as_ref().unwrap().len());
-            iterator_properties(vs.make_input_iterator(&c),
-                                12,
-                                Rc::new(Box::new(vs.cmp.clone())));
+            iterator_properties(
+                vs.make_input_iterator(&c),
+                12,
+                Rc::new(Box::new(vs.cmp.clone())),
+            );
 
             // is_trivial_move
             let from = LookupKey::new("fab".as_bytes(), 1000);
             let to = LookupKey::new("fba".as_bytes(), 1010);
-            let mut c = vs.compact_range(2, from.internal_key(), to.internal_key()).unwrap();
+            let mut c = vs.compact_range(2, from.internal_key(), to.internal_key())
+                .unwrap();
             // pretend it's not manual
             c.manual = false;
             assert!(c.is_trivial_move());
@@ -1199,7 +1275,8 @@
             let from = LookupKey::new("000".as_bytes(), 1000);
             let to = LookupKey::new("zzz".as_bytes(), 1010);
             let mid = LookupKey::new("abc".as_bytes(), 1010);
-            let mut c = vs.compact_range(0, from.internal_key(), to.internal_key()).unwrap();
+            let mut c = vs.compact_range(0, from.internal_key(), to.internal_key())
+                .unwrap();
             assert!(!c.should_stop_before(from.internal_key()));
             assert!(!c.should_stop_before(mid.internal_key()));
             assert!(!c.should_stop_before(to.internal_key()));
@@ -1207,14 +1284,16 @@
             // is_base_level_for
             let from = LookupKey::new("000".as_bytes(), 1000);
             let to = LookupKey::new("zzz".as_bytes(), 1010);
-            let mut c = vs.compact_range(0, from.internal_key(), to.internal_key()).unwrap();
+            let mut c = vs.compact_range(0, from.internal_key(), to.internal_key())
+                .unwrap();
             assert!(c.is_base_level_for("aaa".as_bytes()));
             assert!(!c.is_base_level_for("hac".as_bytes()));
 
             // input/add_input_deletions
             let from = LookupKey::new("000".as_bytes(), 1000);
             let to = LookupKey::new("zzz".as_bytes(), 1010);
-            let mut c = vs.compact_range(0, from.internal_key(), to.internal_key()).unwrap();
+            let mut c = vs.compact_range(0, from.internal_key(), to.internal_key())
+                .unwrap();
             for inp in &[(0, 0, 1), (0, 1, 2), (1, 0, 3)] {
                 let f = &c.inputs[inp.0][inp.1];
                 assert_eq!(inp.2, f.borrow().num);
--- a/src/write_batch.rs	Sat Mar 03 11:49:17 2018 +0100
+++ b/src/write_batch.rs	Sat Mar 03 11:53:18 2018 +0100
@@ -1,7 +1,7 @@
 use key_types::ValueType;
 use memtable::MemTable;
 use types::SequenceNumber;
-use integer_encoding::{VarInt, VarIntWriter, FixedInt};
+use integer_encoding::{FixedInt, VarInt, VarIntWriter};
 
 use std::io::Write;
 
@@ -48,7 +48,9 @@
     /// Marks an entry to be deleted from the database.
     #[allow(unused_assignments)]
     pub fn delete(&mut self, k: &[u8]) {
-        self.entries.write(&[ValueType::TypeDeletion as u8]).unwrap();
+        self.entries
+            .write(&[ValueType::TypeDeletion as u8])
+            .unwrap();
         self.entries.write_varint(k.len()).unwrap();
         self.entries.write(k).unwrap();
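// For illustration — typical WriteBatch usage: queue puts and deletes, then
// hand the whole batch to the DB in a single write. put() is assumed to exist
// alongside the delete() reformatted in this hunk; only delete() appears here.
let mut batch = WriteBatch::new();
batch.put("abc".as_bytes(), "def".as_bytes());
batch.delete("xyz".as_bytes());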
 
@@ -126,7 +128,6 @@
         let k = &self.batch.entries[self.ix..self.ix + klen];
         self.ix += klen;
 
-
         if tag == ValueType::TypeValue as u8 {
             let (vlen, m) = usize::decode_var(&self.batch.entries[self.ix..]);
             self.ix += m;
@@ -148,11 +149,13 @@
     #[test]
     fn test_write_batch() {
         let mut b = WriteBatch::new();
-        let entries = vec![("abc".as_bytes(), "def".as_bytes()),
-                           ("123".as_bytes(), "456".as_bytes()),
-                           ("xxx".as_bytes(), "yyy".as_bytes()),
-                           ("zzz".as_bytes(), "".as_bytes()),
-                           ("010".as_bytes(), "".as_bytes())];
+        let entries = vec![
+            ("abc".as_bytes(), "def".as_bytes()),
+            ("123".as_bytes(), "456".as_bytes()),
+            ("xxx".as_bytes(), "yyy".as_bytes()),
+            ("zzz".as_bytes(), "".as_bytes()),
+            ("010".as_bytes(), "".as_bytes()),
+        ];
 
         for &(k, v) in entries.iter() {
             if !v.is_empty() {