more taskdb refactoring

Dustin J. Mitchell 2021-10-11 20:56:10 -04:00
parent f229715242
commit 79f07b57ad
4 changed files with 499 additions and 389 deletions

View file

@@ -1,10 +1,10 @@
use crate::server::Server;
use crate::storage::{Operation, Storage, TaskMap};
use std::collections::HashSet;
use uuid::Uuid;
mod ops;
mod sync;
mod working_set;
/// A TaskDb is the backend for a replica. It manages the storage, operations, synchronization,
/// and so on, and all the invariants that come with it. It leaves the meaning of particular task
@@ -74,57 +74,7 @@ impl TaskDb {
where
F: Fn(&TaskMap) -> bool,
{
let mut txn = self.storage.txn()?;
let mut new_ws = vec![None]; // index 0 is always None
let mut seen = HashSet::new();
// The goal here is for existing working-set items to be "compressed" down to index 1, so
// we begin by scanning the current working set and inserting any tasks that should still
// be in the set into new_ws, implicitly dropping any tasks that are no longer in the
// working set.
for elt in txn.get_working_set()?.drain(1..) {
if let Some(uuid) = elt {
if let Some(task) = txn.get_task(uuid)? {
if in_working_set(&task) {
new_ws.push(Some(uuid));
seen.insert(uuid);
continue;
}
}
}
// if we are not renumbering, then insert a blank working-set entry here
if !renumber {
new_ws.push(None);
}
}
// if renumbering, clear the working set and re-add
if renumber {
txn.clear_working_set()?;
for elt in new_ws.drain(1..new_ws.len()).flatten() {
txn.add_to_working_set(elt)?;
}
} else {
// ..otherwise, just clear the None items determined above from the working set
for (i, elt) in new_ws.iter().enumerate().skip(1) {
if elt.is_none() {
txn.set_working_set_item(i, None)?;
}
}
}
// Now go hunting for tasks that should be in this list but are not, adding them at the
// end of the list, whether renumbering or not
for (uuid, task) in txn.all_tasks()? {
if !seen.contains(&uuid) && in_working_set(&task) {
txn.add_to_working_set(uuid)?;
}
}
txn.commit()?;
Ok(())
working_set::rebuild(self.storage.txn()?.as_mut(), in_working_set, renumber)
}
/// Add the given uuid to the working set and return its index; if it is already in the working
@@ -190,11 +140,12 @@ mod tests {
use chrono::Utc;
use pretty_assertions::assert_eq;
use proptest::prelude::*;
use std::collections::HashMap;
use uuid::Uuid;
#[test]
fn test_apply_create() {
fn test_apply() {
// this verifies that the operation is both applied and included in the list of
// operations; more detailed tests are in the `ops` module.
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op = Operation::Create { uuid };
@@ -204,345 +155,10 @@ mod tests {
assert_eq!(db.operations(), vec![op]);
}
#[test]
fn test_apply_create_exists() {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op = Operation::Create { uuid };
db.apply(op.clone()).unwrap();
assert_eq!(
db.apply(op.clone()).err().unwrap().to_string(),
format!("Task Database Error: Task {} already exists", uuid)
);
assert_eq!(db.sorted_tasks(), vec![(uuid, vec![])]);
assert_eq!(db.operations(), vec![op]);
}
#[test]
fn test_apply_create_update() {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op1 = Operation::Create { uuid };
db.apply(op1.clone()).unwrap();
let op2 = Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: Utc::now(),
};
db.apply(op2.clone()).unwrap();
assert_eq!(
db.sorted_tasks(),
vec![(uuid, vec![("title".into(), "my task".into())])]
);
assert_eq!(db.operations(), vec![op1, op2]);
}
#[test]
fn test_apply_create_update_delete_prop() {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op1 = Operation::Create { uuid };
db.apply(op1.clone()).unwrap();
let op2 = Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: Utc::now(),
};
db.apply(op2.clone()).unwrap();
let op3 = Operation::Update {
uuid,
property: String::from("priority"),
value: Some("H".into()),
timestamp: Utc::now(),
};
db.apply(op3.clone()).unwrap();
let op4 = Operation::Update {
uuid,
property: String::from("title"),
value: None,
timestamp: Utc::now(),
};
db.apply(op4.clone()).unwrap();
let mut exp = HashMap::new();
let mut task = HashMap::new();
task.insert(String::from("priority"), String::from("H"));
exp.insert(uuid, task);
assert_eq!(
db.sorted_tasks(),
vec![(uuid, vec![("priority".into(), "H".into())])]
);
assert_eq!(db.operations(), vec![op1, op2, op3, op4]);
}
#[test]
fn test_apply_update_does_not_exist() {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op = Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: Utc::now(),
};
assert_eq!(
db.apply(op).err().unwrap().to_string(),
format!("Task Database Error: Task {} does not exist", uuid)
);
assert_eq!(db.sorted_tasks(), vec![]);
assert_eq!(db.operations(), vec![]);
}
#[test]
fn test_apply_create_delete() {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op1 = Operation::Create { uuid };
db.apply(op1.clone()).unwrap();
let op2 = Operation::Delete { uuid };
db.apply(op2.clone()).unwrap();
assert_eq!(db.sorted_tasks(), vec![]);
assert_eq!(db.operations(), vec![op1, op2]);
}
#[test]
fn test_apply_delete_not_present() {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op1 = Operation::Delete { uuid };
assert_eq!(
db.apply(op1).err().unwrap().to_string(),
format!("Task Database Error: Task {} does not exist", uuid)
);
assert_eq!(db.sorted_tasks(), vec![]);
assert_eq!(db.operations(), vec![]);
}
#[test]
fn rebuild_working_set_renumber() -> anyhow::Result<()> {
rebuild_working_set(true)
}
#[test]
fn rebuild_working_set_no_renumber() -> anyhow::Result<()> {
rebuild_working_set(false)
}
fn rebuild_working_set(renumber: bool) -> anyhow::Result<()> {
let mut db = TaskDb::new_inmemory();
let mut uuids = vec![];
uuids.push(Uuid::new_v4());
println!("uuids[0]: {:?} - pending, not in working set", uuids[0]);
uuids.push(Uuid::new_v4());
println!("uuids[1]: {:?} - pending, in working set", uuids[1]);
uuids.push(Uuid::new_v4());
println!("uuids[2]: {:?} - not pending, not in working set", uuids[2]);
uuids.push(Uuid::new_v4());
println!("uuids[3]: {:?} - not pending, in working set", uuids[3]);
uuids.push(Uuid::new_v4());
println!("uuids[4]: {:?} - pending, in working set", uuids[4]);
// add everything to the TaskDb
for uuid in &uuids {
db.apply(Operation::Create { uuid: *uuid })?;
}
for i in &[0usize, 1, 4] {
db.apply(Operation::Update {
uuid: uuids[*i].clone(),
property: String::from("status"),
value: Some("pending".into()),
timestamp: Utc::now(),
})?;
}
// set the existing working_set as we want it
{
let mut txn = db.storage.txn()?;
txn.clear_working_set()?;
for i in &[1usize, 3, 4] {
txn.add_to_working_set(uuids[*i])?;
}
txn.commit()?;
}
assert_eq!(
db.working_set()?,
vec![
None,
Some(uuids[1].clone()),
Some(uuids[3].clone()),
Some(uuids[4].clone())
]
);
db.rebuild_working_set(
|t| {
if let Some(status) = t.get("status") {
status == "pending"
} else {
false
}
},
renumber,
)?;
let exp = if renumber {
// uuids[1] and uuids[4] are already in the working set, so are compressed
// to the top, and then uuids[0] is added.
vec![
None,
Some(uuids[1].clone()),
Some(uuids[4].clone()),
Some(uuids[0].clone()),
]
} else {
// uuids[1] and uuids[4] are already in the working set, at indexes 1 and 3,
// and then uuids[0] is added.
vec![
None,
Some(uuids[1].clone()),
None,
Some(uuids[4].clone()),
Some(uuids[0].clone()),
]
};
assert_eq!(db.working_set()?, exp);
Ok(())
}
fn newdb() -> TaskDb {
TaskDb::new(Box::new(InMemoryStorage::new()))
}
#[test]
fn test_sync() {
let mut server: Box<dyn Server> = Box::new(TestServer::new());
let mut db1 = newdb();
db1.sync(&mut server).unwrap();
let mut db2 = newdb();
db2.sync(&mut server).unwrap();
// make some changes in parallel to db1 and db2..
let uuid1 = Uuid::new_v4();
db1.apply(Operation::Create { uuid: uuid1 }).unwrap();
db1.apply(Operation::Update {
uuid: uuid1,
property: "title".into(),
value: Some("my first task".into()),
timestamp: Utc::now(),
})
.unwrap();
let uuid2 = Uuid::new_v4();
db2.apply(Operation::Create { uuid: uuid2 }).unwrap();
db2.apply(Operation::Update {
uuid: uuid2,
property: "title".into(),
value: Some("my second task".into()),
timestamp: Utc::now(),
})
.unwrap();
// and synchronize those around
db1.sync(&mut server).unwrap();
db2.sync(&mut server).unwrap();
db1.sync(&mut server).unwrap();
assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());
// now make updates to the same task on both sides
db1.apply(Operation::Update {
uuid: uuid2,
property: "priority".into(),
value: Some("H".into()),
timestamp: Utc::now(),
})
.unwrap();
db2.apply(Operation::Update {
uuid: uuid2,
property: "project".into(),
value: Some("personal".into()),
timestamp: Utc::now(),
})
.unwrap();
// and synchronize those around
db1.sync(&mut server).unwrap();
db2.sync(&mut server).unwrap();
db1.sync(&mut server).unwrap();
assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());
}
#[test]
fn test_sync_create_delete() {
let mut server: Box<dyn Server> = Box::new(TestServer::new());
let mut db1 = newdb();
db1.sync(&mut server).unwrap();
let mut db2 = newdb();
db2.sync(&mut server).unwrap();
// create and update a task..
let uuid = Uuid::new_v4();
db1.apply(Operation::Create { uuid }).unwrap();
db1.apply(Operation::Update {
uuid,
property: "title".into(),
value: Some("my first task".into()),
timestamp: Utc::now(),
})
.unwrap();
// and synchronize those around
db1.sync(&mut server).unwrap();
db2.sync(&mut server).unwrap();
db1.sync(&mut server).unwrap();
assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());
// delete and re-create the task on db1
db1.apply(Operation::Delete { uuid }).unwrap();
db1.apply(Operation::Create { uuid }).unwrap();
db1.apply(Operation::Update {
uuid,
property: "title".into(),
value: Some("my second task".into()),
timestamp: Utc::now(),
})
.unwrap();
// and on db2, update a property of the task
db2.apply(Operation::Update {
uuid,
property: "project".into(),
value: Some("personal".into()),
timestamp: Utc::now(),
})
.unwrap();
db1.sync(&mut server).unwrap();
db2.sync(&mut server).unwrap();
db1.sync(&mut server).unwrap();
assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());
}
#[derive(Debug)]
enum Action {
Op(Operation),

View file

@@ -35,3 +35,199 @@ pub(super) fn apply_op(txn: &mut dyn StorageTxn, op: &Operation) -> anyhow::Resu
Ok(())
}
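For orientation, `apply_op` is meant to be driven inside a storage transaction that the caller commits, so several operations can be batched atomically. A minimal sketch under that assumption, following the same pattern as the tests below (the helper name `create_and_title_task` is illustrative, not part of the crate):
use crate::storage::Operation;
use crate::taskdb::TaskDb;
use chrono::Utc;
use uuid::Uuid;

fn create_and_title_task() -> anyhow::Result<()> {
    let mut db = TaskDb::new_inmemory();
    let uuid = Uuid::new_v4();
    // both operations share one transaction; nothing is visible until commit
    let mut txn = db.storage.txn()?;
    apply_op(txn.as_mut(), &Operation::Create { uuid })?;
    apply_op(
        txn.as_mut(),
        &Operation::Update {
            uuid,
            property: "title".into(),
            value: Some("my task".into()),
            timestamp: Utc::now(),
        },
    )?;
    txn.commit()?;
    Ok(())
}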
#[cfg(test)]
mod tests {
use super::*;
use crate::taskdb::TaskDb;
use chrono::Utc;
use pretty_assertions::assert_eq;
use std::collections::HashMap;
use uuid::Uuid;
#[test]
fn test_apply_create() -> anyhow::Result<()> {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op = Operation::Create { uuid };
{
let mut txn = db.storage.txn()?;
apply_op(txn.as_mut(), &op)?;
txn.commit()?;
}
assert_eq!(db.sorted_tasks(), vec![(uuid, vec![]),]);
Ok(())
}
#[test]
fn test_apply_create_exists() -> anyhow::Result<()> {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op = Operation::Create { uuid };
{
let mut txn = db.storage.txn()?;
apply_op(txn.as_mut(), &op)?;
assert_eq!(
apply_op(txn.as_mut(), &op).err().unwrap().to_string(),
format!("Task Database Error: Task {} already exists", uuid)
);
txn.commit()?;
}
// first op was applied
assert_eq!(db.sorted_tasks(), vec![(uuid, vec![])]);
Ok(())
}
#[test]
fn test_apply_create_update() -> anyhow::Result<()> {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op1 = Operation::Create { uuid };
{
let mut txn = db.storage.txn()?;
apply_op(txn.as_mut(), &op1)?;
txn.commit()?;
}
let op2 = Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: Utc::now(),
};
{
let mut txn = db.storage.txn()?;
apply_op(txn.as_mut(), &op2)?;
txn.commit()?;
}
assert_eq!(
db.sorted_tasks(),
vec![(uuid, vec![("title".into(), "my task".into())])]
);
Ok(())
}
#[test]
fn test_apply_create_update_delete_prop() -> anyhow::Result<()> {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op1 = Operation::Create { uuid };
{
let mut txn = db.storage.txn()?;
apply_op(txn.as_mut(), &op1)?;
txn.commit()?;
}
let op2 = Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: Utc::now(),
};
{
let mut txn = db.storage.txn()?;
apply_op(txn.as_mut(), &op2)?;
txn.commit()?;
}
let op3 = Operation::Update {
uuid,
property: String::from("priority"),
value: Some("H".into()),
timestamp: Utc::now(),
};
{
let mut txn = db.storage.txn()?;
apply_op(txn.as_mut(), &op3)?;
txn.commit()?;
}
let op4 = Operation::Update {
uuid,
property: String::from("title"),
value: None,
timestamp: Utc::now(),
};
{
let mut txn = db.storage.txn()?;
apply_op(txn.as_mut(), &op4)?;
txn.commit()?;
}
let mut exp = HashMap::new();
let mut task = HashMap::new();
task.insert(String::from("priority"), String::from("H"));
exp.insert(uuid, task);
assert_eq!(
db.sorted_tasks(),
vec![(uuid, vec![("priority".into(), "H".into())])]
);
Ok(())
}
#[test]
fn test_apply_update_does_not_exist() -> anyhow::Result<()> {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op = Operation::Update {
uuid,
property: String::from("title"),
value: Some("my task".into()),
timestamp: Utc::now(),
};
{
let mut txn = db.storage.txn()?;
assert_eq!(
apply_op(txn.as_mut(), &op).err().unwrap().to_string(),
format!("Task Database Error: Task {} does not exist", uuid)
);
txn.commit()?;
}
Ok(())
}
#[test]
fn test_apply_create_delete() -> anyhow::Result<()> {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op1 = Operation::Create { uuid };
let op2 = Operation::Delete { uuid };
{
let mut txn = db.storage.txn()?;
apply_op(txn.as_mut(), &op1)?;
apply_op(txn.as_mut(), &op2)?;
txn.commit()?;
}
assert_eq!(db.sorted_tasks(), vec![]);
Ok(())
}
#[test]
fn test_apply_delete_not_present() -> anyhow::Result<()> {
let mut db = TaskDb::new_inmemory();
let uuid = Uuid::new_v4();
let op = Operation::Delete { uuid };
{
let mut txn = db.storage.txn()?;
assert_eq!(
apply_op(txn.as_mut(), &op).err().unwrap().to_string(),
format!("Task Database Error: Task {} does not exist", uuid)
);
txn.commit()?;
}
Ok(())
}
}

View file

@@ -143,3 +143,134 @@ fn apply_version(txn: &mut dyn StorageTxn, mut version: Version) -> anyhow::Resu
txn.set_operations(local_operations)?;
Ok(())
}
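A minimal sketch of driving the refactored sync entry point, assuming the free `sync(server, txn)` function exercised by the tests below (which takes the server and a storage transaction directly) and assuming it returns `anyhow::Result<()>` like the surrounding functions; the helper name `sync_empty_replica` is illustrative:
use crate::server::{test::TestServer, Server};
use crate::storage::InMemoryStorage;
use crate::taskdb::TaskDb;

fn sync_empty_replica() -> anyhow::Result<()> {
    let mut server: Box<dyn Server> = Box::new(TestServer::new());
    let mut db = TaskDb::new(Box::new(InMemoryStorage::new()));
    // each call opens its own transaction against the replica's storage
    sync(&mut server, db.storage.txn()?.as_mut())?;
    Ok(())
}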
#[cfg(test)]
mod test {
use super::*;
use crate::server::test::TestServer;
use crate::storage::{InMemoryStorage, Operation};
use crate::taskdb::TaskDb;
use chrono::Utc;
use uuid::Uuid;
fn newdb() -> TaskDb {
TaskDb::new(Box::new(InMemoryStorage::new()))
}
#[test]
fn test_sync() -> anyhow::Result<()> {
let mut server: Box<dyn Server> = Box::new(TestServer::new());
let mut db1 = newdb();
sync(&mut server, db1.storage.txn()?.as_mut()).unwrap();
let mut db2 = newdb();
sync(&mut server, db2.storage.txn()?.as_mut()).unwrap();
// make some changes in parallel to db1 and db2..
let uuid1 = Uuid::new_v4();
db1.apply(Operation::Create { uuid: uuid1 }).unwrap();
db1.apply(Operation::Update {
uuid: uuid1,
property: "title".into(),
value: Some("my first task".into()),
timestamp: Utc::now(),
})
.unwrap();
let uuid2 = Uuid::new_v4();
db2.apply(Operation::Create { uuid: uuid2 }).unwrap();
db2.apply(Operation::Update {
uuid: uuid2,
property: "title".into(),
value: Some("my second task".into()),
timestamp: Utc::now(),
})
.unwrap();
// and synchronize those around
sync(&mut server, db1.storage.txn()?.as_mut()).unwrap();
sync(&mut server, db2.storage.txn()?.as_mut()).unwrap();
sync(&mut server, db1.storage.txn()?.as_mut()).unwrap();
assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());
// now make updates to the same task on both sides
db1.apply(Operation::Update {
uuid: uuid2,
property: "priority".into(),
value: Some("H".into()),
timestamp: Utc::now(),
})
.unwrap();
db2.apply(Operation::Update {
uuid: uuid2,
property: "project".into(),
value: Some("personal".into()),
timestamp: Utc::now(),
})
.unwrap();
// and synchronize those around
sync(&mut server, db1.storage.txn()?.as_mut()).unwrap();
sync(&mut server, db2.storage.txn()?.as_mut()).unwrap();
sync(&mut server, db1.storage.txn()?.as_mut()).unwrap();
assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());
Ok(())
}
#[test]
fn test_sync_create_delete() -> anyhow::Result<()> {
let mut server: Box<dyn Server> = Box::new(TestServer::new());
let mut db1 = newdb();
sync(&mut server, db1.storage.txn()?.as_mut()).unwrap();
let mut db2 = newdb();
sync(&mut server, db2.storage.txn()?.as_mut()).unwrap();
// create and update a task..
let uuid = Uuid::new_v4();
db1.apply(Operation::Create { uuid }).unwrap();
db1.apply(Operation::Update {
uuid,
property: "title".into(),
value: Some("my first task".into()),
timestamp: Utc::now(),
})
.unwrap();
// and synchronize those around
sync(&mut server, db1.storage.txn()?.as_mut()).unwrap();
sync(&mut server, db2.storage.txn()?.as_mut()).unwrap();
sync(&mut server, db1.storage.txn()?.as_mut()).unwrap();
assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());
// delete and re-create the task on db1
db1.apply(Operation::Delete { uuid }).unwrap();
db1.apply(Operation::Create { uuid }).unwrap();
db1.apply(Operation::Update {
uuid,
property: "title".into(),
value: Some("my second task".into()),
timestamp: Utc::now(),
})
.unwrap();
// and on db2, update a property of the task
db2.apply(Operation::Update {
uuid,
property: "project".into(),
value: Some("personal".into()),
timestamp: Utc::now(),
})
.unwrap();
sync(&mut server, db1.storage.txn()?.as_mut()).unwrap();
sync(&mut server, db2.storage.txn()?.as_mut()).unwrap();
sync(&mut server, db1.storage.txn()?.as_mut()).unwrap();
assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());
Ok(())
}
}

View file

@@ -0,0 +1,167 @@
use crate::storage::{StorageTxn, TaskMap};
use std::collections::HashSet;
/// Rebuild the working set using a function to identify tasks that should be in the set. This
/// renumbers the existing working-set tasks to eliminate gaps, and also adds any tasks that
/// are not already in the working set but should be. The rebuild occurs in a single
/// transaction against the storage backend.
pub fn rebuild<F>(txn: &mut dyn StorageTxn, in_working_set: F, renumber: bool) -> anyhow::Result<()>
where
F: Fn(&TaskMap) -> bool,
{
let mut new_ws = vec![None]; // index 0 is always None
let mut seen = HashSet::new();
// The goal here is for existing working-set items to be "compressed" down to index 1, so
// we begin by scanning the current working set and inserting any tasks that should still
// be in the set into new_ws, implicitly dropping any tasks that are no longer in the
// working set.
for elt in txn.get_working_set()?.drain(1..) {
if let Some(uuid) = elt {
if let Some(task) = txn.get_task(uuid)? {
if in_working_set(&task) {
new_ws.push(Some(uuid));
seen.insert(uuid);
continue;
}
}
}
// if we are not renumbering, then insert a blank working-set entry here
if !renumber {
new_ws.push(None);
}
}
// if renumbering, clear the working set and re-add
if renumber {
txn.clear_working_set()?;
for elt in new_ws.drain(1..new_ws.len()).flatten() {
txn.add_to_working_set(elt)?;
}
} else {
// ..otherwise, just clear the None items determined above from the working set
for (i, elt) in new_ws.iter().enumerate().skip(1) {
if elt.is_none() {
txn.set_working_set_item(i, None)?;
}
}
}
// Now go hunting for tasks that should be in this list but are not, adding them at the
// end of the list, whether renumbering or not
for (uuid, task) in txn.all_tasks()? {
if !seen.contains(&uuid) && in_working_set(&task) {
txn.add_to_working_set(uuid)?;
}
}
txn.commit()?;
Ok(())
}
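Callers normally reach this through the `TaskDb::rebuild_working_set` wrapper shown earlier in the commit, which simply forwards its predicate and a transaction here. A minimal sketch, assuming that wrapper, with the same "status is pending" predicate the tests use (the helper name `rebuild_pending_only` is illustrative):
use crate::taskdb::TaskDb;

fn rebuild_pending_only(db: &mut TaskDb) -> anyhow::Result<()> {
    // renumber = true compresses the surviving entries down toward index 1;
    // renumber = false keeps existing indexes and only clears dropped entries
    db.rebuild_working_set(
        |t| t.get("status").map_or(false, |s| s == "pending"),
        true,
    )
}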
#[cfg(test)]
mod test {
use super::*;
use crate::storage::Operation;
use crate::taskdb::TaskDb;
use chrono::Utc;
use uuid::Uuid;
#[test]
fn rebuild_working_set_renumber() -> anyhow::Result<()> {
rebuild_working_set(true)
}
#[test]
fn rebuild_working_set_no_renumber() -> anyhow::Result<()> {
rebuild_working_set(false)
}
fn rebuild_working_set(renumber: bool) -> anyhow::Result<()> {
let mut db = TaskDb::new_inmemory();
let mut uuids = vec![];
uuids.push(Uuid::new_v4());
println!("uuids[0]: {:?} - pending, not in working set", uuids[0]);
uuids.push(Uuid::new_v4());
println!("uuids[1]: {:?} - pending, in working set", uuids[1]);
uuids.push(Uuid::new_v4());
println!("uuids[2]: {:?} - not pending, not in working set", uuids[2]);
uuids.push(Uuid::new_v4());
println!("uuids[3]: {:?} - not pending, in working set", uuids[3]);
uuids.push(Uuid::new_v4());
println!("uuids[4]: {:?} - pending, in working set", uuids[4]);
// add everything to the TaskDb
for uuid in &uuids {
db.apply(Operation::Create { uuid: *uuid })?;
}
for i in &[0usize, 1, 4] {
db.apply(Operation::Update {
uuid: uuids[*i].clone(),
property: String::from("status"),
value: Some("pending".into()),
timestamp: Utc::now(),
})?;
}
// set the existing working_set as we want it
{
let mut txn = db.storage.txn()?;
txn.clear_working_set()?;
for i in &[1usize, 3, 4] {
txn.add_to_working_set(uuids[*i])?;
}
txn.commit()?;
}
assert_eq!(
db.working_set()?,
vec![
None,
Some(uuids[1].clone()),
Some(uuids[3].clone()),
Some(uuids[4].clone())
]
);
rebuild(
db.storage.txn()?.as_mut(),
|t| {
if let Some(status) = t.get("status") {
status == "pending"
} else {
false
}
},
renumber,
)?;
let exp = if renumber {
// uuids[1] and uuids[4] are already in the working set, so are compressed
// to the top, and then uuids[0] is added.
vec![
None,
Some(uuids[1].clone()),
Some(uuids[4].clone()),
Some(uuids[0].clone()),
]
} else {
// uuids[1] and uuids[4] are already in the working set, at indexes 1 and 3,
// and then uuids[0] is added.
vec![
None,
Some(uuids[1].clone()),
None,
Some(uuids[4].clone()),
Some(uuids[0].clone()),
]
};
assert_eq!(db.working_set()?, exp);
Ok(())
}
}