move contents of taskchampion repo to tc/

This commit is contained in:
Dustin J. Mitchell 2022-05-08 19:01:20 +00:00
parent 73baefa0a5
commit 2a92b2a4b9
No known key found for this signature in database
219 changed files with 0 additions and 0 deletions

View file

@ -0,0 +1,33 @@
# Manifest for the core TaskChampion library crate.
[package]
name = "taskchampion"
version = "0.4.1"
authors = ["Dustin J. Mitchell <dustin@mozilla.com>"]
description = "Personal task-tracking"
homepage = "https://taskchampion.github.io/taskchampion/"
documentation = "https://docs.rs/crate/taskchampion"
repository = "https://github.com/taskchampion/taskchampion"
# the README lives at the repository root, one level above this crate
readme = "../README.md"
license = "MIT"
edition = "2018"
# runtime dependencies
[dependencies]
uuid = { version = "^0.8.2", features = ["serde", "v4"] }
serde = { version = "^1.0.125", features = ["derive"] }
serde_json = "^1.0"
chrono = { version = "^0.4.10", features = ["serde"] }
anyhow = "1.0"
thiserror = "1.0"
ureq = "^2.1.0"
log = "^0.4.14"
# "bundled" builds SQLite from source rather than linking a system library
rusqlite = { version = "0.25", features = ["bundled"] }
strum = "0.21"
strum_macros = "0.21"
flate2 = "1"
byteorder = "1.0"
ring = "0.16"
# dependencies used only by tests
[dev-dependencies]
proptest = "^1.0.0"
tempfile = "3"
rstest = "0.10"
pretty_assertions = "1"

View file

@ -0,0 +1,81 @@
use uuid::Uuid;
/// DependencyMap stores information on task dependencies between pending tasks.
///
/// This information requires a scan of the working set to generate, so it is
/// typically calculated once and re-used.
#[derive(Debug, PartialEq)]
pub struct DependencyMap {
    /// Edges of the dependency graph. If (a, b) is in this array, then task a depends on task b.
    edges: Vec<(Uuid, Uuid)>,
}
impl DependencyMap {
    /// Create a new, empty DependencyMap.
    pub(super) fn new() -> Self {
        Self { edges: Vec::new() }
    }
    /// Add a dependency of a on b.
    pub(super) fn add_dependency(&mut self, a: Uuid, b: Uuid) {
        self.edges.push((a, b));
    }
    /// Return an iterator of Uuids on which task `dep_of` depends. This is equivalent to
    /// `task.get_dependencies()`.
    pub fn dependencies(&self, dep_of: Uuid) -> impl Iterator<Item = Uuid> + '_ {
        self.edges
            .iter()
            .filter_map(move |(a, b)| if a == &dep_of { Some(*b) } else { None })
    }
    /// Return an iterator of Uuids of tasks that depend on `dep_on`; the inverse
    /// of [`DependencyMap::dependencies`].
    pub fn dependents(&self, dep_on: Uuid) -> impl Iterator<Item = Uuid> + '_ {
        self.edges
            .iter()
            .filter_map(move |(a, b)| if b == &dep_on { Some(*a) } else { None })
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use pretty_assertions::assert_eq;
    use std::collections::HashSet;

    /// Build a DependencyMap from a list of (dependent, dependency) edge pairs.
    fn map_of(edges: Vec<(Uuid, Uuid)>) -> DependencyMap {
        let mut dm = DependencyMap::new();
        for (a, b) in edges {
            dm.add_dependency(a, b);
        }
        dm
    }

    #[test]
    fn dependencies() {
        let t = Uuid::new_v4();
        let dep1 = Uuid::new_v4();
        let dep2 = Uuid::new_v4();
        // edges not involving `t` as the dependent are included as noise
        let dm = map_of(vec![
            (t, dep1),
            (t, dep2),
            (Uuid::new_v4(), t),
            (Uuid::new_v4(), dep1),
            (dep2, Uuid::new_v4()),
        ]);
        assert_eq!(
            dm.dependencies(t).collect::<HashSet<_>>(),
            set![dep1, dep2]
        );
    }

    #[test]
    fn dependents() {
        let t = Uuid::new_v4();
        let dep1 = Uuid::new_v4();
        let dep2 = Uuid::new_v4();
        // edges not involving `t` as the dependency are included as noise
        let dm = map_of(vec![
            (dep1, t),
            (dep2, t),
            (t, Uuid::new_v4()),
            (Uuid::new_v4(), dep1),
            (dep2, Uuid::new_v4()),
        ]);
        assert_eq!(dm.dependents(t).collect::<HashSet<_>>(), set![dep1, dep2]);
    }
}

View file

@ -0,0 +1,15 @@
use thiserror::Error;
#[derive(Debug, Error, Eq, PartialEq, Clone)]
#[non_exhaustive]
/// Errors returned from taskchampion operations
pub enum Error {
    /// A task-database-related error
    #[error("Task Database Error: {0}")]
    Database(String),
    /// An error specifically indicating that the local replica cannot
    /// be synchronized with the server, due to being out of date or some
    /// other irrecoverable error.
    #[error("Local replica is out of sync with the server")]
    OutOfSync,
}

View file

@ -0,0 +1,73 @@
#![deny(clippy::all)]
/*!
This crate implements the core of TaskChampion, the [replica](crate::Replica).
Users of this crate can manipulate a task database using this API, including synchronizing that task database with others via a synchronization server.
Example uses of this crate:
* user interfaces for task management, such as mobile apps, web apps, or command-line interfaces
* integrations for task management, such as synchronization with ticket-tracking systems or
request forms.
# Replica
A TaskChampion replica is a local copy of a user's task data. As the name suggests, several
replicas of the same data can exist (such as on a user's laptop and on their phone) and can
synchronize with one another.
Replicas are accessed using the [`Replica`](crate::Replica) type.
# Task Storage
Replicas access the task database via a [storage object](crate::storage::Storage).
Create a storage object with [`StorageConfig`](crate::storage::StorageConfig).
The [`storage`](crate::storage) module supports pluggable storage for a replica's data.
An implementation is provided, but users of this crate can provide their own implementation as well.
# Server
Replica synchronization takes place against a server.
Create a server with [`ServerConfig`](crate::ServerConfig).
The [`server`](crate::server) module defines the interface a server must meet.
Users can define their own server implementations.
# See Also
See the [TaskChampion Book](https://taskchampion.github.io/taskchampion)
for more information about the design and usage of the tool.
# Minimum Supported Rust Version
This crate supports Rust version 1.47 and higher.
*/
// NOTE: it's important that this 'mod' comes first so that the macros can be used in other modules
mod macros;
mod depmap;
mod errors;
mod replica;
pub mod server;
pub mod storage;
mod task;
mod taskdb;
mod utils;
mod workingset;
pub use depmap::DependencyMap;
pub use errors::Error;
pub use replica::Replica;
pub use server::{Server, ServerConfig};
pub use storage::StorageConfig;
pub use task::{Annotation, Status, Tag, Task, TaskMut};
pub use workingset::WorkingSet;
/// Re-exported type from the `uuid` crate, for ease of compatibility for consumers of this crate.
pub use uuid::Uuid;
/// Re-exported chrono module.
pub use chrono;

View file

@ -0,0 +1,17 @@
#![macro_use]
/// Create a hashset, similar to vec!
///
/// `set![a, b]` expands to a `HashSet` containing `a` and `b`; a trailing
/// comma and the empty form `set![]` are both accepted.
// NOTE: in Rust 1.56.0, this can be changed to HashSet::from([..])
#[cfg(test)]
macro_rules! set(
    { $($key:expr),* $(,)? } => {
        {
            // `unused_mut` fires for the empty `set![]` form, where no insert occurs
            #[allow(unused_mut)]
            let mut s = ::std::collections::HashSet::new();
            $(
                s.insert($key);
            )*
            s
        }
    };
);

View file

@ -0,0 +1,565 @@
use crate::depmap::DependencyMap;
use crate::server::{Server, SyncOp};
use crate::storage::{Storage, TaskMap};
use crate::task::{Status, Task};
use crate::taskdb::TaskDb;
use crate::workingset::WorkingSet;
use anyhow::Context;
use chrono::{Duration, Utc};
use log::trace;
use std::collections::HashMap;
use std::rc::Rc;
use uuid::Uuid;
/// A replica represents an instance of a user's task data, providing an easy interface
/// for querying and modifying that data.
///
/// ## Tasks
///
/// Tasks are uniquely identified by UUIDs.
/// Most task modifications are performed via the [`Task`](crate::Task) and
/// [`TaskMut`](crate::TaskMut) types. Use of two types for tasks allows easy
/// read-only manipulation of lots of tasks, with exclusive access required only
/// for modifications.
///
/// ## Working Set
///
/// A replica maintains a "working set" of tasks that are of current concern to the user,
/// specifically pending tasks. These are indexed with small, easy-to-type integers. Newly
/// pending tasks are automatically added to the working set, and the working set is "renumbered"
/// during the garbage-collection process.
pub struct Replica {
    /// Underlying task database, wrapping the storage backend.
    taskdb: TaskDb,
    /// If true, this replica has already added an undo point.
    added_undo_point: bool,
    /// The dependency map for this replica, if it has been calculated.
    depmap: Option<Rc<DependencyMap>>,
}
impl Replica {
    /// Create a new Replica on top of the given storage backend.
    pub fn new(storage: Box<dyn Storage>) -> Replica {
        Replica {
            taskdb: TaskDb::new(storage),
            added_undo_point: false,
            depmap: None,
        }
    }
    /// Create a new Replica backed by in-memory storage, for use in tests.
    #[cfg(test)]
    pub fn new_inmemory() -> Replica {
        Replica::new(Box::new(crate::storage::InMemoryStorage::new()))
    }
    /// Update an existing task. If the value is Some, the property is added or updated. If the
    /// value is None, the property is deleted. It is not an error to delete a nonexistent
    /// property.
    ///
    /// This is a low-level method, and requires knowledge of the Task data model. Prefer to
    /// use the [`TaskMut`] methods to modify tasks, where possible.
    pub fn update_task<S1, S2>(
        &mut self,
        uuid: Uuid,
        property: S1,
        value: Option<S2>,
    ) -> anyhow::Result<TaskMap>
    where
        S1: Into<String>,
        S2: Into<String>,
    {
        self.add_undo_point(false)?;
        self.taskdb.apply(SyncOp::Update {
            uuid,
            property: property.into(),
            value: value.map(|v| v.into()),
            timestamp: Utc::now(),
        })
    }
    /// Add the given uuid to the working set, returning its index.
    pub(crate) fn add_to_working_set(&mut self, uuid: Uuid) -> anyhow::Result<usize> {
        self.taskdb.add_to_working_set(uuid)
    }
    /// Get all tasks represented as a map keyed by UUID
    pub fn all_tasks(&mut self) -> anyhow::Result<HashMap<Uuid, Task>> {
        let depmap = self.dependency_map(false)?;
        let mut res = HashMap::new();
        for (uuid, tm) in self.taskdb.all_tasks()?.drain(..) {
            res.insert(uuid, Task::new(uuid, tm, depmap.clone()));
        }
        Ok(res)
    }
    /// Get the UUIDs of all tasks
    pub fn all_task_uuids(&mut self) -> anyhow::Result<Vec<Uuid>> {
        self.taskdb.all_task_uuids()
    }
    /// Get the "working set" for this replica. This is a snapshot of the current state,
    /// and it is up to the caller to decide how long to store this value.
    pub fn working_set(&mut self) -> anyhow::Result<WorkingSet> {
        Ok(WorkingSet::new(self.taskdb.working_set()?))
    }
    /// Get the dependency map for all pending tasks.
    ///
    /// The data in this map is cached when it is first requested and may not contain modifications
    /// made locally in this Replica instance. The result is reference-counted and may
    /// outlive the Replica.
    ///
    /// If `force` is true, then the result is re-calculated from the current state of the replica,
    /// although previously-returned dependency maps are not updated.
    pub fn dependency_map(&mut self, force: bool) -> anyhow::Result<Rc<DependencyMap>> {
        if force || self.depmap.is_none() {
            let mut dm = DependencyMap::new();
            let ws = self.working_set()?;
            // working-set indices start at 1; index 0 is always unoccupied
            for i in 1..=ws.largest_index() {
                if let Some(u) = ws.by_index(i) {
                    // note: we can't use self.get_task here, as that depends on a
                    // DependencyMap
                    if let Some(taskmap) = self.taskdb.get_task(u)? {
                        for p in taskmap.keys() {
                            // dependencies are stored as properties named "dep_<uuid>"
                            if let Some(dep_str) = p.strip_prefix("dep_") {
                                if let Ok(dep) = Uuid::parse_str(dep_str) {
                                    dm.add_dependency(u, dep);
                                }
                            }
                        }
                    }
                }
            }
            self.depmap = Some(Rc::new(dm));
        }
        // at this point self.depmap is guaranteed to be Some(_)
        Ok(self.depmap.as_ref().unwrap().clone())
    }
    /// Get an existing task by its UUID
    pub fn get_task(&mut self, uuid: Uuid) -> anyhow::Result<Option<Task>> {
        let depmap = self.dependency_map(false)?;
        Ok(self
            .taskdb
            .get_task(uuid)?
            .map(move |tm| Task::new(uuid, tm, depmap)))
    }
    /// Create a new task.
    pub fn new_task(&mut self, status: Status, description: String) -> anyhow::Result<Task> {
        let uuid = Uuid::new_v4();
        self.add_undo_point(false)?;
        let taskmap = self.taskdb.apply(SyncOp::Create { uuid })?;
        let depmap = self.dependency_map(false)?;
        let mut task = Task::new(uuid, taskmap, depmap).into_mut(self);
        task.set_description(description)?;
        task.set_status(status)?;
        task.set_entry(Some(Utc::now()))?;
        trace!("task {} created", uuid);
        Ok(task.into_immut())
    }
    /// Create a new, empty task with the given UUID. This is useful for importing tasks, but
    /// otherwise should be avoided in favor of `new_task`. If the task already exists, this
    /// does nothing and returns the existing task.
    pub fn import_task_with_uuid(&mut self, uuid: Uuid) -> anyhow::Result<Task> {
        self.add_undo_point(false)?;
        let taskmap = self.taskdb.apply(SyncOp::Create { uuid })?;
        let depmap = self.dependency_map(false)?;
        Ok(Task::new(uuid, taskmap, depmap))
    }
    /// Delete a task. The task must exist. Note that this is different from setting status to
    /// Deleted; this is the final purge of the task. This is not a public method as deletion
    /// should only occur through expiration.
    fn delete_task(&mut self, uuid: Uuid) -> anyhow::Result<()> {
        self.add_undo_point(false)?;
        self.taskdb.apply(SyncOp::Delete { uuid })?;
        trace!("task {} deleted", uuid);
        Ok(())
    }
    /// Synchronize this replica against the given server. The working set is rebuilt after
    /// this occurs, but without renumbering, so any newly-pending tasks should appear in
    /// the working set.
    ///
    /// If `avoid_snapshots` is true, the sync operations produces a snapshot only when the server
    /// indicate it is urgent (snapshot urgency "high"). This allows time for other replicas to
    /// create a snapshot before this one does.
    ///
    /// Set this to true on systems more constrained in CPU, memory, or bandwidth than a typical desktop
    /// system
    pub fn sync(
        &mut self,
        server: &mut Box<dyn Server>,
        avoid_snapshots: bool,
    ) -> anyhow::Result<()> {
        self.taskdb
            .sync(server, avoid_snapshots)
            .context("Failed to synchronize with server")?;
        self.rebuild_working_set(false)
            .context("Failed to rebuild working set after sync")?;
        Ok(())
    }
    /// Undo local operations until the most recent UndoPoint, returning false if there are no
    /// local operations to undo.
    pub fn undo(&mut self) -> anyhow::Result<bool> {
        self.taskdb.undo()
    }
    /// Rebuild this replica's working set, based on whether tasks are pending or not. If
    /// `renumber` is true, then existing tasks may be moved to new working-set indices; in any
    /// case, on completion all pending tasks are in the working set and all non- pending tasks are
    /// not.
    pub fn rebuild_working_set(&mut self, renumber: bool) -> anyhow::Result<()> {
        let pending = String::from(Status::Pending.to_taskmap());
        self.taskdb
            .rebuild_working_set(|t| t.get("status") == Some(&pending), renumber)?;
        Ok(())
    }
    /// Expire old, deleted tasks.
    ///
    /// Expiration entails removal of tasks from the replica. Any modifications that occur after
    /// the deletion (such as operations synchronized from other replicas) will do nothing.
    ///
    /// Tasks are eligible for expiration when they have status Deleted and have not been modified
    /// for 180 days (about six months). Note that completed tasks are not eligible.
    pub fn expire_tasks(&mut self) -> anyhow::Result<()> {
        let six_mos_ago = Utc::now() - Duration::days(180);
        self.all_tasks()?
            .iter()
            .filter(|(_, t)| t.get_status() == Status::Deleted)
            .filter(|(_, t)| {
                // tasks with no modification time are never expired
                if let Some(m) = t.get_modified() {
                    m < six_mos_ago
                } else {
                    false
                }
            })
            .try_for_each(|(u, _)| self.delete_task(*u))?;
        Ok(())
    }
    /// Add an UndoPoint, if one has not already been added by this Replica. This occurs
    /// automatically when a change is made. The `force` flag allows forcing a new UndoPoint
    /// even if one has already been created by this Replica, and may be useful when a Replica
    /// instance is held for a long time and used to apply more than one user-visible change.
    pub fn add_undo_point(&mut self, force: bool) -> anyhow::Result<()> {
        if force || !self.added_undo_point {
            self.taskdb.add_undo_point()?;
            self.added_undo_point = true;
        }
        Ok(())
    }
    /// Get the number of operations local to this replica and not yet synchronized to the server.
    pub fn num_local_operations(&mut self) -> anyhow::Result<usize> {
        self.taskdb.num_operations()
    }
}
#[cfg(test)]
mod tests {
    // These tests run against in-memory storage (`Replica::new_inmemory`), so
    // no external state or cleanup is involved.
    use super::*;
    use crate::storage::ReplicaOp;
    use crate::task::Status;
    use chrono::TimeZone;
    use pretty_assertions::assert_eq;
    use std::collections::HashSet;
    use uuid::Uuid;
    #[test]
    fn new_task() {
        let mut rep = Replica::new_inmemory();
        let t = rep.new_task(Status::Pending, "a task".into()).unwrap();
        assert_eq!(t.get_description(), String::from("a task"));
        assert_eq!(t.get_status(), Status::Pending);
        assert!(t.get_modified().is_some());
    }
    #[test]
    fn modify_task() {
        let mut rep = Replica::new_inmemory();
        let t = rep.new_task(Status::Pending, "a task".into()).unwrap();
        let mut t = t.into_mut(&mut rep);
        t.set_description(String::from("past tense")).unwrap();
        t.set_status(Status::Completed).unwrap();
        // check that values have changed on the TaskMut
        assert_eq!(t.get_description(), "past tense");
        assert_eq!(t.get_status(), Status::Completed);
        // check that values have changed after into_immut
        let t = t.into_immut();
        assert_eq!(t.get_description(), "past tense");
        assert_eq!(t.get_status(), Status::Completed);
        // check that values have changed in storage, too
        let t = rep.get_task(t.get_uuid()).unwrap().unwrap();
        assert_eq!(t.get_description(), "past tense");
        assert_eq!(t.get_status(), Status::Completed);
        // and check for the corresponding operations, cleaning out the timestamps
        // and modified properties as these are based on the current time
        let now = Utc::now();
        let clean_op = |op: ReplicaOp| {
            if let ReplicaOp::Update {
                uuid,
                property,
                mut old_value,
                mut value,
                ..
            } = op
            {
                // rewrite automatically-created dates to "just-now" for ease
                // of testing
                if property == "modified" || property == "end" || property == "entry" {
                    if value.is_some() {
                        value = Some("just-now".into());
                    }
                    if old_value.is_some() {
                        old_value = Some("just-now".into());
                    }
                }
                ReplicaOp::Update {
                    uuid,
                    property,
                    old_value,
                    value,
                    timestamp: now,
                }
            } else {
                op
            }
        };
        assert_eq!(
            rep.taskdb
                .operations()
                .drain(..)
                .map(clean_op)
                .collect::<Vec<_>>(),
            vec![
                ReplicaOp::UndoPoint,
                ReplicaOp::Create { uuid: t.get_uuid() },
                ReplicaOp::Update {
                    uuid: t.get_uuid(),
                    property: "modified".into(),
                    old_value: None,
                    value: Some("just-now".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid: t.get_uuid(),
                    property: "description".into(),
                    old_value: None,
                    value: Some("a task".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid: t.get_uuid(),
                    property: "status".into(),
                    old_value: None,
                    value: Some("pending".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid: t.get_uuid(),
                    property: "entry".into(),
                    old_value: None,
                    value: Some("just-now".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid: t.get_uuid(),
                    property: "modified".into(),
                    old_value: Some("just-now".into()),
                    value: Some("just-now".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid: t.get_uuid(),
                    property: "description".into(),
                    old_value: Some("a task".into()),
                    value: Some("past tense".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid: t.get_uuid(),
                    property: "end".into(),
                    old_value: None,
                    value: Some("just-now".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid: t.get_uuid(),
                    property: "status".into(),
                    old_value: Some("pending".into()),
                    value: Some("completed".into()),
                    timestamp: now,
                },
            ]
        );
        assert_eq!(rep.num_local_operations().unwrap(), 10);
    }
    #[test]
    fn delete_task() {
        let mut rep = Replica::new_inmemory();
        let t = rep.new_task(Status::Pending, "a task".into()).unwrap();
        let uuid = t.get_uuid();
        rep.delete_task(uuid).unwrap();
        assert_eq!(rep.get_task(uuid).unwrap(), None);
    }
    #[test]
    fn get_and_modify() {
        let mut rep = Replica::new_inmemory();
        let t = rep
            .new_task(Status::Pending, "another task".into())
            .unwrap();
        let uuid = t.get_uuid();
        let t = rep.get_task(uuid).unwrap().unwrap();
        assert_eq!(t.get_description(), String::from("another task"));
        let mut t = t.into_mut(&mut rep);
        t.set_status(Status::Deleted).unwrap();
        t.set_description("gone".into()).unwrap();
        let t = rep.get_task(uuid).unwrap().unwrap();
        assert_eq!(t.get_status(), Status::Deleted);
        assert_eq!(t.get_description(), "gone");
        rep.rebuild_working_set(true).unwrap();
        let ws = rep.working_set().unwrap();
        assert!(ws.by_uuid(t.get_uuid()).is_none());
    }
    #[test]
    fn new_pending_adds_to_working_set() {
        let mut rep = Replica::new_inmemory();
        let t = rep
            .new_task(Status::Pending, "to-be-pending".into())
            .unwrap();
        let uuid = t.get_uuid();
        let ws = rep.working_set().unwrap();
        assert_eq!(ws.len(), 1); // only one non-none value
        assert!(ws.by_index(0).is_none());
        assert_eq!(ws.by_index(1), Some(uuid));
        let ws = rep.working_set().unwrap();
        assert_eq!(ws.by_uuid(t.get_uuid()), Some(1));
    }
    #[test]
    fn get_does_not_exist() {
        let mut rep = Replica::new_inmemory();
        let uuid = Uuid::new_v4();
        assert_eq!(rep.get_task(uuid).unwrap(), None);
    }
    #[test]
    fn expire() {
        let mut rep = Replica::new_inmemory();
        let mut t;
        rep.new_task(Status::Pending, "keeper 1".into()).unwrap();
        rep.new_task(Status::Completed, "keeper 2".into()).unwrap();
        t = rep.new_task(Status::Deleted, "keeper 3".into()).unwrap();
        {
            let mut t = t.into_mut(&mut rep);
            // set entry, with modification set as a side-effect
            t.set_entry(Some(Utc::now())).unwrap();
        }
        t = rep.new_task(Status::Deleted, "goner".into()).unwrap();
        {
            let mut t = t.into_mut(&mut rep);
            // modified long before the 180-day expiration cutoff
            t.set_modified(Utc.ymd(1980, 1, 1).and_hms(0, 0, 0))
                .unwrap();
        }
        rep.expire_tasks().unwrap();
        for (_, t) in rep.all_tasks().unwrap() {
            println!("got task {}", t.get_description());
            assert!(t.get_description().starts_with("keeper"));
        }
    }
    #[test]
    fn dependency_map() {
        let mut rep = Replica::new_inmemory();
        let mut tasks = vec![];
        for _ in 0..4 {
            tasks.push(rep.new_task(Status::Pending, "t".into()).unwrap());
        }
        let uuids: Vec<_> = tasks.iter().map(|t| t.get_uuid()).collect();
        // t[3] depends on t[2], and t[1]
        {
            let mut t = tasks.pop().unwrap().into_mut(&mut rep);
            t.add_dependency(uuids[2]).unwrap();
            t.add_dependency(uuids[1]).unwrap();
        }
        // t[2] depends on t[0]
        {
            let mut t = tasks.pop().unwrap().into_mut(&mut rep);
            t.add_dependency(uuids[0]).unwrap();
        }
        // t[1] depends on t[0]
        {
            let mut t = tasks.pop().unwrap().into_mut(&mut rep);
            t.add_dependency(uuids[0]).unwrap();
        }
        // generate the dependency map, forcing an update based on the newly-added
        // dependencies
        let dm = rep.dependency_map(true).unwrap();
        assert_eq!(
            dm.dependencies(uuids[3]).collect::<HashSet<_>>(),
            set![uuids[1], uuids[2]]
        );
        assert_eq!(
            dm.dependencies(uuids[2]).collect::<HashSet<_>>(),
            set![uuids[0]]
        );
        assert_eq!(
            dm.dependencies(uuids[1]).collect::<HashSet<_>>(),
            set![uuids[0]]
        );
        assert_eq!(dm.dependencies(uuids[0]).collect::<HashSet<_>>(), set![]);
        assert_eq!(dm.dependents(uuids[3]).collect::<HashSet<_>>(), set![]);
        assert_eq!(
            dm.dependents(uuids[2]).collect::<HashSet<_>>(),
            set![uuids[3]]
        );
        assert_eq!(
            dm.dependents(uuids[1]).collect::<HashSet<_>>(),
            set![uuids[3]]
        );
        assert_eq!(
            dm.dependents(uuids[0]).collect::<HashSet<_>>(),
            set![uuids[1], uuids[2]]
        );
    }
}

View file

@ -0,0 +1,39 @@
use super::types::Server;
use super::{LocalServer, RemoteServer};
use std::path::PathBuf;
use uuid::Uuid;
/// The configuration for a replica's access to a sync server.
///
/// Convert a value of this type into a usable server with
/// [`ServerConfig::into_server`].
pub enum ServerConfig {
    /// A local task database, for situations with a single replica.
    Local {
        /// Path containing the server's DB
        server_dir: PathBuf,
    },
    /// A remote taskchampion-sync-server instance
    Remote {
        /// Sync server "origin"; a URL with schema and hostname but no path or trailing `/`
        origin: String,
        /// Client Key to identify and authenticate this replica to the server
        client_key: Uuid,
        /// Private encryption secret used to encrypt all data sent to the server. This can
        /// be any suitably un-guessable string of bytes.
        encryption_secret: Vec<u8>,
    },
}
impl ServerConfig {
    /// Get a server based on this configuration
    ///
    /// Consumes the configuration and returns a boxed trait object, so local
    /// and remote servers can be used interchangeably by callers.
    pub fn into_server(self) -> anyhow::Result<Box<dyn Server>> {
        Ok(match self {
            ServerConfig::Local { server_dir } => Box::new(LocalServer::new(server_dir)?),
            ServerConfig::Remote {
                origin,
                client_key,
                encryption_secret,
            } => Box::new(RemoteServer::new(origin, client_key, encryption_secret)?),
        })
    }
}

View file

@ -0,0 +1,412 @@
/// This module implements the encryption specified in the sync-protocol
/// document.
use ring::{aead, digest, pbkdf2, rand, rand::SecureRandom};
use std::io::Read;
use uuid::Uuid;
/// Number of PBKDF2 rounds used when deriving the AEAD key.
const PBKDF2_ITERATIONS: u32 = 100000;
/// Version byte prefixed to every serialized envelope.
const ENVELOPE_VERSION: u8 = 1;
/// Length of the AAD: one app-id byte plus the 16-byte version_id UUID.
const AAD_LEN: usize = 17;
/// App-id byte identifying TaskChampion task data in the AAD.
const TASK_APP_ID: u8 = 1;
/// A Cryptor stores a secret and allows sealing and unsealing. It derives a key from the secret,
/// which takes a nontrivial amount of time, so it should be created once and re-used for the given
/// client_key.
pub(super) struct Cryptor {
    /// AEAD key derived from the client_key and secret.
    key: aead::LessSafeKey,
    /// Source of random nonces for sealing.
    rng: rand::SystemRandom,
}
impl Cryptor {
pub(super) fn new(client_key: Uuid, secret: &Secret) -> anyhow::Result<Self> {
Ok(Cryptor {
key: Self::derive_key(client_key, secret)?,
rng: rand::SystemRandom::new(),
})
}
/// Derive a key as specified for version 1. Note that this may take 10s of ms.
fn derive_key(client_key: Uuid, secret: &Secret) -> anyhow::Result<aead::LessSafeKey> {
let salt = digest::digest(&digest::SHA256, client_key.as_bytes());
let mut key_bytes = vec![0u8; aead::CHACHA20_POLY1305.key_len()];
pbkdf2::derive(
pbkdf2::PBKDF2_HMAC_SHA256,
std::num::NonZeroU32::new(PBKDF2_ITERATIONS).unwrap(),
salt.as_ref(),
secret.as_ref(),
&mut key_bytes,
);
let unbound_key = aead::UnboundKey::new(&aead::CHACHA20_POLY1305, &key_bytes)
.map_err(|_| anyhow::anyhow!("error while creating AEAD key"))?;
Ok(aead::LessSafeKey::new(unbound_key))
}
/// Encrypt the given payload.
pub(super) fn seal(&self, payload: Unsealed) -> anyhow::Result<Sealed> {
let Unsealed {
version_id,
mut payload,
} = payload;
let mut nonce_buf = [0u8; aead::NONCE_LEN];
self.rng
.fill(&mut nonce_buf)
.map_err(|_| anyhow::anyhow!("error generating random nonce"))?;
let nonce = aead::Nonce::assume_unique_for_key(nonce_buf);
let aad = self.make_aad(version_id);
let tag = self
.key
.seal_in_place_separate_tag(nonce, aad, &mut payload)
.map_err(|_| anyhow::anyhow!("error while sealing"))?;
payload.extend_from_slice(tag.as_ref());
let env = Envelope {
nonce: &nonce_buf,
payload: payload.as_ref(),
};
Ok(Sealed {
version_id,
payload: env.to_bytes(),
})
}
/// Decrypt the given payload, verifying it was created for the given version_id
pub(super) fn unseal(&self, payload: Sealed) -> anyhow::Result<Unsealed> {
let Sealed {
version_id,
payload,
} = payload;
let env = Envelope::from_bytes(&payload)?;
let mut nonce = [0u8; aead::NONCE_LEN];
nonce.copy_from_slice(env.nonce);
let nonce = aead::Nonce::assume_unique_for_key(nonce);
let aad = self.make_aad(version_id);
let mut payload = env.payload.to_vec();
let plaintext = self
.key
.open_in_place(nonce, aad, payload.as_mut())
.map_err(|_| anyhow::anyhow!("error while creating AEAD key"))?;
Ok(Unsealed {
version_id,
payload: plaintext.to_vec(),
})
}
fn make_aad(&self, version_id: Uuid) -> aead::Aad<[u8; AAD_LEN]> {
let mut aad = [0u8; AAD_LEN];
aad[0] = TASK_APP_ID;
aad[1..].copy_from_slice(version_id.as_bytes());
aead::Aad::from(aad)
}
}
/// Secret represents a secret key as used for encryption and decryption.
pub(super) struct Secret(pub(super) Vec<u8>);

/// A Secret can be constructed directly from raw key bytes.
impl From<Vec<u8>> for Secret {
    fn from(raw: Vec<u8>) -> Self {
        Secret(raw)
    }
}

/// Expose the raw key material as a byte slice.
impl AsRef<[u8]> for Secret {
    fn as_ref(&self) -> &[u8] {
        self.0.as_ref()
    }
}
/// Envelope for the data stored on the server, containing the information
/// required to decrypt.
#[derive(Debug, PartialEq, Eq)]
struct Envelope<'a> {
    /// Random nonce used when sealing this envelope's payload.
    nonce: &'a [u8],
    /// Ciphertext, with the AEAD tag appended.
    payload: &'a [u8],
}
impl<'a> Envelope<'a> {
    /// Parse an envelope from its serialized form, checking the version byte
    /// and minimum length. The returned envelope borrows from `buf`.
    fn from_bytes(buf: &'a [u8]) -> anyhow::Result<Envelope<'a>> {
        // at minimum: version byte + nonce + a nonempty payload
        if buf.len() <= 1 + aead::NONCE_LEN {
            anyhow::bail!("envelope is too small");
        }
        match buf[0] {
            ENVELOPE_VERSION => Ok(Envelope {
                nonce: &buf[1..1 + aead::NONCE_LEN],
                payload: &buf[1 + aead::NONCE_LEN..],
            }),
            version => anyhow::bail!("unrecognized encryption envelope version {}", version),
        }
    }

    /// Serialize this envelope: version byte, then nonce, then payload.
    fn to_bytes(&self) -> Vec<u8> {
        let mut out = Vec::with_capacity(1 + self.nonce.len() + self.payload.len());
        out.push(ENVELOPE_VERSION);
        out.extend_from_slice(self.nonce);
        out.extend_from_slice(self.payload);
        out
    }
}
/// An unsealed payload with an attached version_id. The version_id is used to
/// validate the context of the payload on unsealing.
pub(super) struct Unsealed {
    /// Version with which this payload is associated.
    pub(super) version_id: Uuid,
    /// Plaintext bytes.
    pub(super) payload: Vec<u8>,
}
/// An encrypted payload
pub(super) struct Sealed {
    /// Version with which this payload is associated; bound into the AEAD AAD
    /// at seal time, so unsealing with a different version_id fails.
    pub(super) version_id: Uuid,
    /// Serialized envelope bytes (version byte, nonce, ciphertext + tag).
    pub(super) payload: Vec<u8>,
}
impl Sealed {
    /// Build a Sealed payload from an HTTP response body, after verifying that
    /// the response carries the expected content type.
    pub(super) fn from_resp(
        resp: ureq::Response,
        version_id: Uuid,
        content_type: &str,
    ) -> Result<Sealed, anyhow::Error> {
        // reject the body outright on a content-type mismatch
        if resp.header("Content-Type") != Some(content_type) {
            return Err(anyhow::anyhow!(
                "Response did not have expected content-type"
            ));
        }
        let mut payload = vec![];
        resp.into_reader().read_to_end(&mut payload)?;
        Ok(Self {
            version_id,
            payload,
        })
    }
}
impl AsRef<[u8]> for Sealed {
    /// Expose the serialized envelope bytes.
    fn as_ref(&self) -> &[u8] {
        self.payload.as_ref()
    }
}
#[cfg(test)]
mod test {
    // Round-trip and failure-mode tests for the envelope format and the
    // seal/unseal operations.
    use super::*;
    use pretty_assertions::assert_eq;
    #[test]
    fn envelope_round_trip() {
        let env = Envelope {
            nonce: &[2; 12],
            payload: b"HELLO",
        };
        let bytes = env.to_bytes();
        let env2 = Envelope::from_bytes(&bytes).unwrap();
        assert_eq!(env, env2);
    }
    #[test]
    fn envelope_bad_version() {
        let env = Envelope {
            nonce: &[2; 12],
            payload: b"HELLO",
        };
        let mut bytes = env.to_bytes();
        bytes[0] = 99;
        assert!(Envelope::from_bytes(&bytes).is_err());
    }
    #[test]
    fn envelope_too_short() {
        let env = Envelope {
            nonce: &[2; 12],
            payload: b"HELLO",
        };
        let bytes = env.to_bytes();
        let bytes = &bytes[..10];
        assert!(Envelope::from_bytes(bytes).is_err());
    }
    #[test]
    fn round_trip() {
        let version_id = Uuid::new_v4();
        let payload = b"HISTORY REPEATS ITSELF".to_vec();
        let secret = Secret(b"SEKRIT".to_vec());
        let cryptor = Cryptor::new(Uuid::new_v4(), &secret).unwrap();
        let unsealed = Unsealed {
            version_id,
            payload: payload.clone(),
        };
        let sealed = cryptor.seal(unsealed).unwrap();
        let unsealed = cryptor.unseal(sealed).unwrap();
        assert_eq!(unsealed.payload, payload);
        assert_eq!(unsealed.version_id, version_id);
    }
    #[test]
    fn round_trip_bad_key() {
        let version_id = Uuid::new_v4();
        let payload = b"HISTORY REPEATS ITSELF".to_vec();
        let client_key = Uuid::new_v4();
        let secret = Secret(b"SEKRIT".to_vec());
        let cryptor = Cryptor::new(client_key, &secret).unwrap();
        let unsealed = Unsealed {
            version_id,
            payload: payload.clone(),
        };
        let sealed = cryptor.seal(unsealed).unwrap();
        let secret = Secret(b"DIFFERENT_SECRET".to_vec());
        let cryptor = Cryptor::new(client_key, &secret).unwrap();
        assert!(cryptor.unseal(sealed).is_err());
    }
    #[test]
    fn round_trip_bad_version() {
        let version_id = Uuid::new_v4();
        let payload = b"HISTORY REPEATS ITSELF".to_vec();
        let client_key = Uuid::new_v4();
        let secret = Secret(b"SEKRIT".to_vec());
        let cryptor = Cryptor::new(client_key, &secret).unwrap();
        let unsealed = Unsealed {
            version_id,
            payload: payload.clone(),
        };
        let mut sealed = cryptor.seal(unsealed).unwrap();
        sealed.version_id = Uuid::new_v4(); // change the version_id
        assert!(cryptor.unseal(sealed).is_err());
    }
    #[test]
    fn round_trip_bad_client_key() {
        let version_id = Uuid::new_v4();
        let payload = b"HISTORY REPEATS ITSELF".to_vec();
        let client_key = Uuid::new_v4();
        let secret = Secret(b"SEKRIT".to_vec());
        let cryptor = Cryptor::new(client_key, &secret).unwrap();
        let unsealed = Unsealed {
            version_id,
            payload: payload.clone(),
        };
        let sealed = cryptor.seal(unsealed).unwrap();
        let client_key = Uuid::new_v4();
        let cryptor = Cryptor::new(client_key, &secret).unwrap();
        assert!(cryptor.unseal(sealed).is_err());
    }
    mod externally_valid {
        // validate data generated by generate-test-data.py. The intent is to
        // validate that this format matches the specification by implementing
        // the specification in a second language
        use super::*;
        use pretty_assertions::assert_eq;
        /// The values in generate-test-data.py
        fn defaults() -> (Uuid, Uuid, Vec<u8>) {
            (
                Uuid::parse_str("b0517957-f912-4d49-8330-f612e73030c4").unwrap(),
                Uuid::parse_str("0666d464-418a-4a08-ad53-6f15c78270cd").unwrap(),
                b"b4a4e6b7b811eda1dc1a2693ded".to_vec(),
            )
        }
        #[test]
        fn good() {
            let (version_id, client_key, encryption_secret) = defaults();
            let sealed = Sealed {
                version_id,
                payload: include_bytes!("test-good.data").to_vec(),
            };
            let cryptor = Cryptor::new(client_key, &Secret(encryption_secret)).unwrap();
            let unsealed = cryptor.unseal(sealed).unwrap();
            assert_eq!(unsealed.payload, b"SUCCESS");
            assert_eq!(unsealed.version_id, version_id);
        }
        #[test]
        fn bad_version_id() {
            let (version_id, client_key, encryption_secret) = defaults();
            let sealed = Sealed {
                version_id,
                payload: include_bytes!("test-bad-version-id.data").to_vec(),
            };
            let cryptor = Cryptor::new(client_key, &Secret(encryption_secret)).unwrap();
            assert!(cryptor.unseal(sealed).is_err());
        }
        #[test]
        fn bad_client_key() {
            let (version_id, client_key, encryption_secret) = defaults();
            let sealed = Sealed {
                version_id,
                payload: include_bytes!("test-bad-client-key.data").to_vec(),
            };
            let cryptor = Cryptor::new(client_key, &Secret(encryption_secret)).unwrap();
            assert!(cryptor.unseal(sealed).is_err());
        }
        #[test]
        fn bad_secret() {
            let (version_id, client_key, encryption_secret) = defaults();
            let sealed = Sealed {
                version_id,
                payload: include_bytes!("test-bad-secret.data").to_vec(),
            };
            let cryptor = Cryptor::new(client_key, &Secret(encryption_secret)).unwrap();
            assert!(cryptor.unseal(sealed).is_err());
        }
        #[test]
        fn bad_version() {
            let (version_id, client_key, encryption_secret) = defaults();
            let sealed = Sealed {
                version_id,
                payload: include_bytes!("test-bad-version.data").to_vec(),
            };
            let cryptor = Cryptor::new(client_key, &Secret(encryption_secret)).unwrap();
            assert!(cryptor.unseal(sealed).is_err());
        }
        #[test]
        fn bad_app_id() {
            let (version_id, client_key, encryption_secret) = defaults();
            let sealed = Sealed {
                version_id,
                payload: include_bytes!("test-bad-app-id.data").to_vec(),
            };
            let cryptor = Cryptor::new(client_key, &Secret(encryption_secret)).unwrap();
            assert!(cryptor.unseal(sealed).is_err());
        }
    }
}

View file

@ -0,0 +1,77 @@
# This file generates test-encrypted.data. To run it:
# - pip install cryptography pbkdf2
# - python taskchampion/src/server/generate-test-data.py taskchampion/src/server/
import os
import hashlib
import pbkdf2
import secrets
import sys
import uuid
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305
# these values match values used in the rust tests
# (see `defaults()` in the `externally_valid` module of the Rust crypto tests —
# changing them here without changing the Rust side will break those tests)
client_key = "0666d464-418a-4a08-ad53-6f15c78270cd"
encryption_secret = b"b4a4e6b7b811eda1dc1a2693ded"
version_id = "b0517957-f912-4d49-8330-f612e73030c4"
def gen(
        version_id=version_id, client_key=client_key, encryption_secret=encryption_secret,
        app_id=1, version=1):
    """Build one sealed envelope in the format the Rust crypto code expects.

    With default arguments the result is a valid payload; callers override a
    single argument to produce a deliberately-invalid payload for the
    corresponding Rust test.  Returns the envelope bytes:
    version byte || 12-byte nonce || AEAD ciphertext.
    """
    # first, generate the encryption key
    # (PBKDF2-HMAC-SHA256, salted with the SHA-256 of the client key's bytes)
    salt = hashlib.sha256(uuid.UUID(client_key).bytes).digest()
    key = pbkdf2.PBKDF2(
        encryption_secret,
        salt,
        digestmodule=hashlib.sha256,
        iterations=100000,
    ).read(32)
    # create a nonce (ChaCha20Poly1305 uses a 96-bit nonce)
    nonce = secrets.token_bytes(12)
    # sanity check: a single byte literal really is one byte
    assert len(b"\x01") == 1
    # create the AAD: one app_id byte followed by the raw version_id bytes
    aad = b''.join([
        bytes([app_id]),
        uuid.UUID(version_id).bytes,
    ])
    # encrypt using AEAD
    chacha = ChaCha20Poly1305(key)
    ciphertext = chacha.encrypt(nonce, b"SUCCESS", aad)
    # create the envelope: version byte, then nonce, then ciphertext
    envelope = b''.join([
        bytes([version]),
        nonce,
        ciphertext,
    ])
    return envelope
def main():
    """Write all test fixture files into the directory named by argv[1].

    Produces one well-formed payload ("good") and one deliberately-corrupted
    payload for each failure mode exercised by the Rust tests.
    """
    # renamed from `dir`, which shadows the Python builtin
    out_dir = sys.argv[1]
    with open(os.path.join(out_dir, 'test-good.data'), "wb") as f:
        f.write(gen())
    with open(os.path.join(out_dir, 'test-bad-version-id.data'), "wb") as f:
        f.write(gen(version_id=uuid.uuid4().hex))
    with open(os.path.join(out_dir, 'test-bad-client-key.data'), "wb") as f:
        f.write(gen(client_key=uuid.uuid4().hex))
    with open(os.path.join(out_dir, 'test-bad-secret.data'), "wb") as f:
        f.write(gen(encryption_secret=b"xxxxxxxxxxxxxxxxxxxxx"))
    with open(os.path.join(out_dir, 'test-bad-version.data'), "wb") as f:
        f.write(gen(version=99))
    with open(os.path.join(out_dir, 'test-bad-app-id.data'), "wb") as f:
        f.write(gen(app_id=99))


# Guard the entry point so importing this module does not regenerate files.
if __name__ == "__main__":
    main()

View file

@ -0,0 +1,260 @@
use crate::server::{
AddVersionResult, GetVersionResult, HistorySegment, Server, Snapshot, SnapshotUrgency,
VersionId, NIL_VERSION_ID,
};
use crate::storage::sqlite::StoredUuid;
use anyhow::Context;
use rusqlite::params;
use rusqlite::OptionalExtension;
use serde::{Deserialize, Serialize};
use std::path::Path;
use uuid::Uuid;
/// A single version row as stored by the local server; serializable so it can
/// be written to / read from the SQLite `versions` table.
#[derive(Serialize, Deserialize, Debug)]
struct Version {
    version_id: VersionId,
    parent_version_id: VersionId,
    history_segment: HistorySegment,
}
/// A [`Server`] implementation backed by a local SQLite database
/// (see [`LocalServer::new`]); it has no notion of clients or encryption.
pub struct LocalServer {
    con: rusqlite::Connection,
}
impl LocalServer {
    /// Begin a SQLite transaction on the underlying connection.
    fn txn(&mut self) -> anyhow::Result<rusqlite::Transaction> {
        let txn = self.con.transaction()?;
        Ok(txn)
    }

    /// A server which has no notion of clients, signatures, encryption, etc.
    ///
    /// The database lives in `taskchampion-local-sync-server.sqlite3` in the
    /// given directory, and the schema is created if it does not already exist.
    pub fn new<P: AsRef<Path>>(directory: P) -> anyhow::Result<LocalServer> {
        let db_file = directory
            .as_ref()
            .join("taskchampion-local-sync-server.sqlite3");
        let con = rusqlite::Connection::open(&db_file)?;

        // schema creation is idempotent (`IF NOT EXISTS`), so this is safe on
        // every startup; a static array avoids the needless `vec!` allocation
        let queries = [
            "CREATE TABLE IF NOT EXISTS data (key STRING PRIMARY KEY, value STRING);",
            "CREATE TABLE IF NOT EXISTS versions (version_id STRING PRIMARY KEY, parent_version_id STRING, data STRING);",
        ];
        for q in queries {
            con.execute(q, []).context("Creating table")?;
        }

        Ok(LocalServer { con })
    }

    /// Fetch the latest version_id from the `data` table, or [`NIL_VERSION_ID`]
    /// if no version has been recorded yet.
    fn get_latest_version_id(&mut self) -> anyhow::Result<VersionId> {
        let t = self.txn()?;
        let result: Option<StoredUuid> = t
            .query_row(
                "SELECT value FROM data WHERE key = 'latest_version_id' LIMIT 1",
                rusqlite::params![],
                |r| r.get(0),
            )
            .optional()?;
        Ok(result.map(|x| x.0).unwrap_or(NIL_VERSION_ID))
    }

    /// Record `version_id` as the latest version, replacing any previous value.
    fn set_latest_version_id(&mut self, version_id: VersionId) -> anyhow::Result<()> {
        let t = self.txn()?;
        t.execute(
            "INSERT OR REPLACE INTO data (key, value) VALUES ('latest_version_id', ?)",
            params![&StoredUuid(version_id)],
        )
        // was "Update task query" — a copy-paste from the task storage; this
        // statement updates the latest version_id, not a task
        .context("Setting latest version_id")?;
        t.commit()?;
        Ok(())
    }

    /// Look up the version whose parent is `parent_version_id`, if any.
    fn get_version_by_parent_version_id(
        &mut self,
        parent_version_id: VersionId,
    ) -> anyhow::Result<Option<Version>> {
        let t = self.txn()?;
        let r = t
            .query_row(
                "SELECT version_id, parent_version_id, data FROM versions WHERE parent_version_id = ?",
                params![&StoredUuid(parent_version_id)],
                |r| {
                    let version_id: StoredUuid = r.get("version_id")?;
                    let parent_version_id: StoredUuid = r.get("parent_version_id")?;
                    Ok(Version {
                        version_id: version_id.0,
                        parent_version_id: parent_version_id.0,
                        history_segment: r.get("data")?,
                    })
                },
            )
            .optional()
            .context("Get version query")?;
        Ok(r)
    }

    /// Insert a new version row.  Does not update the latest version_id; the
    /// caller is expected to do that separately.
    fn add_version_by_parent_version_id(&mut self, version: Version) -> anyhow::Result<()> {
        let t = self.txn()?;
        t.execute(
            "INSERT INTO versions (version_id, parent_version_id, data) VALUES (?, ?, ?)",
            params![
                StoredUuid(version.version_id),
                StoredUuid(version.parent_version_id),
                version.history_segment
            ],
        )?;
        t.commit()?;
        Ok(())
    }
}
impl Server for LocalServer {
    // TODO: better transaction isolation for add_version (gets and sets should be in the same
    // transaction)
    fn add_version(
        &mut self,
        parent_version_id: VersionId,
        history_segment: HistorySegment,
    ) -> anyhow::Result<(AddVersionResult, SnapshotUrgency)> {
        // no client lookup
        // no signature validation

        // check the parent_version_id for linearity
        let latest = self.get_latest_version_id()?;
        if latest != NIL_VERSION_ID && latest != parent_version_id {
            return Ok((
                AddVersionResult::ExpectedParentVersion(latest),
                SnapshotUrgency::None,
            ));
        }

        // invent a new ID for this version
        let version_id = Uuid::new_v4();
        let version = Version {
            version_id,
            parent_version_id,
            history_segment,
        };
        self.add_version_by_parent_version_id(version)?;
        self.set_latest_version_id(version_id)?;

        Ok((AddVersionResult::Ok(version_id), SnapshotUrgency::None))
    }

    fn get_child_version(
        &mut self,
        parent_version_id: VersionId,
    ) -> anyhow::Result<GetVersionResult> {
        Ok(
            match self.get_version_by_parent_version_id(parent_version_id)? {
                Some(version) => GetVersionResult::Version {
                    version_id: version.version_id,
                    parent_version_id: version.parent_version_id,
                    history_segment: version.history_segment,
                },
                None => GetVersionResult::NoSuchVersion,
            },
        )
    }

    fn add_snapshot(&mut self, _version_id: VersionId, _snapshot: Snapshot) -> anyhow::Result<()> {
        // the local server never requests a snapshot, so it should never get one
        unreachable!()
    }

    fn get_snapshot(&mut self) -> anyhow::Result<Option<(VersionId, Snapshot)>> {
        Ok(None)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use pretty_assertions::assert_eq;
    use tempfile::TempDir;

    /// An empty server has no child of the nil version.
    #[test]
    fn test_empty() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        // `tmp_dir.path()` is already a `&Path`; no extra `&` needed
        let mut server = LocalServer::new(tmp_dir.path())?;
        let child_version = server.get_child_version(NIL_VERSION_ID)?;
        assert_eq!(child_version, GetVersionResult::NoSuchVersion);
        Ok(())
    }

    /// Adding a version on top of the nil version succeeds and is retrievable.
    #[test]
    fn test_add_zero_base() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut server = LocalServer::new(tmp_dir.path())?;
        let history = b"1234".to_vec();
        match server.add_version(NIL_VERSION_ID, history.clone())?.0 {
            AddVersionResult::ExpectedParentVersion(_) => {
                panic!("should have accepted the version")
            }
            AddVersionResult::Ok(version_id) => {
                let new_version = server.get_child_version(NIL_VERSION_ID)?;
                assert_eq!(
                    new_version,
                    GetVersionResult::Version {
                        version_id,
                        parent_version_id: NIL_VERSION_ID,
                        history_segment: history,
                    }
                );
            }
        }
        Ok(())
    }

    /// A first version may name any parent, since the server has no latest
    /// version yet.
    #[test]
    fn test_add_nonzero_base() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut server = LocalServer::new(tmp_dir.path())?;
        let history = b"1234".to_vec();
        // a type ascription, not an `as` cast — `VersionId` is just `Uuid`
        let parent_version_id: VersionId = Uuid::new_v4();
        // This is OK because the server has no latest_version_id yet
        match server.add_version(parent_version_id, history.clone())?.0 {
            AddVersionResult::ExpectedParentVersion(_) => {
                panic!("should have accepted the version")
            }
            AddVersionResult::Ok(version_id) => {
                let new_version = server.get_child_version(parent_version_id)?;
                assert_eq!(
                    new_version,
                    GetVersionResult::Version {
                        version_id,
                        parent_version_id,
                        history_segment: history,
                    }
                );
            }
        }
        Ok(())
    }

    /// Once a latest version exists, a version naming a different parent is
    /// rejected.
    #[test]
    fn test_add_nonzero_base_forbidden() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut server = LocalServer::new(tmp_dir.path())?;
        let history = b"1234".to_vec();
        let parent_version_id: VersionId = Uuid::new_v4();

        // add a version
        if let (AddVersionResult::ExpectedParentVersion(_), SnapshotUrgency::None) =
            server.add_version(parent_version_id, history.clone())?
        {
            panic!("should have accepted the version")
        }

        // then add another, not based on that one
        if let (AddVersionResult::Ok(_), SnapshotUrgency::None) =
            server.add_version(parent_version_id, history.clone())?
        {
            panic!("should not have accepted the version")
        }
        Ok(())
    }
}

View file

@ -0,0 +1,26 @@
/**
This module defines the client interface to TaskChampion sync servers.
It defines a [trait](crate::server::Server) for servers, and implements both local and remote servers.
Typical uses of this crate do not interact directly with this module; [`ServerConfig`](crate::ServerConfig) is sufficient.
However, users who wish to implement their own server interfaces can implement the traits defined here and pass the result to [`Replica`](crate::Replica).
*/
#[cfg(test)]
pub(crate) mod test;
mod config;
mod crypto;
mod local;
mod op;
mod remote;
mod types;
pub use config::ServerConfig;
pub use local::LocalServer;
pub use remote::RemoteServer;
pub use types::*;
pub(crate) use op::SyncOp;

View file

@ -0,0 +1,420 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// A SyncOp defines a single change to the task database, that can be synchronized
/// via a server.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum SyncOp {
    /// Create a new task.
    ///
    /// On application, if the task already exists, the operation does nothing.
    Create { uuid: Uuid },

    /// Delete an existing task.
    ///
    /// On application, if the task does not exist, the operation does nothing.
    Delete { uuid: Uuid },

    /// Update an existing task, setting the given property to the given value. If the value is
    /// None, then the corresponding property is deleted.
    ///
    /// If the given task does not exist, the operation does nothing.
    ///
    /// The timestamp is used by [`SyncOp::transform`] to decide which of two
    /// concurrent updates to the same property wins (the later one).
    Update {
        uuid: Uuid,
        property: String,
        value: Option<String>,
        timestamp: DateTime<Utc>,
    },
}
use SyncOp::*;
impl SyncOp {
    // Transform takes two operations A and B that happened concurrently and produces two
    // operations A' and B' such that `apply(apply(S, A), B') = apply(apply(S, B), A')`. This
    // function is used to serialize operations in a process similar to a Git "rebase".
    //
    //        *
    //       / \
    // op1  /   \  op2
    //     /     \
    //    *       *
    //
    // this function "completes the diamond":
    //
    //    *       *
    //     \     /
    // op2' \   / op1'
    //       \ /
    //        *
    //
    // such that applying op2' after op1 has the same effect as applying op1' after op2.  This
    // allows two different systems which have already applied op1 and op2, respectively, and thus
    // reached different states, to return to the same state by applying op2' and op1',
    // respectively.
    //
    // NOTE: the match arms below are order-dependent — the guarded same-uuid
    // cases must be tried before the catch-all — so the arm order must be
    // preserved.
    pub fn transform(operation1: SyncOp, operation2: SyncOp) -> (Option<SyncOp>, Option<SyncOp>) {
        match (&operation1, &operation2) {
            // Two creations or deletions of the same uuid reach the same state, so there's no need
            // for any further operations to bring the state together.
            (&Create { uuid: uuid1 }, &Create { uuid: uuid2 }) if uuid1 == uuid2 => (None, None),
            (&Delete { uuid: uuid1 }, &Delete { uuid: uuid2 }) if uuid1 == uuid2 => (None, None),

            // Given a create and a delete of the same task, one of the operations is invalid: the
            // create implies the task does not exist, but the delete implies it exists. Somewhat
            // arbitrarily, we prefer the Create
            (&Create { uuid: uuid1 }, &Delete { uuid: uuid2 }) if uuid1 == uuid2 => {
                (Some(operation1), None)
            }
            (&Delete { uuid: uuid1 }, &Create { uuid: uuid2 }) if uuid1 == uuid2 => {
                (None, Some(operation2))
            }

            // And again from an Update and a Create, prefer the Update
            (&Update { uuid: uuid1, .. }, &Create { uuid: uuid2 }) if uuid1 == uuid2 => {
                (Some(operation1), None)
            }
            (&Create { uuid: uuid1 }, &Update { uuid: uuid2, .. }) if uuid1 == uuid2 => {
                (None, Some(operation2))
            }

            // Given a delete and an update, prefer the delete
            (&Update { uuid: uuid1, .. }, &Delete { uuid: uuid2 }) if uuid1 == uuid2 => {
                (None, Some(operation2))
            }
            (&Delete { uuid: uuid1 }, &Update { uuid: uuid2, .. }) if uuid1 == uuid2 => {
                (Some(operation1), None)
            }

            // Two updates to the same property of the same task might conflict.
            (
                &Update {
                    uuid: ref uuid1,
                    property: ref property1,
                    value: ref value1,
                    timestamp: ref timestamp1,
                },
                &Update {
                    uuid: ref uuid2,
                    property: ref property2,
                    value: ref value2,
                    timestamp: ref timestamp2,
                },
            ) if uuid1 == uuid2 && property1 == property2 => {
                // if the value is the same, there's no conflict
                if value1 == value2 {
                    (None, None)
                } else if timestamp1 < timestamp2 {
                    // prefer the later modification
                    (None, Some(operation2))
                } else {
                    // prefer the later modification or, if the modifications are the same,
                    // just choose one of them
                    (Some(operation1), None)
                }
            }

            // anything else is not a conflict of any sort, so return the operations unchanged
            (_, _) => (Some(operation1), Some(operation2)),
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::storage::InMemoryStorage;
    use crate::taskdb::TaskDb;
    use chrono::{Duration, Utc};
    use pretty_assertions::assert_eq;
    use proptest::prelude::*;

    #[test]
    fn test_json_create() -> anyhow::Result<()> {
        let uuid = Uuid::new_v4();
        let op = Create { uuid };
        let json = serde_json::to_string(&op)?;
        assert_eq!(json, format!(r#"{{"Create":{{"uuid":"{}"}}}}"#, uuid));
        let deser: SyncOp = serde_json::from_str(&json)?;
        assert_eq!(deser, op);
        Ok(())
    }

    #[test]
    fn test_json_delete() -> anyhow::Result<()> {
        let uuid = Uuid::new_v4();
        let op = Delete { uuid };
        let json = serde_json::to_string(&op)?;
        assert_eq!(json, format!(r#"{{"Delete":{{"uuid":"{}"}}}}"#, uuid));
        let deser: SyncOp = serde_json::from_str(&json)?;
        assert_eq!(deser, op);
        Ok(())
    }

    #[test]
    fn test_json_update() -> anyhow::Result<()> {
        let uuid = Uuid::new_v4();
        let timestamp = Utc::now();

        let op = Update {
            uuid,
            property: "abc".into(),
            value: Some("false".into()),
            timestamp,
        };

        let json = serde_json::to_string(&op)?;
        assert_eq!(
            json,
            format!(
                r#"{{"Update":{{"uuid":"{}","property":"abc","value":"false","timestamp":"{:?}"}}}}"#,
                uuid, timestamp,
            )
        );
        let deser: SyncOp = serde_json::from_str(&json)?;
        assert_eq!(deser, op);
        Ok(())
    }

    #[test]
    fn test_json_update_none() -> anyhow::Result<()> {
        let uuid = Uuid::new_v4();
        let timestamp = Utc::now();

        let op = Update {
            uuid,
            property: "abc".into(),
            value: None,
            timestamp,
        };

        let json = serde_json::to_string(&op)?;
        assert_eq!(
            json,
            format!(
                r#"{{"Update":{{"uuid":"{}","property":"abc","value":null,"timestamp":"{:?}"}}}}"#,
                uuid, timestamp,
            )
        );
        let deser: SyncOp = serde_json::from_str(&json)?;
        assert_eq!(deser, op);
        Ok(())
    }

    /// Assert that `transform(o1, o2)` produces exactly `(exp1p, exp2p)`, and
    /// that applying the transformed operations after the originals converges
    /// two databases (optionally prepared with `setup`) to the same state.
    fn test_transform(
        setup: Option<SyncOp>,
        o1: SyncOp,
        o2: SyncOp,
        exp1p: Option<SyncOp>,
        exp2p: Option<SyncOp>,
    ) {
        let (o1p, o2p) = SyncOp::transform(o1.clone(), o2.clone());
        assert_eq!((&o1p, &o2p), (&exp1p, &exp2p));

        // check that the two operation sequences have the same effect, enforcing the invariant of
        // the transform function.
        let mut db1 = TaskDb::new_inmemory();
        if let Some(ref o) = setup {
            db1.apply(o.clone()).unwrap();
        }
        db1.apply(o1).unwrap();
        if let Some(o) = o2p {
            db1.apply(o).unwrap();
        }

        let mut db2 = TaskDb::new_inmemory();
        if let Some(ref o) = setup {
            db2.apply(o.clone()).unwrap();
        }
        db2.apply(o2).unwrap();
        if let Some(o) = o1p {
            db2.apply(o).unwrap();
        }

        assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());
    }

    #[test]
    fn test_unrelated_create() {
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        test_transform(
            None,
            Create { uuid: uuid1 },
            Create { uuid: uuid2 },
            Some(Create { uuid: uuid1 }),
            Some(Create { uuid: uuid2 }),
        );
    }

    #[test]
    fn test_related_updates_different_props() {
        let uuid = Uuid::new_v4();
        let timestamp = Utc::now();
        test_transform(
            Some(Create { uuid }),
            Update {
                uuid,
                property: "abc".into(),
                value: Some("true".into()),
                timestamp,
            },
            Update {
                uuid,
                property: "def".into(),
                value: Some("false".into()),
                timestamp,
            },
            Some(Update {
                uuid,
                property: "abc".into(),
                value: Some("true".into()),
                timestamp,
            }),
            Some(Update {
                uuid,
                property: "def".into(),
                value: Some("false".into()),
                timestamp,
            }),
        );
    }

    #[test]
    fn test_related_updates_same_prop() {
        let uuid = Uuid::new_v4();
        let timestamp1 = Utc::now();
        let timestamp2 = timestamp1 + Duration::seconds(10);
        test_transform(
            Some(Create { uuid }),
            Update {
                uuid,
                property: "abc".into(),
                value: Some("true".into()),
                timestamp: timestamp1,
            },
            Update {
                uuid,
                property: "abc".into(),
                value: Some("false".into()),
                timestamp: timestamp2,
            },
            // the later modification (timestamp2) wins
            None,
            Some(Update {
                uuid,
                property: "abc".into(),
                value: Some("false".into()),
                timestamp: timestamp2,
            }),
        );
    }

    #[test]
    fn test_related_updates_same_prop_same_time() {
        let uuid = Uuid::new_v4();
        let timestamp = Utc::now();
        test_transform(
            Some(Create { uuid }),
            Update {
                uuid,
                property: "abc".into(),
                value: Some("true".into()),
                timestamp,
            },
            Update {
                uuid,
                property: "abc".into(),
                value: Some("false".into()),
                timestamp,
            },
            // on a timestamp tie, the first operation is (arbitrarily) kept
            Some(Update {
                uuid,
                property: "abc".into(),
                value: Some("true".into()),
                timestamp,
            }),
            None,
        );
    }

    /// A small fixed pool of UUIDs, so that randomly generated operations
    /// frequently collide on the same task and exercise the conflict arms.
    fn uuid_strategy() -> impl Strategy<Value = Uuid> {
        prop_oneof![
            Just(Uuid::parse_str("83a2f9ef-f455-4195-b92e-a54c161eebfc").unwrap()),
            Just(Uuid::parse_str("56e0be07-c61f-494c-a54c-bdcfdd52d2a7").unwrap()),
            Just(Uuid::parse_str("4b7ed904-f7b0-4293-8a10-ad452422c7b3").unwrap()),
            Just(Uuid::parse_str("9bdd0546-07c8-4e1f-a9bc-9d6299f4773b").unwrap()),
        ]
    }

    /// Generate an arbitrary Create, Delete, or Update over the UUID pool.
    fn operation_strategy() -> impl Strategy<Value = SyncOp> {
        prop_oneof![
            uuid_strategy().prop_map(|uuid| Create { uuid }),
            uuid_strategy().prop_map(|uuid| Delete { uuid }),
            (uuid_strategy(), "(title|project|status)").prop_map(|(uuid, property)| {
                Update {
                    uuid,
                    property,
                    value: Some("true".into()),
                    timestamp: Utc::now(),
                }
            }),
        ]
    }

    proptest! {
        #![proptest_config(ProptestConfig {
          cases: 1024, .. ProptestConfig::default()
        })]
        #[test]
        // check that the two operation sequences have the same effect, enforcing the invariant of
        // the transform function.
        fn transform_invariant_holds(o1 in operation_strategy(), o2 in operation_strategy()) {
            let (o1p, o2p) = SyncOp::transform(o1.clone(), o2.clone());

            let mut db1 = TaskDb::new(Box::new(InMemoryStorage::new()));
            let mut db2 = TaskDb::new(Box::new(InMemoryStorage::new()));

            // Ensure that any expected tasks already exist
            if let Update{ uuid, .. } = o1 {
                let _ = db1.apply(Create{uuid});
                let _ = db2.apply(Create{uuid});
            }

            if let Update{ uuid, .. } = o2 {
                let _ = db1.apply(Create{uuid});
                let _ = db2.apply(Create{uuid});
            }

            if let Delete{ uuid } = o1 {
                let _ = db1.apply(Create{uuid});
                let _ = db2.apply(Create{uuid});
            }

            if let Delete{ uuid } = o2 {
                let _ = db1.apply(Create{uuid});
                let _ = db2.apply(Create{uuid});
            }

            // if applying the initial operations fail, that indicates the operation was invalid
            // in the base state, so consider the case successful.
            if db1.apply(o1).is_err() {
                return Ok(());
            }
            if db2.apply(o2).is_err() {
                return Ok(());
            }

            if let Some(o) = o2p {
                db1.apply(o).map_err(|e| TestCaseError::Fail(format!("Applying to db1: {}", e).into()))?;
            }
            if let Some(o) = o1p {
                db2.apply(o).map_err(|e| TestCaseError::Fail(format!("Applying to db2: {}", e).into()))?;
            }
            assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());
        }
    }
}

View file

@ -0,0 +1,176 @@
use crate::server::{
AddVersionResult, GetVersionResult, HistorySegment, Server, Snapshot, SnapshotUrgency,
VersionId,
};
use std::time::Duration;
use uuid::Uuid;
use super::crypto::{Cryptor, Sealed, Secret, Unsealed};
/// A client for a remote sync server, communicating over HTTP via `ureq`.
pub struct RemoteServer {
    /// Server origin, e.g. `https://tcsync.example.com` (no trailing slash).
    origin: String,
    /// Identifies this client to the server; sent as the `X-Client-Key` header.
    client_key: Uuid,
    /// Seals payloads sent to, and unseals payloads received from, the server.
    cryptor: Cryptor,
    agent: ureq::Agent,
}

/// The content-type for history segments (opaque blobs of bytes)
const HISTORY_SEGMENT_CONTENT_TYPE: &str = "application/vnd.taskchampion.history-segment";

/// The content-type for snapshots (opaque blobs of bytes)
const SNAPSHOT_CONTENT_TYPE: &str = "application/vnd.taskchampion.snapshot";
/// A RemoteServer communicates with a remote server over HTTP (such as with
/// taskchampion-sync-server).
impl RemoteServer {
    /// Construct a new RemoteServer. The `origin` is the sync server's protocol and hostname
    /// without a trailing slash, such as `https://tcsync.example.com`. Pass a client_key to
    /// identify this client to the server. Multiple replicas synchronizing the same task history
    /// should use the same client_key.
    pub fn new(
        origin: String,
        client_key: Uuid,
        encryption_secret: Vec<u8>,
    ) -> anyhow::Result<RemoteServer> {
        Ok(RemoteServer {
            origin,
            client_key,
            // `encryption_secret` is already owned, so move it into the Secret
            // directly rather than copying it with `to_vec()`
            cryptor: Cryptor::new(client_key, &Secret(encryption_secret))?,
            agent: ureq::AgentBuilder::new()
                .timeout_connect(Duration::from_secs(10))
                .timeout_read(Duration::from_secs(60))
                .build(),
        })
    }
}
/// Read a UUID-bearing header or fail trying
fn get_uuid_header(resp: &ureq::Response, name: &str) -> anyhow::Result<Uuid> {
    // a missing header and an unparseable header are distinct errors, so the
    // failure message says exactly what was wrong with the response
    match resp.header(name) {
        None => Err(anyhow::anyhow!("Response does not have {} header", name)),
        Some(value) => Uuid::parse_str(value)
            .map_err(|e| anyhow::anyhow!("{} header is not a valid UUID: {}", name, e)),
    }
}
/// Read the X-Snapshot-Request header and return a SnapshotUrgency
fn get_snapshot_urgency(resp: &ureq::Response) -> SnapshotUrgency {
    // an absent header and an unrecognized value are both treated as
    // "no snapshot requested"
    match resp.header("X-Snapshot-Request") {
        Some("urgency=low") => SnapshotUrgency::Low,
        Some("urgency=high") => SnapshotUrgency::High,
        _ => SnapshotUrgency::None,
    }
}
impl Server for RemoteServer {
    /// Seal and upload a history segment as a child of `parent_version_id`.
    ///
    /// A 409 response means the server expected a different parent version;
    /// that version is returned as `AddVersionResult::ExpectedParentVersion`.
    fn add_version(
        &mut self,
        parent_version_id: VersionId,
        history_segment: HistorySegment,
    ) -> anyhow::Result<(AddVersionResult, SnapshotUrgency)> {
        let url = format!(
            "{}/v1/client/add-version/{}",
            self.origin, parent_version_id
        );
        // the payload is sealed under the parent version_id
        let unsealed = Unsealed {
            version_id: parent_version_id,
            payload: history_segment,
        };
        let sealed = self.cryptor.seal(unsealed)?;
        match self
            .agent
            .post(&url)
            .set("Content-Type", HISTORY_SEGMENT_CONTENT_TYPE)
            .set("X-Client-Key", &self.client_key.to_string())
            .send_bytes(sealed.as_ref())
        {
            Ok(resp) => {
                let version_id = get_uuid_header(&resp, "X-Version-Id")?;
                Ok((
                    AddVersionResult::Ok(version_id),
                    get_snapshot_urgency(&resp),
                ))
            }
            // 409 Conflict: server is at a different parent version
            Err(ureq::Error::Status(status, resp)) if status == 409 => {
                let parent_version_id = get_uuid_header(&resp, "X-Parent-Version-Id")?;
                Ok((
                    AddVersionResult::ExpectedParentVersion(parent_version_id),
                    SnapshotUrgency::None,
                ))
            }
            Err(err) => Err(err.into()),
        }
    }

    /// Fetch and unseal the version whose parent is `parent_version_id`;
    /// a 404 response maps to `GetVersionResult::NoSuchVersion`.
    fn get_child_version(
        &mut self,
        parent_version_id: VersionId,
    ) -> anyhow::Result<GetVersionResult> {
        let url = format!(
            "{}/v1/client/get-child-version/{}",
            self.origin, parent_version_id
        );
        match self
            .agent
            .get(&url)
            .set("X-Client-Key", &self.client_key.to_string())
            .call()
        {
            Ok(resp) => {
                let parent_version_id = get_uuid_header(&resp, "X-Parent-Version-Id")?;
                let version_id = get_uuid_header(&resp, "X-Version-Id")?;
                let sealed =
                    Sealed::from_resp(resp, parent_version_id, HISTORY_SEGMENT_CONTENT_TYPE)?;
                let history_segment = self.cryptor.unseal(sealed)?.payload;
                Ok(GetVersionResult::Version {
                    version_id,
                    parent_version_id,
                    history_segment,
                })
            }
            Err(ureq::Error::Status(status, _)) if status == 404 => {
                Ok(GetVersionResult::NoSuchVersion)
            }
            Err(err) => Err(err.into()),
        }
    }

    /// Seal and upload a snapshot taken at `version_id`.
    fn add_snapshot(&mut self, version_id: VersionId, snapshot: Snapshot) -> anyhow::Result<()> {
        let url = format!("{}/v1/client/add-snapshot/{}", self.origin, version_id);
        let unsealed = Unsealed {
            version_id,
            payload: snapshot,
        };
        let sealed = self.cryptor.seal(unsealed)?;
        Ok(self
            .agent
            .post(&url)
            .set("Content-Type", SNAPSHOT_CONTENT_TYPE)
            .set("X-Client-Key", &self.client_key.to_string())
            .send_bytes(sealed.as_ref())
            .map(|_| ())?)
    }

    /// Fetch and unseal the latest snapshot, if the server has one
    /// (404 maps to `None`).
    fn get_snapshot(&mut self) -> anyhow::Result<Option<(VersionId, Snapshot)>> {
        let url = format!("{}/v1/client/snapshot", self.origin);
        match self
            .agent
            .get(&url)
            .set("X-Client-Key", &self.client_key.to_string())
            .call()
        {
            Ok(resp) => {
                let version_id = get_uuid_header(&resp, "X-Version-Id")?;
                let sealed = Sealed::from_resp(resp, version_id, SNAPSHOT_CONTENT_TYPE)?;
                let snapshot = self.cryptor.unseal(sealed)?.payload;
                Ok(Some((version_id, snapshot)))
            }
            Err(ureq::Error::Status(status, _)) if status == 404 => Ok(None),
            Err(err) => Err(err.into()),
        }
    }
}

View file

@ -0,0 +1,2 @@
$á­
†—Õ^~B>n)ji†¯1—î9™|µœÓ~

View file

@ -0,0 +1 @@
<01>ΝA4φ―Γθ t;Δτ υηp¦Ο¦x^Αύreό…<CF8C>JΤ¤<CEA4>

View file

@ -0,0 +1 @@
/}åd E°‡dIcÁXéè-‡!V°Û%è4îáòd]³ÃÇ}

View file

@ -0,0 +1 @@
lΰζδa|ο@Ο<>S_¬…γzέV9£q¦Ρ…‘)+¦

View file

@ -0,0 +1 @@
c╙╤TH╗Гp>╔╚Ф╨╕m4О╧к~в1╣0P░IЖ╢W╒

View file

@ -0,0 +1,2 @@
B∙
Ат-в3%╕jё,*ъ╨7Й╘√QьKЗO╕°FPZщ

View file

@ -0,0 +1 @@
pÑ¿µÒŸ½V²ûÝäToë"}cT·äY7Æ ˆÀ@ÙdLTý`Ò

View file

@ -0,0 +1,133 @@
use crate::server::{
AddVersionResult, GetVersionResult, HistorySegment, Server, Snapshot, SnapshotUrgency,
VersionId, NIL_VERSION_ID,
};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use uuid::Uuid;
/// A single version held by the TestServer, stored in `Inner::versions`.
struct Version {
    version_id: VersionId,
    parent_version_id: VersionId,
    history_segment: HistorySegment,
}
/// TestServer implements the Server trait with a test implementation.
///
/// Clones share the same underlying state (`Arc<Mutex<..>>`), so a test can
/// keep one handle while passing a boxed clone to the code under test
/// (see `TestServer::server`).
#[derive(Clone)]
pub(crate) struct TestServer(Arc<Mutex<Inner>>);
/// The shared mutable state behind a [`TestServer`].
pub(crate) struct Inner {
    latest_version_id: VersionId,
    // NOTE: indexed by parent_version_id!
    versions: HashMap<VersionId, Version>,
    // urgency to report from the next add_version call
    snapshot_urgency: SnapshotUrgency,
    // latest snapshot received via add_snapshot, if any
    snapshot: Option<(VersionId, Snapshot)>,
}
impl TestServer {
    /// A test server has no notion of clients, signatures, encryption, etc.
    pub(crate) fn new() -> TestServer {
        TestServer(Arc::new(Mutex::new(Inner {
            latest_version_id: NIL_VERSION_ID,
            versions: HashMap::new(),
            snapshot_urgency: SnapshotUrgency::None,
            snapshot: None,
        })))
    }
    // feel free to add any test utility functions here

    /// Get a boxed Server implementation referring to this TestServer
    pub(crate) fn server(&self) -> Box<dyn Server> {
        Box::new(self.clone())
    }

    /// Set the urgency that the next `add_version` call will report.
    pub(crate) fn set_snapshot_urgency(&self, urgency: SnapshotUrgency) {
        let mut inner = self.0.lock().unwrap();
        inner.snapshot_urgency = urgency;
    }

    /// Get the latest snapshot added to this server
    pub(crate) fn snapshot(&self) -> Option<(VersionId, Snapshot)> {
        let inner = self.0.lock().unwrap();
        // `Option<(Uuid, Vec<u8>)>` is Clone, so clone it directly rather than
        // the roundabout `as_ref().cloned()`
        inner.snapshot.clone()
    }

    /// Delete a version from storage
    pub(crate) fn delete_version(&mut self, parent_version_id: VersionId) {
        let mut inner = self.0.lock().unwrap();
        inner.versions.remove(&parent_version_id);
    }
}
impl Server for TestServer {
    /// Add a new version. If the given version number is incorrect, this responds with the
    /// appropriate version and expects the caller to try again.
    fn add_version(
        &mut self,
        parent_version_id: VersionId,
        history_segment: HistorySegment,
    ) -> anyhow::Result<(AddVersionResult, SnapshotUrgency)> {
        let mut inner = self.0.lock().unwrap();

        // no client lookup
        // no signature validation

        // check the parent_version_id for linearity
        if inner.latest_version_id != NIL_VERSION_ID && parent_version_id != inner.latest_version_id
        {
            return Ok((
                AddVersionResult::ExpectedParentVersion(inner.latest_version_id),
                SnapshotUrgency::None,
            ));
        }

        // invent a new ID for this version
        let version_id = Uuid::new_v4();
        inner.versions.insert(
            parent_version_id,
            Version {
                version_id,
                parent_version_id,
                history_segment,
            },
        );
        inner.latest_version_id = version_id;

        // reply with the configured urgency and reset it to None
        let urgency = inner.snapshot_urgency;
        inner.snapshot_urgency = SnapshotUrgency::None;
        Ok((AddVersionResult::Ok(version_id), urgency))
    }

    /// Get the version whose parent is `parent_version_id`, if any.
    /// (Versions are indexed by parent_version_id, so this is one map lookup.)
    fn get_child_version(
        &mut self,
        parent_version_id: VersionId,
    ) -> anyhow::Result<GetVersionResult> {
        let inner = self.0.lock().unwrap();
        if let Some(version) = inner.versions.get(&parent_version_id) {
            Ok(GetVersionResult::Version {
                version_id: version.version_id,
                parent_version_id: version.parent_version_id,
                history_segment: version.history_segment.clone(),
            })
        } else {
            Ok(GetVersionResult::NoSuchVersion)
        }
    }

    fn add_snapshot(&mut self, version_id: VersionId, snapshot: Snapshot) -> anyhow::Result<()> {
        let mut inner = self.0.lock().unwrap();

        // test implementation -- does not perform any validation
        inner.snapshot = Some((version_id, snapshot));
        Ok(())
    }

    fn get_snapshot(&mut self) -> anyhow::Result<Option<(VersionId, Snapshot)>> {
        let inner = self.0.lock().unwrap();
        Ok(inner.snapshot.clone())
    }
}

View file

@ -0,0 +1,70 @@
use uuid::Uuid;

/// Versions are referred to by UUID.
pub type VersionId = Uuid;

/// The distinguished value for "no version"
pub const NIL_VERSION_ID: VersionId = Uuid::nil();

/// A segment in the history of this task database, in the form of a sequence of operations. This
/// data is pre-encoded, and from the protocol level appears as a sequence of bytes.
pub type HistorySegment = Vec<u8>;

/// A snapshot of the state of the task database. This is encoded by the taskdb implementation
/// and treated as a sequence of bytes by the server implementation.
pub type Snapshot = Vec<u8>;
/// AddVersionResult is the response type from [`crate::server::Server::add_version`].
#[derive(Debug, PartialEq)]
pub enum AddVersionResult {
    /// OK, version added with the given ID
    Ok(VersionId),
    /// Rejected; expected a version with the given parent version.  The caller
    /// is expected to fetch and apply the missing versions, then try again.
    ExpectedParentVersion(VersionId),
}
/// SnapshotUrgency indicates how much the server would like this replica to send a snapshot.
///
/// For remote servers this is derived from the `X-Snapshot-Request` response
/// header; the `Ord` derive makes urgencies comparable (None < Low < High).
#[derive(PartialEq, Debug, Clone, Copy, Eq, PartialOrd, Ord)]
pub enum SnapshotUrgency {
    /// Don't need a snapshot right now.
    None,
    /// A snapshot would be good, but can wait for other replicas to provide it.
    Low,
    /// A snapshot is needed right now.
    High,
}
/// A version as downloaded from the server
#[derive(Debug, PartialEq)]
pub enum GetVersionResult {
    /// No such version exists
    NoSuchVersion,

    /// The requested version
    Version {
        version_id: VersionId,
        parent_version_id: VersionId,
        history_segment: HistorySegment,
    },
}
/// A value implementing this trait can act as a server against which a replica can sync.
pub trait Server {
    /// Add a new version.
    fn add_version(
        &mut self,
        parent_version_id: VersionId,
        history_segment: HistorySegment,
    ) -> anyhow::Result<(AddVersionResult, SnapshotUrgency)>;

    /// Get the version with the given parent VersionId
    fn get_child_version(
        &mut self,
        parent_version_id: VersionId,
    ) -> anyhow::Result<GetVersionResult>;

    /// Add a snapshot on the server
    fn add_snapshot(&mut self, version_id: VersionId, snapshot: Snapshot) -> anyhow::Result<()>;

    /// Get the latest snapshot from the server, if any, paired with the
    /// version ID it was added under.
    fn get_snapshot(&mut self) -> anyhow::Result<Option<(VersionId, Snapshot)>>;
}

View file

@ -0,0 +1,22 @@
use super::{InMemoryStorage, SqliteStorage, Storage};
use std::path::PathBuf;
/// The configuration required for a replica's storage.
///
/// Convert to a concrete storage implementation with
/// [`StorageConfig::into_storage`].
pub enum StorageConfig {
    /// Store the data on disk. This is the common choice.
    OnDisk {
        /// Path containing the task DB.
        taskdb_dir: PathBuf,
    },
    /// Store the data in memory. This is only useful for testing.
    InMemory,
}
impl StorageConfig {
    /// Consume this configuration and construct the corresponding boxed
    /// [`Storage`] implementation.
    pub fn into_storage(self) -> anyhow::Result<Box<dyn Storage>> {
        let storage: Box<dyn Storage> = match self {
            StorageConfig::OnDisk { taskdb_dir } => Box::new(SqliteStorage::new(taskdb_dir)?),
            StorageConfig::InMemory => Box::new(InMemoryStorage::new()),
        };
        Ok(storage)
    }
}

View file

@ -0,0 +1,242 @@
#![allow(clippy::new_without_default)]
use crate::storage::{ReplicaOp, Storage, StorageTxn, TaskMap, VersionId, DEFAULT_BASE_VERSION};
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use uuid::Uuid;
// The complete state of an in-memory replica. A transaction clones this on
// its first write and `commit` swaps the clone back in.
#[derive(PartialEq, Debug, Clone)]
struct Data {
    // All tasks, keyed by UUID.
    tasks: HashMap<Uuid, TaskMap>,
    // The last version synced from the server.
    base_version: VersionId,
    // Operations not yet synced to the server, in order.
    operations: Vec<ReplicaOp>,
    // Working set; slot 0 is always None so user-visible indexes are 1-based.
    working_set: Vec<Option<Uuid>>,
}
// A transaction over an `InMemoryStorage`.
struct Txn<'t> {
    // The storage this transaction will commit into.
    storage: &'t mut InMemoryStorage,
    // A modified copy of the storage's data; None until the first write.
    new_data: Option<Data>,
}
impl<'t> Txn<'t> {
    /// Get a mutable reference to this transaction's data, lazily cloning the
    /// storage's committed data on the first write (copy-on-first-write at
    /// transaction granularity; `commit` swaps the copy back in).
    fn mut_data_ref(&mut self) -> &mut Data {
        if self.new_data.is_none() {
            self.new_data = Some(self.storage.data.clone());
        }
        // The Option was filled just above, so this cannot panic; this
        // replaces the original `if let / else { unreachable!() }` dance.
        self.new_data.as_mut().expect("new_data was just set")
    }

    /// Get a read-only reference to the data, preferring this transaction's
    /// modified copy if one exists.
    fn data_ref(&mut self) -> &Data {
        self.new_data.as_ref().unwrap_or(&self.storage.data)
    }
}
// In-memory implementation: reads go through `data_ref` (no copy until a
// write happens); writes go through `mut_data_ref`, and `commit` publishes
// the modified copy back into the storage.
impl<'t> StorageTxn for Txn<'t> {
    fn get_task(&mut self, uuid: Uuid) -> anyhow::Result<Option<TaskMap>> {
        match self.data_ref().tasks.get(&uuid) {
            None => Ok(None),
            Some(t) => Ok(Some(t.clone())),
        }
    }

    fn create_task(&mut self, uuid: Uuid) -> anyhow::Result<bool> {
        // The entry API checks existence and inserts in a single lookup.
        if let ent @ Entry::Vacant(_) = self.mut_data_ref().tasks.entry(uuid) {
            ent.or_insert_with(TaskMap::new);
            Ok(true)
        } else {
            Ok(false)
        }
    }

    fn set_task(&mut self, uuid: Uuid, task: TaskMap) -> anyhow::Result<()> {
        self.mut_data_ref().tasks.insert(uuid, task);
        Ok(())
    }

    fn delete_task(&mut self, uuid: Uuid) -> anyhow::Result<bool> {
        Ok(self.mut_data_ref().tasks.remove(&uuid).is_some())
    }

    // NOTE: the stray unused `<'a>` lifetime parameters the original carried
    // on this method and the next have been dropped to match the trait.
    fn all_tasks(&mut self) -> anyhow::Result<Vec<(Uuid, TaskMap)>> {
        Ok(self
            .data_ref()
            .tasks
            .iter()
            .map(|(u, t)| (*u, t.clone()))
            .collect())
    }

    fn all_task_uuids(&mut self) -> anyhow::Result<Vec<Uuid>> {
        Ok(self.data_ref().tasks.keys().copied().collect())
    }

    fn base_version(&mut self) -> anyhow::Result<VersionId> {
        Ok(self.data_ref().base_version)
    }

    fn set_base_version(&mut self, version: VersionId) -> anyhow::Result<()> {
        self.mut_data_ref().base_version = version;
        Ok(())
    }

    fn operations(&mut self) -> anyhow::Result<Vec<ReplicaOp>> {
        Ok(self.data_ref().operations.clone())
    }

    fn num_operations(&mut self) -> anyhow::Result<usize> {
        Ok(self.data_ref().operations.len())
    }

    fn add_operation(&mut self, op: ReplicaOp) -> anyhow::Result<()> {
        self.mut_data_ref().operations.push(op);
        Ok(())
    }

    fn set_operations(&mut self, ops: Vec<ReplicaOp>) -> anyhow::Result<()> {
        self.mut_data_ref().operations = ops;
        Ok(())
    }

    fn get_working_set(&mut self) -> anyhow::Result<Vec<Option<Uuid>>> {
        Ok(self.data_ref().working_set.clone())
    }

    fn add_to_working_set(&mut self, uuid: Uuid) -> anyhow::Result<usize> {
        let working_set = &mut self.mut_data_ref().working_set;
        working_set.push(Some(uuid));
        // BUGFIX: the new element's one-based index is its vector position,
        // i.e. len() - 1 (slot 0 is the permanent None placeholder).
        // Returning len() was off by one and disagreed with
        // SqliteStorage::add_to_working_set, which returns MAX(id) + 1
        // computed *before* the insert (1 for the first element).
        Ok(working_set.len() - 1)
    }

    fn set_working_set_item(&mut self, index: usize, uuid: Option<Uuid>) -> anyhow::Result<()> {
        let working_set = &mut self.mut_data_ref().working_set;
        if index >= working_set.len() {
            anyhow::bail!("Index {} is not in the working set", index);
        }
        working_set[index] = uuid;
        Ok(())
    }

    fn clear_working_set(&mut self) -> anyhow::Result<()> {
        // Reset to just the unused 0th slot, keeping indexes 1-based.
        self.mut_data_ref().working_set = vec![None];
        Ok(())
    }

    fn commit(&mut self) -> anyhow::Result<()> {
        // copy the new_data back into storage to commit the transaction
        if let Some(data) = self.new_data.take() {
            self.storage.data = data;
        }
        Ok(())
    }
}
/// InMemoryStorage is a simple in-memory task storage implementation. It is not useful for
/// production data, but is useful for testing purposes.
#[derive(PartialEq, Debug, Clone)]
pub struct InMemoryStorage {
    // The committed state; transactions clone this and write it back on commit.
    data: Data,
}
impl InMemoryStorage {
    /// Construct an empty storage: no tasks, no outstanding operations, the
    /// default base version, and a working set containing only the unused
    /// 0th slot.
    pub fn new() -> InMemoryStorage {
        let data = Data {
            tasks: HashMap::new(),
            base_version: DEFAULT_BASE_VERSION,
            operations: Vec::new(),
            working_set: vec![None],
        };
        InMemoryStorage { data }
    }
}
impl Storage for InMemoryStorage {
    // Begin a transaction; no data is copied until the transaction's first
    // write (see Txn::mut_data_ref).
    fn txn<'a>(&'a mut self) -> anyhow::Result<Box<dyn StorageTxn + 'a>> {
        Ok(Box::new(Txn {
            storage: self,
            new_data: None,
        }))
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use pretty_assertions::assert_eq;
    // (note: this module is heavily used in tests so most of its functionality is well-tested
    // elsewhere and not tested here)
    #[test]
    fn get_working_set_empty() -> anyhow::Result<()> {
        let mut storage = InMemoryStorage::new();
        {
            let mut txn = storage.txn()?;
            let ws = txn.get_working_set()?;
            // a fresh storage has only the unused 0th slot
            assert_eq!(ws, vec![None]);
        }
        Ok(())
    }
    #[test]
    fn add_to_working_set() -> anyhow::Result<()> {
        let mut storage = InMemoryStorage::new();
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            txn.add_to_working_set(uuid1)?;
            txn.add_to_working_set(uuid2)?;
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            let ws = txn.get_working_set()?;
            // items appear at 1-based indexes in insertion order
            assert_eq!(ws, vec![None, Some(uuid1), Some(uuid2)]);
        }
        Ok(())
    }
    #[test]
    fn clear_working_set() -> anyhow::Result<()> {
        let mut storage = InMemoryStorage::new();
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            txn.add_to_working_set(uuid1)?;
            txn.add_to_working_set(uuid2)?;
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            // clearing resets indexes, so re-adding in the opposite order swaps them
            txn.clear_working_set()?;
            txn.add_to_working_set(uuid2)?;
            txn.add_to_working_set(uuid1)?;
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            let ws = txn.get_working_set()?;
            assert_eq!(ws, vec![None, Some(uuid2), Some(uuid1)]);
        }
        Ok(())
    }
}

View file

@ -0,0 +1,133 @@
/**
This module defines the backend storage used by [`Replica`](crate::Replica).
It defines a [trait](crate::storage::Storage) for storage implementations, and provides a default on-disk implementation as well as an in-memory implementation for testing.
Typical uses of this crate do not interact directly with this module; [`StorageConfig`](crate::StorageConfig) is sufficient.
However, users who wish to implement their own storage backends can implement the traits defined here and pass the result to [`Replica`](crate::Replica).
*/
use anyhow::Result;
use std::collections::HashMap;
use uuid::Uuid;
mod config;
mod inmemory;
mod op;
pub(crate) mod sqlite;
pub use config::StorageConfig;
pub use inmemory::InMemoryStorage;
pub use sqlite::SqliteStorage;
pub use op::ReplicaOp;
/// An in-memory representation of a task as a simple hashmap
pub type TaskMap = HashMap<String, String>;

/// Build a [`TaskMap`] from a vector of (property, value) pairs (test helper).
///
/// Later pairs with a duplicate property overwrite earlier ones, exactly as
/// the original insert-in-order loop did.
#[cfg(test)]
fn taskmap_with(properties: Vec<(String, String)>) -> TaskMap {
    // HashMap's FromIterator does what the manual drain-and-insert loop did.
    properties.into_iter().collect()
}
/// The type of VersionIds
pub use crate::server::VersionId;
/// The default for base_version.
///
/// NOTE(review): this aliases `crate::server::NIL_VERSION_ID`; the
/// `is_empty` default method compares `base_version` against `Uuid::nil()`,
/// so this is presumably the nil UUID ("nothing synced yet") -- confirm
/// against the server module.
pub(crate) const DEFAULT_BASE_VERSION: Uuid = crate::server::NIL_VERSION_ID;
/// A Storage transaction, in which storage operations are performed.
///
/// # Concurrency
///
/// Serializable consistency must be maintained. Concurrent access is unusual
/// and some implementations may simply apply a mutex to limit access to
/// one transaction at a time.
///
/// # Committing and Aborting
///
/// A transaction is not visible to other readers until it is committed with
/// [`crate::storage::StorageTxn::commit`]. Transactions are aborted if they are dropped.
/// It is safe and performant to drop transactions that did not modify any data without committing.
pub trait StorageTxn {
    /// Get an (immutable) task, if it is in the storage
    fn get_task(&mut self, uuid: Uuid) -> Result<Option<TaskMap>>;
    /// Create an (empty) task, only if it does not already exist. Returns true if
    /// the task was created (did not already exist).
    fn create_task(&mut self, uuid: Uuid) -> Result<bool>;
    /// Set a task, overwriting any existing task. If the task does not exist, this implicitly
    /// creates it (use `get_task` to check first, if necessary).
    fn set_task(&mut self, uuid: Uuid, task: TaskMap) -> Result<()>;
    /// Delete a task, if it exists. Returns true if the task was deleted (already existed)
    fn delete_task(&mut self, uuid: Uuid) -> Result<bool>;
    /// Get the uuids and bodies of all tasks in the storage, in undefined order.
    fn all_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>>;
    /// Get the uuids of all tasks in the storage, in undefined order.
    fn all_task_uuids(&mut self) -> Result<Vec<Uuid>>;
    /// Get the current base_version for this storage -- the last version synced from the server.
    fn base_version(&mut self) -> Result<VersionId>;
    /// Set the current base_version for this storage.
    fn set_base_version(&mut self, version: VersionId) -> Result<()>;
    /// Get the current set of outstanding operations (operations that have not been sync'd to the
    /// server yet)
    fn operations(&mut self) -> Result<Vec<ReplicaOp>>;
    /// Get the number of outstanding operations (operations that have not been sync'd to the
    /// server yet), without fetching the operations themselves.
    fn num_operations(&mut self) -> Result<usize>;
    /// Add an operation to the end of the list of operations in the storage. Note that this
    /// merely *stores* the operation; it is up to the TaskDb to apply it.
    fn add_operation(&mut self, op: ReplicaOp) -> Result<()>;
    /// Replace the current list of operations with a new list.
    fn set_operations(&mut self, ops: Vec<ReplicaOp>) -> Result<()>;
    /// Get the entire working set, with each task UUID at its appropriate (1-based) index.
    /// Element 0 is always None.
    fn get_working_set(&mut self) -> Result<Vec<Option<Uuid>>>;
    /// Add a task to the working set and return its (one-based) index. This index will be one greater
    /// than the highest used index.
    fn add_to_working_set(&mut self, uuid: Uuid) -> Result<usize>;
    /// Update the working set task at the given index. This cannot add a new item to the
    /// working set.
    fn set_working_set_item(&mut self, index: usize, uuid: Option<Uuid>) -> Result<()>;
    /// Clear all tasks from the working set in preparation for a garbage-collection operation.
    /// Note that this is the only way items are removed from the set.
    fn clear_working_set(&mut self) -> Result<()>;
    /// Check whether this storage is entirely empty
    #[allow(clippy::wrong_self_convention)] // mut is required here for storage access
    fn is_empty(&mut self) -> Result<bool> {
        // `&&` short-circuits, so later queries are skipped once any check fails.
        let mut empty = true;
        empty = empty && self.all_tasks()?.is_empty();
        empty = empty && self.get_working_set()? == vec![None];
        empty = empty && self.base_version()? == Uuid::nil();
        empty = empty && self.operations()?.is_empty();
        Ok(empty)
    }
    /// Commit any changes made in the transaction. It is an error to call this more than
    /// once.
    fn commit(&mut self) -> Result<()>;
}
/// A trait for objects able to act as task storage. Most of the interesting behavior is in the
/// [`crate::storage::StorageTxn`] trait.
pub trait Storage {
    /// Begin a transaction
    ///
    /// The transaction borrows the storage mutably, so only one transaction
    /// can exist at a time.
    fn txn<'a>(&'a mut self) -> Result<Box<dyn StorageTxn + 'a>>;
}

View file

@ -0,0 +1,283 @@
use crate::server::SyncOp;
use crate::storage::TaskMap;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// A ReplicaOp defines a single change to the task database, as stored locally in the replica.
/// This contains additional information not included in SyncOp.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum ReplicaOp {
    /// Create a new task.
    ///
    /// On undo, the task is deleted.
    Create { uuid: Uuid },
    /// Delete an existing task.
    ///
    /// On undo, the task's data is restored from old_task.
    Delete { uuid: Uuid, old_task: TaskMap },
    /// Update an existing task, setting the given property to the given value. If the value is
    /// None, then the corresponding property is deleted.
    ///
    /// On undo, the property is set back to its previous value.
    Update {
        uuid: Uuid,
        property: String,
        /// Previous value, kept locally for undo; dropped when converting
        /// to a SyncOp.
        old_value: Option<String>,
        value: Option<String>,
        timestamp: DateTime<Utc>,
    },
    /// Mark a point in the operations history to which the user might like to undo. Users
    /// typically want to undo more than one operation at a time (for example, most changes update
    /// both the `modified` property and some other task property -- the user would like to "undo"
    /// both updates at the same time). Applying an UndoPoint does nothing.
    UndoPoint,
}
impl ReplicaOp {
    /// Convert this operation into a [`SyncOp`], if it has one.
    ///
    /// `UndoPoint` is purely local and yields `None`; the local-only fields
    /// of the other variants (such as `old_value`) are dropped.
    pub fn into_sync(self) -> Option<SyncOp> {
        match self {
            Self::Create { uuid } => Some(SyncOp::Create { uuid }),
            Self::Delete { uuid, .. } => Some(SyncOp::Delete { uuid }),
            Self::Update {
                uuid,
                property,
                value,
                timestamp,
                ..
            } => Some(SyncOp::Update {
                uuid,
                property,
                value,
                timestamp,
            }),
            Self::UndoPoint => None,
        }
    }

    /// Generate a sequence of SyncOp's to reverse the effects of this ReplicaOp.
    pub fn reverse_ops(self) -> Vec<SyncOp> {
        match self {
            Self::Create { uuid } => vec![SyncOp::Delete { uuid }],
            Self::Delete { uuid, old_task } => {
                // Recreate the task, then restore each stored property.
                // The original update timestamps are not available, but these
                // SyncOps are applied and discarded immediately, so "now" is
                // good enough.
                let timestamp = Utc::now();
                let restores = old_task.into_iter().map(|(property, value)| SyncOp::Update {
                    uuid,
                    property,
                    value: Some(value),
                    timestamp,
                });
                std::iter::once(SyncOp::Create { uuid })
                    .chain(restores)
                    .collect()
            }
            Self::Update {
                uuid,
                property,
                old_value,
                timestamp,
                ..
            } => {
                // Put the property back to its previous value (or remove it).
                vec![SyncOp::Update {
                    uuid,
                    property,
                    value: old_value,
                    timestamp,
                }]
            }
            Self::UndoPoint => Vec::new(),
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::storage::taskmap_with;
    use chrono::Utc;
    use pretty_assertions::assert_eq;
    use ReplicaOp::*;
    // The JSON round-trip tests pin the serialized representation; this is
    // what gets persisted in storage, so changing it would break existing
    // replicas.
    #[test]
    fn test_json_create() -> anyhow::Result<()> {
        let uuid = Uuid::new_v4();
        let op = Create { uuid };
        let json = serde_json::to_string(&op)?;
        assert_eq!(json, format!(r#"{{"Create":{{"uuid":"{}"}}}}"#, uuid));
        let deser: ReplicaOp = serde_json::from_str(&json)?;
        assert_eq!(deser, op);
        Ok(())
    }
    #[test]
    fn test_json_delete() -> anyhow::Result<()> {
        let uuid = Uuid::new_v4();
        let old_task = vec![("foo".into(), "bar".into())].drain(..).collect();
        let op = Delete { uuid, old_task };
        let json = serde_json::to_string(&op)?;
        assert_eq!(
            json,
            format!(
                r#"{{"Delete":{{"uuid":"{}","old_task":{{"foo":"bar"}}}}}}"#,
                uuid
            )
        );
        let deser: ReplicaOp = serde_json::from_str(&json)?;
        assert_eq!(deser, op);
        Ok(())
    }
    #[test]
    fn test_json_update() -> anyhow::Result<()> {
        let uuid = Uuid::new_v4();
        let timestamp = Utc::now();
        let op = Update {
            uuid,
            property: "abc".into(),
            old_value: Some("true".into()),
            value: Some("false".into()),
            timestamp,
        };
        let json = serde_json::to_string(&op)?;
        assert_eq!(
            json,
            format!(
                r#"{{"Update":{{"uuid":"{}","property":"abc","old_value":"true","value":"false","timestamp":"{:?}"}}}}"#,
                uuid, timestamp,
            )
        );
        let deser: ReplicaOp = serde_json::from_str(&json)?;
        assert_eq!(deser, op);
        Ok(())
    }
    #[test]
    fn test_json_update_none() -> anyhow::Result<()> {
        let uuid = Uuid::new_v4();
        let timestamp = Utc::now();
        let op = Update {
            uuid,
            property: "abc".into(),
            old_value: None,
            value: None,
            timestamp,
        };
        let json = serde_json::to_string(&op)?;
        assert_eq!(
            json,
            format!(
                r#"{{"Update":{{"uuid":"{}","property":"abc","old_value":null,"value":null,"timestamp":"{:?}"}}}}"#,
                uuid, timestamp,
            )
        );
        let deser: ReplicaOp = serde_json::from_str(&json)?;
        assert_eq!(deser, op);
        Ok(())
    }
    #[test]
    fn test_into_sync_create() {
        let uuid = Uuid::new_v4();
        assert_eq!(Create { uuid }.into_sync(), Some(SyncOp::Create { uuid }));
    }
    #[test]
    fn test_into_sync_delete() {
        let uuid = Uuid::new_v4();
        assert_eq!(
            Delete {
                uuid,
                old_task: TaskMap::new()
            }
            .into_sync(),
            Some(SyncOp::Delete { uuid })
        );
    }
    #[test]
    fn test_into_sync_update() {
        let uuid = Uuid::new_v4();
        let timestamp = Utc::now();
        assert_eq!(
            Update {
                uuid,
                property: "prop".into(),
                old_value: Some("foo".into()),
                value: Some("v".into()),
                timestamp,
            }
            .into_sync(),
            Some(SyncOp::Update {
                uuid,
                property: "prop".into(),
                value: Some("v".into()),
                timestamp,
            })
        );
    }
    #[test]
    fn test_into_sync_undo_point() {
        assert_eq!(UndoPoint.into_sync(), None);
    }
    #[test]
    fn test_reverse_create() {
        let uuid = Uuid::new_v4();
        assert_eq!(Create { uuid }.reverse_ops(), vec![SyncOp::Delete { uuid }]);
    }
    #[test]
    fn test_reverse_delete() {
        let uuid = Uuid::new_v4();
        let reversed = Delete {
            uuid,
            old_task: taskmap_with(vec![("prop1".into(), "v1".into())]),
        }
        .reverse_ops();
        // a Create followed by one Update per restored property
        assert_eq!(reversed.len(), 2);
        assert_eq!(reversed[0], SyncOp::Create { uuid });
        assert!(matches!(
            &reversed[1],
            SyncOp::Update { uuid: u, property: p, value: Some(v), ..}
            if u == &uuid && p == "prop1" && v == "v1"
        ));
    }
    #[test]
    fn test_reverse_update() {
        let uuid = Uuid::new_v4();
        let timestamp = Utc::now();
        assert_eq!(
            Update {
                uuid,
                property: "prop".into(),
                old_value: Some("foo".into()),
                value: Some("v".into()),
                timestamp,
            }
            .reverse_ops(),
            vec![SyncOp::Update {
                uuid,
                property: "prop".into(),
                value: Some("foo".into()),
                timestamp,
            }]
        );
    }
    #[test]
    fn test_reverse_undo_point() {
        assert_eq!(UndoPoint.reverse_ops(), vec![]);
    }
}

View file

@ -0,0 +1,810 @@
use crate::storage::{ReplicaOp, Storage, StorageTxn, TaskMap, VersionId, DEFAULT_BASE_VERSION};
use anyhow::Context;
use rusqlite::types::{FromSql, ToSql};
use rusqlite::{params, Connection, OptionalExtension};
use std::path::Path;
use uuid::Uuid;
#[derive(Debug, thiserror::Error)]
enum SqliteError {
#[error("SQLite transaction already committted")]
TransactionAlreadyCommitted,
}
/// Newtype to allow implementing `FromSql` for foreign `uuid::Uuid`
pub(crate) struct StoredUuid(pub(crate) Uuid);
/// Conversion from Uuid stored as a string (rusqlite's uuid feature stores as binary blob)
impl FromSql for StoredUuid {
    fn column_result(value: rusqlite::types::ValueRef<'_>) -> rusqlite::types::FromSqlResult<Self> {
        // Malformed UUID text is reported to rusqlite as a type error.
        let u = Uuid::parse_str(value.as_str()?)
            .map_err(|_| rusqlite::types::FromSqlError::InvalidType)?;
        Ok(StoredUuid(u))
    }
}
/// Store Uuid as string in database
impl ToSql for StoredUuid {
    fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
        // Uses the `Display` form of Uuid, matching `Uuid::parse_str` above.
        let s = self.0.to_string();
        Ok(s.into())
    }
}
/// Wraps [`TaskMap`] (type alias for HashMap) so we can implement rusqlite conversion traits for it
struct StoredTaskMap(TaskMap);
/// Parses TaskMap stored as JSON in string column
impl FromSql for StoredTaskMap {
    fn column_result(value: rusqlite::types::ValueRef<'_>) -> rusqlite::types::FromSqlResult<Self> {
        // Any JSON parse failure is reported to rusqlite as a type error.
        let o: TaskMap = serde_json::from_str(value.as_str()?)
            .map_err(|_| rusqlite::types::FromSqlError::InvalidType)?;
        Ok(StoredTaskMap(o))
    }
}
/// Stores TaskMap in string column
impl ToSql for StoredTaskMap {
    fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
        let s = serde_json::to_string(&self.0)
            .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?;
        Ok(s.into())
    }
}
/// Parses [`ReplicaOp`] stored as JSON in a string column
/// (the original doc comments on these two impls were swapped)
impl FromSql for ReplicaOp {
    fn column_result(value: rusqlite::types::ValueRef<'_>) -> rusqlite::types::FromSqlResult<Self> {
        let o: ReplicaOp = serde_json::from_str(value.as_str()?)
            .map_err(|_| rusqlite::types::FromSqlError::InvalidType)?;
        Ok(o)
    }
}
/// Stores [`ReplicaOp`] as JSON in a string column
impl ToSql for ReplicaOp {
    fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
        let s = serde_json::to_string(&self)
            .map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?;
        Ok(s.into())
    }
}
/// SqliteStorage is an on-disk storage backed by SQLite3.
pub struct SqliteStorage {
    // The open database connection; transactions are created from it.
    con: Connection,
}
impl SqliteStorage {
    /// Open (or create) a SQLite-backed storage in the given directory.
    ///
    /// The directory (and any missing parents) is created if necessary; the
    /// database file `taskchampion.sqlite3` inside it is opened or created,
    /// and the required tables are created if they do not already exist.
    pub fn new<P: AsRef<Path>>(directory: P) -> anyhow::Result<SqliteStorage> {
        // Ensure parent folder exists
        std::fs::create_dir_all(&directory)?;

        // Open (or create) database
        let db_file = directory.as_ref().join("taskchampion.sqlite3");
        let con = Connection::open(db_file)?;

        // Initialize database. A fixed-size array avoids a pointless Vec
        // allocation here (clippy::useless_vec).
        let queries = [
            "CREATE TABLE IF NOT EXISTS operations (id INTEGER PRIMARY KEY AUTOINCREMENT, data STRING);",
            "CREATE TABLE IF NOT EXISTS sync_meta (key STRING PRIMARY KEY, value STRING);",
            "CREATE TABLE IF NOT EXISTS tasks (uuid STRING PRIMARY KEY, data STRING);",
            "CREATE TABLE IF NOT EXISTS working_set (id INTEGER PRIMARY KEY, uuid STRING);",
        ];
        for q in queries.iter() {
            con.execute(q, []).context("Creating table")?;
        }

        Ok(SqliteStorage { con })
    }
}
// A transaction over a SqliteStorage, wrapping a rusqlite transaction.
struct Txn<'t> {
    // The underlying transaction; None once `commit` has consumed it.
    txn: Option<rusqlite::Transaction<'t>>,
}
impl<'t> Txn<'t> {
    // Borrow the underlying rusqlite transaction, or fail if it was already
    // consumed by `commit`.
    fn get_txn(&self) -> Result<&rusqlite::Transaction<'t>, SqliteError> {
        self.txn
            .as_ref()
            .ok_or(SqliteError::TransactionAlreadyCommitted)
    }
    // Compute the next unused working-set index: MAX(id) + 1, with COALESCE
    // supplying 0 for an empty table (so the first index is 1).
    fn get_next_working_set_number(&self) -> anyhow::Result<usize> {
        let t = self.get_txn()?;
        let next_id: Option<usize> = t
            .query_row(
                "SELECT COALESCE(MAX(id), 0) + 1 FROM working_set",
                [],
                |r| r.get(0),
            )
            .optional()
            .context("Getting highest working set ID")?;
        // NOTE(review): an aggregate query always returns exactly one row, so
        // this fallback is presumably unreachable -- confirm before relying
        // on the 0.
        Ok(next_id.unwrap_or(0))
    }
}
impl Storage for SqliteStorage {
    // Begin a SQLite transaction on the underlying connection.
    fn txn<'a>(&'a mut self) -> anyhow::Result<Box<dyn StorageTxn + 'a>> {
        let txn = self.con.transaction()?;
        Ok(Box::new(Txn { txn: Some(txn) }))
    }
}
// SQLite implementation: tasks and operations are stored as JSON strings,
// UUIDs as their string form (see the Stored* wrappers above). Every method
// re-checks via `get_txn` that the transaction has not already been committed.
impl<'t> StorageTxn for Txn<'t> {
    fn get_task(&mut self, uuid: Uuid) -> anyhow::Result<Option<TaskMap>> {
        let t = self.get_txn()?;
        let result: Option<StoredTaskMap> = t
            .query_row(
                "SELECT data FROM tasks WHERE uuid = ? LIMIT 1",
                [&StoredUuid(uuid)],
                |r| r.get("data"),
            )
            .optional()?;
        // Get task from "stored" wrapper
        Ok(result.map(|t| t.0))
    }
    fn create_task(&mut self, uuid: Uuid) -> anyhow::Result<bool> {
        let t = self.get_txn()?;
        // check-then-insert is safe here: both statements run inside this
        // same SQLite transaction
        let count: usize = t.query_row(
            "SELECT count(uuid) FROM tasks WHERE uuid = ?",
            [&StoredUuid(uuid)],
            |x| x.get(0),
        )?;
        if count > 0 {
            return Ok(false);
        }
        let data = TaskMap::default();
        t.execute(
            "INSERT INTO tasks (uuid, data) VALUES (?, ?)",
            params![&StoredUuid(uuid), &StoredTaskMap(data)],
        )
        .context("Create task query")?;
        Ok(true)
    }
    fn set_task(&mut self, uuid: Uuid, task: TaskMap) -> anyhow::Result<()> {
        let t = self.get_txn()?;
        t.execute(
            "INSERT OR REPLACE INTO tasks (uuid, data) VALUES (?, ?)",
            params![&StoredUuid(uuid), &StoredTaskMap(task)],
        )
        .context("Update task query")?;
        Ok(())
    }
    fn delete_task(&mut self, uuid: Uuid) -> anyhow::Result<bool> {
        let t = self.get_txn()?;
        let changed = t
            .execute("DELETE FROM tasks WHERE uuid = ?", [&StoredUuid(uuid)])
            .context("Delete task query")?;
        Ok(changed > 0)
    }
    fn all_tasks(&mut self) -> anyhow::Result<Vec<(Uuid, TaskMap)>> {
        let t = self.get_txn()?;
        let mut q = t.prepare("SELECT uuid, data FROM tasks")?;
        let rows = q.query_map([], |r| {
            let uuid: StoredUuid = r.get("uuid")?;
            let data: StoredTaskMap = r.get("data")?;
            Ok((uuid.0, data.0))
        })?;
        let mut ret = vec![];
        for r in rows {
            ret.push(r?);
        }
        Ok(ret)
    }
    fn all_task_uuids(&mut self) -> anyhow::Result<Vec<Uuid>> {
        let t = self.get_txn()?;
        let mut q = t.prepare("SELECT uuid FROM tasks")?;
        let rows = q.query_map([], |r| {
            let uuid: StoredUuid = r.get("uuid")?;
            Ok(uuid.0)
        })?;
        let mut ret = vec![];
        for r in rows {
            ret.push(r?);
        }
        Ok(ret)
    }
    fn base_version(&mut self) -> anyhow::Result<VersionId> {
        let t = self.get_txn()?;
        let version: Option<StoredUuid> = t
            .query_row(
                "SELECT value FROM sync_meta WHERE key = 'base_version'",
                [],
                |r| r.get("value"),
            )
            .optional()?;
        // fall back to the default when nothing has been synced yet
        Ok(version.map(|u| u.0).unwrap_or(DEFAULT_BASE_VERSION))
    }
    fn set_base_version(&mut self, version: VersionId) -> anyhow::Result<()> {
        let t = self.get_txn()?;
        t.execute(
            "INSERT OR REPLACE INTO sync_meta (key, value) VALUES (?, ?)",
            params!["base_version", &StoredUuid(version)],
        )
        .context("Set base version")?;
        Ok(())
    }
    fn operations(&mut self) -> anyhow::Result<Vec<ReplicaOp>> {
        let t = self.get_txn()?;
        // id order preserves the order in which operations were added
        let mut q = t.prepare("SELECT data FROM operations ORDER BY id ASC")?;
        let rows = q.query_map([], |r| {
            let data: ReplicaOp = r.get("data")?;
            Ok(data)
        })?;
        let mut ret = vec![];
        for r in rows {
            ret.push(r?);
        }
        Ok(ret)
    }
    fn num_operations(&mut self) -> anyhow::Result<usize> {
        let t = self.get_txn()?;
        let count: usize = t.query_row("SELECT count(*) FROM operations", [], |x| x.get(0))?;
        Ok(count)
    }
    fn add_operation(&mut self, op: ReplicaOp) -> anyhow::Result<()> {
        let t = self.get_txn()?;
        t.execute("INSERT INTO operations (data) VALUES (?)", params![&op])
            .context("Add operation query")?;
        Ok(())
    }
    fn set_operations(&mut self, ops: Vec<ReplicaOp>) -> anyhow::Result<()> {
        let t = self.get_txn()?;
        t.execute("DELETE FROM operations", [])
            .context("Clear all existing operations")?;
        // also reset the AUTOINCREMENT counter so new ids start from 1 again
        t.execute("DELETE FROM sqlite_sequence WHERE name = 'operations'", [])
            .context("Clear all existing operations")?;
        for o in ops {
            self.add_operation(o)?;
        }
        Ok(())
    }
    fn get_working_set(&mut self) -> anyhow::Result<Vec<Option<Uuid>>> {
        let t = self.get_txn()?;
        let mut q = t.prepare("SELECT id, uuid FROM working_set ORDER BY id ASC")?;
        let rows = q
            .query_map([], |r| {
                let id: usize = r.get("id")?;
                let uuid: StoredUuid = r.get("uuid")?;
                Ok((id, uuid.0))
            })
            .context("Get working set query")?;
        let rows: Vec<Result<(usize, Uuid), _>> = rows.collect();
        // build a vector of Nones sized to one past the highest id, then fill
        // in each row at its id; ids not present in the table remain None
        // (index 0 is never inserted, so it stays the None placeholder)
        let mut res = Vec::with_capacity(rows.len());
        for _ in 0..self
            .get_next_working_set_number()
            .context("Getting working set number")?
        {
            res.push(None);
        }
        for r in rows {
            let (id, uuid) = r?;
            res[id as usize] = Some(uuid);
        }
        Ok(res)
    }
    fn add_to_working_set(&mut self, uuid: Uuid) -> anyhow::Result<usize> {
        let t = self.get_txn()?;
        // the index is computed before the insert: MAX(id) + 1
        let next_working_id = self.get_next_working_set_number()?;
        t.execute(
            "INSERT INTO working_set (id, uuid) VALUES (?, ?)",
            params![next_working_id, &StoredUuid(uuid)],
        )
        .context("Create task query")?;
        Ok(next_working_id)
    }
    fn set_working_set_item(&mut self, index: usize, uuid: Option<Uuid>) -> anyhow::Result<()> {
        let t = self.get_txn()?;
        match uuid {
            // Add or override item
            Some(uuid) => t.execute(
                "INSERT OR REPLACE INTO working_set (id, uuid) VALUES (?, ?)",
                params![index, &StoredUuid(uuid)],
            ),
            // Setting to None removes the row from database
            None => t.execute("DELETE FROM working_set WHERE id = ?", [index]),
        }
        .context("Set working set item query")?;
        Ok(())
    }
    fn clear_working_set(&mut self) -> anyhow::Result<()> {
        let t = self.get_txn()?;
        t.execute("DELETE FROM working_set", [])
            .context("Clear working set query")?;
        Ok(())
    }
    fn commit(&mut self) -> anyhow::Result<()> {
        // take() consumes the transaction, so a second commit fails with
        // TransactionAlreadyCommitted
        let t = self
            .txn
            .take()
            .ok_or(SqliteError::TransactionAlreadyCommitted)?;
        t.commit().context("Committing transaction")?;
        Ok(())
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::storage::taskmap_with;
    use pretty_assertions::assert_eq;
    use tempfile::TempDir;
    // Each test opens a fresh database inside a TempDir; inner scopes ensure
    // one transaction is finished (committed or dropped) before the next.
    #[test]
    fn test_empty_dir() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        // `new` must create the missing directory itself
        let non_existant = tmp_dir.path().join("subdir");
        let mut storage = SqliteStorage::new(&non_existant)?;
        let uuid = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            assert!(txn.create_task(uuid)?);
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            let task = txn.get_task(uuid)?;
            assert_eq!(task, Some(taskmap_with(vec![])));
        }
        Ok(())
    }
    #[test]
    fn drop_transaction() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            assert!(txn.create_task(uuid1)?);
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            assert!(txn.create_task(uuid2)?);
            std::mem::drop(txn); // Unnecessary explicit drop of transaction
        }
        {
            let mut txn = storage.txn()?;
            let uuids = txn.all_task_uuids()?;
            // uuid2 was rolled back with the dropped transaction
            assert_eq!(uuids, [uuid1]);
        }
        Ok(())
    }
    #[test]
    fn test_create() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            assert!(txn.create_task(uuid)?);
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            let task = txn.get_task(uuid)?;
            assert_eq!(task, Some(taskmap_with(vec![])));
        }
        Ok(())
    }
    #[test]
    fn test_create_exists() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            assert!(txn.create_task(uuid)?);
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            // second create of the same uuid reports false
            assert!(!txn.create_task(uuid)?);
            txn.commit()?;
        }
        Ok(())
    }
    #[test]
    fn test_get_missing() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            let task = txn.get_task(uuid)?;
            assert_eq!(task, None);
        }
        Ok(())
    }
    #[test]
    fn test_set_task() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            // set_task implicitly creates the task
            txn.set_task(uuid, taskmap_with(vec![("k".to_string(), "v".to_string())]))?;
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            let task = txn.get_task(uuid)?;
            assert_eq!(
                task,
                Some(taskmap_with(vec![("k".to_string(), "v".to_string())]))
            );
        }
        Ok(())
    }
    #[test]
    fn test_delete_task_missing() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            assert!(!txn.delete_task(uuid)?);
        }
        Ok(())
    }
    #[test]
    fn test_delete_task_exists() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            assert!(txn.create_task(uuid)?);
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            assert!(txn.delete_task(uuid)?);
        }
        Ok(())
    }
    #[test]
    fn test_all_tasks_empty() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        {
            let mut txn = storage.txn()?;
            let tasks = txn.all_tasks()?;
            assert_eq!(tasks, vec![]);
        }
        Ok(())
    }
    #[test]
    fn test_all_tasks_and_uuids() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            assert!(txn.create_task(uuid1.clone())?);
            txn.set_task(
                uuid1.clone(),
                taskmap_with(vec![("num".to_string(), "1".to_string())]),
            )?;
            assert!(txn.create_task(uuid2.clone())?);
            txn.set_task(
                uuid2.clone(),
                taskmap_with(vec![("num".to_string(), "2".to_string())]),
            )?;
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            let mut tasks = txn.all_tasks()?;
            // order is nondeterministic, so sort by uuid
            tasks.sort_by(|a, b| a.0.cmp(&b.0));
            let mut exp = vec![
                (
                    uuid1.clone(),
                    taskmap_with(vec![("num".to_string(), "1".to_string())]),
                ),
                (
                    uuid2.clone(),
                    taskmap_with(vec![("num".to_string(), "2".to_string())]),
                ),
            ];
            exp.sort_by(|a, b| a.0.cmp(&b.0));
            assert_eq!(tasks, exp);
        }
        {
            let mut txn = storage.txn()?;
            let mut uuids = txn.all_task_uuids()?;
            uuids.sort();
            let mut exp = vec![uuid1.clone(), uuid2.clone()];
            exp.sort();
            assert_eq!(uuids, exp);
        }
        Ok(())
    }
    #[test]
    fn test_base_version_default() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        {
            let mut txn = storage.txn()?;
            assert_eq!(txn.base_version()?, DEFAULT_BASE_VERSION);
        }
        Ok(())
    }
    #[test]
    fn test_base_version_setting() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let u = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            txn.set_base_version(u)?;
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            assert_eq!(txn.base_version()?, u);
        }
        Ok(())
    }
    #[test]
    fn test_operations() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        let uuid3 = Uuid::new_v4();
        // create some operations
        {
            let mut txn = storage.txn()?;
            txn.add_operation(ReplicaOp::Create { uuid: uuid1 })?;
            txn.add_operation(ReplicaOp::Create { uuid: uuid2 })?;
            txn.commit()?;
        }
        // read them back
        {
            let mut txn = storage.txn()?;
            let ops = txn.operations()?;
            assert_eq!(
                ops,
                vec![
                    ReplicaOp::Create { uuid: uuid1 },
                    ReplicaOp::Create { uuid: uuid2 },
                ]
            );
            assert_eq!(txn.num_operations()?, 2);
        }
        // set them to a different bunch
        {
            let mut txn = storage.txn()?;
            txn.set_operations(vec![
                ReplicaOp::Delete {
                    uuid: uuid2,
                    old_task: TaskMap::new(),
                },
                ReplicaOp::Delete {
                    uuid: uuid1,
                    old_task: TaskMap::new(),
                },
            ])?;
            txn.commit()?;
        }
        // create some more operations (to test adding operations after clearing)
        {
            let mut txn = storage.txn()?;
            txn.add_operation(ReplicaOp::Create { uuid: uuid3 })?;
            txn.add_operation(ReplicaOp::Delete {
                uuid: uuid3,
                old_task: TaskMap::new(),
            })?;
            txn.commit()?;
        }
        // read them back
        {
            let mut txn = storage.txn()?;
            let ops = txn.operations()?;
            assert_eq!(
                ops,
                vec![
                    ReplicaOp::Delete {
                        uuid: uuid2,
                        old_task: TaskMap::new()
                    },
                    ReplicaOp::Delete {
                        uuid: uuid1,
                        old_task: TaskMap::new()
                    },
                    ReplicaOp::Create { uuid: uuid3 },
                    ReplicaOp::Delete {
                        uuid: uuid3,
                        old_task: TaskMap::new()
                    },
                ]
            );
            assert_eq!(txn.num_operations()?, 4);
        }
        Ok(())
    }
    #[test]
    fn get_working_set_empty() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        {
            let mut txn = storage.txn()?;
            let ws = txn.get_working_set()?;
            assert_eq!(ws, vec![None]);
        }
        Ok(())
    }
    #[test]
    fn add_to_working_set() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            txn.add_to_working_set(uuid1)?;
            txn.add_to_working_set(uuid2)?;
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            let ws = txn.get_working_set()?;
            assert_eq!(ws, vec![None, Some(uuid1), Some(uuid2)]);
        }
        Ok(())
    }
    #[test]
    fn clear_working_set() -> anyhow::Result<()> {
        let tmp_dir = TempDir::new()?;
        let mut storage = SqliteStorage::new(&tmp_dir.path())?;
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        {
            let mut txn = storage.txn()?;
            txn.add_to_working_set(uuid1)?;
            txn.add_to_working_set(uuid2)?;
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            txn.clear_working_set()?;
            txn.add_to_working_set(uuid2)?;
            txn.add_to_working_set(uuid1)?;
            txn.commit()?;
        }
        {
            let mut txn = storage.txn()?;
            let ws = txn.get_working_set()?;
            assert_eq!(ws, vec![None, Some(uuid2), Some(uuid1)]);
        }
        Ok(())
    }
#[test]
fn set_working_set_item() -> anyhow::Result<()> {
let tmp_dir = TempDir::new()?;
let mut storage = SqliteStorage::new(&tmp_dir.path())?;
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
{
let mut txn = storage.txn()?;
txn.add_to_working_set(uuid1)?;
txn.add_to_working_set(uuid2)?;
txn.commit()?;
}
{
let mut txn = storage.txn()?;
let ws = txn.get_working_set()?;
assert_eq!(ws, vec![None, Some(uuid1), Some(uuid2)]);
}
// Clear one item
{
let mut txn = storage.txn()?;
txn.set_working_set_item(1, None)?;
txn.commit()?;
}
{
let mut txn = storage.txn()?;
let ws = txn.get_working_set()?;
assert_eq!(ws, vec![None, None, Some(uuid2)]);
}
// Override item
{
let mut txn = storage.txn()?;
txn.set_working_set_item(2, Some(uuid1))?;
txn.commit()?;
}
{
let mut txn = storage.txn()?;
let ws = txn.get_working_set()?;
assert_eq!(ws, vec![None, None, Some(uuid1)]);
}
Ok(())
}
}

View file

@ -0,0 +1,10 @@
use super::Timestamp;
/// An annotation for a task.
///
/// The derived `Ord`/`PartialOrd` compare fields in declaration order, so
/// annotations sort chronologically by `entry` first, then by `description`.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Annotation {
    /// Time the annotation was made
    pub entry: Timestamp,
    /// Content of the annotation
    pub description: String,
}

View file

@ -0,0 +1,14 @@
#![allow(clippy::module_inception)]
use chrono::prelude::*;

// Submodules making up the task model.
mod annotation;
mod status;
mod tag;
mod task;

// Re-export the public task-model types at this level.
pub use annotation::Annotation;
pub use status::Status;
pub use tag::Tag;
pub use task::{Task, TaskMut};

/// The timestamp type used throughout the task model (UTC).
pub type Timestamp = DateTime<Utc>;

View file

@ -0,0 +1,69 @@
/// The status of a task, as defined by the task data model.
#[derive(Debug, PartialEq, Clone, strum_macros::Display)]
#[repr(C)]
pub enum Status {
    /// Stored as "pending" in the TaskMap.
    Pending,
    /// Stored as "completed" in the TaskMap.
    Completed,
    /// Stored as "deleted" in the TaskMap.
    Deleted,
    /// Unknown signifies a status in the task DB that was not
    /// recognized. This supports forward-compatibility if a
    /// new status is added. Tasks with unknown status should
    /// be ignored (but not deleted).
    Unknown(String),
}
impl Status {
    /// Get a Status from the string value in a TaskMap.
    /// Unrecognized values map to `Status::Unknown`.
    // NOTE(review): the previous comment claimed a "1-character value" and a
    // default of Pending; the code matches full words and defaults to Unknown.
    pub(crate) fn from_taskmap(s: &str) -> Status {
        match s {
            "pending" => Status::Pending,
            "completed" => Status::Completed,
            "deleted" => Status::Deleted,
            v => Status::Unknown(v.to_string()),
        }
    }
    /// Get the string value for this status to use in the TaskMap.
    /// For `Unknown`, this is the original unrecognized value.
    pub(crate) fn to_taskmap(&self) -> &str {
        match self {
            Status::Pending => "pending",
            Status::Completed => "completed",
            Status::Deleted => "deleted",
            Status::Unknown(v) => v.as_ref(),
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use pretty_assertions::assert_eq;

    /// Each status maps to its lowercase TaskMap string; Unknown carries
    /// its original value through.
    #[test]
    fn to_taskmap() {
        let cases = vec![
            (Status::Pending, "pending"),
            (Status::Completed, "completed"),
            (Status::Deleted, "deleted"),
            (Status::Unknown("wishful".into()), "wishful"),
        ];
        for (status, s) in cases {
            assert_eq!(status.to_taskmap(), s);
        }
    }

    /// Known strings parse to their variants; anything else becomes Unknown.
    #[test]
    fn from_taskmap() {
        let cases = vec![
            ("pending", Status::Pending),
            ("completed", Status::Completed),
            ("deleted", Status::Deleted),
            ("something-else", Status::Unknown("something-else".into())),
        ];
        for (s, status) in cases {
            assert_eq!(Status::from_taskmap(s), status);
        }
    }

    /// The strum-derived Display renders the variant name, without the
    /// payload for Unknown.
    #[test]
    fn display() {
        assert_eq!(Status::Pending.to_string(), "Pending");
        assert_eq!(Status::Completed.to_string(), "Completed");
        assert_eq!(Status::Deleted.to_string(), "Deleted");
        assert_eq!(Status::Unknown("wishful".into()).to_string(), "Unknown");
    }
}

View file

@ -0,0 +1,174 @@
use std::convert::TryFrom;
use std::fmt;
use std::str::FromStr;
/// A Tag is a descriptor for a task, that is either present or absent, and can be used for
/// filtering. Tags composed of all uppercase letters are reserved for synthetic tags.
///
/// Valid tags must not contain whitespace or any of the characters in `+-*/(<>^! %=~`.
/// The first character additionally cannot be a digit, and subsequent characters cannot be `:`.
/// This definition is based on [that of
/// TaskWarrior](https://github.com/GothenburgBitFactory/taskwarrior/blob/663c6575ceca5bd0135ae884879339dac89d3142/src/Lexer.cpp#L146-L164).
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
pub struct Tag(TagInner);
/// Inner type to hide the implementation
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
pub(super) enum TagInner {
    /// A user-provided tag, validated on construction in `Tag::from_str`.
    User(String),
    /// A synthetic tag (see [`SyntheticTag`]).
    Synthetic(SyntheticTag),
}

// Characters that may not appear anywhere in a tag; see doc comment for Tag, above.
pub const INVALID_TAG_CHARACTERS: &str = "+-*/(<>^! %=~";
impl Tag {
    /// True if this tag is a synthetic tag
    pub fn is_synthetic(&self) -> bool {
        match self.0 {
            TagInner::Synthetic(_) => true,
            TagInner::User(_) => false,
        }
    }

    /// True if this tag is a user-provided tag (not synthetic)
    pub fn is_user(&self) -> bool {
        match self.0 {
            TagInner::User(_) => true,
            TagInner::Synthetic(_) => false,
        }
    }

    /// Borrow the hidden inner representation of this tag.
    pub(super) fn inner(&self) -> &TagInner {
        &self.0
    }

    /// Construct a tag directly from its inner representation.
    pub(super) fn from_inner(inner: TagInner) -> Self {
        Self(inner)
    }
}
impl FromStr for Tag {
    type Err = anyhow::Error;

    /// Parse a tag from its string form, applying the validity rules in the
    /// [`Tag`] doc comment.
    fn from_str(value: &str) -> Result<Tag, anyhow::Error> {
        // Strings consisting entirely of ASCII uppercase are reserved for
        // synthetic tags; anything in that space that is not a known
        // synthetic tag is invalid.  (The empty string also lands here.)
        if value.chars().all(|c| c.is_ascii_uppercase()) {
            return match SyntheticTag::from_str(value) {
                Ok(st) => Ok(Self(TagInner::Synthetic(st))),
                Err(_) => anyhow::bail!("invalid tag {:?}", value),
            };
        }
        // the first character may not be whitespace, a digit, or an invalid character
        let mut chars = value.chars();
        match chars.next() {
            Some(first)
                if !first.is_whitespace()
                    && !first.is_ascii_digit()
                    && !INVALID_TAG_CHARACTERS.contains(first) => {}
            _ => anyhow::bail!("invalid tag {:?}", value),
        }
        // subsequent characters additionally may not be ':'
        for c in chars {
            if c.is_whitespace() || c == ':' || INVALID_TAG_CHARACTERS.contains(c) {
                anyhow::bail!("invalid tag {:?}", value);
            }
        }
        Ok(Self(TagInner::User(value.to_owned())))
    }
}
impl TryFrom<&str> for Tag {
type Error = anyhow::Error;
fn try_from(value: &str) -> Result<Tag, Self::Error> {
Self::from_str(value)
}
}
impl TryFrom<&String> for Tag {
    type Error = anyhow::Error;

    /// Delegates to the [`FromStr`] implementation.
    fn try_from(value: &String) -> Result<Tag, Self::Error> {
        Self::from_str(value.as_str())
    }
}
impl fmt::Display for Tag {
    /// Display the tag as its string form (the same text as `AsRef<str>`),
    /// honoring any formatter flags.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s: &str = match &self.0 {
            TagInner::User(s) => s,
            TagInner::Synthetic(st) => st.as_ref(),
        };
        s.fmt(f)
    }
}
impl AsRef<str> for Tag {
    /// View the tag as its string form.
    fn as_ref(&self) -> &str {
        match self.inner() {
            TagInner::User(s) => s.as_str(),
            TagInner::Synthetic(st) => st.as_ref(),
        }
    }
}
/// A synthetic tag, represented as an `enum`. This type is used directly by
/// [`taskchampion::task::task`] for efficiency.
#[derive(
    Debug,
    Clone,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Hash,
    strum_macros::EnumString,
    strum_macros::AsRefStr,
    strum_macros::EnumIter,
)]
// string form is the SCREAMING_SNAKE_CASE variant name, e.g. `Waiting` <-> "WAITING"
#[strum(serialize_all = "SCREAMING_SNAKE_CASE")]
pub(super) enum SyntheticTag {
    // When adding items here, also implement and test them in `task.rs` and document them in
    // `docs/src/tags.md`.
    Waiting,
    Active,
    Pending,
    Completed,
    Deleted,
    Blocked,
    Unblocked,
    Blocking,
}
#[cfg(test)]
mod test {
    use super::*;
    use pretty_assertions::assert_eq;
    use rstest::rstest;
    use std::convert::TryInto;

    // Valid user tags (including a leading ':', which is only forbidden in
    // subsequent positions) and a valid synthetic tag parse successfully and
    // round-trip through Display and AsRef.
    #[rstest]
    #[case::simple("abc")]
    #[case::colon_prefix(":abc")]
    #[case::letters_and_numbers("a123_456")]
    #[case::synthetic("WAITING")]
    fn test_tag_try_into_success(#[case] s: &'static str) {
        let tag: Tag = s.try_into().unwrap();
        // check Display (via to_string) and AsRef while we're here
        assert_eq!(tag.to_string(), s.to_owned());
        assert_eq!(tag.as_ref(), s);
    }

    // Invalid tags — empty, infix ':', all-digit first char, invalid
    // characters, and unrecognized all-uppercase — are rejected with a
    // consistent error message.
    #[rstest]
    #[case::empty("")]
    #[case::colon_infix("a:b")]
    #[case::digits("999")]
    #[case::bangs("abc!!!")]
    #[case::no_such_synthetic("NOSUCH")]
    fn test_tag_try_into_err(#[case] s: &'static str) {
        let tag: Result<Tag, _> = s.try_into();
        assert_eq!(
            tag.unwrap_err().to_string(),
            format!("invalid tag \"{}\"", s)
        );
    }
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,406 @@
use crate::errors::Error;
use crate::server::SyncOp;
use crate::storage::{ReplicaOp, StorageTxn, TaskMap};
/// Apply the given SyncOp to the replica, updating both the task data and adding a
/// ReplicaOp to the list of operations. Returns the TaskMap of the task after the
/// operation has been applied (or an empty TaskMap for Delete). It is not an error
/// to create an existing task, nor to delete a nonexistent task.
///
/// On paths that modify storage, this function commits `txn` itself. Creating an
/// already-existing task makes no changes and does not commit; updating a
/// nonexistent task is an error (and makes no changes).
pub(super) fn apply_and_record(txn: &mut dyn StorageTxn, op: SyncOp) -> anyhow::Result<TaskMap> {
    match op {
        SyncOp::Create { uuid } => {
            let created = txn.create_task(uuid)?;
            if created {
                txn.add_operation(ReplicaOp::Create { uuid })?;
                txn.commit()?;
                Ok(TaskMap::new())
            } else {
                // task already exists: return its current content, unchanged
                Ok(txn
                    .get_task(uuid)?
                    .expect("create_task failed but task does not exist"))
            }
        }
        SyncOp::Delete { uuid } => {
            let task = txn.get_task(uuid)?;
            if let Some(task) = task {
                txn.delete_task(uuid)?;
                // the recorded op carries the task's content at deletion time
                txn.add_operation(ReplicaOp::Delete {
                    uuid,
                    old_task: task,
                })?;
                txn.commit()?;
                Ok(TaskMap::new())
            } else {
                // deleting a nonexistent task is a no-op
                Ok(TaskMap::new())
            }
        }
        SyncOp::Update {
            uuid,
            property,
            value,
            timestamp,
        } => {
            let task = txn.get_task(uuid)?;
            if let Some(mut task) = task {
                // capture the prior value for the recorded ReplicaOp
                let old_value = task.get(&property).cloned();
                if let Some(ref v) = value {
                    task.insert(property.clone(), v.clone());
                } else {
                    // a None value removes the property
                    task.remove(&property);
                }
                txn.set_task(uuid, task.clone())?;
                txn.add_operation(ReplicaOp::Update {
                    uuid,
                    property,
                    old_value,
                    value,
                    timestamp,
                })?;
                txn.commit()?;
                Ok(task)
            } else {
                Err(Error::Database(format!("Task {} does not exist", uuid)).into())
            }
        }
    }
}
/// Apply an op to the TaskDb's set of tasks (without recording it in the list of operations)
pub(super) fn apply_op(txn: &mut dyn StorageTxn, op: &SyncOp) -> anyhow::Result<()> {
// TODO: test
// TODO: it'd be nice if this was integrated into apply() somehow, but that clones TaskMaps
// unnecessariliy
match op {
SyncOp::Create { uuid } => {
// insert if the task does not already exist
if !txn.create_task(*uuid)? {
return Err(Error::Database(format!("Task {} already exists", uuid)).into());
}
}
SyncOp::Delete { ref uuid } => {
if !txn.delete_task(*uuid)? {
return Err(Error::Database(format!("Task {} does not exist", uuid)).into());
}
}
SyncOp::Update {
ref uuid,
ref property,
ref value,
timestamp: _,
} => {
// update if this task exists, otherwise ignore
if let Some(mut task) = txn.get_task(*uuid)? {
match value {
Some(ref val) => task.insert(property.to_string(), val.clone()),
None => task.remove(property),
};
txn.set_task(*uuid, task)?;
} else {
return Err(Error::Database(format!("Task {} does not exist", uuid)).into());
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::storage::TaskMap;
    use crate::taskdb::TaskDb;
    use chrono::Utc;
    use pretty_assertions::assert_eq;
    use std::collections::HashMap;
    use uuid::Uuid;

    // Create on a fresh db: task exists, empty TaskMap, one Create op recorded.
    #[test]
    fn test_apply_create() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let op = SyncOp::Create { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op)?;
            assert_eq!(taskmap.len(), 0);
            txn.commit()?;
        }
        assert_eq!(db.sorted_tasks(), vec![(uuid, vec![]),]);
        assert_eq!(db.operations(), vec![ReplicaOp::Create { uuid }]);
        Ok(())
    }

    // Create on an existing task: the existing data is returned untouched and
    // no new operation is recorded.
    #[test]
    fn test_apply_create_exists() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        {
            let mut txn = db.storage.txn()?;
            txn.create_task(uuid)?;
            let mut taskmap = TaskMap::new();
            taskmap.insert("foo".into(), "bar".into());
            txn.set_task(uuid, taskmap)?;
            txn.commit()?;
        }
        let op = SyncOp::Create { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op.clone())?;
            assert_eq!(taskmap.len(), 1);
            assert_eq!(taskmap.get("foo").unwrap(), "bar");
            txn.commit()?;
        }
        // create did not delete the old task..
        assert_eq!(
            db.sorted_tasks(),
            vec![(uuid, vec![("foo".into(), "bar".into())])]
        );
        // create was done "manually" above, and no new op was added
        assert_eq!(db.operations(), vec![]);
        Ok(())
    }

    // Create followed by Update: both the task data and the recorded ops
    // (including old_value: None) reflect the update.
    #[test]
    fn test_apply_create_update() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let now = Utc::now();
        let op1 = SyncOp::Create { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op1)?;
            assert_eq!(taskmap.len(), 0);
            txn.commit()?;
        }
        let op2 = SyncOp::Update {
            uuid,
            property: String::from("title"),
            value: Some("my task".into()),
            timestamp: now,
        };
        {
            let mut txn = db.storage.txn()?;
            let mut taskmap = apply_and_record(txn.as_mut(), op2)?;
            assert_eq!(
                taskmap.drain().collect::<Vec<(_, _)>>(),
                vec![("title".into(), "my task".into())]
            );
            txn.commit()?;
        }
        assert_eq!(
            db.sorted_tasks(),
            vec![(uuid, vec![("title".into(), "my task".into())])]
        );
        assert_eq!(
            db.operations(),
            vec![
                ReplicaOp::Create { uuid },
                ReplicaOp::Update {
                    uuid,
                    property: "title".into(),
                    old_value: None,
                    value: Some("my task".into()),
                    timestamp: now
                }
            ]
        );
        Ok(())
    }

    // An Update with value None removes the property, and the recorded op
    // carries the old value.
    #[test]
    fn test_apply_create_update_delete_prop() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let now = Utc::now();
        let op1 = SyncOp::Create { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op1)?;
            assert_eq!(taskmap.len(), 0);
            txn.commit()?;
        }
        let op2 = SyncOp::Update {
            uuid,
            property: String::from("title"),
            value: Some("my task".into()),
            timestamp: now,
        };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op2)?;
            assert_eq!(taskmap.get("title"), Some(&"my task".to_owned()));
            txn.commit()?;
        }
        let op3 = SyncOp::Update {
            uuid,
            property: String::from("priority"),
            value: Some("H".into()),
            timestamp: now,
        };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op3)?;
            assert_eq!(taskmap.get("priority"), Some(&"H".to_owned()));
            txn.commit()?;
        }
        let op4 = SyncOp::Update {
            uuid,
            property: String::from("title"),
            value: None,
            timestamp: now,
        };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op4)?;
            assert_eq!(taskmap.get("title"), None);
            assert_eq!(taskmap.get("priority"), Some(&"H".to_owned()));
            txn.commit()?;
        }
        // NOTE(review): `exp` is built here but never used in an assertion —
        // apparently leftover from an earlier version of this test.
        let mut exp = HashMap::new();
        let mut task = HashMap::new();
        task.insert(String::from("priority"), String::from("H"));
        exp.insert(uuid, task);
        assert_eq!(
            db.sorted_tasks(),
            vec![(uuid, vec![("priority".into(), "H".into())])]
        );
        assert_eq!(
            db.operations(),
            vec![
                ReplicaOp::Create { uuid },
                ReplicaOp::Update {
                    uuid,
                    property: "title".into(),
                    old_value: None,
                    value: Some("my task".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid,
                    property: "priority".into(),
                    old_value: None,
                    value: Some("H".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid,
                    property: "title".into(),
                    old_value: Some("my task".into()),
                    value: None,
                    timestamp: now,
                }
            ]
        );
        Ok(())
    }

    // Updating a nonexistent task is an error with a specific message.
    #[test]
    fn test_apply_update_does_not_exist() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let op = SyncOp::Update {
            uuid,
            property: String::from("title"),
            value: Some("my task".into()),
            timestamp: Utc::now(),
        };
        {
            let mut txn = db.storage.txn()?;
            assert_eq!(
                apply_and_record(txn.as_mut(), op)
                    .err()
                    .unwrap()
                    .to_string(),
                format!("Task Database Error: Task {} does not exist", uuid)
            );
            txn.commit()?;
        }
        Ok(())
    }

    // Delete removes the task and records a Delete op carrying the task's
    // content at deletion time.
    #[test]
    fn test_apply_create_delete() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let now = Utc::now();
        let op1 = SyncOp::Create { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op1)?;
            assert_eq!(taskmap.len(), 0);
            // (no explicit commit: apply_and_record commits internally for a
            // successful Create)
        }
        let op2 = SyncOp::Update {
            uuid,
            property: String::from("priority"),
            value: Some("H".into()),
            timestamp: now,
        };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op2)?;
            assert_eq!(taskmap.get("priority"), Some(&"H".to_owned()));
            txn.commit()?;
        }
        let op3 = SyncOp::Delete { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op3)?;
            assert_eq!(taskmap.len(), 0);
            txn.commit()?;
        }
        assert_eq!(db.sorted_tasks(), vec![]);
        let mut old_task = TaskMap::new();
        old_task.insert("priority".into(), "H".into());
        assert_eq!(
            db.operations(),
            vec![
                ReplicaOp::Create { uuid },
                ReplicaOp::Update {
                    uuid,
                    property: "priority".into(),
                    old_value: None,
                    value: Some("H".into()),
                    timestamp: now,
                },
                ReplicaOp::Delete { uuid, old_task },
            ]
        );
        Ok(())
    }

    // Deleting a nonexistent task is not an error.
    #[test]
    fn test_apply_delete_not_present() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let op = SyncOp::Delete { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op)?;
            assert_eq!(taskmap.len(), 0);
            txn.commit()?;
        }
        Ok(())
    }
}

View file

@ -0,0 +1,263 @@
use crate::server::{Server, SyncOp};
use crate::storage::{ReplicaOp, Storage, TaskMap};
use uuid::Uuid;
mod apply;
mod snapshot;
mod sync;
mod undo;
mod working_set;
/// A TaskDb is the backend for a replica. It manages the storage, operations, synchronization,
/// and so on, and all the invariants that come with it. It leaves the meaning of particular task
/// properties to the replica and task implementations.
pub struct TaskDb {
    // the underlying storage backend; all access goes through transactions on this
    storage: Box<dyn Storage>,
}
impl TaskDb {
    /// Create a new TaskDb with the given backend storage
    pub fn new(storage: Box<dyn Storage>) -> TaskDb {
        TaskDb { storage }
    }

    /// Create a TaskDb backed by in-memory storage, for testing.
    #[cfg(test)]
    pub fn new_inmemory() -> TaskDb {
        // (the redundant inner #[cfg(test)] on this use was removed; the whole
        // function is already test-only)
        use crate::storage::InMemoryStorage;

        TaskDb::new(Box::new(InMemoryStorage::new()))
    }

    /// Apply an operation to the TaskDb. This will update the set of tasks and add a ReplicaOp to
    /// the set of operations in the TaskDb, and return the TaskMap containing the resulting task's
    /// properties (or an empty TaskMap for deletion).
    ///
    /// Aside from synchronization operations, this is the only way to modify the TaskDb. In cases
    /// where an operation does not make sense, this function will do nothing and return an error
    /// (but leave the TaskDb in a consistent state).
    pub fn apply(&mut self, op: SyncOp) -> anyhow::Result<TaskMap> {
        let mut txn = self.storage.txn()?;
        apply::apply_and_record(txn.as_mut(), op)
    }

    /// Add an UndoPoint operation to the list of replica operations.
    pub fn add_undo_point(&mut self) -> anyhow::Result<()> {
        let mut txn = self.storage.txn()?;
        txn.add_operation(ReplicaOp::UndoPoint)?;
        txn.commit()
    }

    /// Get all tasks.
    pub fn all_tasks(&mut self) -> anyhow::Result<Vec<(Uuid, TaskMap)>> {
        let mut txn = self.storage.txn()?;
        txn.all_tasks()
    }

    /// Get the UUIDs of all tasks
    pub fn all_task_uuids(&mut self) -> anyhow::Result<Vec<Uuid>> {
        let mut txn = self.storage.txn()?;
        txn.all_task_uuids()
    }

    /// Get the working set
    pub fn working_set(&mut self) -> anyhow::Result<Vec<Option<Uuid>>> {
        let mut txn = self.storage.txn()?;
        txn.get_working_set()
    }

    /// Get a single task, by uuid.
    pub fn get_task(&mut self, uuid: Uuid) -> anyhow::Result<Option<TaskMap>> {
        let mut txn = self.storage.txn()?;
        txn.get_task(uuid)
    }

    /// Rebuild the working set using a function to identify tasks that should be in the set. This
    /// renumbers the existing working-set tasks to eliminate gaps, and also adds any tasks that
    /// are not already in the working set but should be. The rebuild occurs in a single
    /// transaction against the storage backend.
    pub fn rebuild_working_set<F>(
        &mut self,
        in_working_set: F,
        renumber: bool,
    ) -> anyhow::Result<()>
    where
        F: Fn(&TaskMap) -> bool,
    {
        working_set::rebuild(self.storage.txn()?.as_mut(), in_working_set, renumber)
    }

    /// Add the given uuid to the working set and return its index; if it is already in the working
    /// set, its index is returned. This does *not* renumber any existing tasks.
    pub fn add_to_working_set(&mut self, uuid: Uuid) -> anyhow::Result<usize> {
        let mut txn = self.storage.txn()?;
        // search for an existing entry for this task..
        for (i, elt) in txn.get_working_set()?.iter().enumerate() {
            if *elt == Some(uuid) {
                // (note that this drops the transaction with no changes made)
                return Ok(i);
            }
        }
        // and if not found, add one
        let i = txn.add_to_working_set(uuid)?;
        txn.commit()?;
        Ok(i)
    }

    /// Sync to the given server, pulling remote changes and pushing local changes.
    ///
    /// If `avoid_snapshots` is true, the sync operations produces a snapshot only when the server
    /// indicate it is urgent (snapshot urgency "high"). This allows time for other replicas to
    /// create a snapshot before this one does.
    ///
    /// Set this to true on systems more constrained in CPU, memory, or bandwidth than a typical desktop
    /// system
    pub fn sync(
        &mut self,
        server: &mut Box<dyn Server>,
        avoid_snapshots: bool,
    ) -> anyhow::Result<()> {
        let mut txn = self.storage.txn()?;
        sync::sync(server, txn.as_mut(), avoid_snapshots)
    }

    /// Undo local operations until the most recent UndoPoint, returning false if there are no
    /// local operations to undo.
    pub fn undo(&mut self) -> anyhow::Result<bool> {
        let mut txn = self.storage.txn()?;
        undo::undo(txn.as_mut())
    }

    /// Get the number of un-synchronized operations in storage.
    pub fn num_operations(&mut self) -> anyhow::Result<usize> {
        // propagate storage errors rather than panicking (was `.unwrap()`,
        // inconsistent with the other Result-returning methods here)
        let mut txn = self.storage.txn()?;
        txn.num_operations()
    }

    // functions for supporting tests

    /// All tasks, sorted by uuid, with each task's properties sorted by name.
    #[cfg(test)]
    pub(crate) fn sorted_tasks(&mut self) -> Vec<(Uuid, Vec<(String, String)>)> {
        let mut res: Vec<(Uuid, Vec<(String, String)>)> = self
            .all_tasks()
            .unwrap()
            .iter()
            .map(|(u, t)| {
                let mut t = t
                    .iter()
                    .map(|(p, v)| (p.clone(), v.clone()))
                    .collect::<Vec<(String, String)>>();
                t.sort();
                // Uuid is Copy; no clone needed
                (*u, t)
            })
            .collect();
        res.sort();
        res
    }

    /// The full list of replica operations currently in storage.
    #[cfg(test)]
    pub(crate) fn operations(&mut self) -> Vec<ReplicaOp> {
        let mut txn = self.storage.txn().unwrap();
        // operations() already returns an owned Vec; the former
        // iter/clone/collect round-trip was redundant
        txn.operations().unwrap()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::server::test::TestServer;
    use crate::storage::{InMemoryStorage, ReplicaOp};
    use chrono::Utc;
    use pretty_assertions::assert_eq;
    use proptest::prelude::*;
    use uuid::Uuid;

    #[test]
    fn test_apply() {
        // this verifies that the operation is both applied and included in the list of
        // operations; more detailed tests are in the `apply` module.
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let op = SyncOp::Create { uuid };
        db.apply(op.clone()).unwrap();

        assert_eq!(db.sorted_tasks(), vec![(uuid, vec![]),]);
        assert_eq!(db.operations(), vec![ReplicaOp::Create { uuid }]);
    }

    #[test]
    fn test_add_undo_point() {
        let mut db = TaskDb::new_inmemory();
        db.add_undo_point().unwrap();
        assert_eq!(db.operations(), vec![ReplicaOp::UndoPoint]);
    }

    /// Make a fresh in-memory TaskDb.
    fn newdb() -> TaskDb {
        TaskDb::new(Box::new(InMemoryStorage::new()))
    }

    /// A single step in a generated scenario: an operation or a sync.
    #[derive(Debug)]
    enum Action {
        Op(SyncOp),
        Sync,
    }

    fn action_sequence_strategy() -> impl Strategy<Value = Vec<(Action, u8)>> {
        // Create, Update, Delete, or Sync on client 1, 2, .., followed by a round of syncs
        "([CUDS][123])*S1S2S3S1S2".prop_map(|seq| {
            let uuid = Uuid::parse_str("83a2f9ef-f455-4195-b92e-a54c161eebfc").unwrap();
            seq.as_bytes()
                .chunks(2)
                .map(|action_on| {
                    let action = match action_on[0] {
                        b'C' => Action::Op(SyncOp::Create { uuid }),
                        b'U' => Action::Op(SyncOp::Update {
                            uuid,
                            property: "title".into(),
                            value: Some("foo".into()),
                            timestamp: Utc::now(),
                        }),
                        b'D' => Action::Op(SyncOp::Delete { uuid }),
                        b'S' => Action::Sync,
                        _ => unreachable!(),
                    };
                    // the second character selects the db ('1' => index 0, etc.)
                    let on_db = action_on[1] - b'1';
                    (action, on_db)
                })
                .collect::<Vec<(Action, u8)>>()
        })
    }

    proptest! {
        #[test]
        // check that various sequences of operations on multiple db's do not get the db's into an
        // incompatible state. The main concern here is that there might be a sequence of create
        // and delete operations that results in a task existing in one TaskDb but not existing in
        // another. So, the generated sequences focus on a single task UUID.
        fn transform_sequences_of_operations(action_sequence in action_sequence_strategy()) {
            let mut server: Box<dyn Server> = Box::new(TestServer::new());
            let mut dbs = [newdb(), newdb(), newdb()];

            for (action, db) in action_sequence {
                println!("{:?} on db {}", action, db);
                let db = &mut dbs[db as usize];
                match action {
                    Action::Op(op) => {
                        if let Err(e) = db.apply(op) {
                            println!("  {:?} (ignored)", e);
                        }
                    },
                    Action::Sync => db.sync(&mut server, false).unwrap(),
                }
            }

            // after the final round of syncs, all three dbs must agree.
            // (this previously compared dbs[0] with itself — a tautology that
            // never checked db 0 against the others)
            assert_eq!(dbs[0].sorted_tasks(), dbs[1].sorted_tasks());
            assert_eq!(dbs[1].sorted_tasks(), dbs[2].sorted_tasks());
        }
    }
}

View file

@ -0,0 +1,178 @@
use crate::storage::{StorageTxn, TaskMap, VersionId};
use flate2::{read::ZlibDecoder, write::ZlibEncoder, Compression};
use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
use serde::ser::{Serialize, SerializeMap, Serializer};
use std::fmt;
use uuid::Uuid;
/// A newtype to wrap the result of [`crate::storage::StorageTxn::all_tasks`],
/// giving it Serialize/Deserialize impls (as a map keyed by task UUID).
pub(super) struct SnapshotTasks(Vec<(Uuid, TaskMap)>);
impl Serialize for SnapshotTasks {
    /// Serialize as a map of uuid -> TaskMap, with a length hint.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut map = serializer.serialize_map(Some(self.0.len()))?;
        self.0
            .iter()
            .try_for_each(|(k, v)| map.serialize_entry(k, v))?;
        map.end()
    }
}
/// Serde visitor collecting a map of uuid -> TaskMap into [`SnapshotTasks`].
struct TaskDbVisitor;

impl<'de> Visitor<'de> for TaskDbVisitor {
    type Value = SnapshotTasks;

    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("a map representing a task snapshot")
    }

    fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
    where
        M: MapAccess<'de>,
    {
        // pre-size from the hint when the format provides one
        let mut entries = Vec::with_capacity(access.size_hint().unwrap_or(0));
        while let Some(entry) = access.next_entry()? {
            entries.push(entry);
        }
        Ok(SnapshotTasks(entries))
    }
}
impl<'de> Deserialize<'de> for SnapshotTasks {
    // deserializes the same map format produced by the Serialize impl above
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_map(TaskDbVisitor)
    }
}
impl SnapshotTasks {
    /// JSON-encode the tasks and zlib-compress the result.
    pub(super) fn encode(&self) -> anyhow::Result<Vec<u8>> {
        let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
        serde_json::to_writer(&mut encoder, &self)?;
        Ok(encoder.finish()?)
    }

    /// Inverse of [`SnapshotTasks::encode`]: decompress and JSON-decode.
    pub(super) fn decode(snapshot: &[u8]) -> anyhow::Result<Self> {
        Ok(serde_json::from_reader(ZlibDecoder::new(snapshot))?)
    }

    /// Unwrap the inner task list.
    pub(super) fn into_inner(self) -> Vec<(Uuid, TaskMap)> {
        self.0
    }
}
/// Generate a snapshot (compressed, unencrypted) for the current state of the taskdb in the given
/// storage.
pub(super) fn make_snapshot(txn: &mut dyn StorageTxn) -> anyhow::Result<Vec<u8>> {
    SnapshotTasks(txn.all_tasks()?).encode()
}
/// Apply the given snapshot (compressed, unencrypted) to the taskdb's storage.
///
/// The storage must be empty (per `StorageTxn::is_empty`); otherwise an error
/// is returned and nothing is changed. This function does not commit the
/// transaction; the caller is responsible for that.
pub(super) fn apply_snapshot(
    txn: &mut dyn StorageTxn,
    version: VersionId,
    snapshot: &[u8],
) -> anyhow::Result<()> {
    let all_tasks = SnapshotTasks::decode(snapshot)?;

    // double-check emptiness
    if !txn.is_empty()? {
        anyhow::bail!("Cannot apply snapshot to a non-empty task database");
    }

    // consume the decoded tasks by value (into_iter replaces the former
    // needless `drain(..)` on an owned temporary Vec)
    for (uuid, task) in all_tasks.into_inner() {
        txn.set_task(uuid, task)?;
    }
    txn.set_base_version(version)?;
    Ok(())
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::storage::{InMemoryStorage, Storage, TaskMap};
    use pretty_assertions::assert_eq;

    // An empty snapshot serializes to an empty JSON object.
    #[test]
    fn test_serialize_empty() -> anyhow::Result<()> {
        let empty = SnapshotTasks(vec![]);
        assert_eq!(serde_json::to_vec(&empty)?, b"{}".to_owned());
        Ok(())
    }

    // Tasks serialize as a JSON map keyed by uuid.
    #[test]
    fn test_serialize_tasks() -> anyhow::Result<()> {
        let u = Uuid::new_v4();
        let m: TaskMap = vec![("description".to_owned(), "my task".to_owned())]
            .drain(..)
            .collect();
        let all_tasks = SnapshotTasks(vec![(u, m)]);
        assert_eq!(
            serde_json::to_vec(&all_tasks)?,
            format!("{{\"{}\":{{\"description\":\"my task\"}}}}", u).into_bytes(),
        );
        Ok(())
    }

    // Encode a snapshot from one storage and apply it to another: the task
    // data and base version carry over; operations and working set stay empty.
    #[test]
    fn test_round_trip() -> anyhow::Result<()> {
        let mut storage = InMemoryStorage::new();
        let version = Uuid::new_v4();
        let task1 = (
            Uuid::new_v4(),
            vec![("description".to_owned(), "one".to_owned())]
                .drain(..)
                .collect::<TaskMap>(),
        );
        let task2 = (
            Uuid::new_v4(),
            vec![("description".to_owned(), "two".to_owned())]
                .drain(..)
                .collect::<TaskMap>(),
        );
        {
            let mut txn = storage.txn()?;
            txn.set_task(task1.0, task1.1.clone())?;
            txn.set_task(task2.0, task2.1.clone())?;
            txn.commit()?;
        }
        let snap = {
            let mut txn = storage.txn()?;
            make_snapshot(txn.as_mut())?
        };
        // apply that snapshot to a fresh storage
        let mut storage = InMemoryStorage::new();
        {
            let mut txn = storage.txn()?;
            apply_snapshot(txn.as_mut(), version, &snap)?;
            txn.commit()?
        }
        {
            let mut txn = storage.txn()?;
            assert_eq!(txn.get_task(task1.0)?, Some(task1.1));
            assert_eq!(txn.get_task(task2.0)?, Some(task2.1));
            assert_eq!(txn.all_tasks()?.len(), 2);
            assert_eq!(txn.base_version()?, version);
            assert_eq!(txn.operations()?.len(), 0);
            assert_eq!(txn.get_working_set()?.len(), 1);
        }
        Ok(())
    }
}

View file

@ -0,0 +1,385 @@
use super::{apply, snapshot};
use crate::server::{AddVersionResult, GetVersionResult, Server, SnapshotUrgency, SyncOp};
use crate::storage::StorageTxn;
use crate::Error;
use log::{info, trace, warn};
use serde::{Deserialize, Serialize};
use std::str;
/// The payload of a version as exchanged with the server: just the list of
/// sync operations it contains.
#[derive(Serialize, Deserialize, Debug)]
struct Version {
    operations: Vec<SyncOp>,
}
/// Sync to the given server, pulling remote changes and pushing local changes.
///
/// Returns an error if the replica has diverged from the server (the server
/// insists twice on the same parent version), or if server-provided version
/// data cannot be decoded.
pub(super) fn sync(
    server: &mut Box<dyn Server>,
    txn: &mut dyn StorageTxn,
    avoid_snapshots: bool,
) -> anyhow::Result<()> {
    // if this taskdb is entirely empty, then start by getting and applying a snapshot
    if txn.is_empty()? {
        trace!("storage is empty; attempting to apply a snapshot");
        if let Some((version, snap)) = server.get_snapshot()? {
            snapshot::apply_snapshot(txn, version, snap.as_ref())?;
            trace!("applied snapshot for version {}", version);
        }
    }

    // retry synchronizing until the server accepts our version (this allows for races between
    // replicas trying to sync to the same server). If the server insists on the same base
    // version twice, then we have diverged.
    let mut requested_parent_version_id = None;
    loop {
        trace!("beginning sync outer loop");
        let mut base_version_id = txn.base_version()?;

        let mut local_ops: Vec<SyncOp> = txn
            .operations()?
            .drain(..)
            .filter_map(|op| op.into_sync())
            .collect();

        // first pull changes and "rebase" on top of them
        loop {
            trace!("beginning sync inner loop");
            if let GetVersionResult::Version {
                version_id,
                history_segment,
                ..
            } = server.get_child_version(base_version_id)?
            {
                // propagate decoding errors rather than panicking on
                // malformed server data (these were previously `.unwrap()`)
                let version_str = str::from_utf8(&history_segment)?;
                let version: Version = serde_json::from_str(version_str)?;

                // apply this version and update base_version in storage
                info!("applying version {:?} from server", version_id);
                apply_version(txn, &mut local_ops, version)?;
                txn.set_base_version(version_id)?;
                base_version_id = version_id;
            } else {
                info!("no child versions of {:?}", base_version_id);
                // at the moment, no more child versions, so we can try adding our own
                break;
            }
        }

        if local_ops.is_empty() {
            info!("no changes to push to server");
            // nothing to sync back to the server..
            break;
        }

        trace!("sending {} operations to the server", local_ops.len());

        // now make a version of our local changes and push those
        let new_version = Version {
            operations: local_ops,
        };
        let history_segment = serde_json::to_string(&new_version)?.into();
        info!("sending new version to server");
        let (res, snapshot_urgency) = server.add_version(base_version_id, history_segment)?;
        match res {
            AddVersionResult::Ok(new_version_id) => {
                info!("version {:?} received by server", new_version_id);
                txn.set_base_version(new_version_id)?;

                // make a snapshot if the server indicates it is urgent enough
                let base_urgency = if avoid_snapshots {
                    SnapshotUrgency::High
                } else {
                    SnapshotUrgency::Low
                };
                if snapshot_urgency >= base_urgency {
                    let snapshot = snapshot::make_snapshot(txn)?;
                    server.add_snapshot(new_version_id, snapshot)?;
                }

                break;
            }
            AddVersionResult::ExpectedParentVersion(parent_version_id) => {
                info!(
                    "new version rejected; must be based on {:?}",
                    parent_version_id
                );
                // same parent requested twice: we have diverged
                if let Some(requested) = requested_parent_version_id {
                    if parent_version_id == requested {
                        return Err(Error::OutOfSync.into());
                    }
                }
                requested_parent_version_id = Some(parent_version_id);
            }
        }
    }

    txn.set_operations(vec![])?;
    txn.commit()?;
    Ok(())
}
/// Rebase `local_ops` on top of the operations in `version`, applying the server's
/// operations to local storage in the process.
///
/// On return, `local_ops` has been replaced with the transformed local operations,
/// ready to be applied on top of the newly-applied server version.
fn apply_version(
    txn: &mut dyn StorageTxn,
    local_ops: &mut Vec<SyncOp>,
    mut version: Version,
) -> anyhow::Result<()> {
    // The situation here is that the server has already applied all server operations, and we
    // have already applied all local operations, so states have diverged by several
    // operations. We need to figure out what operations to apply locally and on the server in
    // order to return to the same state.
    //
    // Operational transforms provide this on an operation-by-operation basis. To break this
    // down, we treat each server operation individually, in order. For each such operation,
    // we start in this state:
    //
    //
    //      base state-*
    //                / \-server op
    //               *   *
    //     local    / \ /
    //     ops     *   *
    //            / \ / new
    //           *   * local
    //   local  / \ / ops
    //   state-*   *
    //      new-\ /
    // server op *-new local state
    //
    // This is slightly complicated by the fact that the transform function can return None,
    // indicating no operation is required. If this happens for a local op, we can just omit
    // it. If it happens for server op, then we must copy the remaining local ops.
    for server_op in version.operations.drain(..) {
        trace!(
            "rebasing local operations onto server operation {:?}",
            server_op
        );
        let mut new_local_ops = Vec::with_capacity(local_ops.len());
        // the server op is transformed repeatedly against each local op; it may be
        // "consumed" (become None) partway through, in which case the remaining local
        // ops are copied through unchanged
        let mut svr_op = Some(server_op);
        for local_op in local_ops.drain(..) {
            if let Some(o) = svr_op {
                let (new_server_op, new_local_op) = SyncOp::transform(o, local_op.clone());
                trace!("local operation {:?} -> {:?}", local_op, new_local_op);
                svr_op = new_server_op;
                if let Some(o) = new_local_op {
                    new_local_ops.push(o);
                }
            } else {
                trace!(
                    "local operation {:?} unchanged (server operation consumed)",
                    local_op
                );
                new_local_ops.push(local_op);
            }
        }
        // whatever remains of the server op after all transformations is applied to local
        // storage; an invalid operation is logged and skipped rather than failing the sync
        if let Some(o) = svr_op {
            if let Err(e) = apply::apply_op(txn, &o) {
                warn!("Invalid operation when syncing: {} (ignored)", e);
            }
        }
        *local_ops = new_local_ops;
    }
    Ok(())
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::server::{test::TestServer, SyncOp};
    use crate::storage::InMemoryStorage;
    use crate::taskdb::{snapshot::SnapshotTasks, TaskDb};
    use chrono::Utc;
    use pretty_assertions::assert_eq;
    use uuid::Uuid;

    /// Make a fresh TaskDb backed by in-memory storage.
    fn newdb() -> TaskDb {
        TaskDb::new(Box::new(InMemoryStorage::new()))
    }

    /// Two replicas make concurrent changes (including to the same task) and
    /// converge to the same state after synchronizing via a shared server.
    #[test]
    fn test_sync() -> anyhow::Result<()> {
        let mut server: Box<dyn Server> = TestServer::new().server();

        let mut db1 = newdb();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();

        let mut db2 = newdb();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();

        // make some changes in parallel to db1 and db2..
        let uuid1 = Uuid::new_v4();
        db1.apply(SyncOp::Create { uuid: uuid1 }).unwrap();
        db1.apply(SyncOp::Update {
            uuid: uuid1,
            property: "title".into(),
            value: Some("my first task".into()),
            timestamp: Utc::now(),
        })
        .unwrap();
        let uuid2 = Uuid::new_v4();
        db2.apply(SyncOp::Create { uuid: uuid2 }).unwrap();
        db2.apply(SyncOp::Update {
            uuid: uuid2,
            property: "title".into(),
            value: Some("my second task".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        // and synchronize those around
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());

        // now make updates to the same task on both sides
        db1.apply(SyncOp::Update {
            uuid: uuid2,
            property: "priority".into(),
            value: Some("H".into()),
            timestamp: Utc::now(),
        })
        .unwrap();
        db2.apply(SyncOp::Update {
            uuid: uuid2,
            property: "project".into(),
            value: Some("personal".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        // and synchronize those around
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());

        Ok(())
    }

    /// A delete-then-recreate on one replica racing with an update on the other
    /// still converges after synchronization.
    #[test]
    fn test_sync_create_delete() -> anyhow::Result<()> {
        let mut server: Box<dyn Server> = TestServer::new().server();

        let mut db1 = newdb();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();

        let mut db2 = newdb();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();

        // create and update a task..
        let uuid = Uuid::new_v4();
        db1.apply(SyncOp::Create { uuid }).unwrap();
        db1.apply(SyncOp::Update {
            uuid,
            property: "title".into(),
            value: Some("my first task".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        // and synchronize those around
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());

        // delete and re-create the task on db1
        db1.apply(SyncOp::Delete { uuid }).unwrap();
        db1.apply(SyncOp::Create { uuid }).unwrap();
        db1.apply(SyncOp::Update {
            uuid,
            property: "title".into(),
            value: Some("my second task".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        // and on db2, update a property of the task
        db2.apply(SyncOp::Update {
            uuid,
            property: "project".into(),
            value: Some("personal".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());

        Ok(())
    }

    /// When the server signals high snapshot urgency, sync uploads a snapshot,
    /// and a brand-new replica can initialize from that snapshot alone.
    #[test]
    fn test_sync_add_snapshot_start_with_snapshot() -> anyhow::Result<()> {
        let mut test_server = TestServer::new();
        let mut server: Box<dyn Server> = test_server.server();

        let mut db1 = newdb();
        let uuid = Uuid::new_v4();
        db1.apply(SyncOp::Create { uuid })?;
        db1.apply(SyncOp::Update {
            uuid,
            property: "title".into(),
            value: Some("my first task".into()),
            timestamp: Utc::now(),
        })?;

        test_server.set_snapshot_urgency(SnapshotUrgency::High);
        sync(&mut server, db1.storage.txn()?.as_mut(), false)?;

        // assert that a snapshot was added
        let base_version = db1.storage.txn()?.base_version()?;
        let (v, s) = test_server
            .snapshot()
            .ok_or_else(|| anyhow::anyhow!("no snapshot"))?;
        assert_eq!(v, base_version);

        let tasks = SnapshotTasks::decode(&s)?.into_inner();
        assert_eq!(tasks[0].0, uuid);

        // update the taskdb and sync again
        db1.apply(SyncOp::Update {
            uuid,
            property: "title".into(),
            value: Some("my first task, updated".into()),
            timestamp: Utc::now(),
        })?;
        sync(&mut server, db1.storage.txn()?.as_mut(), false)?;

        // delete the first version, so that db2 *must* initialize from
        // the snapshot
        test_server.delete_version(Uuid::nil());

        // sync to a new DB and check that we got the expected results
        let mut db2 = newdb();
        sync(&mut server, db2.storage.txn()?.as_mut(), false)?;

        let task = db2.get_task(uuid)?.unwrap();
        assert_eq!(task.get("title").unwrap(), "my first task, updated");

        Ok(())
    }

    /// With `avoid_snapshots` set, a low-urgency snapshot request is ignored.
    #[test]
    fn test_sync_avoids_snapshot() -> anyhow::Result<()> {
        let test_server = TestServer::new();
        let mut server: Box<dyn Server> = test_server.server();

        let mut db1 = newdb();
        let uuid = Uuid::new_v4();
        db1.apply(SyncOp::Create { uuid }).unwrap();

        test_server.set_snapshot_urgency(SnapshotUrgency::Low);
        sync(&mut server, db1.storage.txn()?.as_mut(), true).unwrap();

        // assert that a snapshot was not added, because we indicated
        // we wanted to avoid snapshots and it was only low urgency
        assert_eq!(test_server.snapshot(), None);

        Ok(())
    }
}

View file

@ -0,0 +1,117 @@
use super::apply;
use crate::storage::{ReplicaOp, StorageTxn};
use log::{debug, trace};
/// Undo local operations until an UndoPoint.
///
/// Each operation preceding the most recent `UndoPoint` is reversed and the reversal is
/// applied to storage; the undone operations (and the `UndoPoint` itself) are removed from
/// the operation log. Returns true if any reversed operation was applied.
pub(super) fn undo(txn: &mut dyn StorageTxn) -> anyhow::Result<bool> {
    let mut local_ops = txn.operations()?;
    // if there are no operations at all, nothing is popped and storage is left untouched
    let had_ops = !local_ops.is_empty();
    let mut applied = false;

    loop {
        let op = match local_ops.pop() {
            Some(op) => op,
            None => break,
        };
        // stop (and discard the marker) once the undo point is reached
        if op == ReplicaOp::UndoPoint {
            break;
        }
        debug!("Reversing operation {:?}", op);
        for rev_op in op.reverse_ops() {
            trace!("Applying reversed operation {:?}", rev_op);
            apply::apply_op(txn, &rev_op)?;
            applied = true;
        }
    }

    // persist the truncated operation log whenever anything was popped
    if had_ops {
        txn.set_operations(local_ops)?;
        txn.commit()?;
    }

    Ok(applied)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::server::SyncOp;
    use crate::taskdb::TaskDb;
    use chrono::Utc;
    use pretty_assertions::assert_eq;
    use uuid::Uuid;

    /// Build up state, set an undo point, make more changes, and verify that undo
    /// restores the pre-undo-point state; a second undo empties the db entirely.
    #[test]
    fn test_apply_create() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        let timestamp = Utc::now();

        // apply a few ops, capture the DB state, make an undo point, and then apply a few more
        // ops.
        db.apply(SyncOp::Create { uuid: uuid1 })?;
        db.apply(SyncOp::Update {
            uuid: uuid1,
            property: "prop".into(),
            value: Some("v1".into()),
            timestamp,
        })?;
        db.apply(SyncOp::Create { uuid: uuid2 })?;
        db.apply(SyncOp::Update {
            uuid: uuid2,
            property: "prop".into(),
            value: Some("v2".into()),
            timestamp,
        })?;
        db.apply(SyncOp::Update {
            uuid: uuid2,
            property: "prop2".into(),
            value: Some("v3".into()),
            timestamp,
        })?;

        let db_state = db.sorted_tasks();

        db.add_undo_point()?;
        db.apply(SyncOp::Delete { uuid: uuid1 })?;
        db.apply(SyncOp::Update {
            uuid: uuid2,
            property: "prop".into(),
            value: None,
            timestamp,
        })?;
        db.apply(SyncOp::Update {
            uuid: uuid2,
            property: "prop2".into(),
            value: Some("new-value".into()),
            timestamp,
        })?;

        // 5 ops before the undo point + the UndoPoint itself + 3 ops after = 9
        assert_eq!(db.operations().len(), 9);

        {
            let mut txn = db.storage.txn()?;
            assert!(undo(txn.as_mut())?);
        }

        // undo took db back to the snapshot
        assert_eq!(db.operations().len(), 5);
        assert_eq!(db.sorted_tasks(), db_state);

        {
            let mut txn = db.storage.txn()?;
            assert!(undo(txn.as_mut())?);
        }

        // empty db
        assert_eq!(db.operations().len(), 0);
        assert_eq!(db.sorted_tasks(), vec![]);

        {
            let mut txn = db.storage.txn()?;
            // nothing left to undo, so undo() returns false
            assert!(!undo(txn.as_mut())?);
        }

        Ok(())
    }
}

View file

@ -0,0 +1,167 @@
use crate::storage::{StorageTxn, TaskMap};
use std::collections::HashSet;
/// Rebuild the working set using a function to identify tasks that should be in the set. This
/// renumbers the existing working-set tasks to eliminate gaps, and also adds any tasks that
/// are not already in the working set but should be. The rebuild occurs in a single
/// transaction against the storage backend.
///
/// * `in_working_set` - predicate over a task's TaskMap, returning true for tasks that
///   belong in the working set
/// * `renumber` - if true, compress remaining entries downward (renumbering them); if
///   false, keep existing indexes stable and only clear removed entries
pub fn rebuild<F>(txn: &mut dyn StorageTxn, in_working_set: F, renumber: bool) -> anyhow::Result<()>
where
    F: Fn(&TaskMap) -> bool,
{
    let mut new_ws = vec![None]; // index 0 is always None
    let mut seen = HashSet::new();

    // The goal here is for existing working-set items to be "compressed" down to index 1, so
    // we begin by scanning the current working set and inserting any tasks that should still
    // be in the set into new_ws, implicitly dropping any tasks that are no longer in the
    // working set.
    for elt in txn.get_working_set()?.drain(1..) {
        if let Some(uuid) = elt {
            if let Some(task) = txn.get_task(uuid)? {
                if in_working_set(&task) {
                    new_ws.push(Some(uuid));
                    seen.insert(uuid);
                    continue;
                }
            }
        }
        // if we are not renumbering, then insert a blank working-set entry here
        if !renumber {
            new_ws.push(None);
        }
    }

    // if renumbering, clear the working set and re-add
    if renumber {
        txn.clear_working_set()?;
        for elt in new_ws.drain(1..new_ws.len()).flatten() {
            txn.add_to_working_set(elt)?;
        }
    } else {
        // ..otherwise, just clear the None items determined above from the working set
        for (i, elt) in new_ws.iter().enumerate().skip(1) {
            if elt.is_none() {
                txn.set_working_set_item(i, None)?;
            }
        }
    }

    // Now go hunting for tasks that should be in this list but are not, adding them at the
    // end of the list, whether renumbering or not
    for (uuid, task) in txn.all_tasks()? {
        if !seen.contains(&uuid) && in_working_set(&task) {
            txn.add_to_working_set(uuid)?;
        }
    }

    txn.commit()?;
    Ok(())
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::server::SyncOp;
    use crate::taskdb::TaskDb;
    use chrono::Utc;
    use uuid::Uuid;

    #[test]
    fn rebuild_working_set_renumber() -> anyhow::Result<()> {
        rebuild_working_set(true)
    }

    #[test]
    fn rebuild_working_set_no_renumber() -> anyhow::Result<()> {
        rebuild_working_set(false)
    }

    /// Shared test body: set up five tasks with a mix of pending status and
    /// working-set membership, rebuild the working set, and verify the result
    /// for both renumbering modes.
    fn rebuild_working_set(renumber: bool) -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let mut uuids = vec![];
        uuids.push(Uuid::new_v4());
        println!("uuids[0]: {:?} - pending, not in working set", uuids[0]);
        uuids.push(Uuid::new_v4());
        println!("uuids[1]: {:?} - pending, in working set", uuids[1]);
        uuids.push(Uuid::new_v4());
        println!("uuids[2]: {:?} - not pending, not in working set", uuids[2]);
        uuids.push(Uuid::new_v4());
        println!("uuids[3]: {:?} - not pending, in working set", uuids[3]);
        uuids.push(Uuid::new_v4());
        println!("uuids[4]: {:?} - pending, in working set", uuids[4]);

        // add everything to the TaskDb
        for uuid in &uuids {
            db.apply(SyncOp::Create { uuid: *uuid })?;
        }
        for i in &[0usize, 1, 4] {
            db.apply(SyncOp::Update {
                // Uuid is Copy, so no clone is needed here
                uuid: uuids[*i],
                property: String::from("status"),
                value: Some("pending".into()),
                timestamp: Utc::now(),
            })?;
        }

        // set the existing working_set as we want it
        {
            let mut txn = db.storage.txn()?;
            txn.clear_working_set()?;
            for i in &[1usize, 3, 4] {
                txn.add_to_working_set(uuids[*i])?;
            }
            txn.commit()?;
        }
        assert_eq!(
            db.working_set()?,
            vec![None, Some(uuids[1]), Some(uuids[3]), Some(uuids[4])]
        );

        rebuild(
            db.storage.txn()?.as_mut(),
            |t| {
                if let Some(status) = t.get("status") {
                    status == "pending"
                } else {
                    false
                }
            },
            renumber,
        )?;

        let exp = if renumber {
            // uuids[1] and uuids[4] are already in the working set, so are compressed
            // to the top, and then uuids[0] is added.
            vec![None, Some(uuids[1]), Some(uuids[4]), Some(uuids[0])]
        } else {
            // uuids[1] and uuids[4] are already in the working set, at indexes 1 and 3,
            // and then uuids[0] is added.
            vec![
                None,
                Some(uuids[1]),
                None,
                Some(uuids[4]),
                Some(uuids[0]),
            ]
        };
        assert_eq!(db.working_set()?, exp);

        Ok(())
    }
}

View file

@ -0,0 +1,61 @@
use std::convert::TryInto;
use uuid::Uuid;
/// A representation of a UUID as a key. This is just a newtype wrapping the 128-bit packed form
/// of a UUID.
///
/// The derived `Ord`/`PartialOrd` compare the packed bytes lexicographically.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) struct Key(uuid::Bytes);
impl From<&[u8]> for Key {
    /// Convert a byte slice into a Key.
    ///
    /// Panics if the slice is not exactly 16 bytes long.
    fn from(bytes: &[u8]) -> Key {
        Key(bytes.try_into().expect("expected 16 bytes"))
    }
}
impl From<&Uuid> for Key {
    /// Pack a borrowed UUID into a Key.
    fn from(uuid: &Uuid) -> Key {
        // return the newtype directly; the former `let key = ...; key` was a
        // redundant binding (clippy::let_and_return)
        Key(*uuid.as_bytes())
    }
}
impl From<Uuid> for Key {
    /// Pack an owned UUID into a Key.
    fn from(uuid: Uuid) -> Key {
        // return the newtype directly; the former `let key = ...; key` was a
        // redundant binding (clippy::let_and_return)
        Key(*uuid.as_bytes())
    }
}
impl From<Key> for Uuid {
    /// Unpack a Key back into a Uuid.
    fn from(key: Key) -> Uuid {
        Uuid::from_bytes(key.0)
    }
}
impl AsRef<[u8]> for Key {
    /// Borrow the packed UUID bytes as a slice.
    fn as_ref(&self) -> &[u8] {
        // unsized coercion from &[u8; 16] to &[u8]
        &self.0
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use pretty_assertions::assert_eq;

    /// Round-trip: a 16-byte slice becomes a Key, and converts back to the expected Uuid.
    #[test]
    fn test_from_bytes() {
        let k: Key = (&[1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16][..]).into();
        let u: Uuid = k.into();
        assert_eq!(
            u,
            Uuid::parse_str("01020304-0506-0708-090a-0b0c0d0e0f10").unwrap()
        );
    }

    /// Converting a slice of the wrong length panics (see `From<&[u8]>`).
    #[test]
    #[should_panic]
    fn test_from_bytes_bad_len() {
        let _: Key = (&[1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..]).into();
    }
}

View file

@ -0,0 +1,154 @@
use std::collections::HashMap;
use uuid::Uuid;
/// A WorkingSet represents a snapshot of the working set from a replica.
///
/// A replica's working set is a mapping from small integers to task uuids for all pending tasks.
/// The small integers are meant to be stable, easily-typed identifiers for users to interact with
/// important tasks.
///
/// IMPORTANT: the content of the working set may change at any time that a DB transaction is not
/// in progress, and the data in this type will not be updated automatically. It is up to the
/// caller to decide how long to keep this value, and how much to trust the accuracy of its
/// contents. In practice, the answers are usually "a few milliseconds" and treating unexpected
/// results as non-fatal.
pub struct WorkingSet {
    /// 1-indexed mapping from working-set index to uuid; element 0, if present, is None.
    by_index: Vec<Option<Uuid>>,
    /// Reverse mapping from uuid to working-set index, derived from `by_index`.
    by_uuid: HashMap<Uuid, usize>,
}
impl WorkingSet {
/// Create a new WorkingSet. Typically this is acquired via `replica.working_set()`
pub(crate) fn new(by_index: Vec<Option<Uuid>>) -> Self {
let mut by_uuid = HashMap::new();
// working sets are 1-indexed, so element 0 should always be None
assert!(by_index.is_empty() || by_index[0].is_none());
for (index, uuid) in by_index.iter().enumerate() {
if let Some(uuid) = uuid {
by_uuid.insert(*uuid, index);
}
}
Self { by_index, by_uuid }
}
/// Get the "length" of the working set: the total number of uuids in the set.
pub fn len(&self) -> usize {
self.by_index.iter().filter(|e| e.is_some()).count()
}
/// Get the largest index in the working set, or zero if the set is empty.
pub fn largest_index(&self) -> usize {
self.by_index.len().saturating_sub(1)
}
/// True if the length is zero
pub fn is_empty(&self) -> bool {
self.by_index.iter().all(|e| e.is_none())
}
/// Get the uuid with the given index, if any exists.
pub fn by_index(&self, index: usize) -> Option<Uuid> {
if let Some(Some(uuid)) = self.by_index.get(index) {
Some(*uuid)
} else {
None
}
}
/// Get the index for the given uuid, if any
pub fn by_uuid(&self, uuid: Uuid) -> Option<usize> {
self.by_uuid.get(&uuid).copied()
}
/// Iterate over pairs (index, uuid), in order by index.
pub fn iter(&self) -> impl Iterator<Item = (usize, Uuid)> + '_ {
self.by_index
.iter()
.enumerate()
.filter_map(|(index, uuid)| uuid.as_ref().map(|uuid| (index, *uuid)))
}
}
#[cfg(test)]
mod test {
    use super::*;
    use pretty_assertions::assert_eq;

    /// Build a five-slot working set with uuids at indexes 1 and 3.
    fn make() -> (Uuid, Uuid, WorkingSet) {
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        (
            uuid1,
            uuid2,
            WorkingSet::new(vec![None, Some(uuid1), None, Some(uuid2), None]),
        )
    }

    #[test]
    fn test_new() {
        let (_, uuid2, ws) = make();
        assert_eq!(ws.by_index[3], Some(uuid2));
        assert_eq!(ws.by_uuid.get(&uuid2), Some(&3));
    }

    #[test]
    fn test_len_and_is_empty() {
        // use assert!/assert!(!..) rather than comparing against bool literals
        // (clippy::bool_assert_comparison)
        let (_, _, ws) = make();
        assert_eq!(ws.len(), 2);
        assert!(!ws.is_empty());

        let ws = WorkingSet::new(vec![]);
        assert_eq!(ws.len(), 0);
        assert!(ws.is_empty());

        let ws = WorkingSet::new(vec![None, None, None]);
        assert_eq!(ws.len(), 0);
        assert!(ws.is_empty());
    }

    #[test]
    fn test_largest_index() {
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();

        let ws = WorkingSet::new(vec![]);
        assert_eq!(ws.largest_index(), 0);

        let ws = WorkingSet::new(vec![None, Some(uuid1)]);
        assert_eq!(ws.largest_index(), 1);

        let ws = WorkingSet::new(vec![None, Some(uuid1), None, Some(uuid2)]);
        assert_eq!(ws.largest_index(), 3);

        let ws = WorkingSet::new(vec![None, Some(uuid1), None, Some(uuid2), None]);
        assert_eq!(ws.largest_index(), 4);
    }

    #[test]
    fn test_by_index() {
        let (uuid1, uuid2, ws) = make();
        assert_eq!(ws.by_index(0), None);
        assert_eq!(ws.by_index(1), Some(uuid1));
        assert_eq!(ws.by_index(2), None);
        assert_eq!(ws.by_index(3), Some(uuid2));
        assert_eq!(ws.by_index(4), None);
        assert_eq!(ws.by_index(100), None); // past the end of the vector
    }

    #[test]
    fn test_by_uuid() {
        let (uuid1, uuid2, ws) = make();
        let nosuch = Uuid::new_v4();
        assert_eq!(ws.by_uuid(uuid1), Some(1));
        assert_eq!(ws.by_uuid(uuid2), Some(3));
        assert_eq!(ws.by_uuid(nosuch), None);
    }

    #[test]
    fn test_iter() {
        let (uuid1, uuid2, ws) = make();
        assert_eq!(ws.iter().collect::<Vec<_>>(), vec![(1, uuid1), (3, uuid2)]);
    }
}