Add support for cloud sync, specifically GCP (#3223)

* Add support for cloud sync, specifically GCP

This adds generic support for sync to cloud services, with specific
support for GCP. Adding others -- so long as they support a
compare-and-set operation -- should be comparatively straightforward.

The cloud support includes cleanup of unnecessary data, and should keep
total space usage roughly proportional to the number of tasks.

Co-authored-by: ryneeverett <ryneeverett@gmail.com>
Dustin J. Mitchell 2024-01-21 12:36:37 -05:00 committed by GitHub
parent 6f1c16fecd
commit 9566c929e2
36 changed files with 4012 additions and 401 deletions


@@ -29,7 +29,7 @@ jobs:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: "1.65" # MSRV
+          toolchain: "1.70.0" # MSRV
           override: true
       - uses: actions-rs/cargo@v1.0.3
@@ -98,7 +98,7 @@ jobs:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: "1.65" # MSRV
+          toolchain: "1.70.0" # MSRV
           override: true
      - uses: actions-rs/cargo@v1.0.3


@@ -49,7 +49,7 @@ jobs:
     strategy:
       matrix:
         rust:
-          - "1.65" # MSRV
+          - "1.70.0" # MSRV
           - "stable"
         os:
          - ubuntu-latest

Cargo.lock (generated): 741 lines changed

File diff suppressed because it is too large.


@@ -27,12 +27,13 @@ env_logger = "^0.10.0"
 ffizz-header = "0.5"
 flate2 = "1"
 futures = "^0.3.25"
+google-cloud-storage = { version = "0.15.0", default-features = false, features = ["rustls-tls", "auth"] }
 lazy_static = "1"
 libc = "0.2.136"
 log = "^0.4.17"
 pretty_assertions = "1"
 proptest = "^1.4.0"
-ring = "0.16"
+ring = "0.17"
 rstest = "0.17"
 rusqlite = { version = "0.29", features = ["bundled"] }
 serde_json = "^1.0"
@@ -40,6 +41,7 @@ serde = { version = "^1.0.147", features = ["derive"] }
 strum = "0.25"
 strum_macros = "0.25"
 tempfile = "3"
+tokio = { version = "1", features = ["rt-multi-thread"] }
 thiserror = "1.0"
-ureq = "^2.9.0"
+ureq = { version = "^2.9.0", features = ["tls"] }
 uuid = { version = "^1.7.0", features = ["serde", "v4"] }


@@ -21,8 +21,35 @@ NOTE: A side-effect of synchronization is that once changes have been
 synchronized, they cannot be undone. This means that each time synchronization
 is run, it is no longer possible to undo previous operations.
+.SH MANAGING SYNCHRONIZATION
+.SS Adding a Replica
+To add a new replica, configure a new, empty replica identically to
+the existing replica, and run `task sync`.
+.SS When to Synchronize
+Taskwarrior can perform a sync operation at every garbage collection (gc) run.
+This is the default, and is appropriate for local synchronization.
+For synchronization to a server, a better solution is to run
+    $ task sync
+periodically, such as via
+.BR cron (8) .
 .SH CONFIGURATION
+Taskwarrior provides several options for synchronizing your tasks:
+- To a server specifically designed to handle Taskwarrior data.
+- To a cloud storage provider. Currently only GCP is supported.
+- To a local, on-disk file.
+.SS Sync Server
 To synchronize your tasks to a sync server, you will need the following
 information from the server administrator:
@@ -43,22 +70,20 @@ Configure Taskwarrior with these details:
     $ task config sync.server.client_id <client_id>
     $ task config sync.server.encryption_secret <encryption_secret>
-.SS Adding a Replica
-To add a new replica, configure a new, empty replica identically to
-the existing replica, and run `task sync`.
-.SS When to Synchronize
-Taskwarrior can perform a sync operation at every garbage collection (gc) run.
-This is the default, and is appropriate for local synchronization.
-For synchronization to a server, a better solution is to run
-    $ task sync
-periodically, such as via
-.BR cron (8) .
+.SS Google Cloud Platform
+To synchronize your tasks to GCP, use the GCP Console to create a new project,
+and within that project a new Cloud Storage bucket. The default settings for
+the bucket are adequate.
+Authenticate to the project with:
+    $ gcloud config set project $PROJECT_NAME
+    $ gcloud auth application-default login
+Then configure Taskwarrior with:
+    $ task config sync.gcp.bucket <bucket-name>
 .SS Local Synchronization


@@ -56,9 +56,10 @@ target_link_libraries (task_executable task tc tc-rust commands tc columns libsh
 target_link_libraries (calc_executable task tc tc-rust commands tc columns libshared task libshared ${TASK_LIBRARIES})
 target_link_libraries (lex_executable task tc tc-rust commands tc columns libshared task libshared ${TASK_LIBRARIES})
 if (DARWIN)
-  target_link_libraries (task_executable "-framework CoreFoundation -framework Security")
-  target_link_libraries (calc_executable "-framework CoreFoundation -framework Security")
-  target_link_libraries (lex_executable "-framework CoreFoundation -framework Security")
+  # SystemConfiguration is required by Rust libraries like reqwest, to get proxy configuration.
+  target_link_libraries (task_executable "-framework CoreFoundation -framework Security -framework SystemConfiguration")
+  target_link_libraries (calc_executable "-framework CoreFoundation -framework Security -framework SystemConfiguration")
+  target_link_libraries (lex_executable "-framework CoreFoundation -framework Security -framework SystemConfiguration")
 endif (DARWIN)
 set_property (TARGET task_executable PROPERTY OUTPUT_NAME "task")
@@ -71,4 +72,4 @@ set_property (TARGET lex_executable PROPERTY OUTPUT_NAME "lex")
 #SET(CMAKE_BUILD_TYPE gcov)
 #SET(CMAKE_CXX_FLAGS_GCOV "--coverage")
 #SET(CMAKE_C_FLAGS_GCOV "--coverage")
 #SET(CMAKE_EXE_LINKER_FLAGS_GCOV "--coverage")


@@ -283,6 +283,7 @@ std::string configurationDefaults =
   "#sync.server.encryption_secret # Encryption secret for sync to a server\n"
   "#sync.server.origin # Origin of the sync server\n"
   "#sync.local.server_dir # Directory for local sync\n"
+  "#sync.gcp.bucket # Bucket for sync to GCP\n"
   "\n"
   "# Aliases - alternate names for commands\n"
   "alias.rm=delete # Alias for the delete command\n"


@@ -193,6 +193,7 @@ int CmdShow::execute (std::string& output)
     " sugar"
     " summary.all.projects"
     " sync.local.server_dir"
+    " sync.gcp.bucket"
     " sync.server.client_id"
     " sync.server.encryption_secret"
     " sync.server.origin"


@@ -63,17 +63,32 @@ int CmdSync::execute (std::string& output)
   // If no server is set up, quit.
   std::string origin = Context::getContext ().config.get ("sync.server.origin");
-  std::string client_id = Context::getContext ().config.get ("sync.server.client_id");
-  std::string encryption_secret = Context::getContext ().config.get ("sync.server.encryption_secret");
   std::string server_dir = Context::getContext ().config.get ("sync.local.server_dir");
+  std::string gcp_bucket = Context::getContext ().config.get ("sync.gcp.bucket");
   if (server_dir != "") {
-    server = tc::Server (server_dir);
+    server = tc::Server::new_local (server_dir);
     server_ident = server_dir;
-  } else if (origin != "" && client_id != "" && encryption_secret != "") {
-    server = tc::Server (origin, client_id, encryption_secret);
-    server_ident = origin;
+  } else if (gcp_bucket != "") {
+    std::string encryption_secret = Context::getContext ().config.get ("sync.gcp.encryption_secret");
+    if (encryption_secret == "") {
+      throw std::string ("sync.gcp.encryption_secret is required");
+    }
+    server = tc::Server::new_gcp (gcp_bucket, encryption_secret);
+    std::ostringstream os;
+    os << "GCP bucket " << gcp_bucket;
+    server_ident = os.str();
+  } else if (origin != "") {
+    std::string client_id = Context::getContext ().config.get ("sync.server.client_id");
+    std::string encryption_secret = Context::getContext ().config.get ("sync.server.encryption_secret");
+    if (client_id == "" || encryption_secret == "") {
+      throw std::string ("sync.server.client_id and encryption_secret are required");
+    }
+    server = tc::Server::new_sync (origin, client_id, encryption_secret);
+    std::ostringstream os;
+    os << "Sync server at " << origin;
+    server_ident = os.str();
   } else {
-    throw std::string ("Neither sync.server nor sync.local are configured.");
+    throw std::string ("No sync.* settings are configured.");
   }
   std::stringstream out;


@@ -32,7 +32,8 @@
 using namespace tc::ffi;
 ////////////////////////////////////////////////////////////////////////////////
-tc::Server::Server (const std::string &server_dir)
+tc::Server
+tc::Server::new_local (const std::string &server_dir)
 {
   TCString tc_server_dir = tc_string_borrow (server_dir.c_str ());
   TCString error;
@@ -43,18 +44,17 @@ tc::Server::Server (const std::string &server_dir)
     tc_string_free (&error);
     throw errmsg;
   }
-  inner = unique_tcserver_ptr (
+  return Server (unique_tcserver_ptr (
     tcserver,
-    [](TCServer* rep) { tc_server_free (rep); });
+    [](TCServer* rep) { tc_server_free (rep); }));
 }
 ////////////////////////////////////////////////////////////////////////////////
-tc::Server::Server (const std::string &origin, const std::string &client_id, const std::string &encryption_secret)
+tc::Server
+tc::Server::new_sync (const std::string &origin, const std::string &client_id, const std::string &encryption_secret)
 {
   TCString tc_origin = tc_string_borrow (origin.c_str ());
   TCString tc_client_id = tc_string_borrow (client_id.c_str ());
   TCString tc_encryption_secret = tc_string_borrow (encryption_secret.c_str ());
   TCUuid tc_client_uuid;
@@ -65,16 +65,36 @@ tc::Server::Server (const std::string &origin, const std::string &client_id, con
   }
   TCString error;
-  auto tcserver = tc_server_new_remote (tc_origin, tc_client_uuid, tc_encryption_secret, &error);
+  auto tcserver = tc_server_new_sync (tc_origin, tc_client_uuid, tc_encryption_secret, &error);
   if (!tcserver) {
     auto errmsg = format ("Could not configure connection to server at {1}: {2}",
       origin, tc_string_content (&error));
     tc_string_free (&error);
     throw errmsg;
   }
-  inner = unique_tcserver_ptr (
+  return Server (unique_tcserver_ptr (
     tcserver,
-    [](TCServer* rep) { tc_server_free (rep); });
+    [](TCServer* rep) { tc_server_free (rep); }));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+tc::Server
+tc::Server::new_gcp (const std::string &bucket, const std::string &encryption_secret)
+{
+  TCString tc_bucket = tc_string_borrow (bucket.c_str ());
+  TCString tc_encryption_secret = tc_string_borrow (encryption_secret.c_str ());
+  TCString error;
+  auto tcserver = tc_server_new_gcp (tc_bucket, tc_encryption_secret, &error);
+  if (!tcserver) {
+    auto errmsg = format ("Could not configure connection to GCP bucket {1}: {2}",
+      bucket, tc_string_content (&error));
+    tc_string_free (&error);
+    throw errmsg;
+  }
+  return Server (unique_tcserver_ptr (
+    tcserver,
+    [](TCServer* rep) { tc_server_free (rep); }));
 }
 ////////////////////////////////////////////////////////////////////////////////


@@ -43,7 +43,7 @@ namespace tc {
   // Server wraps the TCServer type, managing its memory, errors, and so on.
   //
-  // Except as noted, method names match the suffix to `tc_replica_..`.
+  // Except as noted, method names match the suffix to `tc_server_..`.
   class Server
   {
   public:
@@ -51,10 +51,13 @@
     Server () = default;
     // Construct a local server (tc_server_new_local).
-    Server (const std::string& server_dir);
-    // Construct a remote server (tc_server_new_remote).
-    Server (const std::string &origin, const std::string &client_id, const std::string &encryption_secret);
+    static Server new_local (const std::string& server_dir);
+    // Construct a remote server (tc_server_new_sync).
+    static Server new_sync (const std::string &origin, const std::string &client_id, const std::string &encryption_secret);
+    // Construct a GCP server (tc_server_new_gcp).
+    static Server new_gcp (const std::string &bucket, const std::string &encryption_secret);
     // This object "owns" inner, so copy is not allowed.
     Server (const Server &) = delete;
@@ -65,6 +68,8 @@
     Server &operator=(Server &&) noexcept;
   protected:
+    Server (unique_tcserver_ptr inner) : inner(std::move(inner)) {};
     unique_tcserver_ptr inner;
     // Replica accesses the inner pointer to call tc_replica_sync

src/tc/rust/Cargo.lock (generated): 1299 lines changed

File diff suppressed because it is too large.


@@ -6,4 +6,5 @@ version = "0.1.0"
 crate-type = ["staticlib"]
 [dependencies]
-taskchampion-lib = {path = "../../../taskchampion/lib"}
+taskchampion = { path = "../../../taskchampion/taskchampion", features = ["server-gcp", "server-sync"] }
+taskchampion-lib = { path = "../../../taskchampion/lib" }


@@ -11,4 +11,7 @@
 * [Synchronization Model](./sync-model.md)
   * [Snapshots](./snapshots.md)
   * [Server-Replica Protocol](./sync-protocol.md)
+  * [Encryption](./encryption.md)
+  * [HTTP Implementation](./http.md)
+  * [Object-Store Implementation](./object-store.md)
 * [Planned Functionality](./plans.md)


@@ -0,0 +1,38 @@
# Encryption
The client configuration includes an encryption secret of arbitrary length.
This section describes how that information is used to encrypt and decrypt data sent to the server (versions and snapshots).
Encryption is not used for local (on-disk) sync, but is used for all cases where data is sent from the local host.
## Key Derivation
The client derives the 32-byte encryption key from the configured encryption secret using PBKDF2 with HMAC-SHA256 and 100,000 iterations.
The salt value depends on the implementation of the protocol, as described in subsequent chapters.
## Encryption
The client uses [AEAD](https://commondatastorage.googleapis.com/chromium-boringssl-docs/aead.h.html), with algorithm CHACHA20_POLY1305.
The client should generate a random nonce, noting that AEAD is _not secure_ if a nonce is used repeatedly for the same key.
AEAD supports additional authenticated data (AAD) which must be provided for both open and seal operations.
In this protocol, the AAD is always 17 bytes of the form:
* `app_id` (byte) - always 1
* `version_id` (16 bytes) - 16-byte form of the version ID associated with this data
* for versions (AddVersion, GetChildVersion), the _parent_ version_id
* for snapshots (AddSnapshot, GetSnapshot), the snapshot version_id
The `app_id` field is for future expansion to handle other, non-task data using this protocol.
Including it in the AAD ensures that such data cannot be confused with task data.
Although the AEAD specification distinguishes ciphertext and tags, for purposes of this specification they are considered concatenated into a single bytestring as in BoringSSL's `EVP_AEAD_CTX_seal`.
## Representation
The final byte-stream is comprised of the following structure:
* `version` (byte) - format version (always 1)
* `nonce` (12 bytes) - encryption nonce
* `ciphertext` (remaining bytes) - ciphertext from sealing operation
The `version` field identifies this data format, and future formats will have a value other than 1 in this position.
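
For illustration only, here is a minimal Rust sketch of the envelope described above. It assumes the `ring` crate (which this change moves to 0.17); the function names (`derive_key`, `seal_version`), the `salt` argument, and the use of `unwrap` for error handling are placeholders for this sketch, not TaskChampion's actual API.

```rust
use std::num::NonZeroU32;

use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, CHACHA20_POLY1305};
use ring::pbkdf2;
use ring::rand::{SecureRandom, SystemRandom};
use uuid::Uuid;

/// Derive the 32-byte key from the encryption secret and an implementation-defined salt.
fn derive_key(encryption_secret: &[u8], salt: &[u8]) -> [u8; 32] {
    let mut key = [0u8; 32];
    pbkdf2::derive(
        pbkdf2::PBKDF2_HMAC_SHA256,
        NonZeroU32::new(100_000).unwrap(),
        salt,
        encryption_secret,
        &mut key,
    );
    key
}

/// Seal `payload`, producing the envelope: version byte (1), 12-byte nonce, ciphertext + tag.
fn seal_version(key: &[u8; 32], version_id: Uuid, payload: &[u8]) -> Vec<u8> {
    // AAD: app_id (always 1) followed by the 16-byte version ID.
    let mut aad = vec![1u8];
    aad.extend_from_slice(version_id.as_bytes());

    // A fresh random nonce for every sealing operation.
    let mut nonce_bytes = [0u8; 12];
    SystemRandom::new().fill(&mut nonce_bytes).unwrap();

    let sealing_key = LessSafeKey::new(UnboundKey::new(&CHACHA20_POLY1305, key).unwrap());
    let mut in_out = payload.to_vec();
    sealing_key
        .seal_in_place_append_tag(
            Nonce::assume_unique_for_key(nonce_bytes),
            Aad::from(aad.as_slice()),
            &mut in_out,
        )
        .unwrap();

    let mut envelope = vec![1u8]; // format version
    envelope.extend_from_slice(&nonce_bytes);
    envelope.extend_from_slice(&in_out);
    envelope
}
```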


@@ -0,0 +1,65 @@
# HTTP Representation
The transactions in the sync protocol are realized for an HTTP server at `<origin>` using the HTTP requests and responses described here.
The `origin` *should* be an HTTPS endpoint on general principle, but nothing in the functionality or security of the protocol depends on connection encryption.
The replica identifies itself to the server using a `client_id` in the form of a UUID.
This value is passed with every request in the `X-Client-Id` header, in its dashed-hex format.
The salt used in key derivation is the SHA256 hash of the 16-byte form of the client ID.
## AddVersion
The request is a `POST` to `<origin>/v1/client/add-version/<parentVersionId>`.
The request body contains the history segment, optionally encoded using any encoding supported by actix-web.
The content-type must be `application/vnd.taskchampion.history-segment`.
The success response is a 200 OK with an empty body.
The new version ID appears in the `X-Version-Id` header.
If included, a snapshot request appears in the `X-Snapshot-Request` header with value `urgency=low` or `urgency=high`.
On conflict, the response is a 409 CONFLICT with an empty body.
The expected parent version ID appears in the `X-Parent-Version-Id` header.
Other error responses (4xx or 5xx) may be returned and should be treated appropriately to their meanings in the HTTP specification.
## GetChildVersion
The request is a `GET` to `<origin>/v1/client/get-child-version/<parentVersionId>`.
The response is determined as described above.
The _not-found_ response is 404 NOT FOUND.
The _gone_ response is 410 GONE.
Neither has a response body.
On success, the response is a 200 OK.
The version's history segment is returned in the response body, with content-type `application/vnd.taskchampion.history-segment`.
The version ID appears in the `X-Version-Id` header.
The response body may be encoded, in accordance with any `Accept-Encoding` header in the request.
On failure, a client should treat a 404 NOT FOUND as indicating that it is up-to-date.
Clients should treat a 410 GONE as a synchronization error.
If the client has pending changes to send to the server, based on a now-removed version, then those changes cannot be reconciled and will be lost.
The client should, optionally after consulting the user, download and apply the latest snapshot.
## AddSnapshot
The request is a `POST` to `<origin>/v1/client/add-snapshot/<versionId>`.
The request body contains the snapshot data, optionally encoded using any encoding supported by actix-web.
The content-type must be `application/vnd.taskchampion.snapshot`.
If the version is invalid, as described above, the response should be 400 BAD REQUEST.
The server response should be 200 OK on success.
## GetSnapshot
The request is a `GET` to `<origin>/v1/client/snapshot`.
The response is a 200 OK.
The snapshot is returned in the response body, with content-type `application/vnd.taskchampion.snapshot`.
The version ID appears in the `X-Version-Id` header.
The response body may be encoded, in accordance with any `Accept-Encoding` header in the request.
After downloading and decrypting a snapshot, a client must replace its entire local task database with the content of the snapshot.
Any local operations that had not yet been synchronized must be discarded.
After the snapshot is applied, the client should begin the synchronization process again, starting from the snapshot version.
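
As a rough illustration of the AddVersion exchange above, the following sketch issues the request with the `ureq` crate (which this change enables with its `tls` feature). The function name, signature, and simplified error handling are hypothetical; the real client lives in TaskChampion's server-sync implementation.

```rust
/// Attempt to add a version; returns the new version ID on success, or `None` on a
/// 409 CONFLICT (the caller must then rebase its operations and retry).
fn add_version(
    origin: &str,
    client_id: &str,
    parent_version_id: &str,
    history_segment: Vec<u8>,
) -> Result<Option<String>, ureq::Error> {
    let url = format!("{}/v1/client/add-version/{}", origin, parent_version_id);
    match ureq::post(&url)
        .set("X-Client-Id", client_id)
        .set("Content-Type", "application/vnd.taskchampion.history-segment")
        .send_bytes(&history_segment)
    {
        // 200 OK: the new version ID is returned in the X-Version-Id header.
        Ok(resp) => Ok(resp.header("X-Version-Id").map(str::to_owned)),
        // 409 CONFLICT: the given parent was not the latest version.
        Err(ureq::Error::Status(409, _)) => Ok(None),
        Err(e) => Err(e),
    }
}
```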


@@ -0,0 +1,9 @@
# Object Store Representation
TaskChampion also supports use of a generic key-value store to synchronize replicas.
In this case, the salt used in key derivation is a random 16-byte value, stored
in the object store and retrieved as needed.
The details of the mapping from this protocol to keys and values are private to the implementation.
Other applications should not access the key-value store directly.
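
To make the shape of that abstraction concrete, here is a hypothetical sketch of the operations such a key-value backend needs to provide. It loosely mirrors, but is not, the crate's private `Service` trait; the compare-and-swap primitive is what allows the version chain to stay linear when several replicas sync concurrently.

```rust
use std::io;

/// Rough sketch of a generic object-store interface (illustrative only).
pub trait KeyValueService {
    /// Store an object, unconditionally overwriting any existing value.
    fn put(&mut self, name: &[u8], value: &[u8]) -> io::Result<()>;

    /// Fetch an object, or `None` if it does not exist.
    fn get(&mut self, name: &[u8]) -> io::Result<Option<Vec<u8>>>;

    /// Delete an object; deleting a nonexistent object is not an error.
    fn del(&mut self, name: &[u8]) -> io::Result<()>;

    /// Atomically replace `name` with `new_value`, but only if its current value is
    /// `existing_value` (`None` meaning "the object must not exist yet").
    /// Returns `false`, changing nothing, when the precondition is not met.
    fn compare_and_swap(
        &mut self,
        name: &[u8],
        existing_value: Option<Vec<u8>>,
        new_value: Vec<u8>,
    ) -> io::Result<bool>;
}
```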


@@ -2,7 +2,7 @@
 The basic synchronization model described in the previous page has a few shortcomings:
 * servers must store an ever-increasing quantity of versions
-* a new replica must download all versions since the beginning in order to derive the current state
+* a new replica must download all versions since the beginning (the nil UUID) in order to derive the current state
 Snapshots allow TaskChampion to avoid both of these issues.
 A snapshot is a copy of the task database at a specific version.
@@ -37,12 +37,3 @@ This saves resources in these restricted environments.
 A snapshot must be made on a replica with no unsynchronized operations.
 As such, it only makes sense to request a snapshot in response to a successful AddVersion request.
-## Handling Deleted Versions
-When a replica requests a child version, the response must distinguish two cases:
-1. No such child version exists because the replica is up-to-date.
-1. No such child version exists because it has been deleted, and the replica must re-initialize itself.
-The details of this logic are covered in the [Server-Replica Protocol](./sync-protocol.md).


@@ -32,7 +32,10 @@ For those familiar with distributed version control systems, a state is analogou
 Fundamentally, synchronization involves all replicas agreeing on a single, linear sequence of operations and the state that those operations create.
 Since the replicas are not connected, each may have additional operations that have been applied locally, but which have not yet been agreed on.
 The synchronization process uses operational transformation to "linearize" those operations.
 This process is analogous (vaguely) to rebasing a sequence of Git commits.
+Critically, though, operations cannot merge; in effect, the only option is rebasing.
+Furthermore, once an operation has been sent to the server it cannot be changed; in effect, the server does not permit "force push".
 ### Sync Operations
@@ -135,4 +138,4 @@ Without synchronization, its list of pending operations would grow indefinitely,
 So all replicas, even "singleton" replicas which do not replicate task data with any other replica, must synchronize periodically.
 TaskChampion provides a `LocalServer` for this purpose.
-It implements the `get_child_version` and `add_version` operations as described, storing data on-disk locally, all within the `ta` binary.
+It implements the `get_child_version` and `add_version` operations as described, storing data on-disk locally.


@@ -1,91 +1,42 @@
 # Server-Replica Protocol
-The server-replica protocol is defined abstractly in terms of request/response transactions from the replica to the server.
+The server-replica protocol is defined abstractly in terms of request/response transactions.
+This is made concrete in an HTTP representation.
-The protocol builds on the model presented in the previous chapter, and in particular on the synchronization process.
+The protocol builds on the model presented in the previous chapters, and in particular on the synchronization process.
 ## Clients
-From the server's perspective, replicas accessing the same task history are indistinguishable, so this protocol uses the term "client" to refer generically to all replicas replicating a single task history.
-Each client is identified and authenticated with a "client_id key", known only to the server and to the replicas replicating the task history.
+From the protocol's perspective, replicas accessing the same task history are indistinguishable, so this protocol uses the term "client" to refer generically to all replicas replicating a single task history.
 ## Server
+A server implements the requests and responses described below.
+Where the logic is implemented depends on the specific implementation of the protocol.
 For each client, the server is responsible for storing the task history, in the form of a branch-free sequence of versions.
 It also stores the latest snapshot, if any exists.
+From the server's perspective, snapshots and versions are opaque byte sequences.
-* versions: a set of {versionId: UUID, parentVersionId: UUID, historySegment: bytes}
-* latestVersionId: UUID
-* snapshotVersionId: UUID
-* snapshot: bytes
-For each client, it stores a set of versions as well as the latest version ID, defaulting to the nil UUID.
-Each version has a version ID, a parent version ID, and a history segment (opaque data containing the operations for that version).
-The server should maintain the following invariants for each client:
-1. latestVersionId is nil or exists in the set of versions.
-2. Given versions v1 and v2 for a client, with v1.versionId != v2.versionId and v1.parentVersionId != nil, v1.parentVersionId != v2.parentVersionId.
-In other words, versions do not branch.
-3. If snapshotVersionId is nil, then there is a version with parentVersionId == nil.
-4. If snapshotVersionId is not nil, then there is a version with parentVersionId = snapshotVersionId.
-Note that versions form a linked list beginning with the latestVersionId stored for the client.
-This linked list need not continue back to a version with v.parentVersionId = nil.
-It may end at any point when v.parentVersionId is not found in the set of Versions.
-This observation allows the server to discard older versions.
-The third invariant prevents the server from discarding versions if there is no snapshot.
-The fourth invariant prevents the server from discarding versions newer than the snapshot.
+## Version Invariant
+The following invariant must always hold:
+> All versions are linked by parent-child relationships to form a single chain.
+> That is, each version must have no more than one parent and one child, and no more than one version may have zero parents or zero children.
 ## Data Formats
-### Encryption
-The client configuration includes an encryption secret of arbitrary length and a clientId to identify itself.
-This section describes how that information is used to encrypt and decrypt data sent to the server (versions and snapshots).
-#### Key Derivation
-The client derives the 32-byte encryption key from the configured encryption secret using PBKDF2 with HMAC-SHA256 and 100,000 iterations.
-The salt is the SHA256 hash of the 16-byte form of the client ID.
-#### Encryption
-The client uses [AEAD](https://commondatastorage.googleapis.com/chromium-boringssl-docs/aead.h.html), with algorithm CHACHA20_POLY1305.
-The client should generate a random nonce, noting that AEAD is _not secure_ if a nonce is used repeatedly for the same key.
-AEAD supports additional authenticated data (AAD) which must be provided for both open and seal operations.
-In this protocol, the AAD is always 17 bytes of the form:
-* `app_id` (byte) - always 1
-* `version_id` (16 bytes) - 16-byte form of the version ID associated with this data
-  * for versions (AddVersion, GetChildVersion), the _parent_ version_id
-  * for snapshots (AddSnapshot, GetSnapshot), the snapshot version_id
-The `app_id` field is for future expansion to handle other, non-task data using this protocol.
-Including it in the AAD ensures that such data cannot be confused with task data.
-Although the AEAD specification distinguishes ciphertext and tags, for purposes of this specification they are considered concatenated into a single bytestring as in BoringSSL's `EVP_AEAD_CTX_seal`.
-#### Representation
-The final byte-stream is comprised of the following structure:
-* `version` (byte) - format version (always 1)
-* `nonce` (12 bytes) - encryption nonce
-* `ciphertext` (remaining bytes) - ciphertext from sealing operation
-The `version` field identifies this data format, and future formats will have a value other than 1 in this position.
+Task data sent to the server is encrypted by the client, using the scheme described in the "Encryption" chapter.
 ### Version
 The decrypted form of a version is a JSON array containing operations in the order they should be applied.
 Each operation has the form `{TYPE: DATA}`, for example:
-* `{"Create":{"uuid":"56e0be07-c61f-494c-a54c-bdcfdd52d2a7"}}`
-* `{"Delete":{"uuid":"56e0be07-c61f-494c-a54c-bdcfdd52d2a7"}}`
-* `{"Update":{"uuid":"56e0be07-c61f-494c-a54c-bdcfdd52d2a7","property":"prop","value":"v","timestamp":"2021-10-11T12:47:07.188090948Z"}}`
-* `{"Update":{"uuid":"56e0be07-c61f-494c-a54c-bdcfdd52d2a7","property":"prop","value":null,"timestamp":"2021-10-11T12:47:07.188090948Z"}}` (to delete a property)
+* `[{"Create":{"uuid":"56e0be07-c61f-494c-a54c-bdcfdd52d2a7"}}]`
+* `[{"Delete":{"uuid":"56e0be07-c61f-494c-a54c-bdcfdd52d2a7"}}]`
+* `[{"Update":{"uuid":"56e0be07-c61f-494c-a54c-bdcfdd52d2a7","property":"prop","value":"v","timestamp":"2021-10-11T12:47:07.188090948Z"}}]`
+* `[{"Update":{"uuid":"56e0be07-c61f-494c-a54c-bdcfdd52d2a7","property":"prop","value":null,"timestamp":"2021-10-11T12:47:07.188090948Z"}}]` (to delete a property)
 Timestamps are in RFC3339 format with a `Z` suffix.
@@ -108,24 +59,25 @@ For example (pretty-printed for clarity):
 ## Transactions
+All interactions between the client and server are defined in terms of request/response transactions, as described here.
 ### AddVersion
 The AddVersion transaction requests that the server add a new version to the client's task history.
 The request contains the following;
-* parent version ID
-* history segment
+* parent version ID, and
+* encrypted version data.
 The server determines whether the new version is acceptable, atomically with respect to other requests for the same client.
 If it has no versions for the client, it accepts the version.
-If it already has one or more versions for the client, then it accepts the version only if the given parent version ID matches its stored latest parent ID.
+If it already has one or more versions for the client, then it accepts the version only if the given parent version has no children, thereby maintaining the version invariant.
 If the version is accepted, the server generates a new version ID for it.
-The version is added to the set of versions for the client, the client's latest version ID is set to the new version ID.
-The new version ID is returned in the response to the client.
+The version is added to the chain of versions for the client, and the new version ID is returned in the response to the client.
 The response may also include a request for a snapshot, with associated urgency.
-If the version is not accepted, the server makes no changes, but responds to the client with a conflict indication containing the latest version ID.
+If the version is not accepted, the server makes no changes, but responds to the client with a conflict indication containing the ID of the version which has no children.
 The client may then "rebase" its operations and try again.
 Note that if a client receives two conflict responses with the same parent version ID, it is an indication that the client's version history has diverged from that on the server.
@@ -138,23 +90,17 @@ If found, it returns the version's
 * version ID,
 * parent version ID (matching that in the request), and
-* history segment.
-The response is either a version (success, _not-found_, or _gone_, as determined by the first of the following to apply:
-* If a version with parentVersionId equal to the requested parentVersionId exists, it is returned.
-* If the requested parentVersionId is the nil UUID ..
-  * ..and snapshotVersionId is nil, the response is _not-found_ (the client has no versions).
-  * ..and snapshotVersionId is not nil, the response is _gone_ (the first version has been deleted).
-* If a version with versionId equal to the requested parentVersionId exists, the response is _not-found_ (the client is up-to-date)
-* Otherwise, the response is _gone_ (the requested version has been deleted).
+* encrypted version data.
+If not found, it returns an indication that no such version exists.
 ### AddSnapshot
 The AddSnapshot transaction requests that the server store a new snapshot, generated by the client.
 The request contains the following:
-* version ID at which the snapshot was made
-* snapshot data (opaque to the server)
+* version ID at which the snapshot was made, and
+* encrypted snapshot data.
 The server should validate that the snapshot is for an existing version and is newer than any existing snapshot.
 It may also validate that the snapshot is for a "recent" version (e.g., one of the last 5 versions).
@@ -167,66 +113,3 @@ The server response is empty.
 The GetSnapshot transaction requests that the server provide the latest snapshot.
 The response contains the snapshot version ID and the snapshot data, if those exist.
-## HTTP Representation
-The transactions above are realized for an HTTP server at `<origin>` using the HTTP requests and responses described here.
-The `origin` *should* be an HTTPS endpoint on general principle, but nothing in the functonality or security of the protocol depends on connection encryption.
-The replica identifies itself to the server using a `client_id` in the form of a UUID.
-This value is passed with every request in the `X-Client-Id` header, in its dashed-hex format.
-### AddVersion
-The request is a `POST` to `<origin>/v1/client/add-version/<parentVersionId>`.
-The request body contains the history segment, optionally encoded using any encoding supported by actix-web.
-The content-type must be `application/vnd.taskchampion.history-segment`.
-The success response is a 200 OK with an empty body.
-The new version ID appears in the `X-Version-Id` header.
-If included, a snapshot request appears in the `X-Snapshot-Request` header with value `urgency=low` or `urgency=high`.
-On conflict, the response is a 409 CONFLICT with an empty body.
-The expected parent version ID appears in the `X-Parent-Version-Id` header.
-Other error responses (4xx or 5xx) may be returned and should be treated appropriately to their meanings in the HTTP specification.
-### GetChildVersion
-The request is a `GET` to `<origin>/v1/client/get-child-version/<parentVersionId>`.
-The response is determined as described above.
-The _not-found_ response is 404 NOT FOUND.
-The _gone_ response is 410 GONE.
-Neither has a response body.
-On success, the response is a 200 OK.
-The version's history segment is returned in the response body, with content-type `application/vnd.taskchampion.history-segment`.
-The version ID appears in the `X-Version-Id` header.
-The response body may be encoded, in accordance with any `Accept-Encoding` header in the request.
-On failure, a client should treat a 404 NOT FOUND as indicating that it is up-to-date.
-Clients should treat a 410 GONE as a synchronization error.
-If the client has pending changes to send to the server, based on a now-removed version, then those changes cannot be reconciled and will be lost.
-The client should, optionally after consulting the user, download and apply the latest snapshot.
-### AddSnapshot
-The request is a `POST` to `<origin>/v1/client/add-snapshot/<versionId>`.
-The request body contains the snapshot data, optionally encoded using any encoding supported by actix-web.
-The content-type must be `application/vnd.taskchampion.snapshot`.
-If the version is invalid, as described above, the response should be 400 BAD REQUEST.
-The server response should be 200 OK on success.
-### GetSnapshot
-The request is a `GET` to `<origin>/v1/client/snapshot`.
-The response is a 200 OK.
-The snapshot is returned in the response body, with content-type `application/vnd.taskchampion.snapshot`.
-The version ID appears in the `X-Version-Id` header.
-The response body may be encoded, in accordance with any `Accept-Encoding` header in the request.
-After downloading and decrypting a snapshot, a client must replace its entire local task database with the content of the snapshot.
-Any local operations that had not yet been synchronized must be discarded.
-After the snapshot is applied, the client should begin the synchronization process again, starting from the snapshot version.


@@ -185,7 +185,7 @@ static void test_replica_sync_local(void) {
 // When tc_replica_undo is passed NULL for undone_out, it still succeeds
 static void test_replica_remote_server(void) {
     TCString err;
-    TCServer *server = tc_server_new_remote(
+    TCServer *server = tc_server_new_sync(
         tc_string_borrow("tc.freecinc.com"),
         tc_uuid_new_v4(),
         tc_string_borrow("\xf0\x28\x8c\x28"), // NOTE: not utf-8


@@ -108,13 +108,13 @@ pub unsafe extern "C" fn tc_server_new_local(
 /// The server must be freed after it is used - tc_replica_sync does not automatically free it.
 ///
 /// ```c
-/// EXTERN_C struct TCServer *tc_server_new_remote(struct TCString origin,
+/// EXTERN_C struct TCServer *tc_server_new_sync(struct TCString origin,
 ///     struct TCUuid client_id,
 ///     struct TCString encryption_secret,
 ///     struct TCString *error_out);
 /// ```
 #[no_mangle]
-pub unsafe extern "C" fn tc_server_new_remote(
+pub unsafe extern "C" fn tc_server_new_sync(
     origin: TCString,
     client_id: TCUuid,
     encryption_secret: TCString,
@@ -129,8 +129,8 @@ pub unsafe extern "C" fn tc_server_new_remote(
             // SAFETY:
             //  - client_id is a valid Uuid (any 8-byte sequence counts)
             let client_id = unsafe { TCUuid::val_from_arg(client_id) };
             // SAFETY:
             //  - encryption_secret is valid (promised by caller)
            //  - encryption_secret ownership is transferred to this function
@@ -154,6 +154,54 @@ pub unsafe extern "C" fn tc_server_new_remote(
 #[ffizz_header::item]
 #[ffizz(order = 802)]
+/// Create a new TCServer that connects to the Google Cloud Platform. See the TaskChampion docs
+/// for the description of the arguments.
+///
+/// On error, a string is written to the error_out parameter (if it is not NULL) and NULL is
+/// returned. The caller must free this string.
+///
+/// The server must be freed after it is used - tc_replica_sync does not automatically free it.
+///
+/// ```c
+/// EXTERN_C struct TCServer *tc_server_new_gcp(struct TCString bucket,
+///     struct TCString encryption_secret,
+///     struct TCString *error_out);
+/// ```
+#[no_mangle]
+pub unsafe extern "C" fn tc_server_new_gcp(
+    bucket: TCString,
+    encryption_secret: TCString,
+    error_out: *mut TCString,
+) -> *mut TCServer {
+    wrap(
+        || {
+            // SAFETY:
+            //  - bucket is valid (promised by caller)
+            //  - bucket ownership is transferred to this function
+            let bucket = unsafe { TCString::val_from_arg(bucket) }.into_string()?;
+            // SAFETY:
+            //  - encryption_secret is valid (promised by caller)
+            //  - encryption_secret ownership is transferred to this function
+            let encryption_secret = unsafe { TCString::val_from_arg(encryption_secret) }
+                .as_bytes()
+                .to_vec();
+            let server_config = ServerConfig::Gcp {
+                bucket,
+                encryption_secret,
+            };
+            let server = server_config.into_server()?;
+            // SAFETY: caller promises to free this server.
+            Ok(unsafe { TCServer::return_ptr(server.into()) })
+        },
+        error_out,
+        std::ptr::null_mut(),
+    )
+}
+
+#[ffizz_header::item]
+#[ffizz(order = 899)]
 /// Free a server. The server may not be used after this function returns and must not be freed
 /// more than once.
 ///


@@ -116,8 +116,9 @@ impl TCString {
         }
     }
-#[derive(PartialEq, Eq, Debug)]
+#[derive(PartialEq, Eq, Debug, Default)]
 pub enum RustString<'a> {
+    #[default]
     Null,
     CString(CString),
     CStr(&'a CStr),
@@ -125,12 +126,6 @@ pub enum RustString<'a> {
     Bytes(Vec<u8>),
 }
-impl<'a> Default for RustString<'a> {
-    fn default() -> Self {
-        RustString::Null
-    }
-}
 impl PassByValue for TCString {
     type RustType = RustString<'static>;


@@ -433,11 +433,22 @@ EXTERN_C struct TCServer *tc_server_new_local(struct TCString server_dir, struct
 // returned. The caller must free this string.
 //
 // The server must be freed after it is used - tc_replica_sync does not automatically free it.
-EXTERN_C struct TCServer *tc_server_new_remote(struct TCString origin,
+EXTERN_C struct TCServer *tc_server_new_sync(struct TCString origin,
                                                struct TCUuid client_id,
                                                struct TCString encryption_secret,
                                                struct TCString *error_out);
+
+// Create a new TCServer that connects to the Google Cloud Platform. See the TaskChampion docs
+// for the description of the arguments.
+//
+// On error, a string is written to the error_out parameter (if it is not NULL) and NULL is
+// returned. The caller must free this string.
+//
+// The server must be freed after it is used - tc_replica_sync does not automatically free it.
+EXTERN_C struct TCServer *tc_server_new_gcp(struct TCString bucket,
+                                            struct TCString encryption_secret,
+                                            struct TCString *error_out);
+
 // Free a server. The server may not be used after this function returns and must not be freed
 // more than once.
 EXTERN_C void tc_server_free(struct TCServer *server);


@@ -9,12 +9,19 @@ repository = "https://github.com/GothenburgBitFactory/taskwarrior"
 readme = "../README.md"
 license = "MIT"
 edition = "2021"
-rust-version = "1.65"
+rust-version = "1.70.0"
 [features]
-default = ["server-sync" ]
-server-sync = ["crypto", "dep:ureq"]
-crypto = ["dep:ring"]
+default = ["server-sync", "server-gcp"]
+# Support for sync to a server
+server-sync = ["encryption", "dep:ureq"]
+# Support for sync to GCP
+server-gcp = ["cloud", "encryption", "dep:google-cloud-storage", "dep:tokio"]
+# (private) Support for sync protocol encryption
+encryption = ["dep:ring"]
+# (private) Generic support for cloud sync
+cloud = []
 [package.metadata.docs.rs]
 all-features = true
@@ -34,7 +41,11 @@ strum_macros.workspace = true
 flate2.workspace = true
 byteorder.workspace = true
 ring.workspace = true
+google-cloud-storage.workspace = true
+tokio.workspace = true
+google-cloud-storage.optional = true
+tokio.optional = true
 ureq.optional = true
 ring.optional = true


@@ -40,5 +40,9 @@ other_error!(io::Error);
 other_error!(serde_json::Error);
 other_error!(rusqlite::Error);
 other_error!(crate::storage::sqlite::SqliteError);
+#[cfg(feature = "server-gcp")]
+other_error!(google_cloud_storage::http::Error);
+#[cfg(feature = "server-gcp")]
+other_error!(google_cloud_storage::client::google_cloud_auth::error::Error);
 pub type Result<T> = std::result::Result<T, Error>;


@@ -40,6 +40,7 @@ Support for some optional functionality is controlled by feature flags.
 Sync server client support:
+* `server-gcp` - sync to Google Cloud Platform
 * `server-sync` - sync to the taskchampion-sync-server
 # See Also
@@ -49,7 +50,7 @@ for more information about the design and usage of the tool.
 # Minimum Supported Rust Version (MSRV)
-This crate supports Rust version 1.65 and higher.
+This crate supports Rust version 1.70.0 and higher.
 */


@@ -0,0 +1,392 @@
use super::service::{ObjectInfo, Service};
use crate::errors::Result;
use google_cloud_storage::client::{Client, ClientConfig};
use google_cloud_storage::http::error::ErrorResponse;
use google_cloud_storage::http::Error as GcsError;
use google_cloud_storage::http::{self, objects};
use tokio::runtime::Runtime;
/// A [`Service`] implementation based on the Google Cloud Storage service.
pub(in crate::server) struct GcpService {
client: Client,
rt: Runtime,
bucket: String,
}
/// Determine whether the given result contains an HTTP error with the given code.
fn is_http_error<T>(query: u16, res: &std::result::Result<T, http::Error>) -> bool {
match res {
// Errors from RPC's.
Err(GcsError::Response(ErrorResponse { code, .. })) => *code == query,
// Errors from reqwest (downloads, uploads).
Err(GcsError::HttpClient(e)) => e.status().map(|s| s.as_u16()) == Some(query),
_ => false,
}
}
impl GcpService {
pub(in crate::server) fn new(bucket: String) -> Result<Self> {
let rt = Runtime::new()?;
let config = rt.block_on(ClientConfig::default().with_auth())?;
Ok(Self {
client: Client::new(config),
rt,
bucket,
})
}
}
impl Service for GcpService {
fn put(&mut self, name: &[u8], value: &[u8]) -> Result<()> {
let name = String::from_utf8(name.to_vec()).expect("non-UTF8 object name");
let upload_type = objects::upload::UploadType::Simple(objects::upload::Media::new(name));
self.rt.block_on(self.client.upload_object(
&objects::upload::UploadObjectRequest {
bucket: self.bucket.clone(),
..Default::default()
},
value.to_vec(),
&upload_type,
))?;
Ok(())
}
fn get(&mut self, name: &[u8]) -> Result<Option<Vec<u8>>> {
let name = String::from_utf8(name.to_vec()).expect("non-UTF8 object name");
let download_res = self.rt.block_on(self.client.download_object(
&objects::get::GetObjectRequest {
bucket: self.bucket.clone(),
object: name,
..Default::default()
},
&objects::download::Range::default(),
));
if is_http_error(404, &download_res) {
Ok(None)
} else {
Ok(Some(download_res?))
}
}
fn del(&mut self, name: &[u8]) -> Result<()> {
let name = String::from_utf8(name.to_vec()).expect("non-UTF8 object name");
let del_res = self.rt.block_on(self.client.delete_object(
&objects::delete::DeleteObjectRequest {
bucket: self.bucket.clone(),
object: name,
..Default::default()
},
));
if !is_http_error(404, &del_res) {
del_res?;
}
Ok(())
}
fn list<'a>(&'a mut self, prefix: &[u8]) -> Box<dyn Iterator<Item = Result<ObjectInfo>> + 'a> {
let prefix = String::from_utf8(prefix.to_vec()).expect("non-UTF8 object prefix");
Box::new(ObjectIterator {
service: self,
prefix,
last_response: None,
next_index: 0,
})
}
fn compare_and_swap(
&mut self,
name: &[u8],
existing_value: Option<Vec<u8>>,
new_value: Vec<u8>,
) -> Result<bool> {
let name = String::from_utf8(name.to_vec()).expect("non-UTF8 object name");
let get_res = self
.rt
.block_on(self.client.get_object(&objects::get::GetObjectRequest {
bucket: self.bucket.clone(),
object: name.clone(),
..Default::default()
}));
// Determine the object's generation. See https://cloud.google.com/storage/docs/metadata#generation-number
let generation = if is_http_error(404, &get_res) {
// If a value was expected, that expectation has not been met.
if existing_value.is_some() {
return Ok(false);
}
// Generation 0 indicates that the object does not yet exist.
0
} else {
get_res?.generation
};
// If the file existed, then verify its contents.
if generation > 0 {
let data = self.rt.block_on(self.client.download_object(
&objects::get::GetObjectRequest {
bucket: self.bucket.clone(),
object: name.clone(),
// Fetch the same generation.
generation: Some(generation),
..Default::default()
},
&objects::download::Range::default(),
))?;
if Some(data) != existing_value {
return Ok(false);
}
}
// Finally, put the new value with a condition that the generation hasn't changed.
let upload_type = objects::upload::UploadType::Simple(objects::upload::Media::new(name));
let upload_res = self.rt.block_on(self.client.upload_object(
&objects::upload::UploadObjectRequest {
bucket: self.bucket.clone(),
if_generation_match: Some(generation),
..Default::default()
},
new_value.to_vec(),
&upload_type,
));
if is_http_error(412, &upload_res) {
// A 412 indicates the precondition was not satisfied: the given generation
// is no longer the latest.
Ok(false)
} else {
upload_res?;
Ok(true)
}
}
}
/// An Iterator returning names of objects from `list_objects`.
///
/// This handles response pagination by fetching one page at a time.
struct ObjectIterator<'a> {
service: &'a mut GcpService,
prefix: String,
last_response: Option<objects::list::ListObjectsResponse>,
next_index: usize,
}
impl<'a> ObjectIterator<'a> {
fn fetch_batch(&mut self) -> Result<()> {
let mut page_token = None;
if let Some(ref resp) = self.last_response {
page_token = resp.next_page_token.clone();
}
self.last_response = Some(self.service.rt.block_on(self.service.client.list_objects(
&objects::list::ListObjectsRequest {
bucket: self.service.bucket.clone(),
prefix: Some(self.prefix.clone()),
page_token,
#[cfg(test)] // For testing, use a small page size.
max_results: Some(6),
..Default::default()
},
))?);
self.next_index = 0;
Ok(())
}
}
impl<'a> Iterator for ObjectIterator<'a> {
type Item = Result<ObjectInfo>;
fn next(&mut self) -> Option<Self::Item> {
// If the iterator is just starting, fetch the first response.
if self.last_response.is_none() {
if let Err(e) = self.fetch_batch() {
return Some(Err(e));
}
}
if let Some(ref result) = self.last_response {
if let Some(ref items) = result.items {
if self.next_index < items.len() {
// Return a result from the existing response.
let obj = &items[self.next_index];
self.next_index += 1;
// It's unclear when `time_created` would be None, so default to 0 in that case
// or when the timestamp is not a valid u64 (before 1970).
let creation = obj.time_created.map(|t| t.unix_timestamp()).unwrap_or(0);
let creation: u64 = creation.try_into().unwrap_or(0);
return Some(Ok(ObjectInfo {
name: obj.name.as_bytes().to_vec(),
creation,
}));
} else if result.next_page_token.is_some() {
// Fetch the next page and try again.
if let Err(e) = self.fetch_batch() {
return Some(Err(e));
}
return self.next();
}
}
}
None
}
}
#[cfg(test)]
mod tests {
use super::*;
use uuid::Uuid;
/// Make a service if `GCP_TEST_BUCKET` is set, as well as a function to put a unique prefix on
/// an object name, so that tests do not interfere with one another.
///
/// Set up this bucket with a lifecycle policy to delete objects with age > 1 day. While passing
/// tests should correctly clean up after themselves, failing tests may leave objects in the
/// bucket.
///
/// When the environment variable is not set, this returns false and the test does not run.
/// Note that the Rust test runner will still show "ok" for the test, as there is no way to
/// indicate anything else.
fn make_service() -> Option<(GcpService, impl Fn(&str) -> Vec<u8>)> {
let Ok(bucket) = std::env::var("GCP_TEST_BUCKET") else {
return None;
};
let prefix = Uuid::new_v4();
Some((GcpService::new(bucket).unwrap(), move |n: &_| {
format!("{}-{}", prefix.as_simple(), n).into_bytes()
}))
}
#[test]
fn put_and_get() {
let Some((mut svc, pfx)) = make_service() else {
return;
};
svc.put(&pfx("testy"), b"foo").unwrap();
let got = svc.get(&pfx("testy")).unwrap();
assert_eq!(got, Some(b"foo".to_vec()));
// Clean up.
svc.del(&pfx("testy")).unwrap();
}
#[test]
fn get_missing() {
let Some((mut svc, pfx)) = make_service() else {
return;
};
let got = svc.get(&pfx("testy")).unwrap();
assert_eq!(got, None);
}
#[test]
fn del() {
let Some((mut svc, pfx)) = make_service() else {
return;
};
svc.put(&pfx("testy"), b"data").unwrap();
svc.del(&pfx("testy")).unwrap();
let got = svc.get(&pfx("testy")).unwrap();
assert_eq!(got, None);
}
#[test]
fn del_missing() {
// Deleting an object that does not exist is not an error.
let Some((mut svc, pfx)) = make_service() else {
return;
};
assert!(svc.del(&pfx("testy")).is_ok());
}
#[test]
fn list() {
let Some((mut svc, pfx)) = make_service() else {
return;
};
let mut names: Vec<_> = (0..20).map(|i| pfx(&format!("pp-{i:02}"))).collect();
names.sort();
// Create 20 objects that will be listed.
for n in &names {
svc.put(n, b"data").unwrap();
}
// And another object that should not be included in the list.
svc.put(&pfx("xxx"), b"data").unwrap();
let got_objects: Vec<_> = svc.list(&pfx("pp-")).collect::<Result<_>>().unwrap();
let mut got_names: Vec<_> = got_objects.into_iter().map(|oi| oi.name).collect();
got_names.sort();
assert_eq!(got_names, names);
// Clean up.
for n in got_names {
svc.del(&n).unwrap();
}
svc.del(&pfx("xxx")).unwrap();
}
#[test]
fn compare_and_swap_create() {
let Some((mut svc, pfx)) = make_service() else {
return;
};
assert!(svc
.compare_and_swap(&pfx("testy"), None, b"bar".to_vec())
.unwrap());
let got = svc.get(&pfx("testy")).unwrap();
assert_eq!(got, Some(b"bar".to_vec()));
// Clean up.
svc.del(&pfx("testy")).unwrap();
}
#[test]
fn compare_and_swap_matches() {
let Some((mut svc, pfx)) = make_service() else {
return;
};
// Create the existing file, with two generations.
svc.put(&pfx("testy"), b"foo1").unwrap();
svc.put(&pfx("testy"), b"foo2").unwrap();
assert!(svc
.compare_and_swap(&pfx("testy"), Some(b"foo2".to_vec()), b"bar".to_vec())
.unwrap());
let got = svc.get(&pfx("testy")).unwrap();
assert_eq!(got, Some(b"bar".to_vec()));
// Clean up.
svc.del(&pfx("testy")).unwrap();
}
#[test]
fn compare_and_swap_expected_no_file() {
let Some((mut svc, pfx)) = make_service() else {
return;
};
svc.put(&pfx("testy"), b"foo1").unwrap();
assert!(!svc
.compare_and_swap(&pfx("testy"), None, b"bar".to_vec())
.unwrap());
let got = svc.get(&pfx("testy")).unwrap();
assert_eq!(got, Some(b"foo1".to_vec()));
// Clean up.
svc.del(&pfx("testy")).unwrap();
}
#[test]
fn compare_and_swap_mismatch() {
let Some((mut svc, pfx)) = make_service() else {
return;
};
// Create the existing file, with two generations.
svc.put(&pfx("testy"), b"foo1").unwrap();
svc.put(&pfx("testy"), b"foo2").unwrap();
assert!(!svc
.compare_and_swap(&pfx("testy"), Some(b"foo1".to_vec()), b"bar".to_vec())
.unwrap());
let got = svc.get(&pfx("testy")).unwrap();
assert_eq!(got, Some(b"foo2".to_vec()));
// Clean up.
svc.del(&pfx("testy")).unwrap();
}
}


@@ -0,0 +1,16 @@
/*!
* Support for cloud-service-backed sync.
*
* All of these operate using a similar approach, with specific patterns of object names. The
* process of adding a new version requires a compare-and-swap operation that sets a new version
* as the "latest" only if the existing "latest" has the expected value. This ensures a continuous
* chain of versions, even if multiple replicas attempt to sync at the same time.
*/
mod server;
mod service;
pub(in crate::server) use server::CloudServer;
#[cfg(feature = "server-gcp")]
pub(in crate::server) mod gcp;
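The generic `CloudServer` itself (presumably `server.rs`, whose diff is collapsed below) builds on the compare-and-swap operation to keep a single, continuous chain of versions, as the module comment above describes. A rough, illustrative sketch of that idea; the object names "latest" and "v-<uuid>" are invented here and are not the crate's actual naming scheme:

use crate::errors::Result;
use crate::server::cloud::service::Service;
use uuid::Uuid;

/// Illustrative only: advance a "latest" pointer to a newly-uploaded version,
/// but only if it still points at the expected parent.
fn try_add_version(
    service: &mut impl Service,
    parent: Uuid,
    new_version: Uuid,
    payload: &[u8],
) -> Result<bool> {
    // Store the version body under its own (hypothetical) name first...
    service.put(format!("v-{}", new_version.as_simple()).as_bytes(), payload)?;
    // ...then try to swing the (hypothetical) "latest" pointer atomically.
    service.compare_and_swap(
        b"latest",
        Some(parent.as_simple().to_string().into_bytes()),
        new_version.as_simple().to_string().into_bytes(),
    )
}

A `false` return tells the caller that another replica added a version first, which keeps the chain continuous even when multiple replicas sync at once.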

File diff suppressed because it is too large.


@@ -0,0 +1,38 @@
use crate::errors::Result;
/// Information about an object as returned from `Service::list`
pub(in crate::server) struct ObjectInfo {
/// Name of the object.
pub(in crate::server) name: Vec<u8>,
/// Creation time of the object, in seconds since the UNIX epoch.
pub(in crate::server) creation: u64,
}
/// An abstraction of a cloud-storage service.
///
/// The underlying cloud storage is assumed to be a map from object names to object values,
/// similar to a HashMap, with the addition of a compare-and-swap operation. Object names
/// are always simple strings from the character set `[a-zA-Z0-9-]`, no more than 100 characters
/// in length.
pub(in crate::server) trait Service {
/// Put an object into cloud storage. If the object exists, it is overwritten.
fn put(&mut self, name: &[u8], value: &[u8]) -> Result<()>;
/// Get an object from cloud storage, or None if the object does not exist.
fn get(&mut self, name: &[u8]) -> Result<Option<Vec<u8>>>;
/// Delete an object. Does nothing if the object does not exist.
fn del(&mut self, name: &[u8]) -> Result<()>;
/// Enumerate objects with the given prefix.
fn list<'a>(&'a mut self, prefix: &[u8]) -> Box<dyn Iterator<Item = Result<ObjectInfo>> + 'a>;
/// Compare the existing object's value with `existing_value`, and replace with `new_value`
/// only if the values match. Returns true if the replacement occurred.
fn compare_and_swap(
&mut self,
name: &[u8],
existing_value: Option<Vec<u8>>,
new_value: Vec<u8>,
) -> Result<bool>;
}
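To make the contract above concrete, here is a toy in-memory implementation; note in particular that `compare_and_swap` compares the entire existing value. It is purely illustrative (not part of this change), it fakes the `creation` timestamp as 0, and the `use` paths are assumptions:

use std::collections::HashMap;

use crate::errors::Result;
use crate::server::cloud::service::{ObjectInfo, Service};

/// Hypothetical in-memory Service, mapping names to (creation, value) pairs.
#[derive(Default)]
struct MemoryService {
    objects: HashMap<Vec<u8>, (u64, Vec<u8>)>,
}

impl Service for MemoryService {
    fn put(&mut self, name: &[u8], value: &[u8]) -> Result<()> {
        // A real service would record the actual creation time here.
        self.objects.insert(name.to_vec(), (0, value.to_vec()));
        Ok(())
    }

    fn get(&mut self, name: &[u8]) -> Result<Option<Vec<u8>>> {
        Ok(self.objects.get(name).map(|(_, value)| value.clone()))
    }

    fn del(&mut self, name: &[u8]) -> Result<()> {
        self.objects.remove(name);
        Ok(())
    }

    fn list<'a>(&'a mut self, prefix: &[u8]) -> Box<dyn Iterator<Item = Result<ObjectInfo>> + 'a> {
        // Collect eagerly so the returned iterator owns its data.
        let matching: Vec<Result<ObjectInfo>> = self
            .objects
            .iter()
            .filter(|(name, _)| name.starts_with(prefix))
            .map(|(name, (creation, _))| {
                Ok(ObjectInfo {
                    name: name.clone(),
                    creation: *creation,
                })
            })
            .collect();
        Box::new(matching.into_iter())
    }

    fn compare_and_swap(
        &mut self,
        name: &[u8],
        existing_value: Option<Vec<u8>>,
        new_value: Vec<u8>,
    ) -> Result<bool> {
        // The swap happens only when the *entire* current value matches.
        let current = self.objects.get(name).map(|(_, value)| value.clone());
        if current == existing_value {
            self.objects.insert(name.to_vec(), (0, new_value));
            Ok(true)
        } else {
            Ok(false)
        }
    }
}

A real backend must enforce the same check atomically on the server side, as the GCP implementation earlier in this diff does with `if_generation_match`.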


@@ -1,5 +1,9 @@
 use super::types::Server;
 use crate::errors::Result;
+#[cfg(feature = "server-gcp")]
+use crate::server::cloud::gcp::GcpService;
+#[cfg(feature = "cloud")]
+use crate::server::cloud::CloudServer;
 use crate::server::local::LocalServer;
 #[cfg(feature = "server-sync")]
 use crate::server::sync::SyncServer;
@@ -23,6 +27,17 @@ pub enum ServerConfig {
         /// Client ID to identify and authenticate this replica to the server
         client_id: Uuid,
+        /// Private encryption secret used to encrypt all data sent to the server. This can
+        /// be any suitably un-guessable string of bytes.
+        encryption_secret: Vec<u8>,
+    },
+    /// A remote taskchampion-sync-server instance
+    #[cfg(feature = "server-gcp")]
+    Gcp {
+        /// Bucket in which to store the task data. This bucket must not be used for any other
+        /// purpose.
+        bucket: String,
         /// Private encryption secret used to encrypt all data sent to the server. This can
         /// be any suitably un-guessable string of bytes.
         encryption_secret: Vec<u8>,
@@ -40,6 +55,14 @@ impl ServerConfig {
                 client_id,
                 encryption_secret,
             } => Box::new(SyncServer::new(origin, client_id, encryption_secret)?),
+            #[cfg(feature = "server-gcp")]
+            ServerConfig::Gcp {
+                bucket,
+                encryption_secret,
+            } => Box::new(CloudServer::new(
+                GcpService::new(bucket)?,
+                encryption_secret,
+            )?),
         })
     }
 }
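With the new variant, configuring a GCP-backed replica is just a matter of constructing the enum. The field names come from the hunk above; the crate module path and the `server-gcp` build feature being enabled are assumptions:

// Module path assumed for illustration.
use taskchampion::server::ServerConfig;

fn gcp_config() -> ServerConfig {
    ServerConfig::Gcp {
        // A bucket dedicated to task data; it must not be used for anything else.
        bucket: "my-task-sync-bucket".to_string(),
        // Any suitably un-guessable string of bytes.
        encryption_secret: b"correct horse battery staple".to_vec(),
    }
}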


@@ -1,7 +1,7 @@
 /// This module implements the encryption specified in the sync-protocol
 /// document.
 use crate::errors::{Error, Result};
-use ring::{aead, digest, pbkdf2, rand, rand::SecureRandom};
+use ring::{aead, pbkdf2, rand, rand::SecureRandom};
 use uuid::Uuid;
 const PBKDF2_ITERATIONS: u32 = 100000;
@@ -11,24 +11,32 @@ const TASK_APP_ID: u8 = 1;
 /// An Cryptor stores a secret and allows sealing and unsealing. It derives a key from the secret,
 /// which takes a nontrivial amount of time, so it should be created once and re-used for the given
-/// client_id.
+/// context.
+#[derive(Clone)]
 pub(super) struct Cryptor {
     key: aead::LessSafeKey,
     rng: rand::SystemRandom,
 }
 impl Cryptor {
-    pub(super) fn new(client_id: Uuid, secret: &Secret) -> Result<Self> {
+    pub(super) fn new(salt: impl AsRef<[u8]>, secret: &Secret) -> Result<Self> {
         Ok(Cryptor {
-            key: Self::derive_key(client_id, secret)?,
+            key: Self::derive_key(salt, secret)?,
             rng: rand::SystemRandom::new(),
         })
     }
-    /// Derive a key as specified for version 1. Note that this may take 10s of ms.
-    fn derive_key(client_id: Uuid, secret: &Secret) -> Result<aead::LessSafeKey> {
-        let salt = digest::digest(&digest::SHA256, client_id.as_bytes());
+    /// Generate a suitable random salt.
+    pub(super) fn gen_salt() -> Result<Vec<u8>> {
+        let rng = rand::SystemRandom::new();
+        let mut salt = [0u8; 16];
+        rng.fill(&mut salt)
+            .map_err(|_| anyhow::anyhow!("error generating random salt"))?;
+        Ok(salt.to_vec())
+    }
+    /// Derive a key as specified for version 1. Note that this may take 10s of ms.
+    fn derive_key(salt: impl AsRef<[u8]>, secret: &Secret) -> Result<aead::LessSafeKey> {
         let mut key_bytes = vec![0u8; aead::CHACHA20_POLY1305.key_len()];
         pbkdf2::derive(
             pbkdf2::PBKDF2_HMAC_SHA256,
@@ -93,7 +101,7 @@ impl Cryptor {
         let plaintext = self
             .key
             .open_in_place(nonce, aad, payload.as_mut())
-            .map_err(|_| anyhow::anyhow!("error while creating AEAD key"))?;
+            .map_err(|_| anyhow::anyhow!("error while unsealing encrypted value"))?;
         Ok(Unsealed {
             version_id,
@@ -169,46 +177,39 @@ pub(super) struct Unsealed {
     pub(super) payload: Vec<u8>,
 }
+impl From<Unsealed> for Vec<u8> {
+    fn from(val: Unsealed) -> Self {
+        val.payload
+    }
+}
 /// An encrypted payload
 pub(super) struct Sealed {
     pub(super) version_id: Uuid,
     pub(super) payload: Vec<u8>,
 }
-impl Sealed {
-    #[cfg(feature = "server-sync")]
-    pub(super) fn from_resp(
-        resp: ureq::Response,
-        version_id: Uuid,
-        content_type: &str,
-    ) -> Result<Sealed> {
-        use std::io::Read;
-        if resp.header("Content-Type") == Some(content_type) {
-            let mut reader = resp.into_reader();
-            let mut payload = vec![];
-            reader.read_to_end(&mut payload)?;
-            Ok(Self {
-                version_id,
-                payload,
-            })
-        } else {
-            Err(Error::Server(String::from(
-                "Response did not have expected content-type",
-            )))
-        }
-    }
-}
 impl AsRef<[u8]> for Sealed {
     fn as_ref(&self) -> &[u8] {
         self.payload.as_ref()
     }
 }
+impl From<Sealed> for Vec<u8> {
+    fn from(val: Sealed) -> Self {
+        val.payload
+    }
+}
 #[cfg(test)]
 mod test {
     use super::*;
     use pretty_assertions::assert_eq;
+    use ring::digest;
+    fn make_salt() -> Vec<u8> {
+        Cryptor::gen_salt().unwrap()
+    }
     #[test]
     fn envelope_round_trip() {
@@ -252,7 +253,7 @@
         let payload = b"HISTORY REPEATS ITSELF".to_vec();
         let secret = Secret(b"SEKRIT".to_vec());
-        let cryptor = Cryptor::new(Uuid::new_v4(), &secret).unwrap();
+        let cryptor = Cryptor::new(make_salt(), &secret).unwrap();
         let unsealed = Unsealed {
             version_id,
@@ -269,10 +270,10 @@
     fn round_trip_bad_key() {
         let version_id = Uuid::new_v4();
         let payload = b"HISTORY REPEATS ITSELF".to_vec();
-        let client_id = Uuid::new_v4();
+        let salt = make_salt();
         let secret = Secret(b"SEKRIT".to_vec());
-        let cryptor = Cryptor::new(client_id, &secret).unwrap();
+        let cryptor = Cryptor::new(&salt, &secret).unwrap();
         let unsealed = Unsealed {
             version_id,
@@ -281,7 +282,7 @@
         let sealed = cryptor.seal(unsealed).unwrap();
         let secret = Secret(b"DIFFERENT_SECRET".to_vec());
-        let cryptor = Cryptor::new(client_id, &secret).unwrap();
+        let cryptor = Cryptor::new(&salt, &secret).unwrap();
         assert!(cryptor.unseal(sealed).is_err());
     }
@@ -289,10 +290,9 @@
     fn round_trip_bad_version() {
         let version_id = Uuid::new_v4();
         let payload = b"HISTORY REPEATS ITSELF".to_vec();
-        let client_id = Uuid::new_v4();
         let secret = Secret(b"SEKRIT".to_vec());
-        let cryptor = Cryptor::new(client_id, &secret).unwrap();
+        let cryptor = Cryptor::new(make_salt(), &secret).unwrap();
         let unsealed = Unsealed {
             version_id,
@@ -304,13 +304,12 @@
     }
     #[test]
-    fn round_trip_bad_client_id() {
+    fn round_trip_bad_salt() {
         let version_id = Uuid::new_v4();
         let payload = b"HISTORY REPEATS ITSELF".to_vec();
-        let client_id = Uuid::new_v4();
         let secret = Secret(b"SEKRIT".to_vec());
-        let cryptor = Cryptor::new(client_id, &secret).unwrap();
+        let cryptor = Cryptor::new(make_salt(), &secret).unwrap();
         let unsealed = Unsealed {
             version_id,
@@ -318,8 +317,7 @@
         };
         let sealed = cryptor.seal(unsealed).unwrap();
-        let client_id = Uuid::new_v4();
-        let cryptor = Cryptor::new(client_id, &secret).unwrap();
+        let cryptor = Cryptor::new(make_salt(), &secret).unwrap();
         assert!(cryptor.unseal(sealed).is_err());
     }
@@ -331,23 +329,25 @@
     use pretty_assertions::assert_eq;
     /// The values in generate-test-data.py
-    fn defaults() -> (Uuid, Uuid, Vec<u8>) {
-        (
-            Uuid::parse_str("b0517957-f912-4d49-8330-f612e73030c4").unwrap(),
-            Uuid::parse_str("0666d464-418a-4a08-ad53-6f15c78270cd").unwrap(),
-            b"b4a4e6b7b811eda1dc1a2693ded".to_vec(),
-        )
+    fn defaults() -> (Uuid, Vec<u8>, Vec<u8>) {
+        let version_id = Uuid::parse_str("b0517957-f912-4d49-8330-f612e73030c4").unwrap();
+        let encryption_secret = b"b4a4e6b7b811eda1dc1a2693ded".to_vec();
+        let client_id = Uuid::parse_str("0666d464-418a-4a08-ad53-6f15c78270cd").unwrap();
+        let salt = dbg!(digest::digest(&digest::SHA256, client_id.as_ref()))
+            .as_ref()
+            .to_vec();
+        (version_id, salt, encryption_secret)
     }
     #[test]
     fn good() {
-        let (version_id, client_id, encryption_secret) = defaults();
+        let (version_id, salt, encryption_secret) = defaults();
         let sealed = Sealed {
             version_id,
             payload: include_bytes!("test-good.data").to_vec(),
         };
-        let cryptor = Cryptor::new(client_id, &Secret(encryption_secret)).unwrap();
+        let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
         let unsealed = cryptor.unseal(sealed).unwrap();
         assert_eq!(unsealed.payload, b"SUCCESS");
@@ -356,61 +356,61 @@
     #[test]
     fn bad_version_id() {
-        let (version_id, client_id, encryption_secret) = defaults();
+        let (version_id, salt, encryption_secret) = defaults();
         let sealed = Sealed {
             version_id,
             payload: include_bytes!("test-bad-version-id.data").to_vec(),
         };
-        let cryptor = Cryptor::new(client_id, &Secret(encryption_secret)).unwrap();
+        let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
         assert!(cryptor.unseal(sealed).is_err());
     }
     #[test]
-    fn bad_client_id() {
-        let (version_id, client_id, encryption_secret) = defaults();
+    fn bad_salt() {
+        let (version_id, salt, encryption_secret) = defaults();
         let sealed = Sealed {
             version_id,
             payload: include_bytes!("test-bad-client-id.data").to_vec(),
         };
-        let cryptor = Cryptor::new(client_id, &Secret(encryption_secret)).unwrap();
+        let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
         assert!(cryptor.unseal(sealed).is_err());
     }
     #[test]
     fn bad_secret() {
-        let (version_id, client_id, encryption_secret) = defaults();
+        let (version_id, salt, encryption_secret) = defaults();
         let sealed = Sealed {
             version_id,
             payload: include_bytes!("test-bad-secret.data").to_vec(),
         };
-        let cryptor = Cryptor::new(client_id, &Secret(encryption_secret)).unwrap();
+        let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
         assert!(cryptor.unseal(sealed).is_err());
     }
     #[test]
     fn bad_version() {
-        let (version_id, client_id, encryption_secret) = defaults();
+        let (version_id, salt, encryption_secret) = defaults();
         let sealed = Sealed {
             version_id,
             payload: include_bytes!("test-bad-version.data").to_vec(),
         };
-        let cryptor = Cryptor::new(client_id, &Secret(encryption_secret)).unwrap();
+        let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
         assert!(cryptor.unseal(sealed).is_err());
     }
     #[test]
     fn bad_app_id() {
-        let (version_id, client_id, encryption_secret) = defaults();
+        let (version_id, salt, encryption_secret) = defaults();
         let sealed = Sealed {
             version_id,
             payload: include_bytes!("test-bad-app-id.data").to_vec(),
         };
-        let cryptor = Cryptor::new(client_id, &Secret(encryption_secret)).unwrap();
+        let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
         assert!(cryptor.unseal(sealed).is_err());
     }
 }
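The net effect of this refactor is that the `Cryptor` is keyed from an arbitrary salt rather than always hashing the client ID: the existing sync server keeps the SHA-256(client_id) derivation (see sync.rs below), while `gen_salt` presumably serves the new cloud server. A sketch of the salted round trip, assuming it runs inside the `server` module where these `pub(super)` items are visible:

use uuid::Uuid;

fn round_trip_example() -> crate::errors::Result<()> {
    // Generated once and then stored somewhere every replica can read it,
    // since all replicas must derive the same key. (Where it is stored is up
    // to the server implementation and is not shown in this hunk.)
    let salt = Cryptor::gen_salt()?;
    let secret = Secret(b"a suitably un-guessable secret".to_vec());
    let cryptor = Cryptor::new(&salt, &secret)?;

    let unsealed = Unsealed {
        version_id: Uuid::new_v4(),
        payload: b"history segment bytes".to_vec(),
    };
    let sealed = cryptor.seal(unsealed)?;

    // Any replica holding the same salt and secret can reverse this.
    let recovered = cryptor.unseal(sealed)?;
    assert_eq!(recovered.payload, b"history segment bytes");
    Ok(())
}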


@@ -16,12 +16,15 @@ mod local;
 mod op;
 mod types;
-#[cfg(feature = "crypto")]
-mod crypto;
+#[cfg(feature = "encryption")]
+mod encryption;
 #[cfg(feature = "server-sync")]
 mod sync;
+#[cfg(feature = "cloud")]
+mod cloud;
 pub use config::ServerConfig;
 pub use types::*;


@@ -1,12 +1,13 @@
-use crate::errors::Result;
+use crate::errors::{Error, Result};
 use crate::server::{
     AddVersionResult, GetVersionResult, HistorySegment, Server, Snapshot, SnapshotUrgency,
     VersionId,
 };
+use ring::digest;
 use std::time::Duration;
 use uuid::Uuid;
-use super::crypto::{Cryptor, Sealed, Secret, Unsealed};
+use super::encryption::{Cryptor, Sealed, Secret, Unsealed};
 pub struct SyncServer {
     origin: String,
@@ -28,10 +29,11 @@ impl SyncServer {
     /// identify this client to the server. Multiple replicas synchronizing the same task history
     /// should use the same client_id.
     pub fn new(origin: String, client_id: Uuid, encryption_secret: Vec<u8>) -> Result<SyncServer> {
+        let salt = dbg!(digest::digest(&digest::SHA256, client_id.as_ref()));
         Ok(SyncServer {
             origin,
             client_id,
-            cryptor: Cryptor::new(client_id, &Secret(encryption_secret.to_vec()))?,
+            cryptor: Cryptor::new(salt, &Secret(encryption_secret.to_vec()))?,
             agent: ureq::AgentBuilder::new()
                 .timeout_connect(Duration::from_secs(10))
                 .timeout_read(Duration::from_secs(60))
@@ -62,6 +64,23 @@ fn get_snapshot_urgency(resp: &ureq::Response) -> SnapshotUrgency {
     }
 }
+fn sealed_from_resp(resp: ureq::Response, version_id: Uuid, content_type: &str) -> Result<Sealed> {
+    use std::io::Read;
+    if resp.header("Content-Type") == Some(content_type) {
+        let mut reader = resp.into_reader();
+        let mut payload = vec![];
+        reader.read_to_end(&mut payload)?;
+        Ok(Sealed {
+            version_id,
+            payload,
+        })
+    } else {
+        Err(Error::Server(String::from(
+            "Response did not have expected content-type",
+        )))
+    }
+}
 impl Server for SyncServer {
     fn add_version(
         &mut self,
@@ -117,7 +136,7 @@ impl Server for SyncServer {
                 let parent_version_id = get_uuid_header(&resp, "X-Parent-Version-Id")?;
                 let version_id = get_uuid_header(&resp, "X-Version-Id")?;
                 let sealed =
-                    Sealed::from_resp(resp, parent_version_id, HISTORY_SEGMENT_CONTENT_TYPE)?;
+                    sealed_from_resp(resp, parent_version_id, HISTORY_SEGMENT_CONTENT_TYPE)?;
                 let history_segment = self.cryptor.unseal(sealed)?.payload;
                 Ok(GetVersionResult::Version {
                     version_id,
@@ -158,7 +177,7 @@ impl Server for SyncServer {
             {
                 Ok(resp) => {
                     let version_id = get_uuid_header(&resp, "X-Version-Id")?;
-                    let sealed = Sealed::from_resp(resp, version_id, SNAPSHOT_CONTENT_TYPE)?;
+                    let sealed = sealed_from_resp(resp, version_id, SNAPSHOT_CONTENT_TYPE)?;
                     let snapshot = self.cryptor.unseal(sealed)?.payload;
                     Ok(Some((version_id, snapshot)))
                 }


@@ -31,7 +31,7 @@ foreach (src_FILE ${test_SRCS})
   add_executable (${src_FILE} "${src_FILE}.cpp" test.cpp)
   target_link_libraries (${src_FILE} task tc commands columns libshared task tc tc-rust commands columns libshared task commands columns libshared ${TASK_LIBRARIES})
   if (DARWIN)
-    target_link_libraries (${src_FILE} "-framework CoreFoundation -framework Security")
+    target_link_libraries (${src_FILE} "-framework CoreFoundation -framework Security -framework SystemConfiguration")
   endif (DARWIN)
 endforeach (src_FILE)