Update Actix crates to latest versions

This avoids a vulnerability in tokio (#3085). The major version updates
of both actix-web and actix-rt required some significant changes. Chief
among them, it turns out we were relying on actix-rt to run the
HttpServer in a different thread from the rest of the test, so that we
could talk to it from sync code in the test thread. This no longer
works, so the sync code is now run in a dedicated thread with
`actix_rt::task::spawn_blocking`.
Dustin J. Mitchell, 2023-04-08 09:30:28 -04:00 (committed by Dustin J. Mitchell)
parent 33366e2f05
commit 52fdc6a877
10 changed files with 466 additions and 990 deletions
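
For reference, the new tests all follow the same pattern: bind an `HttpServer` to port 0, spawn it onto the actix-rt runtime, and run the synchronous client code on a blocking thread. A minimal sketch of that pattern, assuming actix-web 4 and actix-rt 2 as in the updated Cargo.toml files; `run_server`/`run_client` are illustrative names, not the project's code:

    // Sketch of the pattern described in the commit message; names are illustrative.
    use actix_web::{web, App, HttpServer};

    async fn run_server() -> anyhow::Result<u16> {
        let httpserver = HttpServer::new(|| App::new().route("/", web::get().to(|| async { "ok" })))
            .bind("0.0.0.0:0")?; // port 0: the kernel picks a free port
        let port = httpserver.addrs()[0].port();
        // `run()` returns a future; in actix-rt 2 it must be spawned explicitly
        // rather than relying on the runtime to keep it on another thread.
        actix_rt::spawn(httpserver.run());
        Ok(port)
    }

    fn run_client(port: u16) -> anyhow::Result<()> {
        // synchronous client code talks to http://127.0.0.1:{port} here
        let _ = port;
        Ok(())
    }

    #[actix_rt::test]
    async fn example() -> anyhow::Result<()> {
        let port = run_server().await?;
        // keep the sync client off the async runtime's worker thread
        actix_rt::task::spawn_blocking(move || run_client(port)).await??;
        Ok(())
    }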


@@ -13,8 +13,8 @@ taskchampion-sync-server = { path = "../sync-server" }
 [dev-dependencies]
 anyhow = "1.0"
-actix-web = "^3.3.2"
-actix-rt = "^1.1.1"
+actix-web = "^4.3.1"
+actix-rt = "2"
 tempfile = "3"
 pretty_assertions = "1"
 log = "^0.4.17"


@@ -5,86 +5,93 @@ use taskchampion_sync_server::{storage::InMemoryStorage, Server};
 #[actix_rt::test]
 async fn cross_sync() -> anyhow::Result<()> {
-    let _ = env_logger::builder()
-        .is_test(true)
-        .filter_level(log::LevelFilter::Trace)
-        .try_init();
+    async fn server() -> anyhow::Result<u16> {
+        let _ = env_logger::builder()
+            .is_test(true)
+            .filter_level(log::LevelFilter::Trace)
+            .try_init();
-    let server = Server::new(Default::default(), Box::new(InMemoryStorage::new()));
-    let httpserver =
-        HttpServer::new(move || App::new().configure(|sc| server.config(sc))).bind("0.0.0.0:0")?;
+        let server = Server::new(Default::default(), Box::new(InMemoryStorage::new()));
+        let httpserver = HttpServer::new(move || App::new().configure(|sc| server.config(sc)))
+            .bind("0.0.0.0:0")?;
-    // bind was to :0, so the kernel will have selected an unused port
-    let port = httpserver.addrs()[0].port();
+        // bind was to :0, so the kernel will have selected an unused port
+        let port = httpserver.addrs()[0].port();
+        actix_rt::spawn(httpserver.run());
+        Ok(port)
+    }
-    httpserver.run();
+    fn client(port: u16) -> anyhow::Result<()> {
-    // set up two replicas, and demonstrate replication between them
-    let mut rep1 = Replica::new(StorageConfig::InMemory.into_storage()?);
-    let mut rep2 = Replica::new(StorageConfig::InMemory.into_storage()?);
+        // set up two replicas, and demonstrate replication between them
+        let mut rep1 = Replica::new(StorageConfig::InMemory.into_storage()?);
+        let mut rep2 = Replica::new(StorageConfig::InMemory.into_storage()?);
-    let client_key = Uuid::new_v4();
-    let encryption_secret = b"abc123".to_vec();
-    let make_server = || {
-        ServerConfig::Remote {
-            origin: format!("http://127.0.0.1:{}", port),
-            client_key,
-            encryption_secret: encryption_secret.clone(),
-        }
-        .into_server()
-    };
+        let client_key = Uuid::new_v4();
+        let encryption_secret = b"abc123".to_vec();
+        let make_server = || {
+            ServerConfig::Remote {
+                origin: format!("http://127.0.0.1:{}", port),
+                client_key,
+                encryption_secret: encryption_secret.clone(),
+            }
+            .into_server()
+        };
-    let mut serv1 = make_server()?;
-    let mut serv2 = make_server()?;
+        let mut serv1 = make_server()?;
+        let mut serv2 = make_server()?;
-    // add some tasks on rep1
-    let t1 = rep1.new_task(Status::Pending, "test 1".into())?;
-    let t2 = rep1.new_task(Status::Pending, "test 2".into())?;
+        // add some tasks on rep1
+        let t1 = rep1.new_task(Status::Pending, "test 1".into())?;
+        let t2 = rep1.new_task(Status::Pending, "test 2".into())?;
-    // modify t1
-    let mut t1 = t1.into_mut(&mut rep1);
-    t1.start()?;
-    let t1 = t1.into_immut();
+        // modify t1
+        let mut t1 = t1.into_mut(&mut rep1);
+        t1.start()?;
+        let t1 = t1.into_immut();
-    rep1.sync(&mut serv1, false)?;
-    rep2.sync(&mut serv2, false)?;
+        rep1.sync(&mut serv1, false)?;
+        rep2.sync(&mut serv2, false)?;
-    // those tasks should exist on rep2 now
-    let t12 = rep2
-        .get_task(t1.get_uuid())?
-        .expect("expected task 1 on rep2");
-    let t22 = rep2
-        .get_task(t2.get_uuid())?
-        .expect("expected task 2 on rep2");
+        // those tasks should exist on rep2 now
+        let t12 = rep2
+            .get_task(t1.get_uuid())?
+            .expect("expected task 1 on rep2");
+        let t22 = rep2
+            .get_task(t2.get_uuid())?
+            .expect("expected task 2 on rep2");
-    assert_eq!(t12.get_description(), "test 1");
-    assert_eq!(t12.is_active(), true);
-    assert_eq!(t22.get_description(), "test 2");
-    assert_eq!(t22.is_active(), false);
+        assert_eq!(t12.get_description(), "test 1");
+        assert_eq!(t12.is_active(), true);
+        assert_eq!(t22.get_description(), "test 2");
+        assert_eq!(t22.is_active(), false);
-    // make non-conflicting changes on the two replicas
-    let mut t2 = t2.into_mut(&mut rep1);
-    t2.set_status(Status::Completed)?;
-    let t2 = t2.into_immut();
+        // make non-conflicting changes on the two replicas
+        let mut t2 = t2.into_mut(&mut rep1);
+        t2.set_status(Status::Completed)?;
+        let t2 = t2.into_immut();
-    let mut t12 = t12.into_mut(&mut rep2);
-    t12.set_status(Status::Completed)?;
+        let mut t12 = t12.into_mut(&mut rep2);
+        t12.set_status(Status::Completed)?;
-    // sync those changes back and forth
-    rep1.sync(&mut serv1, false)?; // rep1 -> server
-    rep2.sync(&mut serv2, false)?; // server -> rep2, rep2 -> server
-    rep1.sync(&mut serv1, false)?; // server -> rep1
+        // sync those changes back and forth
+        rep1.sync(&mut serv1, false)?; // rep1 -> server
+        rep2.sync(&mut serv2, false)?; // server -> rep2, rep2 -> server
+        rep1.sync(&mut serv1, false)?; // server -> rep1
-    let t1 = rep1
-        .get_task(t1.get_uuid())?
-        .expect("expected task 1 on rep1");
-    assert_eq!(t1.get_status(), Status::Completed);
+        let t1 = rep1
+            .get_task(t1.get_uuid())?
+            .expect("expected task 1 on rep1");
+        assert_eq!(t1.get_status(), Status::Completed);
-    let t22 = rep2
-        .get_task(t2.get_uuid())?
-        .expect("expected task 2 on rep2");
-    assert_eq!(t22.get_status(), Status::Completed);
+        let t22 = rep2
+            .get_task(t2.get_uuid())?
+            .expect("expected task 2 on rep2");
+        assert_eq!(t22.get_status(), Status::Completed);
+        Ok(())
+    }
-    // note that we just drop the server here..
+    let port = server().await?;
+    actix_rt::task::spawn_blocking(move || client(port)).await??;
     Ok(())
 }


@@ -14,81 +14,85 @@ async fn sync_with_snapshots() -> anyhow::Result<()> {
         .filter_level(log::LevelFilter::Trace)
         .try_init();
-    let sync_server_config = SyncServerConfig {
-        snapshot_days: 100,
-        snapshot_versions: 3,
-    };
-    let server = Server::new(sync_server_config, Box::new(InMemoryStorage::new()));
-    let httpserver =
-        HttpServer::new(move || App::new().configure(|sc| server.config(sc))).bind("0.0.0.0:0")?;
+    async fn server() -> anyhow::Result<u16> {
+        let sync_server_config = SyncServerConfig {
+            snapshot_days: 100,
+            snapshot_versions: 3,
+        };
+        let server = Server::new(sync_server_config, Box::new(InMemoryStorage::new()));
+        let httpserver = HttpServer::new(move || App::new().configure(|sc| server.config(sc)))
+            .bind("0.0.0.0:0")?;
-    // bind was to :0, so the kernel will have selected an unused port
-    let port = httpserver.addrs()[0].port();
-    httpserver.run();
-    let client_key = Uuid::new_v4();
-    let encryption_secret = b"abc123".to_vec();
-    let make_server = || {
-        ServerConfig::Remote {
-            origin: format!("http://127.0.0.1:{}", port),
-            client_key,
-            encryption_secret: encryption_secret.clone(),
-        }
-        .into_server()
-    };
-    // first we set up a single replica and sync it a lot of times, to establish a sync history.
-    let mut rep1 = Replica::new(StorageConfig::InMemory.into_storage()?);
-    let mut serv1 = make_server()?;
-    let mut t1 = rep1.new_task(Status::Pending, "test 1".into())?;
-    log::info!("Applying modifications on replica 1");
-    for i in 0..=NUM_VERSIONS {
-        let mut t1m = t1.into_mut(&mut rep1);
-        t1m.start()?;
-        t1m.stop()?;
-        t1m.set_description(format!("revision {}", i))?;
-        t1 = t1m.into_immut();
-        rep1.sync(&mut serv1, false)?;
+        // bind was to :0, so the kernel will have selected an unused port
+        let port = httpserver.addrs()[0].port();
+        actix_rt::spawn(httpserver.run());
+        Ok(port)
     }
-    // now set up a second replica and sync it; it should catch up on that history, using a
-    // snapshot. Note that we can't verify that it used a snapshot, because the server currently
-    // keeps all versions (so rep2 could sync from the beginning of the version history). You can
-    // manually verify that it is applying a snapshot by adding `assert!(false)` below and skimming
-    // the logs.
+    fn client(port: u16) -> anyhow::Result<()> {
+        let client_key = Uuid::new_v4();
+        let encryption_secret = b"abc123".to_vec();
+        let make_server = || {
+            ServerConfig::Remote {
+                origin: format!("http://127.0.0.1:{}", port),
+                client_key,
+                encryption_secret: encryption_secret.clone(),
+            }
+            .into_server()
+        };
-    let mut rep2 = Replica::new(StorageConfig::InMemory.into_storage()?);
-    let mut serv2 = make_server()?;
+        // first we set up a single replica and sync it a lot of times, to establish a sync history.
+        let mut rep1 = Replica::new(StorageConfig::InMemory.into_storage()?);
+        let mut serv1 = make_server()?;
-    log::info!("Syncing replica 2");
-    rep2.sync(&mut serv2, false)?;
+        let mut t1 = rep1.new_task(Status::Pending, "test 1".into())?;
+        log::info!("Applying modifications on replica 1");
+        for i in 0..=NUM_VERSIONS {
+            let mut t1m = t1.into_mut(&mut rep1);
+            t1m.start()?;
+            t1m.stop()?;
+            t1m.set_description(format!("revision {}", i))?;
+            t1 = t1m.into_immut();
-    // those tasks should exist on rep2 now
-    let t12 = rep2
-        .get_task(t1.get_uuid())?
-        .expect("expected task 1 on rep2");
+            rep1.sync(&mut serv1, false)?;
+        }
-    assert_eq!(t12.get_description(), format!("revision {}", NUM_VERSIONS));
-    assert_eq!(t12.is_active(), false);
+        // now set up a second replica and sync it; it should catch up on that history, using a
+        // snapshot. Note that we can't verify that it used a snapshot, because the server
+        // currently keeps all versions (so rep2 could sync from the beginning of the version
+        // history). You can manually verify that it is applying a snapshot by adding
+        // `assert!(false)` below and skimming the logs.
-    // sync that back to replica 1
-    t12.into_mut(&mut rep2)
-        .set_description("sync-back".to_owned())?;
-    rep2.sync(&mut serv2, false)?;
-    rep1.sync(&mut serv1, false)?;
+        let mut rep2 = Replica::new(StorageConfig::InMemory.into_storage()?);
+        let mut serv2 = make_server()?;
-    let t11 = rep1
-        .get_task(t1.get_uuid())?
-        .expect("expected task 1 on rep1");
+        log::info!("Syncing replica 2");
+        rep2.sync(&mut serv2, false)?;
-    assert_eq!(t11.get_description(), "sync-back");
+        // those tasks should exist on rep2 now
+        let t12 = rep2
+            .get_task(t1.get_uuid())?
+            .expect("expected task 1 on rep2");
+        // uncomment this to force a failure and see the logs
+        // assert!(false);
+        assert_eq!(t12.get_description(), format!("revision {}", NUM_VERSIONS));
+        assert_eq!(t12.is_active(), false);
-    // note that we just drop the server here..
+        // sync that back to replica 1
+        t12.into_mut(&mut rep2)
+            .set_description("sync-back".to_owned())?;
+        rep2.sync(&mut serv2, false)?;
+        rep1.sync(&mut serv1, false)?;
+        let t11 = rep1
+            .get_task(t1.get_uuid())?
+            .expect("expected task 1 on rep1");
+        assert_eq!(t11.get_description(), "sync-back");
+        Ok(())
+    }
+    let port = server().await?;
+    actix_rt::task::spawn_blocking(move || client(port)).await??;
     Ok(())
 }


@@ -9,7 +9,7 @@ publish = false
 [dependencies]
 uuid = { version = "^1.3.0", features = ["serde", "v4"] }
-actix-web = "^3.3.2"
+actix-web = "^4.3.1"
 anyhow = "1.0"
 thiserror = "1.0"
 futures = "^0.3.25"
@@ -22,6 +22,6 @@ rusqlite = { version = "0.29", features = ["bundled"] }
 chrono = { version = "^0.4.22", features = ["serde"] }
 [dev-dependencies]
-actix-rt = "^1.1.1"
+actix-rt = "2"
 tempfile = "3"
 pretty_assertions = "1"


@@ -19,9 +19,11 @@ const MAX_SIZE: usize = 100 * 1024 * 1024;
 pub(crate) async fn service(
     req: HttpRequest,
     server_state: web::Data<Arc<ServerState>>,
-    web::Path((version_id,)): web::Path<(VersionId,)>,
+    path: web::Path<VersionId>,
     mut payload: web::Payload,
 ) -> Result<HttpResponse> {
+    let version_id = path.into_inner();
     // check content-type
     if req.content_type() != SNAPSHOT_CONTENT_TYPE {
         return Err(error::ErrorBadRequest("Bad content-type"));
@@ -100,8 +102,8 @@ mod test {
         let uri = format!("/v1/client/add-snapshot/{}", version_id);
         let req = test::TestRequest::post()
             .uri(&uri)
-            .header("Content-Type", "application/vnd.taskchampion.snapshot")
-            .header("X-Client-Key", client_key.to_string())
+            .insert_header(("Content-Type", "application/vnd.taskchampion.snapshot"))
+            .insert_header(("X-Client-Key", client_key.to_string()))
             .set_payload(b"abcd".to_vec())
             .to_request();
         let resp = test::call_service(&mut app, req).await;
@@ -111,14 +113,14 @@
         let uri = "/v1/client/snapshot";
         let req = test::TestRequest::get()
             .uri(uri)
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("X-Client-Key", client_key.to_string()))
             .to_request();
-        let mut resp = test::call_service(&mut app, req).await;
+        let resp = test::call_service(&mut app, req).await;
         assert_eq!(resp.status(), StatusCode::OK);
-        use futures::StreamExt;
-        let (bytes, _) = resp.take_body().into_future().await;
-        assert_eq!(bytes.unwrap().unwrap().as_ref(), b"abcd");
+        use actix_web::body::MessageBody;
+        let bytes = resp.into_body().try_into_bytes().unwrap();
+        assert_eq!(bytes.as_ref(), b"abcd");
         Ok(())
     }
@@ -143,8 +145,8 @@ mod test {
         let uri = format!("/v1/client/add-snapshot/{}", version_id);
         let req = test::TestRequest::post()
             .uri(&uri)
-            .header("Content-Type", "application/vnd.taskchampion.snapshot")
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("Content-Type", "application/vnd.taskchampion.snapshot"))
+            .append_header(("X-Client-Key", client_key.to_string()))
             .set_payload(b"abcd".to_vec())
             .to_request();
         let resp = test::call_service(&mut app, req).await;
@@ -154,7 +156,7 @@
         let uri = "/v1/client/snapshot";
         let req = test::TestRequest::get()
             .uri(uri)
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("X-Client-Key", client_key.to_string()))
             .to_request();
         let resp = test::call_service(&mut app, req).await;
         assert_eq!(resp.status(), StatusCode::NOT_FOUND);
@@ -172,8 +174,8 @@
         let uri = format!("/v1/client/add-snapshot/{}", version_id);
         let req = test::TestRequest::post()
             .uri(&uri)
-            .header("Content-Type", "not/correct")
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("Content-Type", "not/correct"))
+            .append_header(("X-Client-Key", client_key.to_string()))
             .set_payload(b"abcd".to_vec())
             .to_request();
         let resp = test::call_service(&mut app, req).await;
@@ -192,11 +194,11 @@
         let uri = format!("/v1/client/add-snapshot/{}", version_id);
         let req = test::TestRequest::post()
             .uri(&uri)
-            .header(
+            .append_header((
                 "Content-Type",
                 "application/vnd.taskchampion.history-segment",
-            )
-            .header("X-Client-Key", client_key.to_string())
+            ))
+            .append_header(("X-Client-Key", client_key.to_string()))
             .to_request();
         let resp = test::call_service(&mut app, req).await;
         assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
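
The test changes in this file follow actix-web 4's `TestRequest` API: the old `.header(name, value)` builder is gone, and each header is passed as a single tuple to `.insert_header(...)` or `.append_header(...)`. A small sketch of the new form (the URI and header values below are placeholders, not taken from the real tests):

    use actix_web::test;

    fn build_example_request() {
        // actix-web 3: `.header("Content-Type", ...)` / `.header("X-Client-Key", ...)`
        // actix-web 4: one tuple per header; `insert_header` replaces any existing
        // value for that name, `append_header` adds another value alongside it.
        let _req = test::TestRequest::post()
            .uri("/v1/client/add-snapshot/placeholder-version-id")
            .insert_header(("Content-Type", "application/vnd.taskchampion.snapshot"))
            .append_header(("X-Client-Key", "placeholder-client-key"))
            .set_payload(b"abcd".to_vec())
            .to_request();
    }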


@@ -27,9 +27,11 @@ const MAX_SIZE: usize = 100 * 1024 * 1024;
 pub(crate) async fn service(
     req: HttpRequest,
     server_state: web::Data<Arc<ServerState>>,
-    web::Path((parent_version_id,)): web::Path<(VersionId,)>,
+    path: web::Path<VersionId>,
     mut payload: web::Payload,
 ) -> Result<HttpResponse> {
+    let parent_version_id = path.into_inner();
     // check content-type
     if req.content_type() != HISTORY_SEGMENT_CONTENT_TYPE {
         return Err(error::ErrorBadRequest("Bad content-type"));
@@ -80,21 +82,21 @@ pub(crate) async fn service(
     Ok(match result {
         AddVersionResult::Ok(version_id) => {
             let mut rb = HttpResponse::Ok();
-            rb.header(VERSION_ID_HEADER, version_id.to_string());
+            rb.append_header((VERSION_ID_HEADER, version_id.to_string()));
             match snap_urgency {
                 SnapshotUrgency::None => {}
                 SnapshotUrgency::Low => {
-                    rb.header(SNAPSHOT_REQUEST_HEADER, "urgency=low");
+                    rb.append_header((SNAPSHOT_REQUEST_HEADER, "urgency=low"));
                 }
                 SnapshotUrgency::High => {
-                    rb.header(SNAPSHOT_REQUEST_HEADER, "urgency=high");
+                    rb.append_header((SNAPSHOT_REQUEST_HEADER, "urgency=high"));
                 }
             };
             rb.finish()
         }
         AddVersionResult::ExpectedParentVersion(parent_version_id) => {
             let mut rb = HttpResponse::Conflict();
-            rb.header(PARENT_VERSION_ID_HEADER, parent_version_id.to_string());
+            rb.append_header((PARENT_VERSION_ID_HEADER, parent_version_id.to_string()));
             rb.finish()
         }
     })
@@ -128,11 +130,11 @@ mod test {
         let uri = format!("/v1/client/add-version/{}", parent_version_id);
         let req = test::TestRequest::post()
             .uri(&uri)
-            .header(
+            .append_header((
                 "Content-Type",
                 "application/vnd.taskchampion.history-segment",
-            )
-            .header("X-Client-Key", client_key.to_string())
+            ))
+            .append_header(("X-Client-Key", client_key.to_string()))
             .set_payload(b"abcd".to_vec())
             .to_request();
         let resp = test::call_service(&mut app, req).await;
@@ -170,11 +172,11 @@ mod test {
         let uri = format!("/v1/client/add-version/{}", parent_version_id);
         let req = test::TestRequest::post()
             .uri(&uri)
-            .header(
+            .append_header((
                 "Content-Type",
                 "application/vnd.taskchampion.history-segment",
-            )
-            .header("X-Client-Key", client_key.to_string())
+            ))
+            .append_header(("X-Client-Key", client_key.to_string()))
             .set_payload(b"abcd".to_vec())
             .to_request();
         let resp = test::call_service(&mut app, req).await;
@@ -198,8 +200,8 @@ mod test {
         let uri = format!("/v1/client/add-version/{}", parent_version_id);
         let req = test::TestRequest::post()
             .uri(&uri)
-            .header("Content-Type", "not/correct")
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("Content-Type", "not/correct"))
+            .append_header(("X-Client-Key", client_key.to_string()))
             .set_payload(b"abcd".to_vec())
             .to_request();
         let resp = test::call_service(&mut app, req).await;
@@ -218,11 +220,11 @@ mod test {
         let uri = format!("/v1/client/add-version/{}", parent_version_id);
         let req = test::TestRequest::post()
             .uri(&uri)
-            .header(
+            .append_header((
                 "Content-Type",
                 "application/vnd.taskchampion.history-segment",
-            )
-            .header("X-Client-Key", client_key.to_string())
+            ))
+            .append_header(("X-Client-Key", client_key.to_string()))
             .to_request();
         let resp = test::call_service(&mut app, req).await;
         assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
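
The signature change at the top of this handler is the actix-web 4 path-extractor style used throughout the commit: rather than destructuring a one-element tuple in the argument list, the handler takes `web::Path<T>` and calls `into_inner()` in the body. A hedged sketch with a placeholder handler and route, assuming the `uuid` crate's `serde` feature as in the project's Cargo.toml:

    use actix_web::{web, HttpResponse, Result};
    use uuid::Uuid;

    // actix-web 3 style (as removed above):
    //     async fn service(web::Path((version_id,)): web::Path<(Uuid,)>, ...) -> Result<HttpResponse>
    // actix-web 4 style used in this commit:
    async fn example_service(path: web::Path<Uuid>) -> Result<HttpResponse> {
        let parent_version_id = path.into_inner();
        Ok(HttpResponse::Ok().body(parent_version_id.to_string()))
    }

    // Illustrative registration only; the real routes live in api_scope().
    fn example_routes(cfg: &mut web::ServiceConfig) {
        cfg.route(
            "/v1/client/add-version/{parent_version_id}",
            web::post().to(example_service),
        );
    }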


@@ -18,8 +18,10 @@ use std::sync::Arc;
 pub(crate) async fn service(
     req: HttpRequest,
     server_state: web::Data<Arc<ServerState>>,
-    web::Path((parent_version_id,)): web::Path<(VersionId,)>,
+    path: web::Path<VersionId>,
 ) -> Result<HttpResponse> {
+    let parent_version_id = path.into_inner();
     let mut txn = server_state.storage.txn().map_err(failure_to_ise)?;
     let client_key = client_key_header(&req)?;
@@ -44,8 +46,8 @@ pub(crate) async fn service(
             history_segment,
         } => Ok(HttpResponse::Ok()
             .content_type(HISTORY_SEGMENT_CONTENT_TYPE)
-            .header(VERSION_ID_HEADER, version_id.to_string())
-            .header(PARENT_VERSION_ID_HEADER, parent_version_id.to_string())
+            .append_header((VERSION_ID_HEADER, version_id.to_string()))
+            .append_header((PARENT_VERSION_ID_HEADER, parent_version_id.to_string()))
             .body(history_segment)),
         GetVersionResult::NotFound => Err(error::ErrorNotFound("no such version")),
         GetVersionResult::Gone => Err(error::ErrorGone("version has been deleted")),
@@ -83,9 +85,9 @@ mod test {
         let uri = format!("/v1/client/get-child-version/{}", parent_version_id);
         let req = test::TestRequest::get()
             .uri(&uri)
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("X-Client-Key", client_key.to_string()))
             .to_request();
-        let mut resp = test::call_service(&mut app, req).await;
+        let resp = test::call_service(&mut app, req).await;
         assert_eq!(resp.status(), StatusCode::OK);
         assert_eq!(
             resp.headers().get("X-Version-Id").unwrap(),
@@ -100,9 +102,9 @@ mod test {
             &"application/vnd.taskchampion.history-segment".to_string()
         );
-        use futures::StreamExt;
-        let (bytes, _) = resp.take_body().into_future().await;
-        assert_eq!(bytes.unwrap().unwrap().as_ref(), b"abcd");
+        use actix_web::body::MessageBody;
+        let bytes = resp.into_body().try_into_bytes().unwrap();
+        assert_eq!(bytes.as_ref(), b"abcd");
     }
     #[actix_rt::test]
@@ -117,7 +119,7 @@ mod test {
         let uri = format!("/v1/client/get-child-version/{}", parent_version_id);
         let req = test::TestRequest::get()
             .uri(&uri)
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("X-Client-Key", client_key.to_string()))
             .to_request();
         let resp = test::call_service(&mut app, req).await;
         assert_eq!(resp.status(), StatusCode::NOT_FOUND);
@@ -144,7 +146,7 @@ mod test {
         let uri = format!("/v1/client/get-child-version/{}", parent_version_id);
         let req = test::TestRequest::get()
             .uri(&uri)
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("X-Client-Key", client_key.to_string()))
             .to_request();
         let resp = test::call_service(&mut app, req).await;
         assert_eq!(resp.status(), StatusCode::GONE);
@@ -157,7 +159,7 @@ mod test {
         let uri = format!("/v1/client/get-child-version/{}", NIL_VERSION_ID);
         let req = test::TestRequest::get()
            .uri(&uri)
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("X-Client-Key", client_key.to_string()))
             .to_request();
         let resp = test::call_service(&mut app, req).await;
         assert_eq!(resp.status(), StatusCode::NOT_FOUND);
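
The other recurring test change is how the response body is read: actix-web 4 drops `take_body()`, so the tests consume the response and use `MessageBody::try_into_bytes` instead of collecting a stream with `futures::StreamExt`. A sketch of just that conversion, assuming a simple in-memory body as in these tests (the response here is a stand-in for what `test::call_service(...)` returns):

    use actix_web::body::MessageBody;
    use actix_web::HttpResponse;

    fn read_body_example() {
        // Stand-in response with a known in-memory body.
        let resp = HttpResponse::Ok().body("abcd");

        // actix-web 3: `let (bytes, _) = resp.take_body().into_future().await;`
        // actix-web 4: consume the response and take the whole body as Bytes.
        let bytes = resp.into_body().try_into_bytes().unwrap();
        assert_eq!(bytes.as_ref(), b"abcd");
    }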


@@ -32,7 +32,7 @@ pub(crate) async fn service(
     {
         Ok(HttpResponse::Ok()
             .content_type(SNAPSHOT_CONTENT_TYPE)
-            .header(VERSION_ID_HEADER, version_id.to_string())
+            .append_header((VERSION_ID_HEADER, version_id.to_string()))
             .body(data))
     } else {
         Err(error::ErrorNotFound("no snapshot"))
@@ -66,7 +66,7 @@ mod test {
         let uri = "/v1/client/snapshot";
         let req = test::TestRequest::get()
             .uri(uri)
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("X-Client-Key", client_key.to_string()))
             .to_request();
         let resp = test::call_service(&mut app, req).await;
         assert_eq!(resp.status(), StatusCode::NOT_FOUND);
@@ -102,13 +102,13 @@ mod test {
         let uri = "/v1/client/snapshot";
         let req = test::TestRequest::get()
             .uri(uri)
-            .header("X-Client-Key", client_key.to_string())
+            .append_header(("X-Client-Key", client_key.to_string()))
             .to_request();
-        let mut resp = test::call_service(&mut app, req).await;
+        let resp = test::call_service(&mut app, req).await;
         assert_eq!(resp.status(), StatusCode::OK);
-        use futures::StreamExt;
-        let (bytes, _) = resp.take_body().into_future().await;
-        assert_eq!(bytes.unwrap().unwrap().as_ref(), snapshot_data);
+        use actix_web::body::MessageBody;
+        let bytes = resp.into_body().try_into_bytes().unwrap();
+        assert_eq!(bytes.as_ref(), snapshot_data);
     }
 }


@@ -34,10 +34,9 @@ impl Server {
     pub fn config(&self, cfg: &mut web::ServiceConfig) {
         cfg.service(
             web::scope("")
-                .data(self.server_state.clone())
+                .app_data(web::Data::new(self.server_state.clone()))
                 .wrap(
-                    middleware::DefaultHeaders::new()
-                        .header("Cache-Control", "no-store, max-age=0"),
+                    middleware::DefaultHeaders::new().add(("Cache-Control", "no-store, max-age=0")),
                 )
                 .service(index)
                 .service(api_scope()),
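
The last hunk covers the remaining actix-web 4 adjustments to service configuration: shared state is registered with `.app_data(web::Data::new(...))` now that the `.data(...)` shortcut is gone, and `DefaultHeaders` takes a tuple via `.add(...)` rather than `.header(...)`. A sketch of the same shape with a placeholder state type and route (not the project's actual types):

    use actix_web::{middleware, web, HttpResponse};

    // Placeholder state; the real code stores an Arc<ServerState>.
    struct AppState;

    async fn index() -> HttpResponse {
        HttpResponse::Ok().finish()
    }

    // Mirrors the shape of Server::config after the update (sketch only).
    fn config(cfg: &mut web::ServiceConfig) {
        cfg.service(
            web::scope("")
                // actix-web 3: `.data(state)`
                .app_data(web::Data::new(AppState))
                .wrap(
                    // actix-web 3: `DefaultHeaders::new().header(name, value)`
                    middleware::DefaultHeaders::new().add(("Cache-Control", "no-store, max-age=0")),
                )
                .route("/", web::get().to(index)),
        );
    }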