
Add benchmark for full stack request lifecycle (#1298)

* add benchmark for full stack request lifecycle

* add direct service benchmarks

* fix newline

* add cloneable service benchmarks

* remove cloneable bench experiments + cargo fmt

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
Authored by Maxim Vorobjov on 2020-01-25 01:05:25 +02:00; committed by Yuki Okushi
parent cf3577550c
commit 8888520d83
5 changed files with 184 additions and 3 deletions

Cargo.toml

@@ -99,6 +99,7 @@ env_logger = "0.6"
serde_derive = "1.0"
brotli2 = "0.3.2"
flate2 = "1.0.13"
criterion = "0.3"
[profile.release]
lto = true
@@ -116,3 +117,11 @@ actix-session = { path = "actix-session" }
actix-files = { path = "actix-files" }
actix-multipart = { path = "actix-multipart" }
awc = { path = "awc" }
[[bench]]
name = "server"
harness = false
[[bench]]
name = "service"
harness = false
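
The `harness = false` lines are what let Criterion drive these benches: they disable the default libtest harness so that `criterion_main!` can supply `main` instead. For orientation, a minimal bench registered this way might look like the sketch below; the file name `benches/example.rs` and the measured expression are hypothetical, not part of this commit.

```rust
// Hypothetical benches/example.rs (illustration only): the smallest bench that
// works once `harness = false` is set. criterion_main! generates the `main`
// function that the disabled libtest harness would otherwise provide.
use criterion::{black_box, criterion_group, criterion_main, Criterion};

fn bench_noop(c: &mut Criterion) {
    // black_box keeps the compiler from optimizing the measured expression away
    c.bench_function("noop_add", |b| b.iter(|| black_box(1u64) + black_box(1u64)));
}

criterion_group!(example_benches, bench_noop);
criterion_main!(example_benches);
```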

@@ -6,7 +6,7 @@ use actix_service::Service;
#[doc(hidden)]
/// Service that allows to turn non-clone service to a service with `Clone` impl
///
///
/// # Panics
/// CloneableService might panic with some creative use of thread local storage.
/// See https://github.com/actix/actix-web/issues/1295 for example

@@ -14,13 +14,13 @@ use std::fmt;
/// normal. In some browsers, this will implicitly handle the cookie as if "Lax"
/// and in others, "None". It's best to explicitly set the `SameSite` attribute
/// to avoid inconsistent behavior.
///
///
/// **Note:** Depending on browser, the `Secure` attribute may be required for
/// `SameSite` "None" cookies to be accepted.
///
/// **Note:** This cookie attribute is an HTTP draft! Its meaning and definition
/// are subject to change.
///
///
/// More info about these draft changes can be found in the draft spec:
/// - https://tools.ietf.org/html/draft-west-cookie-incrementalism-00
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
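
As an aside, not part of this diff: explicitly setting the attribute when building a cookie might look like the following sketch. The cookie name and value are made up, and it assumes actix-web's re-exported `cookie` module.

```rust
// Hedged sketch: set SameSite explicitly so browsers do not fall back to
// inconsistent defaults. Cookie name/value are illustrative only.
use actix_web::cookie::{Cookie, SameSite};
use actix_web::HttpResponse;

fn response_with_cookie() -> HttpResponse {
    let cookie = Cookie::build("session", "abc123")
        // many browsers require Secure when SameSite is "None"
        .secure(true)
        .same_site(SameSite::Lax)
        .finish();
    HttpResponse::Ok().cookie(cookie).finish()
}
```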

benches/server.rs Normal file

@@ -0,0 +1,64 @@
use actix_web::{test, web, App, HttpResponse};
use awc::Client;
use criterion::{criterion_group, criterion_main, Criterion};
use futures::future::join_all;
const STR: &str = "Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World \
Hello World Hello World Hello World Hello World Hello World";

// benchmark sending all requests at the same time
fn bench_async_burst(c: &mut Criterion) {
    let srv = test::start(|| {
        App::new()
            .service(web::resource("/").route(web::to(|| HttpResponse::Ok().body(STR))))
    });

    // We are using System here, since Runtime requires a preinitialized tokio runtime
    // (maybe worth adding to the actix_rt docs)
    let url = srv.url("/");
    let mut rt = actix_rt::System::new("test");

    c.bench_function("get_body_async_burst", move |b| {
        b.iter_custom(|iters| {
            let client = Client::new().get(url.clone()).freeze().unwrap();

            let start = std::time::Instant::now();
            // benchmark body
            let resps = rt.block_on(async move {
                let burst = (0..iters).map(|_| client.send());
                join_all(burst).await
            });
            let elapsed = start.elapsed();

            // failed requests might point at a problem (e.g. a bench timeout)
            let failed = resps.iter().filter(|r| r.is_err()).count();
            if failed > 0 {
                eprintln!("failed {} requests (might be bench timeout)", failed);
            };

            elapsed
        })
    });
}

criterion_group!(server_benches, bench_async_burst);
criterion_main!(server_benches);
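
With the `[[bench]]` registration above, this file runs through Criterion as `cargo bench --bench server`. The burst bench uses `iter_custom` so that all `iters` requests can be fired concurrently via `join_all` and the whole batch timed as one measurement, exercising the full client-to-server stack rather than a single handler call.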

benches/service.rs Normal file

@@ -0,0 +1,108 @@
use actix_service::Service;
use actix_web::dev::{ServiceRequest, ServiceResponse};
use actix_web::{web, App, Error, HttpResponse};
use criterion::{criterion_main, Criterion};
use std::cell::RefCell;
use std::rc::Rc;
use actix_web::test::{init_service, ok_service, TestRequest};
/// Criterion benchmark for an async Service.
/// Should be used from within a Criterion group:
/// ```rust,ignore
/// let mut criterion: ::criterion::Criterion<_> =
/// ::criterion::Criterion::default().configure_from_args();
/// bench_async_service(&mut criterion, ok_service(), "async_service_direct");
/// ```
///
/// Useful for benching Service wrappers:
/// using a minimal service implementation we first measure the
/// time to run the bare service, then the time with the wrapper applied.
///
/// Sample output
/// async_service_direct time: [1.0908 us 1.1656 us 1.2613 us]
pub fn bench_async_service<S>(c: &mut Criterion, srv: S, name: &str)
where
    S: Service<Request = ServiceRequest, Response = ServiceResponse, Error = Error>
        + 'static,
{
    let mut rt = actix_rt::System::new("test");
    let srv = Rc::new(RefCell::new(srv));

    let req = TestRequest::default().to_srv_request();
    assert!(rt
        .block_on(srv.borrow_mut().call(req))
        .unwrap()
        .status()
        .is_success());

    // start benchmark loops
    c.bench_function(name, move |b| {
        b.iter_custom(|iters| {
            let srv = srv.clone();
            // exclude request generation; it appears to take significant time vs the call (3us vs 1us)
            let reqs: Vec<_> = (0..iters)
                .map(|_| TestRequest::default().to_srv_request())
                .collect();

            let start = std::time::Instant::now();
            // benchmark body
            rt.block_on(async move {
                for req in reqs {
                    srv.borrow_mut().call(req).await.unwrap();
                }
            });
            let elapsed = start.elapsed();

            // failed calls would already have panicked on the unwrap above
            elapsed
        })
    });
}

async fn index(req: ServiceRequest) -> Result<ServiceResponse, Error> {
    Ok(req.into_response(HttpResponse::Ok().finish()))
}

// Benchmark basic WebService directly.
// This approach is usable for benching a WebService, though it adds some time over the direct service call:
// Sample results on MacBook Pro '14
// time: [2.0724 us 2.1345 us 2.2074 us]
fn async_web_service(c: &mut Criterion) {
    let mut rt = actix_rt::System::new("test");
    let srv = Rc::new(RefCell::new(rt.block_on(init_service(
        App::new().service(web::service("/").finish(index)),
    ))));

    let req = TestRequest::get().uri("/").to_request();
    assert!(rt
        .block_on(srv.borrow_mut().call(req))
        .unwrap()
        .status()
        .is_success());

    // start benchmark loops
    c.bench_function("async_web_service_direct", move |b| {
        b.iter_custom(|iters| {
            let srv = srv.clone();
            let reqs = (0..iters).map(|_| TestRequest::get().uri("/").to_request());

            let start = std::time::Instant::now();
            // benchmark body
            rt.block_on(async move {
                for req in reqs {
                    srv.borrow_mut().call(req).await.unwrap();
                }
            });
            let elapsed = start.elapsed();

            // failed calls would already have panicked on the unwrap above
            elapsed
        })
    });
}

pub fn service_benches() {
    let mut criterion: ::criterion::Criterion<_> =
        ::criterion::Criterion::default().configure_from_args();
    bench_async_service(&mut criterion, ok_service(), "async_service_direct");
    async_web_service(&mut criterion);
}

criterion_main!(service_benches);
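
Note that `service_benches` is an ordinary function rather than a `criterion_group!` invocation: the macro expects plain `fn(&mut Criterion)` items, while `bench_async_service` also needs the service and a name passed in. Since `criterion_main!` simply calls each listed name as a function, the hand-rolled wrapper slots in directly, and the file runs as `cargo bench --bench service`.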