
dispatcher internals testing (#1840)

Rob Ede 2020-12-23 01:28:17 +00:00 committed by GitHub
parent 05f104c240
commit 2a7f2c1d59
8 changed files with 519 additions and 58 deletions

View file

@ -4,12 +4,12 @@ use std::task::{Context, Poll};
use actix_service::Service;
#[doc(hidden)]
/// Service that turns a non-`Clone` service into a service with a `Clone` impl.
///
/// # Panics
/// `CloneableService` might panic with some creative use of thread-local storage.
/// See https://github.com/actix/actix-web/issues/1295 for an example.
#[doc(hidden)]
pub(crate) struct CloneableService<T: Service>(Rc<RefCell<T>>);
impl<T: Service> CloneableService<T> {
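A minimal standalone sketch of the shared-handle pattern behind this type (assumed names, not the actix-http implementation itself): cloning copies only the `Rc`, so every clone operates on the same non-`Clone` value, and a re-entrant `borrow_mut`, for example one reached through thread-local storage as in issue 1295, panics at runtime.

use std::{cell::RefCell, rc::Rc};

struct NotClone(u32);

#[derive(Clone)]
struct Shared(Rc<RefCell<NotClone>>);

impl Shared {
    fn new(inner: NotClone) -> Self {
        Shared(Rc::new(RefCell::new(inner)))
    }

    fn bump(&self) -> u32 {
        // panics if another handle already holds a mutable borrow
        let mut inner = self.0.borrow_mut();
        inner.0 += 1;
        inner.0
    }
}

fn main() {
    let a = Shared::new(NotClone(0));
    let b = a.clone(); // `Clone` works even though `NotClone` is not `Clone`
    assert_eq!(a.bump(), 1);
    assert_eq!(b.bump(), 2); // both handles mutate the same shared value
}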

View file

@ -3,8 +3,8 @@ use std::{fmt, mem};
use fxhash::FxHashMap;
#[derive(Default)]
/// A type map of request extensions.
#[derive(Default)]
pub struct Extensions {
/// Use FxHasher with a std HashMap for faster
/// lookups on the small `TypeId` (u64 equivalent) keys.
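For readers new to the type-map pattern, a minimal standalone sketch (plain std `HashMap` and assumed names; FxHasher and the real `Extensions` API are omitted) of storing at most one value per concrete type, keyed by `TypeId`:

use std::{
    any::{Any, TypeId},
    collections::HashMap,
};

#[derive(Default)]
struct TypeMap {
    map: HashMap<TypeId, Box<dyn Any>>,
}

impl TypeMap {
    fn insert<T: 'static>(&mut self, val: T) {
        self.map.insert(TypeId::of::<T>(), Box::new(val));
    }

    fn get<T: 'static>(&self) -> Option<&T> {
        self.map
            .get(&TypeId::of::<T>())
            .and_then(|boxed| boxed.downcast_ref::<T>())
    }
}

fn main() {
    let mut ext = TypeMap::default();
    ext.insert(42u32);
    ext.insert(String::from("session"));
    assert_eq!(ext.get::<u32>(), Some(&42));
    assert_eq!(ext.get::<String>().map(String::as_str), Some("session"));
    assert!(ext.get::<bool>().is_none());
}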

View file

@ -58,6 +58,7 @@ impl Codec {
} else {
Flags::empty()
};
Codec {
config,
flags,
@ -69,26 +70,26 @@ impl Codec {
}
}
/// Check if request is upgrade.
#[inline]
/// Check if request is upgrade
pub fn upgrade(&self) -> bool {
self.ctype == ConnectionType::Upgrade
}
/// Check if last response is keep-alive.
#[inline]
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.ctype == ConnectionType::KeepAlive
}
/// Check if keep-alive enabled on server level.
#[inline]
/// Check if keep-alive enabled on server level
pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE_ENABLED)
}
/// Check last request's message type.
#[inline]
/// Check last request's message type
pub fn message_type(&self) -> MessageType {
if self.flags.contains(Flags::STREAM) {
MessageType::Stream

View file

@ -1,8 +1,11 @@
use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io, net};
use std::{
collections::VecDeque,
fmt,
future::Future,
io, mem, net,
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed, FramedParts};
use actix_rt::time::{delay_until, Delay, Instant};
@ -59,6 +62,9 @@ where
{
#[pin]
inner: DispatcherState<T, S, B, X, U>,
#[cfg(test)]
poll_count: u64,
}
#[pin_project(project = DispatcherStateProj)]
@ -247,6 +253,9 @@ where
ka_expire,
ka_timer,
}),
#[cfg(test)]
poll_count: 0,
}
}
}
@ -511,12 +520,12 @@ where
}
}
/// Process one incoming requests
/// Process one incoming request.
pub(self) fn poll_request(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<bool, DispatchError> {
// limit a mount of non processed requests
// limit amount of non-processed requests
if self.messages.len() >= MAX_PIPELINED_MESSAGES || !self.can_read(cx) {
return Ok(false);
}
@ -725,6 +734,12 @@ where
#[inline]
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
#[cfg(test)]
{
*this.poll_count += 1;
}
match this.inner.project() {
DispatcherStateProj::Normal(mut inner) => {
inner.as_mut().poll_keepalive(cx)?;
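The `poll_count` field added in this diff is test-only instrumentation. A compile-level sketch with assumed names shows the shape of the pattern outside the dispatcher: the counter and its increment exist only under `#[cfg(test)]`, so release builds are unchanged while unit tests can assert how many times `poll` ran, as the dispatcher tests added below do.

use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

struct CountedFuture {
    remaining: u8,
    // counter compiled only for tests; release builds have no such field
    #[cfg(test)]
    poll_count: u64,
}

impl Future for CountedFuture {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        #[cfg(test)]
        {
            self.poll_count += 1;
        }

        if self.remaining == 0 {
            Poll::Ready(())
        } else {
            self.remaining -= 1;
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}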
@ -788,10 +803,10 @@ where
let inner_p = inner.as_mut().project();
let mut parts = FramedParts::with_read_buf(
inner_p.io.take().unwrap(),
std::mem::take(inner_p.codec),
std::mem::take(inner_p.read_buf),
mem::take(inner_p.codec),
mem::take(inner_p.read_buf),
);
parts.write_buf = std::mem::take(inner_p.write_buf);
parts.write_buf = mem::take(inner_p.write_buf);
let framed = Framed::from_parts(parts);
let upgrade =
inner_p.upgrade.take().unwrap().call((req, framed));
@ -803,8 +818,11 @@ where
}
// we didn't get WouldBlock from write operation,
// so data get written to kernel completely (OSX)
// so data get written to kernel completely (macOS)
// and we have to write again otherwise response can get stuck
//
// TODO: what? is WouldBlock good or bad?
// want to find a reference for this macOS behavior
if inner.as_mut().poll_flush(cx)? || !drain {
break;
}
@ -854,6 +872,11 @@ where
}
}
/// Returns either:
/// - `Ok(Some(true))` - data was read and done reading all data.
/// - `Ok(Some(false))` - data was read but there should be more to read.
/// - `Ok(None)` - no data was read but there should be more to read later.
/// - Unhandled Errors
fn read_available<T>(
cx: &mut Context<'_>,
io: &mut T,
@ -887,17 +910,17 @@ where
read_some = true;
}
}
Poll::Ready(Err(e)) => {
return if e.kind() == io::ErrorKind::WouldBlock {
Poll::Ready(Err(err)) => {
return if err.kind() == io::ErrorKind::WouldBlock {
if read_some {
Ok(Some(false))
} else {
Ok(None)
}
} else if e.kind() == io::ErrorKind::ConnectionReset && read_some {
} else if err.kind() == io::ErrorKind::ConnectionReset && read_some {
Ok(Some(true))
} else {
Err(e)
Err(err)
}
}
}
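To make the tri-state contract documented above concrete, here is a self-contained sketch of the same idea over blocking `io::Read` (assumed helper name; the real function works on `AsyncRead` and a `BytesMut`):

use std::io::{self, Read};

fn read_available_sketch<T: Read>(io: &mut T, buf: &mut Vec<u8>) -> io::Result<Option<bool>> {
    let mut read_some = false;
    loop {
        let mut chunk = [0u8; 4096];
        match io.read(&mut chunk) {
            // EOF: everything that will ever arrive has been read
            Ok(0) => return Ok(Some(true)),
            Ok(n) => {
                buf.extend_from_slice(&chunk[..n]);
                read_some = true;
            }
            Err(err) if err.kind() == io::ErrorKind::WouldBlock => {
                // no more data right now; report whether anything was read
                return Ok(if read_some { Some(false) } else { None });
            }
            Err(err) => return Err(err),
        }
    }
}

fn main() -> io::Result<()> {
    let mut src = &b"GET / HTTP/1.1\r\n\r\n"[..];
    let mut buf = Vec::new();
    // a byte slice reports EOF once drained, so this yields Ok(Some(true))
    assert_eq!(read_available_sketch(&mut src, &mut buf)?, Some(true));
    assert_eq!(buf, b"GET / HTTP/1.1\r\n\r\n");
    Ok(())
}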
@ -917,13 +940,64 @@ where
#[cfg(test)]
mod tests {
use actix_service::IntoService;
use futures_util::future::{lazy, ok};
use std::{marker::PhantomData, str};
use actix_service::fn_service;
use futures_util::future::{lazy, ready};
use super::*;
use crate::error::Error;
use crate::h1::{ExpectHandler, UpgradeHandler};
use crate::test::TestBuffer;
use crate::{error::Error, KeepAlive};
use crate::{
h1::{ExpectHandler, UpgradeHandler},
test::TestSeqBuffer,
};
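/// Find the next occurrence of `needle` in `haystack[from..]`, returning its
/// offset relative to `from`.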
fn find_slice(haystack: &[u8], needle: &[u8], from: usize) -> Option<usize> {
haystack[from..]
.windows(needle.len())
.position(|window| window == needle)
}
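/// Overwrite any `date` header value in `payload` with a fixed timestamp so
/// that write buffer assertions in these tests are deterministic.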
fn stabilize_date_header(payload: &mut [u8]) {
let mut from = 0;
while let Some(pos) = find_slice(&payload, b"date", from) {
payload[(from + pos)..(from + pos + 35)]
.copy_from_slice(b"date: Thu, 01 Jan 1970 12:34:56 UTC");
from += 35;
}
}
fn ok_service() -> impl Service<Request = Request, Response = Response, Error = Error>
{
fn_service(|_req: Request| ready(Ok::<_, Error>(Response::Ok().finish())))
}
fn echo_path_service(
) -> impl Service<Request = Request, Response = Response, Error = Error> {
fn_service(|req: Request| {
let path = req.path().as_bytes();
ready(Ok::<_, Error>(Response::Ok().body(Body::from_slice(path))))
})
}
fn echo_payload_service(
) -> impl Service<Request = Request, Response = Response, Error = Error> {
fn_service(|mut req: Request| {
Box::pin(async move {
use futures_util::stream::StreamExt as _;
let mut pl = req.take_payload();
let mut body = BytesMut::new();
while let Some(chunk) = pl.next().await {
body.extend_from_slice(chunk.unwrap().bytes())
}
Ok::<_, Error>(Response::Ok().body(body))
})
})
}
#[actix_rt::test]
async fn test_req_parse_err() {
@ -933,9 +1007,7 @@ mod tests {
let mut h1 = Dispatcher::<_, _, _, _, UpgradeHandler<TestBuffer>>::new(
buf,
ServiceConfig::default(),
CloneableService::new(
(|_| ok::<_, Error>(Response::Ok().finish())).into_service(),
),
CloneableService::new(ok_service()),
CloneableService::new(ExpectHandler),
None,
None,
@ -958,4 +1030,274 @@ mod tests {
})
.await;
}
#[actix_rt::test]
async fn test_pipelining() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1.1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(KeepAlive::Disabled, 1, 1, false, None);
let mut h1 = Dispatcher::<_, _, _, _, UpgradeHandler<TestBuffer>>::new(
buf,
cfg,
CloneableService::new(echo_path_service()),
CloneableService::new(ExpectHandler),
None,
None,
Extensions::new(),
None,
);
assert!(matches!(&h1.inner, DispatcherState::Normal(_)));
match Pin::new(&mut h1).poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherState::Normal(ref mut inner) = h1.inner {
let res = &mut inner.io.take().unwrap().write_buf[..];
stabilize_date_header(res);
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/def\
";
assert_eq!(res.to_vec(), exp.to_vec());
}
})
.await;
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(KeepAlive::Disabled, 1, 1, false, None);
let mut h1 = Dispatcher::<_, _, _, _, UpgradeHandler<TestBuffer>>::new(
buf,
cfg,
CloneableService::new(echo_path_service()),
CloneableService::new(ExpectHandler),
None,
None,
Extensions::new(),
None,
);
assert!(matches!(&h1.inner, DispatcherState::Normal(_)));
match Pin::new(&mut h1).poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_err()),
}
// polls: initial (the parse error resolves the dispatcher on the first poll)
assert_eq!(h1.poll_count, 1);
if let DispatcherState::Normal(ref mut inner) = h1.inner {
let res = &mut inner.io.take().unwrap().write_buf[..];
stabilize_date_header(res);
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 400 Bad Request\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(res.to_vec(), exp.to_vec());
}
})
.await;
}
#[actix_rt::test]
async fn test_expect() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(KeepAlive::Disabled, 0, 0, false, None);
let mut h1 = Dispatcher::<_, _, _, _, UpgradeHandler<_>>::new(
buf.clone(),
cfg,
CloneableService::new(echo_payload_service()),
CloneableService::new(ExpectHandler),
None,
None,
Extensions::new(),
None,
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
assert!(Pin::new(&mut h1).poll(cx).is_pending());
assert!(matches!(&h1.inner, DispatcherState::Normal(_)));
// polls: manual
assert_eq!(h1.poll_count, 1);
eprintln!("poll count: {}", h1.poll_count);
if let DispatcherState::Normal(ref inner) = h1.inner {
let io = inner.io.as_ref().unwrap();
let res = &io.write_buf()[..];
assert_eq!(
str::from_utf8(res).unwrap(),
"HTTP/1.1 100 Continue\r\n\r\n"
);
}
buf.extend_read_buf("12345");
assert!(Pin::new(&mut h1).poll(cx).is_ready());
// polls: manual => manual => shutdown
assert_eq!(h1.poll_count, 3);
if let DispatcherState::Normal(ref inner) = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = (&io.write_buf()[..]).to_owned();
stabilize_date_header(&mut res);
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
12345\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn test_eager_expect() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(KeepAlive::Disabled, 0, 0, false, None);
let mut h1 = Dispatcher::<_, _, _, _, UpgradeHandler<_>>::new(
buf.clone(),
cfg,
CloneableService::new(echo_path_service()),
CloneableService::new(ExpectHandler),
None,
None,
Extensions::new(),
None,
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
assert!(Pin::new(&mut h1).poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Normal(_)));
// polls: manual => shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherState::Normal(ref inner) = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = (&io.write_buf()[..]).to_owned();
stabilize_date_header(&mut res);
// Despite the content-length header and even though the request payload has not
// been sent, this test expects a complete service response since the payload
// is not used at all. The service passed to dispatcher is path echo and doesn't
// consume payload bytes.
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 7\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
/upload\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn test_upgrade() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(KeepAlive::Disabled, 0, 0, false, None);
let mut h1 = Dispatcher::<_, _, _, _, UpgradeHandler<_>>::new(
buf.clone(),
cfg,
CloneableService::new(ok_service()),
CloneableService::new(ExpectHandler),
Some(CloneableService::new(UpgradeHandler(PhantomData))),
None,
Extensions::new(),
None,
);
buf.extend_read_buf(
"\
GET /ws HTTP/1.1\r\n\
Connection: Upgrade\r\n\
Upgrade: websocket\r\n\
\r\n\
",
);
assert!(Pin::new(&mut h1).poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Upgrade(_)));
// polls: manual => shutdown
assert_eq!(h1.poll_count, 2);
})
.await;
}
}

View file

@ -1,7 +1,7 @@
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory};
use futures_util::future::{ok, Ready};
use futures_util::future::{ready, Ready};
use crate::error::Error;
use crate::request::Request;
@ -17,8 +17,8 @@ impl ServiceFactory for ExpectHandler {
type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(ExpectHandler)
fn new_service(&self, _: Self::Config) -> Self::Future {
ready(Ok(ExpectHandler))
}
}
@ -33,6 +33,8 @@ impl Service for ExpectHandler {
}
fn call(&mut self, req: Request) -> Self::Future {
ok(req)
ready(Ok(req))
// TODO: add some way to trigger error
// Err(error::ErrorExpectationFailed("test"))
}
}

View file

@ -3,13 +3,13 @@ use std::task::{Context, Poll};
use actix_codec::Framed;
use actix_service::{Service, ServiceFactory};
use futures_util::future::Ready;
use futures_util::future::{ready, Ready};
use crate::error::Error;
use crate::h1::Codec;
use crate::request::Request;
pub struct UpgradeHandler<T>(PhantomData<T>);
pub struct UpgradeHandler<T>(pub(crate) PhantomData<T>);
impl<T> ServiceFactory for UpgradeHandler<T> {
type Config = ();
@ -36,6 +36,6 @@ impl<T> Service for UpgradeHandler<T> {
}
fn call(&mut self, _: Self::Request) -> Self::Future {
unimplemented!()
ready(Ok(()))
}
}

View file

@ -1,9 +1,14 @@
//! Test Various helpers for Actix applications to use during testing.
use std::convert::TryFrom;
use std::io::{self, Read, Write};
use std::pin::Pin;
use std::str::FromStr;
use std::task::{Context, Poll};
//! Various testing helpers for use in internal and app tests.
use std::{
cell::{Ref, RefCell},
convert::TryFrom,
io::{self, Read, Write},
pin::Pin,
rc::Rc,
str::FromStr,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite};
use bytes::{Bytes, BytesMut};
@ -183,7 +188,7 @@ fn parts(parts: &mut Option<Inner>) -> &mut Inner {
parts.as_mut().expect("cannot reuse test request builder")
}
/// Async io buffer
/// Async I/O test buffer.
pub struct TestBuffer {
pub read_buf: BytesMut,
pub write_buf: BytesMut,
@ -191,24 +196,24 @@ pub struct TestBuffer {
}
impl TestBuffer {
/// Create new TestBuffer instance
pub fn new<T>(data: T) -> TestBuffer
/// Create new `TestBuffer` instance with initial read buffer.
pub fn new<T>(data: T) -> Self
where
BytesMut: From<T>,
T: Into<BytesMut>,
{
TestBuffer {
read_buf: BytesMut::from(data),
Self {
read_buf: data.into(),
write_buf: BytesMut::new(),
err: None,
}
}
/// Create new empty TestBuffer instance
pub fn empty() -> TestBuffer {
TestBuffer::new("")
/// Create new empty `TestBuffer` instance.
pub fn empty() -> Self {
Self::new("")
}
/// Add extra data to read buffer.
/// Add data to read buffer.
pub fn extend_read_buf<T: AsRef<[u8]>>(&mut self, data: T) {
self.read_buf.extend_from_slice(data.as_ref())
}
@ -236,6 +241,7 @@ impl io::Write for TestBuffer {
self.write_buf.extend(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
@ -268,3 +274,113 @@ impl AsyncWrite for TestBuffer {
Poll::Ready(Ok(()))
}
}
/// Async I/O test buffer with ability to incrementally add to the read buffer.
#[derive(Clone)]
pub struct TestSeqBuffer(Rc<RefCell<TestSeqInner>>);
impl TestSeqBuffer {
/// Create new `TestSeqBuffer` instance with initial read buffer.
pub fn new<T>(data: T) -> Self
where
T: Into<BytesMut>,
{
Self(Rc::new(RefCell::new(TestSeqInner {
read_buf: data.into(),
write_buf: BytesMut::new(),
err: None,
})))
}
/// Create new empty `TestSeqBuffer` instance.
pub fn empty() -> Self {
Self::new("")
}
pub fn read_buf(&self) -> Ref<'_, BytesMut> {
Ref::map(self.0.borrow(), |inner| &inner.read_buf)
}
pub fn write_buf(&self) -> Ref<'_, BytesMut> {
Ref::map(self.0.borrow(), |inner| &inner.write_buf)
}
pub fn err(&self) -> Ref<'_, Option<io::Error>> {
Ref::map(self.0.borrow(), |inner| &inner.err)
}
/// Add data to read buffer.
pub fn extend_read_buf<T: AsRef<[u8]>>(&mut self, data: T) {
self.0
.borrow_mut()
.read_buf
.extend_from_slice(data.as_ref())
}
}
pub struct TestSeqInner {
read_buf: BytesMut,
write_buf: BytesMut,
err: Option<io::Error>,
}
impl io::Read for TestSeqBuffer {
fn read(&mut self, dst: &mut [u8]) -> Result<usize, io::Error> {
if self.0.borrow().read_buf.is_empty() {
if self.0.borrow().err.is_some() {
Err(self.0.borrow_mut().err.take().unwrap())
} else {
Err(io::Error::new(io::ErrorKind::WouldBlock, ""))
}
} else {
let size = std::cmp::min(self.0.borrow().read_buf.len(), dst.len());
let b = self.0.borrow_mut().read_buf.split_to(size);
dst[..size].copy_from_slice(&b);
Ok(size)
}
}
}
impl io::Write for TestSeqBuffer {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.borrow_mut().write_buf.extend(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl AsyncRead for TestSeqBuffer {
fn poll_read(
self: Pin<&mut Self>,
_: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let r = self.get_mut().read(buf);
match r {
Ok(n) => Poll::Ready(Ok(n)),
Err(err) if err.kind() == io::ErrorKind::WouldBlock => Poll::Pending,
Err(err) => Poll::Ready(Err(err)),
}
}
}
impl AsyncWrite for TestSeqBuffer {
fn poll_write(
self: Pin<&mut Self>,
_: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Poll::Ready(self.get_mut().write(buf))
}
fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
}
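A brief usage sketch for `TestSeqBuffer` (hypothetical test module assumed to live next to the type): one handle is cloned into the code under test while the original keeps appending to the read side and inspecting the write side, mirroring how the dispatcher tests above drive it.

#[cfg(test)]
mod test_seq_buffer_usage {
    use std::io::{Read, Write};

    use super::TestSeqBuffer;

    #[test]
    fn incremental_reads_and_writes() {
        let mut buf = TestSeqBuffer::empty();
        let mut io = buf.clone(); // handle given to the code under test

        buf.extend_read_buf("part one, ");
        let mut dst = [0u8; 64];
        let n = io.read(&mut dst).unwrap();
        assert_eq!(&dst[..n], &b"part one, "[..]);

        // the read buffer is now drained; another read would return WouldBlock
        buf.extend_read_buf("part two");
        let n = io.read(&mut dst).unwrap();
        assert_eq!(&dst[..n], &b"part two"[..]);

        io.write_all(b"response").unwrap();
        assert_eq!(&buf.write_buf()[..], &b"response"[..]);
    }
}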

View file

@ -197,13 +197,13 @@ mod tests {
let req = TestRequest::default().method(Method::POST).finish();
assert_eq!(
HandshakeError::GetMethodRequired,
verify_handshake(req.head()).err().unwrap()
verify_handshake(req.head()).unwrap_err(),
);
let req = TestRequest::default().finish();
assert_eq!(
HandshakeError::NoWebsocketUpgrade,
verify_handshake(req.head()).err().unwrap()
verify_handshake(req.head()).unwrap_err(),
);
let req = TestRequest::default()
@ -211,7 +211,7 @@ mod tests {
.finish();
assert_eq!(
HandshakeError::NoWebsocketUpgrade,
verify_handshake(req.head()).err().unwrap()
verify_handshake(req.head()).unwrap_err(),
);
let req = TestRequest::default()
@ -222,7 +222,7 @@ mod tests {
.finish();
assert_eq!(
HandshakeError::NoConnectionUpgrade,
verify_handshake(req.head()).err().unwrap()
verify_handshake(req.head()).unwrap_err(),
);
let req = TestRequest::default()
@ -237,7 +237,7 @@ mod tests {
.finish();
assert_eq!(
HandshakeError::NoVersionHeader,
verify_handshake(req.head()).err().unwrap()
verify_handshake(req.head()).unwrap_err(),
);
let req = TestRequest::default()
@ -256,7 +256,7 @@ mod tests {
.finish();
assert_eq!(
HandshakeError::UnsupportedVersion,
verify_handshake(req.head()).err().unwrap()
verify_handshake(req.head()).unwrap_err(),
);
let req = TestRequest::default()
@ -275,7 +275,7 @@ mod tests {
.finish();
assert_eq!(
HandshakeError::BadWebsocketKey,
verify_handshake(req.head()).err().unwrap()
verify_handshake(req.head()).unwrap_err(),
);
let req = TestRequest::default()