
fix some docs

This commit is contained in:
Rob Ede 2021-12-08 05:43:50 +00:00
parent d35b7644dc
commit e49e559f47
7 changed files with 41 additions and 46 deletions


@@ -27,6 +27,7 @@ use crate::{
use super::{
    codec::Codec,
+   decoder::MAX_BUFFER_SIZE,
    payload::{Payload, PayloadSender, PayloadStatus},
    Message, MessageType,
};
@@ -793,7 +794,6 @@ where
/// Returns true when io stream can be disconnected after write to it.
///
/// It covers these conditions:
-///
/// - `std::io::ErrorKind::ConnectionReset` after partial read.
/// - all data read done.
#[inline(always)]
@@ -813,46 +813,39 @@ where
loop {
    // Return early when read buf exceed decoder's max buffer size.
-   if this.read_buf.len() >= super::decoder::MAX_BUFFER_SIZE {
-       /*
-       At this point it's not known IO stream is still scheduled
-       to be waked up. so force wake up dispatcher just in case.
-       Reason:
-       AsyncRead mostly would only have guarantee wake up
-       when the poll_read return Poll::Pending.
-       Case:
-       When read_buf is beyond max buffer size the early return
-       could be successfully be parsed as a new Request.
-       This case would not generate ParseError::TooLarge
-       and at this point IO stream is not fully read to Pending
-       and would result in dispatcher stuck until timeout (KA)
-       Note:
-       This is a perf choice to reduce branch on
-       <Request as MessageType>::decode.
-       A Request head too large to parse is only checked on
-       httparse::Status::Partial condition.
-       */
+   if this.read_buf.len() >= MAX_BUFFER_SIZE {
+       // At this point it's not known IO stream is still scheduled to be waked up so
+       // force wake up dispatcher just in case.
+       //
+       // Reason:
+       // AsyncRead mostly would only have guarantee wake up when the poll_read
+       // return Poll::Pending.
+       //
+       // Case:
+       // When read_buf is beyond max buffer size the early return could be successfully
+       // be parsed as a new Request. This case would not generate ParseError::TooLarge and
+       // at this point IO stream is not fully read to Pending and would result in
+       // dispatcher stuck until timeout (KA)
+       //
+       // Note:
+       // This is a perf choice to reduce branch on <Request as MessageType>::decode.
+       //
+       // A Request head too large to parse is only checked on
+       // `httparse::Status::Partial` condition.
        if this.payload.is_none() {
-           /*
-           When dispatcher has a payload the responsibility of
-           wake up it would be shift to h1::payload::Payload.
-           Reason:
-           Self wake up when there is payload would waste poll
-           and/or result in over read.
-           Case:
-           When payload is (partial) dropped by user there is
-           no need to do read anymore.
-           At this case read_buf could always remain beyond
-           MAX_BUFFER_SIZE and self wake up would be busy poll
-           dispatcher and waste resource.
-           */
+           // When dispatcher has a payload the responsibility of wake up it would be shift
+           // to h1::payload::Payload.
+           //
+           // Reason:
+           // Self wake up when there is payload would waste poll and/or result in
+           // over read.
+           //
+           // Case:
+           // When payload is (partial) dropped by user there is no need to do
+           // read anymore. At this case read_buf could always remain beyond
+           // MAX_BUFFER_SIZE and self wake up would be busy poll dispatcher and
+           // waste resources.
            cx.waker().wake_by_ref();
        }
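The forced wake-up retained above is the general self-wake pattern for futures: if `poll` returns `Poll::Pending` without having registered its waker with any I/O resource or other task, nothing will ever poll it again. A minimal, self-contained sketch of that pattern (plain Rust for illustration, not actix code):

```rust
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

/// Illustration only: a future that must wake itself because nothing else
/// holds its waker. Returning `Poll::Pending` without the `wake_by_ref` call
/// would leave the task suspended forever, which is the "dispatcher stuck
/// until timeout" scenario described in the comments above.
struct YieldOnce {
    yielded: bool,
}

impl Future for YieldOnce {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        if self.yielded {
            Poll::Ready(())
        } else {
            self.yielded = true;
            // Schedule another poll ourselves before yielding.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}
```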


@@ -109,7 +109,7 @@ where
Poll::Ready(Some((req, tx))) => {
    let (parts, body) = req.into_parts();
    let pl = crate::h2::Payload::new(body);
-   let pl = Payload::<crate::payload::PayloadStream>::H2(pl);
+   let pl = Payload::H2(pl);
    let mut req = Request::with_payload(pl);
    let head = req.head_mut();
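The explicit turbofish could be dropped because the payload's stream type is recoverable from context; the `Payload` enum's type parameter has a default. A rough sketch of that mechanism, using made-up simplified type names rather than the actual actix-http definitions:

```rust
// Hypothetical stand-ins for illustration; the real actix-http types differ.
struct H2Body;
struct DefaultStream;

enum Payload<S = DefaultStream> {
    Stream(S),
    H2(H2Body),
}

fn wrap(body: H2Body) -> Payload {
    // `Payload` in the return type means `Payload<DefaultStream>`, so the
    // variant can be named without `Payload::<DefaultStream>::H2(..)`.
    Payload::H2(body)
}
```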


@@ -6,7 +6,7 @@ use http::header::{HeaderName, InvalidHeaderName};
/// Sealed trait implemented for types that can be effectively borrowed as a [`HeaderValue`].
///
-/// [`HeaderValue`]: crate::http::HeaderValue
+/// [`HeaderValue`]: super::HeaderValue
pub trait AsHeaderName: Sealed {}
pub struct Seal;
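The `Sealed` supertrait on `AsHeaderName` is an instance of the sealed-trait pattern: downstream crates can name and use the trait in bounds but cannot implement it. A generic sketch of the pattern (the real actix-http implementation is more involved):

```rust
mod sealed {
    // Not exported from the crate root, so external code cannot implement it.
    pub trait Sealed {}
}

/// Callers can use this trait in bounds, but only this crate can add impls,
/// because every impl also requires the private `sealed::Sealed` supertrait.
pub trait AsHeaderName: sealed::Sealed {}

impl sealed::Sealed for str {}
impl AsHeaderName for str {}

impl sealed::Sealed for String {}
impl AsHeaderName for String {}
```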


@@ -12,7 +12,7 @@ use super::{Header, IntoHeaderValue};
/// An interface for types that can be converted into a [`HeaderName`]/[`HeaderValue`] pair for
/// insertion into a [`HeaderMap`].
///
-/// [`HeaderMap`]: crate::http::HeaderMap
+/// [`HeaderMap`]: super::HeaderMap
pub trait IntoHeaderPair: Sized {
    type Error: Into<HttpError>;
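For context, the point of this header-pair trait is that several shapes of input (name/value tuples, typed headers, and so on) can be handed to the same builder methods. A hedged usage sketch assuming actix-web's response builder API, which accepts such pairs; the `x-request-id` header here is made up for illustration:

```rust
use actix_web::{http::header::ContentType, HttpResponse};

fn build() -> HttpResponse {
    HttpResponse::Ok()
        // (&str, &str) tuple converted through the header-pair trait.
        .insert_header(("x-request-id", "abc123"))
        // Typed header, converted through the same trait.
        .insert_header(ContentType::plaintext())
        .finish()
}
```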


@@ -47,7 +47,8 @@ impl ResponseBuilder {
/// Create response builder
///
/// # Examples
-// /// use actix_http::{Response, ResponseBuilder, StatusCode};, / ``
+/// ```
+/// use actix_http::{Response, ResponseBuilder, StatusCode};
+/// let res: Response<_> = ResponseBuilder::default().finish();
+/// assert_eq!(res.status(), StatusCode::OK);
+/// ```
@@ -62,7 +63,8 @@ impl ResponseBuilder {
/// Set HTTP status code of this response.
///
/// # Examples
-// /// use actix_http::{ResponseBuilder, StatusCode};, / ``
+/// ```
+/// use actix_http::{ResponseBuilder, StatusCode};
+/// let res = ResponseBuilder::default().status(StatusCode::NOT_FOUND).finish();
+/// assert_eq!(res.status(), StatusCode::NOT_FOUND);
+/// ```


@@ -2,7 +2,7 @@
/// This is meant to be a glob import of the whole error module, but rustdoc can't handle
/// shadowing `Error` type, so it is expanded manually.
-/// See https://github.com/rust-lang/rust/issues/83375
+/// See <https://github.com/rust-lang/rust/issues/83375>
pub use actix_http::error::{
    BlockingError, ContentTypeError, DispatchError, HttpError, ParseError, PayloadError,
};
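The angle brackets matter because rustdoc flags plain URLs in doc comments (the `rustdoc::bare_urls` lint) and renders them as plain text, while `<...>` makes an autolink. A small illustration with a hypothetical item, not from actix-web:

```rust
/// Bare URL: rustdoc warns (`rustdoc::bare_urls`) and renders it as plain text.
/// See https://github.com/rust-lang/rust/issues/83375
pub struct Before;

/// Autolink: rendered as a clickable hyperlink, no warning.
/// See <https://github.com/rust-lang/rust/issues/83375>
pub struct After;
```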


@@ -174,7 +174,7 @@ impl HttpRequest {
/// let opt_t = req.conn_data::<PeerCertificate>();
/// ```
///
-/// [on-connect]: crate::HttpServiceBuilder::on_connect_ext
+/// [on-connect]: crate::HttpServer::on_connect
pub fn conn_data<T: 'static>(&self) -> Option<&T> {
    self.inner
        .conn_data
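The updated link points at the server-level hook that actually feeds `conn_data`. A hedged end-to-end sketch, assuming actix-web 4's `HttpServer::on_connect` callback signature; `ConnInfo` and the inserted value are made up for illustration:

```rust
use std::any::Any;

use actix_web::{dev::Extensions, web, App, HttpRequest, HttpResponse, HttpServer};

#[derive(Clone)]
struct ConnInfo {
    peer: String,
}

// Runs once per connection; whatever is inserted into the extensions here is
// later visible to handlers through `req.conn_data::<T>()`.
fn on_connect(_conn: &dyn Any, ext: &mut Extensions) {
    ext.insert(ConnInfo {
        peer: "127.0.0.1".into(),
    });
}

async fn handler(req: HttpRequest) -> HttpResponse {
    match req.conn_data::<ConnInfo>() {
        Some(info) => HttpResponse::Ok().body(format!("peer: {}", info.peer)),
        None => HttpResponse::Ok().body("no connection data"),
    }
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().route("/", web::get().to(handler)))
        .on_connect(on_connect)
        .bind(("127.0.0.1", 8080))?
        .run()
        .await
}
```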