mirror of https://github.com/fafhrd91/actix-web synced 2025-07-22 08:57:17 +02:00

Compare commits


25 Commits

Author SHA1 Message Date
Nikolay Kim
cd9901c928 prepare release 2018-11-14 16:24:01 -08:00
Nikolay Kim
1ef0eed0bd do not stop on keep-alive timer if sink is not completely flushed 2018-11-08 20:46:13 -08:00
Nikolay Kim
61b1030882 Fix websockets connection drop if request contains content-length header #567 2018-11-08 20:35:47 -08:00
Nikolay Kim
7065c540e1 set nodelay on socket #560 2018-11-08 16:29:43 -08:00
Nikolay Kim
aed3933ae8 Merge branch 'master' of github.com:actix/actix-web 2018-11-08 16:15:45 -08:00
Nikolay Kim
5b7740dee3 hide ChunkedReadFile 2018-11-08 16:12:16 -08:00
imaperson
1a0bf32ec7 Fix unnecessary owned string and change htmlescape in favor of askama_escape (#584) 2018-11-08 16:08:06 -08:00
Nikolay Kim
9ab586e24e update actix-net dep 2018-11-08 16:06:23 -08:00
Nikolay Kim
62f1c90c8d update base64 dep 2018-11-07 21:18:40 -08:00
Nikolay Kim
2677d325a7 fix keep-alive timer reset 2018-11-07 21:09:33 -08:00
Julian Tescher
8e354021d4 Add SameSite option to identity middleware cookie (#581) 2018-11-07 23:24:06 +03:00
Stanislav Tkach
3b536ee96c Use old clippy attributes syntax (#562) 2018-11-01 11:14:48 +03:00
Nikolay Kim
cfd9a56ff7 Add async/await ref 2018-10-28 09:24:19 -07:00
Douman
5f91f5eda6 Correct IoStream::set_keepalive for UDS (#564)
Enable uds feature in tests
2018-10-26 10:59:06 +03:00
François
42d5d48e71 add a way to configure error treatment for Query and Path extractors (#550)
* add a way to configure error treatment for Query extractor

* allow error handler to be customized for Path extractor
2018-10-20 06:43:43 +03:00
Douman
960274ada8 Refactoring of server output to not exclude HTTP_10 (#552) 2018-10-19 07:52:10 +03:00
ivan-ochc
f383f618b5 Fix typo in error message (#554) 2018-10-18 21:27:31 +03:00
Nikolay Kim
c04b4678f1 bump version 2018-10-14 08:10:41 -07:00
Nikolay Kim
dd948f836e HttpServer not sending streamed request body on HTTP/2 requests #544 2018-10-14 08:08:12 -07:00
Douman
63a443fce0 Correct build script 2018-10-13 10:05:21 +03:00
Douman
d145136e56 Add individual check for TLS features 2018-10-13 09:54:03 +03:00
jeizsm
32145cf6c3 fix after update tokio-rustls (#542) 2018-10-11 11:05:07 +03:00
Nikolay Kim
ec8aef6b43 update dep versions 2018-10-10 08:36:16 -07:00
Nikolay Kim
f45038bbfe remove unused code 2018-10-09 13:23:37 -07:00
Nikolay Kim
c63838bb71 fix 204 support for http/2 2018-10-09 13:12:49 -07:00
31 changed files with 348 additions and 298 deletions

View File

@@ -32,7 +32,10 @@ script:
- |
if [[ "$TRAVIS_RUST_VERSION" != "nightly" ]]; then
cargo clean
cargo test --features="ssl,tls,rust-tls" -- --nocapture
cargo check --features rust-tls
cargo check --features ssl
cargo check --features tls
cargo test --features="ssl,tls,rust-tls,uds" -- --nocapture
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then

View File

@@ -1,5 +1,50 @@
# Changes
## [0.7.14] - 2018-11-14
### Added
* Add method to configure a custom error handler for the `Query` and `Path` extractors.
* Add method to configure the `SameSite` option in `CookieIdentityPolicy`.
### Fixed
* Fix websockets connection drop if request contains "content-length" header #567
* Fix keep-alive timer reset
* HttpServer now treats streaming bodies the same for HTTP/1.x protocols. #549
* Set nodelay for socket #560
## [0.7.13] - 2018-10-14
### Fixed
* Fixed rustls support
* HttpServer not sending streamed request body on HTTP/2 requests #544
## [0.7.12] - 2018-10-10
### Changed
* Set min version for actix
* Set min version for actix-net
## [0.7.11] - 2018-10-09
### Fixed
* Fixed 204 responses for http/2
## [0.7.10] - 2018-10-09
### Fixed

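A minimal usage sketch of the first item under "Added", mirroring the doc example introduced for `QueryConfig` later in this diff (the `Info` struct and the `index` handler are illustrative, not part of the changeset):

extern crate actix_web;
#[macro_use]
extern crate serde_derive;

use actix_web::{error, http, App, HttpResponse, Query, Result};

#[derive(Deserialize)]
struct Info {
    username: String,
}

/// Deserialize `Info` from the request's query string.
fn index(info: Query<Info>) -> Result<String> {
    Ok(format!("Welcome {}!", info.username))
}

fn main() {
    // `with_config` exposes the extractor configuration; `cfg.0` is the
    // `QueryConfig` for the first (and only) extractor used by `index`.
    let app = App::new().resource("/index.html", |r| {
        r.method(http::Method::GET).with_config(index, |cfg| {
            cfg.0.error_handler(|err, _req| {
                // Replace the default error response with a custom one.
                error::InternalError::from_response(err, HttpResponse::Conflict().finish())
                    .into()
            });
        })
    });
}

The `Path` extractor gains the same `error_handler` hook via `PathConfig`, shown in the extractor doc examples further down.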
View File

@@ -1,6 +1,6 @@
[package]
name = "actix-web"
version = "0.7.10"
version = "0.7.14"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md"
@@ -61,13 +61,14 @@ flate2-rust = ["flate2/rust_backend"]
cell = ["actix-net/cell"]
[dependencies]
actix = "0.7.0"
actix-net = "0.1.0"
actix = "0.7.6"
actix-net = "0.2.2"
base64 = "0.9"
askama_escape = "0.1.0"
base64 = "0.10"
bitflags = "1.0"
failure = "^0.1.2"
h2 = "0.1"
htmlescape = "0.3"
http = "^0.1.8"
httparse = "1.3"
log = "0.4"
@@ -93,8 +94,6 @@ cookie = { version="0.11", features=["percent-encode"] }
brotli2 = { version="^0.3.2", optional = true }
flate2 = { version="^1.0.2", optional = true, default-features = false }
failure = "^0.1.2"
# io
mio = "^0.6.13"
net2 = "0.2"

View File

@@ -8,13 +8,13 @@ Actix web is a simple, pragmatic and extremely fast web framework for Rust.
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate)
* Configurable [request routing](https://actix.rs/docs/url-dispatch/)
* Graceful server shutdown
* Multipart streams
* Static assets
* SSL support with OpenSSL or `native-tls`
* Middlewares ([Logger, Session, CORS, CSRF, etc](https://actix.rs/docs/middleware/))
* Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Built on top of [Actix actor framework](https://github.com/actix/actix)
* Experimental [Async/Await](https://github.com/mehcode/actix-web-async-await) support.
## Documentation & community resources
@@ -51,7 +51,7 @@ fn main() {
* [Protobuf support](https://github.com/actix/examples/tree/master/protobuf/)
* [Multipart streams](https://github.com/actix/examples/tree/master/multipart/)
* [Simple websocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
[Askama](https://github.com/actix/examples/tree/master/template_askama/) templates
* [Diesel integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2](https://github.com/actix/examples/tree/master/r2d2/)
@@ -66,8 +66,6 @@ You may consider checking out
* [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r16&hw=ph&test=plaintext)
* Some basic benchmarks could be found in this [repository](https://github.com/fafhrd91/benchmarks).
## License
This project is licensed under either of

View File

@@ -37,15 +37,9 @@ use {
))]
use {
rustls::ClientConfig, std::io::Error as SslError, std::sync::Arc,
tokio_rustls::ClientConfigExt, webpki::DNSNameRef, webpki_roots,
tokio_rustls::TlsConnector as SslConnector, webpki::DNSNameRef, webpki_roots,
};
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls", feature = "ssl"))
))]
type SslConnector = Arc<ClientConfig>;
#[cfg(not(any(
feature = "alpn",
feature = "ssl",
@@ -282,7 +276,7 @@ impl Default for ClientConnector {
config
.root_store
.add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
Arc::new(config)
SslConnector::from(Arc::new(config))
}
#[cfg_attr(rustfmt, rustfmt_skip)]
@@ -293,7 +287,7 @@ impl Default for ClientConnector {
}
};
#[cfg_attr(feature = "cargo-clippy", allow(clippy::let_unit_value))]
#[cfg_attr(feature = "cargo-clippy", allow(let_unit_value))]
ClientConnector::with_connector_impl(connector)
}
}
@@ -373,7 +367,7 @@ impl ClientConnector {
/// config
/// .root_store
/// .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
/// let conn = ClientConnector::with_connector(Arc::new(config)).start();
/// let conn = ClientConnector::with_connector(config).start();
///
/// conn.send(
/// Connect::new("https://www.rust-lang.org").unwrap()) // <- connect to host
@@ -390,7 +384,7 @@ impl ClientConnector {
/// ```
pub fn with_connector(connector: ClientConfig) -> ClientConnector {
// keep level of indirection for docstrings matching feature flags
Self::with_connector_impl(Arc::new(connector))
Self::with_connector_impl(SslConnector::from(Arc::new(connector)))
}
#[cfg(all(
@@ -832,7 +826,7 @@ impl ClientConnector {
let host = DNSNameRef::try_from_ascii_str(&key.host).unwrap();
fut::Either::A(
act.connector
.connect_async(host, stream)
.connect(host, stream)
.into_actor(act)
.then(move |res, _, _| {
match res {

View File

@@ -56,7 +56,7 @@ impl HttpResponseParser {
return Ok(Async::Ready(msg));
}
Async::NotReady => {
if buf.capacity() >= MAX_BUFFER_SIZE {
if buf.len() >= MAX_BUFFER_SIZE {
return Err(HttpResponseParserError::Error(
ParseError::TooLarge,
));

View File

@@ -1,6 +1,6 @@
#![cfg_attr(
feature = "cargo-clippy",
allow(clippy::redundant_field_names)
allow(redundant_field_names)
)]
use std::cell::RefCell;

View File

@@ -111,18 +111,64 @@ impl<T, S> FromRequest<S> for Path<T>
where
T: DeserializeOwned,
{
type Config = ();
type Config = PathConfig<S>;
type Result = Result<Self, Error>;
#[inline]
fn from_request(req: &HttpRequest<S>, _: &Self::Config) -> Self::Result {
fn from_request(req: &HttpRequest<S>, cfg: &Self::Config) -> Self::Result {
let req = req.clone();
let req2 = req.clone();
let err = Rc::clone(&cfg.ehandler);
de::Deserialize::deserialize(PathDeserializer::new(&req))
.map_err(ErrorNotFound)
.map_err(move |e| (*err)(e, &req2))
.map(|inner| Path { inner })
}
}
/// Path extractor configuration
///
/// ```rust
/// # extern crate actix_web;
/// use actix_web::{error, http, App, HttpResponse, Path, Result};
///
/// /// extract `id` and `name` from the request path
/// fn index(info: Path<(u32, String)>) -> Result<String> {
/// Ok(format!("Welcome {}!", info.1))
/// }
///
/// fn main() {
/// let app = App::new().resource("/index.html/{id}/{name}", |r| {
/// r.method(http::Method::GET).with_config(index, |cfg| {
/// cfg.0.error_handler(|err, req| {
/// // <- create custom error response
/// error::InternalError::from_response(err, HttpResponse::Conflict().finish()).into()
/// });
/// })
/// });
/// }
/// ```
pub struct PathConfig<S> {
ehandler: Rc<Fn(serde_urlencoded::de::Error, &HttpRequest<S>) -> Error>,
}
impl<S> PathConfig<S> {
/// Set custom error handler
pub fn error_handler<F>(&mut self, f: F) -> &mut Self
where
F: Fn(serde_urlencoded::de::Error, &HttpRequest<S>) -> Error + 'static,
{
self.ehandler = Rc::new(f);
self
}
}
impl<S> Default for PathConfig<S> {
fn default() -> Self {
PathConfig {
ehandler: Rc::new(|e, _| ErrorNotFound(e)),
}
}
}
impl<T: fmt::Debug> fmt::Debug for Path<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.inner.fmt(f)
@@ -200,17 +246,69 @@ impl<T, S> FromRequest<S> for Query<T>
where
T: de::DeserializeOwned,
{
type Config = ();
type Config = QueryConfig<S>;
type Result = Result<Self, Error>;
#[inline]
fn from_request(req: &HttpRequest<S>, _: &Self::Config) -> Self::Result {
fn from_request(req: &HttpRequest<S>, cfg: &Self::Config) -> Self::Result {
let req2 = req.clone();
let err = Rc::clone(&cfg.ehandler);
serde_urlencoded::from_str::<T>(req.query_string())
.map_err(|e| e.into())
.map_err(move |e| (*err)(e, &req2))
.map(Query)
}
}
/// Query extractor configuration
///
/// ```rust
/// # extern crate actix_web;
/// #[macro_use] extern crate serde_derive;
/// use actix_web::{error, http, App, HttpResponse, Query, Result};
///
/// #[derive(Deserialize)]
/// struct Info {
/// username: String,
/// }
///
/// /// deserialize `Info` from the request's query string
/// fn index(info: Query<Info>) -> Result<String> {
/// Ok(format!("Welcome {}!", info.username))
/// }
///
/// fn main() {
/// let app = App::new().resource("/index.html", |r| {
/// r.method(http::Method::GET).with_config(index, |cfg| {
/// cfg.0.error_handler(|err, req| {
/// // <- create custom error response
/// error::InternalError::from_response(err, HttpResponse::Conflict().finish()).into()
/// });
/// })
/// });
/// }
/// ```
pub struct QueryConfig<S> {
ehandler: Rc<Fn(serde_urlencoded::de::Error, &HttpRequest<S>) -> Error>,
}
impl<S> QueryConfig<S> {
/// Set custom error handler
pub fn error_handler<F>(&mut self, f: F) -> &mut Self
where
F: Fn(serde_urlencoded::de::Error, &HttpRequest<S>) -> Error + 'static,
{
self.ehandler = Rc::new(f);
self
}
}
impl<S> Default for QueryConfig<S> {
fn default() -> Self {
QueryConfig {
ehandler: Rc::new(|e, _| e.into()),
}
}
}
impl<T: fmt::Debug> fmt::Debug for Query<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
@@ -951,15 +1049,15 @@ mod tests {
let info = router.recognize(&req, &(), 0);
let req = req.with_route_info(info);
let s = Path::<MyStruct>::from_request(&req, &()).unwrap();
let s = Path::<MyStruct>::from_request(&req, &PathConfig::default()).unwrap();
assert_eq!(s.key, "name");
assert_eq!(s.value, "user1");
let s = Path::<(String, String)>::from_request(&req, &()).unwrap();
let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, "user1");
let s = Query::<Id>::from_request(&req, &()).unwrap();
let s = Query::<Id>::from_request(&req, &QueryConfig::default()).unwrap();
assert_eq!(s.id, "test");
let mut router = Router::<()>::default();
@@ -968,11 +1066,11 @@ mod tests {
let info = router.recognize(&req, &(), 0);
let req = req.with_route_info(info);
let s = Path::<Test2>::from_request(&req, &()).unwrap();
let s = Path::<Test2>::from_request(&req, &PathConfig::default()).unwrap();
assert_eq!(s.as_ref().key, "name");
assert_eq!(s.value, 32);
let s = Path::<(String, u8)>::from_request(&req, &()).unwrap();
let s = Path::<(String, u8)>::from_request(&req, &PathConfig::default()).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, 32);
@@ -989,7 +1087,7 @@ mod tests {
let req = TestRequest::with_uri("/32/").finish();
let info = router.recognize(&req, &(), 0);
let req = req.with_route_info(info);
assert_eq!(*Path::<i8>::from_request(&req, &()).unwrap(), 32);
assert_eq!(*Path::<i8>::from_request(&req, &&PathConfig::default()).unwrap(), 32);
}
#[test]

View File

@@ -11,10 +11,10 @@ use std::{cmp, io};
#[cfg(unix)]
use std::os::unix::fs::MetadataExt;
use askama_escape::{escape as escape_html_entity};
use bytes::Bytes;
use futures::{Async, Future, Poll, Stream};
use futures_cpupool::{CpuFuture, CpuPool};
use htmlescape::encode_minimal as escape_html_entity;
use mime;
use mime_guess::{get_mime_type, guess_mime_type};
use percent_encoding::{utf8_percent_encode, DEFAULT_ENCODE_SET};
@@ -472,6 +472,7 @@ impl<C: StaticFileConfig> Responder for NamedFile<C> {
}
}
#[doc(hidden)]
/// A helper created from a `std::fs::File` which reads the file
/// chunk-by-chunk on a `CpuPool`.
pub struct ChunkedReadFile {
@@ -561,8 +562,23 @@ impl Directory {
}
}
// show file url as relative to static path
macro_rules! encode_file_url {
($path:ident) => {
utf8_percent_encode(&$path.to_string_lossy(), DEFAULT_ENCODE_SET)
};
}
// " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt; / -- &#x2f;
macro_rules! encode_file_name {
($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy())
};
}
fn directory_listing<S>(
dir: &Directory, req: &HttpRequest<S>,
dir: &Directory,
req: &HttpRequest<S>,
) -> Result<HttpResponse, io::Error> {
let index_of = format!("Index of {}", req.path());
let mut body = String::new();
@@ -575,11 +591,6 @@ fn directory_listing<S>(
Ok(p) => base.join(p),
Err(_) => continue,
};
// show file url as relative to static path
let file_url = utf8_percent_encode(&p.to_string_lossy(), DEFAULT_ENCODE_SET)
.to_string();
// " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt;
let file_name = escape_html_entity(&entry.file_name().to_string_lossy());
// if file is a directory, add '/' to the end of the name
if let Ok(metadata) = entry.metadata() {
@@ -587,13 +598,15 @@ fn directory_listing<S>(
let _ = write!(
body,
"<li><a href=\"{}\">{}/</a></li>",
file_url, file_name
encode_file_url!(p),
encode_file_name!(entry),
);
} else {
let _ = write!(
body,
"<li><a href=\"{}\">{}</a></li>",
file_url, file_name
encode_file_url!(p),
encode_file_name!(entry),
);
}
} else {
@@ -656,7 +669,8 @@ impl<S: 'static> StaticFiles<S> {
/// Create new `StaticFiles` instance for specified base directory and
/// `CpuPool`.
pub fn with_pool<T: Into<PathBuf>>(
dir: T, pool: CpuPool,
dir: T,
pool: CpuPool,
) -> Result<StaticFiles<S>, Error> {
Self::with_config_pool(dir, pool, DefaultConfig)
}
@@ -667,7 +681,8 @@ impl<S: 'static, C: StaticFileConfig> StaticFiles<S, C> {
///
/// Identical to `new` but allows specifying the configuration to use.
pub fn with_config<T: Into<PathBuf>>(
dir: T, config: C,
dir: T,
config: C,
) -> Result<StaticFiles<S, C>, Error> {
// use default CpuPool
let pool = { DEFAULT_CPUPOOL.lock().clone() };
@@ -678,7 +693,9 @@ impl<S: 'static, C: StaticFileConfig> StaticFiles<S, C> {
/// Create new `StaticFiles` instance for specified base directory with config and
/// `CpuPool`.
pub fn with_config_pool<T: Into<PathBuf>>(
dir: T, pool: CpuPool, _: C,
dir: T,
pool: CpuPool,
_: C,
) -> Result<StaticFiles<S, C>, Error> {
let dir = dir.into().canonicalize()?;
@@ -736,7 +753,8 @@ impl<S: 'static, C: StaticFileConfig> StaticFiles<S, C> {
}
fn try_handle(
&self, req: &HttpRequest<S>,
&self,
req: &HttpRequest<S>,
) -> Result<AsyncResult<HttpResponse>, Error> {
let tail: String = req.match_info().query("tail")?;
let relpath = PathBuf::from_param(tail.trim_left_matches('/'))?;

View File

@@ -216,7 +216,7 @@ impl<S> HttpRequest<S> {
self.url_for(name, &NO_PARAMS)
}
/// This method returns reference to current `RouteInfo` object.
/// This method returns reference to current `ResourceInfo` object.
#[inline]
pub fn resource(&self) -> &ResourceInfo {
&self.resource

View File

@@ -694,7 +694,7 @@ impl HttpResponseBuilder {
}
#[inline]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::borrowed_box))]
#[cfg_attr(feature = "cargo-clippy", allow(borrowed_box))]
fn parts<'a>(
parts: &'a mut Option<Box<InnerHttpResponse>>, err: &Option<HttpError>,
) -> Option<&'a mut Box<InnerHttpResponse>> {

View File

@@ -18,7 +18,7 @@ impl ConnectionInfo {
/// Create *ConnectionInfo* instance for a request.
#[cfg_attr(
feature = "cargo-clippy",
allow(clippy::cyclomatic_complexity)
allow(cyclomatic_complexity)
)]
pub fn update(&mut self, req: &Request) {
let mut host = None;

View File

@@ -100,9 +100,9 @@ extern crate failure;
extern crate lazy_static;
#[macro_use]
extern crate futures;
extern crate askama_escape;
extern crate cookie;
extern crate futures_cpupool;
extern crate htmlescape;
extern crate http as modhttp;
extern crate httparse;
extern crate language_tags;

View File

@@ -48,7 +48,7 @@ impl DefaultHeaders {
/// Set a header.
#[inline]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::match_wild_err_arm))]
#[cfg_attr(feature = "cargo-clippy", allow(match_wild_err_arm))]
pub fn header<K, V>(mut self, key: K, value: V) -> Self
where
HeaderName: HttpTryFrom<K>,

View File

@@ -48,7 +48,7 @@
//! ```
use std::rc::Rc;
use cookie::{Cookie, CookieJar, Key};
use cookie::{Cookie, CookieJar, Key, SameSite};
use futures::future::{err as FutErr, ok as FutOk, FutureResult};
use futures::Future;
use time::Duration;
@@ -237,6 +237,7 @@ struct CookieIdentityInner {
domain: Option<String>,
secure: bool,
max_age: Option<Duration>,
same_site: Option<SameSite>,
}
impl CookieIdentityInner {
@@ -248,6 +249,7 @@ impl CookieIdentityInner {
domain: None,
secure: true,
max_age: None,
same_site: None,
}
}
@@ -268,6 +270,10 @@ impl CookieIdentityInner {
cookie.set_max_age(max_age);
}
if let Some(same_site) = self.same_site {
cookie.set_same_site(same_site);
}
let mut jar = CookieJar::new();
if some {
jar.private(&self.key).add(cookie);
@@ -370,6 +376,12 @@ impl CookieIdentityPolicy {
Rc::get_mut(&mut self.0).unwrap().max_age = Some(value);
self
}
/// Sets the `same_site` field in the session cookie being built.
pub fn same_site(mut self, same_site: SameSite) -> Self {
Rc::get_mut(&mut self.0).unwrap().same_site = Some(same_site);
self
}
}
impl<S> IdentityPolicy<S> for CookieIdentityPolicy {
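A minimal sketch of wiring the new `same_site` option into an identity middleware setup. The key, cookie name, and the direct `cookie` crate import for `SameSite` are assumptions for illustration; only the `same_site` builder method itself comes from this diff.

extern crate actix_web;
extern crate cookie;

use actix_web::middleware::identity::{CookieIdentityPolicy, IdentityService};
use actix_web::App;
use cookie::SameSite;

fn main() {
    let app = App::new().middleware(IdentityService::new(
        // Placeholder 32-byte key; use a real secret in production.
        CookieIdentityPolicy::new(&[0; 32])
            .name("auth-cookie")
            .secure(true)
            // New in 0.7.14: restrict the identity cookie to same-site requests.
            .same_site(SameSite::Strict),
    ));
}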

View File

@@ -551,12 +551,12 @@ impl<S: 'static, H> ProcessResponse<S, H> {
if self.resp.as_ref().unwrap().status().is_server_error()
{
error!(
"Error occured during request handling, status: {} {}",
"Error occurred during request handling, status: {} {}",
self.resp.as_ref().unwrap().status(), err
);
} else {
warn!(
"Error occured during request handling: {}",
"Error occurred during request handling: {}",
err
);
}

View File

@@ -61,7 +61,7 @@ pub struct Scope<S> {
#[cfg_attr(
feature = "cargo-clippy",
allow(clippy::new_without_default_derive)
allow(new_without_default_derive)
)]
impl<S: 'static> Scope<S> {
/// Create a new scope

View File

@@ -9,10 +9,7 @@ use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
use tokio_timer::{sleep, Delay};
// use super::channel::HttpProtocol;
use super::error::AcceptorError;
use super::handler::HttpHandler;
use super::settings::ServiceConfig;
use super::IoStream;
/// This trait indicates types that can create acceptor service for http server.
@@ -275,56 +272,49 @@ impl<T: Service> Future for AcceptorTimeoutResponse<T> {
}
}
pub(crate) struct ServerMessageAcceptor<T, H: HttpHandler> {
pub(crate) struct ServerMessageAcceptor<T> {
inner: T,
settings: ServiceConfig<H>,
}
impl<T, H> ServerMessageAcceptor<T, H>
impl<T> ServerMessageAcceptor<T>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
pub(crate) fn new(settings: ServiceConfig<H>, inner: T) -> Self {
ServerMessageAcceptor { inner, settings }
pub(crate) fn new(inner: T) -> Self {
ServerMessageAcceptor { inner }
}
}
impl<T, H> NewService for ServerMessageAcceptor<T, H>
impl<T> NewService for ServerMessageAcceptor<T>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
type Request = ServerMessage;
type Response = ();
type Error = T::Error;
type InitError = T::InitError;
type Service = ServerMessageAcceptorService<T::Service, H>;
type Future = ServerMessageAcceptorResponse<T, H>;
type Service = ServerMessageAcceptorService<T::Service>;
type Future = ServerMessageAcceptorResponse<T>;
fn new_service(&self) -> Self::Future {
ServerMessageAcceptorResponse {
fut: self.inner.new_service(),
settings: self.settings.clone(),
}
}
}
pub(crate) struct ServerMessageAcceptorResponse<T, H>
pub(crate) struct ServerMessageAcceptorResponse<T>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
fut: T::Future,
settings: ServiceConfig<H>,
}
impl<T, H> Future for ServerMessageAcceptorResponse<T, H>
impl<T> Future for ServerMessageAcceptorResponse<T>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
type Item = ServerMessageAcceptorService<T::Service, H>;
type Item = ServerMessageAcceptorService<T::Service>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
@@ -332,20 +322,17 @@ where
Async::NotReady => Ok(Async::NotReady),
Async::Ready(service) => Ok(Async::Ready(ServerMessageAcceptorService {
inner: service,
settings: self.settings.clone(),
})),
}
}
}
pub(crate) struct ServerMessageAcceptorService<T, H: HttpHandler> {
pub(crate) struct ServerMessageAcceptorService<T> {
inner: T,
settings: ServiceConfig<H>,
}
impl<T, H> Service for ServerMessageAcceptorService<T, H>
impl<T> Service for ServerMessageAcceptorService<T>
where
H: HttpHandler,
T: Service<Request = net::TcpStream>,
{
type Request = ServerMessage;

View File

@@ -9,14 +9,20 @@ use super::acceptor::{
};
use super::error::AcceptorError;
use super::handler::IntoHttpHandler;
use super::service::HttpService;
use super::service::{HttpService, StreamConfiguration};
use super::settings::{ServerSettings, ServiceConfig};
use super::KeepAlive;
pub(crate) trait ServiceProvider {
fn register(
&self, server: Server, lst: net::TcpListener, host: String,
addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64,
&self,
server: Server,
lst: net::TcpListener,
host: String,
addr: net::SocketAddr,
keep_alive: KeepAlive,
secure: bool,
client_timeout: u64,
client_shutdown: u64,
) -> Server;
}
@@ -43,8 +49,13 @@ where
}
fn finish(
&self, host: String, addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool,
client_timeout: u64, client_shutdown: u64,
&self,
host: String,
addr: net::SocketAddr,
keep_alive: KeepAlive,
secure: bool,
client_timeout: u64,
client_shutdown: u64,
) -> impl ServiceFactory {
let factory = self.factory.clone();
let acceptor = self.acceptor.clone();
@@ -60,12 +71,12 @@ where
if secure {
Either::B(ServerMessageAcceptor::new(
settings.clone(),
TcpAcceptor::new(AcceptorTimeout::new(
client_timeout,
acceptor.create(),
)).map_err(|_| ())
.map_init_err(|_| ())
.and_then(StreamConfiguration::new().nodelay(true))
.and_then(
HttpService::new(settings)
.map_init_err(|_| ())
@@ -74,10 +85,10 @@ where
))
} else {
Either::A(ServerMessageAcceptor::new(
settings.clone(),
TcpAcceptor::new(acceptor.create().map_err(AcceptorError::Service))
.map_err(|_| ())
.map_init_err(|_| ())
.and_then(StreamConfiguration::new().nodelay(true))
.and_then(
HttpService::new(settings)
.map_init_err(|_| ())
@@ -97,8 +108,14 @@ where
H: IntoHttpHandler,
{
fn register(
&self, server: Server, lst: net::TcpListener, host: String,
addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64,
&self,
server: Server,
lst: net::TcpListener,
host: String,
addr: net::SocketAddr,
keep_alive: KeepAlive,
secure: bool,
client_timeout: u64,
client_shutdown: u64,
) -> Server {
server.listen2(

View File

@@ -50,7 +50,6 @@ where
H: HttpHandler + 'static,
{
proto: HttpProtocol<T, H>,
node: Option<Node<()>>,
ka_timeout: Option<Delay>,
}
@@ -64,24 +63,11 @@ where
HttpChannel {
ka_timeout,
node: None,
proto: HttpProtocol::Unknown(settings, io, BytesMut::with_capacity(8192)),
}
}
}
impl<T, H> Drop for HttpChannel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
fn drop(&mut self) {
if let Some(mut node) = self.node.take() {
node.remove()
}
}
}
impl<T, H> Future for HttpChannel<T, H>
where
T: IoStream,
@@ -114,22 +100,6 @@ where
}
}
if self.node.is_none() {
self.node = Some(Node::new(()));
let _ = match self.proto {
HttpProtocol::H1(ref mut h1) => {
self.node.as_mut().map(|n| h1.settings().head().insert(n))
}
HttpProtocol::H2(ref mut h2) => {
self.node.as_mut().map(|n| h2.settings().head().insert(n))
}
HttpProtocol::Unknown(ref mut settings, _, _) => {
self.node.as_mut().map(|n| settings.head().insert(n))
}
HttpProtocol::None => unreachable!(),
};
}
let mut is_eof = false;
let kind = match self.proto {
HttpProtocol::H1(ref mut h1) => return h1.poll(),
@@ -206,7 +176,6 @@ where
H: HttpHandler + 'static,
{
proto: HttpProtocol<T, H>,
node: Option<Node<()>>,
}
impl<T, H> H1Channel<T, H>
@@ -216,7 +185,6 @@ where
{
pub(crate) fn new(settings: ServiceConfig<H>, io: T) -> H1Channel<T, H> {
H1Channel {
node: None,
proto: HttpProtocol::H1(h1::Http1Dispatcher::new(
settings,
io,
@@ -228,18 +196,6 @@ where
}
}
impl<T, H> Drop for H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
fn drop(&mut self) {
if let Some(mut node) = self.node.take() {
node.remove();
}
}
}
impl<T, H> Future for H1Channel<T, H>
where
T: IoStream,
@@ -249,16 +205,6 @@ where
type Error = HttpDispatchError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.node.is_none() {
self.node = Some(Node::new(()));
match self.proto {
HttpProtocol::H1(ref mut h1) => {
self.node.as_mut().map(|n| h1.settings().head().insert(n));
}
_ => unreachable!(),
};
}
match self.proto {
HttpProtocol::H1(ref mut h1) => h1.poll(),
_ => unreachable!(),
@@ -266,88 +212,6 @@ where
}
}
pub(crate) struct Node<T> {
next: Option<*mut Node<T>>,
prev: Option<*mut Node<T>>,
element: T,
}
impl<T> Node<T> {
fn new(element: T) -> Self {
Node {
element,
next: None,
prev: None,
}
}
fn insert<I>(&mut self, next_el: &mut Node<I>) {
let next: *mut Node<T> = next_el as *const _ as *mut _;
if let Some(next2) = self.next {
unsafe {
let n = next2.as_mut().unwrap();
n.prev = Some(next);
}
next_el.next = Some(next2 as *mut _);
}
self.next = Some(next);
unsafe {
let next: &mut Node<T> = &mut *next;
next.prev = Some(self as *mut _);
}
}
fn remove(&mut self) {
let next = self.next.take();
let prev = self.prev.take();
if let Some(prev) = prev {
unsafe {
prev.as_mut().unwrap().next = next;
}
}
if let Some(next) = next {
unsafe {
next.as_mut().unwrap().prev = prev;
}
}
}
}
impl Node<()> {
pub(crate) fn head() -> Self {
Node {
next: None,
prev: None,
element: (),
}
}
pub(crate) fn traverse<T, H, F: Fn(&mut HttpProtocol<T, H>)>(&self, f: F)
where
T: IoStream,
H: HttpHandler + 'static,
{
if let Some(n) = self.next.as_ref() {
unsafe {
let mut next: &mut Node<HttpProtocol<T, H>> =
&mut *(n.as_ref().unwrap() as *const _ as *mut _);
loop {
f(&mut next.element);
next = if let Some(n) = next.next.as_ref() {
&mut **n
} else {
return;
}
}
}
}
}
}
/// Wrapper for `AsyncRead + AsyncWrite` types
pub(crate) struct WrapperStream<T>
where

View File

@@ -87,7 +87,10 @@ where
H: HttpHandler + 'static,
{
pub fn new(
settings: ServiceConfig<H>, stream: T, buf: BytesMut, is_eof: bool,
settings: ServiceConfig<H>,
stream: T,
buf: BytesMut,
is_eof: bool,
keepalive_timer: Option<Delay>,
) -> Self {
let addr = stream.peer_addr();
@@ -123,8 +126,11 @@ where
}
pub(crate) fn for_error(
settings: ServiceConfig<H>, stream: T, status: StatusCode,
mut keepalive_timer: Option<Delay>, buf: BytesMut,
settings: ServiceConfig<H>,
stream: T,
status: StatusCode,
mut keepalive_timer: Option<Delay>,
buf: BytesMut,
) -> Self {
if let Some(deadline) = settings.client_timer_expire() {
let _ = keepalive_timer.as_mut().map(|delay| delay.reset(deadline));
@@ -147,16 +153,6 @@ where
disp
}
#[inline]
pub fn settings(&self) -> &ServiceConfig<H> {
&self.settings
}
#[inline]
pub(crate) fn io(&mut self) -> &mut T {
self.stream.get_mut()
}
#[inline]
fn can_read(&self) -> bool {
if self.flags.contains(Flags::READ_DISCONNECTED) {
@@ -290,7 +286,7 @@ where
}
if timer.deadline() >= self.ka_expire {
// check for any outstanding request handling
if self.tasks.is_empty() {
if self.tasks.is_empty() && self.flags.contains(Flags::FLUSHED) {
if !self.flags.contains(Flags::STARTED) {
// timeout on first request (slow request) return 408
trace!("Slow request timeout");
@@ -308,16 +304,19 @@ where
if let Some(deadline) =
self.settings.client_shutdown_timer()
{
timer.reset(deadline)
timer.reset(deadline);
let _ = timer.poll();
} else {
return Ok(());
}
}
} else if let Some(dl) = self.settings.keep_alive_expire() {
timer.reset(dl)
timer.reset(dl);
let _ = timer.poll();
}
} else {
timer.reset(self.ka_expire)
timer.reset(self.ka_expire);
let _ = timer.poll();
}
}
Ok(Async::NotReady) => (),

View File

@@ -43,7 +43,9 @@ impl H1Decoder {
}
pub fn decode<H>(
&mut self, src: &mut BytesMut, settings: &ServiceConfig<H>,
&mut self,
src: &mut BytesMut,
settings: &ServiceConfig<H>,
) -> Result<Option<Message>, DecoderError> {
// read payload
if self.decoder.is_some() {
@@ -80,7 +82,9 @@ impl H1Decoder {
}
fn parse_message<H>(
&self, buf: &mut BytesMut, settings: &ServiceConfig<H>,
&self,
buf: &mut BytesMut,
settings: &ServiceConfig<H>,
) -> Poll<(Request, Option<EncodingDecoder>), ParseError> {
// Parse http message
let mut has_upgrade = false;
@@ -178,6 +182,13 @@ impl H1Decoder {
}
header::UPGRADE => {
has_upgrade = true;
// some clients (e.g. Dart) send "content-length: 0"
// together with a websocket upgrade; drop content-length in that case
if let Ok(val) = value.to_str() {
if val == "websocket" {
content_length = None;
}
}
}
_ => (),
}
@@ -221,7 +232,9 @@ pub(crate) struct HeaderIndex {
impl HeaderIndex {
pub(crate) fn record(
bytes: &[u8], headers: &[httparse::Header], indices: &mut [HeaderIndex],
bytes: &[u8],
headers: &[httparse::Header],
indices: &mut [HeaderIndex],
) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
@@ -369,7 +382,10 @@ macro_rules! byte (
impl ChunkedState {
fn step(
&self, body: &mut BytesMut, size: &mut u64, buf: &mut Option<Bytes>,
&self,
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<ChunkedState, io::Error> {
use self::ChunkedState::*;
match *self {
@@ -432,7 +448,8 @@ impl ChunkedState {
}
}
fn read_size_lf(
rdr: &mut BytesMut, size: &mut u64,
rdr: &mut BytesMut,
size: &mut u64,
) -> Poll<ChunkedState, io::Error> {
match byte!(rdr) {
b'\n' if *size > 0 => Ok(Async::Ready(ChunkedState::Body)),
@@ -445,7 +462,9 @@ impl ChunkedState {
}
fn read_body(
rdr: &mut BytesMut, rem: &mut u64, buf: &mut Option<Bytes>,
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<ChunkedState, io::Error> {
trace!("Chunked read, remaining={:?}", rem);

View File

@@ -60,7 +60,10 @@ where
H: HttpHandler + 'static,
{
pub fn new(
settings: ServiceConfig<H>, io: T, buf: Bytes, keepalive_timer: Option<Delay>,
settings: ServiceConfig<H>,
io: T,
buf: Bytes,
keepalive_timer: Option<Delay>,
) -> Self {
let addr = io.peer_addr();
let extensions = io.extensions();
@@ -89,15 +92,6 @@ where
}
}
pub(crate) fn shutdown(&mut self) {
self.state = State::Empty;
self.tasks.clear();
}
pub fn settings(&self) -> &ServiceConfig<H> {
&self.settings
}
pub fn poll(&mut self) -> Poll<(), HttpDispatchError> {
self.poll_keepalive()?;
@@ -293,10 +287,12 @@ where
if self.tasks.is_empty() {
return Err(HttpDispatchError::ShutdownTimeout);
} else if let Some(dl) = self.settings.keep_alive_expire() {
timer.reset(dl)
timer.reset(dl);
let _ = timer.poll();
}
} else {
timer.reset(self.ka_expire)
timer.reset(self.ka_expire);
let _ = timer.poll();
}
}
Ok(Async::NotReady) => (),
@@ -357,8 +353,11 @@ struct Entry<H: HttpHandler + 'static> {
impl<H: HttpHandler + 'static> Entry<H> {
fn new(
parts: Parts, recv: RecvStream, resp: SendResponse<Bytes>,
addr: Option<SocketAddr>, settings: ServiceConfig<H>,
parts: Parts,
recv: RecvStream,
resp: SendResponse<Bytes>,
addr: Option<SocketAddr>,
settings: ServiceConfig<H>,
extensions: Option<Rc<Extensions>>,
) -> Entry<H>
where

View File

@@ -1,6 +1,6 @@
#![cfg_attr(
feature = "cargo-clippy",
allow(clippy::redundant_field_names)
allow(redundant_field_names)
)]
use std::{cmp, io};
@@ -96,6 +96,7 @@ impl<H: 'static> Writer for H2Writer<H> {
let mut has_date = false;
let mut resp = Response::new(());
let mut len_is_set = false;
*resp.status_mut() = msg.status();
*resp.version_mut() = Version::HTTP_2;
for (key, value) in msg.headers().iter() {
@@ -107,6 +108,9 @@ impl<H: 'static> Writer for H2Writer<H> {
},
CONTENT_LENGTH => match info.length {
ResponseLength::None => (),
ResponseLength::Zero => {
len_is_set = true;
}
_ => continue,
},
DATE => has_date = true,
@@ -126,8 +130,10 @@ impl<H: 'static> Writer for H2Writer<H> {
// content length
match info.length {
ResponseLength::Zero => {
resp.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0"));
if !len_is_set {
resp.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0"));
}
self.flags.insert(Flags::EOF);
}
ResponseLength::Length(len) => {
@@ -144,6 +150,9 @@ impl<H: 'static> Writer for H2Writer<H> {
resp.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::try_from(l.as_str()).unwrap());
}
ResponseLength::None => {
self.flags.insert(Flags::EOF);
}
_ => (),
}
if let Some(ce) = info.content_encoding {

View File

@@ -326,7 +326,7 @@ where
#[doc(hidden)]
#[cfg_attr(
feature = "cargo-clippy",
allow(clippy::needless_pass_by_value)
allow(needless_pass_by_value)
)]
pub fn bind_with<S, A>(mut self, addr: S, acceptor: A) -> io::Result<Self>
where

View File

@@ -334,7 +334,7 @@ impl IoStream for ::tokio_uds::UnixStream {
}
#[inline]
fn set_keepalive(&mut self, _nodelay: bool) -> io::Result<()> {
fn set_keepalive(&mut self, _dur: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
}

View File

@@ -299,11 +299,10 @@ impl Output {
match resp.chunked() {
Some(true) => {
// Enable transfer encoding
info.length = ResponseLength::Chunked;
if version == Version::HTTP_2 {
info.length = ResponseLength::None;
TransferEncoding::eof(buf)
} else {
info.length = ResponseLength::Chunked;
TransferEncoding::chunked(buf)
}
}
@@ -337,15 +336,11 @@ impl Output {
}
} else {
// Enable transfer encoding
match version {
Version::HTTP_11 => {
info.length = ResponseLength::Chunked;
TransferEncoding::chunked(buf)
}
_ => {
info.length = ResponseLength::None;
TransferEncoding::eof(buf)
}
info.length = ResponseLength::Chunked;
if version == Version::HTTP_2 {
TransferEncoding::eof(buf)
} else {
TransferEncoding::chunked(buf)
}
}
}
@@ -443,7 +438,7 @@ impl ContentEncoder {
}
}
#[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))]
#[cfg_attr(feature = "cargo-clippy", allow(inline_always))]
#[inline(always)]
pub fn write_eof(&mut self) -> Result<bool, io::Error> {
let encoder =
@@ -485,7 +480,7 @@ impl ContentEncoder {
}
}
#[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))]
#[cfg_attr(feature = "cargo-clippy", allow(inline_always))]
#[inline(always)]
pub fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self {

View File

@@ -1,4 +1,4 @@
use std::cell::{Cell, RefCell, RefMut};
use std::cell::{Cell, RefCell};
use std::collections::VecDeque;
use std::fmt::Write;
use std::rc::Rc;
@@ -15,7 +15,6 @@ use time;
use tokio_current_thread::spawn;
use tokio_timer::{sleep, Delay};
use super::channel::Node;
use super::message::{Request, RequestPool};
use super::KeepAlive;
use body::Body;
@@ -138,7 +137,6 @@ struct Inner<H> {
ka_enabled: bool,
bytes: Rc<SharedBytesPool>,
messages: &'static RequestPool,
node: RefCell<Node<()>>,
date: Cell<Option<Date>>,
}
@@ -173,7 +171,6 @@ impl<H> ServiceConfig<H> {
client_shutdown,
bytes: Rc::new(SharedBytesPool::new()),
messages: RequestPool::pool(settings),
node: RefCell::new(Node::head()),
date: Cell::new(None),
}))
}
@@ -183,10 +180,6 @@ impl<H> ServiceConfig<H> {
ServiceConfigBuilder::new(handler)
}
pub(crate) fn head(&self) -> RefMut<Node<()>> {
self.0.node.borrow_mut()
}
pub(crate) fn handler(&self) -> &H {
&self.0.handler
}

View File

@@ -46,7 +46,7 @@ impl Frame {
Frame::message(payload, OpCode::Close, true, genmask)
}
#[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))]
#[cfg_attr(feature = "cargo-clippy", allow(type_complexity))]
fn read_copy_md<S>(
pl: &mut PayloadBuffer<S>, server: bool, max_size: usize,
) -> Poll<Option<(usize, bool, OpCode, usize, Option<u32>)>, ProtocolError>

View File

@@ -1,5 +1,5 @@
//! This is code from [Tungstenite project](https://github.com/snapview/tungstenite-rs)
#![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
#![cfg_attr(feature = "cargo-clippy", allow(cast_ptr_alignment))]
use std::ptr::copy_nonoverlapping;
use std::slice;
@@ -19,7 +19,7 @@ impl<'a> ShortSlice<'a> {
/// Faster version of `apply_mask()` which operates on 8-byte blocks.
#[inline]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))]
#[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) {
// Extend the mask to 64 bits
let mut mask_u64 = ((mask_u32 as u64) << 32) | (mask_u32 as u64);
@@ -52,7 +52,7 @@ pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) {
// a `ShortSlice` must be smaller than a u64.
#[cfg_attr(
feature = "cargo-clippy",
allow(clippy::needless_pass_by_value)
allow(needless_pass_by_value)
)]
fn xor_short(buf: ShortSlice, mask: u64) {
// Unsafe: we know that a `ShortSlice` fits in a u64

View File

@@ -385,10 +385,11 @@ fn test_ws_stopped() {
{
let (reader, mut writer) = srv.ws().unwrap();
writer.text("text");
writer.close(None);
let (item, _) = srv.execute(reader.into_future()).unwrap();
assert_eq!(item, Some(ws::Message::Text("text".to_owned())));
}
thread::sleep(time::Duration::from_millis(1000));
thread::sleep(time::Duration::from_millis(100));
assert_eq!(num.load(Ordering::Relaxed), 1);
}