mirror of https://github.com/fafhrd91/actix-web synced 2025-07-17 15:06:51 +02:00

Compare commits


68 Commits

Author SHA1 Message Date
Nikolay Kim
c3ad516f56 disable shutdown atm 2018-10-09 09:45:24 -07:00
Nikolay Kim
93b1c5fd46 update deps 2018-10-08 21:58:37 -07:00
Nikolay Kim
4e7fac08b9 do not override content-length header 2018-10-08 15:30:59 -07:00
Nikolay Kim
07f6ca4b71 Merge branch 'master' of github.com:actix/actix-web 2018-10-08 13:06:49 -07:00
Nikolay Kim
03d988b898 refactor date rendering 2018-10-08 10:16:19 -07:00
Nikolay Kim
cfad5bf1f3 enable slow request timeout for h2 dispatcher 2018-10-08 07:47:42 -07:00
Danil Berestov
10678a22af test content length (#532) 2018-10-06 08:17:20 +03:00
lzx
7ae5a43877 httpresponse.rs doc fix (#534) 2018-10-06 08:16:12 +03:00
Nikolay Kim
1e1a4f846e use actix-net cell features 2018-10-02 22:23:51 -07:00
Nikolay Kim
49eea3bf76 travis config 2018-10-02 20:22:51 -07:00
Nikolay Kim
b0677aa029 fix stable compatibility 2018-10-02 19:42:24 -07:00
Nikolay Kim
401ea574c0 make AcceptorTimeout::new public 2018-10-02 19:31:30 -07:00
Nikolay Kim
bbcd618304 export AcceptorTimeout 2018-10-02 19:12:08 -07:00
Nikolay Kim
1f68ce8541 fix tests 2018-10-02 19:05:58 -07:00
Nikolay Kim
2710f70e39 add H1 transport 2018-10-02 17:30:29 -07:00
Nikolay Kim
ae5c4dfb78 refactor http channels list; rename WorkerSettings 2018-10-02 15:25:32 -07:00
Nikolay Kim
d7379bd10b update server ssl tests; upgrade rustls 2018-10-02 13:41:33 -07:00
Nikolay Kim
b59712c439 add ssl handshake timeout tests 2018-10-02 11:32:43 -07:00
Nikolay Kim
724668910b fix ssh handshake timeout 2018-10-02 11:18:59 -07:00
Nikolay Kim
61c7534e03 fix stream flushing 2018-10-02 10:43:23 -07:00
Douman
f8b176de9e Fix no_http2 flag in HttpServer (#526) 2018-10-02 20:09:31 +03:00
Danil Berestov
c8505bb53f content-length bug fix (#525)
* content-length bug fix

* changes.md is updated

* typo
2018-10-02 09:15:48 -07:00
Nikolay Kim
eed377e773 uneeded dep 2018-10-02 00:20:27 -07:00
Nikolay Kim
f3ce6574e4 fix client timer and add slow request tests 2018-10-02 00:19:28 -07:00
Nikolay Kim
f007860a16 cleanup warnings 2018-10-01 22:48:11 -07:00
Nikolay Kim
fdfadb52e1 fix doc test for State 2018-10-01 22:29:30 -07:00
Nikolay Kim
368f73513a set tcp-keepalive for test as well 2018-10-01 22:25:53 -07:00
Nikolay Kim
c674ea9126 add StreamConfiguration service 2018-10-01 22:23:02 -07:00
Nikolay Kim
7c78797d9b proper stop for test_ws_stopped test 2018-10-01 21:30:00 -07:00
Nikolay Kim
84edc57fd9 increase sleep time 2018-10-01 21:19:27 -07:00
Nikolay Kim
127af92541 clippy warnings 2018-10-01 21:16:56 -07:00
Nikolay Kim
e4686f6c8d set socket linger to 0 on timeout 2018-10-01 20:53:22 -07:00
Nikolay Kim
1bac65de4c add websocket stopped test 2018-10-01 20:15:26 -07:00
Nikolay Kim
16945a554a add client shutdown timeout 2018-10-01 20:04:16 -07:00
Nikolay Kim
91af3ca148 simplify h1 dispatcher 2018-10-01 19:18:24 -07:00
Nikolay Kim
2217a152cb expose app error by http service 2018-10-01 15:19:49 -07:00
Nikolay Kim
c1e0b4f322 expose internal http server types and allow to create custom http pipelines 2018-10-01 14:43:06 -07:00
Nikolay Kim
5966ee6192 add HttpServer::register() function, allows to register services in actix net server 2018-09-28 16:03:53 -07:00
Nikolay Kim
4aac3d6a92 refactor keep-alive timer 2018-09-28 15:04:59 -07:00
Nikolay Kim
e95babf8d3 log acctor init errors 2018-09-28 12:37:20 -07:00
Nikolay Kim
f2d42e5e77 refactor acceptor error handling 2018-09-28 11:50:47 -07:00
Nikolay Kim
0f1c80ccc6 deprecate start_incoming 2018-09-28 08:45:49 -07:00
Nikolay Kim
fc5088b55e fix tarpaulin args 2018-09-28 00:08:23 -07:00
Nikolay Kim
bec37fdbd5 update travis config 2018-09-27 22:23:29 -07:00
Nikolay Kim
4b59ae2476 fix ssl config for client connector 2018-09-27 22:15:38 -07:00
Nikolay Kim
d0fc9d7b99 simplify listen_ and bind_ methods 2018-09-27 21:55:44 -07:00
Nikolay Kim
1ff86e5ac4 restore rust-tls support 2018-09-27 21:24:21 -07:00
Nikolay Kim
ecfda64f6d add native-tls support 2018-09-27 20:40:34 -07:00
Nikolay Kim
0bca21ec6d fix ssl tests 2018-09-27 19:57:40 -07:00
Nikolay Kim
3173c9fa83 diesable client timeout for tcp stream acceptor 2018-09-27 19:34:07 -07:00
Nikolay Kim
85445ea809 rename and simplify ServiceFactory trait 2018-09-27 18:33:29 -07:00
Nikolay Kim
d57579d700 refactor acceptor pipeline add client timeout 2018-09-27 18:33:29 -07:00
Nikolay Kim
b6a1cfa6ad update openssl support 2018-09-27 18:33:29 -07:00
Nikolay Kim
9f1417af30 refactor http service builder 2018-09-27 18:33:29 -07:00
Nikolay Kim
0aa0f326f7 fix changes from master 2018-09-27 18:33:29 -07:00
Nikolay Kim
dbb4fab4f7 separate mod for HttpHandler; add HttpHandler impl for Vec<H> 2018-09-27 18:33:29 -07:00
Nikolay Kim
6f3e70a92a simplify application factory 2018-09-27 18:33:29 -07:00
Nikolay Kim
a63d3f9a7a cleanup ServerFactory trait 2018-09-27 18:33:29 -07:00
Nikolay Kim
a3cfc24232 refactor acceptor service 2018-09-27 18:33:29 -07:00
Nikolay Kim
6a61138bf8 enable ssl feature 2018-09-27 18:33:29 -07:00
Nikolay Kim
7cf9af9b55 disable ssl for travis 2018-09-27 18:33:29 -07:00
Nikolay Kim
c9a52e3197 refactor date generatioin 2018-09-27 18:33:29 -07:00
Nikolay Kim
1907102685 switch to actix-net server 2018-09-27 18:33:29 -07:00
Nikolay Kim
52195bbf16 update version 2018-09-27 18:17:58 -07:00
sapir
59deb4b40d Try to separate HTTP/1 read & write disconnect handling, to fix #511. (#514) 2018-09-27 18:15:02 -07:00
Ashley
782eeb5ded Reduced unsafe converage (#520) 2018-09-26 11:56:34 +03:00
Douman
1b298142e3 Correct composing of multiple origins in cors (#518) 2018-09-21 08:45:22 +03:00
Douman
0dc96658f2 Send response to inform client of error (#515) 2018-09-21 07:24:10 +03:00
51 changed files with 3295 additions and 2786 deletions

View File

@@ -1,6 +1,6 @@
environment:
global:
PROJECT_NAME: actix
PROJECT_NAME: actix-web
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
@@ -37,4 +37,5 @@ build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test --no-default-features --features="flate2-rust"

View File

@@ -30,14 +30,14 @@ before_script:
script:
- |
if [[ "$TRAVIS_RUST_VERSION" != "stable" ]]; then
if [[ "$TRAVIS_RUST_VERSION" != "nightly" ]]; then
cargo clean
cargo test --features="alpn,tls,rust-tls" -- --nocapture
cargo test --features="ssl,tls,rust-tls" -- --nocapture
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then
if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin
cargo tarpaulin --features="alpn,tls,rust-tls" --out Xml --no-count
RUST_BACKTRACE=1 cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml
bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage"
fi
@@ -45,8 +45,8 @@ script:
# Upload docs
after_success:
- |
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then
cargo doc --features "alpn, tls, rust-tls, session" --no-deps &&
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "stable" ]]; then
cargo doc --features "ssl,tls,rust-tls,session" --no-deps &&
echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
git clone https://github.com/davisp/ghp-import.git &&
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&

View File

@@ -1,5 +1,29 @@
# Changes
## [0.7.9] - 2018-10-09
### Added
* Added client shutdown timeout setting
* Added slow request timeout setting
* Respond with 408 response on slow request timeout #523
### Fixed
* HTTP1 decoding errors are reported to the client. #512
* Correctly compose multiple allowed origins in CORS. #517
* Websocket server finished() isn't called if client disconnects #511
* Responses with the following codes: 100, 101, 102, 204 -- are sent without Content-Length header. #521
* Correct usage of `no_http2` flag in `bind_*` methods. #519
## [0.7.8] - 2018-09-17
### Added

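The two timeout settings introduced in 0.7.9 above are configured on the server builder. A minimal, illustrative sketch, not part of this change set: it assumes `HttpServer` exposes `client_timeout` and `client_shutdown` methods taking milliseconds, matching the changelog entries.

```rust
extern crate actix_web;

use actix_web::{server, App, HttpRequest};

fn index(_req: &HttpRequest) -> &'static str {
    "Hello world!"
}

fn main() {
    server::new(|| App::new().resource("/", |r| r.f(index)))
        // Assumed setting: slow-request timeout in ms; per the changelog a
        // 408 response is sent when it expires.
        .client_timeout(5_000)
        // Assumed setting: client shutdown timeout in ms.
        .client_shutdown(1_000)
        .bind("127.0.0.1:8080")
        .unwrap()
        .run();
}
```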
View File

@@ -1,6 +1,6 @@
[package]
name = "actix-web"
version = "0.7.8"
version = "0.7.9"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md"
@@ -17,7 +17,7 @@ exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
build = "build.rs"
[package.metadata.docs.rs]
features = ["tls", "alpn", "rust-tls", "session", "brotli", "flate2-c"]
features = ["tls", "ssl", "rust-tls", "session", "brotli", "flate2-c"]
[badges]
travis-ci = { repository = "actix/actix-web", branch = "master" }
@@ -29,16 +29,19 @@ name = "actix_web"
path = "src/lib.rs"
[features]
default = ["session", "brotli", "flate2-c"]
default = ["session", "brotli", "flate2-c", "cell"]
# tls
tls = ["native-tls", "tokio-tls"]
tls = ["native-tls", "tokio-tls", "actix-net/tls"]
# openssl
alpn = ["openssl", "tokio-openssl"]
ssl = ["openssl", "tokio-openssl", "actix-net/ssl"]
# deprecated, use "ssl"
alpn = ["openssl", "tokio-openssl", "actix-net/ssl"]
# rustls
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"]
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots", "actix-net/rust-tls"]
# unix sockets
uds = ["tokio-uds"]
@@ -55,8 +58,11 @@ flate2-c = ["flate2/miniz-sys"]
# rust backend for flate2 crate
flate2-rust = ["flate2/rust_backend"]
cell = ["actix-net/cell"]
[dependencies]
actix = "0.7.0"
actix-net = "0.1.0"
base64 = "0.9"
bitflags = "1.0"
@@ -81,6 +87,7 @@ language-tags = "0.2"
lazy_static = "1.0"
lazycell = "1.0.0"
parking_lot = "0.6"
serde_urlencoded = "^0.5.3"
url = { version="1.7", features=["query_encoding"] }
cookie = { version="0.11", features=["percent-encode"] }
brotli2 = { version="^0.3.2", optional = true }
@@ -112,16 +119,14 @@ openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.2", optional = true }
#rustls
rustls = { version = "^0.13.1", optional = true }
tokio-rustls = { version = "^0.7.2", optional = true }
rustls = { version = "0.14", optional = true }
tokio-rustls = { version = "0.8", optional = true }
webpki = { version = "0.18", optional = true }
webpki-roots = { version = "0.15", optional = true }
# unix sockets
tokio-uds = { version="0.2", optional = true }
serde_urlencoded = "^0.5.3"
[dev-dependencies]
env_logger = "0.5"
serde_derive = "1.0"
@@ -133,8 +138,3 @@ version_check = "0.1"
lto = true
opt-level = 3
codegen-units = 1
[workspace]
members = [
"./",
]

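For downstream crates, the practical effect of the feature rename is switching from the deprecated `alpn` feature to `ssl`. An illustrative dependency entry, not part of this diff; the version and feature name are taken from the hunk above:

```toml
# Downstream Cargo.toml (sketch): openssl-backed TLS and HTTP/2 support now
# comes from the `ssl` feature; `alpn` remains only as a deprecated alias.
[dependencies]
actix-web = { version = "0.7.9", features = ["ssl"] }
```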
View File

@@ -135,7 +135,7 @@ where
/// instance for each thread, thus application state must be constructed
/// multiple times. If you want to share state between different
/// threads, a shared object should be used, e.g. `Arc`. Application
/// state does not need to be `Send` and `Sync`.
/// state does not need to be `Send` or `Sync`.
pub fn with_state(state: S) -> App<S> {
App {
parts: Some(ApplicationParts {

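The doc change above clarifies that application state is built once per worker and that cross-thread sharing should go through a shared handle such as `Arc`. A sketch of that pattern, illustrative only; it assumes the 0.7-era `server::new` closure-per-worker model and the `HttpRequest::state()` accessor:

```rust
extern crate actix_web;

use std::sync::{Arc, Mutex};

use actix_web::{server, App, HttpRequest};

// Hypothetical state type: a counter shared by all worker threads.
struct AppState {
    counter: Arc<Mutex<usize>>,
}

fn index(req: &HttpRequest<AppState>) -> String {
    let mut n = req.state().counter.lock().unwrap();
    *n += 1;
    format!("visit #{}", *n)
}

fn main() {
    let counter = Arc::new(Mutex::new(0));
    server::new(move || {
        // `with_state` runs once per worker; cloning the Arc lets every
        // worker observe the same underlying counter.
        App::with_state(AppState { counter: Arc::clone(&counter) })
            .resource("/", |r| r.f(index))
    }).bind("127.0.0.1:8080")
    .unwrap()
    .run();
}
```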
View File

@@ -16,13 +16,16 @@ use http::{Error as HttpError, HttpTryFrom, Uri};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::Delay;
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
use {
openssl::ssl::{Error as SslError, SslConnector, SslMethod},
tokio_openssl::SslConnectorExt,
};
#[cfg(all(feature = "tls", not(feature = "alpn")))]
#[cfg(all(
feature = "tls",
not(any(feature = "alpn", feature = "ssl", feature = "rust-tls"))
))]
use {
native_tls::{Error as SslError, TlsConnector as NativeTlsConnector},
tokio_tls::TlsConnector as SslConnector,
@@ -30,7 +33,7 @@ use {
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
not(any(feature = "alpn", feature = "tls", feature = "ssl"))
))]
use {
rustls::ClientConfig, std::io::Error as SslError, std::sync::Arc,
@@ -39,11 +42,16 @@ use {
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
not(any(feature = "alpn", feature = "tls", feature = "ssl"))
))]
type SslConnector = Arc<ClientConfig>;
#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))]
#[cfg(not(any(
feature = "alpn",
feature = "ssl",
feature = "tls",
feature = "rust-tls"
)))]
type SslConnector = ();
use server::IoStream;
@@ -150,7 +158,12 @@ pub enum ClientConnectorError {
SslIsNotSupported,
/// SSL error
#[cfg(any(feature = "tls", feature = "alpn", feature = "rust-tls"))]
#[cfg(any(
feature = "tls",
feature = "alpn",
feature = "ssl",
feature = "rust-tls",
))]
#[fail(display = "{}", _0)]
SslError(#[cause] SslError),
@@ -247,19 +260,22 @@ impl SystemService for ClientConnector {}
impl Default for ClientConnector {
fn default() -> ClientConnector {
let connector = {
#[cfg(all(feature = "alpn"))]
#[cfg(all(any(feature = "alpn", feature = "ssl")))]
{
SslConnector::builder(SslMethod::tls()).unwrap().build()
}
#[cfg(all(feature = "tls", not(feature = "alpn")))]
#[cfg(all(
feature = "tls",
not(any(feature = "alpn", feature = "ssl", feature = "rust-tls"))
))]
{
NativeTlsConnector::builder().build().unwrap().into()
}
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
not(any(feature = "alpn", feature = "tls", feature = "ssl"))
))]
{
let mut config = ClientConfig::new();
@@ -269,18 +285,21 @@ impl Default for ClientConnector {
Arc::new(config)
}
#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))]
#[cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(any(
feature = "alpn", feature = "ssl", feature = "tls", feature = "rust-tls")))]
{
()
}
};
#[cfg_attr(feature = "cargo-clippy", allow(clippy::let_unit_value))]
ClientConnector::with_connector_impl(connector)
}
}
impl ClientConnector {
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
/// Create `ClientConnector` actor with custom `SslConnector` instance.
///
/// By default `ClientConnector` uses a very simple SSL configuration.
@@ -325,7 +344,7 @@ impl ClientConnector {
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
not(any(feature = "alpn", feature = "ssl", feature = "tls"))
))]
/// Create `ClientConnector` actor with custom `SslConnector` instance.
///
@@ -376,7 +395,7 @@ impl ClientConnector {
#[cfg(all(
feature = "tls",
not(any(feature = "alpn", feature = "rust-tls"))
not(any(feature = "ssl", feature = "alpn", feature = "rust-tls"))
))]
/// Create `ClientConnector` actor with custom `SslConnector` instance.
///
@@ -714,7 +733,7 @@ impl ClientConnector {
act.release_key(&key2);
()
}).and_then(move |res, act, _| {
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
match res {
Err(err) => {
let _ = waiter.tx.send(Err(err.into()));
@@ -756,7 +775,7 @@ impl ClientConnector {
}
}
#[cfg(all(feature = "tls", not(feature = "alpn")))]
#[cfg(all(feature = "tls", not(any(feature = "alpn", feature = "ssl"))))]
match res {
Err(err) => {
let _ = waiter.tx.send(Err(err.into()));
@@ -800,7 +819,7 @@ impl ClientConnector {
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
not(any(feature = "alpn", feature = "ssl", feature = "tls"))
))]
match res {
Err(err) => {
@@ -844,7 +863,12 @@ impl ClientConnector {
}
}
#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))]
#[cfg(not(any(
feature = "alpn",
feature = "ssl",
feature = "tls",
feature = "rust-tls"
)))]
match res {
Err(err) => {
let _ = waiter.tx.send(Err(err.into()));
@@ -1260,6 +1284,11 @@ impl IoStream for Connection {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
IoStream::set_linger(&mut *self.stream, dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
IoStream::set_keepalive(&mut *self.stream, dur)
}
}
impl io::Read for Connection {
@@ -1307,4 +1336,9 @@ impl<Io: IoStream> IoStream for TlsStream<Io> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_keepalive(dur)
}
}

View File

@@ -1,4 +1,7 @@
#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))]
#![cfg_attr(
feature = "cargo-clippy",
allow(clippy::redundant_field_names)
)]
use std::cell::RefCell;
use std::fmt::Write as FmtWrite;

View File

@@ -31,6 +31,7 @@ impl Hasher for IdHasher {
type AnyMap = HashMap<TypeId, Box<Any>, BuildHasherDefault<IdHasher>>;
#[derive(Default)]
/// A type map of request extensions.
pub struct Extensions {
map: AnyMap,

View File

@@ -530,8 +530,7 @@ where
/// }
///
/// /// extract path info using serde
/// fn index(data: (State<MyApp>, Path<Info>)) -> String {
/// let (state, path) = data;
/// fn index(state: State<MyApp>, path: Path<Info>) -> String {
/// format!("{} {}!", state.msg, path.username)
/// }
///

View File

@@ -272,7 +272,7 @@ impl HttpResponse {
self.get_mut().response_size = size;
}
/// Set write buffer capacity
/// Get write buffer capacity
pub fn write_buffer_capacity(&self) -> usize {
self.get_ref().write_capacity
}
@@ -694,7 +694,7 @@ impl HttpResponseBuilder {
}
#[inline]
#[cfg_attr(feature = "cargo-clippy", allow(borrowed_box))]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::borrowed_box))]
fn parts<'a>(
parts: &'a mut Option<Box<InnerHttpResponse>>, err: &Option<HttpError>,
) -> Option<&'a mut Box<InnerHttpResponse>> {

View File

@@ -16,7 +16,10 @@ pub struct ConnectionInfo {
impl ConnectionInfo {
/// Create *ConnectionInfo* instance for a request.
#[cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity))]
#[cfg_attr(
feature = "cargo-clippy",
allow(clippy::cyclomatic_complexity)
)]
pub fn update(&mut self, req: &Request) {
let mut host = None;
let mut scheme = None;

View File

@@ -64,8 +64,8 @@
//! ## Package feature
//!
//! * `tls` - enables ssl support via `native-tls` crate
//! * `alpn` - enables ssl support via `openssl` crate, require for `http/2`
//! support
//! * `ssl` - enables ssl support via `openssl` crate, supports `http/2`
//! * `rust-tls` - enables ssl support via `rustls` crate, supports `http/2`
//! * `uds` - enables support for making client requests via Unix Domain Sockets.
//! Unix only. Not necessary for *serving* requests.
//! * `session` - enables session support, includes `ring` crate as
@@ -80,11 +80,8 @@
#![cfg_attr(actix_nightly, feature(
specialization, // for impl ErrorResponse for std::error::Error
extern_prelude,
tool_lints,
))]
#![cfg_attr(
feature = "cargo-clippy",
allow(decimal_literal_representation, suspicious_arithmetic_impl)
)]
#![warn(missing_docs)]
#[macro_use]
@@ -140,6 +137,8 @@ extern crate serde_urlencoded;
extern crate percent_encoding;
extern crate serde_json;
extern crate smallvec;
extern crate actix_net;
#[macro_use]
extern crate actix as actix_inner;

View File

@@ -826,8 +826,8 @@ impl<S: 'static> CorsBuilder<S> {
if let AllOrSome::Some(ref origins) = cors.origins {
let s = origins
.iter()
.fold(String::new(), |s, v| s + &v.to_string());
cors.origins_str = Some(HeaderValue::try_from(s.as_str()).unwrap());
.fold(String::new(), |s, v| format!("{}, {}", s, v));
cors.origins_str = Some(HeaderValue::try_from(&s[2..]).unwrap());
}
if !self.expose_hdrs.is_empty() {
@@ -1122,16 +1122,29 @@ mod tests {
let cors = Cors::build()
.disable_vary_header()
.allowed_origin("https://www.example.com")
.allowed_origin("https://www.google.com")
.finish();
let resp: HttpResponse = HttpResponse::Ok().into();
let resp = cors.response(&req, resp).unwrap().response();
assert_eq!(
&b"https://www.example.com"[..],
resp.headers()
.get(header::ACCESS_CONTROL_ALLOW_ORIGIN)
.unwrap()
.as_bytes()
);
let origins_str = resp
.headers()
.get(header::ACCESS_CONTROL_ALLOW_ORIGIN)
.unwrap()
.to_str()
.unwrap();
if origins_str.starts_with("https://www.example.com") {
assert_eq!(
"https://www.example.com, https://www.google.com",
origins_str
);
} else {
assert_eq!(
"https://www.google.com, https://www.example.com",
origins_str
);
}
}
#[test]

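The fix above folds the allowed origins into a ", a, b" string and then strips the leading separator before building the header value. The same logic in isolation, illustrative; like the guarded branch above it assumes at least one allowed origin:

```rust
// Mirrors the fold from the CORS fix: accumulate ", origin" fragments,
// then drop the leading ", ". Panics on an empty list, which the
// `AllOrSome::Some` branch above is assumed not to produce.
fn compose(origins: &[&str]) -> String {
    let s = origins
        .iter()
        .fold(String::new(), |s, v| format!("{}, {}", s, v));
    s[2..].to_string()
}

fn main() {
    let hdr = compose(&["https://www.example.com", "https://www.google.com"]);
    assert_eq!(hdr, "https://www.example.com, https://www.google.com");
}
```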
View File

@@ -48,7 +48,7 @@ impl DefaultHeaders {
/// Set a header.
#[inline]
#[cfg_attr(feature = "cargo-clippy", allow(match_wild_err_arm))]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::match_wild_err_arm))]
pub fn header<K, V>(mut self, key: K, value: V) -> Self
where
HeaderName: HttpTryFrom<K>,

View File

@@ -1,6 +1,8 @@
//! Payload stream
use bytes::{Bytes, BytesMut};
use futures::task::{current as current_task, Task};
#[cfg(not(test))]
use futures::task::current as current_task;
use futures::task::Task;
use futures::{Async, Poll, Stream};
use std::cell::RefCell;
use std::cmp;

View File

@@ -134,8 +134,7 @@ impl<S: 'static> Route<S> {
/// }
/// ```
///
/// It is possible to use tuples for specifing multiple extractors for one
/// handler function.
/// It is possible to use multiple extractors for one handler function.
///
/// ```rust
/// # extern crate bytes;
@@ -152,9 +151,9 @@ impl<S: 'static> Route<S> {
///
/// /// extract path info using serde
/// fn index(
/// info: (Path<Info>, Query<HashMap<String, String>>, Json<Info>),
/// path: Path<Info>, query: Query<HashMap<String, String>>, body: Json<Info>,
/// ) -> Result<String> {
/// Ok(format!("Welcome {}!", info.0.username))
/// Ok(format!("Welcome {}!", path.username))
/// }
///
/// fn main() {

View File

@@ -59,7 +59,10 @@ pub struct Scope<S> {
middlewares: Rc<Vec<Box<Middleware<S>>>>,
}
#[cfg_attr(feature = "cargo-clippy", allow(new_without_default_derive))]
#[cfg_attr(
feature = "cargo-clippy",
allow(clippy::new_without_default_derive)
)]
impl<S: 'static> Scope<S> {
/// Create a new scope
pub fn new(path: &str) -> Scope<S> {

View File

@@ -1,475 +0,0 @@
use std::sync::mpsc as sync_mpsc;
use std::time::{Duration, Instant};
use std::{io, net, thread};
use futures::{sync::mpsc, Future};
use mio;
use slab::Slab;
use tokio_timer::Delay;
use actix::{msgs::Execute, Arbiter, System};
use super::server::ServerCommand;
use super::worker::{Conn, WorkerClient};
use super::Token;
pub(crate) enum Command {
Pause,
Resume,
Stop,
Worker(WorkerClient),
}
struct ServerSocketInfo {
addr: net::SocketAddr,
token: Token,
handler: Token,
sock: mio::net::TcpListener,
timeout: Option<Instant>,
}
#[derive(Clone)]
pub(crate) struct AcceptNotify(mio::SetReadiness);
impl AcceptNotify {
pub(crate) fn new(ready: mio::SetReadiness) -> Self {
AcceptNotify(ready)
}
pub(crate) fn notify(&self) {
let _ = self.0.set_readiness(mio::Ready::readable());
}
}
impl Default for AcceptNotify {
fn default() -> Self {
AcceptNotify::new(mio::Registration::new2().1)
}
}
pub(crate) struct AcceptLoop {
cmd_reg: Option<mio::Registration>,
cmd_ready: mio::SetReadiness,
notify_reg: Option<mio::Registration>,
notify_ready: mio::SetReadiness,
tx: sync_mpsc::Sender<Command>,
rx: Option<sync_mpsc::Receiver<Command>>,
srv: Option<(
mpsc::UnboundedSender<ServerCommand>,
mpsc::UnboundedReceiver<ServerCommand>,
)>,
}
impl AcceptLoop {
pub fn new() -> AcceptLoop {
let (tx, rx) = sync_mpsc::channel();
let (cmd_reg, cmd_ready) = mio::Registration::new2();
let (notify_reg, notify_ready) = mio::Registration::new2();
AcceptLoop {
tx,
cmd_ready,
cmd_reg: Some(cmd_reg),
notify_ready,
notify_reg: Some(notify_reg),
rx: Some(rx),
srv: Some(mpsc::unbounded()),
}
}
pub fn send(&self, msg: Command) {
let _ = self.tx.send(msg);
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
}
pub fn get_notify(&self) -> AcceptNotify {
AcceptNotify::new(self.notify_ready.clone())
}
pub(crate) fn start(
&mut self, socks: Vec<Vec<(Token, net::TcpListener)>>,
workers: Vec<WorkerClient>,
) -> mpsc::UnboundedReceiver<ServerCommand> {
let (tx, rx) = self.srv.take().expect("Can not re-use AcceptInfo");
Accept::start(
self.rx.take().expect("Can not re-use AcceptInfo"),
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
socks,
tx,
workers,
);
rx
}
}
struct Accept {
poll: mio::Poll,
rx: sync_mpsc::Receiver<Command>,
sockets: Slab<ServerSocketInfo>,
workers: Vec<WorkerClient>,
srv: mpsc::UnboundedSender<ServerCommand>,
timer: (mio::Registration, mio::SetReadiness),
next: usize,
backpressure: bool,
}
const DELTA: usize = 100;
const CMD: mio::Token = mio::Token(0);
const TIMER: mio::Token = mio::Token(1);
const NOTIFY: mio::Token = mio::Token(2);
/// This function defines errors that are per-connection. Which basically
/// means that if we get this error from `accept()` system call it means
/// next connection might be ready to be accepted.
///
/// All other errors will incur a timeout before next `accept()` is performed.
/// The timeout is useful to handle resource exhaustion errors like ENFILE
/// and EMFILE. Otherwise, could enter into tight loop.
fn connection_error(e: &io::Error) -> bool {
e.kind() == io::ErrorKind::ConnectionRefused
|| e.kind() == io::ErrorKind::ConnectionAborted
|| e.kind() == io::ErrorKind::ConnectionReset
}
impl Accept {
#![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
pub(crate) fn start(
rx: sync_mpsc::Receiver<Command>, cmd_reg: mio::Registration,
notify_reg: mio::Registration, socks: Vec<Vec<(Token, net::TcpListener)>>,
srv: mpsc::UnboundedSender<ServerCommand>, workers: Vec<WorkerClient>,
) {
let sys = System::current();
// start accept thread
let _ = thread::Builder::new()
.name("actix-web accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let mut accept = Accept::new(rx, socks, workers, srv);
// Start listening for incoming commands
if let Err(err) = accept.poll.register(
&cmd_reg,
CMD,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
// Start listening for notify updates
if let Err(err) = accept.poll.register(
&notify_reg,
NOTIFY,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
accept.poll();
});
}
fn new(
rx: sync_mpsc::Receiver<Command>, socks: Vec<Vec<(Token, net::TcpListener)>>,
workers: Vec<WorkerClient>, srv: mpsc::UnboundedSender<ServerCommand>,
) -> Accept {
// Create a poll instance
let poll = match mio::Poll::new() {
Ok(poll) => poll,
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Start accept
let mut sockets = Slab::new();
for (idx, srv_socks) in socks.into_iter().enumerate() {
for (hnd_token, lst) in srv_socks {
let addr = lst.local_addr().unwrap();
let server = mio::net::TcpListener::from_std(lst)
.expect("Can not create mio::net::TcpListener");
let entry = sockets.vacant_entry();
let token = entry.key();
// Start listening for incoming connections
if let Err(err) = poll.register(
&server,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register io: {}", err);
}
entry.insert(ServerSocketInfo {
addr,
token: hnd_token,
handler: Token(idx),
sock: server,
timeout: None,
});
}
}
// Timer
let (tm, tmr) = mio::Registration::new2();
if let Err(err) =
poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
{
panic!("Can not register Registration: {}", err);
}
Accept {
poll,
rx,
sockets,
workers,
srv,
next: 0,
timer: (tm, tmr),
backpressure: false,
}
}
fn poll(&mut self) {
// Create storage for events
let mut events = mio::Events::with_capacity(128);
loop {
if let Err(err) = self.poll.poll(&mut events, None) {
panic!("Poll error: {}", err);
}
for event in events.iter() {
let token = event.token();
match token {
CMD => if !self.process_cmd() {
return;
},
TIMER => self.process_timer(),
NOTIFY => self.backpressure(false),
_ => {
let token = usize::from(token);
if token < DELTA {
continue;
}
self.accept(token - DELTA);
}
}
}
}
}
fn process_timer(&mut self) {
let now = Instant::now();
for (token, info) in self.sockets.iter_mut() {
if let Some(inst) = info.timeout.take() {
if now > inst {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not register server socket {}", err);
} else {
info!("Resume accepting connections on {}", info.addr);
}
} else {
info.timeout = Some(inst);
}
}
}
}
fn process_cmd(&mut self) -> bool {
loop {
match self.rx.try_recv() {
Ok(cmd) => match cmd {
Command::Pause => {
for (_, info) in self.sockets.iter_mut() {
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
} else {
info!("Paused accepting connections on {}", info.addr);
}
}
}
Command::Resume => {
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!(
"Accepting connections on {} has been resumed",
info.addr
);
}
}
}
Command::Stop => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
Command::Worker(worker) => {
self.backpressure(false);
self.workers.push(worker);
}
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => break,
sync_mpsc::TryRecvError::Disconnected => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
},
}
}
true
}
fn backpressure(&mut self, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed", info.addr);
}
}
}
} else if on {
self.backpressure = true;
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
}
}
fn accept_one(&mut self, mut msg: Conn<net::TcpStream>) {
if self.backpressure {
while !self.workers.is_empty() {
match self.workers[self.next].send(msg) {
Ok(_) => (),
Err(err) => {
let _ = self.srv.unbounded_send(ServerCommand::WorkerDied(
self.workers[self.next].idx,
));
msg = err.into_inner();
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
self.next = (self.next + 1) % self.workers.len();
break;
}
} else {
let mut idx = 0;
while idx < self.workers.len() {
idx += 1;
if self.workers[self.next].available() {
match self.workers[self.next].send(msg) {
Ok(_) => {
self.next = (self.next + 1) % self.workers.len();
return;
}
Err(err) => {
let _ = self.srv.unbounded_send(ServerCommand::WorkerDied(
self.workers[self.next].idx,
));
msg = err.into_inner();
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
self.backpressure(true);
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.next = (self.next + 1) % self.workers.len();
}
// enable backpressure
self.backpressure(true);
self.accept_one(msg);
}
}
fn accept(&mut self, token: usize) {
loop {
let msg = if let Some(info) = self.sockets.get_mut(token) {
match info.sock.accept_std() {
Ok((io, addr)) => Conn {
io,
token: info.token,
handler: info.handler,
peer: Some(addr),
},
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
}
// sleep after error
info.timeout = Some(Instant::now() + Duration::from_millis(500));
let r = self.timer.1.clone();
System::current().arbiter().do_send(Execute::new(
move || -> Result<(), ()> {
Arbiter::spawn(
Delay::new(
Instant::now() + Duration::from_millis(510),
).map_err(|_| ())
.and_then(
move |_| {
let _ =
r.set_readiness(mio::Ready::readable());
Ok(())
},
),
);
Ok(())
},
));
return;
}
}
} else {
return;
};
self.accept_one(msg);
}
}
}

src/server/acceptor.rs (new file, 396 lines)
View File

@@ -0,0 +1,396 @@
use std::time::Duration;
use std::{fmt, net};
use actix_net::server::ServerMessage;
use actix_net::service::{NewService, Service};
use futures::future::{err, ok, Either, FutureResult};
use futures::{Async, Future, Poll};
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
use tokio_timer::{sleep, Delay};
use super::channel::HttpProtocol;
use super::error::AcceptorError;
use super::handler::HttpHandler;
use super::settings::ServiceConfig;
use super::IoStream;
/// This trait indicates types that can create an acceptor service for the http server.
pub trait AcceptorServiceFactory: Send + Clone + 'static {
type Io: IoStream + Send;
type NewService: NewService<Request = TcpStream, Response = Self::Io>;
fn create(&self) -> Self::NewService;
}
impl<F, T> AcceptorServiceFactory for F
where
F: Fn() -> T + Send + Clone + 'static,
T::Response: IoStream + Send,
T: NewService<Request = TcpStream>,
T::InitError: fmt::Debug,
{
type Io = T::Response;
type NewService = T;
fn create(&self) -> T {
(self)()
}
}
#[derive(Clone)]
/// Default acceptor service; converts `TcpStream` to a `tokio_tcp::TcpStream`
pub(crate) struct DefaultAcceptor;
impl AcceptorServiceFactory for DefaultAcceptor {
type Io = TcpStream;
type NewService = DefaultAcceptor;
fn create(&self) -> Self::NewService {
DefaultAcceptor
}
}
impl NewService for DefaultAcceptor {
type Request = TcpStream;
type Response = TcpStream;
type Error = ();
type InitError = ();
type Service = DefaultAcceptor;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
ok(DefaultAcceptor)
}
}
impl Service for DefaultAcceptor {
type Request = TcpStream;
type Response = TcpStream;
type Error = ();
type Future = FutureResult<Self::Response, Self::Error>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
ok(req)
}
}
pub(crate) struct TcpAcceptor<T> {
inner: T,
}
impl<T, E> TcpAcceptor<T>
where
T: NewService<Request = TcpStream, Error = AcceptorError<E>>,
T::InitError: fmt::Debug,
{
pub(crate) fn new(inner: T) -> Self {
TcpAcceptor { inner }
}
}
impl<T, E> NewService for TcpAcceptor<T>
where
T: NewService<Request = TcpStream, Error = AcceptorError<E>>,
T::InitError: fmt::Debug,
{
type Request = net::TcpStream;
type Response = T::Response;
type Error = AcceptorError<E>;
type InitError = T::InitError;
type Service = TcpAcceptorService<T::Service>;
type Future = TcpAcceptorResponse<T>;
fn new_service(&self) -> Self::Future {
TcpAcceptorResponse {
fut: self.inner.new_service(),
}
}
}
pub(crate) struct TcpAcceptorResponse<T>
where
T: NewService<Request = TcpStream>,
T::InitError: fmt::Debug,
{
fut: T::Future,
}
impl<T> Future for TcpAcceptorResponse<T>
where
T: NewService<Request = TcpStream>,
T::InitError: fmt::Debug,
{
type Item = TcpAcceptorService<T::Service>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll() {
Ok(Async::NotReady) => Ok(Async::NotReady),
Ok(Async::Ready(service)) => {
Ok(Async::Ready(TcpAcceptorService { inner: service }))
}
Err(e) => {
error!("Can not create acceptor service: {:?}", e);
Err(e)
}
}
}
}
pub(crate) struct TcpAcceptorService<T> {
inner: T,
}
impl<T, E> Service for TcpAcceptorService<T>
where
T: Service<Request = TcpStream, Error = AcceptorError<E>>,
{
type Request = net::TcpStream;
type Response = T::Response;
type Error = AcceptorError<E>;
type Future = Either<T::Future, FutureResult<Self::Response, Self::Error>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.inner.poll_ready()
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let stream = TcpStream::from_std(req, &Handle::default()).map_err(|e| {
error!("Can not convert to an async tcp stream: {}", e);
AcceptorError::Io(e)
});
match stream {
Ok(stream) => Either::A(self.inner.call(stream)),
Err(e) => Either::B(err(e)),
}
}
}
#[doc(hidden)]
/// Acceptor timeout middleware
///
/// Applies timeout to request processing.
pub struct AcceptorTimeout<T> {
inner: T,
timeout: Duration,
}
impl<T: NewService> AcceptorTimeout<T> {
/// Create a new `AcceptorTimeout` instance. The timeout is in milliseconds.
pub fn new(timeout: u64, inner: T) -> Self {
Self {
inner,
timeout: Duration::from_millis(timeout),
}
}
}
impl<T: NewService> NewService for AcceptorTimeout<T> {
type Request = T::Request;
type Response = T::Response;
type Error = AcceptorError<T::Error>;
type InitError = T::InitError;
type Service = AcceptorTimeoutService<T::Service>;
type Future = AcceptorTimeoutFut<T>;
fn new_service(&self) -> Self::Future {
AcceptorTimeoutFut {
fut: self.inner.new_service(),
timeout: self.timeout,
}
}
}
#[doc(hidden)]
pub struct AcceptorTimeoutFut<T: NewService> {
fut: T::Future,
timeout: Duration,
}
impl<T: NewService> Future for AcceptorTimeoutFut<T> {
type Item = AcceptorTimeoutService<T::Service>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let inner = try_ready!(self.fut.poll());
Ok(Async::Ready(AcceptorTimeoutService {
inner,
timeout: self.timeout,
}))
}
}
#[doc(hidden)]
/// Acceptor timeout service
///
/// Applies timeout to request processing.
pub struct AcceptorTimeoutService<T> {
inner: T,
timeout: Duration,
}
impl<T: Service> Service for AcceptorTimeoutService<T> {
type Request = T::Request;
type Response = T::Response;
type Error = AcceptorError<T::Error>;
type Future = AcceptorTimeoutResponse<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.inner.poll_ready().map_err(AcceptorError::Service)
}
fn call(&mut self, req: Self::Request) -> Self::Future {
AcceptorTimeoutResponse {
fut: self.inner.call(req),
sleep: sleep(self.timeout),
}
}
}
#[doc(hidden)]
pub struct AcceptorTimeoutResponse<T: Service> {
fut: T::Future,
sleep: Delay,
}
impl<T: Service> Future for AcceptorTimeoutResponse<T> {
type Item = T::Response;
type Error = AcceptorError<T::Error>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll().map_err(AcceptorError::Service)? {
Async::NotReady => match self.sleep.poll() {
Err(_) => Err(AcceptorError::Timeout),
Ok(Async::Ready(_)) => Err(AcceptorError::Timeout),
Ok(Async::NotReady) => Ok(Async::NotReady),
},
Async::Ready(resp) => Ok(Async::Ready(resp)),
}
}
}
pub(crate) struct ServerMessageAcceptor<T, H: HttpHandler> {
inner: T,
settings: ServiceConfig<H>,
}
impl<T, H> ServerMessageAcceptor<T, H>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
pub(crate) fn new(settings: ServiceConfig<H>, inner: T) -> Self {
ServerMessageAcceptor { inner, settings }
}
}
impl<T, H> NewService for ServerMessageAcceptor<T, H>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
type Request = ServerMessage;
type Response = ();
type Error = T::Error;
type InitError = T::InitError;
type Service = ServerMessageAcceptorService<T::Service, H>;
type Future = ServerMessageAcceptorResponse<T, H>;
fn new_service(&self) -> Self::Future {
ServerMessageAcceptorResponse {
fut: self.inner.new_service(),
settings: self.settings.clone(),
}
}
}
pub(crate) struct ServerMessageAcceptorResponse<T, H>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
fut: T::Future,
settings: ServiceConfig<H>,
}
impl<T, H> Future for ServerMessageAcceptorResponse<T, H>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
type Item = ServerMessageAcceptorService<T::Service, H>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(service) => Ok(Async::Ready(ServerMessageAcceptorService {
inner: service,
settings: self.settings.clone(),
})),
}
}
}
pub(crate) struct ServerMessageAcceptorService<T, H: HttpHandler> {
inner: T,
settings: ServiceConfig<H>,
}
impl<T, H> Service for ServerMessageAcceptorService<T, H>
where
H: HttpHandler,
T: Service<Request = net::TcpStream>,
{
type Request = ServerMessage;
type Response = ();
type Error = T::Error;
type Future =
Either<ServerMessageAcceptorServiceFut<T>, FutureResult<(), Self::Error>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.inner.poll_ready()
}
fn call(&mut self, req: Self::Request) -> Self::Future {
match req {
ServerMessage::Connect(stream) => {
Either::A(ServerMessageAcceptorServiceFut {
fut: self.inner.call(stream),
})
}
ServerMessage::Shutdown(_) => Either::B(ok(())),
ServerMessage::ForceShutdown => {
self.settings
.head()
.traverse(|proto: &mut HttpProtocol<TcpStream, H>| proto.shutdown());
Either::B(ok(()))
}
}
}
}
pub(crate) struct ServerMessageAcceptorServiceFut<T: Service> {
fut: T::Future,
}
impl<T> Future for ServerMessageAcceptorServiceFut<T>
where
T: Service,
{
type Item = ();
type Error = T::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(_) => Ok(Async::Ready(())),
}
}
}

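Per the "export AcceptorTimeout" and "make AcceptorTimeout::new public" commits in this range, the timeout wrapper above can be used from outside the crate. A rough sketch; the `actix_web::server::AcceptorTimeout` import path and the helper below are assumptions rather than something shown in this diff:

```rust
extern crate actix_net;
extern crate actix_web;

use actix_net::service::NewService;
// Re-export path assumed from the "export AcceptorTimeout" commit.
use actix_web::server::AcceptorTimeout;

/// Hypothetical helper: wrap any acceptor `NewService` so that each
/// accept/handshake must complete within `timeout_ms` milliseconds,
/// following the `AcceptorTimeout::new(timeout, inner)` signature above.
fn with_timeout<T: NewService>(acceptor: T, timeout_ms: u64) -> AcceptorTimeout<T> {
    AcceptorTimeout::new(timeout_ms, acceptor)
}
```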
src/server/builder.rs (new file, 117 lines)
View File

@@ -0,0 +1,117 @@
use std::{fmt, net};
use actix_net::either::Either;
use actix_net::server::{Server, ServiceFactory};
use actix_net::service::{NewService, NewServiceExt};
use super::acceptor::{
AcceptorServiceFactory, AcceptorTimeout, ServerMessageAcceptor, TcpAcceptor,
};
use super::error::AcceptorError;
use super::handler::IntoHttpHandler;
use super::service::HttpService;
use super::settings::{ServerSettings, ServiceConfig};
use super::KeepAlive;
pub(crate) trait ServiceProvider {
fn register(
&self, server: Server, lst: net::TcpListener, host: String,
addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64,
client_shutdown: u64,
) -> Server;
}
/// Utility type that builds complete http pipeline
pub(crate) struct HttpServiceBuilder<F, H, A>
where
F: Fn() -> H + Send + Clone,
{
factory: F,
acceptor: A,
}
impl<F, H, A> HttpServiceBuilder<F, H, A>
where
F: Fn() -> H + Send + Clone + 'static,
H: IntoHttpHandler,
A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
{
/// Create http service builder
pub fn new(factory: F, acceptor: A) -> Self {
Self { factory, acceptor }
}
fn finish(
&self, host: String, addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool,
client_timeout: u64, client_shutdown: u64,
) -> impl ServiceFactory {
let factory = self.factory.clone();
let acceptor = self.acceptor.clone();
move || {
let app = (factory)().into_handler();
let settings = ServiceConfig::new(
app,
keep_alive,
client_timeout,
client_shutdown,
ServerSettings::new(addr, &host, false),
);
if secure {
Either::B(ServerMessageAcceptor::new(
settings.clone(),
TcpAcceptor::new(AcceptorTimeout::new(
client_timeout,
acceptor.create(),
)).map_err(|_| ())
.map_init_err(|_| ())
.and_then(
HttpService::new(settings)
.map_init_err(|_| ())
.map_err(|_| ()),
),
))
} else {
Either::A(ServerMessageAcceptor::new(
settings.clone(),
TcpAcceptor::new(acceptor.create().map_err(AcceptorError::Service))
.map_err(|_| ())
.map_init_err(|_| ())
.and_then(
HttpService::new(settings)
.map_init_err(|_| ())
.map_err(|_| ()),
),
))
}
}
}
}
impl<F, H, A> ServiceProvider for HttpServiceBuilder<F, H, A>
where
F: Fn() -> H + Send + Clone + 'static,
A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
H: IntoHttpHandler,
{
fn register(
&self, server: Server, lst: net::TcpListener, host: String,
addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64,
client_shutdown: u64,
) -> Server {
server.listen2(
"actix-web",
lst,
self.finish(
host,
addr,
keep_alive,
secure,
client_timeout,
client_shutdown,
),
)
}
}

View File

@@ -1,21 +1,41 @@
use std::net::{Shutdown, SocketAddr};
use std::rc::Rc;
use std::{io, ptr, time};
use std::net::Shutdown;
use std::{io, mem, time};
use bytes::{Buf, BufMut, BytesMut};
use futures::{Async, Future, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::Delay;
use super::settings::WorkerSettings;
use super::{h1, h2, ConnectionTag, HttpHandler, IoStream};
use super::error::HttpDispatchError;
use super::settings::ServiceConfig;
use super::{h1, h2, HttpHandler, IoStream};
use http::StatusCode;
const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0";
enum HttpProtocol<T: IoStream, H: HttpHandler + 'static> {
H1(h1::Http1<T, H>),
pub(crate) enum HttpProtocol<T: IoStream, H: HttpHandler + 'static> {
H1(h1::Http1Dispatcher<T, H>),
H2(h2::Http2<T, H>),
Unknown(Rc<WorkerSettings<H>>, Option<SocketAddr>, T, BytesMut),
Unknown(ServiceConfig<H>, T, BytesMut),
None,
}
impl<T: IoStream, H: HttpHandler + 'static> HttpProtocol<T, H> {
pub(crate) fn shutdown(&mut self) {
match self {
HttpProtocol::H1(ref mut h1) => {
let io = h1.io();
let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0)));
let _ = IoStream::shutdown(io, Shutdown::Both);
}
HttpProtocol::H2(ref mut h2) => h2.shutdown(),
HttpProtocol::Unknown(_, io, _) => {
let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0)));
let _ = IoStream::shutdown(io, Shutdown::Both);
}
HttpProtocol::None => (),
}
}
}
enum ProtocolKind {
@@ -29,10 +49,9 @@ where
T: IoStream,
H: HttpHandler + 'static,
{
proto: Option<HttpProtocol<T, H>>,
node: Option<Node<HttpChannel<T, H>>>,
proto: HttpProtocol<T, H>,
node: Option<Node<()>>,
ka_timeout: Option<Delay>,
_tag: ConnectionTag,
}
impl<T, H> HttpChannel<T, H>
@@ -40,34 +59,13 @@ where
T: IoStream,
H: HttpHandler + 'static,
{
pub(crate) fn new(
settings: Rc<WorkerSettings<H>>, io: T, peer: Option<SocketAddr>,
) -> HttpChannel<T, H> {
let _tag = settings.connection();
let ka_timeout = settings.keep_alive_timer();
pub(crate) fn new(settings: ServiceConfig<H>, io: T) -> HttpChannel<T, H> {
let ka_timeout = settings.client_timer();
HttpChannel {
_tag,
ka_timeout,
node: None,
proto: Some(HttpProtocol::Unknown(
settings,
peer,
io,
BytesMut::with_capacity(8192),
)),
}
}
pub(crate) fn shutdown(&mut self) {
match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
let io = h1.io();
let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0)));
let _ = IoStream::shutdown(io, Shutdown::Both);
}
Some(HttpProtocol::H2(ref mut h2)) => h2.shutdown(),
_ => (),
proto: HttpProtocol::Unknown(settings, io, BytesMut::with_capacity(8192)),
}
}
}
@@ -90,14 +88,25 @@ where
H: HttpHandler + 'static,
{
type Item = ();
type Error = ();
type Error = HttpDispatchError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
// keep-alive timer
if let Some(ref mut timer) = self.ka_timeout {
match timer.poll() {
if self.ka_timeout.is_some() {
match self.ka_timeout.as_mut().unwrap().poll() {
Ok(Async::Ready(_)) => {
trace!("Slow request timed out, close connection");
let proto = mem::replace(&mut self.proto, HttpProtocol::None);
if let HttpProtocol::Unknown(settings, io, buf) = proto {
self.proto = HttpProtocol::H1(h1::Http1Dispatcher::for_error(
settings,
io,
StatusCode::REQUEST_TIMEOUT,
self.ka_timeout.take(),
buf,
));
return self.poll();
}
return Ok(Async::Ready(()));
}
Ok(Async::NotReady) => (),
@@ -106,31 +115,27 @@ where
}
if self.node.is_none() {
let el = self as *mut _;
self.node = Some(Node::new(el));
self.node = Some(Node::new(()));
let _ = match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
HttpProtocol::H1(ref mut h1) => {
self.node.as_mut().map(|n| h1.settings().head().insert(n))
}
Some(HttpProtocol::H2(ref mut h2)) => {
HttpProtocol::H2(ref mut h2) => {
self.node.as_mut().map(|n| h2.settings().head().insert(n))
}
Some(HttpProtocol::Unknown(ref mut settings, _, _, _)) => {
HttpProtocol::Unknown(ref mut settings, _, _) => {
self.node.as_mut().map(|n| settings.head().insert(n))
}
None => unreachable!(),
HttpProtocol::None => unreachable!(),
};
}
let mut is_eof = false;
let kind = match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
return h1.poll();
}
Some(HttpProtocol::H2(ref mut h2)) => {
return h2.poll();
}
Some(HttpProtocol::Unknown(_, _, ref mut io, ref mut buf)) => {
HttpProtocol::H1(ref mut h1) => return h1.poll(),
HttpProtocol::H2(ref mut h2) => return h2.poll(),
HttpProtocol::Unknown(_, ref mut io, ref mut buf) => {
let mut err = None;
let mut disconnect = false;
match io.read_available(buf) {
Ok(Async::Ready((read_some, stream_closed))) => {
@@ -140,14 +145,16 @@ where
disconnect = true;
}
}
Err(_) => {
disconnect = true;
Err(e) => {
err = Some(e.into());
}
_ => (),
}
if disconnect {
debug!("Ignored premature client disconnection");
return Err(());
return Ok(Async::Ready(()));
} else if let Some(e) = err {
return Err(e);
}
if buf.len() >= 14 {
@@ -160,31 +167,30 @@ where
return Ok(Async::NotReady);
}
}
None => unreachable!(),
HttpProtocol::None => unreachable!(),
};
// upgrade to specific http protocol
if let Some(HttpProtocol::Unknown(settings, addr, io, buf)) = self.proto.take() {
let proto = mem::replace(&mut self.proto, HttpProtocol::None);
if let HttpProtocol::Unknown(settings, io, buf) = proto {
match kind {
ProtocolKind::Http1 => {
self.proto = Some(HttpProtocol::H1(h1::Http1::new(
self.proto = HttpProtocol::H1(h1::Http1Dispatcher::new(
settings,
io,
addr,
buf,
is_eof,
self.ka_timeout.take(),
)));
));
return self.poll();
}
ProtocolKind::Http2 => {
self.proto = Some(HttpProtocol::H2(h2::Http2::new(
self.proto = HttpProtocol::H2(h2::Http2::new(
settings,
io,
addr,
buf.freeze(),
self.ka_timeout.take(),
)));
));
return self.poll();
}
}
@@ -193,47 +199,117 @@ where
}
}
#[doc(hidden)]
pub struct H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
proto: HttpProtocol<T, H>,
node: Option<Node<()>>,
}
impl<T, H> H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
pub(crate) fn new(settings: ServiceConfig<H>, io: T) -> H1Channel<T, H> {
H1Channel {
node: None,
proto: HttpProtocol::H1(h1::Http1Dispatcher::new(
settings,
io,
BytesMut::with_capacity(8192),
false,
None,
)),
}
}
}
impl<T, H> Drop for H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
fn drop(&mut self) {
if let Some(mut node) = self.node.take() {
node.remove();
}
}
}
impl<T, H> Future for H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
type Item = ();
type Error = HttpDispatchError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.node.is_none() {
self.node = Some(Node::new(()));
match self.proto {
HttpProtocol::H1(ref mut h1) => {
self.node.as_mut().map(|n| h1.settings().head().insert(n));
}
_ => unreachable!(),
};
}
match self.proto {
HttpProtocol::H1(ref mut h1) => h1.poll(),
_ => unreachable!(),
}
}
}
pub(crate) struct Node<T> {
next: Option<*mut Node<T>>,
prev: Option<*mut Node<T>>,
element: *mut T,
element: T,
}
impl<T> Node<T> {
fn new(el: *mut T) -> Self {
fn new(element: T) -> Self {
Node {
element,
next: None,
prev: None,
element: el,
}
}
fn insert<I>(&mut self, next_el: &mut Node<I>) {
unsafe {
let next: *mut Node<T> = next_el as *const _ as *mut _;
let next: *mut Node<T> = next_el as *const _ as *mut _;
if let Some(next2) = self.next {
if let Some(next2) = self.next {
unsafe {
let n = next2.as_mut().unwrap();
n.prev = Some(next);
next_el.next = Some(next2 as *mut _);
}
self.next = Some(next);
next_el.next = Some(next2 as *mut _);
}
self.next = Some(next);
unsafe {
let next: &mut Node<T> = &mut *next;
next.prev = Some(self as *mut _);
}
}
fn remove(&mut self) {
unsafe {
self.element = ptr::null_mut();
let next = self.next.take();
let prev = self.prev.take();
let next = self.next.take();
let prev = self.prev.take();
if let Some(prev) = prev {
if let Some(prev) = prev {
unsafe {
prev.as_mut().unwrap().next = next;
}
if let Some(next) = next {
}
if let Some(next) = next {
unsafe {
next.as_mut().unwrap().prev = prev;
}
}
@@ -245,30 +321,28 @@ impl Node<()> {
Node {
next: None,
prev: None,
element: ptr::null_mut(),
element: (),
}
}
pub(crate) fn traverse<T, H, F: Fn(&mut HttpChannel<T, H>)>(&self, f: F)
pub(crate) fn traverse<T, H, F: Fn(&mut HttpProtocol<T, H>)>(&self, f: F)
where
T: IoStream,
H: HttpHandler + 'static,
{
let mut next = self.next.as_ref();
loop {
if let Some(n) = next {
unsafe {
let n: &Node<()> = &*(n.as_ref().unwrap() as *const _);
next = n.next.as_ref();
if let Some(n) = self.next.as_ref() {
unsafe {
let mut next: &mut Node<HttpProtocol<T, H>> =
&mut *(n.as_ref().unwrap() as *const _ as *mut _);
loop {
f(&mut next.element);
if !n.element.is_null() {
let ch: &mut HttpChannel<T, H> =
&mut *(&mut *(n.element as *mut _) as *mut () as *mut _);
f(ch);
next = if let Some(n) = next.next.as_ref() {
&mut **n
} else {
return;
}
}
} else {
return;
}
}
}
@@ -307,6 +381,10 @@ where
fn set_linger(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
#[inline]
fn set_keepalive(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
}
impl<T> io::Read for WrapperStream<T>

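`HttpChannel` above buffers bytes until it can tell HTTP/1 from HTTP/2: the hunk shows the `HTTP2_PREFACE` constant and the 14-byte threshold, though it is truncated before the comparison itself. A standalone illustration of that sniffing step; the exact comparison against the preface is an assumption:

```rust
// First 14 bytes of the HTTP/2 client connection preface, as in channel.rs.
const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0";

#[derive(Debug, PartialEq)]
enum Kind {
    Http1,
    Http2,
}

// Returns None until enough bytes are buffered to decide.
fn sniff(buf: &[u8]) -> Option<Kind> {
    if buf.len() < 14 {
        return None;
    }
    if buf[..14] == HTTP2_PREFACE[..] {
        Some(Kind::Http2)
    } else {
        Some(Kind::Http1)
    }
}

fn main() {
    assert_eq!(sniff(b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"), Some(Kind::Http2));
    assert_eq!(sniff(b"GET / HTTP/1.1\r\nHost: x\r\n\r\n"), Some(Kind::Http1));
    assert_eq!(sniff(b"GET"), None);
}
```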
View File

@@ -1,9 +1,84 @@
use std::io;
use futures::{Async, Poll};
use http2;
use super::{helpers, HttpHandlerTask, Writer};
use http::{StatusCode, Version};
use Error;
/// Errors produced by `AcceptorError` service.
#[derive(Debug)]
pub enum AcceptorError<T> {
/// The inner service error
Service(T),
/// Io specific error
Io(io::Error),
/// The request did not complete within the specified timeout.
Timeout,
}
#[derive(Fail, Debug)]
/// A set of errors that can occur during dispatching http requests
pub enum HttpDispatchError {
/// Application error
#[fail(display = "Application specific error: {}", _0)]
App(Error),
/// An `io::Error` that occurred while trying to read or write to a network
/// stream.
#[fail(display = "IO error: {}", _0)]
Io(io::Error),
/// The first request did not complete within the specified timeout.
#[fail(display = "The first request did not complete within the specified timeout")]
SlowRequestTimeout,
/// Shutdown timeout
#[fail(display = "Connection shutdown timeout")]
ShutdownTimeout,
/// HTTP2 error
#[fail(display = "HTTP2 error: {}", _0)]
Http2(http2::Error),
/// Payload is not consumed
#[fail(display = "Task is completed but request's payload is not consumed")]
PayloadIsNotConsumed,
/// Malformed request
#[fail(display = "Malformed request")]
MalformedRequest,
/// Internal error
#[fail(display = "Internal error")]
InternalError,
/// Unknown error
#[fail(display = "Unknown error")]
Unknown,
}
impl From<Error> for HttpDispatchError {
fn from(err: Error) -> Self {
HttpDispatchError::App(err)
}
}
impl From<io::Error> for HttpDispatchError {
fn from(err: io::Error) -> Self {
HttpDispatchError::Io(err)
}
}
impl From<http2::Error> for HttpDispatchError {
fn from(err: http2::Error) -> Self {
HttpDispatchError::Http2(err)
}
}
pub(crate) struct ServerError(Version, StatusCode);
impl ServerError {

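Because `HttpDispatchError` gets `From` impls for `Error`, `io::Error` and `http2::Error` above, lower-level failures convert into it with `?`. A tiny illustration; the `actix_web::server::HttpDispatchError` re-export path is assumed:

```rust
extern crate actix_web;

use std::io;

// Re-export path assumed; the enum itself is declared `pub` above.
use actix_web::server::HttpDispatchError;

// Illustrative only: `?` converts the io::Error via the From impl shown above.
fn send_bytes<W: io::Write>(stream: &mut W, buf: &[u8]) -> Result<(), HttpDispatchError> {
    stream.write_all(buf)?;
    Ok(())
}
```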
View File

@@ -1,118 +1,155 @@
use std::collections::VecDeque;
use std::net::SocketAddr;
use std::rc::Rc;
use std::net::{Shutdown, SocketAddr};
use std::time::{Duration, Instant};
use bytes::BytesMut;
use futures::{Async, Future, Poll};
use tokio_current_thread::spawn;
use tokio_timer::Delay;
use error::{Error, PayloadError};
use http::{StatusCode, Version};
use payload::{Payload, PayloadStatus, PayloadWriter};
use super::error::ServerError;
use super::error::{HttpDispatchError, ServerError};
use super::h1decoder::{DecoderError, H1Decoder, Message};
use super::h1writer::H1Writer;
use super::handler::{HttpHandler, HttpHandlerTask, HttpHandlerTaskFut};
use super::input::PayloadType;
use super::settings::WorkerSettings;
use super::Writer;
use super::{HttpHandler, HttpHandlerTask, IoStream};
use super::settings::ServiceConfig;
use super::{IoStream, Writer};
const MAX_PIPELINED_MESSAGES: usize = 16;
bitflags! {
pub struct Flags: u8 {
const STARTED = 0b0000_0001;
const ERROR = 0b0000_0010;
const KEEPALIVE_ENABLED = 0b0000_0010;
const KEEPALIVE = 0b0000_0100;
const SHUTDOWN = 0b0000_1000;
const READ_DISCONNECTED = 0b0001_0000;
const WRITE_DISCONNECTED = 0b0010_0000;
const POLLED = 0b0100_0000;
const FLUSHED = 0b1000_0000;
}
}
bitflags! {
struct EntryFlags: u8 {
const EOF = 0b0000_0001;
const ERROR = 0b0000_0010;
const FINISHED = 0b0000_0100;
}
}
pub(crate) struct Http1<T: IoStream, H: HttpHandler + 'static> {
/// Dispatcher for HTTP/1.1 protocol
pub struct Http1Dispatcher<T: IoStream, H: HttpHandler + 'static> {
flags: Flags,
settings: Rc<WorkerSettings<H>>,
settings: ServiceConfig<H>,
addr: Option<SocketAddr>,
stream: H1Writer<T, H>,
decoder: H1Decoder,
payload: Option<PayloadType>,
buf: BytesMut,
tasks: VecDeque<Entry<H>>,
keepalive_timer: Option<Delay>,
error: Option<HttpDispatchError>,
ka_expire: Instant,
ka_timer: Option<Delay>,
}
enum EntryPipe<H: HttpHandler> {
enum Entry<H: HttpHandler> {
Task(H::Task),
Error(Box<HttpHandlerTask>),
}
impl<H: HttpHandler> EntryPipe<H> {
impl<H: HttpHandler> Entry<H> {
fn into_task(self) -> H::Task {
match self {
Entry::Task(task) => task,
Entry::Error(_) => panic!(),
}
}
fn disconnected(&mut self) {
match *self {
EntryPipe::Task(ref mut task) => task.disconnected(),
EntryPipe::Error(ref mut task) => task.disconnected(),
Entry::Task(ref mut task) => task.disconnected(),
Entry::Error(ref mut task) => task.disconnected(),
}
}
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
match *self {
EntryPipe::Task(ref mut task) => task.poll_io(io),
EntryPipe::Error(ref mut task) => task.poll_io(io),
Entry::Task(ref mut task) => task.poll_io(io),
Entry::Error(ref mut task) => task.poll_io(io),
}
}
fn poll_completed(&mut self) -> Poll<(), Error> {
match *self {
EntryPipe::Task(ref mut task) => task.poll_completed(),
EntryPipe::Error(ref mut task) => task.poll_completed(),
Entry::Task(ref mut task) => task.poll_completed(),
Entry::Error(ref mut task) => task.poll_completed(),
}
}
}
struct Entry<H: HttpHandler> {
pipe: EntryPipe<H>,
flags: EntryFlags,
}
impl<T, H> Http1<T, H>
impl<T, H> Http1Dispatcher<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
pub fn new(
settings: Rc<WorkerSettings<H>>, stream: T, addr: Option<SocketAddr>,
buf: BytesMut, is_eof: bool, keepalive_timer: Option<Delay>,
settings: ServiceConfig<H>, stream: T, buf: BytesMut, is_eof: bool,
keepalive_timer: Option<Delay>,
) -> Self {
Http1 {
flags: if is_eof {
Flags::READ_DISCONNECTED
} else {
Flags::KEEPALIVE
},
stream: H1Writer::new(stream, Rc::clone(&settings)),
let addr = stream.peer_addr();
let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer {
(delay.deadline(), Some(delay))
} else if let Some(delay) = settings.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(settings.now(), None)
};
let flags = if is_eof {
Flags::READ_DISCONNECTED | Flags::FLUSHED
} else if settings.keep_alive_enabled() {
Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED | Flags::FLUSHED
} else {
Flags::empty()
};
Http1Dispatcher {
stream: H1Writer::new(stream, settings.clone()),
decoder: H1Decoder::new(),
payload: None,
tasks: VecDeque::new(),
error: None,
flags,
addr,
buf,
settings,
keepalive_timer,
ka_timer,
ka_expire,
}
}
pub(crate) fn for_error(
settings: ServiceConfig<H>, stream: T, status: StatusCode,
mut keepalive_timer: Option<Delay>, buf: BytesMut,
) -> Self {
if let Some(deadline) = settings.client_timer_expire() {
let _ = keepalive_timer.as_mut().map(|delay| delay.reset(deadline));
}
let mut disp = Http1Dispatcher {
flags: Flags::STARTED | Flags::READ_DISCONNECTED | Flags::FLUSHED,
stream: H1Writer::new(stream, settings.clone()),
decoder: H1Decoder::new(),
payload: None,
tasks: VecDeque::new(),
error: None,
addr: None,
ka_timer: keepalive_timer,
ka_expire: settings.now(),
buf,
settings,
};
disp.push_response_entry(status);
disp
}
#[inline]
pub fn settings(&self) -> &WorkerSettings<H> {
self.settings.as_ref()
pub fn settings(&self) -> &ServiceConfig<H> {
&self.settings
}
#[inline]
@@ -122,10 +159,7 @@ where
#[inline]
fn can_read(&self) -> bool {
if self
.flags
.intersects(Flags::ERROR | Flags::READ_DISCONNECTED)
{
if self.flags.contains(Flags::READ_DISCONNECTED) {
return false;
}
@@ -136,247 +170,302 @@ where
}
}
fn notify_disconnect(&mut self) {
self.flags.insert(Flags::WRITE_DISCONNECTED);
// notify all tasks
self.stream.disconnected();
for task in &mut self.tasks {
task.pipe.disconnected();
}
}
fn client_disconnect(&mut self) {
// notify all tasks
self.notify_disconnect();
// kill keepalive
self.keepalive_timer.take();
// on parse error, stop reading stream but tasks need to be
// completed
self.flags.insert(Flags::ERROR);
// if checked is set to true, delay disconnect until all tasks have finished.
fn client_disconnected(&mut self, checked: bool) {
self.flags.insert(Flags::READ_DISCONNECTED);
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::Incomplete);
}
if !checked || self.tasks.is_empty() {
self.flags
.insert(Flags::WRITE_DISCONNECTED | Flags::FLUSHED);
self.stream.disconnected();
// notify all tasks
for mut task in self.tasks.drain(..) {
task.disconnected();
match task.poll_completed() {
Ok(Async::NotReady) => {
// spawn the not-yet-completed task; it does not require access to io
// at this point
spawn(HttpHandlerTaskFut::new(task.into_task()));
}
Ok(Async::Ready(_)) => (),
Err(err) => {
error!("Unhandled application error: {}", err);
}
}
}
}
}
#[inline]
pub fn poll(&mut self) -> Poll<(), ()> {
// keep-alive timer
if let Some(ref mut timer) = self.keepalive_timer {
match timer.poll() {
Ok(Async::Ready(_)) => {
trace!("Keep-alive timeout, close connection");
self.flags.insert(Flags::SHUTDOWN);
}
Ok(Async::NotReady) => (),
Err(_) => unreachable!(),
}
}
pub fn poll(&mut self) -> Poll<(), HttpDispatchError> {
// check connection keep-alive
self.poll_keepalive()?;
// shutdown
if self.flags.contains(Flags::SHUTDOWN) {
if self.flags.intersects(
Flags::ERROR | Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED,
) {
if self.flags.contains(Flags::WRITE_DISCONNECTED) {
return Ok(Async::Ready(()));
}
match self.stream.poll_completed(true) {
Ok(Async::NotReady) => return Ok(Async::NotReady),
Ok(Async::Ready(_)) => return Ok(Async::Ready(())),
Err(err) => {
debug!("Error sending data: {}", err);
return Err(());
}
}
return self.poll_flush(true);
}
self.poll_io();
// process incoming requests
if !self.flags.contains(Flags::WRITE_DISCONNECTED) {
self.poll_handler()?;
loop {
match self.poll_handler()? {
Async::Ready(true) => {
self.poll_io();
// flush stream
self.poll_flush(false)?;
// deal with keep-alive and stream eof (client-side write shutdown)
if self.tasks.is_empty() && self.flags.contains(Flags::FLUSHED) {
// handle stream eof
if self
.flags
.intersects(Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED)
{
return Ok(Async::Ready(()));
}
Async::Ready(false) => {
// no keep-alive
if self.flags.contains(Flags::STARTED)
&& (!self.flags.contains(Flags::KEEPALIVE_ENABLED)
|| !self.flags.contains(Flags::KEEPALIVE))
{
self.flags.insert(Flags::SHUTDOWN);
return self.poll();
}
Async::NotReady => return Ok(Async::NotReady),
}
Ok(Async::NotReady)
} else if let Some(err) = self.error.take() {
Err(err)
} else {
Ok(Async::Ready(()))
}
}
#[inline]
/// read data from stream
pub fn poll_io(&mut self) {
if !self.flags.contains(Flags::POLLED) {
self.parse();
self.flags.insert(Flags::POLLED);
return;
}
// read io from socket
if self.can_read() && self.tasks.len() < MAX_PIPELINED_MESSAGES {
match self.stream.get_mut().read_available(&mut self.buf) {
Ok(Async::Ready((read_some, disconnected))) => {
if read_some {
self.parse();
/// Flush stream
fn poll_flush(&mut self, shutdown: bool) -> Poll<(), HttpDispatchError> {
if shutdown || self.flags.contains(Flags::STARTED) {
match self.stream.poll_completed(shutdown) {
Ok(Async::NotReady) => {
// mark stream
if !self.stream.flushed() {
self.flags.remove(Flags::FLUSHED);
}
if disconnected {
// delay disconnect until all tasks have finished.
self.flags.insert(Flags::READ_DISCONNECTED);
Ok(Async::NotReady)
}
Err(err) => {
debug!("Error sending data: {}", err);
self.client_disconnected(false);
Err(err.into())
}
Ok(Async::Ready(_)) => {
// if payload is not consumed we can not use connection
if self.payload.is_some() && self.tasks.is_empty() {
return Err(HttpDispatchError::PayloadIsNotConsumed);
}
self.flags.insert(Flags::FLUSHED);
Ok(Async::Ready(()))
}
}
} else {
Ok(Async::Ready(()))
}
}
/// Process the keep-alive timer: on a slow-request timeout queue a 408 response,
/// on keep-alive/shutdown timeout close the connection, otherwise reset the timer.
fn poll_keepalive(&mut self) -> Result<(), HttpDispatchError> {
if let Some(ref mut timer) = self.ka_timer {
match timer.poll() {
Ok(Async::Ready(_)) => {
// if the timer fires during shutdown, just drop the connection
if self.flags.contains(Flags::SHUTDOWN) {
let io = self.stream.get_mut();
let _ = IoStream::set_linger(io, Some(Duration::from_secs(0)));
let _ = IoStream::shutdown(io, Shutdown::Both);
return Err(HttpDispatchError::ShutdownTimeout);
}
if timer.deadline() >= self.ka_expire {
// check for any outstanding request handling
if self.tasks.is_empty() {
self.client_disconnect();
if !self.flags.contains(Flags::STARTED) {
// timeout on first request (slow request), return 408
trace!("Slow request timeout");
self.flags
.insert(Flags::STARTED | Flags::READ_DISCONNECTED);
self.tasks.push_back(Entry::Error(ServerError::err(
Version::HTTP_11,
StatusCode::REQUEST_TIMEOUT,
)));
} else {
trace!("Keep-alive timeout, close connection");
self.flags.insert(Flags::SHUTDOWN);
// start shutdown timer
if let Some(deadline) =
self.settings.client_shutdown_timer()
{
timer.reset(deadline)
} else {
return Ok(());
}
}
} else if let Some(dl) = self.settings.keep_alive_expire() {
timer.reset(dl)
}
} else {
timer.reset(self.ka_expire)
}
}
Ok(Async::NotReady) => (),
Err(_) => {
self.client_disconnect();
Err(e) => {
error!("Timer error {:?}", e);
return Err(HttpDispatchError::Unknown);
}
}
}
Ok(())
}
pub fn poll_handler(&mut self) -> Poll<bool, ()> {
let retry = self.can_read();
// check in-flight messages
let mut io = false;
let mut idx = 0;
while idx < self.tasks.len() {
// only one task can do io operation in http/1
if !io
&& !self.tasks[idx].flags.contains(EntryFlags::EOF)
&& !self.flags.contains(Flags::WRITE_DISCONNECTED)
{
// io is corrupted, send buffer
if self.tasks[idx].flags.contains(EntryFlags::ERROR) {
if let Ok(Async::NotReady) = self.stream.poll_completed(true) {
return Ok(Async::NotReady);
}
self.flags.insert(Flags::ERROR);
return Err(());
}
match self.tasks[idx].pipe.poll_io(&mut self.stream) {
Ok(Async::Ready(ready)) => {
// override keep-alive state
if self.stream.keepalive() {
self.flags.insert(Flags::KEEPALIVE);
} else {
self.flags.remove(Flags::KEEPALIVE);
}
// prepare stream for next response
self.stream.reset();
if ready {
self.tasks[idx]
.flags
.insert(EntryFlags::EOF | EntryFlags::FINISHED);
} else {
self.tasks[idx].flags.insert(EntryFlags::EOF);
}
}
// no more IO for this iteration
Ok(Async::NotReady) => {
// check if previously read backpressure was enabled
if self.can_read() && !retry {
return Ok(Async::Ready(true));
}
io = true;
}
Err(err) => {
// it is not possible to recover from error
// during pipe handling, so just drop connection
self.notify_disconnect();
self.tasks[idx].flags.insert(EntryFlags::ERROR);
error!("Unhandled error1: {}", err);
continue;
}
}
} else if !self.tasks[idx].flags.contains(EntryFlags::FINISHED) {
match self.tasks[idx].pipe.poll_completed() {
Ok(Async::NotReady) => (),
Ok(Async::Ready(_)) => {
self.tasks[idx].flags.insert(EntryFlags::FINISHED)
}
Err(err) => {
self.notify_disconnect();
self.tasks[idx].flags.insert(EntryFlags::ERROR);
error!("Unhandled error: {}", err);
continue;
}
}
}
idx += 1;
}
// cleanup finished tasks
while !self.tasks.is_empty() {
if self.tasks[0]
.flags
.contains(EntryFlags::EOF | EntryFlags::FINISHED)
{
self.tasks.pop_front();
} else {
break;
#[inline]
/// read data from the stream
pub(self) fn poll_io(&mut self) -> Result<bool, HttpDispatchError> {
if !self.flags.contains(Flags::POLLED) {
self.flags.insert(Flags::POLLED);
if !self.buf.is_empty() {
let updated = self.parse()?;
return Ok(updated);
}
}
// check stream state
if self.flags.contains(Flags::STARTED) {
match self.stream.poll_completed(false) {
Ok(Async::NotReady) => return Ok(Async::NotReady),
// read io from socket
let mut updated = false;
if self.can_read() && self.tasks.len() < MAX_PIPELINED_MESSAGES {
match self.stream.get_mut().read_available(&mut self.buf) {
Ok(Async::Ready((read_some, disconnected))) => {
if read_some && self.parse()? {
updated = true;
}
if disconnected {
self.client_disconnected(true);
}
}
Ok(Async::NotReady) => (),
Err(err) => {
debug!("Error sending data: {}", err);
self.notify_disconnect();
return Err(());
}
Ok(Async::Ready(_)) => {
// payload was not consumed; in that case close the connection
if self.payload.is_some() && self.tasks.is_empty() {
return Ok(Async::Ready(false));
}
self.client_disconnected(false);
return Err(err.into());
}
}
}
// deal with keep-alive and stream eof (client-side write shutdown)
if self.tasks.is_empty() {
// handle stream eof
if self.flags.contains(Flags::READ_DISCONNECTED) {
return Ok(Async::Ready(false));
}
// no keep-alive
if self.flags.contains(Flags::ERROR)
|| (!self.flags.contains(Flags::KEEPALIVE)
|| !self.settings.keep_alive_enabled())
&& self.flags.contains(Flags::STARTED)
{
return Ok(Async::Ready(false));
}
// start keep-alive timer
let keep_alive = self.settings.keep_alive();
if self.keepalive_timer.is_none() && keep_alive > 0 {
trace!("Start keep-alive timer");
let mut timer =
Delay::new(Instant::now() + Duration::from_secs(keep_alive));
// register timer
let _ = timer.poll();
self.keepalive_timer = Some(timer);
}
}
Ok(Async::NotReady)
Ok(updated)
}
pub fn parse(&mut self) {
pub(self) fn poll_handler(&mut self) -> Result<(), HttpDispatchError> {
self.poll_io()?;
let mut retry = self.can_read();
// process first pipelined response, only first task can do io operation in http/1
while !self.tasks.is_empty() {
match self.tasks[0].poll_io(&mut self.stream) {
Ok(Async::Ready(ready)) => {
// override keep-alive state
if self.stream.keepalive() {
self.flags.insert(Flags::KEEPALIVE);
} else {
self.flags.remove(Flags::KEEPALIVE);
}
// prepare stream for next response
self.stream.reset();
let task = self.tasks.pop_front().unwrap();
if !ready {
// task is done with io operations but still needs to do more work
spawn(HttpHandlerTaskFut::new(task.into_task()));
}
}
Ok(Async::NotReady) => {
// check if we need timer
if self.ka_timer.is_some() && self.stream.upgrade() {
self.ka_timer.take();
}
// if read-backpressure is enabled and we consumed some data,
// we may read more data and retry
if !retry && self.can_read() && self.poll_io()? {
retry = self.can_read();
continue;
}
break;
}
Err(err) => {
error!("Unhandled error1: {}", err);
// it is not possible to recover from error
// during pipe handling, so just drop connection
self.client_disconnected(false);
return Err(err.into());
}
}
}
// check in-flight messages. all tasks must stay alive and
// produce a response; if the app returned an error we can not
// continue processing incoming requests.
let mut idx = 1;
while idx < self.tasks.len() {
let stop = match self.tasks[idx].poll_completed() {
Ok(Async::NotReady) => false,
Ok(Async::Ready(_)) => true,
Err(err) => {
self.error = Some(err.into());
true
}
};
if stop {
// error in task handling, or the task completed without producing
// a response; we can not read more requests because the pipeline
// sequence is broken, but we can safely complete the existing tasks
self.flags.insert(Flags::READ_DISCONNECTED);
for mut task in self.tasks.drain(idx..) {
task.disconnected();
match task.poll_completed() {
Ok(Async::NotReady) => {
// spawn the not-yet-completed task; it does not require access to io
// at this point
spawn(HttpHandlerTaskFut::new(task.into_task()));
}
Ok(Async::Ready(_)) => (),
Err(err) => {
error!("Unhandled application error: {}", err);
}
}
}
break;
} else {
idx += 1;
}
}
Ok(())
}
fn push_response_entry(&mut self, status: StatusCode) {
self.tasks
.push_back(Entry::Error(ServerError::err(Version::HTTP_11, status)));
}
pub(self) fn parse(&mut self) -> Result<bool, HttpDispatchError> {
let mut updated = false;
'outer: loop {
match self.decoder.decode(&mut self.buf, &self.settings) {
Ok(Some(Message::Message { mut msg, payload })) => {
updated = true;
self.flags.insert(Flags::STARTED);
if payload {
@@ -392,89 +481,76 @@ where
// set remote addr
msg.inner_mut().addr = self.addr;
// stop keepalive timer
self.keepalive_timer.take();
// search handler for request
for h in self.settings.handlers().iter() {
msg = match h.handle(msg) {
Ok(mut pipe) => {
if self.tasks.is_empty() {
match pipe.poll_io(&mut self.stream) {
Ok(Async::Ready(ready)) => {
// override keep-alive state
if self.stream.keepalive() {
self.flags.insert(Flags::KEEPALIVE);
} else {
self.flags.remove(Flags::KEEPALIVE);
}
// prepare stream for next response
self.stream.reset();
match self.settings.handler().handle(msg) {
Ok(mut task) => {
if self.tasks.is_empty() {
match task.poll_io(&mut self.stream) {
Ok(Async::Ready(ready)) => {
// override keep-alive state
if self.stream.keepalive() {
self.flags.insert(Flags::KEEPALIVE);
} else {
self.flags.remove(Flags::KEEPALIVE);
}
// prepare stream for next response
self.stream.reset();
if !ready {
let item = Entry {
pipe: EntryPipe::Task(pipe),
flags: EntryFlags::EOF,
};
self.tasks.push_back(item);
}
continue 'outer;
}
Ok(Async::NotReady) => {}
Err(err) => {
error!("Unhandled error: {}", err);
self.flags.insert(Flags::ERROR);
return;
if !ready {
// task is done with io operations
// but still needs to do more work
spawn(HttpHandlerTaskFut::new(task));
}
continue 'outer;
}
Ok(Async::NotReady) => (),
Err(err) => {
error!("Unhandled error: {}", err);
self.client_disconnected(false);
return Err(err.into());
}
}
self.tasks.push_back(Entry {
pipe: EntryPipe::Task(pipe),
flags: EntryFlags::empty(),
});
continue 'outer;
}
Err(msg) => msg,
self.tasks.push_back(Entry::Task(task));
continue 'outer;
}
Err(_) => {
// handler is not found
self.push_response_entry(StatusCode::NOT_FOUND);
}
}
// handler is not found
self.tasks.push_back(Entry {
pipe: EntryPipe::Error(ServerError::err(
Version::HTTP_11,
StatusCode::NOT_FOUND,
)),
flags: EntryFlags::empty(),
});
}
Ok(Some(Message::Chunk(chunk))) => {
updated = true;
if let Some(ref mut payload) = self.payload {
payload.feed_data(chunk);
} else {
error!("Internal server error: unexpected payload chunk");
self.flags.insert(Flags::ERROR);
self.flags.insert(Flags::READ_DISCONNECTED | Flags::STARTED);
self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR);
self.error = Some(HttpDispatchError::InternalError);
break;
}
}
Ok(Some(Message::Eof)) => {
updated = true;
if let Some(mut payload) = self.payload.take() {
payload.feed_eof();
} else {
error!("Internal server error: unexpected eof");
self.flags.insert(Flags::ERROR);
self.flags.insert(Flags::READ_DISCONNECTED | Flags::STARTED);
self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR);
self.error = Some(HttpDispatchError::InternalError);
break;
}
}
Ok(None) => {
if self.flags.contains(Flags::READ_DISCONNECTED)
&& self.tasks.is_empty()
{
self.client_disconnect();
if self.flags.contains(Flags::READ_DISCONNECTED) {
self.client_disconnected(true);
}
break;
}
Err(e) => {
self.flags.insert(Flags::ERROR);
if let Some(mut payload) = self.payload.take() {
let e = match e {
DecoderError::Io(e) => PayloadError::Io(e),
@@ -482,10 +558,22 @@ where
};
payload.set_error(e);
}
// Malformed requests are answered with 400
self.push_response_entry(StatusCode::BAD_REQUEST);
self.flags.insert(Flags::READ_DISCONNECTED | Flags::STARTED);
self.error = Some(HttpDispatchError::MalformedRequest);
break;
}
}
}
if self.ka_timer.is_some() && updated {
if let Some(expire) = self.settings.keep_alive_expire() {
self.ka_expire = expire;
}
}
Ok(updated)
}
}
@@ -494,24 +582,28 @@ mod tests {
use std::net::Shutdown;
use std::{cmp, io, time};
use actix::System;
use bytes::{Buf, Bytes, BytesMut};
use futures::future;
use http::{Method, Version};
use tokio_io::{AsyncRead, AsyncWrite};
use super::*;
use application::HttpApplication;
use application::{App, HttpApplication};
use httpmessage::HttpMessage;
use server::h1decoder::Message;
use server::settings::{ServerSettings, WorkerSettings};
use server::{Connections, KeepAlive, Request};
use server::handler::IntoHttpHandler;
use server::settings::{ServerSettings, ServiceConfig};
use server::{KeepAlive, Request};
fn wrk_settings() -> Rc<WorkerSettings<HttpApplication>> {
Rc::new(WorkerSettings::<HttpApplication>::new(
Vec::new(),
fn wrk_settings() -> ServiceConfig<HttpApplication> {
ServiceConfig::<HttpApplication>::new(
App::new().into_handler(),
KeepAlive::Os,
5000,
2000,
ServerSettings::default(),
Connections::default(),
))
)
}
impl Message {
@@ -608,6 +700,9 @@ mod tests {
fn set_linger(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
fn set_keepalive(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
}
impl io::Write for Buffer {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
@@ -626,40 +721,22 @@ mod tests {
}
}
#[test]
fn test_req_parse1() {
let buf = Buffer::new("GET /test HTTP/1.1\r\n\r\n");
let readbuf = BytesMut::new();
let settings = Rc::new(wrk_settings());
let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false, None);
h1.poll_io();
h1.poll_io();
assert_eq!(h1.tasks.len(), 1);
}
#[test]
fn test_req_parse2() {
let buf = Buffer::new("");
let readbuf =
BytesMut::from(Vec::<u8>::from(&b"GET /test HTTP/1.1\r\n\r\n"[..]));
let settings = Rc::new(wrk_settings());
let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true, None);
h1.poll_io();
assert_eq!(h1.tasks.len(), 1);
}
#[test]
fn test_req_parse_err() {
let buf = Buffer::new("GET /test HTTP/1\r\n\r\n");
let readbuf = BytesMut::new();
let settings = Rc::new(wrk_settings());
let mut sys = System::new("test");
let _ = sys.block_on(future::lazy(|| {
let buf = Buffer::new("GET /test HTTP/1\r\n\r\n");
let readbuf = BytesMut::new();
let settings = wrk_settings();
let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false, None);
h1.poll_io();
h1.poll_io();
assert!(h1.flags.contains(Flags::ERROR));
let mut h1 =
Http1Dispatcher::new(settings.clone(), buf, readbuf, false, None);
assert!(h1.poll_io().is_ok());
assert!(h1.poll_io().is_ok());
assert!(h1.flags.contains(Flags::READ_DISCONNECTED));
assert_eq!(h1.tasks.len(), 1);
future::ok::<_, ()>(())
}));
}
#[test]


@@ -5,7 +5,7 @@ use futures::{Async, Poll};
use httparse;
use super::message::{MessageFlags, Request};
use super::settings::WorkerSettings;
use super::settings::ServiceConfig;
use error::ParseError;
use http::header::{HeaderName, HeaderValue};
use http::{header, HttpTryFrom, Method, Uri, Version};
@@ -18,6 +18,7 @@ pub(crate) struct H1Decoder {
decoder: Option<EncodingDecoder>,
}
#[derive(Debug)]
pub(crate) enum Message {
Message { msg: Request, payload: bool },
Chunk(Bytes),
@@ -42,7 +43,7 @@ impl H1Decoder {
}
pub fn decode<H>(
&mut self, src: &mut BytesMut, settings: &WorkerSettings<H>,
&mut self, src: &mut BytesMut, settings: &ServiceConfig<H>,
) -> Result<Option<Message>, DecoderError> {
// read payload
if self.decoder.is_some() {
@@ -79,7 +80,7 @@ impl H1Decoder {
}
fn parse_message<H>(
&self, buf: &mut BytesMut, settings: &WorkerSettings<H>,
&self, buf: &mut BytesMut, settings: &ServiceConfig<H>,
) -> Poll<(Request, Option<EncodingDecoder>), ParseError> {
// Parse http message
let mut has_upgrade = false;

View File

@@ -1,7 +1,6 @@
// #![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))]
use std::io::{self, Write};
use std::rc::Rc;
use bytes::{BufMut, BytesMut};
use futures::{Async, Poll};
@@ -9,7 +8,7 @@ use tokio_io::AsyncWrite;
use super::helpers;
use super::output::{Output, ResponseInfo, ResponseLength};
use super::settings::WorkerSettings;
use super::settings::ServiceConfig;
use super::Request;
use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
use body::{Binary, Body};
@@ -38,11 +37,11 @@ pub(crate) struct H1Writer<T: AsyncWrite, H: 'static> {
headers_size: u32,
buffer: Output,
buffer_capacity: usize,
settings: Rc<WorkerSettings<H>>,
settings: ServiceConfig<H>,
}
impl<T: AsyncWrite, H: 'static> H1Writer<T, H> {
pub fn new(stream: T, settings: Rc<WorkerSettings<H>>) -> H1Writer<T, H> {
pub fn new(stream: T, settings: ServiceConfig<H>) -> H1Writer<T, H> {
H1Writer {
flags: Flags::KEEPALIVE,
written: 0,
@@ -63,10 +62,18 @@ impl<T: AsyncWrite, H: 'static> H1Writer<T, H> {
self.flags = Flags::KEEPALIVE;
}
pub fn flushed(&mut self) -> bool {
self.buffer.is_empty()
}
pub fn disconnected(&mut self) {
self.flags.insert(Flags::DISCONNECTED);
}
pub fn upgrade(&self) -> bool {
self.flags.contains(Flags::UPGRADE)
}
pub fn keepalive(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE) && !self.flags.contains(Flags::UPGRADE)
}
@@ -169,13 +176,11 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
buffer.extend_from_slice(reason);
// content length
let mut len_is_set = true;
match info.length {
ResponseLength::Chunked => {
buffer.extend_from_slice(b"\r\ntransfer-encoding: chunked\r\n")
}
ResponseLength::Zero => {
buffer.extend_from_slice(b"\r\ncontent-length: 0\r\n")
}
ResponseLength::Length(len) => {
helpers::write_content_length(len, &mut buffer)
}
@@ -184,6 +189,10 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
write!(buffer.writer(), "{}", len)?;
buffer.extend_from_slice(b"\r\n");
}
ResponseLength::Zero => {
len_is_set = false;
buffer.extend_from_slice(b"\r\n");
}
ResponseLength::None => buffer.extend_from_slice(b"\r\n"),
}
if let Some(ce) = info.content_encoding {
@@ -196,47 +205,57 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
let mut pos = 0;
let mut has_date = false;
let mut remaining = buffer.remaining_mut();
unsafe {
let mut buf = &mut *(buffer.bytes_mut() as *mut [u8]);
for (key, value) in msg.headers() {
match *key {
TRANSFER_ENCODING => continue,
CONTENT_ENCODING => if encoding != ContentEncoding::Identity {
continue;
},
CONTENT_LENGTH => match info.length {
ResponseLength::None => (),
_ => continue,
},
DATE => {
has_date = true;
let mut buf = unsafe { &mut *(buffer.bytes_mut() as *mut [u8]) };
for (key, value) in msg.headers() {
match *key {
TRANSFER_ENCODING => continue,
CONTENT_ENCODING => if encoding != ContentEncoding::Identity {
continue;
},
CONTENT_LENGTH => match info.length {
ResponseLength::None => (),
ResponseLength::Zero => {
len_is_set = true;
}
_ => (),
_ => continue,
},
DATE => {
has_date = true;
}
_ => (),
}
let v = value.as_ref();
let k = key.as_str().as_bytes();
let len = k.len() + v.len() + 4;
if len > remaining {
let v = value.as_ref();
let k = key.as_str().as_bytes();
let len = k.len() + v.len() + 4;
if len > remaining {
unsafe {
buffer.advance_mut(pos);
pos = 0;
buffer.reserve(len);
remaining = buffer.remaining_mut();
}
pos = 0;
buffer.reserve(len);
remaining = buffer.remaining_mut();
unsafe {
buf = &mut *(buffer.bytes_mut() as *mut _);
}
buf[pos..pos + k.len()].copy_from_slice(k);
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
buf[pos..pos + k.len()].copy_from_slice(k);
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
unsafe {
buffer.advance_mut(pos);
}
if !len_is_set {
buffer.extend_from_slice(b"content-length: 0\r\n")
}
// optimized date header, set_date writes \r\n
if !has_date {


@@ -2,7 +2,7 @@ use std::collections::VecDeque;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::rc::Rc;
use std::time::{Duration, Instant};
use std::time::Instant;
use std::{cmp, io, mem};
use bytes::{Buf, Bytes};
@@ -19,15 +19,16 @@ use http::{StatusCode, Version};
use payload::{Payload, PayloadStatus, PayloadWriter};
use uri::Url;
use super::error::ServerError;
use super::error::{HttpDispatchError, ServerError};
use super::h2writer::H2Writer;
use super::input::PayloadType;
use super::settings::WorkerSettings;
use super::settings::ServiceConfig;
use super::{HttpHandler, HttpHandlerTask, IoStream, Writer};
bitflags! {
struct Flags: u8 {
const DISCONNECTED = 0b0000_0010;
const DISCONNECTED = 0b0000_0001;
const SHUTDOWN = 0b0000_0010;
}
}
@@ -38,12 +39,13 @@ where
H: HttpHandler + 'static,
{
flags: Flags,
settings: Rc<WorkerSettings<H>>,
settings: ServiceConfig<H>,
addr: Option<SocketAddr>,
state: State<IoWrapper<T>>,
tasks: VecDeque<Entry<H>>,
keepalive_timer: Option<Delay>,
extensions: Option<Rc<Extensions>>,
ka_expire: Instant,
ka_timer: Option<Delay>,
}
enum State<T: AsyncRead + AsyncWrite> {
@@ -58,10 +60,20 @@ where
H: HttpHandler + 'static,
{
pub fn new(
settings: Rc<WorkerSettings<H>>, io: T, addr: Option<SocketAddr>, buf: Bytes,
keepalive_timer: Option<Delay>,
settings: ServiceConfig<H>, io: T, buf: Bytes, keepalive_timer: Option<Delay>,
) -> Self {
let addr = io.peer_addr();
let extensions = io.extensions();
// keep-alive timeout
let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer {
(delay.deadline(), Some(delay))
} else if let Some(delay) = settings.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(settings.now(), None)
};
Http2 {
flags: Flags::empty(),
tasks: VecDeque::new(),
@@ -72,36 +84,31 @@ where
addr,
settings,
extensions,
keepalive_timer,
ka_expire,
ka_timer,
}
}
pub(crate) fn shutdown(&mut self) {
self.state = State::Empty;
self.tasks.clear();
self.keepalive_timer.take();
}
pub fn settings(&self) -> &WorkerSettings<H> {
self.settings.as_ref()
pub fn settings(&self) -> &ServiceConfig<H> {
&self.settings
}
pub fn poll(&mut self) -> Poll<(), ()> {
pub fn poll(&mut self) -> Poll<(), HttpDispatchError> {
self.poll_keepalive()?;
// server
if let State::Connection(ref mut conn) = self.state {
// keep-alive timer
if let Some(ref mut timeout) = self.keepalive_timer {
match timeout.poll() {
Ok(Async::Ready(_)) => {
trace!("Keep-alive timeout, close connection");
return Ok(Async::Ready(()));
}
Ok(Async::NotReady) => (),
Err(_) => unreachable!(),
}
}
loop {
// shutdown connection
if self.flags.contains(Flags::SHUTDOWN) {
return conn.poll_close().map_err(|e| e.into());
}
let mut not_ready = true;
let disconnected = self.flags.contains(Flags::DISCONNECTED);
@@ -216,51 +223,30 @@ where
not_ready = false;
let (parts, body) = req.into_parts();
// stop keepalive timer
self.keepalive_timer.take();
// update keep-alive expire
if self.ka_timer.is_some() {
if let Some(expire) = self.settings.keep_alive_expire() {
self.ka_expire = expire;
}
}
self.tasks.push_back(Entry::new(
parts,
body,
resp,
self.addr,
&self.settings,
self.settings.clone(),
self.extensions.clone(),
));
}
Ok(Async::NotReady) => {
// start keep-alive timer
if self.tasks.is_empty() {
if self.settings.keep_alive_enabled() {
let keep_alive = self.settings.keep_alive();
if keep_alive > 0 && self.keepalive_timer.is_none() {
trace!("Start keep-alive timer");
let mut timeout = Delay::new(
Instant::now()
+ Duration::new(keep_alive, 0),
);
// register timeout
let _ = timeout.poll();
self.keepalive_timer = Some(timeout);
}
} else {
// keep-alive disabled, drop connection
return conn.poll_close().map_err(|e| {
error!("Error during connection close: {}", e)
});
}
} else {
// keep-alive unset, rely on operating system
return Ok(Async::NotReady);
}
}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => {
trace!("Connection error: {}", err);
self.flags.insert(Flags::DISCONNECTED);
self.flags.insert(Flags::SHUTDOWN);
for entry in &mut self.tasks {
entry.task.disconnected()
}
self.keepalive_timer.take();
continue;
}
}
}
@@ -268,9 +254,7 @@ where
if not_ready {
if self.tasks.is_empty() && self.flags.contains(Flags::DISCONNECTED)
{
return conn
.poll_close()
.map_err(|e| error!("Error during connection close: {}", e));
return conn.poll_close().map_err(|e| e.into());
} else {
return Ok(Async::NotReady);
}
@@ -285,7 +269,7 @@ where
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => {
trace!("Error handling connection: {}", err);
return Err(());
return Err(err.into());
}
}
} else {
@@ -294,6 +278,37 @@ where
self.poll()
}
/// Process the keep-alive timer: drop the connection when it expires with no
/// requests in flight, otherwise reset the timer.
fn poll_keepalive(&mut self) -> Result<(), HttpDispatchError> {
if let Some(ref mut timer) = self.ka_timer {
match timer.poll() {
Ok(Async::Ready(_)) => {
// if the timer fires during shutdown, just drop the connection
if self.flags.contains(Flags::SHUTDOWN) {
return Err(HttpDispatchError::ShutdownTimeout);
}
if timer.deadline() >= self.ka_expire {
// check for any outstanding request handling
if self.tasks.is_empty() {
return Err(HttpDispatchError::ShutdownTimeout);
} else if let Some(dl) = self.settings.keep_alive_expire() {
timer.reset(dl)
}
} else {
timer.reset(self.ka_expire)
}
}
Ok(Async::NotReady) => (),
Err(e) => {
error!("Timer error {:?}", e);
return Err(HttpDispatchError::Unknown);
}
}
}
Ok(())
}
}
bitflags! {
@@ -343,7 +358,7 @@ struct Entry<H: HttpHandler + 'static> {
impl<H: HttpHandler + 'static> Entry<H> {
fn new(
parts: Parts, recv: RecvStream, resp: SendResponse<Bytes>,
addr: Option<SocketAddr>, settings: &Rc<WorkerSettings<H>>,
addr: Option<SocketAddr>, settings: ServiceConfig<H>,
extensions: Option<Rc<Extensions>>,
) -> Entry<H>
where
@@ -368,28 +383,20 @@ impl<H: HttpHandler + 'static> Entry<H> {
let psender = PayloadType::new(msg.headers(), psender);
// start request processing
let mut task = None;
for h in settings.handlers().iter() {
msg = match h.handle(msg) {
Ok(t) => {
task = Some(t);
break;
}
Err(msg) => msg,
}
}
let task = match settings.handler().handle(msg) {
Ok(task) => EntryPipe::Task(task),
Err(_) => EntryPipe::Error(ServerError::err(
Version::HTTP_2,
StatusCode::NOT_FOUND,
)),
};
Entry {
task: task.map(EntryPipe::Task).unwrap_or_else(|| {
EntryPipe::Error(ServerError::err(
Version::HTTP_2,
StatusCode::NOT_FOUND,
))
}),
payload: psender,
stream: H2Writer::new(resp, Rc::clone(settings)),
flags: EntryFlags::empty(),
task,
recv,
payload: psender,
stream: H2Writer::new(resp, settings),
flags: EntryFlags::empty(),
}
}


@@ -1,25 +1,27 @@
#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))]
#![cfg_attr(
feature = "cargo-clippy",
allow(clippy::redundant_field_names)
)]
use std::{cmp, io};
use bytes::{Bytes, BytesMut};
use futures::{Async, Poll};
use http2::server::SendResponse;
use http2::{Reason, SendStream};
use modhttp::Response;
use std::rc::Rc;
use std::{cmp, io};
use http::{HttpTryFrom, Method, Version};
use super::helpers;
use super::message::Request;
use super::output::{Output, ResponseInfo, ResponseLength};
use super::settings::WorkerSettings;
use super::settings::ServiceConfig;
use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
use body::{Binary, Body};
use header::ContentEncoding;
use http::header::{
HeaderValue, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
};
use http::{HttpTryFrom, Method, Version};
use httpresponse::HttpResponse;
const CHUNK_SIZE: usize = 16_384;
@@ -40,13 +42,11 @@ pub(crate) struct H2Writer<H: 'static> {
written: u64,
buffer: Output,
buffer_capacity: usize,
settings: Rc<WorkerSettings<H>>,
settings: ServiceConfig<H>,
}
impl<H: 'static> H2Writer<H> {
pub fn new(
respond: SendResponse<Bytes>, settings: Rc<WorkerSettings<H>>,
) -> H2Writer<H> {
pub fn new(respond: SendResponse<Bytes>, settings: ServiceConfig<H>) -> H2Writer<H> {
H2Writer {
stream: None,
flags: Flags::empty(),

src/server/handler.rs Normal file

@@ -0,0 +1,208 @@
use futures::{Async, Future, Poll};
use super::message::Request;
use super::Writer;
use error::Error;
/// Low level http request handler
#[allow(unused_variables)]
pub trait HttpHandler: 'static {
/// Request handling task
type Task: HttpHandlerTask;
/// Handle request
fn handle(&self, req: Request) -> Result<Self::Task, Request>;
}
impl HttpHandler for Box<HttpHandler<Task = Box<HttpHandlerTask>>> {
type Task = Box<HttpHandlerTask>;
fn handle(&self, req: Request) -> Result<Box<HttpHandlerTask>, Request> {
self.as_ref().handle(req)
}
}
/// Low level http request handler
pub trait HttpHandlerTask {
/// Poll task; this method is used before or after the *io* object is available
fn poll_completed(&mut self) -> Poll<(), Error> {
Ok(Async::Ready(()))
}
/// Poll task when *io* object is available
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error>;
/// Connection is disconnected
fn disconnected(&mut self) {}
}
impl HttpHandlerTask for Box<HttpHandlerTask> {
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
self.as_mut().poll_io(io)
}
}
pub(super) struct HttpHandlerTaskFut<T: HttpHandlerTask> {
task: T,
}
impl<T: HttpHandlerTask> HttpHandlerTaskFut<T> {
pub(crate) fn new(task: T) -> Self {
Self { task }
}
}
impl<T: HttpHandlerTask> Future for HttpHandlerTaskFut<T> {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
self.task.poll_completed().map_err(|_| ())
}
}
/// Conversion helper trait
pub trait IntoHttpHandler {
/// The associated type which is result of conversion.
type Handler: HttpHandler;
/// Convert into `HttpHandler` object.
fn into_handler(self) -> Self::Handler;
}
impl<T: HttpHandler> IntoHttpHandler for T {
type Handler = T;
fn into_handler(self) -> Self::Handler {
self
}
}
impl<T: IntoHttpHandler> IntoHttpHandler for Vec<T> {
type Handler = VecHttpHandler<T::Handler>;
fn into_handler(self) -> Self::Handler {
VecHttpHandler(self.into_iter().map(|item| item.into_handler()).collect())
}
}
#[doc(hidden)]
pub struct VecHttpHandler<H: HttpHandler>(Vec<H>);
impl<H: HttpHandler> HttpHandler for VecHttpHandler<H> {
type Task = H::Task;
fn handle(&self, mut req: Request) -> Result<Self::Task, Request> {
for h in &self.0 {
req = match h.handle(req) {
Ok(task) => return Ok(task),
Err(e) => e,
};
}
Err(req)
}
}
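Combined with the blanket impl, the `Vec` impl above means either a single application or a list of applications can act as the server's handler, each one being tried in order until one accepts the request. A minimal sketch, assuming the crate's public `App` type and the re-exported `IntoHttpHandler` trait:
use actix_web::server::IntoHttpHandler;
use actix_web::App;

fn build_handlers() {
    // one application -> its HttpApplication handler
    let single = App::new().into_handler();
    // several applications -> VecHttpHandler, tried in registration order
    let multi = vec![App::new(), App::new()].into_handler();
    let _ = (single, multi);
}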
macro_rules! http_handler ({$EN:ident, $(($n:tt, $T:ident)),+} => {
impl<$($T: HttpHandler,)+> HttpHandler for ($($T,)+) {
type Task = $EN<$($T,)+>;
fn handle(&self, mut req: Request) -> Result<Self::Task, Request> {
$(
req = match self.$n.handle(req) {
Ok(task) => return Ok($EN::$T(task)),
Err(e) => e,
};
)+
Err(req)
}
}
#[doc(hidden)]
pub enum $EN<$($T: HttpHandler,)+> {
$($T ($T::Task),)+
}
impl<$($T: HttpHandler,)+> HttpHandlerTask for $EN<$($T,)+>
{
fn poll_completed(&mut self) -> Poll<(), Error> {
match self {
$($EN :: $T(ref mut task) => task.poll_completed(),)+
}
}
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
match self {
$($EN::$T(ref mut task) => task.poll_io(io),)+
}
}
/// Connection is disconnected
fn disconnected(&mut self) {
match self {
$($EN::$T(ref mut task) => task.disconnected(),)+
}
}
}
});
http_handler!(HttpHandlerTask1, (0, A));
http_handler!(HttpHandlerTask2, (0, A), (1, B));
http_handler!(HttpHandlerTask3, (0, A), (1, B), (2, C));
http_handler!(HttpHandlerTask4, (0, A), (1, B), (2, C), (3, D));
http_handler!(HttpHandlerTask5, (0, A), (1, B), (2, C), (3, D), (4, E));
http_handler!(
HttpHandlerTask6,
(0, A),
(1, B),
(2, C),
(3, D),
(4, E),
(5, F)
);
http_handler!(
HttpHandlerTask7,
(0, A),
(1, B),
(2, C),
(3, D),
(4, E),
(5, F),
(6, G)
);
http_handler!(
HttpHandlerTask8,
(0, A),
(1, B),
(2, C),
(3, D),
(4, E),
(5, F),
(6, G),
(7, H)
);
http_handler!(
HttpHandlerTask9,
(0, A),
(1, B),
(2, C),
(3, D),
(4, E),
(5, F),
(6, G),
(7, H),
(8, I)
);
http_handler!(
HttpHandlerTask10,
(0, A),
(1, B),
(2, C),
(3, D),
(4, E),
(5, F),
(6, G),
(7, H),
(8, I),
(9, J)
);


@@ -29,20 +29,24 @@ pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesM
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
} else {
let d1 = n << 1;
curr -= 2;
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
@@ -74,7 +78,7 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().offset(d1 as isize),
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
@@ -90,7 +94,7 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().offset(d1 as isize),
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
@@ -107,47 +111,55 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
unsafe {
let mut curr: isize = 39;
let mut buf: [u8; 41] = mem::uninitialized();
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::uninitialized() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
} else {
let d1 = n << 1;
curr -= 2;
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,


@@ -1,46 +1,48 @@
use std::marker::PhantomData;
use std::rc::Rc;
use std::sync::Arc;
use std::{io, mem, net, time};
use std::{fmt, io, mem, net};
use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, System};
use actix::{Addr, System};
use actix_net::server::Server;
use actix_net::service::NewService;
use actix_net::ssl;
use futures::{Future, Stream};
use net2::{TcpBuilder, TcpStreamExt};
use net2::TcpBuilder;
use num_cpus;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_tcp::TcpStream;
#[cfg(feature = "tls")]
use native_tls::TlsAcceptor;
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
use openssl::ssl::SslAcceptorBuilder;
#[cfg(feature = "rust-tls")]
use rustls::ServerConfig;
use super::channel::{HttpChannel, WrapperStream};
use super::server::{Connections, Server, Service, ServiceHandler};
use super::settings::{ServerSettings, WorkerSettings};
use super::worker::{Conn, Socket};
use super::{
AcceptorService, HttpHandler, IntoAsyncIo, IntoHttpHandler, IoStream, KeepAlive,
Token,
};
use super::acceptor::{AcceptorServiceFactory, DefaultAcceptor};
use super::builder::{HttpServiceBuilder, ServiceProvider};
use super::{IntoHttpHandler, KeepAlive};
struct Socket {
scheme: &'static str,
lst: net::TcpListener,
addr: net::SocketAddr,
handler: Box<ServiceProvider>,
}
/// An HTTP Server
///
/// By default it serves HTTP2 when HTTPS is enabled; in order to
/// change that, use the `ServerFlags` that can be provided to the
/// acceptor service.
pub struct HttpServer<H>
pub struct HttpServer<H, F>
where
H: IntoHttpHandler + 'static,
F: Fn() -> H + Send + Clone,
{
factory: Arc<Fn() -> Vec<H> + Send + Sync>,
host: Option<String>,
keep_alive: KeepAlive,
pub(super) factory: F,
pub(super) host: Option<String>,
pub(super) keep_alive: KeepAlive,
pub(super) client_timeout: u64,
pub(super) client_shutdown: u64,
backlog: i32,
threads: usize,
exit: bool,
@@ -50,24 +52,18 @@ where
maxconn: usize,
maxconnrate: usize,
sockets: Vec<Socket>,
handlers: Vec<Box<IoStreamHandler<H::Handler, net::TcpStream>>>,
}
impl<H> HttpServer<H>
impl<H, F> HttpServer<H, F>
where
H: IntoHttpHandler + 'static,
F: Fn() -> H + Send + Clone + 'static,
{
/// Create new http server with application factory
pub fn new<F, U>(factory: F) -> Self
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
{
let f = move || (factory)().into_iter().collect();
pub fn new(factory: F) -> HttpServer<H, F> {
HttpServer {
factory,
threads: num_cpus::get(),
factory: Arc::new(f),
host: None,
backlog: 2048,
keep_alive: KeepAlive::Timeout(5),
@@ -75,11 +71,11 @@ where
exit: false,
no_http2: false,
no_signals: false,
maxconn: 102_400,
maxconn: 25_600,
maxconnrate: 256,
// settings: None,
client_timeout: 5000,
client_shutdown: 5000,
sockets: Vec::new(),
handlers: Vec::new(),
}
}
@@ -112,7 +108,7 @@ where
/// All socket listeners will stop accepting connections when this limit is reached
/// for each worker.
///
/// By default max connections is set to 100k.
/// By default max connections is set to 25k.
pub fn maxconn(mut self, num: usize) -> Self {
self.maxconn = num;
self
@@ -137,6 +133,33 @@ where
self
}
/// Set server client timeout in milliseconds for first request.
///
/// Defines a timeout for reading client request header. If a client does not transmit
/// the entire set of headers within this time, the request is terminated with
/// the 408 (Request Time-out) error.
///
/// To disable timeout set value to 0.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
self
}
/// Set server connection shutdown timeout in milliseconds.
///
/// Defines a timeout for connection shutdown. If the shutdown procedure does not complete
/// within this time, the connection is dropped.
///
/// To disable timeout set value to 0.
///
/// By default client shutdown timeout is set to 5000 milliseconds.
pub fn client_shutdown(mut self, val: u64) -> Self {
self.client_shutdown = val;
self
}
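Both settings are plain builder methods that take milliseconds and return `self`, so they chain with the rest of the configuration. A brief usage sketch (the factory and the values are placeholders, not recommendations):
fn configure() {
    let srv = actix_web::server::HttpServer::new(|| actix_web::App::new())
        .client_timeout(5_000)    // answer with 408 if request headers do not arrive in 5s
        .client_shutdown(1_000);  // allow up to 1s for connection shutdown
    let _ = srv;
}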
/// Set server host name.
///
/// Host name is used by application router as a hostname for url
@@ -174,11 +197,6 @@ where
}
/// Disable `HTTP/2` support
// #[doc(hidden)]
// #[deprecated(
// since = "0.7.4",
// note = "please use acceptor service with proper ServerFlags param"
// )]
pub fn no_http2(mut self) -> Self {
self.no_http2 = true;
self
@@ -196,10 +214,7 @@ where
/// and the user should be presented with an enumeration of which
/// socket requires which protocol.
pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> {
self.handlers
.iter()
.map(|s| (s.addr(), s.scheme()))
.collect()
self.sockets.iter().map(|s| (s.addr, s.scheme)).collect()
}
/// Use listener for accepting incoming connection requests
@@ -207,11 +222,16 @@ where
/// HttpServer does not change any configuration for TcpListener,
/// it needs to be configured before passing it to the listen() method.
pub fn listen(mut self, lst: net::TcpListener) -> Self {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers
.push(Box::new(SimpleHandler::new(lst.local_addr().unwrap())));
self.sockets.push(Socket { lst, addr, token });
self.sockets.push(Socket {
lst,
addr,
scheme: "http",
handler: Box::new(HttpServiceBuilder::new(
self.factory.clone(),
DefaultAcceptor,
)),
});
self
}
@@ -220,15 +240,16 @@ where
/// Use listener for accepting incoming connection requests
pub fn listen_with<A>(mut self, lst: net::TcpListener, acceptor: A) -> Self
where
A: AcceptorService<TcpStream> + Send + 'static,
A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
{
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers.push(Box::new(StreamHandler::new(
lst.local_addr().unwrap(),
acceptor,
)));
self.sockets.push(Socket { lst, addr, token });
self.sockets.push(Socket {
lst,
addr,
scheme: "https",
handler: Box::new(HttpServiceBuilder::new(self.factory.clone(), acceptor)),
});
self
}
@@ -239,36 +260,42 @@ where
/// HttpServer does not change any configuration for TcpListener,
/// it needs to be configured before passing it to the listen() method.
pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self {
use super::NativeTlsAcceptor;
use actix_net::service::NewServiceExt;
self.listen_with(lst, NativeTlsAcceptor::new(acceptor))
self.listen_with(lst, move || {
ssl::NativeTlsAcceptor::new(acceptor.clone()).map_err(|_| ())
})
}
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
/// Use listener for accepting incoming tls connection requests
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn listen_ssl(
self, lst: net::TcpListener, builder: SslAcceptorBuilder,
) -> io::Result<Self> {
use super::{OpensslAcceptor, ServerFlags};
use super::{openssl_acceptor_with_flags, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support
let flags = if self.no_http2 {
ServerFlags::HTTP1
} else {
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
Ok(self.listen_with(lst, OpensslAcceptor::with_flags(builder, flags)?))
let acceptor = openssl_acceptor_with_flags(builder, flags)?;
Ok(self.listen_with(lst, move || {
ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ())
}))
}
#[cfg(feature = "rust-tls")]
/// Use listener for accepting incoming tls connection requests
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn listen_rustls(self, lst: net::TcpListener, builder: ServerConfig) -> Self {
pub fn listen_rustls(self, lst: net::TcpListener, config: ServerConfig) -> Self {
use super::{RustlsAcceptor, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support
let flags = if self.no_http2 {
@@ -277,7 +304,9 @@ where
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
self.listen_with(lst, RustlsAcceptor::with_flags(builder, flags))
self.listen_with(lst, move || {
RustlsAcceptor::with_flags(config.clone(), flags).map_err(|_| ())
})
}
/// The socket address to bind
@@ -287,11 +316,7 @@ where
let sockets = self.bind2(addr)?;
for lst in sockets {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers
.push(Box::new(SimpleHandler::new(lst.local_addr().unwrap())));
self.sockets.push(Socket { lst, addr, token })
self = self.listen(lst);
}
Ok(self)
@@ -299,22 +324,29 @@ where
/// Start listening for incoming connections with supplied acceptor.
#[doc(hidden)]
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
#[cfg_attr(
feature = "cargo-clippy",
allow(clippy::needless_pass_by_value)
)]
pub fn bind_with<S, A>(mut self, addr: S, acceptor: A) -> io::Result<Self>
where
S: net::ToSocketAddrs,
A: AcceptorService<TcpStream> + Send + 'static,
A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
{
let sockets = self.bind2(addr)?;
for lst in sockets {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers.push(Box::new(StreamHandler::new(
lst.local_addr().unwrap(),
acceptor.clone(),
)));
self.sockets.push(Socket { lst, addr, token })
self.sockets.push(Socket {
lst,
addr,
scheme: "https",
handler: Box::new(HttpServiceBuilder::new(
self.factory.clone(),
acceptor.clone(),
)),
});
}
Ok(self)
@@ -357,12 +389,15 @@ where
pub fn bind_tls<S: net::ToSocketAddrs>(
self, addr: S, acceptor: TlsAcceptor,
) -> io::Result<Self> {
use super::NativeTlsAcceptor;
use actix_net::service::NewServiceExt;
use actix_net::ssl::NativeTlsAcceptor;
self.bind_with(addr, NativeTlsAcceptor::new(acceptor))
self.bind_with(addr, move || {
NativeTlsAcceptor::new(acceptor.clone()).map_err(|_| ())
})
}
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
/// Start listening for incoming tls connections.
///
/// This method sets alpn protocols to "h2" and "http/1.1"
@@ -370,16 +405,20 @@ where
where
S: net::ToSocketAddrs,
{
use super::{OpensslAcceptor, ServerFlags};
use super::{openssl_acceptor_with_flags, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support
let flags = if !self.no_http2 {
let flags = if self.no_http2 {
ServerFlags::HTTP1
} else {
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
self.bind_with(addr, OpensslAcceptor::with_flags(builder, flags)?)
let acceptor = openssl_acceptor_with_flags(builder, flags)?;
self.bind_with(addr, move || {
ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ())
})
}
#[cfg(feature = "rust-tls")]
@@ -390,76 +429,22 @@ where
self, addr: S, builder: ServerConfig,
) -> io::Result<Self> {
use super::{RustlsAcceptor, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support
let flags = if !self.no_http2 {
let flags = if self.no_http2 {
ServerFlags::HTTP1
} else {
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
self.bind_with(addr, RustlsAcceptor::with_flags(builder, flags))
}
}
impl<H: IntoHttpHandler> Into<(Box<Service>, Vec<(Token, net::TcpListener)>)>
for HttpServer<H>
{
fn into(mut self) -> (Box<Service>, Vec<(Token, net::TcpListener)>) {
let sockets: Vec<_> = mem::replace(&mut self.sockets, Vec::new())
.into_iter()
.map(|item| (item.token, item.lst))
.collect();
(
Box::new(HttpService {
factory: self.factory,
host: self.host,
keep_alive: self.keep_alive,
handlers: self.handlers,
}),
sockets,
)
}
}
struct HttpService<H: IntoHttpHandler> {
factory: Arc<Fn() -> Vec<H> + Send + Sync>,
host: Option<String>,
keep_alive: KeepAlive,
handlers: Vec<Box<IoStreamHandler<H::Handler, net::TcpStream>>>,
}
impl<H: IntoHttpHandler + 'static> Service for HttpService<H> {
fn clone(&self) -> Box<Service> {
Box::new(HttpService {
factory: self.factory.clone(),
host: self.host.clone(),
keep_alive: self.keep_alive,
handlers: self.handlers.iter().map(|v| v.clone()).collect(),
self.bind_with(addr, move || {
RustlsAcceptor::with_flags(builder.clone(), flags).map_err(|_| ())
})
}
fn create(&self, conns: Connections) -> Box<ServiceHandler> {
let addr = self.handlers[0].addr();
let s = ServerSettings::new(Some(addr), &self.host, false);
let apps: Vec<_> = (*self.factory)()
.into_iter()
.map(|h| h.into_handler())
.collect();
let handlers = self.handlers.iter().map(|h| h.clone()).collect();
Box::new(HttpServiceHandler::new(
apps,
handlers,
self.keep_alive,
s,
conns,
))
}
}
impl<H: IntoHttpHandler> HttpServer<H> {
impl<H: IntoHttpHandler, F: Fn() -> H + Send + Clone> HttpServer<H, F> {
/// Start listening for incoming connections.
///
/// This method starts a number of http workers in separate threads.
@@ -485,11 +470,12 @@ impl<H: IntoHttpHandler> HttpServer<H> {
/// sys.run(); // <- Run actix system, this method starts all async processes
/// }
/// ```
pub fn start(self) -> Addr<Server> {
pub fn start(mut self) -> Addr<Server> {
ssl::max_concurrent_ssl_connect(self.maxconnrate);
let mut srv = Server::new()
.workers(self.threads)
.maxconn(self.maxconn)
.maxconnrate(self.maxconnrate)
.shutdown_timeout(self.shutdown_timeout);
srv = if self.exit { srv.system_exit() } else { srv };
@@ -499,7 +485,31 @@ impl<H: IntoHttpHandler> HttpServer<H> {
srv
};
srv.service(self).start()
let sockets = mem::replace(&mut self.sockets, Vec::new());
for socket in sockets {
let host = self
.host
.as_ref()
.map(|h| h.to_owned())
.unwrap_or_else(|| format!("{}", socket.addr));
let (secure, client_shutdown) = if socket.scheme == "https" {
(true, self.client_shutdown)
} else {
(false, 0)
};
srv = socket.handler.register(
srv,
socket.lst,
host,
socket.addr,
self.keep_alive,
secure,
self.client_timeout,
client_shutdown,
);
}
srv.start()
}
/// Spawn new thread and start listening for incoming connections.
@@ -527,279 +537,35 @@ impl<H: IntoHttpHandler> HttpServer<H> {
self.start();
sys.run();
}
}
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming connections from a stream.
///
/// This method uses only one thread for handling incoming connections.
pub fn start_incoming<T, S>(self, stream: S, secure: bool)
where
S: Stream<Item = T, Error = io::Error> + Send + 'static,
T: AsyncRead + AsyncWrite + Send + 'static,
{
// set server settings
let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
let srv_settings = ServerSettings::new(Some(addr), &self.host, secure);
let apps: Vec<_> = (*self.factory)()
.into_iter()
.map(|h| h.into_handler())
.collect();
let settings = WorkerSettings::create(
apps,
self.keep_alive,
srv_settings,
Connections::default(),
);
// start server
HttpIncoming::create(move |ctx| {
ctx.add_message_stream(stream.map_err(|_| ()).map(move |t| Conn {
io: WrapperStream::new(t),
handler: Token::new(0),
token: Token::new(0),
peer: None,
}));
HttpIncoming { settings }
});
}
}
struct HttpIncoming<H: HttpHandler> {
settings: Rc<WorkerSettings<H>>,
}
impl<H> Actor for HttpIncoming<H>
where
H: HttpHandler,
{
type Context = Context<Self>;
}
impl<T, H> Handler<Conn<T>> for HttpIncoming<H>
where
T: IoStream,
H: HttpHandler,
{
type Result = ();
fn handle(&mut self, msg: Conn<T>, _: &mut Context<Self>) -> Self::Result {
Arbiter::spawn(HttpChannel::new(
Rc::clone(&self.settings),
msg.io,
msg.peer,
));
}
}
struct HttpServiceHandler<H>
where
H: HttpHandler + 'static,
{
settings: Rc<WorkerSettings<H>>,
handlers: Vec<Box<IoStreamHandler<H, net::TcpStream>>>,
tcp_ka: Option<time::Duration>,
}
impl<H: HttpHandler + 'static> HttpServiceHandler<H> {
fn new(
apps: Vec<H>, handlers: Vec<Box<IoStreamHandler<H, net::TcpStream>>>,
keep_alive: KeepAlive, settings: ServerSettings, conns: Connections,
) -> HttpServiceHandler<H> {
let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive {
Some(time::Duration::new(val as u64, 0))
} else {
None
};
let settings = WorkerSettings::create(apps, keep_alive, settings, conns);
HttpServiceHandler {
handlers,
tcp_ka,
settings,
/// Register the current http server as an actix-net server service
pub fn register(self, mut srv: Server) -> Server {
for socket in self.sockets {
let host = self
.host
.as_ref()
.map(|h| h.to_owned())
.unwrap_or_else(|| format!("{}", socket.addr));
let (secure, client_shutdown) = if socket.scheme == "https" {
(true, self.client_shutdown)
} else {
(false, 0)
};
srv = socket.handler.register(
srv,
socket.lst,
host,
socket.addr,
self.keep_alive,
secure,
self.client_timeout,
client_shutdown,
);
}
srv
}
}
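For reference, a minimal usage sketch of the new `register()` entry point. The handler `index`, the listening address, and the worker count are illustrative assumptions; the `Server` builder is actix-net's, as used in `start()` above, and this would run inside a started actix System.

    // Hedged sketch: registering an HttpServer into an externally managed
    // actix-net Server instead of calling HttpServer::start() directly.
    let srv = actix_net::server::Server::new().workers(4);
    let srv = HttpServer::new(|| App::new().resource("/", |r| r.f(index)))
        .bind("127.0.0.1:8080")
        .unwrap()
        .register(srv);
    srv.start();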
impl<H> ServiceHandler for HttpServiceHandler<H>
where
H: HttpHandler + 'static,
{
fn handle(
&mut self, token: Token, io: net::TcpStream, peer: Option<net::SocketAddr>,
) {
if self.tcp_ka.is_some() && io.set_keepalive(self.tcp_ka).is_err() {
error!("Can not set socket keep-alive option");
}
self.handlers[token.0].handle(Rc::clone(&self.settings), io, peer);
}
fn shutdown(&self, force: bool) {
if force {
self.settings
.head()
.traverse(|ch: &mut HttpChannel<TcpStream, H>| ch.shutdown());
}
}
}
struct SimpleHandler<Io> {
addr: net::SocketAddr,
io: PhantomData<Io>,
}
impl<Io: IntoAsyncIo> Clone for SimpleHandler<Io> {
fn clone(&self) -> Self {
SimpleHandler {
addr: self.addr,
io: PhantomData,
}
}
}
impl<Io: IntoAsyncIo> SimpleHandler<Io> {
fn new(addr: net::SocketAddr) -> Self {
SimpleHandler {
addr,
io: PhantomData,
}
}
}
impl<H, Io> IoStreamHandler<H, Io> for SimpleHandler<Io>
where
H: HttpHandler,
Io: IntoAsyncIo + Send + 'static,
Io::Io: IoStream,
{
fn addr(&self) -> net::SocketAddr {
self.addr
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
Box::new(Clone::clone(self))
}
fn scheme(&self) -> &'static str {
"http"
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
let mut io = match io.into_async_io() {
Ok(io) => io,
Err(err) => {
trace!("Failed to create async io: {}", err);
return;
}
};
let _ = io.set_nodelay(true);
Arbiter::spawn(HttpChannel::new(h, io, peer));
}
}
struct StreamHandler<A, Io> {
acceptor: A,
addr: net::SocketAddr,
io: PhantomData<Io>,
}
impl<Io: IntoAsyncIo, A: AcceptorService<Io::Io>> StreamHandler<A, Io> {
fn new(addr: net::SocketAddr, acceptor: A) -> Self {
StreamHandler {
addr,
acceptor,
io: PhantomData,
}
}
}
impl<Io: IntoAsyncIo, A: AcceptorService<Io::Io>> Clone for StreamHandler<A, Io> {
fn clone(&self) -> Self {
StreamHandler {
addr: self.addr,
acceptor: self.acceptor.clone(),
io: PhantomData,
}
}
}
impl<H, Io, A> IoStreamHandler<H, Io> for StreamHandler<A, Io>
where
H: HttpHandler,
Io: IntoAsyncIo + Send + 'static,
Io::Io: IoStream,
A: AcceptorService<Io::Io> + Send + 'static,
{
fn addr(&self) -> net::SocketAddr {
self.addr
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
Box::new(Clone::clone(self))
}
fn scheme(&self) -> &'static str {
self.acceptor.scheme()
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
let mut io = match io.into_async_io() {
Ok(io) => io,
Err(err) => {
trace!("Failed to create async io: {}", err);
return;
}
};
let _ = io.set_nodelay(true);
let rate = h.connection_rate();
Arbiter::spawn(self.acceptor.accept(io).then(move |res| {
drop(rate);
match res {
Ok(io) => Arbiter::spawn(HttpChannel::new(h, io, peer)),
Err(err) => trace!("Can not establish connection: {}", err),
}
Ok(())
}))
}
}
impl<H, Io: 'static> IoStreamHandler<H, Io> for Box<IoStreamHandler<H, Io>>
where
H: HttpHandler,
Io: IntoAsyncIo,
{
fn addr(&self) -> net::SocketAddr {
self.as_ref().addr()
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
self.as_ref().clone()
}
fn scheme(&self) -> &'static str {
self.as_ref().scheme()
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
self.as_ref().handle(h, io, peer)
}
}
trait IoStreamHandler<H, Io>: Send
where
H: HttpHandler,
{
fn clone(&self) -> Box<IoStreamHandler<H, Io>>;
fn addr(&self) -> net::SocketAddr;
fn scheme(&self) -> &'static str;
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>);
}
fn create_tcp_listener(
addr: net::SocketAddr, backlog: i32,
) -> io::Result<net::TcpListener> {

src/server/incoming.rs Normal file

@@ -0,0 +1,69 @@
//! Support for `Stream<Item = T: AsyncRead + AsyncWrite>`, deprecated!
use std::{io, net};
use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message};
use futures::{Future, Stream};
use tokio_io::{AsyncRead, AsyncWrite};
use super::channel::{HttpChannel, WrapperStream};
use super::handler::{HttpHandler, IntoHttpHandler};
use super::http::HttpServer;
use super::settings::{ServerSettings, ServiceConfig};
impl<T: AsyncRead + AsyncWrite + 'static> Message for WrapperStream<T> {
type Result = ();
}
impl<H, F> HttpServer<H, F>
where
H: IntoHttpHandler,
F: Fn() -> H + Send + Clone,
{
#[doc(hidden)]
#[deprecated(since = "0.7.8")]
/// Start listening for incoming connections from a stream.
///
/// This method uses only one thread for handling incoming connections.
pub fn start_incoming<T, S>(self, stream: S, secure: bool)
where
S: Stream<Item = T, Error = io::Error> + 'static,
T: AsyncRead + AsyncWrite + 'static,
{
// set server settings
let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
let apps = (self.factory)().into_handler();
let settings = ServiceConfig::new(
apps,
self.keep_alive,
self.client_timeout,
self.client_shutdown,
ServerSettings::new(addr, "127.0.0.1:8080", secure),
);
// start server
HttpIncoming::create(move |ctx| {
ctx.add_message_stream(stream.map_err(|_| ()).map(WrapperStream::new));
HttpIncoming { settings }
});
}
}
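A hedged usage sketch for this deprecated entry point. The listener address is an illustrative assumption; `tokio_tcp::TcpListener::incoming()` yields `TcpStream` items with `io::Error`, which satisfies the bounds above, and the call assumes a running actix System.

    // Hedged sketch: feed an existing listener's connection stream into
    // start_incoming(); `false` marks the connections as non-secure.
    let addr = "127.0.0.1:8080".parse().unwrap();
    let lst = tokio_tcp::TcpListener::bind(&addr).unwrap();
    server::new(|| App::new().resource("/", |r| r.f(|_| "ok")))
        .start_incoming(lst.incoming(), false);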
struct HttpIncoming<H: HttpHandler> {
settings: ServiceConfig<H>,
}
impl<H: HttpHandler> Actor for HttpIncoming<H> {
type Context = Context<Self>;
}
impl<T, H> Handler<WrapperStream<T>> for HttpIncoming<H>
where
T: AsyncRead + AsyncWrite,
H: HttpHandler,
{
type Result = ();
fn handle(&mut self, msg: WrapperStream<T>, _: &mut Context<Self>) -> Self::Result {
Arbiter::spawn(HttpChannel::new(self.settings.clone(), msg).map_err(|_| ()));
}
}


@@ -1,5 +1,6 @@
use std::cell::{Cell, Ref, RefCell, RefMut};
use std::collections::VecDeque;
use std::fmt;
use std::net::SocketAddr;
use std::rc::Rc;
@@ -220,6 +221,26 @@ impl Request {
}
}
impl fmt::Debug for Request {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"\nRequest {:?} {}:{}",
self.version(),
self.method(),
self.path()
)?;
if let Some(q) = self.uri().query().as_ref() {
writeln!(f, " query: ?{:?}", q)?;
}
writeln!(f, " headers:")?;
for (key, val) in self.headers().iter() {
writeln!(f, " {:?}: {:?}", key, val)?;
}
Ok(())
}
}
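The new `Debug` impl makes request logging straightforward. A hedged sketch of what it prints; the exact header set depends on the client, so the output shown in the comments is only approximate.

    // println!("{:?}", req) renders roughly:
    //
    // Request HTTP/1.1 GET:/index.html
    //   query: ?"a=1"
    //   headers:
    //     "host": "localhost:8080"
    println!("{:?}", req);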
pub(crate) struct RequestPool(
RefCell<VecDeque<Rc<InnerRequest>>>,
RefCell<ServerSettings>,


@@ -12,7 +12,7 @@
//! to serve incoming HTTP requests.
//!
//! As the server uses worker pool, the factory function is restricted to trait bounds
//! `Sync + Send + 'static` so that each worker would be able to accept Application
//! `Send + Clone + 'static` so that each worker would be able to accept Application
//! without a need for synchronization.
//!
//! If you wish to share part of state among all workers you should
@@ -29,13 +29,9 @@
//! Each TLS implementation is provided with [AcceptorService](trait.AcceptorService.html)
//! that describes how HTTP Server accepts connections.
//!
//! For `bind` and `listen` there are corresponding `bind_with` and `listen_with` that accepts
//! For `bind` and `listen` there are corresponding `bind_ssl|tls|rustls` and `listen_ssl|tls|rustls` that accept
//! these services.
//!
//! By default, acceptor would work with both HTTP2 and HTTP1 protocols.
//! But it can be controlled using [ServerFlags](struct.ServerFlags.html) which
//! can be supplied when creating `AcceptorService`.
//!
//! **NOTE:** `native-tls` doesn't support `HTTP2` yet
//!
//! ## Signal handling and shutdown
@@ -87,17 +83,13 @@
//! // load ssl keys
//! let config = load_ssl();
//!
//! // Create acceptor service for only HTTP1 protocol
//! // You can use ::new(config) to leave defaults
//! let acceptor = server::RustlsAcceptor::with_flags(config, actix_web::server::ServerFlags::HTTP1);
//!
//! // create and start server at once
//! server::new(|| {
//! App::new()
//! // register simple handler, handle all methods
//! .resource("/index.html", |r| r.f(index))
//! }))
//! }).bind_with("127.0.0.1:8080", acceptor)
//! }).bind_rustls("127.0.0.1:8443", config)
//! .unwrap()
//! .start();
//!
@@ -106,17 +98,19 @@
//! let _ = sys.run();
//!}
//! ```
use std::net::Shutdown;
use std::net::{Shutdown, SocketAddr};
use std::rc::Rc;
use std::{io, net, time};
use std::{io, time};
use bytes::{BufMut, BytesMut};
use futures::{Async, Future, Poll};
use futures::{Async, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
pub(crate) mod accept;
pub use actix_net::server::{PauseServer, ResumeServer, StopServer};
pub(crate) mod acceptor;
pub(crate) mod builder;
mod channel;
mod error;
pub(crate) mod h1;
@@ -124,35 +118,38 @@ pub(crate) mod h1decoder;
mod h1writer;
mod h2;
mod h2writer;
mod handler;
pub(crate) mod helpers;
mod http;
pub(crate) mod incoming;
pub(crate) mod input;
pub(crate) mod message;
pub(crate) mod output;
mod server;
pub(crate) mod service;
pub(crate) mod settings;
mod ssl;
mod worker;
use actix::Message;
pub use self::message::Request;
pub use self::handler::*;
pub use self::http::HttpServer;
#[doc(hidden)]
pub use self::server::{
ConnectionRateTag, ConnectionTag, Connections, Server, Service, ServiceHandler,
};
pub use self::message::Request;
pub use self::ssl::*;
pub use self::error::{AcceptorError, HttpDispatchError};
pub use self::settings::ServerSettings;
#[doc(hidden)]
pub use self::ssl::*;
pub use self::acceptor::AcceptorTimeout;
#[doc(hidden)]
pub use self::settings::{ServiceConfig, ServiceConfigBuilder};
#[doc(hidden)]
pub use self::service::{H1Service, HttpService, StreamConfiguration};
#[doc(hidden)]
pub use self::helpers::write_content_length;
use body::Binary;
use error::Error;
use extensions::Extensions;
use header::ContentEncoding;
use httpresponse::HttpResponse;
@@ -184,10 +181,9 @@ const HW_BUFFER_SIZE: usize = 32_768;
/// sys.run();
/// }
/// ```
pub fn new<F, U, H>(factory: F) -> HttpServer<H>
pub fn new<F, H>(factory: F) -> HttpServer<H, F>
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
F: Fn() -> H + Send + Clone + 'static,
H: IntoHttpHandler + 'static,
{
HttpServer::new(factory)
@@ -233,124 +229,6 @@ impl From<Option<usize>> for KeepAlive {
}
}
/// Pause accepting incoming connections
///
/// If socket contains some pending connection, they might be dropped.
/// All opened connection remains active.
#[derive(Message)]
pub struct PauseServer;
/// Resume accepting incoming connections
#[derive(Message)]
pub struct ResumeServer;
/// Stop incoming connection processing, stop all workers and exit.
///
/// If server starts with `spawn()` method, then spawned thread get terminated.
pub struct StopServer {
/// Whether to try and shut down gracefully
pub graceful: bool,
}
impl Message for StopServer {
type Result = Result<(), ()>;
}
/// Socket id token
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct Token(usize);
impl Token {
pub(crate) fn new(val: usize) -> Token {
Token(val)
}
}
/// Low level http request handler
#[allow(unused_variables)]
pub trait HttpHandler: 'static {
/// Request handling task
type Task: HttpHandlerTask;
/// Handle request
fn handle(&self, req: Request) -> Result<Self::Task, Request>;
}
impl HttpHandler for Box<HttpHandler<Task = Box<HttpHandlerTask>>> {
type Task = Box<HttpHandlerTask>;
fn handle(&self, req: Request) -> Result<Box<HttpHandlerTask>, Request> {
self.as_ref().handle(req)
}
}
/// Low level http request handler
pub trait HttpHandlerTask {
/// Poll task, this method is used before or after *io* object is available
fn poll_completed(&mut self) -> Poll<(), Error> {
Ok(Async::Ready(()))
}
/// Poll task when *io* object is available
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error>;
/// Connection is disconnected
fn disconnected(&mut self) {}
}
impl HttpHandlerTask for Box<HttpHandlerTask> {
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
self.as_mut().poll_io(io)
}
}
/// Conversion helper trait
pub trait IntoHttpHandler {
/// The associated type which is result of conversion.
type Handler: HttpHandler;
/// Convert into `HttpHandler` object.
fn into_handler(self) -> Self::Handler;
}
impl<T: HttpHandler> IntoHttpHandler for T {
type Handler = T;
fn into_handler(self) -> Self::Handler {
self
}
}
pub(crate) trait IntoAsyncIo {
type Io: AsyncRead + AsyncWrite;
fn into_async_io(self) -> Result<Self::Io, io::Error>;
}
impl IntoAsyncIo for net::TcpStream {
type Io = TcpStream;
fn into_async_io(self) -> Result<Self::Io, io::Error> {
TcpStream::from_std(self, &Handle::default())
}
}
#[doc(hidden)]
/// Trait implemented by types that can accept incoming socket connections.
pub trait AcceptorService<Io: AsyncRead + AsyncWrite>: Clone {
/// Established connection type
type Accepted: IoStream;
/// Future describes async accept process.
type Future: Future<Item = Self::Accepted, Error = io::Error> + 'static;
/// Establish new connection
fn accept(&self, io: Io) -> Self::Future;
/// Scheme
fn scheme(&self) -> &'static str;
}
#[doc(hidden)]
#[derive(Debug)]
pub enum WriterState {
@@ -386,37 +264,47 @@ pub trait Writer {
pub trait IoStream: AsyncRead + AsyncWrite + 'static {
fn shutdown(&mut self, how: Shutdown) -> io::Result<()>;
/// Returns the socket address of the remote peer of this TCP connection.
fn peer_addr(&self) -> Option<SocketAddr> {
None
}
/// Sets the value of the TCP_NODELAY option on this socket.
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()>;
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()>;
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()>;
fn read_available(&mut self, buf: &mut BytesMut) -> Poll<(bool, bool), io::Error> {
let mut read_some = false;
loop {
if buf.remaining_mut() < LW_BUFFER_SIZE {
buf.reserve(HW_BUFFER_SIZE);
}
unsafe {
match self.read(buf.bytes_mut()) {
Ok(n) => {
if n == 0 {
return Ok(Async::Ready((read_some, true)));
} else {
read_some = true;
let read = unsafe { self.read(buf.bytes_mut()) };
match read {
Ok(n) => {
if n == 0 {
return Ok(Async::Ready((read_some, true)));
} else {
read_some = true;
unsafe {
buf.advance_mut(n);
}
}
Err(e) => {
return if e.kind() == io::ErrorKind::WouldBlock {
if read_some {
Ok(Async::Ready((read_some, false)))
} else {
Ok(Async::NotReady)
}
}
Err(e) => {
return if e.kind() == io::ErrorKind::WouldBlock {
if read_some {
Ok(Async::Ready((read_some, false)))
} else {
Err(e)
};
}
Ok(Async::NotReady)
}
} else {
Err(e)
};
}
}
}
@@ -444,6 +332,11 @@ impl IoStream for ::tokio_uds::UnixStream {
fn set_linger(&mut self, _dur: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
#[inline]
fn set_keepalive(&mut self, _nodelay: bool) -> io::Result<()> {
Ok(())
}
}
impl IoStream for TcpStream {
@@ -452,6 +345,11 @@ impl IoStream for TcpStream {
TcpStream::shutdown(self, how)
}
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
TcpStream::peer_addr(self).ok()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
TcpStream::set_nodelay(self, nodelay)
@@ -461,4 +359,9 @@ impl IoStream for TcpStream {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
TcpStream::set_linger(self, dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
TcpStream::set_keepalive(self, dur)
}
}


@@ -11,7 +11,7 @@ use flate2::write::{GzEncoder, ZlibEncoder};
#[cfg(feature = "flate2")]
use flate2::Compression;
use http::header::{ACCEPT_ENCODING, CONTENT_LENGTH};
use http::Version;
use http::{StatusCode, Version};
use super::message::InnerRequest;
use body::{Binary, Body};
@@ -151,10 +151,9 @@ impl Output {
let version = resp.version().unwrap_or_else(|| req.version);
let mut len = 0;
#[cfg_attr(feature = "cargo-clippy", allow(match_ref_pats))]
let has_body = match resp.body() {
&Body::Empty => false,
&Body::Binary(ref bin) => {
Body::Empty => false,
Body::Binary(ref bin) => {
len = bin.len();
!(response_encoding == ContentEncoding::Auto && len < 96)
}
@@ -190,16 +189,19 @@ impl Output {
#[cfg(not(any(feature = "brotli", feature = "flate2")))]
let mut encoding = ContentEncoding::Identity;
#[cfg_attr(feature = "cargo-clippy", allow(match_ref_pats))]
let transfer = match resp.body() {
&Body::Empty => {
if !info.head {
info.length = ResponseLength::Zero;
}
Body::Empty => {
info.length = match resp.status() {
StatusCode::NO_CONTENT
| StatusCode::CONTINUE
| StatusCode::SWITCHING_PROTOCOLS
| StatusCode::PROCESSING => ResponseLength::None,
_ => ResponseLength::Zero,
};
*self = Output::Empty(buf);
return;
}
&Body::Binary(_) => {
Body::Binary(_) => {
#[cfg(any(feature = "brotli", feature = "flate2"))]
{
if !(encoding == ContentEncoding::Identity
@@ -244,7 +246,7 @@ impl Output {
}
return;
}
&Body::Streaming(_) | &Body::Actor(_) => {
Body::Streaming(_) | Body::Actor(_) => {
if resp.upgrade() {
if version == Version::HTTP_2 {
error!("Connection upgrade is forbidden for HTTP/2");
@@ -441,7 +443,7 @@ impl ContentEncoder {
}
}
#[cfg_attr(feature = "cargo-clippy", allow(inline_always))]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))]
#[inline(always)]
pub fn write_eof(&mut self) -> Result<bool, io::Error> {
let encoder =
@@ -483,7 +485,7 @@ impl ContentEncoder {
}
}
#[cfg_attr(feature = "cargo-clippy", allow(inline_always))]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))]
#[inline(always)]
pub fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self {


@@ -1,528 +0,0 @@
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::time::Duration;
use std::{mem, net};
use futures::sync::{mpsc, mpsc::unbounded};
use futures::{Future, Sink, Stream};
use num_cpus;
use actix::{
fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, Context, Handler,
Response, StreamHandler, System, WrapFuture,
};
use super::accept::{AcceptLoop, AcceptNotify, Command};
use super::worker::{Conn, StopWorker, Worker, WorkerClient};
use super::{PauseServer, ResumeServer, StopServer, Token};
#[doc(hidden)]
/// Describes service that could be used
/// with [Server](struct.Server.html)
pub trait Service: Send + 'static {
/// Clone service
fn clone(&self) -> Box<Service>;
/// Create service handler for this service
fn create(&self, conn: Connections) -> Box<ServiceHandler>;
}
impl Service for Box<Service> {
fn clone(&self) -> Box<Service> {
self.as_ref().clone()
}
fn create(&self, conn: Connections) -> Box<ServiceHandler> {
self.as_ref().create(conn)
}
}
#[doc(hidden)]
/// Describes the way a service handles incoming
/// TCP connections.
pub trait ServiceHandler {
/// Handle incoming stream
fn handle(
&mut self, token: Token, io: net::TcpStream, peer: Option<net::SocketAddr>,
);
/// Shutdown open handlers
fn shutdown(&self, _: bool) {}
}
pub(crate) enum ServerCommand {
WorkerDied(usize),
}
/// Generic server
#[doc(hidden)]
pub struct Server {
threads: usize,
workers: Vec<(usize, Addr<Worker>)>,
services: Vec<Box<Service>>,
sockets: Vec<Vec<(Token, net::TcpListener)>>,
accept: AcceptLoop,
exit: bool,
shutdown_timeout: u16,
signals: Option<Addr<signal::ProcessSignals>>,
no_signals: bool,
maxconn: usize,
maxconnrate: usize,
}
impl Default for Server {
fn default() -> Self {
Self::new()
}
}
impl Server {
/// Create new Server instance
pub fn new() -> Server {
Server {
threads: num_cpus::get(),
workers: Vec::new(),
services: Vec::new(),
sockets: Vec::new(),
accept: AcceptLoop::new(),
exit: false,
shutdown_timeout: 30,
signals: None,
no_signals: false,
maxconn: 102_400,
maxconnrate: 256,
}
}
/// Set number of workers to start.
///
/// By default the http server uses the number of available logical CPUs as the
/// thread count.
pub fn workers(mut self, num: usize) -> Self {
self.threads = num;
self
}
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is reached
/// for each worker.
///
/// By default the max connections limit is set to 100k.
pub fn maxconn(mut self, num: usize) -> Self {
self.maxconn = num;
self
}
/// Sets the maximum per-worker number of concurrent connection establishment processes.
///
/// All listeners will stop accepting connections when this limit is reached. It
/// can be used to limit the global SSL CPU usage.
///
/// By default the max connection rate is set to 256.
pub fn maxconnrate(mut self, num: usize) -> Self {
self.maxconnrate = num;
self
}
/// Stop actix system.
///
/// `SystemExit` message stops currently running system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
#[doc(hidden)]
/// Set alternative address for `ProcessSignals` actor.
pub fn signals(mut self, addr: Addr<signal::ProcessSignals>) -> Self {
self.signals = Some(addr);
self
}
/// Disable signal handling
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
}
/// Timeout for graceful workers shutdown.
///
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
///
/// By default the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u16) -> Self {
self.shutdown_timeout = sec;
self
}
/// Add new service to server
pub fn service<T>(mut self, srv: T) -> Self
where
T: Into<(Box<Service>, Vec<(Token, net::TcpListener)>)>,
{
let (srv, sockets) = srv.into();
self.services.push(srv);
self.sockets.push(sockets);
self
}
/// Spawn new thread and start listening for incoming connections.
///
/// This method spawns new thread and starts new actix system. Other than
/// that it is similar to `start()` method. This method blocks.
///
/// This method panics if no socket addresses get bound.
///
/// ```rust,ignore
/// # extern crate futures;
/// # extern crate actix_web;
/// # use futures::Future;
/// use actix_web::*;
///
/// fn main() {
/// Server::new().
/// .service(
/// HttpServer::new(|| App::new().resource("/", |r| r.h(|_| HttpResponse::Ok())))
/// .bind("127.0.0.1:0")
/// .expect("Can not bind to 127.0.0.1:0"))
/// .run();
/// }
/// ```
pub fn run(self) {
let sys = System::new("http-server");
self.start();
sys.run();
}
/// Starts Server Actor and returns its address
pub fn start(mut self) -> Addr<Server> {
if self.sockets.is_empty() {
panic!("Service should have at least one bound socket");
} else {
info!("Starting {} http workers", self.threads);
// start workers
let mut workers = Vec::new();
for idx in 0..self.threads {
let (addr, worker) = self.start_worker(idx, self.accept.get_notify());
workers.push(worker);
self.workers.push((idx, addr));
}
// start accept thread
for sock in &self.sockets {
for s in sock.iter() {
info!("Starting server on http://{}", s.1.local_addr().unwrap());
}
}
let rx = self
.accept
.start(mem::replace(&mut self.sockets, Vec::new()), workers);
// start http server actor
let signals = self.subscribe_to_signals();
let addr = Actor::create(move |ctx| {
ctx.add_stream(rx);
self
});
if let Some(signals) = signals {
signals.do_send(signal::Subscribe(addr.clone().recipient()))
}
addr
}
}
// subscribe to os signals
fn subscribe_to_signals(&self) -> Option<Addr<signal::ProcessSignals>> {
if !self.no_signals {
if let Some(ref signals) = self.signals {
Some(signals.clone())
} else {
Some(System::current().registry().get::<signal::ProcessSignals>())
}
} else {
None
}
}
fn start_worker(
&self, idx: usize, notify: AcceptNotify,
) -> (Addr<Worker>, WorkerClient) {
let (tx, rx) = unbounded::<Conn<net::TcpStream>>();
let conns = Connections::new(notify, self.maxconn, self.maxconnrate);
let worker = WorkerClient::new(idx, tx, conns.clone());
let services: Vec<_> = self.services.iter().map(|v| v.clone()).collect();
let addr = Arbiter::start(move |ctx: &mut Context<_>| {
ctx.add_message_stream(rx);
let handlers: Vec<_> = services
.into_iter()
.map(|s| s.create(conns.clone()))
.collect();
Worker::new(conns, handlers)
});
(addr, worker)
}
}
impl Actor for Server {
type Context = Context<Self>;
}
/// Signals support
/// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
/// message to `System` actor.
impl Handler<signal::Signal> for Server {
type Result = ();
fn handle(&mut self, msg: signal::Signal, ctx: &mut Context<Self>) {
match msg.0 {
signal::SignalType::Int => {
info!("SIGINT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
}
signal::SignalType::Term => {
info!("SIGTERM received, stopping");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: true }, ctx);
}
signal::SignalType::Quit => {
info!("SIGQUIT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
}
_ => (),
}
}
}
impl Handler<PauseServer> for Server {
type Result = ();
fn handle(&mut self, _: PauseServer, _: &mut Context<Self>) {
self.accept.send(Command::Pause);
}
}
impl Handler<ResumeServer> for Server {
type Result = ();
fn handle(&mut self, _: ResumeServer, _: &mut Context<Self>) {
self.accept.send(Command::Resume);
}
}
impl Handler<StopServer> for Server {
type Result = Response<(), ()>;
fn handle(&mut self, msg: StopServer, ctx: &mut Context<Self>) -> Self::Result {
// stop accept thread
self.accept.send(Command::Stop);
// stop workers
let (tx, rx) = mpsc::channel(1);
let dur = if msg.graceful {
Some(Duration::new(u64::from(self.shutdown_timeout), 0))
} else {
None
};
for worker in &self.workers {
let tx2 = tx.clone();
ctx.spawn(
worker
.1
.send(StopWorker { graceful: dur })
.into_actor(self)
.then(move |_, slf, ctx| {
slf.workers.pop();
if slf.workers.is_empty() {
let _ = tx2.send(());
// we need to stop system if server was spawned
if slf.exit {
ctx.run_later(Duration::from_millis(300), |_, _| {
System::current().stop();
});
}
}
fut::ok(())
}),
);
}
if !self.workers.is_empty() {
Response::async(rx.into_future().map(|_| ()).map_err(|_| ()))
} else {
// we need to stop system if server was spawned
if self.exit {
ctx.run_later(Duration::from_millis(300), |_, _| {
System::current().stop();
});
}
Response::reply(Ok(()))
}
}
}
/// Commands from accept threads
impl StreamHandler<ServerCommand, ()> for Server {
fn finished(&mut self, _: &mut Context<Self>) {}
fn handle(&mut self, msg: ServerCommand, _: &mut Context<Self>) {
match msg {
ServerCommand::WorkerDied(idx) => {
let mut found = false;
for i in 0..self.workers.len() {
if self.workers[i].0 == idx {
self.workers.swap_remove(i);
found = true;
break;
}
}
if found {
error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.workers.len();
'found: loop {
for i in 0..self.workers.len() {
if self.workers[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
}
break;
}
let (addr, worker) =
self.start_worker(new_idx, self.accept.get_notify());
self.workers.push((new_idx, addr));
self.accept.send(Command::Worker(worker));
}
}
}
}
}
#[derive(Clone, Default)]
///Contains information about connection.
pub struct Connections(Arc<ConnectionsInner>);
impl Connections {
fn new(notify: AcceptNotify, maxconn: usize, maxconnrate: usize) -> Self {
let maxconn_low = if maxconn > 10 { maxconn - 10 } else { 0 };
let maxconnrate_low = if maxconnrate > 10 {
maxconnrate - 10
} else {
0
};
Connections(Arc::new(ConnectionsInner {
notify,
maxconn,
maxconnrate,
maxconn_low,
maxconnrate_low,
conn: AtomicUsize::new(0),
connrate: AtomicUsize::new(0),
}))
}
pub(crate) fn available(&self) -> bool {
self.0.available()
}
pub(crate) fn num_connections(&self) -> usize {
self.0.conn.load(Ordering::Relaxed)
}
/// Report opened connection
pub fn connection(&self) -> ConnectionTag {
ConnectionTag::new(self.0.clone())
}
/// Report connection rate; the rate usually tracks ssl handshakes
pub fn connection_rate(&self) -> ConnectionRateTag {
ConnectionRateTag::new(self.0.clone())
}
}
#[derive(Default)]
struct ConnectionsInner {
notify: AcceptNotify,
conn: AtomicUsize,
connrate: AtomicUsize,
maxconn: usize,
maxconnrate: usize,
maxconn_low: usize,
maxconnrate_low: usize,
}
impl ConnectionsInner {
fn available(&self) -> bool {
if self.maxconnrate <= self.connrate.load(Ordering::Relaxed) {
false
} else {
self.maxconn > self.conn.load(Ordering::Relaxed)
}
}
fn notify_maxconn(&self, maxconn: usize) {
if maxconn > self.maxconn_low && maxconn <= self.maxconn {
self.notify.notify();
}
}
fn notify_maxconnrate(&self, connrate: usize) {
if connrate > self.maxconnrate_low && connrate <= self.maxconnrate {
self.notify.notify();
}
}
}
/// Type responsible for max connection stat.
///
/// The max connections stat gets updated on drop.
pub struct ConnectionTag(Arc<ConnectionsInner>);
impl ConnectionTag {
fn new(inner: Arc<ConnectionsInner>) -> Self {
inner.conn.fetch_add(1, Ordering::Relaxed);
ConnectionTag(inner)
}
}
impl Drop for ConnectionTag {
fn drop(&mut self) {
let conn = self.0.conn.fetch_sub(1, Ordering::Relaxed);
self.0.notify_maxconn(conn);
}
}
/// Type responsible for max connection rate stat.
///
/// The max connection rate stat gets updated on drop.
pub struct ConnectionRateTag(Arc<ConnectionsInner>);
impl ConnectionRateTag {
fn new(inner: Arc<ConnectionsInner>) -> Self {
inner.connrate.fetch_add(1, Ordering::Relaxed);
ConnectionRateTag(inner)
}
}
impl Drop for ConnectionRateTag {
fn drop(&mut self) {
let connrate = self.0.connrate.fetch_sub(1, Ordering::Relaxed);
self.0.notify_maxconnrate(connrate);
}
}

src/server/service.rs Normal file

@@ -0,0 +1,272 @@
use std::marker::PhantomData;
use std::time::Duration;
use actix_net::service::{NewService, Service};
use futures::future::{ok, FutureResult};
use futures::{Async, Poll};
use super::channel::{H1Channel, HttpChannel};
use super::error::HttpDispatchError;
use super::handler::HttpHandler;
use super::settings::ServiceConfig;
use super::IoStream;
/// `NewService` implementation for HTTP1/HTTP2 transports
pub struct HttpService<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
settings: ServiceConfig<H>,
_t: PhantomData<Io>,
}
impl<H, Io> HttpService<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
/// Create new `HttpService` instance.
pub fn new(settings: ServiceConfig<H>) -> Self {
HttpService {
settings,
_t: PhantomData,
}
}
}
impl<H, Io> NewService for HttpService<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
type Request = Io;
type Response = ();
type Error = HttpDispatchError;
type InitError = ();
type Service = HttpServiceHandler<H, Io>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
ok(HttpServiceHandler::new(self.settings.clone()))
}
}
pub struct HttpServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
settings: ServiceConfig<H>,
_t: PhantomData<Io>,
}
impl<H, Io> HttpServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
fn new(settings: ServiceConfig<H>) -> HttpServiceHandler<H, Io> {
HttpServiceHandler {
settings,
_t: PhantomData,
}
}
}
impl<H, Io> Service for HttpServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
type Request = Io;
type Response = ();
type Error = HttpDispatchError;
type Future = HttpChannel<Io, H>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
HttpChannel::new(self.settings.clone(), req)
}
}
/// `NewService` implementation for HTTP1 transport
pub struct H1Service<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
settings: ServiceConfig<H>,
_t: PhantomData<Io>,
}
impl<H, Io> H1Service<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
/// Create new `HttpService` instance.
pub fn new(settings: ServiceConfig<H>) -> Self {
H1Service {
settings,
_t: PhantomData,
}
}
}
impl<H, Io> NewService for H1Service<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
type Request = Io;
type Response = ();
type Error = HttpDispatchError;
type InitError = ();
type Service = H1ServiceHandler<H, Io>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
ok(H1ServiceHandler::new(self.settings.clone()))
}
}
/// `Service` implementation for HTTP1 transport
pub struct H1ServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
settings: ServiceConfig<H>,
_t: PhantomData<Io>,
}
impl<H, Io> H1ServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
fn new(settings: ServiceConfig<H>) -> H1ServiceHandler<H, Io> {
H1ServiceHandler {
settings,
_t: PhantomData,
}
}
}
impl<H, Io> Service for H1ServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
type Request = Io;
type Response = ();
type Error = HttpDispatchError;
type Future = H1Channel<Io, H>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
H1Channel::new(self.settings.clone(), req)
}
}
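Both factories take a `ServiceConfig`, which is built via `ServiceConfig::build(...).finish()` further down in this diff. A hedged construction sketch, where `app_handler` is a placeholder for any `HttpHandler` implementation and `tokio_tcp::TcpStream` stands in for the concrete stream type:

    // Hedged sketch: building the two transport factories from one config.
    let config = ServiceConfig::build(app_handler).finish();
    let http = HttpService::<_, tokio_tcp::TcpStream>::new(config.clone());
    let h1 = H1Service::<_, tokio_tcp::TcpStream>::new(config);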
/// `NewService` implementation for stream configuration service
///
/// Stream configuration service allows changing some socket-level
/// parameters, for example `tcp nodelay` or `tcp keep-alive`.
pub struct StreamConfiguration<T, E> {
no_delay: Option<bool>,
tcp_ka: Option<Option<Duration>>,
_t: PhantomData<(T, E)>,
}
impl<T, E> Default for StreamConfiguration<T, E> {
fn default() -> Self {
Self::new()
}
}
impl<T, E> StreamConfiguration<T, E> {
/// Create new `StreamConfigurationService` instance.
pub fn new() -> Self {
Self {
no_delay: None,
tcp_ka: None,
_t: PhantomData,
}
}
/// Sets the value of the `TCP_NODELAY` option on this socket.
pub fn nodelay(mut self, nodelay: bool) -> Self {
self.no_delay = Some(nodelay);
self
}
/// Sets whether keepalive messages are enabled to be sent on this socket.
pub fn tcp_keepalive(mut self, keepalive: Option<Duration>) -> Self {
self.tcp_ka = Some(keepalive);
self
}
}
impl<T: IoStream, E> NewService for StreamConfiguration<T, E> {
type Request = T;
type Response = T;
type Error = E;
type InitError = ();
type Service = StreamConfigurationService<T, E>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
ok(StreamConfigurationService {
no_delay: self.no_delay,
tcp_ka: self.tcp_ka,
_t: PhantomData,
})
}
}
/// Stream configuration service
///
/// Stream configuration service allows changing some socket-level
/// parameters, for example `tcp nodelay` or `tcp keep-alive`.
pub struct StreamConfigurationService<T, E> {
no_delay: Option<bool>,
tcp_ka: Option<Option<Duration>>,
_t: PhantomData<(T, E)>,
}
impl<T, E> Service for StreamConfigurationService<T, E>
where
T: IoStream,
{
type Request = T;
type Response = T;
type Error = E;
type Future = FutureResult<T, E>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, mut req: Self::Request) -> Self::Future {
if let Some(no_delay) = self.no_delay {
if req.set_nodelay(no_delay).is_err() {
error!("Can not set socket no-delay option");
}
}
if let Some(keepalive) = self.tcp_ka {
if req.set_keepalive(keepalive).is_err() {
error!("Can not set socket keep-alive option");
}
}
ok(req)
}
}
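A hedged sketch of configuring the stream service; the type parameters (here assumed to be `tokio_tcp::TcpStream` and `()`) are the stream type passed through unchanged and the error type of the surrounding pipeline.

    // Hedged sketch: a pass-through service that sets TCP_NODELAY and a
    // 10 second TCP keep-alive before handing the stream to the next service.
    let stream_cfg = StreamConfiguration::<tokio_tcp::TcpStream, ()>::new()
        .nodelay(true)
        .tcp_keepalive(Some(Duration::from_secs(10)));

In a full pipeline this would typically be chained in front of `H1Service` or `HttpService` with actix-net's service combinators.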


@@ -1,23 +1,22 @@
use std::cell::{RefCell, RefMut, UnsafeCell};
use std::cell::{Cell, RefCell, RefMut};
use std::collections::VecDeque;
use std::fmt::Write;
use std::rc::Rc;
use std::time::{Duration, Instant};
use std::{env, fmt, net};
use actix::Arbiter;
use bytes::BytesMut;
use futures::Stream;
use futures::{future, Future};
use futures_cpupool::CpuPool;
use http::StatusCode;
use lazycell::LazyCell;
use parking_lot::Mutex;
use time;
use tokio_timer::{Delay, Interval};
use tokio_current_thread::spawn;
use tokio_timer::{sleep, Delay};
use super::channel::Node;
use super::message::{Request, RequestPool};
use super::server::{ConnectionRateTag, ConnectionTag, Connections};
use super::KeepAlive;
use body::Body;
use httpresponse::{HttpResponse, HttpResponseBuilder, HttpResponsePool};
@@ -44,7 +43,7 @@ lazy_static! {
/// Various server settings
pub struct ServerSettings {
addr: Option<net::SocketAddr>,
addr: net::SocketAddr,
secure: bool,
host: String,
cpu_pool: LazyCell<CpuPool>,
@@ -66,7 +65,7 @@ impl Clone for ServerSettings {
impl Default for ServerSettings {
fn default() -> Self {
ServerSettings {
addr: None,
addr: "127.0.0.1:8080".parse().unwrap(),
secure: false,
host: "localhost:8080".to_owned(),
responses: HttpResponsePool::get_pool(),
@@ -78,15 +77,9 @@ impl Default for ServerSettings {
impl ServerSettings {
/// Create server settings instance
pub(crate) fn new(
addr: Option<net::SocketAddr>, host: &Option<String>, secure: bool,
addr: net::SocketAddr, host: &str, secure: bool,
) -> ServerSettings {
let host = if let Some(ref host) = *host {
host.clone()
} else if let Some(ref addr) = addr {
format!("{}", addr)
} else {
"localhost".to_owned()
};
let host = host.to_owned();
let cpu_pool = LazyCell::new();
let responses = HttpResponsePool::get_pool();
ServerSettings {
@@ -99,7 +92,7 @@ impl ServerSettings {
}
/// Returns the socket address of the local half of this TCP connection
pub fn local_addr(&self) -> Option<net::SocketAddr> {
pub fn local_addr(&self) -> net::SocketAddr {
self.addr
}
@@ -134,129 +127,300 @@ impl ServerSettings {
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
pub(crate) struct WorkerSettings<H> {
h: Vec<H>,
keep_alive: u64,
/// Http service configuration
pub struct ServiceConfig<H>(Rc<Inner<H>>);
struct Inner<H> {
handler: H,
keep_alive: Option<Duration>,
client_timeout: u64,
client_shutdown: u64,
ka_enabled: bool,
bytes: Rc<SharedBytesPool>,
messages: &'static RequestPool,
conns: Connections,
node: RefCell<Node<()>>,
date: UnsafeCell<Date>,
date: Cell<Option<Date>>,
}
impl<H: 'static> WorkerSettings<H> {
pub(crate) fn create(
apps: Vec<H>, keep_alive: KeepAlive, settings: ServerSettings,
conns: Connections,
) -> Rc<WorkerSettings<H>> {
let settings = Rc::new(Self::new(apps, keep_alive, settings, conns));
// periodic date update
let s = settings.clone();
Arbiter::spawn(
Interval::new(Instant::now(), Duration::from_secs(1))
.map_err(|_| ())
.and_then(move |_| {
s.update_date();
Ok(())
}).fold((), |(), _| Ok(())),
);
settings
impl<H> Clone for ServiceConfig<H> {
fn clone(&self) -> Self {
ServiceConfig(self.0.clone())
}
}
impl<H> WorkerSettings<H> {
impl<H> ServiceConfig<H> {
/// Create instance of `ServiceConfig`
pub(crate) fn new(
h: Vec<H>, keep_alive: KeepAlive, settings: ServerSettings, conns: Connections,
) -> WorkerSettings<H> {
handler: H, keep_alive: KeepAlive, client_timeout: u64, client_shutdown: u64,
settings: ServerSettings,
) -> ServiceConfig<H> {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os | KeepAlive::Tcp(_) => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
WorkerSettings {
h,
ServiceConfig(Rc::new(Inner {
handler,
keep_alive,
ka_enabled,
client_timeout,
client_shutdown,
bytes: Rc::new(SharedBytesPool::new()),
messages: RequestPool::pool(settings),
node: RefCell::new(Node::head()),
date: UnsafeCell::new(Date::new()),
keep_alive,
ka_enabled,
conns,
}
date: Cell::new(None),
}))
}
pub fn head(&self) -> RefMut<Node<()>> {
self.node.borrow_mut()
/// Create a service config builder.
pub fn build(handler: H) -> ServiceConfigBuilder<H> {
ServiceConfigBuilder::new(handler)
}
pub fn handlers(&self) -> &Vec<H> {
&self.h
pub(crate) fn head(&self) -> RefMut<Node<()>> {
self.0.node.borrow_mut()
}
pub fn keep_alive_timer(&self) -> Option<Delay> {
if self.keep_alive != 0 {
Some(Delay::new(
Instant::now() + Duration::from_secs(self.keep_alive),
))
pub(crate) fn handler(&self) -> &H {
&self.0.handler
}
#[inline]
/// Keep alive duration if configured.
pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive
}
#[inline]
/// Return the state of connection keep-alive functionality
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
pub(crate) fn get_bytes(&self) -> BytesMut {
self.0.bytes.get_bytes()
}
pub(crate) fn release_bytes(&self, bytes: BytesMut) {
self.0.bytes.release_bytes(bytes)
}
pub(crate) fn get_request(&self) -> Request {
RequestPool::get(self.0.messages)
}
}
impl<H: 'static> ServiceConfig<H> {
#[inline]
/// Client timeout for first request.
pub fn client_timer(&self) -> Option<Delay> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(Delay::new(self.now() + Duration::from_millis(delay)))
} else {
None
}
}
pub fn keep_alive(&self) -> u64 {
self.keep_alive
}
pub fn keep_alive_enabled(&self) -> bool {
self.ka_enabled
}
pub fn get_bytes(&self) -> BytesMut {
self.bytes.get_bytes()
}
pub fn release_bytes(&self, bytes: BytesMut) {
self.bytes.release_bytes(bytes)
}
pub fn get_request(&self) -> Request {
RequestPool::get(self.messages)
}
pub fn connection(&self) -> ConnectionTag {
self.conns.connection()
}
fn update_date(&self) {
// Unsafe: WorkerSetting is !Sync and !Send
unsafe { &mut *self.date.get() }.update();
}
pub fn set_date(&self, dst: &mut BytesMut, full: bool) {
// Unsafe: WorkerSetting is !Sync and !Send
let date_bytes = unsafe { &(*self.date.get()).bytes };
if full {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[6..35].copy_from_slice(date_bytes);
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
/// Client timeout for first request.
pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
dst.extend_from_slice(date_bytes);
None
}
}
#[allow(dead_code)]
pub(crate) fn connection_rate(&self) -> ConnectionRateTag {
self.conns.connection_rate()
/// Client shutdown timer
pub fn client_shutdown_timer(&self) -> Option<Instant> {
let delay = self.0.client_shutdown;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
}
#[inline]
/// Return keep-alive timer delay if configured.
pub fn keep_alive_timer(&self) -> Option<Delay> {
if let Some(ka) = self.0.keep_alive {
Some(Delay::new(self.now() + ka))
} else {
None
}
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
if let Some(ka) = self.0.keep_alive {
Some(self.now() + ka)
} else {
None
}
}
fn check_date(&self) {
if unsafe { &*self.0.date.as_ptr() }.is_none() {
self.0.date.set(Some(Date::new()));
// periodic date update
let s = self.clone();
spawn(sleep(Duration::from_millis(500)).then(move |_| {
s.0.date.set(None);
future::ok(())
}));
}
}
pub(crate) fn set_date(&self, dst: &mut BytesMut, full: bool) {
self.check_date();
let date = &unsafe { &*self.0.date.as_ptr() }.as_ref().unwrap().bytes;
if full {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[6..35].copy_from_slice(date);
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
} else {
dst.extend_from_slice(date);
}
}
#[inline]
pub(crate) fn now(&self) -> Instant {
self.check_date();
unsafe { &*self.0.date.as_ptr() }.as_ref().unwrap().current
}
}
/// A service config builder
///
/// This type can be used to construct an instance of `ServiceConfig` through a
/// builder-like pattern.
pub struct ServiceConfigBuilder<H> {
handler: H,
keep_alive: KeepAlive,
client_timeout: u64,
client_shutdown: u64,
host: String,
addr: net::SocketAddr,
secure: bool,
}
impl<H> ServiceConfigBuilder<H> {
/// Create instance of `ServiceConfigBuilder`
pub fn new(handler: H) -> ServiceConfigBuilder<H> {
ServiceConfigBuilder {
handler,
keep_alive: KeepAlive::Timeout(5),
client_timeout: 5000,
client_shutdown: 5000,
secure: false,
host: "localhost".to_owned(),
addr: "127.0.0.1:8080".parse().unwrap(),
}
}
/// Enable secure flag for current server.
///
/// By default this flag is set to false.
pub fn secure(mut self) -> Self {
self.secure = true;
self
}
/// Set server keep-alive setting.
///
/// By default keep alive is set to 5 seconds.
pub fn keep_alive<T: Into<KeepAlive>>(mut self, val: T) -> Self {
self.keep_alive = val.into();
self
}
/// Set server client timeout in milliseconds for first request.
///
/// Defines a timeout for reading client request header. If a client does not transmit
/// the entire set of headers within this time, the request is terminated with
/// the 408 (Request Time-out) error.
///
/// To disable timeout set value to 0.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
self
}
/// Set server connection shutdown timeout in milliseconds.
///
/// Defines a timeout for connection shutdown. If the shutdown procedure does not complete
/// within this time, the request is dropped. This timeout affects only secure connections.
///
/// To disable timeout set value to 0.
///
/// By default the client shutdown timeout is set to 5000 milliseconds.
pub fn client_shutdown(mut self, val: u64) -> Self {
self.client_shutdown = val;
self
}
/// Set server host name.
///
/// The host name is used by the application router as a hostname for url
/// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo.
/// html#method.host) documentation for more information.
///
/// By default host name is set to a "localhost" value.
pub fn server_hostname(mut self, val: &str) -> Self {
self.host = val.to_owned();
self
}
/// Set server ip address.
///
/// The host name is used by the application router as a hostname for url
/// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo.
/// html#method.host) documentation for more information.
///
/// By default the server address is set to "127.0.0.1:8080"
pub fn server_address<S: net::ToSocketAddrs>(mut self, addr: S) -> Self {
match addr.to_socket_addrs() {
Err(err) => error!("Can not convert to SocketAddr: {}", err),
Ok(mut addrs) => if let Some(addr) = addrs.next() {
self.addr = addr;
},
}
self
}
/// Finish service configuration and create `ServiceConfig` object.
pub fn finish(self) -> ServiceConfig<H> {
let settings = ServerSettings::new(self.addr, &self.host, self.secure);
let client_shutdown = if self.secure { self.client_shutdown } else { 0 };
ServiceConfig::new(
self.handler,
self.keep_alive,
self.client_timeout,
client_shutdown,
settings,
)
}
}
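A hedged sketch of the builder in use; `app_handler` is a placeholder for any `HttpHandler` implementation, and the hostname and address are illustrative values.

    // Hedged sketch: a secure configuration with explicit timeouts.
    let config = ServiceConfig::build(app_handler)
        .keep_alive(KeepAlive::Timeout(30))
        .client_timeout(5_000)
        .client_shutdown(5_000)
        .server_hostname("example.com")
        .server_address("127.0.0.1:8443")
        .secure()
        .finish();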
#[derive(Copy, Clone)]
struct Date {
current: Instant,
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
@@ -264,6 +428,7 @@ struct Date {
impl Date {
fn new() -> Date {
let mut date = Date {
current: Instant::now(),
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
@@ -272,6 +437,7 @@ impl Date {
}
fn update(&mut self) {
self.pos = 0;
self.current = Instant::now();
write!(self, "{}", time::at_utc(time::get_time()).rfc822()).unwrap();
}
}
@@ -313,6 +479,8 @@ impl SharedBytesPool {
#[cfg(test)]
mod tests {
use super::*;
use futures::future;
use tokio::runtime::current_thread;
#[test]
fn test_date_len() {
@@ -321,16 +489,22 @@ mod tests {
#[test]
fn test_date() {
let settings = WorkerSettings::<()>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
Connections::default(),
);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1, true);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2, true);
assert_eq!(buf1, buf2);
let mut rt = current_thread::Runtime::new().unwrap();
let _ = rt.block_on(future::lazy(|| {
let settings = ServiceConfig::<()>::new(
(),
KeepAlive::Os,
0,
0,
ServerSettings::default(),
);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1, true);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2, true);
assert_eq!(buf1, buf2);
future::ok::<_, ()>(())
}));
}
}


@@ -1,12 +1,10 @@
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
mod openssl;
#[cfg(feature = "alpn")]
pub use self::openssl::OpensslAcceptor;
#[cfg(any(feature = "alpn", feature = "ssl"))]
pub use self::openssl::{openssl_acceptor_with_flags, OpensslAcceptor};
#[cfg(feature = "tls")]
mod nativetls;
#[cfg(feature = "tls")]
pub use self::nativetls::{NativeTlsAcceptor, TlsStream};
#[cfg(feature = "rust-tls")]
mod rustls;


@@ -1,61 +1,9 @@
use std::net::Shutdown;
use std::net::{Shutdown, SocketAddr};
use std::{io, time};
use futures::{Async, Future, Poll};
use native_tls::{self, HandshakeError, TlsAcceptor};
use tokio_io::{AsyncRead, AsyncWrite};
use actix_net::ssl::TlsStream;
use server::{AcceptorService, IoStream};
#[derive(Clone)]
/// Support `SSL` connections via native-tls package
///
/// `tls` feature enables `NativeTlsAcceptor` type
pub struct NativeTlsAcceptor {
acceptor: TlsAcceptor,
}
/// A wrapper around an underlying raw stream which implements the TLS or SSL
/// protocol.
///
/// A `TlsStream<S>` represents a handshake that has been completed successfully
/// and both the server and the client are ready for receiving and sending
/// data. Bytes read from a `TlsStream` are decrypted from `S` and bytes written
/// to a `TlsStream` are encrypted when passing through to `S`.
#[derive(Debug)]
pub struct TlsStream<S> {
inner: native_tls::TlsStream<S>,
}
/// Future returned from `NativeTlsAcceptor::accept` which will resolve
/// once the accept handshake has finished.
pub struct Accept<S> {
inner: Option<Result<native_tls::TlsStream<S>, HandshakeError<S>>>,
}
impl NativeTlsAcceptor {
/// Create `NativeTlsAcceptor` instance
pub fn new(acceptor: TlsAcceptor) -> Self {
NativeTlsAcceptor {
acceptor: acceptor.into(),
}
}
}
impl<Io: IoStream> AcceptorService<Io> for NativeTlsAcceptor {
type Accepted = TlsStream<Io>;
type Future = Accept<Io>;
fn scheme(&self) -> &'static str {
"https"
}
fn accept(&self, io: Io) -> Self::Future {
Accept {
inner: Some(self.acceptor.accept(io)),
}
}
}
use server::IoStream;
impl<Io: IoStream> IoStream for TlsStream<Io> {
#[inline]
@@ -64,6 +12,11 @@ impl<Io: IoStream> IoStream for TlsStream<Io> {
Ok(())
}
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
self.get_ref().get_ref().peer_addr()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
@@ -73,71 +26,9 @@ impl<Io: IoStream> IoStream for TlsStream<Io> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
}
impl<Io: IoStream> Future for Accept<Io> {
type Item = TlsStream<Io>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.inner.take().expect("cannot poll MidHandshake twice") {
Ok(stream) => Ok(TlsStream { inner: stream }.into()),
Err(HandshakeError::Failure(e)) => {
Err(io::Error::new(io::ErrorKind::Other, e))
}
Err(HandshakeError::WouldBlock(s)) => match s.handshake() {
Ok(stream) => Ok(TlsStream { inner: stream }.into()),
Err(HandshakeError::Failure(e)) => {
Err(io::Error::new(io::ErrorKind::Other, e))
}
Err(HandshakeError::WouldBlock(s)) => {
self.inner = Some(Err(HandshakeError::WouldBlock(s)));
Ok(Async::NotReady)
}
},
}
}
}
impl<S> TlsStream<S> {
/// Get access to the internal `native_tls::TlsStream` stream which also
/// transitively allows access to `S`.
pub fn get_ref(&self) -> &native_tls::TlsStream<S> {
&self.inner
}
/// Get mutable access to the internal `native_tls::TlsStream` stream which
/// also transitively allows mutable access to `S`.
pub fn get_mut(&mut self) -> &mut native_tls::TlsStream<S> {
&mut self.inner
}
}
impl<S: io::Read + io::Write> io::Read for TlsStream<S> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
impl<S: io::Read + io::Write> io::Write for TlsStream<S> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl<S: AsyncRead + AsyncWrite> AsyncRead for TlsStream<S> {}
impl<S: AsyncRead + AsyncWrite> AsyncWrite for TlsStream<S> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
match self.inner.shutdown() {
Ok(_) => (),
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (),
Err(e) => return Err(e),
}
self.inner.get_mut().shutdown()
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_keepalive(dur)
}
}


@@ -1,80 +1,61 @@
use std::net::Shutdown;
use std::net::{Shutdown, SocketAddr};
use std::{io, time};
use futures::{Future, Poll};
use actix_net::ssl;
use openssl::ssl::{AlpnError, SslAcceptor, SslAcceptorBuilder};
use tokio_openssl::{AcceptAsync, SslAcceptorExt, SslStream};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_openssl::SslStream;
use server::{AcceptorService, IoStream, ServerFlags};
use server::{IoStream, ServerFlags};
#[derive(Clone)]
/// Support `SSL` connections via openssl package
///
/// `alpn` feature enables `OpensslAcceptor` type
pub struct OpensslAcceptor {
acceptor: SslAcceptor,
/// `ssl` feature enables `OpensslAcceptor` type
pub struct OpensslAcceptor<T> {
_t: ssl::OpensslAcceptor<T>,
}
impl OpensslAcceptor {
impl<T: AsyncRead + AsyncWrite> OpensslAcceptor<T> {
/// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support.
pub fn new(builder: SslAcceptorBuilder) -> io::Result<Self> {
pub fn new(builder: SslAcceptorBuilder) -> io::Result<ssl::OpensslAcceptor<T>> {
OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2)
}
/// Create `OpensslAcceptor` with custom server flags.
pub fn with_flags(
mut builder: SslAcceptorBuilder, flags: ServerFlags,
) -> io::Result<Self> {
let mut protos = Vec::new();
if flags.contains(ServerFlags::HTTP1) {
protos.extend(b"\x08http/1.1");
}
if flags.contains(ServerFlags::HTTP2) {
protos.extend(b"\x02h2");
builder.set_alpn_select_callback(|_, protos| {
const H2: &[u8] = b"\x02h2";
if protos.windows(3).any(|window| window == H2) {
Ok(b"h2")
} else {
Err(AlpnError::NOACK)
}
});
}
builder: SslAcceptorBuilder, flags: ServerFlags,
) -> io::Result<ssl::OpensslAcceptor<T>> {
let acceptor = openssl_acceptor_with_flags(builder, flags)?;
if !protos.is_empty() {
builder.set_alpn_protos(&protos)?;
}
Ok(OpensslAcceptor {
acceptor: builder.build(),
})
Ok(ssl::OpensslAcceptor::new(acceptor))
}
}
pub struct AcceptorFut<Io>(AcceptAsync<Io>);
impl<Io: IoStream> Future for AcceptorFut<Io> {
type Item = SslStream<Io>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.0
.poll()
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
/// Configure `SslAcceptorBuilder` with custom server flags.
pub fn openssl_acceptor_with_flags(
mut builder: SslAcceptorBuilder, flags: ServerFlags,
) -> io::Result<SslAcceptor> {
let mut protos = Vec::new();
if flags.contains(ServerFlags::HTTP1) {
protos.extend(b"\x08http/1.1");
}
}
impl<Io: IoStream> AcceptorService<Io> for OpensslAcceptor {
type Accepted = SslStream<Io>;
type Future = AcceptorFut<Io>;
fn scheme(&self) -> &'static str {
"https"
if flags.contains(ServerFlags::HTTP2) {
protos.extend(b"\x02h2");
builder.set_alpn_select_callback(|_, protos| {
const H2: &[u8] = b"\x02h2";
if protos.windows(3).any(|window| window == H2) {
Ok(b"h2")
} else {
Err(AlpnError::NOACK)
}
});
}
fn accept(&self, io: Io) -> Self::Future {
AcceptorFut(SslAcceptorExt::accept_async(&self.acceptor, io))
if !protos.is_empty() {
builder.set_alpn_protos(&protos)?;
}
Ok(builder.build())
}
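A hedged sketch of the new free function; `builder` is assumed to be an openssl `SslAcceptorBuilder` already loaded with keys and certificates, and the `?` assumes a surrounding function returning `io::Result`.

    // Hedged sketch: restrict ALPN negotiation to HTTP/1.1 only.
    let acceptor = openssl_acceptor_with_flags(builder, ServerFlags::HTTP1)?;

The resulting `SslAcceptor` can then be wrapped by actix-net's `ssl::OpensslAcceptor::new`, as `with_flags` above does.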
impl<T: IoStream> IoStream for SslStream<T> {
@@ -84,6 +65,11 @@ impl<T: IoStream> IoStream for SslStream<T> {
Ok(())
}
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
self.get_ref().get_ref().peer_addr()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
@@ -93,4 +79,9 @@ impl<T: IoStream> IoStream for SslStream<T> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_keepalive(dur)
}
}


@@ -1,29 +1,25 @@
use std::net::Shutdown;
use std::sync::Arc;
use std::net::{Shutdown, SocketAddr};
use std::{io, time};
use actix_net::ssl; //::RustlsAcceptor;
use rustls::{ClientSession, ServerConfig, ServerSession};
use tokio_io::AsyncWrite;
use tokio_rustls::{AcceptAsync, ServerConfigExt, TlsStream};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_rustls::TlsStream;
use server::{AcceptorService, IoStream, ServerFlags};
use server::{IoStream, ServerFlags};
#[derive(Clone)]
/// Support `SSL` connections via rustls package
///
/// `rust-tls` feature enables `RustlsAcceptor` type
pub struct RustlsAcceptor {
config: Arc<ServerConfig>,
pub struct RustlsAcceptor<T> {
_t: ssl::RustlsAcceptor<T>,
}
impl RustlsAcceptor {
/// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support.
pub fn new(config: ServerConfig) -> Self {
RustlsAcceptor::with_flags(config, ServerFlags::HTTP1 | ServerFlags::HTTP2)
}
/// Create `OpensslAcceptor` with custom server flags.
pub fn with_flags(mut config: ServerConfig, flags: ServerFlags) -> Self {
impl<T: AsyncRead + AsyncWrite> RustlsAcceptor<T> {
/// Create `RustlsAcceptor` with custom server flags.
pub fn with_flags(
mut config: ServerConfig, flags: ServerFlags,
) -> ssl::RustlsAcceptor<T> {
let mut protos = Vec::new();
if flags.contains(ServerFlags::HTTP2) {
protos.push("h2".to_string());
@@ -35,22 +31,7 @@ impl RustlsAcceptor {
config.set_protocols(&protos);
}
ssl::RustlsAcceptor::new(config)
}
}
RustlsAcceptor {
config: Arc::new(config),
}
}
}
impl<Io: IoStream> AcceptorService<Io> for RustlsAcceptor {
type Accepted = TlsStream<Io, ServerSession>;
type Future = AcceptAsync<Io>;
fn scheme(&self) -> &'static str {
"https"
}
fn accept(&self, io: Io) -> Self::Future {
ServerConfigExt::accept_async(&self.config, io)
}
}
@@ -70,6 +51,11 @@ impl<Io: IoStream> IoStream for TlsStream<Io, ClientSession> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_keepalive(dur)
}
}
impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> {
@@ -79,6 +65,11 @@ impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> {
Ok(())
}
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
self.get_ref().0.peer_addr()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().0.set_nodelay(nodelay)
@@ -88,4 +79,9 @@ impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_keepalive(dur)
}
}
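For reference, a sketch (not from the diff) of feeding a rustls `ServerConfig` into the new `with_flags` constructor; the certificate paths and the HTTP/1.1-only flag are assumptions, and the config-loading steps mirror the rustls test shown later in this comparison.

use rustls::internal::pemfile::{certs, rsa_private_keys};
use rustls::{NoClientAuth, ServerConfig};
use std::fs::File;
use std::io::BufReader;

// Build a minimal ServerConfig from PEM files (illustrative paths).
fn build_rustls_config() -> ServerConfig {
    let mut config = ServerConfig::new(NoClientAuth::new());
    let cert_file = &mut BufReader::new(File::open("tests/cert.pem").unwrap());
    let key_file = &mut BufReader::new(File::open("tests/key.pem").unwrap());
    let cert_chain = certs(cert_file).unwrap();
    let mut keys = rsa_private_keys(key_file).unwrap();
    config.set_single_cert(cert_chain, keys.remove(0)).unwrap();
    config
}

// The acceptor itself would then be created with something like:
// let acceptor = RustlsAcceptor::with_flags(build_rustls_config(), ServerFlags::HTTP1);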

View File

@@ -1,139 +0,0 @@
use std::{net, time};
use futures::sync::mpsc::{SendError, UnboundedSender};
use futures::sync::oneshot;
use futures::Future;
use actix::msgs::StopArbiter;
use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message, Response};
use super::server::{Connections, ServiceHandler};
use super::Token;
#[derive(Message)]
pub(crate) struct Conn<T> {
pub io: T,
pub handler: Token,
pub token: Token,
pub peer: Option<net::SocketAddr>,
}
pub(crate) struct Socket {
pub lst: net::TcpListener,
pub addr: net::SocketAddr,
pub token: Token,
}
#[derive(Clone)]
pub(crate) struct WorkerClient {
pub idx: usize,
tx: UnboundedSender<Conn<net::TcpStream>>,
conns: Connections,
}
impl WorkerClient {
pub fn new(
idx: usize, tx: UnboundedSender<Conn<net::TcpStream>>, conns: Connections,
) -> Self {
WorkerClient { idx, tx, conns }
}
pub fn send(
&self, msg: Conn<net::TcpStream>,
) -> Result<(), SendError<Conn<net::TcpStream>>> {
self.tx.unbounded_send(msg)
}
pub fn available(&self) -> bool {
self.conns.available()
}
}
/// Stop worker message. Returns `true` on successful shutdown
/// and `false` if some connections are still alive.
pub(crate) struct StopWorker {
pub graceful: Option<time::Duration>,
}
impl Message for StopWorker {
type Result = Result<bool, ()>;
}
/// Http worker
///
/// Worker accepts Socket objects via an unbounded channel and starts request
/// processing.
pub(crate) struct Worker {
conns: Connections,
handlers: Vec<Box<ServiceHandler>>,
}
impl Actor for Worker {
type Context = Context<Self>;
}
impl Worker {
pub(crate) fn new(conns: Connections, handlers: Vec<Box<ServiceHandler>>) -> Self {
Worker { conns, handlers }
}
fn shutdown(&self, force: bool) {
self.handlers.iter().for_each(|h| h.shutdown(force));
}
fn shutdown_timeout(
&self, ctx: &mut Context<Worker>, tx: oneshot::Sender<bool>, dur: time::Duration,
) {
// sleep for 1 second and then check again
ctx.run_later(time::Duration::new(1, 0), move |slf, ctx| {
let num = slf.conns.num_connections();
if num == 0 {
let _ = tx.send(true);
Arbiter::current().do_send(StopArbiter(0));
} else if let Some(d) = dur.checked_sub(time::Duration::new(1, 0)) {
slf.shutdown_timeout(ctx, tx, d);
} else {
info!("Force shutdown http worker, {} connections", num);
slf.shutdown(true);
let _ = tx.send(false);
Arbiter::current().do_send(StopArbiter(0));
}
});
}
}
impl Handler<Conn<net::TcpStream>> for Worker {
type Result = ();
fn handle(&mut self, msg: Conn<net::TcpStream>, _: &mut Context<Self>) {
self.handlers[msg.handler.0].handle(msg.token, msg.io, msg.peer)
}
}
/// `StopWorker` message handler
impl Handler<StopWorker> for Worker {
type Result = Response<bool, ()>;
fn handle(&mut self, msg: StopWorker, ctx: &mut Context<Self>) -> Self::Result {
let num = self.conns.num_connections();
if num == 0 {
info!("Shutting down http worker, 0 connections");
Response::reply(Ok(true))
} else if let Some(dur) = msg.graceful {
self.shutdown(false);
let (tx, rx) = oneshot::channel();
let num = self.conns.num_connections();
if num != 0 {
info!("Graceful http worker shutdown, {} connections", num);
self.shutdown_timeout(ctx, tx, dur);
Response::reply(Ok(true))
} else {
Response::async(rx.map_err(|_| ()))
}
} else {
info!("Force shutdown http worker, {} connections", num);
self.shutdown(true);
Response::reply(Ok(false))
}
}
}

View File

@@ -13,14 +13,10 @@ use http::{HeaderMap, HttpTryFrom, Method, Uri, Version};
use net2::TcpBuilder;
use tokio::runtime::current_thread::Runtime;
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
use openssl::ssl::SslAcceptorBuilder;
#[cfg(feature = "rust-tls")]
use rustls::ServerConfig;
#[cfg(feature = "alpn")]
use server::OpensslAcceptor;
#[cfg(feature = "rust-tls")]
use server::RustlsAcceptor;
use application::{App, HttpApplication};
use body::Binary;
@@ -79,13 +75,13 @@ impl TestServer {
/// middlewares or set handlers for test application.
pub fn new<F>(config: F) -> Self
where
F: Sync + Send + 'static + Fn(&mut TestApp<()>),
F: Clone + Send + 'static + Fn(&mut TestApp<()>),
{
TestServerBuilder::new(|| ()).start(config)
}
/// Create test server builder
pub fn build() -> TestServerBuilder<()> {
pub fn build() -> TestServerBuilder<(), impl Fn() -> () + Clone + Send + 'static> {
TestServerBuilder::new(|| ())
}
@@ -94,19 +90,18 @@ impl TestServer {
/// This method can be used for constructing application state.
/// It can also be used for external dependency initialization,
/// like creating sync actors for diesel integration.
pub fn build_with_state<F, S>(state: F) -> TestServerBuilder<S>
pub fn build_with_state<S, F>(state: F) -> TestServerBuilder<S, F>
where
F: Fn() -> S + Sync + Send + 'static,
F: Fn() -> S + Clone + Send + 'static,
S: 'static,
{
TestServerBuilder::new(state)
}
/// Start new test server with application factory
pub fn with_factory<F, U, H>(factory: F) -> Self
pub fn with_factory<F, H>(factory: F) -> Self
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
F: Fn() -> H + Send + Clone + 'static,
H: IntoHttpHandler + 'static,
{
let (tx, rx) = mpsc::channel();
@@ -117,7 +112,7 @@ impl TestServer {
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
HttpServer::new(factory)
let _ = HttpServer::new(factory)
.disable_signals()
.listen(tcp)
.keep_alive(5)
@@ -139,7 +134,7 @@ impl TestServer {
}
fn get_conn() -> Addr<ClientConnector> {
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
{
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
@@ -147,7 +142,10 @@ impl TestServer {
builder.set_verify(SslVerifyMode::NONE);
ClientConnector::with_connector(builder.build()).start()
}
#[cfg(all(feature = "rust-tls", not(feature = "alpn")))]
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "ssl"))
))]
{
use rustls::ClientConfig;
use std::fs::File;
@@ -157,7 +155,7 @@ impl TestServer {
config.root_store.add_pem_file(pem_file).unwrap();
ClientConnector::with_connector(config).start()
}
#[cfg(not(any(feature = "alpn", feature = "rust-tls")))]
#[cfg(not(any(feature = "alpn", feature = "ssl", feature = "rust-tls")))]
{
ClientConnector::default().start()
}
@@ -261,30 +259,33 @@ impl Drop for TestServer {
///
/// This type can be used to construct an instance of `TestServer` through a
/// builder-like pattern.
pub struct TestServerBuilder<S> {
state: Box<Fn() -> S + Sync + Send + 'static>,
#[cfg(feature = "alpn")]
pub struct TestServerBuilder<S, F>
where
F: Fn() -> S + Send + Clone + 'static,
{
state: F,
#[cfg(any(feature = "alpn", feature = "ssl"))]
ssl: Option<SslAcceptorBuilder>,
#[cfg(feature = "rust-tls")]
rust_ssl: Option<ServerConfig>,
}
impl<S: 'static> TestServerBuilder<S> {
impl<S: 'static, F> TestServerBuilder<S, F>
where
F: Fn() -> S + Send + Clone + 'static,
{
/// Create a new test server
pub fn new<F>(state: F) -> TestServerBuilder<S>
where
F: Fn() -> S + Sync + Send + 'static,
{
pub fn new(state: F) -> TestServerBuilder<S, F> {
TestServerBuilder {
state: Box::new(state),
#[cfg(feature = "alpn")]
state,
#[cfg(any(feature = "alpn", feature = "ssl"))]
ssl: None,
#[cfg(feature = "rust-tls")]
rust_ssl: None,
}
}
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
/// Create ssl server
pub fn ssl(mut self, ssl: SslAcceptorBuilder) -> Self {
self.ssl = Some(ssl);
@@ -300,15 +301,15 @@ impl<S: 'static> TestServerBuilder<S> {
#[allow(unused_mut)]
/// Configure test application and run test server
pub fn start<F>(mut self, config: F) -> TestServer
pub fn start<C>(mut self, config: C) -> TestServer
where
F: Sync + Send + 'static + Fn(&mut TestApp<S>),
C: Fn(&mut TestApp<S>) + Clone + Send + 'static,
{
let (tx, rx) = mpsc::channel();
let mut has_ssl = false;
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
{
has_ssl = has_ssl || self.ssl.is_some();
}
@@ -327,7 +328,7 @@ impl<S: 'static> TestServerBuilder<S> {
let mut srv = HttpServer::new(move || {
let mut app = TestApp::new(state());
config(&mut app);
vec![app]
app
}).workers(1)
.keep_alive(5)
.disable_signals();
@@ -335,12 +336,12 @@ impl<S: 'static> TestServerBuilder<S> {
tx.send((System::current(), addr, TestServer::get_conn()))
.unwrap();
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
{
let ssl = self.ssl.take();
if let Some(ssl) = ssl {
let tcp = net::TcpListener::bind(addr).unwrap();
srv = srv.listen_with(tcp, OpensslAcceptor::new(ssl).unwrap());
srv = srv.listen_ssl(tcp, ssl).unwrap();
}
}
#[cfg(feature = "rust-tls")]
@@ -348,7 +349,7 @@ impl<S: 'static> TestServerBuilder<S> {
let ssl = self.rust_ssl.take();
if let Some(ssl) = ssl {
let tcp = net::TcpListener::bind(addr).unwrap();
srv = srv.listen_with(tcp, RustlsAcceptor::new(ssl));
srv = srv.listen_rustls(tcp, ssl);
}
}
if !has_ssl {
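As a quick illustration of the updated builder signatures (the state factory and the config closure now only need to be `Clone + Send` rather than `Sync + Send`), here is a hedged sketch of setting up a test server with application state; the `AppState` type and the route are hypothetical.

use actix_web::{test, HttpRequest};

// Hypothetical state type, just for illustration.
struct AppState {
    greeting: &'static str,
}

fn make_test_server() -> test::TestServer {
    // The state factory is a plain closure; it is cloned into the server factory.
    test::TestServer::build_with_state(|| AppState { greeting: "hello" })
        .start(|app| {
            app.resource("/", |r| {
                // Handlers can read the state through the request as usual.
                r.f(|req: &HttpRequest<AppState>| req.state().greeting)
            });
        })
}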

View File

@@ -148,7 +148,7 @@ impl Quoter {
if let Some(data) = cloned {
// Unsafe: we get data from http::Uri, which does utf-8 checks already
// this code only decodes valid pct encoded values
Some(unsafe { Rc::new(String::from_utf8_unchecked(data)) })
Some(Rc::new(unsafe { String::from_utf8_unchecked(data) }))
} else {
None
}

View File

@@ -12,7 +12,6 @@ trait FnWith<T, R>: 'static {
}
impl<T, R, F: Fn(T) -> R + 'static> FnWith<T, R> for F {
#[cfg_attr(feature = "cargo-clippy", allow(boxed_local))]
fn call_with(self: &Self, arg: T) -> R {
(*self)(arg)
}
@@ -42,24 +41,6 @@ where
fn create_with_config(self, T::Config) -> WithAsync<T, S, R, I, E>;
}
// impl<T1, T2, T3, S, F, R> WithFactory<(T1, T2, T3), S, R> for F
// where F: Fn(T1, T2, T3) -> R + 'static,
// T1: FromRequest<S> + 'static,
// T2: FromRequest<S> + 'static,
// T3: FromRequest<S> + 'static,
// R: Responder + 'static,
// S: 'static,
// {
// fn create(self) -> With<(T1, T2, T3), S, R> {
// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), (
// T1::Config::default(), T2::Config::default(), T3::Config::default()))
// }
// fn create_with_config(self, cfg: (T1::Config, T2::Config, T3::Config,)) -> With<(T1, T2, T3), S, R> {
// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), cfg)
// }
// }
#[doc(hidden)]
pub struct With<T, S, R>
where

View File

@@ -46,7 +46,7 @@ impl Frame {
Frame::message(payload, OpCode::Close, true, genmask)
}
#[cfg_attr(feature = "cargo-clippy", allow(type_complexity))]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))]
fn read_copy_md<S>(
pl: &mut PayloadBuffer<S>, server: bool, max_size: usize,
) -> Poll<Option<(usize, bool, OpCode, usize, Option<u32>)>, ProtocolError>

View File

@@ -1,5 +1,5 @@
//! This is code from [Tungstenite project](https://github.com/snapview/tungstenite-rs)
#![cfg_attr(feature = "cargo-clippy", allow(cast_ptr_alignment))]
#![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
use std::ptr::copy_nonoverlapping;
use std::slice;
@@ -19,7 +19,7 @@ impl<'a> ShortSlice<'a> {
/// Faster version of `apply_mask()` which operates on 8-byte blocks.
#[inline]
#[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))]
pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) {
// Extend the mask to 64 bits
let mut mask_u64 = ((mask_u32 as u64) << 32) | (mask_u32 as u64);
@@ -50,7 +50,10 @@ pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) {
// TODO: copy_nonoverlapping here compiles to call memcpy. While it is not so
// inefficient, it could be done better. The compiler does not understand that
// a `ShortSlice` must be smaller than a u64.
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
#[cfg_attr(
feature = "cargo-clippy",
allow(clippy::needless_pass_by_value)
)]
fn xor_short(buf: ShortSlice, mask: u64) {
// Unsafe: we know that a `ShortSlice` fits in a u64
unsafe {
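To make the masking invariant explicit, a small standalone sketch (not from the diff) of the byte-wise XOR that the optimized 8-byte-block code above speeds up: applying the same 4-byte mask twice restores the payload.

// Naive reference version of WebSocket frame masking (RFC 6455 style XOR).
fn apply_mask_naive(buf: &mut [u8], mask: [u8; 4]) {
    for (i, byte) in buf.iter_mut().enumerate() {
        *byte ^= mask[i % 4];
    }
}

fn main() {
    let mask = [0x12, 0x34, 0x56, 0x78];
    let mut payload = b"hello websocket".to_vec();
    let original = payload.clone();
    apply_mask_naive(&mut payload, mask); // mask
    assert_ne!(payload, original);
    apply_mask_naive(&mut payload, mask); // unmask
    assert_eq!(payload, original);
}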

BIN
tests/identity.pfx Normal file

Binary file not shown.

View File

@@ -0,0 +1,81 @@
extern crate actix;
extern crate actix_net;
extern crate actix_web;
use std::{thread, time};
use actix::System;
use actix_net::server::Server;
use actix_net::service::NewServiceExt;
use actix_web::server::{HttpService, KeepAlive, ServiceConfig, StreamConfiguration};
use actix_web::{client, http, test, App, HttpRequest};
#[test]
fn test_custom_pipeline() {
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
Server::new()
.bind("test", addr, move || {
let app = App::new()
.route("/", http::Method::GET, |_: HttpRequest| "OK")
.finish();
let settings = ServiceConfig::build(app)
.keep_alive(KeepAlive::Disabled)
.client_timeout(1000)
.client_shutdown(1000)
.server_hostname("localhost")
.server_address(addr)
.finish();
StreamConfiguration::new()
.nodelay(true)
.tcp_keepalive(Some(time::Duration::from_secs(10)))
.and_then(HttpService::new(settings))
}).unwrap()
.run();
});
let mut sys = System::new("test");
{
let req = client::ClientRequest::get(format!("http://{}/", addr).as_str())
.finish()
.unwrap();
let response = sys.block_on(req.send()).unwrap();
assert!(response.status().is_success());
}
}
#[test]
fn test_h1() {
use actix_web::server::H1Service;
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
Server::new()
.bind("test", addr, move || {
let app = App::new()
.route("/", http::Method::GET, |_: HttpRequest| "OK")
.finish();
let settings = ServiceConfig::build(app)
.keep_alive(KeepAlive::Disabled)
.client_timeout(1000)
.client_shutdown(1000)
.server_hostname("localhost")
.server_address(addr)
.finish();
H1Service::new(settings)
}).unwrap()
.run();
});
let mut sys = System::new("test");
{
let req = client::ClientRequest::get(format!("http://{}/", addr).as_str())
.finish()
.unwrap();
let response = sys.block_on(req.send()).unwrap();
assert!(response.status().is_success());
}
}

View File

@@ -1,4 +1,5 @@
extern crate actix;
extern crate actix_net;
extern crate actix_web;
#[cfg(feature = "brotli")]
extern crate brotli2;
@@ -9,9 +10,18 @@ extern crate h2;
extern crate http as modhttp;
extern crate rand;
extern crate tokio;
extern crate tokio_current_thread;
extern crate tokio_current_thread as current_thread;
extern crate tokio_reactor;
extern crate tokio_tcp;
#[cfg(feature = "tls")]
extern crate native_tls;
#[cfg(feature = "ssl")]
extern crate openssl;
#[cfg(feature = "rust-tls")]
extern crate rustls;
use std::io::{Read, Write};
use std::sync::Arc;
use std::{thread, time};
@@ -28,8 +38,8 @@ use h2::client as h2client;
use modhttp::Request;
use rand::distributions::Alphanumeric;
use rand::Rng;
use tokio::executor::current_thread;
use tokio::runtime::current_thread::Runtime;
use tokio_current_thread::spawn;
use tokio_tcp::TcpStream;
use actix_web::*;
@@ -883,6 +893,209 @@ fn test_brotli_encoding_large() {
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(all(feature = "brotli", feature = "ssl"))]
#[test]
fn test_brotli_encoding_large_ssl() {
use actix::{Actor, System};
use openssl::ssl::{
SslAcceptor, SslConnector, SslFiletype, SslMethod, SslVerifyMode,
};
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder
.set_private_key_file("tests/key.pem", SslFiletype::PEM)
.unwrap();
builder
.set_certificate_chain_file("tests/cert.pem")
.unwrap();
let data = STR.repeat(10);
let srv = test::TestServer::build().ssl(builder).start(|app| {
app.handler(|req: &HttpRequest| {
req.body()
.and_then(|bytes: Bytes| {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
}).responder()
})
});
let mut rt = System::new("test");
// client connector
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let conn = client::ClientConnector::with_connector(builder.build()).start();
// body
let mut e = BrotliEncoder::new(Vec::new(), 5);
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let request = client::ClientRequest::build()
.uri(srv.url("/"))
.method(http::Method::POST)
.header(http::header::CONTENT_ENCODING, "br")
.with_connector(conn)
.body(enc)
.unwrap();
let response = rt.block_on(request.send()).unwrap();
assert!(response.status().is_success());
// read response
let bytes = rt.block_on(response.body()).unwrap();
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(all(feature = "rust-tls", feature = "ssl"))]
#[test]
fn test_reading_deflate_encoding_large_random_ssl() {
use actix::{Actor, System};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use rustls::internal::pemfile::{certs, rsa_private_keys};
use rustls::{NoClientAuth, ServerConfig};
use std::fs::File;
use std::io::BufReader;
// load ssl keys
let mut config = ServerConfig::new(NoClientAuth::new());
let cert_file = &mut BufReader::new(File::open("tests/cert.pem").unwrap());
let key_file = &mut BufReader::new(File::open("tests/key.pem").unwrap());
let cert_chain = certs(cert_file).unwrap();
let mut keys = rsa_private_keys(key_file).unwrap();
config.set_single_cert(cert_chain, keys.remove(0)).unwrap();
let data = rand::thread_rng()
.sample_iter(&Alphanumeric)
.take(160_000)
.collect::<String>();
let srv = test::TestServer::build().rustls(config).start(|app| {
app.handler(|req: &HttpRequest| {
req.body()
.and_then(|bytes: Bytes| {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
}).responder()
})
});
let mut rt = System::new("test");
// client connector
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let conn = client::ClientConnector::with_connector(builder.build()).start();
// encode data
let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let request = client::ClientRequest::build()
.uri(srv.url("/"))
.method(http::Method::POST)
.header(http::header::CONTENT_ENCODING, "deflate")
.with_connector(conn)
.body(enc)
.unwrap();
let response = rt.block_on(request.send()).unwrap();
assert!(response.status().is_success());
// read response
let bytes = rt.block_on(response.body()).unwrap();
assert_eq!(bytes.len(), data.len());
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(all(feature = "tls", feature = "ssl"))]
#[test]
fn test_reading_deflate_encoding_large_random_tls() {
use native_tls::{Identity, TlsAcceptor};
use openssl::ssl::{
SslAcceptor, SslConnector, SslFiletype, SslMethod, SslVerifyMode,
};
use std::fs::File;
use std::sync::mpsc;
use actix::{Actor, System};
let (tx, rx) = mpsc::channel();
// load ssl keys
let mut file = File::open("tests/identity.pfx").unwrap();
let mut identity = vec![];
file.read_to_end(&mut identity).unwrap();
let identity = Identity::from_pkcs12(&identity, "1").unwrap();
let acceptor = TlsAcceptor::new(identity).unwrap();
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder
.set_private_key_file("tests/key.pem", SslFiletype::PEM)
.unwrap();
builder
.set_certificate_chain_file("tests/cert.pem")
.unwrap();
let data = rand::thread_rng()
.sample_iter(&Alphanumeric)
.take(160_000)
.collect::<String>();
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
System::run(move || {
server::new(|| {
App::new().handler("/", |req: &HttpRequest| {
req.body()
.and_then(|bytes: Bytes| {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
}).responder()
})
}).bind_tls(addr, acceptor)
.unwrap()
.start();
let _ = tx.send(System::current());
});
});
let sys = rx.recv().unwrap();
let mut rt = System::new("test");
// client connector
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let conn = client::ClientConnector::with_connector(builder.build()).start();
// encode data
let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let request = client::ClientRequest::build()
.uri(format!("https://{}/", addr))
.method(http::Method::POST)
.header(http::header::CONTENT_ENCODING, "deflate")
.with_connector(conn)
.body(enc)
.unwrap();
let response = rt.block_on(request.send()).unwrap();
assert!(response.status().is_success());
// read response
let bytes = rt.block_on(response.body()).unwrap();
assert_eq!(bytes.len(), data.len());
assert_eq!(bytes, Bytes::from(data));
let _ = sys.stop();
}
#[test]
fn test_h2() {
let srv = test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR)));
@@ -904,7 +1117,7 @@ fn test_h2() {
let (response, _) = client.send_request(request, false).unwrap();
// Spawn a task to run the conn...
current_thread::spawn(h2.map_err(|e| println!("GOT ERR={:?}", e)));
spawn(h2.map_err(|e| println!("GOT ERR={:?}", e)));
response.and_then(|response| {
assert_eq!(response.status(), http::StatusCode::OK);
@@ -1008,3 +1221,180 @@ fn test_server_cookies() {
assert_eq!(cookies[1], first_cookie);
}
}
#[test]
fn test_slow_request() {
use actix::System;
use std::net;
use std::sync::mpsc;
let (tx, rx) = mpsc::channel();
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
System::run(move || {
let srv = server::new(|| {
vec![App::new().resource("/", |r| {
r.method(http::Method::GET).f(|_| HttpResponse::Ok())
})]
});
let srv = srv.bind(addr).unwrap();
srv.client_timeout(200).start();
let _ = tx.send(System::current());
});
});
let sys = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(200));
let mut stream = net::TcpStream::connect(addr).unwrap();
let mut data = String::new();
let _ = stream.read_to_string(&mut data);
assert!(data.starts_with("HTTP/1.1 408 Request Timeout"));
let mut stream = net::TcpStream::connect(addr).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n");
let mut data = String::new();
let _ = stream.read_to_string(&mut data);
assert!(data.starts_with("HTTP/1.1 408 Request Timeout"));
sys.stop();
}
#[test]
fn test_malformed_request() {
use actix::System;
use std::net;
use std::sync::mpsc;
let (tx, rx) = mpsc::channel();
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
System::run(move || {
let srv = server::new(|| {
App::new().resource("/", |r| {
r.method(http::Method::GET).f(|_| HttpResponse::Ok())
})
});
let _ = srv.bind(addr).unwrap().start();
let _ = tx.send(System::current());
});
});
let sys = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(200));
let mut stream = net::TcpStream::connect(addr).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP1.1\r\n");
let mut data = String::new();
let _ = stream.read_to_string(&mut data);
assert!(data.starts_with("HTTP/1.1 400 Bad Request"));
sys.stop();
}
#[test]
fn test_app_404() {
let mut srv = test::TestServer::with_factory(|| {
App::new().prefix("/prefix").resource("/", |r| {
r.method(http::Method::GET).f(|_| HttpResponse::Ok())
})
});
let request = srv.client(http::Method::GET, "/prefix/").finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert!(response.status().is_success());
let request = srv.client(http::Method::GET, "/").finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert_eq!(response.status(), http::StatusCode::NOT_FOUND);
}
#[test]
#[cfg(feature = "ssl")]
fn test_ssl_handshake_timeout() {
use actix::System;
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
use std::net;
use std::sync::mpsc;
let (tx, rx) = mpsc::channel();
let addr = test::TestServer::unused_addr();
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder
.set_private_key_file("tests/key.pem", SslFiletype::PEM)
.unwrap();
builder
.set_certificate_chain_file("tests/cert.pem")
.unwrap();
thread::spawn(move || {
System::run(move || {
let srv = server::new(|| {
App::new().resource("/", |r| {
r.method(http::Method::GET).f(|_| HttpResponse::Ok())
})
});
srv.bind_ssl(addr, builder)
.unwrap()
.workers(1)
.client_timeout(200)
.start();
let _ = tx.send(System::current());
});
});
let sys = rx.recv().unwrap();
let mut stream = net::TcpStream::connect(addr).unwrap();
let mut data = String::new();
let _ = stream.read_to_string(&mut data);
assert!(data.is_empty());
let _ = sys.stop();
}
#[test]
fn test_content_length() {
use actix_web::http::header::{HeaderName, HeaderValue};
use http::StatusCode;
let mut srv = test::TestServer::new(move |app| {
app.resource("/{status}", |r| {
r.f(|req: &HttpRequest| {
let indx: usize =
req.match_info().get("status").unwrap().parse().unwrap();
let statuses = [
StatusCode::NO_CONTENT,
StatusCode::CONTINUE,
StatusCode::SWITCHING_PROTOCOLS,
StatusCode::PROCESSING,
StatusCode::OK,
StatusCode::NOT_FOUND,
];
HttpResponse::new(statuses[indx])
})
});
});
let addr = srv.addr();
let mut get_resp = |i| {
let url = format!("http://{}/{}", addr, i);
let req = srv.get().uri(url).finish().unwrap();
srv.execute(req.send()).unwrap()
};
let header = HeaderName::from_static("content-length");
let value = HeaderValue::from_static("0");
for i in 0..4 {
let response = get_resp(i);
assert_eq!(response.headers().get(&header), None);
}
for i in 4..6 {
let response = get_resp(i);
assert_eq!(response.headers().get(&header), Some(&value));
}
}

View File

@@ -5,12 +5,16 @@ extern crate futures;
extern crate http;
extern crate rand;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::{thread, time};
use bytes::Bytes;
use futures::Stream;
use rand::distributions::Alphanumeric;
use rand::Rng;
#[cfg(feature = "alpn")]
#[cfg(feature = "ssl")]
extern crate openssl;
#[cfg(feature = "rust-tls")]
extern crate rustls;
@@ -278,9 +282,8 @@ fn test_server_send_bin() {
}
#[test]
#[cfg(feature = "alpn")]
#[cfg(feature = "ssl")]
fn test_ws_server_ssl() {
extern crate openssl;
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
// load ssl keys
@@ -316,7 +319,6 @@ fn test_ws_server_ssl() {
#[test]
#[cfg(feature = "rust-tls")]
fn test_ws_server_rust_tls() {
extern crate rustls;
use rustls::internal::pemfile::{certs, rsa_private_keys};
use rustls::{NoClientAuth, ServerConfig};
use std::fs::File;
@@ -351,3 +353,42 @@ fn test_ws_server_rust_tls() {
assert_eq!(item, data);
}
}
struct WsStopped(Arc<AtomicUsize>);
impl Actor for WsStopped {
type Context = ws::WebsocketContext<Self>;
fn stopped(&mut self, _: &mut Self::Context) {
self.0.fetch_add(1, Ordering::Relaxed);
}
}
impl StreamHandler<ws::Message, ws::ProtocolError> for WsStopped {
fn handle(&mut self, msg: ws::Message, ctx: &mut Self::Context) {
match msg {
ws::Message::Text(text) => ctx.text(text),
_ => (),
}
}
}
#[test]
fn test_ws_stopped() {
let num = Arc::new(AtomicUsize::new(0));
let num2 = num.clone();
let mut srv = test::TestServer::new(move |app| {
let num3 = num2.clone();
app.handler(move |req| ws::start(req, WsStopped(num3.clone())))
});
{
let (reader, mut writer) = srv.ws().unwrap();
writer.text("text");
let (item, _) = srv.execute(reader.into_future()).unwrap();
assert_eq!(item, Some(ws::Message::Text("text".to_owned())));
}
thread::sleep(time::Duration::from_millis(1000));
assert_eq!(num.load(Ordering::Relaxed), 1);
}