mirror of https://github.com/fafhrd91/actix-web synced 2025-07-03 17:41:30 +02:00

Compare commits

101 Commits

Author SHA1 Message Date
c3f8b5cf22 clippy warnings 2018-09-11 11:25:32 -07:00
70a3f317d3 fix failing requests to test server #508 2018-09-11 11:24:05 -07:00
513c8ec1ce Merge pull request #505 from Neopallium/master
Fix issue with HttpChannel linked list.
2018-09-11 11:18:33 -07:00
04608b2ea6 Update changes. 2018-09-12 00:27:15 +08:00
70b45659e2 Make Node's traverse method take a closure instead of calling shutdown on each HttpChannel. 2018-09-12 00:27:15 +08:00
e0ae6b10cd Fix bug with HttpChannel linked list. 2018-09-12 00:27:15 +08:00
003b05b095 Don't ignore errors in std::fmt::Debug implementations (#506) 2018-09-11 14:57:55 +03:00
cdb57b840e prepare release 2018-09-07 20:47:54 -07:00
002bb24b26 unhide SessionBackend and SessionImpl traits and cleanup warnings 2018-09-07 20:46:43 -07:00
51982b3fec Merge pull request #503 from uzytkownik/route-regex
Refactor resource route parsing to allow repetition in the regexes
2018-09-07 20:19:31 -07:00
4251b0bc10 Refactor resource route parsing to allow repetition in the regexes 2018-09-06 08:51:55 +02:00
42f3773bec update changes 2018-09-05 09:03:58 -07:00
86fdbb47a5 Fix system_exit in HttpServer (#501) 2018-09-05 10:41:23 +02:00
4ca9fd2ad1 remove debug print 2018-09-03 22:09:12 -07:00
f0f67072ae Read client response until eof if connection header set to close #464 2018-09-03 21:35:59 -07:00
24d1228943 simplify handler path processing 2018-09-03 11:28:47 -07:00
b7a73e0a4f fix Scope::handler doc test 2018-09-02 08:51:26 -07:00
968c81e267 Handling scoped paths without leading slashes #460 2018-09-02 08:14:54 -07:00
d5957a8466 Merge branch 'master' of https://github.com/actix/actix-web 2018-09-02 07:47:45 -07:00
f2f05e7715 allow to register handlers on scope level #465 2018-09-02 07:47:19 -07:00
3439f55288 doc: Add example for using custom nativetls connector (#497) 2018-09-01 18:13:52 +03:00
0425e2776f Fix Issue #490 (#498)
* Add failing testcase for HTTP 404 response with no reason text.

* Include canonical reason test for HTTP error responses.

* Don't send a reason for unknown status codes.
2018-09-01 12:00:32 +03:00
6464f96f8b Merge branch 'master' of https://github.com/actix/actix-web 2018-08-31 18:56:53 -07:00
a2b170fec9 fmt 2018-08-31 18:56:21 -07:00
0b42cae082 update tests 2018-08-31 18:54:19 -07:00
c313c003a4 Fix typo 2018-08-31 17:45:29 -07:00
3fa23f5e10 update version 2018-08-31 17:25:15 -07:00
2d51831899 handle socket read disconnect 2018-08-31 17:24:13 -07:00
e59abfd716 Merge pull request #496 from Neopallium/master
Fix issue with 'Connection: close' in ClientRequest
2018-08-31 17:17:39 -07:00
66881d7dd1 If buffer is empty, read more data before calling parser. 2018-09-01 02:25:05 +08:00
a42a8a2321 Add some comments to clarify logic. 2018-09-01 02:15:36 +08:00
2341656173 Simplify buffer reading logic. Remove duplicate code. 2018-09-01 01:41:38 +08:00
487519acec Add client test for 'Connection: close' as reported in issue #495 2018-09-01 00:34:19 +08:00
af6caa92c8 Merge branch 'master' into master 2018-09-01 00:17:34 +08:00
3ccbce6bc8 Fix issue with 'Connection: close' in ClientRequest 2018-09-01 00:08:53 +08:00
797b52ecbf Update CHANGES.md 2018-08-29 20:58:23 +02:00
4bab50c861 Add ability to pass a custom TlsConnector (#491) 2018-08-29 20:53:31 +02:00
5906971b6d Merge pull request #483 from Neopallium/master
Fix bug with client disconnect immediately after receiving http request.
2018-08-26 10:15:25 -07:00
8393d09a0f Fix tests. 2018-08-27 00:31:31 +08:00
c3ae9997fc Fix bug with http1 client disconnects. 2018-08-26 22:21:05 +08:00
d39dcc58cd Merge pull request #482 from 0x1793d1/master
Fix server startup log message
2018-08-24 20:53:45 -07:00
471a3e9806 Fix server startup log message 2018-08-24 23:21:32 +02:00
48ef18ffa9 update changes 2018-08-23 12:54:59 -07:00
9ef7a9c182 hide AcceptorService 2018-08-23 11:30:49 -07:00
3dafe6c251 hide token and server flags 2018-08-23 11:30:07 -07:00
8dfc34e785 fix tokio-tls IoStream impl 2018-08-23 10:27:32 -07:00
810995ade0 fix tokio-tls dependency #480 2018-08-23 10:10:13 -07:00
1716380f08 clippy fmt 2018-08-23 09:48:01 -07:00
e9c139bdea clippy warnings 2018-08-23 09:47:32 -07:00
cf54be2f17 hide new server api 2018-08-23 09:39:11 -07:00
f39b520a2d Merge pull request #478 from fzgregor/master
Made extensions constructor public
2018-08-23 09:34:47 -07:00
89f414477c Merge branch 'master' into master 2018-08-23 09:34:34 -07:00
986f19af86 Revert back to serde_urlencoded dependecy (#479) 2018-08-21 22:23:17 +03:00
e680541e10 Made extensions constructor public 2018-08-18 19:32:28 +02:00
56bc900a82 Set minimum rustls version that fixes corruption (#474) 2018-08-17 19:53:16 +03:00
bdc9a8bb07 Optionally support tokio-uds's UnixStream as IoStream (#472) 2018-08-17 19:04:15 +03:00
8fe30a5b66 Merge pull request #473 from kornelski/usetest
Fix tests on Unix
2018-08-17 07:20:47 -07:00
a8405d0686 Fix tests on Unix 2018-08-17 13:13:48 +01:00
eb1e9a785f allow to use fn with multiple arguments with .with()/.with_async() 2018-08-16 20:29:06 -07:00
248bd388ca Improve HTTP server docs (#470) 2018-08-16 16:11:15 +03:00
9f5641c85b Add mention of reworked Content-Disposition 2018-08-13 17:37:00 +03:00
d9c7cd96a6 Rework Content-Disposition parsing totally (#461) 2018-08-13 17:34:05 +03:00
bf7779a9a3 add TestRequest::run_async_result helper method 2018-08-09 18:58:14 -07:00
cc3fbd27e0 better ergonomics 2018-08-09 17:25:23 -07:00
26629aafa5 explicit use 2018-08-09 13:41:13 -07:00
2ab7dbadce better ergonomics for Server::service() method 2018-08-09 13:38:10 -07:00
2e8d67e2ae upgrade native-tls package 2018-08-09 13:08:59 -07:00
43b6828ab5 Merge branch 'master' of https://github.com/actix/actix-web 2018-08-09 11:52:45 -07:00
e4ce6dfbdf refactor workers management 2018-08-09 11:52:32 -07:00
6b9fa2c3d9 Merge pull request #458 from davidMcneil/master
Add json2 HttpResponseBuilder method
2018-08-09 02:10:14 -07:00
5713d93158 Merge branch 'master' into master 2018-08-09 08:13:22 +03:00
cfe4829a56 add TestRequest::execute() helper method 2018-08-08 16:13:45 -07:00
b69774db61 fix attr name 2018-08-08 14:23:16 -07:00
542782f28a add HttpRequest::drop_state() 2018-08-08 13:57:13 -07:00
7c8dc4c201 Add json2 tests 2018-08-08 12:17:19 -06:00
7a11c2eac1 Add json2 HttpResponseBuilder method 2018-08-08 11:11:15 -06:00
8eb9eb4247 flush io on complete 2018-08-08 09:12:32 -07:00
992f7a11b3 remove debug println 2018-08-07 22:40:09 -07:00
30769e3072 fix http/2 error handling 2018-08-07 20:48:25 -07:00
57f991280c fix protocol order for rustls acceptor 2018-08-07 13:53:24 -07:00
85acc3f8df deprecate HttpServer::no_http2(), update changes 2018-08-07 12:49:40 -07:00
5bd82d4f03 update changes 2018-08-07 12:00:51 -07:00
58a079bd10 include content-length to error response 2018-08-07 11:56:39 -07:00
16546a707f Merge pull request #453 from DoumanAsh/reserve_status_line_for_server_error
Reserve enough space for ServerError task to write status line
2018-08-07 11:48:55 -07:00
86a5afb5ca Reserve enough space for ServerError task to write status line 2018-08-07 17:34:24 +03:00
9c80d3aa77 Write non-80 port in HOST of client's request (#451) 2018-08-07 10:01:29 +03:00
954f1a0b0f impl FromRequest for () (#449) 2018-08-06 10:44:08 +03:00
f4fba5f481 Merge pull request #447 from DoumanAsh/multiple_set_cookies
Correct setting cookies in HTTP2 writer
2018-08-04 08:58:12 -07:00
995f819eae Merge branch 'master' into multiple_set_cookies 2018-08-04 08:58:00 -07:00
85e7548088 fix adding multiple response headers for http/2 #446 2018-08-04 08:56:33 -07:00
900fd5a98e Correct settings headers for HTTP2
Add test to verify number of Set-Cookies
2018-08-04 18:05:41 +03:00
84b27db218 fix no_http2 flag 2018-08-03 19:40:43 -07:00
ac9180ac46 simplify channel impl 2018-08-03 19:32:46 -07:00
e34b5c08ba allow to pass extra information from acceptor to application level 2018-08-03 19:24:53 -07:00
f3f1e04853 refactor ssl support 2018-08-03 16:09:46 -07:00
036cf5e867 update changes 2018-08-03 08:20:59 -07:00
e61ef7dee4 Use zlib instead of deflate for content encoding (#442) 2018-08-03 14:56:26 +02:00
9a10d8aa7a Fixed headers' formating for CORS Middleware Access-Control-Expose-Headers header value to HTTP/1.1 & HTTP/2 spec-compliant format (#436) 2018-08-03 15:03:11 +03:00
f8e5d7c6c1 Fixed broken build on wrong variable usage (#440) 2018-08-03 14:11:51 +03:00
8c89c90c50 add accept backpressure #250 2018-08-02 23:17:10 -07:00
e9c1889df4 test timing 2018-08-01 16:41:24 -07:00
72 changed files with 4790 additions and 4080 deletions

View File

@ -1,5 +1,97 @@
# Changes
## [0.7.7] - 2018-09-11
### Fixed
* Fix linked list of HttpChannels #504
* Fix requests to TestServer fail #508
## [0.7.6] - 2018-09-07
### Fixed
* Fix system_exit in HttpServer #501
* Fix parsing of route param containing regexes with repetition #500
### Changes
* Unhide `SessionBackend` and `SessionImpl` traits #455
## [0.7.5] - 2018-09-04
### Added
* Added the ability to pass a custom `TlsConnector` (see the sketch after this section).
* Allow to register handlers on scope level #465
### Fixed
* Handle socket read disconnect
* Handling scoped paths without leading slashes #460
### Changed
* Read client response until eof if connection header set to close #464
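
For illustration only (editorial sketch, not part of the changelog): passing a custom `native-tls` connector to the client, adapted from the doc example added elsewhere in this change set. It assumes the `tls` feature is enabled; the host is just a placeholder.

```rust
extern crate actix_web;
extern crate futures;
extern crate native_tls;

use std::io::Write;

use actix_web::actix::Actor;
use actix_web::{actix, client::ClientConnector, client::Connect};
use futures::Future;
use native_tls::TlsConnector;

fn main() {
    actix::run(|| {
        // Build a custom TLS connector and hand it to the client connector actor.
        let connector = TlsConnector::new().unwrap();
        let conn = ClientConnector::with_connector(connector.into()).start();

        conn.send(Connect::new("https://www.rust-lang.org").unwrap()) // <- connect to host
            .map_err(|_| ())
            .and_then(|res| {
                if let Ok(mut stream) = res {
                    stream.write_all(b"GET / HTTP/1.0\r\n\r\n").unwrap();
                }
                actix::System::current().stop();
                Ok(())
            })
    });
}
```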
## [0.7.4] - 2018-08-23
### Added
* Added `HttpServer::maxconn()` and `HttpServer::maxconnrate()`,
accept backpressure #250
* Allow to customize connection handshake process via `HttpServer::listen_with()`
and `HttpServer::bind_with()` methods
* Support making client connections via `tokio-uds`'s `UnixStream` when "uds" feature is enabled #472
### Changed
* It is now possible to use functions with up to 10 parameters as handlers with `extractor parameters`.
  `Route::with_config()`/`Route::with_async_config()` always pass configuration objects as a tuple,
  even for handlers with a single parameter (a sketch follows this list).
* native-tls - 0.2
* `Content-Disposition` is re-worked. Its parser is now more robust and handles quoted content better. See #461
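
For illustration only (editorial sketch, not part of the changelog): the two changes above in one place — a handler taking more than one extractor argument registered via `.with()`, and the tuple-style configuration access (`cfg.0`) shown in the updated `Form`/`Json` docs. `FormData`, `greet` and `save` are made-up names.

```rust
extern crate actix_web;
#[macro_use]
extern crate serde_derive;

use std::collections::HashMap;

use actix_web::{http, App, Form, Path, Query, Result};

#[derive(Deserialize)]
struct FormData {
    username: String,
}

// A handler may now take several extractor arguments (up to 10).
fn greet(name: Path<String>, query: Query<HashMap<String, String>>) -> String {
    format!("Hello {}! ({} query args)", *name, query.len())
}

// Single-extractor handler; its configuration is the first element of a tuple,
// hence `cfg.0` below.
fn save(form: Form<FormData>) -> Result<String> {
    Ok(format!("Welcome {}!", form.username))
}

fn main() {
    App::new()
        .resource("/{name}", |r| r.method(http::Method::GET).with(greet))
        .resource("/save", |r| {
            r.method(http::Method::POST).with_config(save, |cfg| {
                cfg.0.limit(4096); // <- limit payload size for the Form extractor
            })
        });
}
```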
### Fixed
* Use zlib instead of raw deflate for decoding and encoding payloads with
`Content-Encoding: deflate`.
* Fixed headers formatting for CORS Middleware Access-Control-Expose-Headers #436
* Fix adding multiple response headers #446
* Client includes the port in the HOST header when it is not a default port (i.e. not 80 or 443). #448
* Panic during access without routing being set #452
* Fixed http/2 error handling
### Deprecated
* `HttpServer::no_http2()` is deprecated, use `OpensslAcceptor::with_flags()` or
`RustlsAcceptor::with_flags()` instead
* `HttpServer::listen_tls()`, `HttpServer::listen_ssl()`, `HttpServer::listen_rustls()` have been
deprecated in favor of `HttpServer::listen_with()` with specific `acceptor`.
* `HttpServer::bind_tls()`, `HttpServer::bind_ssl()`, `HttpServer::bind_rustls()` have been
deprecated in favor of `HttpServer::bind_with()` with specific `acceptor`.
## [0.7.3] - 2018-08-01
### Added
@ -8,7 +100,6 @@
* Allow TestServer to open a websocket on any URL (TestServer::ws_at()) #433
### Fixed
* Fixed failure 0.1.2 compatibility
@ -19,6 +110,8 @@
* HttpRequest::url_for is not working with scopes #429
* Fixed headers' formatting for CORS Middleware `Access-Control-Expose-Headers` header value to HTTP/1.1 & HTTP/2 spec-compliant format #436
## [0.7.2] - 2018-07-26

View File

@ -1,6 +1,6 @@
[package]
name = "actix-web"
version = "0.7.3"
version = "0.7.7"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md"
@ -40,6 +40,9 @@ alpn = ["openssl", "tokio-openssl"]
# rustls
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"]
# unix sockets
uds = ["tokio-uds"]
# sessions feature, session require "ring" crate and c compiler
session = ["cookie/secure"]
@ -98,24 +101,26 @@ tokio-io = "0.1"
tokio-tcp = "0.1"
tokio-timer = "0.2"
tokio-reactor = "0.1"
tokio-current-thread = "0.1"
# native-tls
native-tls = { version="0.1", optional = true }
tokio-tls = { version="0.1", optional = true }
native-tls = { version="0.2", optional = true }
tokio-tls = { version="0.2", optional = true }
# openssl
openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.2", optional = true }
#rustls
rustls = { version = "0.13", optional = true }
tokio-rustls = { version = "0.7", optional = true }
rustls = { version = "^0.13.1", optional = true }
tokio-rustls = { version = "^0.7.2", optional = true }
webpki = { version = "0.18", optional = true }
webpki-roots = { version = "0.15", optional = true }
# forked url_encoded
itoa = "0.4"
dtoa = "0.4"
# unix sockets
tokio-uds = { version="0.2", optional = true }
serde_urlencoded = "^0.5.3"
[dev-dependencies]
env_logger = "0.5"

View File

@ -1,3 +1,9 @@
## 0.7.4
* `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple
even for handler with one parameter.
## 0.7
* `HttpRequest` does not implement `Stream` anymore. If you need to read request payload

View File

@ -12,6 +12,7 @@ use resource::Resource;
use router::{ResourceDef, Router};
use scope::Scope;
use server::{HttpHandler, HttpHandlerTask, IntoHttpHandler, Request};
use with::WithFactory;
/// Application
pub struct HttpApplication<S = ()> {
@ -249,7 +250,7 @@ where
/// ```
pub fn route<T, F, R>(mut self, path: &str, method: Method, f: F) -> App<S>
where
F: Fn(T) -> R + 'static,
F: WithFactory<T, S, R>,
R: Responder + 'static,
T: FromRequest<S> + 'static,
{
@ -446,11 +447,8 @@ where
{
let mut path = path.trim().trim_right_matches('/').to_owned();
if !path.is_empty() && !path.starts_with('/') {
path.insert(0, '/')
}
if path.len() > 1 && path.ends_with('/') {
path.pop();
}
path.insert(0, '/');
};
self.parts
.as_mut()
.expect("Use after finish")
@ -775,8 +773,7 @@ mod tests {
.route("/test", Method::GET, |_: HttpRequest| HttpResponse::Ok())
.route("/test", Method::POST, |_: HttpRequest| {
HttpResponse::Created()
})
.finish();
}).finish();
let req = TestRequest::with_uri("/test").method(Method::GET).request();
let resp = app.run(req);

View File

@ -17,57 +17,34 @@ use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::Delay;
#[cfg(feature = "alpn")]
use openssl::ssl::{Error as OpensslError, SslConnector, SslMethod};
#[cfg(feature = "alpn")]
use tokio_openssl::SslConnectorExt;
use {
openssl::ssl::{Error as SslError, SslConnector, SslMethod},
tokio_openssl::SslConnectorExt,
};
#[cfg(all(feature = "tls", not(feature = "alpn")))]
use native_tls::{Error as TlsError, TlsConnector, TlsStream};
#[cfg(all(feature = "tls", not(feature = "alpn")))]
use tokio_tls::TlsConnectorExt;
use {
native_tls::{Error as SslError, TlsConnector as NativeTlsConnector},
tokio_tls::TlsConnector as SslConnector,
};
#[cfg(
all(
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
use rustls::ClientConfig;
#[cfg(
all(
))]
use {
rustls::ClientConfig, std::io::Error as SslError, std::sync::Arc,
tokio_rustls::ClientConfigExt, webpki::DNSNameRef, webpki_roots,
};
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
use std::io::Error as TLSError;
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
use std::sync::Arc;
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
use tokio_rustls::ClientConfigExt;
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
use webpki::DNSNameRef;
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
use webpki_roots;
))]
type SslConnector = Arc<ClientConfig>;
#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))]
type SslConnector = ();
use server::IoStream;
use {HAS_OPENSSL, HAS_RUSTLS, HAS_TLS};
@ -173,24 +150,9 @@ pub enum ClientConnectorError {
SslIsNotSupported,
/// SSL error
#[cfg(feature = "alpn")]
#[cfg(any(feature = "tls", feature = "alpn", feature = "rust-tls"))]
#[fail(display = "{}", _0)]
SslError(#[cause] OpensslError),
/// SSL error
#[cfg(all(feature = "tls", not(feature = "alpn")))]
#[fail(display = "{}", _0)]
SslError(#[cause] TlsError),
/// SSL error
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
#[fail(display = "{}", _0)]
SslError(#[cause] TLSError),
SslError(#[cause] SslError),
/// Resolver error
#[fail(display = "{}", _0)]
@ -242,17 +204,8 @@ impl Paused {
/// `ClientConnector` type is responsible for transport layer of a
/// client connection.
pub struct ClientConnector {
#[cfg(all(feature = "alpn"))]
#[allow(dead_code)]
connector: SslConnector,
#[cfg(all(feature = "tls", not(feature = "alpn")))]
connector: TlsConnector,
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
connector: Arc<ClientConfig>,
stats: ClientConnectorStats,
subscriber: Option<Recipient<ClientConnectorStats>>,
@ -293,71 +246,36 @@ impl SystemService for ClientConnector {}
impl Default for ClientConnector {
fn default() -> ClientConnector {
let connector = {
#[cfg(all(feature = "alpn"))]
{
let builder = SslConnector::builder(SslMethod::tls()).unwrap();
ClientConnector::with_connector(builder.build())
SslConnector::builder(SslMethod::tls()).unwrap().build()
}
#[cfg(all(feature = "tls", not(feature = "alpn")))]
{
let (tx, rx) = mpsc::unbounded();
let builder = TlsConnector::builder().unwrap();
ClientConnector {
stats: ClientConnectorStats::default(),
subscriber: None,
acq_tx: tx,
acq_rx: Some(rx),
resolver: None,
connector: builder.build().unwrap(),
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
limit: 100,
limit_per_host: 0,
acquired: 0,
acquired_per_host: HashMap::new(),
available: HashMap::new(),
to_close: Vec::new(),
waiters: Some(HashMap::new()),
wait_timeout: None,
paused: Paused::No,
NativeTlsConnector::builder().build().unwrap().into()
}
}
#[cfg(
all(
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
))]
{
let mut config = ClientConfig::new();
config
.root_store
.add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
ClientConnector::with_connector(config)
Arc::new(config)
}
#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))]
{
let (tx, rx) = mpsc::unbounded();
ClientConnector {
stats: ClientConnectorStats::default(),
subscriber: None,
acq_tx: tx,
acq_rx: Some(rx),
resolver: None,
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
limit: 100,
limit_per_host: 0,
acquired: 0,
acquired_per_host: HashMap::new(),
available: HashMap::new(),
to_close: Vec::new(),
waiters: Some(HashMap::new()),
wait_timeout: None,
paused: Paused::No,
}
()
}
};
ClientConnector::with_connector_impl(connector)
}
}
@ -375,7 +293,6 @@ impl ClientConnector {
/// # extern crate futures;
/// # use futures::{future, Future};
/// # use std::io::Write;
/// # use std::process;
/// # use actix_web::actix::Actor;
/// extern crate openssl;
/// use actix_web::{actix, client::ClientConnector, client::Connect};
@ -402,35 +319,14 @@ impl ClientConnector {
/// }
/// ```
pub fn with_connector(connector: SslConnector) -> ClientConnector {
let (tx, rx) = mpsc::unbounded();
ClientConnector {
connector,
stats: ClientConnectorStats::default(),
subscriber: None,
acq_tx: tx,
acq_rx: Some(rx),
resolver: None,
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
limit: 100,
limit_per_host: 0,
acquired: 0,
acquired_per_host: HashMap::new(),
available: HashMap::new(),
to_close: Vec::new(),
waiters: Some(HashMap::new()),
wait_timeout: None,
paused: Paused::No,
}
// keep level of indirection for docstrings matching featureflags
Self::with_connector_impl(connector)
}
#[cfg(
all(
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
))]
/// Create `ClientConnector` actor with custom `SslConnector` instance.
///
/// By default `ClientConnector` uses a very simple SSL configuration.
@ -441,10 +337,8 @@ impl ClientConnector {
/// # #![cfg(feature = "rust-tls")]
/// # extern crate actix_web;
/// # extern crate futures;
/// # extern crate tokio;
/// # use futures::{future, Future};
/// # use std::io::Write;
/// # use std::process;
/// # use actix_web::actix::Actor;
/// extern crate rustls;
/// extern crate webpki_roots;
@ -476,10 +370,61 @@ impl ClientConnector {
/// }
/// ```
pub fn with_connector(connector: ClientConfig) -> ClientConnector {
// keep level of indirection for docstrings matching featureflags
Self::with_connector_impl(Arc::new(connector))
}
#[cfg(all(
feature = "tls",
not(any(feature = "alpn", feature = "rust-tls"))
))]
/// Create `ClientConnector` actor with custom `SslConnector` instance.
///
/// By default `ClientConnector` uses a very simple SSL configuration.
/// With `with_connector` method it is possible to use a custom
/// `SslConnector` object.
///
/// ```rust
/// # #![cfg(feature = "tls")]
/// # extern crate actix_web;
/// # extern crate futures;
/// # use futures::{future, Future};
/// # use std::io::Write;
/// # use actix_web::actix::Actor;
/// extern crate native_tls;
/// extern crate webpki_roots;
/// use native_tls::TlsConnector;
/// use actix_web::{actix, client::ClientConnector, client::Connect};
///
/// fn main() {
/// actix::run(|| {
/// let connector = TlsConnector::new().unwrap();
/// let conn = ClientConnector::with_connector(connector.into()).start();
///
/// conn.send(
/// Connect::new("https://www.rust-lang.org").unwrap()) // <- connect to host
/// .map_err(|_| ())
/// .and_then(|res| {
/// if let Ok(mut stream) = res {
/// stream.write_all(b"GET / HTTP/1.0\r\n\r\n").unwrap();
/// }
/// # actix::System::current().stop();
/// Ok(())
/// })
/// });
/// }
/// ```
pub fn with_connector(connector: SslConnector) -> ClientConnector {
// keep level of indirection for docstrings matching featureflags
Self::with_connector_impl(connector)
}
#[inline]
fn with_connector_impl(connector: SslConnector) -> ClientConnector {
let (tx, rx) = mpsc::unbounded();
ClientConnector {
connector: Arc::new(connector),
connector,
stats: ClientConnectorStats::default(),
subscriber: None,
acq_tx: tx,
@ -768,8 +713,7 @@ impl ClientConnector {
).map_err(move |_, act, _| {
act.release_key(&key2);
()
})
.and_then(move |res, act, _| {
}).and_then(move |res, act, _| {
#[cfg(feature = "alpn")]
match res {
Err(err) => {
@ -791,8 +735,7 @@ impl ClientConnector {
));
}
Ok(stream) => {
let _ =
waiter.tx.send(Ok(Connection::new(
let _ = waiter.tx.send(Ok(Connection::new(
conn.0.clone(),
Some(conn),
Box::new(stream),
@ -824,7 +767,7 @@ impl ClientConnector {
if conn.0.ssl {
fut::Either::A(
act.connector
.connect_async(&conn.0.host, stream)
.connect(&conn.0.host, stream)
.into_actor(act)
.then(move |res, _, _| {
match res {
@ -834,8 +777,7 @@ impl ClientConnector {
));
}
Ok(stream) => {
let _ =
waiter.tx.send(Ok(Connection::new(
let _ = waiter.tx.send(Ok(Connection::new(
conn.0.clone(),
Some(conn),
Box::new(stream),
@ -856,12 +798,10 @@ impl ClientConnector {
}
}
#[cfg(
all(
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
))]
match res {
Err(err) => {
let _ = waiter.tx.send(Err(err.into()));
@ -870,8 +810,7 @@ impl ClientConnector {
Ok(stream) => {
act.stats.opened += 1;
if conn.0.ssl {
let host =
DNSNameRef::try_from_ascii_str(&key.host).unwrap();
let host = DNSNameRef::try_from_ascii_str(&key.host).unwrap();
fut::Either::A(
act.connector
.connect_async(host, stream)
@ -884,8 +823,7 @@ impl ClientConnector {
));
}
Ok(stream) => {
let _ =
waiter.tx.send(Ok(Connection::new(
let _ = waiter.tx.send(Ok(Connection::new(
conn.0.clone(),
Some(conn),
Box::new(stream),
@ -915,9 +853,8 @@ impl ClientConnector {
Ok(stream) => {
act.stats.opened += 1;
if conn.0.ssl {
let _ = waiter
.tx
.send(Err(ClientConnectorError::SslIsNotSupported));
let _ =
waiter.tx.send(Err(ClientConnectorError::SslIsNotSupported));
} else {
let _ = waiter.tx.send(Ok(Connection::new(
conn.0.clone(),
@ -928,8 +865,7 @@ impl ClientConnector {
fut::ok(())
}
}
})
.spawn(ctx);
}).spawn(ctx);
}
}
@ -1287,6 +1223,10 @@ impl Connection {
}
/// Create a new connection from an IO Stream
///
/// The stream can be a `UnixStream` if the Unix-only "uds" feature is enabled.
///
/// See also `ClientRequestBuilder::with_connection()`.
pub fn from_stream<T: IoStream + Send>(io: T) -> Connection {
Connection::new(Key::empty(), None, Box::new(io))
}
@ -1345,3 +1285,26 @@ impl AsyncWrite for Connection {
self.stream.shutdown()
}
}
#[cfg(feature = "tls")]
use tokio_tls::TlsStream;
#[cfg(feature = "tls")]
/// This is a temporary solution until the actix-net migration
impl<Io: IoStream> IoStream for TlsStream<Io> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = self.get_mut().shutdown();
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
}

View File

@ -20,6 +20,7 @@ const MAX_HEADERS: usize = 96;
#[derive(Default)]
pub struct HttpResponseParser {
decoder: Option<EncodingDecoder>,
eof: bool, // indicate that we read payload until stream eof
}
#[derive(Debug, Fail)]
@ -38,41 +39,40 @@ impl HttpResponseParser {
where
T: IoStream,
{
// if buf is empty parse_message will always return NotReady, let's avoid that
if buf.is_empty() {
match io.read_available(buf) {
Ok(Async::Ready(true)) => {
return Err(HttpResponseParserError::Disconnect)
}
Ok(Async::Ready(false)) => (),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => return Err(HttpResponseParserError::Error(err.into())),
}
}
loop {
// Don't call parser until we have data to parse.
if !buf.is_empty() {
match HttpResponseParser::parse_message(buf)
.map_err(HttpResponseParserError::Error)?
{
Async::Ready((msg, decoder)) => {
self.decoder = decoder;
Async::Ready((msg, info)) => {
if let Some((decoder, eof)) = info {
self.eof = eof;
self.decoder = Some(decoder);
} else {
self.eof = false;
self.decoder = None;
}
return Ok(Async::Ready(msg));
}
Async::NotReady => {
if buf.capacity() >= MAX_BUFFER_SIZE {
return Err(HttpResponseParserError::Error(ParseError::TooLarge));
return Err(HttpResponseParserError::Error(
ParseError::TooLarge,
));
}
// Parser needs more data.
}
}
}
// Read some more data into the buffer for the parser.
match io.read_available(buf) {
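// `read_available` now reports two flags: whether any bytes were read and whether
// the stream reached eof; no new data plus eof means the peer disconnected.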
Ok(Async::Ready(true)) => {
Ok(Async::Ready((false, true))) => {
return Err(HttpResponseParserError::Disconnect)
}
Ok(Async::Ready(false)) => (),
Ok(Async::Ready(_)) => (),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => {
return Err(HttpResponseParserError::Error(err.into()))
}
}
}
Err(err) => return Err(HttpResponseParserError::Error(err.into())),
}
}
}
@ -87,8 +87,8 @@ impl HttpResponseParser {
loop {
// read payload
let (not_ready, stream_finished) = match io.read_available(buf) {
Ok(Async::Ready(true)) => (false, true),
Ok(Async::Ready(false)) => (false, false),
Ok(Async::Ready((_, true))) => (false, true),
Ok(Async::Ready((_, false))) => (false, false),
Ok(Async::NotReady) => (true, false),
Err(err) => return Err(err.into()),
};
@ -104,9 +104,14 @@ impl HttpResponseParser {
return Ok(Async::NotReady);
}
if stream_finished {
// If the response is delimited by eof (`Connection: close`), a finished stream completes the payload; otherwise it ended prematurely.
if self.eof {
return Ok(Async::Ready(None));
} else {
return Err(PayloadError::Incomplete);
}
}
}
Err(err) => return Err(err.into()),
}
}
@ -117,7 +122,7 @@ impl HttpResponseParser {
fn parse_message(
buf: &mut BytesMut,
) -> Poll<(ClientResponse, Option<EncodingDecoder>), ParseError> {
) -> Poll<(ClientResponse, Option<(EncodingDecoder, bool)>), ParseError> {
// Unsafe: we only read this data after httparse has parsed the headers into it.
// performance bump for pipeline benchmarks.
let mut headers: [HeaderIndex; MAX_HEADERS] = unsafe { mem::uninitialized() };
@ -163,12 +168,12 @@ impl HttpResponseParser {
}
let decoder = if status == StatusCode::SWITCHING_PROTOCOLS {
Some(EncodingDecoder::eof())
Some((EncodingDecoder::eof(), true))
} else if let Some(len) = hdrs.get(header::CONTENT_LENGTH) {
// Content-Length
if let Ok(s) = len.to_str() {
if let Ok(len) = s.parse::<u64>() {
Some(EncodingDecoder::length(len))
Some((EncodingDecoder::length(len), false))
} else {
debug!("illegal Content-Length: {:?}", len);
return Err(ParseError::Header);
@ -179,7 +184,18 @@ impl HttpResponseParser {
}
} else if chunked(&hdrs)? {
// Chunked encoding
Some(EncodingDecoder::chunked())
Some((EncodingDecoder::chunked(), false))
} else if let Some(value) = hdrs.get(header::CONNECTION) {
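// Neither Content-Length nor chunked encoding was given: if the server sent
// `Connection: close`, treat the body as delimited by eof (#464).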
let close = if let Ok(s) = value.to_str() {
s == "close"
} else {
false
};
if close {
Some((EncodingDecoder::eof(), true))
} else {
None
}
} else {
None
};

View File

@ -254,16 +254,16 @@ impl ClientRequest {
impl fmt::Debug for ClientRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!(
writeln!(
f,
"\nClientRequest {:?} {}:{}",
self.version, self.method, self.uri
);
let _ = writeln!(f, " headers:");
)?;
writeln!(f, " headers:")?;
for (key, val) in self.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
writeln!(f, " {:?}: {:?}", key, val)?;
}
res
Ok(())
}
}
@ -629,7 +629,14 @@ impl ClientRequestBuilder {
if let Some(parts) = parts(&mut self.request, &self.err) {
if let Some(host) = parts.uri.host() {
if !parts.headers.contains_key(header::HOST) {
match host.try_into() {
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
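// Append the port to the HOST value unless it is a default port (80/443) (#451).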
let _ = match parts.uri.port() {
None | Some(80) | Some(443) => write!(wrt, "{}", host),
Some(port) => write!(wrt, "{}:{}", host, port),
};
match wrt.get_mut().take().freeze().try_into() {
Ok(value) => {
parts.headers.insert(header::HOST, value);
}
@ -743,16 +750,16 @@ fn parts<'a>(
impl fmt::Debug for ClientRequestBuilder {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(ref parts) = self.request {
let res = writeln!(
writeln!(
f,
"\nClientRequestBuilder {:?} {}:{}",
parts.version, parts.method, parts.uri
);
let _ = writeln!(f, " headers:");
)?;
writeln!(f, " headers:")?;
for (key, val) in parts.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
writeln!(f, " {:?}: {:?}", key, val)?;
}
res
Ok(())
} else {
write!(f, "ClientRequestBuilder(Consumed)")
}

View File

@ -95,12 +95,12 @@ impl ClientResponse {
impl fmt::Debug for ClientResponse {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!(f, "\nClientResponse {:?} {}", self.version(), self.status());
let _ = writeln!(f, " headers:");
writeln!(f, "\nClientResponse {:?} {}", self.version(), self.status())?;
writeln!(f, " headers:")?;
for (key, val) in self.headers().iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
writeln!(f, " {:?}: {:?}", key, val)?;
}
res
Ok(())
}
}

View File

@ -8,7 +8,7 @@ use std::io::{self, Write};
use brotli2::write::BrotliEncoder;
use bytes::{BufMut, BytesMut};
#[cfg(feature = "flate2")]
use flate2::write::{DeflateEncoder, GzEncoder};
use flate2::write::{GzEncoder, ZlibEncoder};
#[cfg(feature = "flate2")]
use flate2::Compression;
use futures::{Async, Poll};
@ -232,7 +232,7 @@ fn content_encoder(buf: BytesMut, req: &mut ClientRequest) -> Output {
let mut enc = match encoding {
#[cfg(feature = "flate2")]
ContentEncoding::Deflate => ContentEncoder::Deflate(
DeflateEncoder::new(transfer, Compression::default()),
ZlibEncoder::new(transfer, Compression::default()),
),
#[cfg(feature = "flate2")]
ContentEncoding::Gzip => ContentEncoder::Gzip(GzEncoder::new(
@ -302,10 +302,9 @@ fn content_encoder(buf: BytesMut, req: &mut ClientRequest) -> Output {
req.replace_body(body);
let enc = match encoding {
#[cfg(feature = "flate2")]
ContentEncoding::Deflate => ContentEncoder::Deflate(DeflateEncoder::new(
transfer,
Compression::default(),
)),
ContentEncoding::Deflate => {
ContentEncoder::Deflate(ZlibEncoder::new(transfer, Compression::default()))
}
#[cfg(feature = "flate2")]
ContentEncoding::Gzip => {
ContentEncoder::Gzip(GzEncoder::new(transfer, Compression::default()))

View File

@ -39,7 +39,7 @@ pub struct Extensions {
impl Extensions {
/// Create an empty `Extensions`.
#[inline]
pub(crate) fn new() -> Extensions {
pub fn new() -> Extensions {
Extensions {
map: HashMap::default(),
}

View File

@ -101,6 +101,12 @@ impl<T> Path<T> {
}
}
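// Allow constructing a `Path<T>` directly from its inner value.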
impl<T> From<T> for Path<T> {
fn from(inner: T) -> Path<T> {
Path { inner }
}
}
impl<T, S> FromRequest<S> for Path<T>
where
T: DeserializeOwned,
@ -326,7 +332,7 @@ impl<T: fmt::Display> fmt::Display for Form<T> {
/// |r| {
/// r.method(http::Method::GET)
/// // register form handler and change form extractor configuration
/// .with_config(index, |cfg| {cfg.limit(4096);})
/// .with_config(index, |cfg| {cfg.0.limit(4096);})
/// },
/// );
/// }
@ -421,7 +427,7 @@ impl<S: 'static> FromRequest<S> for Bytes {
/// let app = App::new().resource("/index.html", |r| {
/// r.method(http::Method::GET)
/// .with_config(index, |cfg| { // <- register handler with extractor params
/// cfg.limit(4096); // <- limit size of the payload
/// cfg.0.limit(4096); // <- limit size of the payload
/// })
/// });
/// }
@ -690,6 +696,12 @@ macro_rules! tuple_from_req ({$fut_type:ident, $(($n:tt, $T:ident)),+} => {
}
});
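// Extracting `()` always succeeds and yields nothing, so handlers and generic
// code can use the unit type where an extractor is expected (#449).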
impl<S> FromRequest<S> for () {
type Config = ();
type Result = Self;
fn from_request(_req: &HttpRequest<S>, _cfg: &Self::Config) -> Self::Result {}
}
tuple_from_req!(TupleFromRequest1, (0, A));
tuple_from_req!(TupleFromRequest2, (0, A), (1, B));
tuple_from_req!(TupleFromRequest3, (0, A), (1, B), (2, C));
@ -1006,5 +1018,7 @@ mod tests {
assert_eq!((res.0).1, "user1");
assert_eq!((res.1).0, "name");
assert_eq!((res.1).1, "user1");
let () = <()>::extract(&req);
}
}

View File

@ -164,11 +164,7 @@ impl<C: StaticFileConfig> NamedFile<C> {
let disposition_type = C::content_disposition_map(ct.type_());
let cd = ContentDisposition {
disposition: disposition_type,
parameters: vec![DispositionParam::Filename(
header::Charset::Ext("UTF-8".to_owned()),
None,
filename.as_bytes().to_vec(),
)],
parameters: vec![DispositionParam::Filename(filename.into_owned())],
};
(ct, cd)
};
@ -373,11 +369,7 @@ impl<C: StaticFileConfig> Responder for NamedFile<C> {
.body("This resource only supports GET and HEAD."));
}
let etag = if C::is_use_etag() {
self.etag()
} else {
None
};
let etag = if C::is_use_etag() { self.etag() } else { None };
let last_modified = if C::is_use_last_modifier() {
self.last_modified()
} else {
@ -522,7 +514,8 @@ impl Stream for ChunkedReadFile {
max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?;
let nbytes = file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
let nbytes =
file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
if nbytes == 0 {
return Err(io::ErrorKind::UnexpectedEof.into());
}
@ -873,8 +866,7 @@ impl HttpRange {
length: length as u64,
}))
}
})
.collect::<Result<_, _>>()?;
}).collect::<Result<_, _>>()?;
let ranges: Vec<HttpRange> = all_ranges.into_iter().filter_map(|x| x).collect();
@ -990,11 +982,7 @@ mod tests {
use header::{ContentDisposition, DispositionParam, DispositionType};
let cd = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::Filename(
header::Charset::Ext("UTF-8".to_owned()),
None,
"test.png".as_bytes().to_vec(),
)],
parameters: vec![DispositionParam::Filename(String::from("test.png"))],
};
let mut file = NamedFile::open("tests/test.png")
.unwrap()

View File

@ -353,13 +353,17 @@ impl<T, E: Into<Error>> From<Result<T, E>> for AsyncResult<T> {
}
}
impl<T, E: Into<Error>> From<Result<Box<Future<Item = T, Error = Error>>, E>>
for AsyncResult<T>
impl<T, E> From<Result<Box<Future<Item = T, Error = E>>, E>> for AsyncResult<T>
where
T: 'static,
E: Into<Error> + 'static,
{
#[inline]
fn from(res: Result<Box<Future<Item = T, Error = Error>>, E>) -> Self {
fn from(res: Result<Box<Future<Item = T, Error = E>>, E>) -> Self {
match res {
Ok(fut) => AsyncResult(Some(AsyncResultItem::Future(fut))),
Ok(fut) => AsyncResult(Some(AsyncResultItem::Future(Box::new(
fut.map_err(|e| e.into()),
)))),
Err(err) => AsyncResult(Some(AsyncResultItem::Err(err.into()))),
}
}

File diff suppressed because it is too large

View File

@ -223,8 +223,7 @@ pub fn from_comma_delimited<T: FromStr>(
.filter_map(|x| match x.trim() {
"" => None,
y => Some(y),
})
.filter_map(|x| x.trim().parse().ok()),
}).filter_map(|x| x.trim().parse().ok()),
)
}
Ok(result)
@ -263,8 +262,10 @@ where
// From hyper v0.11.27 src/header/parsing.rs
/// An extended header parameter value (i.e., tagged with a character set and optionally,
/// a language), as defined in [RFC 5987](https://tools.ietf.org/html/rfc5987#section-3.2).
/// The value part of an extended parameter consists of three parts:
/// the REQUIRED character set name (`charset`), the OPTIONAL language information (`language_tag`),
/// and a character sequence representing the actual value (`value`), separated by single quote
/// characters. It is defined in [RFC 5987](https://tools.ietf.org/html/rfc5987#section-3.2).
#[derive(Clone, Debug, PartialEq)]
pub struct ExtendedValue {
/// The character set that is used to encode the `value` to a string.

View File

@ -279,8 +279,7 @@ mod tests {
true,
StatusCode::MOVED_PERMANENTLY,
))
})
.finish();
}).finish();
// trailing slashes
let params = vec![

View File

@ -479,8 +479,7 @@ where
body.extend_from_slice(&chunk);
Ok(body)
}
})
.map(|body| body.freeze()),
}).map(|body| body.freeze()),
));
self.poll()
}
@ -588,8 +587,7 @@ where
body.extend_from_slice(&chunk);
Ok(body)
}
})
.and_then(move |body| {
}).and_then(move |body| {
if (encoding as *const Encoding) == UTF_8 {
serde_urlencoded::from_bytes::<U>(&body)
.map_err(|_| UrlencodedError::Parse)
@ -694,8 +692,7 @@ mod tests {
.header(
header::TRANSFER_ENCODING,
Bytes::from_static(b"some va\xadscc\xacas0xsdasdlue"),
)
.finish();
).finish();
assert!(req.chunked().is_err());
}
@ -830,8 +827,7 @@ mod tests {
b"Lorem Ipsum is simply dummy text of the printing and typesetting\n\
industry. Lorem Ipsum has been the industry's standard dummy\n\
Contrary to popular belief, Lorem Ipsum is not simply random text.",
))
.finish();
)).finish();
let mut r = Readlines::new(&req);
match r.poll().ok().unwrap() {
Async::Ready(Some(s)) => assert_eq!(

View File

@ -81,6 +81,15 @@ impl<S> HttpRequest<S> {
}
}
/// Construct new http request with empty state.
pub fn drop_state(&self) -> HttpRequest {
HttpRequest {
state: Rc::new(()),
req: self.req.as_ref().map(|r| r.clone()),
resource: self.resource.clone(),
}
}
#[inline]
/// Construct new http request with new RouteInfo.
pub(crate) fn with_route_info(&self, mut resource: ResourceInfo) -> HttpRequest<S> {
@ -255,7 +264,8 @@ impl<S> HttpRequest<S> {
if self.extensions().get::<Cookies>().is_none() {
let mut cookies = Vec::new();
for hdr in self.request().inner.headers.get_all(header::COOKIE) {
let s = str::from_utf8(hdr.as_bytes()).map_err(CookieParseError::from)?;
let s =
str::from_utf8(hdr.as_bytes()).map_err(CookieParseError::from)?;
for cookie_str in s.split(';').map(|s| s.trim()) {
if !cookie_str.is_empty() {
cookies.push(Cookie::parse_encoded(cookie_str)?.into_owned());
@ -344,24 +354,24 @@ impl<S> FromRequest<S> for HttpRequest<S> {
impl<S> fmt::Debug for HttpRequest<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!(
writeln!(
f,
"\nHttpRequest {:?} {}:{}",
self.version(),
self.method(),
self.path()
);
)?;
if !self.query_string().is_empty() {
let _ = writeln!(f, " query: ?{:?}", self.query_string());
writeln!(f, " query: ?{:?}", self.query_string())?;
}
if !self.match_info().is_empty() {
let _ = writeln!(f, " params: {:?}", self.match_info());
writeln!(f, " params: {:?}", self.match_info())?;
}
let _ = writeln!(f, " headers:");
writeln!(f, " headers:")?;
for (key, val) in self.headers().iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
writeln!(f, " {:?}: {:?}", key, val)?;
}
res
Ok(())
}
}

View File

@ -142,8 +142,7 @@ impl HttpResponse {
HeaderValue::from_str(&cookie.to_string())
.map(|c| {
h.append(header::SET_COOKIE, c);
})
.map_err(|e| e.into())
}).map_err(|e| e.into())
}
/// Remove all cookies with the given name from this response. Returns
@ -650,7 +649,14 @@ impl HttpResponseBuilder {
///
/// `HttpResponseBuilder` can not be used after this call.
pub fn json<T: Serialize>(&mut self, value: T) -> HttpResponse {
match serde_json::to_string(&value) {
self.json2(&value)
}
/// Set a json body and generate `HttpResponse`
///
/// `HttpResponseBuilder` can not be used after this call.
pub fn json2<T: Serialize>(&mut self, value: &T) -> HttpResponse {
match serde_json::to_string(value) {
Ok(body) => {
let contains = if let Some(parts) = parts(&mut self.response, &self.err)
{
@ -1072,8 +1078,7 @@ mod tests {
.http_only(true)
.max_age(Duration::days(1))
.finish(),
)
.del_cookie(&cookies[0])
).del_cookie(&cookies[0])
.finish();
let mut val: Vec<_> = resp
@ -1186,6 +1191,30 @@ mod tests {
);
}
#[test]
fn test_json2() {
let resp = HttpResponse::build(StatusCode::OK).json2(&vec!["v1", "v2", "v3"]);
let ct = resp.headers().get(CONTENT_TYPE).unwrap();
assert_eq!(ct, HeaderValue::from_static("application/json"));
assert_eq!(
*resp.body(),
Body::from(Bytes::from_static(b"[\"v1\",\"v2\",\"v3\"]"))
);
}
#[test]
fn test_json2_ct() {
let resp = HttpResponse::build(StatusCode::OK)
.header(CONTENT_TYPE, "text/json")
.json2(&vec!["v1", "v2", "v3"]);
let ct = resp.headers().get(CONTENT_TYPE).unwrap();
assert_eq!(ct, HeaderValue::from_static("text/json"));
assert_eq!(
*resp.body(),
Body::from(Bytes::from_static(b"[\"v1\",\"v2\",\"v3\"]"))
);
}
impl Body {
pub(crate) fn bin_ref(&self) -> &Binary {
match *self {

View File

@ -174,8 +174,7 @@ mod tests {
.header(
header::FORWARDED,
"for=192.0.2.60; proto=https; by=203.0.113.43; host=rust-lang.org",
)
.request();
).request();
let mut info = ConnectionInfo::default();
info.update(&req);

View File

@ -172,7 +172,7 @@ where
/// let app = App::new().resource("/index.html", |r| {
/// r.method(http::Method::POST)
/// .with_config(index, |cfg| {
/// cfg.limit(4096) // <- change json extractor configuration
/// cfg.0.limit(4096) // <- change json extractor configuration
/// .error_handler(|err, req| { // <- create custom error response
/// error::InternalError::from_response(
/// err, HttpResponse::Conflict().finish()).into()
@ -327,8 +327,7 @@ impl<T: HttpMessage + 'static, U: DeserializeOwned + 'static> Future for JsonBod
body.extend_from_slice(&chunk);
Ok(body)
}
})
.and_then(|body| Ok(serde_json::from_slice::<U>(&body)?));
}).and_then(|body| Ok(serde_json::from_slice::<U>(&body)?));
self.fut = Some(Box::new(fut));
self.poll()
}
@ -388,8 +387,7 @@ mod tests {
.header(
header::CONTENT_TYPE,
header::HeaderValue::from_static("application/text"),
)
.finish();
).finish();
let mut json = req.json::<MyObject>();
assert_eq!(json.poll().err().unwrap(), JsonPayloadError::ContentType);
@ -397,12 +395,10 @@ mod tests {
.header(
header::CONTENT_TYPE,
header::HeaderValue::from_static("application/json"),
)
.header(
).header(
header::CONTENT_LENGTH,
header::HeaderValue::from_static("10000"),
)
.finish();
).finish();
let mut json = req.json::<MyObject>().limit(100);
assert_eq!(json.poll().err().unwrap(), JsonPayloadError::Overflow);
@ -410,12 +406,10 @@ mod tests {
.header(
header::CONTENT_TYPE,
header::HeaderValue::from_static("application/json"),
)
.header(
).header(
header::CONTENT_LENGTH,
header::HeaderValue::from_static("16"),
)
.set_payload(Bytes::from_static(b"{\"name\": \"test\"}"))
).set_payload(Bytes::from_static(b"{\"name\": \"test\"}"))
.finish();
let mut json = req.json::<MyObject>();
@ -442,8 +436,7 @@ mod tests {
).header(
header::CONTENT_LENGTH,
header::HeaderValue::from_static("16"),
)
.set_payload(Bytes::from_static(b"{\"name\": \"test\"}"))
).set_payload(Bytes::from_static(b"{\"name\": \"test\"}"))
.finish();
assert!(handler.handle(&req).as_err().is_none())
}

View File

@ -66,6 +66,8 @@
//! * `tls` - enables ssl support via `native-tls` crate
//! * `alpn` - enables ssl support via `openssl` crate, require for `http/2`
//! support
//! * `uds` - enables support for making client requests via Unix Domain Sockets.
//! Unix only. Not necessary for *serving* requests.
//! * `session` - enables session support, includes `ring` crate as
//! dependency
//! * `brotli` - enables `brotli` compression support, requires `c`
@ -116,10 +118,13 @@ extern crate parking_lot;
extern crate rand;
extern crate slab;
extern crate tokio;
extern crate tokio_current_thread;
extern crate tokio_io;
extern crate tokio_reactor;
extern crate tokio_tcp;
extern crate tokio_timer;
#[cfg(all(unix, feature = "uds"))]
extern crate tokio_uds;
extern crate url;
#[macro_use]
extern crate serde;
@ -130,6 +135,7 @@ extern crate encoding;
extern crate flate2;
extern crate h2 as http2;
extern crate num_cpus;
extern crate serde_urlencoded;
#[macro_use]
extern crate percent_encoding;
extern crate serde_json;
@ -182,7 +188,6 @@ mod resource;
mod route;
mod router;
mod scope;
mod serde_urlencoded;
mod uri;
mod with;
@ -253,12 +258,13 @@ pub mod dev {
pub use context::Drain;
pub use extractor::{FormConfig, PayloadConfig};
pub use handler::{AsyncResult, Handler};
pub use httpmessage::{MessageBody, UrlEncoded};
pub use httpmessage::{MessageBody, Readlines, UrlEncoded};
pub use httpresponse::HttpResponseBuilder;
pub use info::ConnectionInfo;
pub use json::{JsonBody, JsonConfig};
pub use param::{FromParam, Params};
pub use payload::{Payload, PayloadBuffer};
pub use pipeline::Pipeline;
pub use resource::Resource;
pub use route::Route;
pub use router::{ResourceDef, ResourceInfo, ResourceType, Router};
@ -280,7 +286,9 @@ pub mod http {
/// Various http headers
pub mod header {
pub use header::*;
pub use header::{ContentDisposition, DispositionType, DispositionParam, Charset, LanguageTag};
pub use header::{
Charset, ContentDisposition, DispositionParam, DispositionType, LanguageTag,
};
}
pub use header::ContentEncoding;
pub use httpresponse::ConnectionType;

View File

@ -387,12 +387,10 @@ impl<S> Middleware<S> for Cors {
header::ACCESS_CONTROL_MAX_AGE,
format!("{}", max_age).as_str(),
);
})
.if_some(headers, |headers, resp| {
}).if_some(headers, |headers, resp| {
let _ =
resp.header(header::ACCESS_CONTROL_ALLOW_HEADERS, headers);
})
.if_true(self.inner.origins.is_all(), |resp| {
}).if_true(self.inner.origins.is_all(), |resp| {
if self.inner.send_wildcard {
resp.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*");
} else {
@ -402,17 +400,14 @@ impl<S> Middleware<S> for Cors {
origin.clone(),
);
}
})
.if_true(self.inner.origins.is_some(), |resp| {
}).if_true(self.inner.origins.is_some(), |resp| {
resp.header(
header::ACCESS_CONTROL_ALLOW_ORIGIN,
self.inner.origins_str.as_ref().unwrap().clone(),
);
})
.if_true(self.inner.supports_credentials, |resp| {
}).if_true(self.inner.supports_credentials, |resp| {
resp.header(header::ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
})
.header(
}).header(
header::ACCESS_CONTROL_ALLOW_METHODS,
&self
.inner
@ -420,8 +415,7 @@ impl<S> Middleware<S> for Cors {
.iter()
.fold(String::new(), |s, v| s + "," + v.as_str())
.as_str()[1..],
)
.finish(),
).finish(),
))
} else {
// Only check requests with a origin header.
@ -840,7 +834,7 @@ impl<S: 'static> CorsBuilder<S> {
cors.expose_hdrs = Some(
self.expose_hdrs
.iter()
.fold(String::new(), |s, v| s + v.as_str())[1..]
.fold(String::new(), |s, v| format!("{}, {}", s, v.as_str()))[2..]
.to_owned(),
);
}
@ -978,8 +972,7 @@ mod tests {
.header(
header::ACCESS_CONTROL_REQUEST_HEADERS,
"AUTHORIZATION,ACCEPT",
)
.method(Method::OPTIONS)
).method(Method::OPTIONS)
.finish();
let resp = cors.start(&req).unwrap().response();
@ -1073,12 +1066,14 @@ mod tests {
#[test]
fn test_response() {
let exposed_headers = vec![header::AUTHORIZATION, header::ACCEPT];
let cors = Cors::build()
.send_wildcard()
.disable_preflight()
.max_age(3600)
.allowed_methods(vec![Method::GET, Method::OPTIONS, Method::POST])
.allowed_headers(vec![header::AUTHORIZATION, header::ACCEPT])
.allowed_headers(exposed_headers.clone())
.expose_headers(exposed_headers.clone())
.allowed_header(header::CONTENT_TYPE)
.finish();
@ -1100,6 +1095,22 @@ mod tests {
resp.headers().get(header::VARY).unwrap().as_bytes()
);
{
let headers = resp
.headers()
.get(header::ACCESS_CONTROL_EXPOSE_HEADERS)
.unwrap()
.to_str()
.unwrap()
.split(',')
.map(|s| s.trim())
.collect::<Vec<&str>>();
for h in exposed_headers {
assert!(headers.contains(&h.as_str()));
}
}
let resp: HttpResponse =
HttpResponse::Ok().header(header::VARY, "Accept").finish();
let resp = cors.response(&req, resp).unwrap().response();

View File

@ -93,8 +93,7 @@ fn origin(headers: &HeaderMap) -> Option<Result<Cow<str>, CsrfError>> {
.to_str()
.map_err(|_| CsrfError::BadOrigin)
.map(|o| o.into())
})
.or_else(|| {
}).or_else(|| {
headers.get(header::REFERER).map(|referer| {
Uri::try_from(Bytes::from(referer.as_bytes()))
.ok()

View File

@ -270,14 +270,17 @@ impl<S: 'static, T: SessionBackend<S>> Middleware<S> for SessionStorage<T, S> {
}
/// A simple key-value storage interface that is internally used by `Session`.
#[doc(hidden)]
pub trait SessionImpl: 'static {
/// Get session value by key
fn get(&self, key: &str) -> Option<&str>;
/// Set session value
fn set(&mut self, key: &str, value: String);
/// Remove specific key from session
fn remove(&mut self, key: &str);
/// Remove all values from session
fn clear(&mut self);
/// Write session to storage backend.
@ -285,9 +288,10 @@ pub trait SessionImpl: 'static {
}
/// Session's storage backend trait definition.
#[doc(hidden)]
pub trait SessionBackend<S>: Sized + 'static {
/// Session item
type Session: SessionImpl;
/// Future that reads session
type ReadFuture: Future<Item = Self::Session, Error = Error>;
/// Parse the session from request and load data from a storage backend.
@ -579,8 +583,7 @@ mod tests {
App::new()
.middleware(SessionStorage::new(
CookieSessionBackend::signed(&[0; 32]).secure(false),
))
.resource("/", |r| {
)).resource("/", |r| {
r.f(|req| {
let _ = req.session().set("counter", 100);
"test"
@ -599,8 +602,7 @@ mod tests {
App::new()
.middleware(SessionStorage::new(
CookieSessionBackend::signed(&[0; 32]).secure(false),
))
.resource("/", |r| {
)).resource("/", |r| {
r.with(|ses: Session| {
let _ = ses.set("counter", 100);
"test"

View File

@ -441,13 +441,13 @@ where
impl<S> fmt::Debug for Field<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!(f, "\nMultipartField: {}", self.ct);
let _ = writeln!(f, " boundary: {}", self.inner.borrow().boundary);
let _ = writeln!(f, " headers:");
writeln!(f, "\nMultipartField: {}", self.ct)?;
writeln!(f, " boundary: {}", self.inner.borrow().boundary)?;
writeln!(f, " headers:")?;
for (key, val) in self.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
writeln!(f, " {:?}: {:?}", key, val)?;
}
res
Ok(())
}
}
@ -756,13 +756,10 @@ mod tests {
{
use http::header::{DispositionParam, DispositionType};
let cd = field.content_disposition().unwrap();
assert_eq!(
cd.disposition,
DispositionType::Ext("form-data".into())
);
assert_eq!(cd.disposition, DispositionType::FormData);
assert_eq!(
cd.parameters[0],
DispositionParam::Ext("name".into(), "file".into())
DispositionParam::Name("file".into())
);
}
assert_eq!(field.content_type().type_(), mime::TEXT);
@ -813,7 +810,6 @@ mod tests {
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
})).unwrap();
}
}

View File

@ -236,7 +236,6 @@ macro_rules! FROM_STR {
($type:ty) => {
impl FromParam for $type {
type Err = InternalError<<$type as FromStr>::Err>;
fn from_param(val: &str) -> Result<Self, Self::Err> {
<$type as FromStr>::from_str(val)
.map_err(|e| InternalError::new(e, StatusCode::BAD_REQUEST))

View File

@ -513,8 +513,7 @@ where
.fold(BytesMut::new(), |mut b, c| {
b.extend_from_slice(c);
b
})
.freeze()
}).freeze()
}
}
@ -553,8 +552,7 @@ mod tests {
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
})).unwrap();
}
#[test]
@ -578,8 +576,7 @@ mod tests {
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
})).unwrap();
}
#[test]
@ -596,8 +593,7 @@ mod tests {
payload.readany().err().unwrap();
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
})).unwrap();
}
#[test]
@ -625,8 +621,7 @@ mod tests {
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
})).unwrap();
}
#[test]
@ -659,8 +654,7 @@ mod tests {
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
})).unwrap();
}
#[test]
@ -693,8 +687,7 @@ mod tests {
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
})).unwrap();
}
#[test]
@ -715,7 +708,6 @@ mod tests {
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
})).unwrap();
}
}

View File

@ -42,13 +42,6 @@ enum PipelineState<S, H> {
}
impl<S: 'static, H: PipelineHandler<S>> PipelineState<S, H> {
fn is_response(&self) -> bool {
match *self {
PipelineState::Response(_) => true,
_ => false,
}
}
fn poll(
&mut self, info: &mut PipelineInfo<S>, mws: &[Box<Middleware<S>>],
) -> Option<PipelineState<S, H>> {
@ -58,9 +51,8 @@ impl<S: 'static, H: PipelineHandler<S>> PipelineState<S, H> {
PipelineState::RunMiddlewares(ref mut state) => state.poll(info, mws),
PipelineState::Finishing(ref mut state) => state.poll(info, mws),
PipelineState::Completed(ref mut state) => state.poll(info),
PipelineState::Response(_) | PipelineState::None | PipelineState::Error => {
None
}
PipelineState::Response(ref mut state) => state.poll(info, mws),
PipelineState::None | PipelineState::Error => None,
}
}
}
@ -89,7 +81,7 @@ impl<S: 'static> PipelineInfo<S> {
}
impl<S: 'static, H: PipelineHandler<S>> Pipeline<S, H> {
pub fn new(
pub(crate) fn new(
req: HttpRequest<S>, mws: Rc<Vec<Box<Middleware<S>>>>, handler: Rc<H>,
) -> Pipeline<S, H> {
let mut info = PipelineInfo {
@ -130,7 +122,6 @@ impl<S: 'static, H: PipelineHandler<S>> HttpHandlerTask for Pipeline<S, H> {
let mut state = mem::replace(&mut self.1, PipelineState::None);
loop {
if state.is_response() {
if let PipelineState::Response(st) = state {
match st.poll_io(io, &mut self.0, &self.2) {
Ok(state) => {
@ -147,7 +138,6 @@ impl<S: 'static, H: PipelineHandler<S>> HttpHandlerTask for Pipeline<S, H> {
}
}
}
}
match state {
PipelineState::None => return Ok(Async::Ready(true)),
PipelineState::Error => {
@ -401,7 +391,7 @@ impl<S: 'static, H> RunMiddlewares<S, H> {
}
struct ProcessResponse<S, H> {
resp: HttpResponse,
resp: Option<HttpResponse>,
iostate: IOState,
running: RunningState,
drain: Option<oneshot::Sender<()>>,
@ -442,7 +432,7 @@ impl<S: 'static, H> ProcessResponse<S, H> {
#[inline]
fn init(resp: HttpResponse) -> PipelineState<S, H> {
PipelineState::Response(ProcessResponse {
resp,
resp: Some(resp),
iostate: IOState::Response,
running: RunningState::Running,
drain: None,
@ -451,6 +441,79 @@ impl<S: 'static, H> ProcessResponse<S, H> {
})
}
fn poll(
&mut self, info: &mut PipelineInfo<S>, mws: &[Box<Middleware<S>>],
) -> Option<PipelineState<S, H>> {
// connection is dead at this point
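// so nothing more can be written; drop or drain what remains of the body and
// hand the response off to the finishing middlewares.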
match mem::replace(&mut self.iostate, IOState::Done) {
IOState::Response => Some(FinishingMiddlewares::init(
info,
mws,
self.resp.take().unwrap(),
)),
IOState::Payload(_) => Some(FinishingMiddlewares::init(
info,
mws,
self.resp.take().unwrap(),
)),
IOState::Actor(mut ctx) => {
if info.disconnected.take().is_some() {
ctx.disconnected();
}
loop {
match ctx.poll() {
Ok(Async::Ready(Some(vec))) => {
if vec.is_empty() {
continue;
}
for frame in vec {
match frame {
Frame::Chunk(None) => {
info.context = Some(ctx);
return Some(FinishingMiddlewares::init(
info,
mws,
self.resp.take().unwrap(),
));
}
Frame::Chunk(Some(_)) => (),
Frame::Drain(fut) => {
let _ = fut.send(());
}
}
}
}
Ok(Async::Ready(None)) => {
return Some(FinishingMiddlewares::init(
info,
mws,
self.resp.take().unwrap(),
))
}
Ok(Async::NotReady) => {
self.iostate = IOState::Actor(ctx);
return None;
}
Err(err) => {
info.context = Some(ctx);
info.error = Some(err);
return Some(FinishingMiddlewares::init(
info,
mws,
self.resp.take().unwrap(),
));
}
}
}
}
IOState::Done => Some(FinishingMiddlewares::init(
info,
mws,
self.resp.take().unwrap(),
)),
}
}
fn poll_io(
mut self, io: &mut Writer, info: &mut PipelineInfo<S>,
mws: &[Box<Middleware<S>>],
@ -461,25 +524,35 @@ impl<S: 'static, H> ProcessResponse<S, H> {
'inner: loop {
let result = match mem::replace(&mut self.iostate, IOState::Done) {
IOState::Response => {
let encoding =
self.resp.content_encoding().unwrap_or(info.encoding);
let encoding = self
.resp
.as_ref()
.unwrap()
.content_encoding()
.unwrap_or(info.encoding);
let result =
match io.start(&info.req, &mut self.resp, encoding) {
let result = match io.start(
&info.req,
self.resp.as_mut().unwrap(),
encoding,
) {
Ok(res) => res,
Err(err) => {
info.error = Some(err.into());
return Ok(FinishingMiddlewares::init(
info, mws, self.resp,
info,
mws,
self.resp.take().unwrap(),
));
}
};
if let Some(err) = self.resp.error() {
if self.resp.status().is_server_error() {
if let Some(err) = self.resp.as_ref().unwrap().error() {
if self.resp.as_ref().unwrap().status().is_server_error()
{
error!(
"Error occured during request handling, status: {} {}",
self.resp.status(), err
self.resp.as_ref().unwrap().status(), err
);
} else {
warn!(
@ -493,7 +566,7 @@ impl<S: 'static, H> ProcessResponse<S, H> {
}
// always poll stream or actor for the first time
match self.resp.replace_body(Body::Empty) {
match self.resp.as_mut().unwrap().replace_body(Body::Empty) {
Body::Streaming(stream) => {
self.iostate = IOState::Payload(stream);
continue 'inner;
@ -512,7 +585,9 @@ impl<S: 'static, H> ProcessResponse<S, H> {
if let Err(err) = io.write_eof() {
info.error = Some(err.into());
return Ok(FinishingMiddlewares::init(
info, mws, self.resp,
info,
mws,
self.resp.take().unwrap(),
));
}
break;
@ -523,7 +598,9 @@ impl<S: 'static, H> ProcessResponse<S, H> {
Err(err) => {
info.error = Some(err.into());
return Ok(FinishingMiddlewares::init(
info, mws, self.resp,
info,
mws,
self.resp.take().unwrap(),
));
}
Ok(result) => result,
@ -536,7 +613,9 @@ impl<S: 'static, H> ProcessResponse<S, H> {
Err(err) => {
info.error = Some(err);
return Ok(FinishingMiddlewares::init(
info, mws, self.resp,
info,
mws,
self.resp.take().unwrap(),
));
}
},
@ -559,26 +638,30 @@ impl<S: 'static, H> ProcessResponse<S, H> {
info.error = Some(err.into());
return Ok(
FinishingMiddlewares::init(
info, mws, self.resp,
info,
mws,
self.resp.take().unwrap(),
),
);
}
break 'inner;
}
Frame::Chunk(Some(chunk)) => {
match io.write(&chunk) {
Frame::Chunk(Some(chunk)) => match io
.write(&chunk)
{
Err(err) => {
info.context = Some(ctx);
info.error = Some(err.into());
return Ok(
FinishingMiddlewares::init(
info, mws, self.resp,
info,
mws,
self.resp.take().unwrap(),
),
);
}
Ok(result) => res = Some(result),
}
}
},
Frame::Drain(fut) => self.drain = Some(fut),
}
}
@ -598,7 +681,9 @@ impl<S: 'static, H> ProcessResponse<S, H> {
info.context = Some(ctx);
info.error = Some(err);
return Ok(FinishingMiddlewares::init(
info, mws, self.resp,
info,
mws,
self.resp.take().unwrap(),
));
}
}
@ -638,7 +723,11 @@ impl<S: 'static, H> ProcessResponse<S, H> {
info.context = Some(ctx);
}
info.error = Some(err.into());
return Ok(FinishingMiddlewares::init(info, mws, self.resp));
return Ok(FinishingMiddlewares::init(
info,
mws,
self.resp.take().unwrap(),
));
}
}
}
@ -652,11 +741,19 @@ impl<S: 'static, H> ProcessResponse<S, H> {
Ok(_) => (),
Err(err) => {
info.error = Some(err.into());
return Ok(FinishingMiddlewares::init(info, mws, self.resp));
return Ok(FinishingMiddlewares::init(
info,
mws,
self.resp.take().unwrap(),
));
}
}
self.resp.set_response_size(io.written());
Ok(FinishingMiddlewares::init(info, mws, self.resp))
self.resp.as_mut().unwrap().set_response_size(io.written());
Ok(FinishingMiddlewares::init(
info,
mws,
self.resp.take().unwrap(),
))
}
_ => Err(PipelineState::Response(self)),
}

View File

@ -264,8 +264,7 @@ mod tests {
.header(
header::HOST,
header::HeaderValue::from_static("www.rust-lang.org"),
)
.finish();
).finish();
let pred = Host("www.rust-lang.org");
assert!(pred.check(&req, req.state()));

View File

@ -13,6 +13,7 @@ use middleware::Middleware;
use pred;
use route::Route;
use router::ResourceDef;
use with::WithFactory;
#[derive(Copy, Clone)]
pub(crate) struct RouteId(usize);
@ -217,7 +218,7 @@ impl<S: 'static> Resource<S> {
/// ```
pub fn with<T, F, R>(&mut self, handler: F)
where
F: Fn(T) -> R + 'static,
F: WithFactory<T, S, R>,
R: Responder + 'static,
T: FromRequest<S> + 'static,
{

View File

@ -16,7 +16,7 @@ use middleware::{
Started as MiddlewareStarted,
};
use pred::Predicate;
use with::{With, WithAsync};
use with::{WithAsyncFactory, WithFactory};
/// Resource route definition
///
@ -166,15 +166,15 @@ impl<S: 'static> Route<S> {
/// ```
pub fn with<T, F, R>(&mut self, handler: F)
where
F: Fn(T) -> R + 'static,
F: WithFactory<T, S, R> + 'static,
R: Responder + 'static,
T: FromRequest<S> + 'static,
{
self.h(With::new(handler, <T::Config as Default>::default()));
self.h(handler.create());
}
/// Set handler function. Same as `.with()` but it allows configuring the
/// extractor.
/// extractor. The configuration closure accepts the config objects as a tuple.
///
/// ```rust
/// # extern crate bytes;
@ -192,21 +192,21 @@ impl<S: 'static> Route<S> {
/// let app = App::new().resource("/index.html", |r| {
/// r.method(http::Method::GET)
/// .with_config(index, |cfg| { // <- register handler
/// cfg.limit(4096); // <- limit size of the payload
/// cfg.0.limit(4096); // <- limit size of the payload
/// })
/// });
/// }
/// ```
pub fn with_config<T, F, R, C>(&mut self, handler: F, cfg_f: C)
where
F: Fn(T) -> R + 'static,
F: WithFactory<T, S, R>,
R: Responder + 'static,
T: FromRequest<S> + 'static,
C: FnOnce(&mut T::Config),
{
let mut cfg = <T::Config as Default>::default();
cfg_f(&mut cfg);
self.h(With::new(handler, cfg));
self.h(handler.create_with_config(cfg));
}
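With the `WithFactory` change the extractor config handed to the closure is always a tuple, one entry per extractor. A hedged sketch of the two-extractor case (the `Info` type, route, and limit are illustrative and not taken from this diff):

```rust
#[macro_use]
extern crate serde_derive;
extern crate actix_web;

use actix_web::{http, App, HttpResponse, Json, Path};

#[derive(Deserialize)]
struct Info {
    name: String,
}

// Two extractors in the argument tuple, so the config is a two-element tuple.
fn index(data: (Path<String>, Json<Info>)) -> HttpResponse {
    HttpResponse::Ok().body(format!("{}: {}", *data.0, data.1.name))
}

fn main() {
    let app = App::new().resource("/{tag}", |r| {
        r.method(http::Method::POST).with_config(index, |cfg| {
            // cfg.0 is the Path extractor's config (left at its default);
            // cfg.1 is the JsonConfig, so the payload limit is tuned here.
            cfg.1.limit(4096);
        })
    });
}
```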
/// Set async handler function, use request extractor for parameters.
@ -240,17 +240,18 @@ impl<S: 'static> Route<S> {
/// ```
pub fn with_async<T, F, R, I, E>(&mut self, handler: F)
where
F: Fn(T) -> R + 'static,
F: WithAsyncFactory<T, S, R, I, E>,
R: Future<Item = I, Error = E> + 'static,
I: Responder + 'static,
E: Into<Error> + 'static,
T: FromRequest<S> + 'static,
{
self.h(WithAsync::new(handler, <T::Config as Default>::default()));
self.h(handler.create());
}
/// Set async handler function, use request extractor for parameters.
/// This method allows to configure extractor.
/// This method allows configuring the extractor. The configuration closure
/// accepts the config objects as a tuple.
///
/// ```rust
/// # extern crate bytes;
@ -275,14 +276,14 @@ impl<S: 'static> Route<S> {
/// "/{username}/index.html", // <- define path parameters
/// |r| r.method(http::Method::GET)
/// .with_async_config(index, |cfg| {
/// cfg.limit(4096);
/// cfg.0.limit(4096);
/// }),
/// ); // <- use `with` extractor
/// }
/// ```
pub fn with_async_config<T, F, R, I, E, C>(&mut self, handler: F, cfg: C)
where
F: Fn(T) -> R + 'static,
F: WithAsyncFactory<T, S, R, I, E>,
R: Future<Item = I, Error = E> + 'static,
I: Responder + 'static,
E: Into<Error> + 'static,
@ -291,7 +292,7 @@ impl<S: 'static> Route<S> {
{
let mut extractor_cfg = <T::Config as Default>::default();
cfg(&mut extractor_cfg);
self.h(WithAsync::new(handler, extractor_cfg));
self.h(handler.create_with_config(extractor_cfg));
}
}

View File

@ -17,6 +17,7 @@ use pred::Predicate;
use resource::{DefaultResource, Resource};
use scope::Scope;
use server::Request;
use with::WithFactory;
#[derive(Debug, Copy, Clone, PartialEq)]
pub(crate) enum ResourceId {
@ -290,19 +291,6 @@ impl<S: 'static> Router<S> {
}
}
#[cfg(test)]
pub(crate) fn route_info(&self, req: &Request, prefix: u16) -> ResourceInfo {
let mut params = Params::with_url(req.url());
params.set_tail(prefix);
ResourceInfo {
params,
prefix: 0,
rmap: self.rmap.clone(),
resource: ResourceId::Default,
}
}
#[cfg(test)]
pub(crate) fn default_route_info(&self) -> ResourceInfo {
ResourceInfo {
@ -411,7 +399,7 @@ impl<S: 'static> Router<S> {
pub(crate) fn register_route<T, F, R>(&mut self, path: &str, method: Method, f: F)
where
F: Fn(T) -> R + 'static,
F: WithFactory<T, S, R>,
R: Responder + 'static,
T: FromRequest<S> + 'static,
{
@ -827,73 +815,70 @@ impl ResourceDef {
Ok(())
}
fn parse(
pattern: &str, for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
fn parse_param(pattern: &str) -> (PatternElement, String, &str) {
const DEFAULT_PATTERN: &str = "[^/]+";
let mut re1 = String::from("^");
let mut re2 = String::new();
let mut el = String::new();
let mut in_param = false;
let mut in_param_pattern = false;
let mut param_name = String::new();
let mut param_pattern = String::from(DEFAULT_PATTERN);
let mut is_dynamic = false;
let mut elems = Vec::new();
let mut len = 0;
for ch in pattern.chars() {
if in_param {
// In parameter segment: `{....}`
if ch == '}' {
elems.push(PatternElement::Var(param_name.clone()));
re1.push_str(&format!(r"(?P<{}>{})", &param_name, &param_pattern));
param_name.clear();
param_pattern = String::from(DEFAULT_PATTERN);
len = 0;
in_param_pattern = false;
in_param = false;
} else if ch == ':' {
// The parameter name has been determined; custom pattern land
in_param_pattern = true;
param_pattern.clear();
} else if in_param_pattern {
// Ignore leading whitespace for pattern
if !(ch == ' ' && param_pattern.is_empty()) {
param_pattern.push(ch);
let mut params_nesting = 0usize;
let close_idx = pattern
.find(|c| match c {
'{' => {
params_nesting += 1;
false
}
} else {
param_name.push(ch);
'}' => {
params_nesting -= 1;
params_nesting == 0
}
} else if ch == '{' {
in_param = true;
is_dynamic = true;
elems.push(PatternElement::Str(el.clone()));
el.clear();
} else {
re1.push_str(escape(&ch.to_string()).as_str());
re2.push(ch);
el.push(ch);
len += 1;
_ => false,
}).expect("malformed param");
let (mut param, rem) = pattern.split_at(close_idx + 1);
param = &param[1..param.len() - 1]; // Remove outer brackets
let (name, pattern) = match param.find(':') {
Some(idx) => {
let (name, pattern) = param.split_at(idx);
(name, &pattern[1..])
}
}
if !el.is_empty() {
elems.push(PatternElement::Str(el.clone()));
}
let re = if is_dynamic {
if !for_prefix {
re1.push('$');
}
re1
} else {
re2
None => (param, DEFAULT_PATTERN),
};
(re, elems, is_dynamic, len)
(
PatternElement::Var(name.to_string()),
format!(r"(?P<{}>{})", &name, &pattern),
rem,
)
}
fn parse(
mut pattern: &str, for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
if pattern.find('{').is_none() {
return (
String::from(pattern),
vec![PatternElement::Str(String::from(pattern))],
false,
pattern.chars().count(),
);
};
let mut elems = Vec::new();
let mut re = String::from("^");
while let Some(idx) = pattern.find('{') {
let (prefix, rem) = pattern.split_at(idx);
elems.push(PatternElement::Str(String::from(prefix)));
re.push_str(&escape(prefix));
let (param_pattern, re_part, rem) = Self::parse_param(rem);
elems.push(param_pattern);
re.push_str(&re_part);
pattern = rem;
}
elems.push(PatternElement::Str(String::from(pattern)));
re.push_str(&escape(pattern));
if !for_prefix {
re.push_str("$");
}
(re, elems, true, pattern.chars().count())
}
}
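Because `parse_param` now tracks brace nesting, repetition counts such as `{4}` inside a custom segment regex no longer close the parameter early. An illustrative route that relies on this (not taken from this diff):

```rust
extern crate actix_web;

use actix_web::{App, HttpRequest, HttpResponse};

fn main() {
    // The inner `{4}` stays part of the regex instead of closing `{id:...}`,
    // so "/invoice/2018-0042" matches while "/invoice/18-42" does not.
    let app = App::new().resource("/invoice/{id:[[:digit:]]{4}-[[:digit:]]{4}}", |r| {
        r.f(|req: &HttpRequest| {
            HttpResponse::Ok().body(format!("invoice {}", &req.match_info()["id"]))
        })
    });
}
```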
@ -1084,6 +1069,16 @@ mod tests {
let info = re.match_with_params(&req, 0).unwrap();
assert_eq!(info.get("version").unwrap(), "151");
assert_eq!(info.get("id").unwrap(), "adahg32");
let re = ResourceDef::new("/{id:[[:digit:]]{6}}");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let req = TestRequest::with_uri("/012345").finish();
let info = re.match_with_params(&req, 0).unwrap();
assert_eq!(info.get("id").unwrap(), "012345");
}
#[test]

View File

@ -5,7 +5,10 @@ use std::rc::Rc;
use futures::{Async, Future, Poll};
use error::Error;
use handler::{AsyncResult, AsyncResultItem, FromRequest, Responder, RouteHandler};
use handler::{
AsyncResult, AsyncResultItem, FromRequest, Handler, Responder, RouteHandler,
WrapHandler,
};
use http::Method;
use httprequest::HttpRequest;
use httpresponse::HttpResponse;
@ -17,6 +20,7 @@ use pred::Predicate;
use resource::{DefaultResource, Resource};
use router::{ResourceDef, Router};
use server::Request;
use with::WithFactory;
/// Resources scope
///
@ -179,7 +183,7 @@ impl<S: 'static> Scope<S> {
where
F: FnOnce(Scope<S>) -> Scope<S>,
{
let rdef = ResourceDef::prefix(&path);
let rdef = ResourceDef::prefix(&insert_slash(path));
let scope = Scope {
rdef: rdef.clone(),
filters: Vec::new(),
@ -222,13 +226,15 @@ impl<S: 'static> Scope<S> {
/// ```
pub fn route<T, F, R>(mut self, path: &str, method: Method, f: F) -> Scope<S>
where
F: Fn(T) -> R + 'static,
F: WithFactory<T, S, R>,
R: Responder + 'static,
T: FromRequest<S> + 'static,
{
Rc::get_mut(&mut self.router)
.unwrap()
.register_route(path, method, f);
Rc::get_mut(&mut self.router).unwrap().register_route(
&insert_slash(path),
method,
f,
);
self
}
@ -260,7 +266,7 @@ impl<S: 'static> Scope<S> {
F: FnOnce(&mut Resource<S>) -> R + 'static,
{
// add resource
let mut resource = Resource::new(ResourceDef::new(path));
let mut resource = Resource::new(ResourceDef::new(&insert_slash(path)));
f(&mut resource);
Rc::get_mut(&mut self.router)
@ -285,6 +291,35 @@ impl<S: 'static> Scope<S> {
self
}
/// Configure handler for specific path prefix.
///
/// A path prefix consists of valid path segments, i.e. for the
/// prefix `/app` any request with the paths `/app`, `/app/` or
/// `/app/test` would match, but the path `/application` would
/// not.
///
/// ```rust
/// # extern crate actix_web;
/// use actix_web::{http, App, HttpRequest, HttpResponse};
///
/// fn main() {
/// let app = App::new().scope("/scope-prefix", |scope| {
/// scope.handler("/app", |req: &HttpRequest| match *req.method() {
/// http::Method::GET => HttpResponse::Ok(),
/// http::Method::POST => HttpResponse::MethodNotAllowed(),
/// _ => HttpResponse::NotFound(),
/// })
/// });
/// }
/// ```
pub fn handler<H: Handler<S>>(mut self, path: &str, handler: H) -> Scope<S> {
let path = insert_slash(path.trim().trim_right_matches('/'));
Rc::get_mut(&mut self.router)
.expect("Multiple copies of scope router")
.register_handler(&path, Box::new(WrapHandler::new(handler)), None);
self
}
/// Register a scope middleware
///
/// This is similar to `App's` middlewares, but
@ -300,6 +335,14 @@ impl<S: 'static> Scope<S> {
}
}
fn insert_slash(path: &str) -> String {
let mut path = path.to_owned();
if !path.is_empty() && !path.starts_with('/') {
path.insert(0, '/');
};
path
}
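Since `route`, `nested`, and `resource` pass their paths through `insert_slash`, a scope path registered without a leading slash resolves the same way as one with it. A minimal sketch mirroring `test_scope_route_without_leading_slash` below:

```rust
extern crate actix_web;

use actix_web::{http::Method, App, HttpRequest, HttpResponse};

fn main() {
    // "app" and "path1" are normalized to "/app" and "/path1" at registration
    // time, so a request to GET /app/path1 still reaches this handler.
    let app = App::new()
        .scope("app", |scope| {
            scope.route("path1", Method::GET, |_: HttpRequest<_>| HttpResponse::Ok())
        }).finish();
}
```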
impl<S: 'static> RouteHandler<S> for Scope<S> {
fn handle(&self, req: &HttpRequest<S>) -> AsyncResult<HttpResponse> {
let tail = req.match_info().tail as usize;
@ -714,8 +757,7 @@ mod tests {
let app = App::new()
.scope("/app", |scope| {
scope.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/path1").request();
let resp = app.run(req);
@ -729,8 +771,7 @@ mod tests {
scope
.resource("", |r| r.f(|_| HttpResponse::Ok()))
.resource("/", |r| r.f(|_| HttpResponse::Created()))
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app").request();
let resp = app.run(req);
@ -746,8 +787,7 @@ mod tests {
let app = App::new()
.scope("/app/", |scope| {
scope.resource("", |r| r.f(|_| HttpResponse::Ok()))
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app").request();
let resp = app.run(req);
@ -763,8 +803,7 @@ mod tests {
let app = App::new()
.scope("/app/", |scope| {
scope.resource("/", |r| r.f(|_| HttpResponse::Ok()))
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app").request();
let resp = app.run(req);
@ -782,12 +821,40 @@ mod tests {
scope
.route("/path1", Method::GET, |_: HttpRequest<_>| {
HttpResponse::Ok()
})
.route("/path1", Method::DELETE, |_: HttpRequest<_>| {
}).route(
"/path1",
Method::DELETE,
|_: HttpRequest<_>| HttpResponse::Ok(),
)
}).finish();
let req = TestRequest::with_uri("/app/path1").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::OK);
let req = TestRequest::with_uri("/app/path1")
.method(Method::DELETE)
.request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::OK);
let req = TestRequest::with_uri("/app/path1")
.method(Method::POST)
.request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND);
}
#[test]
fn test_scope_route_without_leading_slash() {
let app = App::new()
.scope("app", |scope| {
scope
.route("path1", Method::GET, |_: HttpRequest<_>| HttpResponse::Ok())
.route("path1", Method::DELETE, |_: HttpRequest<_>| {
HttpResponse::Ok()
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/path1").request();
let resp = app.run(req);
@ -813,8 +880,7 @@ mod tests {
scope
.filter(pred::Get())
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/path1")
.method(Method::POST)
@ -839,8 +905,7 @@ mod tests {
.body(format!("project: {}", &r.match_info()["project"]))
})
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/ab-project1/path1").request();
let resp = app.run(req);
@ -868,8 +933,7 @@ mod tests {
scope.with_state("/t1", State, |scope| {
scope.resource("/path1", |r| r.f(|_| HttpResponse::Created()))
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/t1/path1").request();
let resp = app.run(req);
@ -887,8 +951,7 @@ mod tests {
.resource("", |r| r.f(|_| HttpResponse::Ok()))
.resource("/", |r| r.f(|_| HttpResponse::Created()))
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/t1").request();
let resp = app.run(req);
@ -908,8 +971,7 @@ mod tests {
scope.with_state("/t1/", State, |scope| {
scope.resource("", |r| r.f(|_| HttpResponse::Ok()))
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/t1").request();
let resp = app.run(req);
@ -929,8 +991,7 @@ mod tests {
scope.with_state("/t1/", State, |scope| {
scope.resource("/", |r| r.f(|_| HttpResponse::Ok()))
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/t1").request();
let resp = app.run(req);
@ -952,8 +1013,7 @@ mod tests {
.filter(pred::Get())
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/t1/path1")
.method(Method::POST)
@ -975,8 +1035,21 @@ mod tests {
scope.nested("/t1", |scope| {
scope.resource("/path1", |r| r.f(|_| HttpResponse::Created()))
})
}).finish();
let req = TestRequest::with_uri("/app/t1/path1").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::CREATED);
}
#[test]
fn test_nested_scope_no_slash() {
let app = App::new()
.scope("/app", |scope| {
scope.nested("t1", |scope| {
scope.resource("/path1", |r| r.f(|_| HttpResponse::Created()))
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/t1/path1").request();
let resp = app.run(req);
@ -992,8 +1065,7 @@ mod tests {
.resource("", |r| r.f(|_| HttpResponse::Ok()))
.resource("/", |r| r.f(|_| HttpResponse::Created()))
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/t1").request();
let resp = app.run(req);
@ -1013,8 +1085,7 @@ mod tests {
.filter(pred::Get())
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/t1/path1")
.method(Method::POST)
@ -1043,8 +1114,7 @@ mod tests {
})
})
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/project_1/path1").request();
let resp = app.run(req);
@ -1076,8 +1146,7 @@ mod tests {
})
})
})
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/test/1/path1").request();
let resp = app.run(req);
@ -1103,8 +1172,7 @@ mod tests {
scope
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
.default_resource(|r| r.f(|_| HttpResponse::BadRequest()))
})
.finish();
}).finish();
let req = TestRequest::with_uri("/app/path2").request();
let resp = app.run(req);
@ -1120,8 +1188,7 @@ mod tests {
let app = App::new()
.scope("/app1", |scope| {
scope.default_resource(|r| r.f(|_| HttpResponse::BadRequest()))
})
.scope("/app2", |scope| scope)
}).scope("/app2", |scope| scope)
.default_resource(|r| r.f(|_| HttpResponse::MethodNotAllowed()))
.finish();
@ -1137,4 +1204,32 @@ mod tests {
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::METHOD_NOT_ALLOWED);
}
#[test]
fn test_handler() {
let app = App::new()
.scope("/scope", |scope| {
scope.handler("/test", |_: &_| HttpResponse::Ok())
}).finish();
let req = TestRequest::with_uri("/scope/test").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::OK);
let req = TestRequest::with_uri("/scope/test/").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::OK);
let req = TestRequest::with_uri("/scope/test/app").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::OK);
let req = TestRequest::with_uri("/scope/testapp").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/scope/blah").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND);
}
}

View File

@ -1,305 +0,0 @@
//! Deserialization support for the `application/x-www-form-urlencoded` format.
use serde::de::Error as de_Error;
use serde::de::{
self, DeserializeSeed, EnumAccess, IntoDeserializer, VariantAccess, Visitor,
};
use serde::de::value::MapDeserializer;
use std::borrow::Cow;
use std::io::Read;
use url::form_urlencoded::parse;
use url::form_urlencoded::Parse as UrlEncodedParse;
#[doc(inline)]
pub use serde::de::value::Error;
/// Deserializes an `application/x-www-form-urlencoded` value from a `&[u8]`.
///
/// ```ignore
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ];
///
/// assert_eq!(
/// serde_urlencoded::from_bytes::<Vec<(String, String)>>(
/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter"),
/// Ok(meal));
/// ```
pub fn from_bytes<'de, T>(input: &'de [u8]) -> Result<T, Error>
where
T: de::Deserialize<'de>,
{
T::deserialize(Deserializer::new(parse(input)))
}
/// Deserializes an `application/x-www-form-urlencoded` value from a `&str`.
///
/// ```ignore
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ];
///
/// assert_eq!(
/// serde_urlencoded::from_str::<Vec<(String, String)>>(
/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter"),
/// Ok(meal));
/// ```
pub fn from_str<'de, T>(input: &'de str) -> Result<T, Error>
where
T: de::Deserialize<'de>,
{
from_bytes(input.as_bytes())
}
#[allow(dead_code)]
/// Convenience function that reads all bytes from `reader` and deserializes
/// them with `from_bytes`.
pub fn from_reader<T, R>(mut reader: R) -> Result<T, Error>
where
T: de::DeserializeOwned,
R: Read,
{
let mut buf = vec![];
reader
.read_to_end(&mut buf)
.map_err(|e| de::Error::custom(format_args!("could not read input: {}", e)))?;
from_bytes(&buf)
}
/// A deserializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level outputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * The main `deserialize` method defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'de> {
inner: MapDeserializer<'de, PartIterator<'de>, Error>,
}
impl<'de> Deserializer<'de> {
/// Returns a new `Deserializer`.
pub fn new(parser: UrlEncodedParse<'de>) -> Self {
Deserializer {
inner: MapDeserializer::new(PartIterator(parser)),
}
}
}
impl<'de> de::Deserializer<'de> for Deserializer<'de> {
type Error = Error;
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
visitor.visit_map(self.inner)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
visitor.visit_seq(self.inner)
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
self.inner.end()?;
visitor.visit_unit()
}
forward_to_deserialize_any! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
option
bytes
byte_buf
unit_struct
newtype_struct
tuple_struct
struct
identifier
tuple
enum
ignored_any
}
}
struct PartIterator<'de>(UrlEncodedParse<'de>);
impl<'de> Iterator for PartIterator<'de> {
type Item = (Part<'de>, Part<'de>);
fn next(&mut self) -> Option<Self::Item> {
self.0.next().map(|(k, v)| (Part(k), Part(v)))
}
}
struct Part<'de>(Cow<'de, str>);
impl<'de> IntoDeserializer<'de> for Part<'de> {
type Deserializer = Self;
fn into_deserializer(self) -> Self::Deserializer {
self
}
}
macro_rules! forward_parsed_value {
($($ty:ident => $method:ident,)*) => {
$(
fn $method<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor<'de>
{
match self.0.parse::<$ty>() {
Ok(val) => val.into_deserializer().$method(visitor),
Err(e) => Err(de::Error::custom(e))
}
}
)*
}
}
impl<'de> de::Deserializer<'de> for Part<'de> {
type Error = Error;
fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
self.0.into_deserializer().deserialize_any(visitor)
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
visitor.visit_some(self)
}
fn deserialize_enum<V>(
self, _name: &'static str, _variants: &'static [&'static str], visitor: V,
) -> Result<V::Value, Self::Error>
where
V: de::Visitor<'de>,
{
visitor.visit_enum(ValueEnumAccess { value: self.0 })
}
forward_to_deserialize_any! {
char
str
string
unit
bytes
byte_buf
unit_struct
newtype_struct
tuple_struct
struct
identifier
tuple
ignored_any
seq
map
}
forward_parsed_value! {
bool => deserialize_bool,
u8 => deserialize_u8,
u16 => deserialize_u16,
u32 => deserialize_u32,
u64 => deserialize_u64,
i8 => deserialize_i8,
i16 => deserialize_i16,
i32 => deserialize_i32,
i64 => deserialize_i64,
f32 => deserialize_f32,
f64 => deserialize_f64,
}
}
/// Provides access to a keyword which can be deserialized into an enum variant. The enum variant
/// must be a unit variant, otherwise deserialization will fail.
struct ValueEnumAccess<'de> {
value: Cow<'de, str>,
}
impl<'de> EnumAccess<'de> for ValueEnumAccess<'de> {
type Error = Error;
type Variant = UnitOnlyVariantAccess;
fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
where
V: DeserializeSeed<'de>,
{
let variant = seed.deserialize(self.value.into_deserializer())?;
Ok((variant, UnitOnlyVariantAccess))
}
}
/// A visitor for deserializing the contents of the enum variant. As we only support
/// `unit_variant`, all other variant types will return an error.
struct UnitOnlyVariantAccess;
impl<'de> VariantAccess<'de> for UnitOnlyVariantAccess {
type Error = Error;
fn unit_variant(self) -> Result<(), Self::Error> {
Ok(())
}
fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value, Self::Error>
where
T: DeserializeSeed<'de>,
{
Err(Error::custom("expected unit variant"))
}
fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(Error::custom("expected unit variant"))
}
fn struct_variant<V>(
self, _fields: &'static [&'static str], _visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(Error::custom("expected unit variant"))
}
}

View File

@ -1,121 +0,0 @@
//! `x-www-form-urlencoded` meets Serde
extern crate dtoa;
extern crate itoa;
pub mod de;
pub mod ser;
#[doc(inline)]
pub use self::de::{from_bytes, from_reader, from_str, Deserializer};
#[doc(inline)]
pub use self::ser::{to_string, Serializer};
#[cfg(test)]
mod tests {
#[test]
fn deserialize_bytes() {
let result = vec![("first".to_owned(), 23), ("last".to_owned(), 42)];
assert_eq!(super::from_bytes(b"first=23&last=42"), Ok(result));
}
#[test]
fn deserialize_str() {
let result = vec![("first".to_owned(), 23), ("last".to_owned(), 42)];
assert_eq!(super::from_str("first=23&last=42"), Ok(result));
}
#[test]
fn deserialize_reader() {
let result = vec![("first".to_owned(), 23), ("last".to_owned(), 42)];
assert_eq!(super::from_reader(b"first=23&last=42" as &[_]), Ok(result));
}
#[test]
fn deserialize_option() {
let result = vec![
("first".to_owned(), Some(23)),
("last".to_owned(), Some(42)),
];
assert_eq!(super::from_str("first=23&last=42"), Ok(result));
}
#[test]
fn deserialize_unit() {
assert_eq!(super::from_str(""), Ok(()));
assert_eq!(super::from_str("&"), Ok(()));
assert_eq!(super::from_str("&&"), Ok(()));
assert!(super::from_str::<()>("first=23").is_err());
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
enum X {
A,
B,
C,
}
#[test]
fn deserialize_unit_enum() {
let result = vec![
("one".to_owned(), X::A),
("two".to_owned(), X::B),
("three".to_owned(), X::C),
];
assert_eq!(super::from_str("one=A&two=B&three=C"), Ok(result));
}
#[test]
fn serialize_option_map_int() {
let params = &[("first", Some(23)), ("middle", None), ("last", Some(42))];
assert_eq!(super::to_string(params), Ok("first=23&last=42".to_owned()));
}
#[test]
fn serialize_option_map_string() {
let params = &[
("first", Some("hello")),
("middle", None),
("last", Some("world")),
];
assert_eq!(
super::to_string(params),
Ok("first=hello&last=world".to_owned())
);
}
#[test]
fn serialize_option_map_bool() {
let params = &[("one", Some(true)), ("two", Some(false))];
assert_eq!(
super::to_string(params),
Ok("one=true&two=false".to_owned())
);
}
#[test]
fn serialize_map_bool() {
let params = &[("one", true), ("two", false)];
assert_eq!(
super::to_string(params),
Ok("one=true&two=false".to_owned())
);
}
#[test]
fn serialize_unit_enum() {
let params = &[("one", X::A), ("two", X::B), ("three", X::C)];
assert_eq!(
super::to_string(params),
Ok("one=A&two=B&three=C".to_owned())
);
}
}

View File

@ -1,74 +0,0 @@
use super::super::ser::part::Sink;
use super::super::ser::Error;
use serde::Serialize;
use std::borrow::Cow;
use std::ops::Deref;
pub enum Key<'key> {
Static(&'static str),
Dynamic(Cow<'key, str>),
}
impl<'key> Deref for Key<'key> {
type Target = str;
fn deref(&self) -> &str {
match *self {
Key::Static(key) => key,
Key::Dynamic(ref key) => key,
}
}
}
impl<'key> From<Key<'key>> for Cow<'static, str> {
fn from(key: Key<'key>) -> Self {
match key {
Key::Static(key) => key.into(),
Key::Dynamic(key) => key.into_owned().into(),
}
}
}
pub struct KeySink<End> {
end: End,
}
impl<End, Ok> KeySink<End>
where
End: for<'key> FnOnce(Key<'key>) -> Result<Ok, Error>,
{
pub fn new(end: End) -> Self {
KeySink { end }
}
}
impl<End, Ok> Sink for KeySink<End>
where
End: for<'key> FnOnce(Key<'key>) -> Result<Ok, Error>,
{
type Ok = Ok;
fn serialize_static_str(self, value: &'static str) -> Result<Ok, Error> {
(self.end)(Key::Static(value))
}
fn serialize_str(self, value: &str) -> Result<Ok, Error> {
(self.end)(Key::Dynamic(value.into()))
}
fn serialize_string(self, value: String) -> Result<Ok, Error> {
(self.end)(Key::Dynamic(value.into()))
}
fn serialize_none(self) -> Result<Ok, Error> {
Err(self.unsupported())
}
fn serialize_some<T: ?Sized + Serialize>(self, _value: &T) -> Result<Ok, Error> {
Err(self.unsupported())
}
fn unsupported(self) -> Error {
Error::Custom("unsupported key".into())
}
}

View File

@ -1,490 +0,0 @@
//! Serialization support for the `application/x-www-form-urlencoded` format.
mod key;
mod pair;
mod part;
mod value;
use serde::ser;
use std::borrow::Cow;
use std::error;
use std::fmt;
use std::str;
use url::form_urlencoded::Serializer as UrlEncodedSerializer;
use url::form_urlencoded::Target as UrlEncodedTarget;
/// Serializes a value into an `application/x-www-form-urlencoded` `String` buffer.
///
/// ```ignore
/// let meal = &[
/// ("bread", "baguette"),
/// ("cheese", "comté"),
/// ("meat", "ham"),
/// ("fat", "butter"),
/// ];
///
/// assert_eq!(
/// serde_urlencoded::to_string(meal),
/// Ok("bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter".to_owned()));
/// ```
pub fn to_string<T: ser::Serialize>(input: T) -> Result<String, Error> {
let mut urlencoder = UrlEncodedSerializer::new("".to_owned());
input.serialize(Serializer::new(&mut urlencoder))?;
Ok(urlencoder.finish())
}
/// A serializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level inputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * Supported keys and values are integers, bytes (if convertible to strings),
/// unit structs and unit variants.
///
/// * Newtype structs defer to their inner values.
pub struct Serializer<'output, Target: 'output + UrlEncodedTarget> {
urlencoder: &'output mut UrlEncodedSerializer<Target>,
}
impl<'output, Target: 'output + UrlEncodedTarget> Serializer<'output, Target> {
/// Returns a new `Serializer`.
pub fn new(urlencoder: &'output mut UrlEncodedSerializer<Target>) -> Self {
Serializer { urlencoder }
}
}
/// Errors returned during serializing to `application/x-www-form-urlencoded`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Error {
Custom(Cow<'static, str>),
Utf8(str::Utf8Error),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Custom(ref msg) => msg.fmt(f),
Error::Utf8(ref err) => write!(f, "invalid UTF-8: {}", err),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::Custom(ref msg) => msg,
Error::Utf8(ref err) => error::Error::description(err),
}
}
/// The lower-level cause of this error, in the case of a `Utf8` error.
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::Custom(_) => None,
Error::Utf8(ref err) => Some(err),
}
}
}
impl ser::Error for Error {
fn custom<T: fmt::Display>(msg: T) -> Self {
Error::Custom(format!("{}", msg).into())
}
}
/// Sequence serializer.
pub struct SeqSerializer<'output, Target: 'output + UrlEncodedTarget> {
urlencoder: &'output mut UrlEncodedSerializer<Target>,
}
/// Tuple serializer.
///
/// Mostly used for arrays.
pub struct TupleSerializer<'output, Target: 'output + UrlEncodedTarget> {
urlencoder: &'output mut UrlEncodedSerializer<Target>,
}
/// Tuple struct serializer.
///
/// Never instantiated, tuple structs are not supported.
pub struct TupleStructSerializer<'output, T: 'output + UrlEncodedTarget> {
inner: ser::Impossible<&'output mut UrlEncodedSerializer<T>, Error>,
}
/// Tuple variant serializer.
///
/// Never instantiated, tuple variants are not supported.
pub struct TupleVariantSerializer<'output, T: 'output + UrlEncodedTarget> {
inner: ser::Impossible<&'output mut UrlEncodedSerializer<T>, Error>,
}
/// Map serializer.
pub struct MapSerializer<'output, Target: 'output + UrlEncodedTarget> {
urlencoder: &'output mut UrlEncodedSerializer<Target>,
key: Option<Cow<'static, str>>,
}
/// Struct serializer.
pub struct StructSerializer<'output, Target: 'output + UrlEncodedTarget> {
urlencoder: &'output mut UrlEncodedSerializer<Target>,
}
/// Struct variant serializer.
///
/// Never instantiated, struct variants are not supported.
pub struct StructVariantSerializer<'output, T: 'output + UrlEncodedTarget> {
inner: ser::Impossible<&'output mut UrlEncodedSerializer<T>, Error>,
}
impl<'output, Target> ser::Serializer for Serializer<'output, Target>
where
Target: 'output + UrlEncodedTarget,
{
type Ok = &'output mut UrlEncodedSerializer<Target>;
type Error = Error;
type SerializeSeq = SeqSerializer<'output, Target>;
type SerializeTuple = TupleSerializer<'output, Target>;
type SerializeTupleStruct = TupleStructSerializer<'output, Target>;
type SerializeTupleVariant = TupleVariantSerializer<'output, Target>;
type SerializeMap = MapSerializer<'output, Target>;
type SerializeStruct = StructSerializer<'output, Target>;
type SerializeStructVariant = StructVariantSerializer<'output, Target>;
/// Returns an error.
fn serialize_bool(self, _v: bool) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_i8(self, _v: i8) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_i16(self, _v: i16) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_i32(self, _v: i32) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_i64(self, _v: i64) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_u8(self, _v: u8) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_u16(self, _v: u16) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_u32(self, _v: u32) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_u64(self, _v: u64) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_f32(self, _v: f32) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_f64(self, _v: f64) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_char(self, _v: char) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_str(self, _value: &str) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_bytes(self, _value: &[u8]) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_unit(self) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_unit_variant(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Serializes the inner value, ignoring the newtype name.
fn serialize_newtype_struct<T: ?Sized + ser::Serialize>(
self, _name: &'static str, value: &T,
) -> Result<Self::Ok, Error> {
value.serialize(self)
}
/// Returns an error.
fn serialize_newtype_variant<T: ?Sized + ser::Serialize>(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
_value: &T,
) -> Result<Self::Ok, Error> {
Err(Error::top_level())
}
/// Returns `Ok`.
fn serialize_none(self) -> Result<Self::Ok, Error> {
Ok(self.urlencoder)
}
/// Serializes the given value.
fn serialize_some<T: ?Sized + ser::Serialize>(
self, value: &T,
) -> Result<Self::Ok, Error> {
value.serialize(self)
}
/// Serializes a sequence; the given length (if any) is ignored.
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Error> {
Ok(SeqSerializer {
urlencoder: self.urlencoder,
})
}
/// Serializes a tuple of key/value pairs; the given length is ignored.
fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Error> {
Ok(TupleSerializer {
urlencoder: self.urlencoder,
})
}
/// Returns an error.
fn serialize_tuple_struct(
self, _name: &'static str, _len: usize,
) -> Result<Self::SerializeTupleStruct, Error> {
Err(Error::top_level())
}
/// Returns an error.
fn serialize_tuple_variant(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Error> {
Err(Error::top_level())
}
/// Serializes a map, given length is ignored.
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Error> {
Ok(MapSerializer {
urlencoder: self.urlencoder,
key: None,
})
}
/// Serializes a struct, given length is ignored.
fn serialize_struct(
self, _name: &'static str, _len: usize,
) -> Result<Self::SerializeStruct, Error> {
Ok(StructSerializer {
urlencoder: self.urlencoder,
})
}
/// Returns an error.
fn serialize_struct_variant(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Error> {
Err(Error::top_level())
}
}
impl<'output, Target> ser::SerializeSeq for SeqSerializer<'output, Target>
where
Target: 'output + UrlEncodedTarget,
{
type Ok = &'output mut UrlEncodedSerializer<Target>;
type Error = Error;
fn serialize_element<T: ?Sized + ser::Serialize>(
&mut self, value: &T,
) -> Result<(), Error> {
value.serialize(pair::PairSerializer::new(self.urlencoder))
}
fn end(self) -> Result<Self::Ok, Error> {
Ok(self.urlencoder)
}
}
impl<'output, Target> ser::SerializeTuple for TupleSerializer<'output, Target>
where
Target: 'output + UrlEncodedTarget,
{
type Ok = &'output mut UrlEncodedSerializer<Target>;
type Error = Error;
fn serialize_element<T: ?Sized + ser::Serialize>(
&mut self, value: &T,
) -> Result<(), Error> {
value.serialize(pair::PairSerializer::new(self.urlencoder))
}
fn end(self) -> Result<Self::Ok, Error> {
Ok(self.urlencoder)
}
}
impl<'output, Target> ser::SerializeTupleStruct
for TupleStructSerializer<'output, Target>
where
Target: 'output + UrlEncodedTarget,
{
type Ok = &'output mut UrlEncodedSerializer<Target>;
type Error = Error;
fn serialize_field<T: ?Sized + ser::Serialize>(
&mut self, value: &T,
) -> Result<(), Error> {
self.inner.serialize_field(value)
}
fn end(self) -> Result<Self::Ok, Error> {
self.inner.end()
}
}
impl<'output, Target> ser::SerializeTupleVariant
for TupleVariantSerializer<'output, Target>
where
Target: 'output + UrlEncodedTarget,
{
type Ok = &'output mut UrlEncodedSerializer<Target>;
type Error = Error;
fn serialize_field<T: ?Sized + ser::Serialize>(
&mut self, value: &T,
) -> Result<(), Error> {
self.inner.serialize_field(value)
}
fn end(self) -> Result<Self::Ok, Error> {
self.inner.end()
}
}
impl<'output, Target> ser::SerializeMap for MapSerializer<'output, Target>
where
Target: 'output + UrlEncodedTarget,
{
type Ok = &'output mut UrlEncodedSerializer<Target>;
type Error = Error;
fn serialize_entry<K: ?Sized + ser::Serialize, V: ?Sized + ser::Serialize>(
&mut self, key: &K, value: &V,
) -> Result<(), Error> {
let key_sink = key::KeySink::new(|key| {
let value_sink = value::ValueSink::new(self.urlencoder, &key);
value.serialize(part::PartSerializer::new(value_sink))?;
self.key = None;
Ok(())
});
let entry_serializer = part::PartSerializer::new(key_sink);
key.serialize(entry_serializer)
}
fn serialize_key<T: ?Sized + ser::Serialize>(
&mut self, key: &T,
) -> Result<(), Error> {
let key_sink = key::KeySink::new(|key| Ok(key.into()));
let key_serializer = part::PartSerializer::new(key_sink);
self.key = Some(key.serialize(key_serializer)?);
Ok(())
}
fn serialize_value<T: ?Sized + ser::Serialize>(
&mut self, value: &T,
) -> Result<(), Error> {
{
let key = self.key.as_ref().ok_or_else(Error::no_key)?;
let value_sink = value::ValueSink::new(self.urlencoder, &key);
value.serialize(part::PartSerializer::new(value_sink))?;
}
self.key = None;
Ok(())
}
fn end(self) -> Result<Self::Ok, Error> {
Ok(self.urlencoder)
}
}
impl<'output, Target> ser::SerializeStruct for StructSerializer<'output, Target>
where
Target: 'output + UrlEncodedTarget,
{
type Ok = &'output mut UrlEncodedSerializer<Target>;
type Error = Error;
fn serialize_field<T: ?Sized + ser::Serialize>(
&mut self, key: &'static str, value: &T,
) -> Result<(), Error> {
let value_sink = value::ValueSink::new(self.urlencoder, key);
value.serialize(part::PartSerializer::new(value_sink))
}
fn end(self) -> Result<Self::Ok, Error> {
Ok(self.urlencoder)
}
}
impl<'output, Target> ser::SerializeStructVariant
for StructVariantSerializer<'output, Target>
where
Target: 'output + UrlEncodedTarget,
{
type Ok = &'output mut UrlEncodedSerializer<Target>;
type Error = Error;
fn serialize_field<T: ?Sized + ser::Serialize>(
&mut self, key: &'static str, value: &T,
) -> Result<(), Error> {
self.inner.serialize_field(key, value)
}
fn end(self) -> Result<Self::Ok, Error> {
self.inner.end()
}
}
impl Error {
fn top_level() -> Self {
let msg = "top-level serializer supports only maps and structs";
Error::Custom(msg.into())
}
fn no_key() -> Self {
let msg = "tried to serialize a value before serializing key";
Error::Custom(msg.into())
}
}

View File

@ -1,239 +0,0 @@
use super::super::ser::key::KeySink;
use super::super::ser::part::PartSerializer;
use super::super::ser::value::ValueSink;
use super::super::ser::Error;
use serde::ser;
use std::borrow::Cow;
use std::mem;
use url::form_urlencoded::Serializer as UrlEncodedSerializer;
use url::form_urlencoded::Target as UrlEncodedTarget;
pub struct PairSerializer<'target, Target: 'target + UrlEncodedTarget> {
urlencoder: &'target mut UrlEncodedSerializer<Target>,
state: PairState,
}
impl<'target, Target> PairSerializer<'target, Target>
where
Target: 'target + UrlEncodedTarget,
{
pub fn new(urlencoder: &'target mut UrlEncodedSerializer<Target>) -> Self {
PairSerializer {
urlencoder,
state: PairState::WaitingForKey,
}
}
}
impl<'target, Target> ser::Serializer for PairSerializer<'target, Target>
where
Target: 'target + UrlEncodedTarget,
{
type Ok = ();
type Error = Error;
type SerializeSeq = ser::Impossible<(), Error>;
type SerializeTuple = Self;
type SerializeTupleStruct = ser::Impossible<(), Error>;
type SerializeTupleVariant = ser::Impossible<(), Error>;
type SerializeMap = ser::Impossible<(), Error>;
type SerializeStruct = ser::Impossible<(), Error>;
type SerializeStructVariant = ser::Impossible<(), Error>;
fn serialize_bool(self, _v: bool) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_i8(self, _v: i8) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_i16(self, _v: i16) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_i32(self, _v: i32) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_i64(self, _v: i64) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_u8(self, _v: u8) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_u16(self, _v: u16) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_u32(self, _v: u32) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_u64(self, _v: u64) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_f32(self, _v: f32) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_f64(self, _v: f64) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_char(self, _v: char) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_str(self, _value: &str) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_bytes(self, _value: &[u8]) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_unit(self) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_unit_struct(self, _name: &'static str) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_unit_variant(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_newtype_struct<T: ?Sized + ser::Serialize>(
self, _name: &'static str, value: &T,
) -> Result<(), Error> {
value.serialize(self)
}
fn serialize_newtype_variant<T: ?Sized + ser::Serialize>(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
_value: &T,
) -> Result<(), Error> {
Err(Error::unsupported_pair())
}
fn serialize_none(self) -> Result<(), Error> {
Ok(())
}
fn serialize_some<T: ?Sized + ser::Serialize>(self, value: &T) -> Result<(), Error> {
value.serialize(self)
}
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Error> {
Err(Error::unsupported_pair())
}
fn serialize_tuple(self, len: usize) -> Result<Self, Error> {
if len == 2 {
Ok(self)
} else {
Err(Error::unsupported_pair())
}
}
fn serialize_tuple_struct(
self, _name: &'static str, _len: usize,
) -> Result<Self::SerializeTupleStruct, Error> {
Err(Error::unsupported_pair())
}
fn serialize_tuple_variant(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Error> {
Err(Error::unsupported_pair())
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Error> {
Err(Error::unsupported_pair())
}
fn serialize_struct(
self, _name: &'static str, _len: usize,
) -> Result<Self::SerializeStruct, Error> {
Err(Error::unsupported_pair())
}
fn serialize_struct_variant(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Error> {
Err(Error::unsupported_pair())
}
}
impl<'target, Target> ser::SerializeTuple for PairSerializer<'target, Target>
where
Target: 'target + UrlEncodedTarget,
{
type Ok = ();
type Error = Error;
fn serialize_element<T: ?Sized + ser::Serialize>(
&mut self, value: &T,
) -> Result<(), Error> {
match mem::replace(&mut self.state, PairState::Done) {
PairState::WaitingForKey => {
let key_sink = KeySink::new(|key| Ok(key.into()));
let key_serializer = PartSerializer::new(key_sink);
self.state = PairState::WaitingForValue {
key: value.serialize(key_serializer)?,
};
Ok(())
}
PairState::WaitingForValue { key } => {
let result = {
let value_sink = ValueSink::new(self.urlencoder, &key);
let value_serializer = PartSerializer::new(value_sink);
value.serialize(value_serializer)
};
if result.is_ok() {
self.state = PairState::Done;
} else {
self.state = PairState::WaitingForValue { key };
}
result
}
PairState::Done => Err(Error::done()),
}
}
fn end(self) -> Result<(), Error> {
if let PairState::Done = self.state {
Ok(())
} else {
Err(Error::not_done())
}
}
}
enum PairState {
WaitingForKey,
WaitingForValue { key: Cow<'static, str> },
Done,
}
impl Error {
fn done() -> Self {
Error::Custom("this pair has already been serialized".into())
}
fn not_done() -> Self {
Error::Custom("this pair has not yet been serialized".into())
}
fn unsupported_pair() -> Self {
Error::Custom("unsupported pair".into())
}
}

View File

@ -1,201 +0,0 @@
use serde;
use super::super::dtoa;
use super::super::itoa;
use super::super::ser::Error;
use std::str;
pub struct PartSerializer<S> {
sink: S,
}
impl<S: Sink> PartSerializer<S> {
pub fn new(sink: S) -> Self {
PartSerializer { sink }
}
}
pub trait Sink: Sized {
type Ok;
fn serialize_static_str(self, value: &'static str) -> Result<Self::Ok, Error>;
fn serialize_str(self, value: &str) -> Result<Self::Ok, Error>;
fn serialize_string(self, value: String) -> Result<Self::Ok, Error>;
fn serialize_none(self) -> Result<Self::Ok, Error>;
fn serialize_some<T: ?Sized + serde::ser::Serialize>(
self, value: &T,
) -> Result<Self::Ok, Error>;
fn unsupported(self) -> Error;
}
impl<S: Sink> serde::ser::Serializer for PartSerializer<S> {
type Ok = S::Ok;
type Error = Error;
type SerializeSeq = serde::ser::Impossible<S::Ok, Error>;
type SerializeTuple = serde::ser::Impossible<S::Ok, Error>;
type SerializeTupleStruct = serde::ser::Impossible<S::Ok, Error>;
type SerializeTupleVariant = serde::ser::Impossible<S::Ok, Error>;
type SerializeMap = serde::ser::Impossible<S::Ok, Error>;
type SerializeStruct = serde::ser::Impossible<S::Ok, Error>;
type SerializeStructVariant = serde::ser::Impossible<S::Ok, Error>;
fn serialize_bool(self, v: bool) -> Result<S::Ok, Error> {
self.sink
.serialize_static_str(if v { "true" } else { "false" })
}
fn serialize_i8(self, v: i8) -> Result<S::Ok, Error> {
self.serialize_integer(v)
}
fn serialize_i16(self, v: i16) -> Result<S::Ok, Error> {
self.serialize_integer(v)
}
fn serialize_i32(self, v: i32) -> Result<S::Ok, Error> {
self.serialize_integer(v)
}
fn serialize_i64(self, v: i64) -> Result<S::Ok, Error> {
self.serialize_integer(v)
}
fn serialize_u8(self, v: u8) -> Result<S::Ok, Error> {
self.serialize_integer(v)
}
fn serialize_u16(self, v: u16) -> Result<S::Ok, Error> {
self.serialize_integer(v)
}
fn serialize_u32(self, v: u32) -> Result<S::Ok, Error> {
self.serialize_integer(v)
}
fn serialize_u64(self, v: u64) -> Result<S::Ok, Error> {
self.serialize_integer(v)
}
fn serialize_f32(self, v: f32) -> Result<S::Ok, Error> {
self.serialize_floating(v)
}
fn serialize_f64(self, v: f64) -> Result<S::Ok, Error> {
self.serialize_floating(v)
}
fn serialize_char(self, v: char) -> Result<S::Ok, Error> {
self.sink.serialize_string(v.to_string())
}
fn serialize_str(self, value: &str) -> Result<S::Ok, Error> {
self.sink.serialize_str(value)
}
fn serialize_bytes(self, value: &[u8]) -> Result<S::Ok, Error> {
match str::from_utf8(value) {
Ok(value) => self.sink.serialize_str(value),
Err(err) => Err(Error::Utf8(err)),
}
}
fn serialize_unit(self) -> Result<S::Ok, Error> {
Err(self.sink.unsupported())
}
fn serialize_unit_struct(self, name: &'static str) -> Result<S::Ok, Error> {
self.sink.serialize_static_str(name)
}
fn serialize_unit_variant(
self, _name: &'static str, _variant_index: u32, variant: &'static str,
) -> Result<S::Ok, Error> {
self.sink.serialize_static_str(variant)
}
fn serialize_newtype_struct<T: ?Sized + serde::ser::Serialize>(
self, _name: &'static str, value: &T,
) -> Result<S::Ok, Error> {
value.serialize(self)
}
fn serialize_newtype_variant<T: ?Sized + serde::ser::Serialize>(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
_value: &T,
) -> Result<S::Ok, Error> {
Err(self.sink.unsupported())
}
fn serialize_none(self) -> Result<S::Ok, Error> {
self.sink.serialize_none()
}
fn serialize_some<T: ?Sized + serde::ser::Serialize>(
self, value: &T,
) -> Result<S::Ok, Error> {
self.sink.serialize_some(value)
}
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Error> {
Err(self.sink.unsupported())
}
fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Error> {
Err(self.sink.unsupported())
}
fn serialize_tuple_struct(
self, _name: &'static str, _len: usize,
) -> Result<Self::SerializeTuple, Error> {
Err(self.sink.unsupported())
}
fn serialize_tuple_variant(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Error> {
Err(self.sink.unsupported())
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Error> {
Err(self.sink.unsupported())
}
fn serialize_struct(
self, _name: &'static str, _len: usize,
) -> Result<Self::SerializeStruct, Error> {
Err(self.sink.unsupported())
}
fn serialize_struct_variant(
self, _name: &'static str, _variant_index: u32, _variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Error> {
Err(self.sink.unsupported())
}
}
impl<S: Sink> PartSerializer<S> {
fn serialize_integer<I>(self, value: I) -> Result<S::Ok, Error>
where
I: itoa::Integer,
{
let mut buf = [b'\0'; 20];
let len = itoa::write(&mut buf[..], value).unwrap();
let part = unsafe { str::from_utf8_unchecked(&buf[0..len]) };
serde::ser::Serializer::serialize_str(self, part)
}
fn serialize_floating<F>(self, value: F) -> Result<S::Ok, Error>
where
F: dtoa::Floating,
{
let mut buf = [b'\0'; 24];
let len = dtoa::write(&mut buf[..], value).unwrap();
let part = unsafe { str::from_utf8_unchecked(&buf[0..len]) };
serde::ser::Serializer::serialize_str(self, part)
}
}

View File

@ -1,59 +0,0 @@
use super::super::ser::part::{PartSerializer, Sink};
use super::super::ser::Error;
use serde::ser::Serialize;
use std::str;
use url::form_urlencoded::Serializer as UrlEncodedSerializer;
use url::form_urlencoded::Target as UrlEncodedTarget;
pub struct ValueSink<'key, 'target, Target>
where
Target: 'target + UrlEncodedTarget,
{
urlencoder: &'target mut UrlEncodedSerializer<Target>,
key: &'key str,
}
impl<'key, 'target, Target> ValueSink<'key, 'target, Target>
where
Target: 'target + UrlEncodedTarget,
{
pub fn new(
urlencoder: &'target mut UrlEncodedSerializer<Target>, key: &'key str,
) -> Self {
ValueSink { urlencoder, key }
}
}
impl<'key, 'target, Target> Sink for ValueSink<'key, 'target, Target>
where
Target: 'target + UrlEncodedTarget,
{
type Ok = ();
fn serialize_str(self, value: &str) -> Result<(), Error> {
self.urlencoder.append_pair(self.key, value);
Ok(())
}
fn serialize_static_str(self, value: &'static str) -> Result<(), Error> {
self.serialize_str(value)
}
fn serialize_string(self, value: String) -> Result<(), Error> {
self.serialize_str(&value)
}
fn serialize_none(self) -> Result<Self::Ok, Error> {
Ok(())
}
fn serialize_some<T: ?Sized + Serialize>(
self, value: &T,
) -> Result<Self::Ok, Error> {
value.serialize(PartSerializer::new(self))
}
fn unsupported(self) -> Error {
Error::Custom("unsupported value".into())
}
}

View File

@ -9,57 +9,116 @@ use tokio_timer::Delay;
use actix::{msgs::Execute, Arbiter, System};
use super::srv::{ServerCommand, Socket};
use super::worker::Conn;
use super::server::ServerCommand;
use super::worker::{Conn, WorkerClient};
use super::Token;
pub(crate) enum Command {
Pause,
Resume,
Stop,
Worker(usize, mpsc::UnboundedSender<Conn<net::TcpStream>>),
Worker(WorkerClient),
}
struct ServerSocketInfo {
addr: net::SocketAddr,
token: usize,
token: Token,
handler: Token,
sock: mio::net::TcpListener,
timeout: Option<Instant>,
}
#[derive(Clone)]
pub(crate) struct AcceptNotify(mio::SetReadiness);
impl AcceptNotify {
pub(crate) fn new(ready: mio::SetReadiness) -> Self {
AcceptNotify(ready)
}
pub(crate) fn notify(&self) {
let _ = self.0.set_readiness(mio::Ready::readable());
}
}
impl Default for AcceptNotify {
fn default() -> Self {
AcceptNotify::new(mio::Registration::new2().1)
}
}
pub(crate) struct AcceptLoop {
cmd_reg: Option<mio::Registration>,
cmd_ready: mio::SetReadiness,
notify_reg: Option<mio::Registration>,
notify_ready: mio::SetReadiness,
tx: sync_mpsc::Sender<Command>,
rx: Option<sync_mpsc::Receiver<Command>>,
srv: Option<(
mpsc::UnboundedSender<ServerCommand>,
mpsc::UnboundedReceiver<ServerCommand>,
)>,
}
impl AcceptLoop {
pub fn new() -> AcceptLoop {
let (tx, rx) = sync_mpsc::channel();
let (cmd_reg, cmd_ready) = mio::Registration::new2();
let (notify_reg, notify_ready) = mio::Registration::new2();
AcceptLoop {
tx,
cmd_ready,
cmd_reg: Some(cmd_reg),
notify_ready,
notify_reg: Some(notify_reg),
rx: Some(rx),
srv: Some(mpsc::unbounded()),
}
}
pub fn send(&self, msg: Command) {
let _ = self.tx.send(msg);
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
}
pub fn get_notify(&self) -> AcceptNotify {
AcceptNotify::new(self.notify_ready.clone())
}
pub(crate) fn start(
&mut self, socks: Vec<Vec<(Token, net::TcpListener)>>,
workers: Vec<WorkerClient>,
) -> mpsc::UnboundedReceiver<ServerCommand> {
let (tx, rx) = self.srv.take().expect("Can not re-use AcceptInfo");
Accept::start(
self.rx.take().expect("Can not re-use AcceptInfo"),
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
socks,
tx,
workers,
);
rx
}
}
struct Accept {
poll: mio::Poll,
rx: sync_mpsc::Receiver<Command>,
sockets: Slab<ServerSocketInfo>,
workers: Vec<(usize, mpsc::UnboundedSender<Conn<net::TcpStream>>)>,
_reg: mio::Registration,
next: usize,
workers: Vec<WorkerClient>,
srv: mpsc::UnboundedSender<ServerCommand>,
timer: (mio::Registration, mio::SetReadiness),
next: usize,
backpressure: bool,
}
const DELTA: usize = 100;
const CMD: mio::Token = mio::Token(0);
const TIMER: mio::Token = mio::Token(1);
pub(crate) fn start_accept_thread(
socks: Vec<(usize, Socket)>, srv: mpsc::UnboundedSender<ServerCommand>,
workers: Vec<(usize, mpsc::UnboundedSender<Conn<net::TcpStream>>)>,
) -> (mio::SetReadiness, sync_mpsc::Sender<Command>) {
let (tx, rx) = sync_mpsc::channel();
let (reg, readiness) = mio::Registration::new2();
let sys = System::current();
// start accept thread
#[cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity))]
let _ = thread::Builder::new()
.name("actix-web accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
Accept::new(reg, rx, socks, workers, srv).poll();
});
(readiness, tx)
}
const NOTIFY: mio::Token = mio::Token(2);
/// This function defines errors that are per-connection, which basically
/// means that if we get this error from the `accept()` system call it means
@ -75,11 +134,48 @@ fn connection_error(e: &io::Error) -> bool {
}
impl Accept {
#![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
pub(crate) fn start(
rx: sync_mpsc::Receiver<Command>, cmd_reg: mio::Registration,
notify_reg: mio::Registration, socks: Vec<Vec<(Token, net::TcpListener)>>,
srv: mpsc::UnboundedSender<ServerCommand>, workers: Vec<WorkerClient>,
) {
let sys = System::current();
// start accept thread
let _ = thread::Builder::new()
.name("actix-web accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let mut accept = Accept::new(rx, socks, workers, srv);
// Start listening for incoming commands
if let Err(err) = accept.poll.register(
&cmd_reg,
CMD,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
// Start listening for notify updates
if let Err(err) = accept.poll.register(
&notify_reg,
NOTIFY,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
accept.poll();
});
}
fn new(
_reg: mio::Registration, rx: sync_mpsc::Receiver<Command>,
socks: Vec<(usize, Socket)>,
workers: Vec<(usize, mpsc::UnboundedSender<Conn<net::TcpStream>>)>,
srv: mpsc::UnboundedSender<ServerCommand>,
rx: sync_mpsc::Receiver<Command>, socks: Vec<Vec<(Token, net::TcpListener)>>,
workers: Vec<WorkerClient>, srv: mpsc::UnboundedSender<ServerCommand>,
) -> Accept {
// Create a poll instance
let poll = match mio::Poll::new() {
@ -87,17 +183,12 @@ impl Accept {
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Start listening for incoming commands
if let Err(err) =
poll.register(&_reg, CMD, mio::Ready::readable(), mio::PollOpt::edge())
{
panic!("Can not register Registration: {}", err);
}
// Start accept
let mut sockets = Slab::new();
for (stoken, sock) in socks {
let server = mio::net::TcpListener::from_std(sock.lst)
for (idx, srv_socks) in socks.into_iter().enumerate() {
for (hnd_token, lst) in srv_socks {
let addr = lst.local_addr().unwrap();
let server = mio::net::TcpListener::from_std(lst)
.expect("Can not create mio::net::TcpListener");
let entry = sockets.vacant_entry();
@ -106,7 +197,7 @@ impl Accept {
// Start listening for incoming connections
if let Err(err) = poll.register(
&server,
mio::Token(token + 1000),
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
@ -114,12 +205,14 @@ impl Accept {
}
entry.insert(ServerSocketInfo {
token: stoken,
addr: sock.addr,
addr,
token: hnd_token,
handler: Token(idx),
sock: server,
timeout: None,
});
}
}
// Timer
let (tm, tmr) = mio::Registration::new2();
@ -132,12 +225,12 @@ impl Accept {
Accept {
poll,
rx,
_reg,
sockets,
workers,
srv,
next: 0,
timer: (tm, tmr),
backpressure: false,
}
}
@ -157,7 +250,14 @@ impl Accept {
return;
},
TIMER => self.process_timer(),
_ => self.accept(token),
NOTIFY => self.backpressure(false),
_ => {
let token = usize::from(token);
if token < DELTA {
continue;
}
self.accept(token - DELTA);
}
}
}
}
@ -170,7 +270,7 @@ impl Accept {
if now > inst {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + 1000),
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
@ -202,7 +302,7 @@ impl Accept {
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + 1000),
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
@ -221,8 +321,9 @@ impl Accept {
}
return false;
}
Command::Worker(idx, addr) => {
self.workers.push((idx, addr));
Command::Worker(worker) => {
self.backpressure(false);
self.workers.push(worker);
}
},
Err(err) => match err {
@ -239,37 +340,45 @@ impl Accept {
true
}
fn accept(&mut self, token: mio::Token) {
let token = usize::from(token);
if token < 1000 {
return;
fn backpressure(&mut self, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed", info.addr);
}
}
}
} else if on {
self.backpressure = true;
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
}
}
if let Some(info) = self.sockets.get_mut(token - 1000) {
loop {
match info.sock.accept_std() {
Ok((io, addr)) => {
let mut msg = Conn {
io,
token: info.token,
peer: Some(addr),
http2: false,
};
fn accept_one(&mut self, mut msg: Conn<net::TcpStream>) {
if self.backpressure {
while !self.workers.is_empty() {
match self.workers[self.next].1.unbounded_send(msg) {
match self.workers[self.next].send(msg) {
Ok(_) => (),
Err(err) => {
let _ = self.srv.unbounded_send(
ServerCommand::WorkerDied(
self.workers[self.next].0,
),
);
let _ = self.srv.unbounded_send(ServerCommand::WorkerDied(
self.workers[self.next].idx,
));
msg = err.into_inner();
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
thread::sleep(Duration::from_millis(100));
break;
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
@ -279,8 +388,52 @@ impl Accept {
self.next = (self.next + 1) % self.workers.len();
break;
}
} else {
let mut idx = 0;
while idx < self.workers.len() {
idx += 1;
if self.workers[self.next].available() {
match self.workers[self.next].send(msg) {
Ok(_) => {
self.next = (self.next + 1) % self.workers.len();
return;
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => break,
Err(err) => {
let _ = self.srv.unbounded_send(ServerCommand::WorkerDied(
self.workers[self.next].idx,
));
msg = err.into_inner();
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
self.backpressure(true);
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.next = (self.next + 1) % self.workers.len();
}
// enable backpressure
self.backpressure(true);
self.accept_one(msg);
}
}
fn accept(&mut self, token: usize) {
loop {
let msg = if let Some(info) = self.sockets.get_mut(token) {
match info.sock.accept_std() {
Ok((io, addr)) => Conn {
io,
token: info.token,
handler: info.handler,
peer: Some(addr),
},
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
@ -299,18 +452,21 @@ impl Accept {
Instant::now() + Duration::from_millis(510),
).map_err(|_| ())
.and_then(move |_| {
let _ =
r.set_readiness(mio::Ready::readable());
let _ = r.set_readiness(mio::Ready::readable());
Ok(())
}),
);
Ok(())
},
));
break;
}
}
}
return;
}
}
} else {
return;
};
self.accept_one(msg);
}
}
}

View File

@ -2,12 +2,12 @@ use std::net::{Shutdown, SocketAddr};
use std::rc::Rc;
use std::{io, ptr, time};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use bytes::{Buf, BufMut, BytesMut};
use futures::{Async, Future, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
use super::settings::WorkerSettings;
use super::{h1, h2, HttpHandler, IoStream};
use super::{h1, h2, ConnectionTag, HttpHandler, IoStream};
const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0";
@ -30,6 +30,7 @@ where
{
proto: Option<HttpProtocol<T, H>>,
node: Option<Node<HttpChannel<T, H>>>,
_tag: ConnectionTag,
}
impl<T, H> HttpChannel<T, H>
@ -38,24 +39,12 @@ where
H: HttpHandler + 'static,
{
pub(crate) fn new(
settings: Rc<WorkerSettings<H>>, mut io: T, peer: Option<SocketAddr>,
http2: bool,
settings: Rc<WorkerSettings<H>>, io: T, peer: Option<SocketAddr>,
) -> HttpChannel<T, H> {
settings.add_channel();
let _ = io.set_nodelay(true);
let _tag = settings.connection();
if http2 {
HttpChannel {
node: None,
proto: Some(HttpProtocol::H2(h2::Http2::new(
settings,
io,
peer,
Bytes::new(),
))),
}
} else {
HttpChannel {
_tag,
node: None,
proto: Some(HttpProtocol::Unknown(
settings,
@ -65,9 +54,8 @@ where
)),
}
}
}
fn shutdown(&mut self) {
pub(crate) fn shutdown(&mut self) {
match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
let io = h1.io();
@ -89,7 +77,7 @@ where
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.node.is_some() {
if self.node.is_none() {
let el = self as *mut _;
self.node = Some(Node::new(el));
let _ = match self.proto {
@ -106,12 +94,12 @@ where
};
}
let mut is_eof = false;
let kind = match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
let result = h1.poll();
match result {
Ok(Async::Ready(())) | Err(_) => {
h1.settings().remove_channel();
if let Some(n) = self.node.as_mut() {
n.remove()
};
@ -124,7 +112,6 @@ where
let result = h2.poll();
match result {
Ok(Async::Ready(())) | Err(_) => {
h2.settings().remove_channel();
if let Some(n) = self.node.as_mut() {
n.remove()
};
@ -133,23 +120,28 @@ where
}
return result;
}
Some(HttpProtocol::Unknown(
ref mut settings,
_,
ref mut io,
ref mut buf,
)) => {
Some(HttpProtocol::Unknown(_, _, ref mut io, ref mut buf)) => {
let mut disconnect = false;
match io.read_available(buf) {
Ok(Async::Ready(true)) | Err(_) => {
Ok(Async::Ready((read_some, stream_closed))) => {
is_eof = stream_closed;
// Only disconnect if no data was read.
if is_eof && !read_some {
disconnect = true;
}
}
Err(_) => {
disconnect = true;
}
_ => (),
}
if disconnect {
debug!("Ignored premature client disconnection");
settings.remove_channel();
if let Some(n) = self.node.as_mut() {
n.remove()
};
return Err(());
}
_ => (),
}
if buf.len() >= 14 {
if buf[..14] == HTTP2_PREFACE[..] {
@ -168,8 +160,9 @@ where
if let Some(HttpProtocol::Unknown(settings, addr, io, buf)) = self.proto.take() {
match kind {
ProtocolKind::Http1 => {
self.proto =
Some(HttpProtocol::H1(h1::Http1::new(settings, io, addr, buf)));
self.proto = Some(HttpProtocol::H1(h1::Http1::new(
settings, io, addr, buf, is_eof,
)));
return self.poll();
}
ProtocolKind::Http2 => {
@ -239,7 +232,7 @@ impl Node<()> {
}
}
pub(crate) fn traverse<T, H>(&self)
pub(crate) fn traverse<T, H, F: Fn(&mut HttpChannel<T, H>)>(&self, f: F)
where
T: IoStream,
H: HttpHandler + 'static,
@ -254,7 +247,7 @@ impl Node<()> {
if !n.element.is_null() {
let ch: &mut HttpChannel<T, H> =
&mut *(&mut *(n.element as *mut _) as *mut () as *mut _);
ch.shutdown();
f(ch);
}
}
} else {

View File

@ -16,8 +16,17 @@ impl HttpHandlerTask for ServerError {
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
{
let bytes = io.buffer();
// Buffer should have sufficient capacity for status line
// and extra space
bytes.reserve(helpers::STATUS_LINE_BUF_SIZE + 1);
helpers::write_status_line(self.0, self.1.as_u16(), bytes);
}
// Convert Status Code to Reason.
let reason = self.1.canonical_reason().unwrap_or("");
io.buffer().extend_from_slice(reason.as_bytes());
// No response body.
io.buffer().extend_from_slice(b"\r\ncontent-length: 0\r\n");
// date header
io.set_date();
Ok(Async::Ready(true))
}

View File

@ -22,13 +22,14 @@ use super::{HttpHandler, HttpHandlerTask, IoStream};
const MAX_PIPELINED_MESSAGES: usize = 16;
bitflags! {
struct Flags: u8 {
pub struct Flags: u8 {
const STARTED = 0b0000_0001;
const ERROR = 0b0000_0010;
const KEEPALIVE = 0b0000_0100;
const SHUTDOWN = 0b0000_1000;
const DISCONNECTED = 0b0001_0000;
const POLLED = 0b0010_0000;
const READ_DISCONNECTED = 0b0001_0000;
const WRITE_DISCONNECTED = 0b0010_0000;
const POLLED = 0b0100_0000;
}
}
@ -90,10 +91,14 @@ where
{
pub fn new(
settings: Rc<WorkerSettings<H>>, stream: T, addr: Option<SocketAddr>,
buf: BytesMut,
buf: BytesMut, is_eof: bool,
) -> Self {
Http1 {
flags: Flags::KEEPALIVE,
flags: if is_eof {
Flags::READ_DISCONNECTED
} else {
Flags::KEEPALIVE
},
stream: H1Writer::new(stream, Rc::clone(&settings)),
decoder: H1Decoder::new(),
payload: None,
@ -117,6 +122,13 @@ where
#[inline]
fn can_read(&self) -> bool {
if self
.flags
.intersects(Flags::ERROR | Flags::READ_DISCONNECTED)
{
return false;
}
if let Some(ref info) = self.payload {
info.need_read() == PayloadStatus::Read
} else {
@ -125,6 +137,8 @@ where
}
fn notify_disconnect(&mut self) {
self.flags.insert(Flags::WRITE_DISCONNECTED);
// notify all tasks
self.stream.disconnected();
for task in &mut self.tasks {
@ -132,6 +146,21 @@ where
}
}
fn client_disconnect(&mut self) {
// notify all tasks
self.notify_disconnect();
// kill keepalive
self.keepalive_timer.take();
// on parse error, stop reading stream but tasks need to be
// completed
self.flags.insert(Flags::ERROR);
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::Incomplete);
}
}
#[inline]
pub fn poll(&mut self) -> Poll<(), ()> {
// keep-alive timer
@ -148,6 +177,11 @@ where
// shutdown
if self.flags.contains(Flags::SHUTDOWN) {
if self.flags.intersects(
Flags::ERROR | Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED,
) {
return Ok(Async::Ready(()));
}
match self.stream.poll_completed(true) {
Ok(Async::NotReady) => return Ok(Async::NotReady),
Ok(Async::Ready(_)) => return Ok(Async::Ready(())),
@ -182,44 +216,25 @@ where
self.flags.insert(Flags::POLLED);
return;
}
// read io from socket
if !self.flags.intersects(Flags::ERROR)
&& self.tasks.len() < MAX_PIPELINED_MESSAGES
&& self.can_read()
{
if self.can_read() && self.tasks.len() < MAX_PIPELINED_MESSAGES {
match self.stream.get_mut().read_available(&mut self.buf) {
Ok(Async::Ready(disconnected)) => {
if disconnected {
// notify all tasks
self.notify_disconnect();
// kill keepalive
self.keepalive_timer.take();
// on parse error, stop reading stream but tasks need to be
// completed
self.flags.insert(Flags::ERROR);
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::Incomplete);
}
} else {
Ok(Async::Ready((read_some, disconnected))) => {
if read_some {
self.parse();
}
if disconnected {
// delay disconnect until all tasks have finished.
self.flags.insert(Flags::READ_DISCONNECTED);
if self.tasks.is_empty() {
self.client_disconnect();
}
}
}
Ok(Async::NotReady) => (),
Err(_) => {
// notify all tasks
self.notify_disconnect();
// kill keepalive
self.keepalive_timer.take();
// on parse error, stop reading stream but tasks need to be
// completed
self.flags.insert(Flags::ERROR);
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::Incomplete);
}
self.client_disconnect();
}
}
}
@ -233,7 +248,10 @@ where
let mut idx = 0;
while idx < self.tasks.len() {
// only one task can do io operation in http/1
if !io && !self.tasks[idx].flags.contains(EntryFlags::EOF) {
if !io
&& !self.tasks[idx].flags.contains(EntryFlags::EOF)
&& !self.flags.contains(Flags::WRITE_DISCONNECTED)
{
// io is corrupted, send buffer
if self.tasks[idx].flags.contains(EntryFlags::ERROR) {
if let Ok(Async::NotReady) = self.stream.poll_completed(true) {
@ -297,7 +315,6 @@ where
}
// cleanup finished tasks
let max = self.tasks.len() >= MAX_PIPELINED_MESSAGES;
while !self.tasks.is_empty() {
if self.tasks[0]
.flags
@ -308,10 +325,6 @@ where
break;
}
}
// read more message
if max && self.tasks.len() >= MAX_PIPELINED_MESSAGES {
return Ok(Async::Ready(true));
}
// check stream state
if self.flags.contains(Flags::STARTED) {
@ -331,8 +344,12 @@ where
}
}
// deal with keep-alive
// deal with keep-alive and stream eof (client-side write shutdown)
if self.tasks.is_empty() {
// handle stream eof
if self.flags.contains(Flags::READ_DISCONNECTED) {
return Ok(Async::Ready(false));
}
// no keep-alive
if self.flags.contains(Flags::ERROR)
|| (!self.flags.contains(Flags::KEEPALIVE)
@ -368,6 +385,10 @@ where
self.payload = Some(PayloadType::new(&msg.inner.headers, ps));
}
// stream extensions
msg.inner_mut().stream_extensions =
self.stream.get_mut().extensions();
// set remote addr
msg.inner_mut().addr = self.addr;
@ -375,7 +396,7 @@ where
self.keepalive_timer.take();
// search handler for request
for h in self.settings.handlers().iter_mut() {
for h in self.settings.handlers().iter() {
msg = match h.handle(msg) {
Ok(mut pipe) => {
if self.tasks.is_empty() {
@ -444,7 +465,14 @@ where
break;
}
}
Ok(None) => break,
Ok(None) => {
if self.flags.contains(Flags::READ_DISCONNECTED)
&& self.tasks.is_empty()
{
self.client_disconnect();
}
break;
}
Err(e) => {
self.flags.insert(Flags::ERROR);
if let Some(mut payload) = self.payload.take() {
@ -475,7 +503,16 @@ mod tests {
use httpmessage::HttpMessage;
use server::h1decoder::Message;
use server::settings::{ServerSettings, WorkerSettings};
use server::{KeepAlive, Request};
use server::{Connections, KeepAlive, Request};
fn wrk_settings() -> Rc<WorkerSettings<HttpApplication>> {
Rc::new(WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
Connections::default(),
))
}
impl Message {
fn message(self) -> Request {
@ -506,8 +543,7 @@ mod tests {
macro_rules! parse_ready {
($e:expr) => {{
let settings: WorkerSettings<HttpApplication> =
WorkerSettings::new(Vec::new(), KeepAlive::Os, ServerSettings::default());
let settings = wrk_settings();
match H1Decoder::new().decode($e, &settings) {
Ok(Some(msg)) => msg.message(),
Ok(_) => unreachable!("Eof during parsing http request"),
@ -518,8 +554,7 @@ mod tests {
macro_rules! expect_parse_err {
($e:expr) => {{
let settings: WorkerSettings<HttpApplication> =
WorkerSettings::new(Vec::new(), KeepAlive::Os, ServerSettings::default());
let settings = wrk_settings();
match H1Decoder::new().decode($e, &settings) {
Err(err) => match err {
@ -592,32 +627,36 @@ mod tests {
}
#[test]
fn test_req_parse() {
fn test_req_parse1() {
let buf = Buffer::new("GET /test HTTP/1.1\r\n\r\n");
let readbuf = BytesMut::new();
let settings = Rc::new(WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
));
let settings = Rc::new(wrk_settings());
let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf);
let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false);
h1.poll_io();
h1.poll_io();
assert_eq!(h1.tasks.len(), 1);
}
#[test]
fn test_req_parse2() {
let buf = Buffer::new("");
let readbuf =
BytesMut::from(Vec::<u8>::from(&b"GET /test HTTP/1.1\r\n\r\n"[..]));
let settings = Rc::new(wrk_settings());
let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, true);
h1.poll_io();
assert_eq!(h1.tasks.len(), 1);
}
#[test]
fn test_req_parse_err() {
let buf = Buffer::new("GET /test HTTP/1\r\n\r\n");
let readbuf = BytesMut::new();
let settings = Rc::new(WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
));
let settings = Rc::new(wrk_settings());
let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf);
let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf, false);
h1.poll_io();
h1.poll_io();
assert!(h1.flags.contains(Flags::ERROR));
@ -626,11 +665,7 @@ mod tests {
#[test]
fn test_parse() {
let mut buf = BytesMut::from("GET /test HTTP/1.1\r\n\r\n");
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
match reader.decode(&mut buf, &settings) {
@ -647,11 +682,7 @@ mod tests {
#[test]
fn test_parse_partial() {
let mut buf = BytesMut::from("PUT /test HTTP/1");
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
match reader.decode(&mut buf, &settings) {
@ -674,11 +705,7 @@ mod tests {
#[test]
fn test_parse_post() {
let mut buf = BytesMut::from("POST /test2 HTTP/1.0\r\n\r\n");
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
match reader.decode(&mut buf, &settings) {
@ -696,11 +723,7 @@ mod tests {
fn test_parse_body() {
let mut buf =
BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
match reader.decode(&mut buf, &settings) {
@ -727,11 +750,7 @@ mod tests {
fn test_parse_body_crlf() {
let mut buf =
BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
match reader.decode(&mut buf, &settings) {
@ -757,11 +776,7 @@ mod tests {
#[test]
fn test_parse_partial_eof() {
let mut buf = BytesMut::from("GET /test HTTP/1.1\r\n");
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
assert!(reader.decode(&mut buf, &settings).unwrap().is_none());
@ -780,11 +795,7 @@ mod tests {
#[test]
fn test_headers_split_field() {
let mut buf = BytesMut::from("GET /test HTTP/1.1\r\n");
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
assert!{ reader.decode(&mut buf, &settings).unwrap().is_none() }
@ -815,11 +826,7 @@ mod tests {
Set-Cookie: c1=cookie1\r\n\
Set-Cookie: c2=cookie2\r\n\r\n",
);
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
let msg = reader.decode(&mut buf, &settings).unwrap().unwrap();
let req = msg.message();
@ -1015,11 +1022,7 @@ mod tests {
#[test]
fn test_http_request_upgrade() {
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
connection: upgrade\r\n\
@ -1085,12 +1088,7 @@ mod tests {
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
let msg = reader.decode(&mut buf, &settings).unwrap().unwrap();
assert!(msg.is_payload());
@ -1125,11 +1123,7 @@ mod tests {
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
let msg = reader.decode(&mut buf, &settings).unwrap().unwrap();
assert!(msg.is_payload());
@ -1163,11 +1157,7 @@ mod tests {
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
let msg = reader.decode(&mut buf, &settings).unwrap().unwrap();
@ -1214,11 +1204,7 @@ mod tests {
&"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"[..],
);
let settings = WorkerSettings::<HttpApplication>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
);
let settings = wrk_settings();
let mut reader = H1Decoder::new();
let msg = reader.decode(&mut buf, &settings).unwrap().unwrap();

View File

@ -166,8 +166,8 @@ impl H1Decoder {
{
true
} else {
version == Version::HTTP_11
&& !(conn.contains("close")
version == Version::HTTP_11 && !(conn
.contains("close")
|| conn.contains("upgrade"))
}
} else {

View File

@ -63,7 +63,9 @@ impl<T: AsyncWrite, H: 'static> H1Writer<T, H> {
self.flags = Flags::KEEPALIVE;
}
pub fn disconnected(&mut self) {}
pub fn disconnected(&mut self) {
self.flags.insert(Flags::DISCONNECTED);
}
pub fn keepalive(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE) && !self.flags.contains(Flags::UPGRADE)
@ -152,8 +154,7 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
let reason = msg.reason().as_bytes();
if let Body::Binary(ref bytes) = body {
buffer.reserve(
256
+ msg.headers().len() * AVERAGE_HEADER_SIZE
256 + msg.headers().len() * AVERAGE_HEADER_SIZE
+ bytes.len()
+ reason.len(),
);
@ -269,10 +270,7 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
let pl: &[u8] = payload.as_ref();
let n = match Self::write_data(&mut self.stream, pl) {
Err(err) => {
if err.kind() == io::ErrorKind::WriteZero {
self.disconnected();
}
return Err(err);
}
Ok(val) => val,
@ -316,14 +314,15 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
#[inline]
fn poll_completed(&mut self, shutdown: bool) -> Poll<(), io::Error> {
if self.flags.contains(Flags::DISCONNECTED) {
return Err(io::Error::new(io::ErrorKind::Other, "disconnected"));
}
if !self.buffer.is_empty() {
let written = {
match Self::write_data(&mut self.stream, self.buffer.as_ref().as_ref()) {
Err(err) => {
if err.kind() == io::ErrorKind::WriteZero {
self.disconnected();
}
return Err(err);
}
Ok(val) => val,
@ -337,9 +336,10 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
}
}
if shutdown {
self.stream.poll_flush()?;
self.stream.shutdown()
} else {
Ok(Async::Ready(()))
Ok(self.stream.poll_flush()?)
}
}
}

View File

@ -14,6 +14,7 @@ use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::Delay;
use error::{Error, PayloadError};
use extensions::Extensions;
use http::{StatusCode, Version};
use payload::{Payload, PayloadStatus, PayloadWriter};
use uri::Url;
@ -22,7 +23,7 @@ use super::error::ServerError;
use super::h2writer::H2Writer;
use super::input::PayloadType;
use super::settings::WorkerSettings;
use super::{HttpHandler, HttpHandlerTask, Writer};
use super::{HttpHandler, HttpHandlerTask, IoStream, Writer};
bitflags! {
struct Flags: u8 {
@ -42,6 +43,7 @@ where
state: State<IoWrapper<T>>,
tasks: VecDeque<Entry<H>>,
keepalive_timer: Option<Delay>,
extensions: Option<Rc<Extensions>>,
}
enum State<T: AsyncRead + AsyncWrite> {
@ -52,12 +54,13 @@ enum State<T: AsyncRead + AsyncWrite> {
impl<T, H> Http2<T, H>
where
T: AsyncRead + AsyncWrite + 'static,
T: IoStream + 'static,
H: HttpHandler + 'static,
{
pub fn new(
settings: Rc<WorkerSettings<H>>, io: T, addr: Option<SocketAddr>, buf: Bytes,
) -> Self {
let extensions = io.extensions();
Http2 {
flags: Flags::empty(),
tasks: VecDeque::new(),
@ -68,6 +71,7 @@ where
keepalive_timer: None,
addr,
settings,
extensions,
}
}
@ -98,13 +102,19 @@ where
loop {
let mut not_ready = true;
let disconnected = self.flags.contains(Flags::DISCONNECTED);
// check in-flight connections
for item in &mut self.tasks {
// read payload
if !disconnected {
item.poll_payload();
}
if !item.flags.contains(EntryFlags::EOF) {
if disconnected {
item.flags.insert(EntryFlags::EOF);
} else {
let retry = item.payload.need_read() == PayloadStatus::Read;
loop {
match item.task.poll_io(&mut item.stream) {
@ -119,7 +129,8 @@ where
not_ready = false;
}
Ok(Async::NotReady) => {
if item.payload.need_read() == PayloadStatus::Read
if item.payload.need_read()
== PayloadStatus::Read
&& !retry
{
continue;
@ -137,12 +148,18 @@ where
}
break;
}
} else if !item.flags.contains(EntryFlags::FINISHED) {
}
}
if item.flags.contains(EntryFlags::EOF)
&& !item.flags.contains(EntryFlags::FINISHED)
{
match item.task.poll_completed() {
Ok(Async::NotReady) => (),
Ok(Async::Ready(_)) => {
not_ready = false;
item.flags.insert(EntryFlags::FINISHED);
item.flags.insert(
EntryFlags::FINISHED | EntryFlags::WRITE_DONE,
);
}
Err(err) => {
item.flags.insert(
@ -157,6 +174,7 @@ where
if item.flags.contains(EntryFlags::FINISHED)
&& !item.flags.contains(EntryFlags::WRITE_DONE)
&& !disconnected
{
match item.stream.poll_completed(false) {
Ok(Async::NotReady) => (),
@ -164,7 +182,7 @@ where
not_ready = false;
item.flags.insert(EntryFlags::WRITE_DONE);
}
Err(_err) => {
Err(_) => {
item.flags.insert(EntryFlags::ERROR);
}
}
@ -173,7 +191,7 @@ where
// cleanup finished tasks
while !self.tasks.is_empty() {
if self.tasks[0].flags.contains(EntryFlags::EOF)
if self.tasks[0].flags.contains(EntryFlags::FINISHED)
&& self.tasks[0].flags.contains(EntryFlags::WRITE_DONE)
|| self.tasks[0].flags.contains(EntryFlags::ERROR)
{
@ -206,6 +224,7 @@ where
resp,
self.addr,
&self.settings,
self.extensions.clone(),
));
}
Ok(Async::NotReady) => {
@ -324,6 +343,7 @@ impl<H: HttpHandler + 'static> Entry<H> {
fn new(
parts: Parts, recv: RecvStream, resp: SendResponse<Bytes>,
addr: Option<SocketAddr>, settings: &Rc<WorkerSettings<H>>,
extensions: Option<Rc<Extensions>>,
) -> Entry<H>
where
H: HttpHandler + 'static,
@ -338,6 +358,7 @@ impl<H: HttpHandler + 'static> Entry<H> {
inner.method = parts.method;
inner.version = parts.version;
inner.headers = parts.headers;
inner.stream_extensions = extensions;
*inner.payload.borrow_mut() = Some(payload);
inner.addr = addr;
}
@ -347,7 +368,7 @@ impl<H: HttpHandler + 'static> Entry<H> {
// start request processing
let mut task = None;
for h in settings.handlers().iter_mut() {
for h in settings.handlers().iter() {
msg = match h.handle(msg) {
Ok(t) => {
task = Some(t);

View File

@ -112,7 +112,7 @@ impl<H: 'static> Writer for H2Writer<H> {
DATE => has_date = true,
_ => (),
}
resp.headers_mut().insert(key, value.clone());
resp.headers_mut().append(key, value.clone());
}
// set date header
@ -151,6 +151,8 @@ impl<H: 'static> Writer for H2Writer<H> {
.insert(CONTENT_ENCODING, HeaderValue::try_from(ce).unwrap());
}
trace!("Response: {:?}", resp);
match self
.respond
.send_response(resp, self.flags.contains(Flags::EOF))
@ -159,15 +161,12 @@ impl<H: 'static> Writer for H2Writer<H> {
Err(_) => return Err(io::Error::new(io::ErrorKind::Other, "err")),
}
trace!("Response: {:?}", msg);
let body = msg.replace_body(Body::Empty);
if let Body::Binary(bytes) = body {
if bytes.is_empty() {
Ok(WriterState::Done)
} else {
self.flags.insert(Flags::EOF);
self.written = bytes.len() as u64;
self.buffer.write(bytes.as_ref())?;
if let Some(ref mut stream) = self.stream {
self.flags.insert(Flags::RESERVED);
@ -183,8 +182,6 @@ impl<H: 'static> Writer for H2Writer<H> {
}
fn write(&mut self, payload: &Binary) -> io::Result<WriterState> {
self.written = payload.len() as u64;
if !self.flags.contains(Flags::DISCONNECTED) {
if self.flags.contains(Flags::STARTED) {
// TODO: add warning, write after EOF

View File

@ -8,8 +8,10 @@ const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; 13] = [
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = [
b'H', b'T', b'T', b'P', b'/', b'1', b'.', b'1', b' ', b' ', b' ', b' ', b' ',
];
match version {

813
src/server/http.rs Normal file
View File

@ -0,0 +1,813 @@
use std::marker::PhantomData;
use std::rc::Rc;
use std::sync::Arc;
use std::{io, mem, net, time};
use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, System};
use futures::{Future, Stream};
use net2::{TcpBuilder, TcpStreamExt};
use num_cpus;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_tcp::TcpStream;
#[cfg(feature = "tls")]
use native_tls::TlsAcceptor;
#[cfg(feature = "alpn")]
use openssl::ssl::SslAcceptorBuilder;
#[cfg(feature = "rust-tls")]
use rustls::ServerConfig;
use super::channel::{HttpChannel, WrapperStream};
use super::server::{Connections, Server, Service, ServiceHandler};
use super::settings::{ServerSettings, WorkerSettings};
use super::worker::{Conn, Socket};
use super::{
AcceptorService, HttpHandler, IntoAsyncIo, IntoHttpHandler, IoStream, KeepAlive,
Token,
};
/// An HTTP Server
///
/// By default it serves HTTP/2 when HTTPS is enabled. To change this,
/// use the `ServerFlags` that can be provided to the acceptor service.
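///
/// A minimal sketch of forcing HTTP/1 only over TLS (the `builder` value is
/// assumed to be an already configured `SslAcceptorBuilder`):
///
/// ```rust,ignore
/// use actix_web::{server, App};
/// use actix_web::server::{OpensslAcceptor, ServerFlags};
///
/// let acceptor = OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1).unwrap();
/// server::new(|| App::new())
///     .bind_with("127.0.0.1:8443", acceptor)
///     .unwrap()
///     .start();
/// ```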
pub struct HttpServer<H>
where
H: IntoHttpHandler + 'static,
{
factory: Arc<Fn() -> Vec<H> + Send + Sync>,
host: Option<String>,
keep_alive: KeepAlive,
backlog: i32,
threads: usize,
exit: bool,
shutdown_timeout: u16,
no_http2: bool,
no_signals: bool,
maxconn: usize,
maxconnrate: usize,
sockets: Vec<Socket>,
handlers: Vec<Box<IoStreamHandler<H::Handler, net::TcpStream>>>,
}
impl<H> HttpServer<H>
where
H: IntoHttpHandler + 'static,
{
/// Create new http server with application factory
pub fn new<F, U>(factory: F) -> Self
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
{
let f = move || (factory)().into_iter().collect();
HttpServer {
threads: num_cpus::get(),
factory: Arc::new(f),
host: None,
backlog: 2048,
keep_alive: KeepAlive::Os,
shutdown_timeout: 30,
exit: false,
no_http2: false,
no_signals: false,
maxconn: 102_400,
maxconnrate: 256,
// settings: None,
sockets: Vec::new(),
handlers: Vec::new(),
}
}
/// Set number of workers to start.
///
/// By default the HTTP server uses the number of available logical CPUs
/// as the thread count.
pub fn workers(mut self, num: usize) -> Self {
self.threads = num;
self
}
/// Set the maximum number of pending connections.
///
/// This refers to the number of clients that can be waiting to be served.
/// Exceeding this number results in the client getting an error when
/// attempting to connect. It should only affect servers under significant
/// load.
///
/// Generally set in the 64-2048 range. Default value is 2048.
///
/// This method should be called before `bind()` method call.
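///
/// A short usage sketch (the address and backlog value are examples only):
///
/// ```rust,ignore
/// use actix_web::{server, App};
///
/// server::new(|| App::new())
///     .backlog(1024)
///     .bind("127.0.0.1:8080")
///     .unwrap()
///     .start();
/// ```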
pub fn backlog(mut self, num: i32) -> Self {
self.backlog = num;
self
}
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is reached
/// for each worker.
///
/// By default the maximum number of connections is set to 102,400.
pub fn maxconn(mut self, num: usize) -> Self {
self.maxconn = num;
self
}
/// Sets the maximum number of concurrent connection-establishment processes
/// per worker.
///
/// All listeners will stop accepting connections when this limit is reached.
/// It can be used to limit the global SSL CPU usage.
///
/// By default the maximum connection rate is set to 256.
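///
/// A combined sketch with `maxconn` (both limits are example values):
///
/// ```rust,ignore
/// server::new(|| App::new())
///     // limit concurrent connections
///     .maxconn(30_000)
///     // limit concurrent connection establishment (e.g. TLS handshakes)
///     .maxconnrate(512)
///     .bind("127.0.0.1:8080")
///     .unwrap()
///     .start();
/// ```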
pub fn maxconnrate(mut self, num: usize) -> Self {
self.maxconnrate = num;
self
}
/// Set server keep-alive setting.
///
/// By default keep-alive is set to `Os`.
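///
/// The value is converted through `Into<KeepAlive>`, so an explicit variant or
/// (assuming the usual `From<usize>` conversion) a timeout in seconds can be
/// passed:
///
/// ```rust,ignore
/// // rely on the OS to close idle connections
/// let srv = server::new(|| App::new()).keep_alive(server::KeepAlive::Os);
/// // or pass a keep-alive timeout in seconds
/// let srv = server::new(|| App::new()).keep_alive(75);
/// ```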
pub fn keep_alive<T: Into<KeepAlive>>(mut self, val: T) -> Self {
self.keep_alive = val.into();
self
}
/// Set server host name.
///
/// The host name is used by the application router as a hostname for url
/// generation. Check the
/// [ConnectionInfo](./dev/struct.ConnectionInfo.html#method.host)
/// documentation for more information.
pub fn server_hostname(mut self, val: String) -> Self {
self.host = Some(val);
self
}
/// Stop actix system.
///
/// `SystemExit` message stops currently running system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
/// Disable signal handling
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
}
/// Timeout for graceful workers shutdown.
///
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are forcibly
/// dropped.
///
/// By default the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u16) -> Self {
self.shutdown_timeout = sec;
self
}
/// Disable `HTTP/2` support
// #[doc(hidden)]
// #[deprecated(
// since = "0.7.4",
//     note = "please use acceptor service with proper ServerFlags param"
// )]
pub fn no_http2(mut self) -> Self {
self.no_http2 = true;
self
}
/// Get addresses of bound sockets.
pub fn addrs(&self) -> Vec<net::SocketAddr> {
self.sockets.iter().map(|s| s.addr).collect()
}
/// Get addresses of bound sockets and the scheme for it.
///
/// This is useful when the server is bound from different sources
/// with some sockets listening on http and some listening on https
/// and the user should be presented with an enumeration of which
/// socket requires which protocol.
pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> {
self.handlers
.iter()
.map(|s| (s.addr(), s.scheme()))
.collect()
}
/// Use listener for accepting incoming connection requests
///
/// HttpServer does not change any configuration for the TcpListener;
/// it needs to be configured before being passed to the listen() method.
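///
/// A minimal sketch with a pre-configured standard listener (the address is
/// an example only):
///
/// ```rust,ignore
/// use std::net;
///
/// let lst = net::TcpListener::bind("127.0.0.1:8080").unwrap();
/// server::new(|| App::new())
///     .listen(lst)
///     .start();
/// ```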
pub fn listen(mut self, lst: net::TcpListener) -> Self {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers
.push(Box::new(SimpleHandler::new(lst.local_addr().unwrap())));
self.sockets.push(Socket { lst, addr, token });
self
}
#[doc(hidden)]
/// Use listener for accepting incoming connection requests
pub fn listen_with<A>(mut self, lst: net::TcpListener, acceptor: A) -> Self
where
A: AcceptorService<TcpStream> + Send + 'static,
{
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers.push(Box::new(StreamHandler::new(
lst.local_addr().unwrap(),
acceptor,
)));
self.sockets.push(Socket { lst, addr, token });
self
}
#[cfg(feature = "tls")]
/// Use listener for accepting incoming tls connection requests
///
/// HttpServer does not change any configuration for the TcpListener;
/// it needs to be configured before being passed to the listen() method.
pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self {
use super::NativeTlsAcceptor;
self.listen_with(lst, NativeTlsAcceptor::new(acceptor))
}
#[cfg(feature = "alpn")]
/// Use listener for accepting incoming tls connection requests
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn listen_ssl(
self, lst: net::TcpListener, builder: SslAcceptorBuilder,
) -> io::Result<Self> {
use super::{OpensslAcceptor, ServerFlags};
// alpn support
let flags = if self.no_http2 {
ServerFlags::HTTP1
} else {
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
Ok(self.listen_with(lst, OpensslAcceptor::with_flags(builder, flags)?))
}
#[cfg(feature = "rust-tls")]
/// Use listener for accepting incoming tls connection requests
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn listen_rustls(self, lst: net::TcpListener, builder: ServerConfig) -> Self {
use super::{RustlsAcceptor, ServerFlags};
// alpn support
let flags = if self.no_http2 {
ServerFlags::HTTP1
} else {
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
self.listen_with(lst, RustlsAcceptor::with_flags(builder, flags))
}
/// The socket address to bind
///
/// To bind multiple addresses this method can be called multiple times.
pub fn bind<S: net::ToSocketAddrs>(mut self, addr: S) -> io::Result<Self> {
let sockets = self.bind2(addr)?;
for lst in sockets {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers
.push(Box::new(SimpleHandler::new(lst.local_addr().unwrap())));
self.sockets.push(Socket { lst, addr, token })
}
Ok(self)
}
/// Start listening for incoming connections with supplied acceptor.
#[doc(hidden)]
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
pub fn bind_with<S, A>(mut self, addr: S, acceptor: A) -> io::Result<Self>
where
S: net::ToSocketAddrs,
A: AcceptorService<TcpStream> + Send + 'static,
{
let sockets = self.bind2(addr)?;
for lst in sockets {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers.push(Box::new(StreamHandler::new(
lst.local_addr().unwrap(),
acceptor.clone(),
)));
self.sockets.push(Socket { lst, addr, token })
}
Ok(self)
}
fn bind2<S: net::ToSocketAddrs>(
&self, addr: S,
) -> io::Result<Vec<net::TcpListener>> {
let mut err = None;
let mut succ = false;
let mut sockets = Vec::new();
for addr in addr.to_socket_addrs()? {
match create_tcp_listener(addr, self.backlog) {
Ok(lst) => {
succ = true;
sockets.push(lst);
}
Err(e) => err = Some(e),
}
}
if !succ {
if let Some(e) = err.take() {
Err(e)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
}
} else {
Ok(sockets)
}
}
#[cfg(feature = "tls")]
/// The ssl socket address to bind
///
/// To bind multiple addresses this method can be called multiple times.
pub fn bind_tls<S: net::ToSocketAddrs>(
self, addr: S, acceptor: TlsAcceptor,
) -> io::Result<Self> {
use super::NativeTlsAcceptor;
self.bind_with(addr, NativeTlsAcceptor::new(acceptor))
}
#[cfg(feature = "alpn")]
/// Start listening for incoming tls connections.
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn bind_ssl<S>(self, addr: S, builder: SslAcceptorBuilder) -> io::Result<Self>
where
S: net::ToSocketAddrs,
{
use super::{OpensslAcceptor, ServerFlags};
// alpn support
let flags = if self.no_http2 {
ServerFlags::HTTP1
} else {
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
self.bind_with(addr, OpensslAcceptor::with_flags(builder, flags)?)
}
#[cfg(feature = "rust-tls")]
/// Start listening for incoming tls connections.
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn bind_rustls<S: net::ToSocketAddrs>(
self, addr: S, builder: ServerConfig,
) -> io::Result<Self> {
use super::{RustlsAcceptor, ServerFlags};
// alpn support
let flags = if self.no_http2 {
ServerFlags::HTTP1
} else {
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
self.bind_with(addr, RustlsAcceptor::with_flags(builder, flags))
}
}
impl<H: IntoHttpHandler> Into<(Box<Service>, Vec<(Token, net::TcpListener)>)>
for HttpServer<H>
{
fn into(mut self) -> (Box<Service>, Vec<(Token, net::TcpListener)>) {
let sockets: Vec<_> = mem::replace(&mut self.sockets, Vec::new())
.into_iter()
.map(|item| (item.token, item.lst))
.collect();
(
Box::new(HttpService {
factory: self.factory,
host: self.host,
keep_alive: self.keep_alive,
handlers: self.handlers,
}),
sockets,
)
}
}
struct HttpService<H: IntoHttpHandler> {
factory: Arc<Fn() -> Vec<H> + Send + Sync>,
host: Option<String>,
keep_alive: KeepAlive,
handlers: Vec<Box<IoStreamHandler<H::Handler, net::TcpStream>>>,
}
impl<H: IntoHttpHandler + 'static> Service for HttpService<H> {
fn clone(&self) -> Box<Service> {
Box::new(HttpService {
factory: self.factory.clone(),
host: self.host.clone(),
keep_alive: self.keep_alive,
handlers: self.handlers.iter().map(|v| v.clone()).collect(),
})
}
fn create(&self, conns: Connections) -> Box<ServiceHandler> {
let addr = self.handlers[0].addr();
let s = ServerSettings::new(Some(addr), &self.host, false);
let apps: Vec<_> = (*self.factory)()
.into_iter()
.map(|h| h.into_handler())
.collect();
let handlers = self.handlers.iter().map(|h| h.clone()).collect();
Box::new(HttpServiceHandler::new(
apps,
handlers,
self.keep_alive,
s,
conns,
))
}
}
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming connections.
///
/// This method starts a number of HTTP workers in separate threads.
/// For each address, it starts a separate thread that runs `accept()`
/// in a loop.
///
/// This method panics if no socket addresses get bound.
///
/// This method must be run within a properly configured `Actix` system.
///
/// ```rust
/// extern crate actix_web;
/// use actix_web::{actix, server, App, HttpResponse};
///
/// fn main() {
/// let sys = actix::System::new("example"); // <- create Actix system
///
/// server::new(|| App::new().resource("/", |r| r.h(|_: &_| HttpResponse::Ok())))
/// .bind("127.0.0.1:0")
/// .expect("Can not bind to 127.0.0.1:0")
/// .start();
/// # actix::System::current().stop();
/// sys.run(); // <- Run actix system, this method starts all async processes
/// }
/// ```
pub fn start(self) -> Addr<Server> {
let mut srv = Server::new()
.workers(self.threads)
.maxconn(self.maxconn)
.maxconnrate(self.maxconnrate)
.shutdown_timeout(self.shutdown_timeout);
srv = if self.exit { srv.system_exit() } else { srv };
srv = if self.no_signals {
srv.disable_signals()
} else {
srv
};
srv.service(self).start()
}
/// Spawn new thread and start listening for incoming connections.
///
/// This method spawns a new thread and starts a new actix system. Other than
/// that it is similar to the `start()` method. This method blocks.
///
/// This method panics if no socket addresses get bound.
///
/// ```rust,ignore
/// # extern crate futures;
/// # extern crate actix_web;
/// # use futures::Future;
/// use actix_web::*;
///
/// fn main() {
/// HttpServer::new(|| App::new().resource("/", |r| r.h(|_| HttpResponse::Ok())))
/// .bind("127.0.0.1:0")
/// .expect("Can not bind to 127.0.0.1:0")
/// .run();
/// }
/// ```
pub fn run(self) {
let sys = System::new("http-server");
self.start();
sys.run();
}
}
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming connections from a stream.
///
/// This method uses only one thread for handling incoming connections.
pub fn start_incoming<T, S>(self, stream: S, secure: bool)
where
S: Stream<Item = T, Error = io::Error> + Send + 'static,
T: AsyncRead + AsyncWrite + Send + 'static,
{
// set server settings
let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
let srv_settings = ServerSettings::new(Some(addr), &self.host, secure);
let apps: Vec<_> = (*self.factory)()
.into_iter()
.map(|h| h.into_handler())
.collect();
let settings = WorkerSettings::create(
apps,
self.keep_alive,
srv_settings,
Connections::default(),
);
// start server
HttpIncoming::create(move |ctx| {
ctx.add_message_stream(stream.map_err(|_| ()).map(move |t| Conn {
io: WrapperStream::new(t),
handler: Token::new(0),
token: Token::new(0),
peer: None,
}));
HttpIncoming { settings }
});
}
}
struct HttpIncoming<H: HttpHandler> {
settings: Rc<WorkerSettings<H>>,
}
impl<H> Actor for HttpIncoming<H>
where
H: HttpHandler,
{
type Context = Context<Self>;
}
impl<T, H> Handler<Conn<T>> for HttpIncoming<H>
where
T: IoStream,
H: HttpHandler,
{
type Result = ();
fn handle(&mut self, msg: Conn<T>, _: &mut Context<Self>) -> Self::Result {
Arbiter::spawn(HttpChannel::new(
Rc::clone(&self.settings),
msg.io,
msg.peer,
));
}
}
struct HttpServiceHandler<H>
where
H: HttpHandler + 'static,
{
settings: Rc<WorkerSettings<H>>,
handlers: Vec<Box<IoStreamHandler<H, net::TcpStream>>>,
tcp_ka: Option<time::Duration>,
}
impl<H: HttpHandler + 'static> HttpServiceHandler<H> {
fn new(
apps: Vec<H>, handlers: Vec<Box<IoStreamHandler<H, net::TcpStream>>>,
keep_alive: KeepAlive, settings: ServerSettings, conns: Connections,
) -> HttpServiceHandler<H> {
let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive {
Some(time::Duration::new(val as u64, 0))
} else {
None
};
let settings = WorkerSettings::create(apps, keep_alive, settings, conns);
HttpServiceHandler {
handlers,
tcp_ka,
settings,
}
}
}
impl<H> ServiceHandler for HttpServiceHandler<H>
where
H: HttpHandler + 'static,
{
fn handle(
&mut self, token: Token, io: net::TcpStream, peer: Option<net::SocketAddr>,
) {
if self.tcp_ka.is_some() && io.set_keepalive(self.tcp_ka).is_err() {
error!("Can not set socket keep-alive option");
}
self.handlers[token.0].handle(Rc::clone(&self.settings), io, peer);
}
fn shutdown(&self, force: bool) {
if force {
self.settings
.head()
.traverse(|ch: &mut HttpChannel<TcpStream, H>| ch.shutdown());
}
}
}
struct SimpleHandler<Io> {
addr: net::SocketAddr,
io: PhantomData<Io>,
}
impl<Io: IntoAsyncIo> Clone for SimpleHandler<Io> {
fn clone(&self) -> Self {
SimpleHandler {
addr: self.addr,
io: PhantomData,
}
}
}
impl<Io: IntoAsyncIo> SimpleHandler<Io> {
fn new(addr: net::SocketAddr) -> Self {
SimpleHandler {
addr,
io: PhantomData,
}
}
}
impl<H, Io> IoStreamHandler<H, Io> for SimpleHandler<Io>
where
H: HttpHandler,
Io: IntoAsyncIo + Send + 'static,
Io::Io: IoStream,
{
fn addr(&self) -> net::SocketAddr {
self.addr
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
Box::new(Clone::clone(self))
}
fn scheme(&self) -> &'static str {
"http"
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
let mut io = match io.into_async_io() {
Ok(io) => io,
Err(err) => {
trace!("Failed to create async io: {}", err);
return;
}
};
let _ = io.set_nodelay(true);
Arbiter::spawn(HttpChannel::new(h, io, peer));
}
}
struct StreamHandler<A, Io> {
acceptor: A,
addr: net::SocketAddr,
io: PhantomData<Io>,
}
impl<Io: IntoAsyncIo, A: AcceptorService<Io::Io>> StreamHandler<A, Io> {
fn new(addr: net::SocketAddr, acceptor: A) -> Self {
StreamHandler {
addr,
acceptor,
io: PhantomData,
}
}
}
impl<Io: IntoAsyncIo, A: AcceptorService<Io::Io>> Clone for StreamHandler<A, Io> {
fn clone(&self) -> Self {
StreamHandler {
addr: self.addr,
acceptor: self.acceptor.clone(),
io: PhantomData,
}
}
}
impl<H, Io, A> IoStreamHandler<H, Io> for StreamHandler<A, Io>
where
H: HttpHandler,
Io: IntoAsyncIo + Send + 'static,
Io::Io: IoStream,
A: AcceptorService<Io::Io> + Send + 'static,
{
fn addr(&self) -> net::SocketAddr {
self.addr
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
Box::new(Clone::clone(self))
}
fn scheme(&self) -> &'static str {
self.acceptor.scheme()
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
let mut io = match io.into_async_io() {
Ok(io) => io,
Err(err) => {
trace!("Failed to create async io: {}", err);
return;
}
};
let _ = io.set_nodelay(true);
let rate = h.connection_rate();
Arbiter::spawn(self.acceptor.accept(io).then(move |res| {
drop(rate);
match res {
Ok(io) => Arbiter::spawn(HttpChannel::new(h, io, peer)),
Err(err) => trace!("Can not establish connection: {}", err),
}
Ok(())
}))
}
}
impl<H, Io: 'static> IoStreamHandler<H, Io> for Box<IoStreamHandler<H, Io>>
where
H: HttpHandler,
Io: IntoAsyncIo,
{
fn addr(&self) -> net::SocketAddr {
self.as_ref().addr()
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
self.as_ref().clone()
}
fn scheme(&self) -> &'static str {
self.as_ref().scheme()
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
self.as_ref().handle(h, io, peer)
}
}
trait IoStreamHandler<H, Io>: Send
where
H: HttpHandler,
{
fn clone(&self) -> Box<IoStreamHandler<H, Io>>;
fn addr(&self) -> net::SocketAddr;
fn scheme(&self) -> &'static str;
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>);
}
fn create_tcp_listener(
addr: net::SocketAddr, backlog: i32,
) -> io::Result<net::TcpListener> {
let builder = match addr {
net::SocketAddr::V4(_) => TcpBuilder::new_v4()?,
net::SocketAddr::V6(_) => TcpBuilder::new_v6()?,
};
builder.reuse_address(true)?;
builder.bind(addr)?;
Ok(builder.listen(backlog)?)
}

View File

@ -5,7 +5,7 @@ use brotli2::write::BrotliDecoder;
use bytes::{Bytes, BytesMut};
use error::PayloadError;
#[cfg(feature = "flate2")]
use flate2::write::{DeflateDecoder, GzDecoder};
use flate2::write::{GzDecoder, ZlibDecoder};
use header::ContentEncoding;
use http::header::{HeaderMap, CONTENT_ENCODING};
use payload::{PayloadSender, PayloadStatus, PayloadWriter};
@ -139,7 +139,7 @@ impl PayloadWriter for EncodedPayload {
pub(crate) enum Decoder {
#[cfg(feature = "flate2")]
Deflate(Box<DeflateDecoder<Writer>>),
Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(feature = "flate2")]
Gzip(Box<GzDecoder<Writer>>),
#[cfg(feature = "brotli")]
@ -186,7 +186,7 @@ impl PayloadStream {
}
#[cfg(feature = "flate2")]
ContentEncoding::Deflate => {
Decoder::Deflate(Box::new(DeflateDecoder::new(Writer::new())))
Decoder::Deflate(Box::new(ZlibDecoder::new(Writer::new())))
}
#[cfg(feature = "flate2")]
ContentEncoding::Gzip => {

View File

@ -35,6 +35,7 @@ pub(crate) struct InnerRequest {
pub(crate) info: RefCell<ConnectionInfo>,
pub(crate) payload: RefCell<Option<Payload>>,
pub(crate) settings: ServerSettings,
pub(crate) stream_extensions: Option<Rc<Extensions>>,
pool: &'static RequestPool,
}
@ -82,6 +83,7 @@ impl Request {
info: RefCell::new(ConnectionInfo::default()),
payload: RefCell::new(None),
extensions: RefCell::new(Extensions::new()),
stream_extensions: None,
}),
}
}
@ -189,6 +191,12 @@ impl Request {
}
}
/// Io stream extensions
#[inline]
pub fn stream_extensions(&self) -> Option<&Extensions> {
self.inner().stream_extensions.as_ref().map(|e| e.as_ref())
}
/// Server settings
#[inline]
pub fn server_settings(&self) -> &ServerSettings {

View File

@ -1,10 +1,119 @@
//! Http server
//! Http server module
//!
//! The module contains everything necessary to set up an
//! HTTP server.
//!
//! In order to start an HTTP server, you first need to create and configure it
//! using a factory that can be supplied to [new](fn.new.html).
//!
//! ## Factory
//!
//! A factory is a function that returns an Application, describing how
//! to serve incoming HTTP requests.
//!
//! As the server uses a worker pool, the factory function is restricted to the trait
//! bounds `Sync + Send + 'static` so that each worker can construct its own
//! Application without any need for synchronization.
//!
//! If you wish to share part of the state among all workers, wrap it in an `Arc`
//! and, if the wrapped type is not thread safe, in a synchronization primitive
//! such as [RwLock](https://doc.rust-lang.org/std/sync/struct.RwLock.html),
//! as sketched below.
//!
//! Note, though, that locking is not advisable for asynchronous programming,
//! and you should minimize all locks in your request handlers.
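//!
//! A minimal sketch of sharing a counter across workers (the `AppState` type
//! and the `/` route are illustrative, not part of actix-web):
//!
//! ```rust,ignore
//! use std::sync::{Arc, RwLock};
//! use actix_web::{server, App, HttpRequest, HttpResponse};
//!
//! struct AppState {
//!     counter: Arc<RwLock<usize>>,
//! }
//!
//! fn index(req: &HttpRequest<AppState>) -> HttpResponse {
//!     // keep the lock scope as small as possible
//!     let mut count = req.state().counter.write().unwrap();
//!     *count += 1;
//!     HttpResponse::Ok().body(format!("visits: {}", *count))
//! }
//!
//! fn main() {
//!     let counter = Arc::new(RwLock::new(0));
//!     server::new(move || {
//!         App::with_state(AppState { counter: counter.clone() })
//!             .resource("/", |r| r.f(index))
//!     }).bind("127.0.0.1:8080")
//!       .unwrap()
//!       .run();
//! }
//! ```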
//!
//! ## HTTPS Support
//!
//! Actix-web provides support for the major crates that provide TLS.
//! Each TLS implementation comes with an [AcceptorService](trait.AcceptorService.html)
//! that describes how the HTTP server accepts connections.
//!
//! For `bind` and `listen` there are corresponding `bind_with` and `listen_with`
//! methods that accept these services.
//!
//! By default, an acceptor works with both the HTTP2 and HTTP1 protocols.
//! This can be controlled using [ServerFlags](struct.ServerFlags.html), which
//! can be supplied when creating the `AcceptorService`.
//!
//! **NOTE:** `native-tls` doesn't support `HTTP2` yet
//!
//! ## Signal handling and shutdown
//!
//! By default the HTTP server listens for system signals
//! and gracefully shuts down after at most 30 seconds.
//!
//! Both signal handling and the shutdown timeout can be controlled
//! using the corresponding methods.
//!
//! If a worker is, for some reason, unable to shut down within the timeout,
//! it is forcibly dropped.
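//!
//! A short sketch of adjusting both (the 60 second timeout is only an example
//! value):
//!
//! ```rust,ignore
//! server::new(|| App::new().resource("/", |r| r.f(index)))
//!     .bind("127.0.0.1:8080")
//!     .unwrap()
//!     // handle termination signals yourself instead of the default handler
//!     .disable_signals()
//!     // give workers up to 60 seconds to finish in-flight requests
//!     .shutdown_timeout(60)
//!     .start();
//! ```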
//!
//! ## Example
//!
//! ```rust,ignore
//!extern crate actix;
//!extern crate actix_web;
//!extern crate rustls;
//!
//!use actix_web::{http, middleware, server, App, Error, HttpRequest, HttpResponse, Responder};
//!use std::io::BufReader;
//!use rustls::internal::pemfile::{certs, rsa_private_keys};
//!use rustls::{NoClientAuth, ServerConfig};
//!
//!fn index(req: &HttpRequest) -> Result<HttpResponse, Error> {
//! Ok(HttpResponse::Ok().content_type("text/plain").body("Welcome!"))
//!}
//!
//!fn load_ssl() -> ServerConfig {
//! use std::io::BufReader;
//!
//! const CERT: &'static [u8] = include_bytes!("../cert.pem");
//! const KEY: &'static [u8] = include_bytes!("../key.pem");
//!
//! let mut cert = BufReader::new(CERT);
//! let mut key = BufReader::new(KEY);
//!
//! let mut config = ServerConfig::new(NoClientAuth::new());
//! let cert_chain = certs(&mut cert).unwrap();
//! let mut keys = rsa_private_keys(&mut key).unwrap();
//! config.set_single_cert(cert_chain, keys.remove(0)).unwrap();
//!
//! config
//!}
//!
//!fn main() {
//! let sys = actix::System::new("http-server");
//! // load ssl keys
//! let config = load_ssl();
//!
//! // Create acceptor service for only HTTP1 protocol
//! // You can use ::new(config) to leave defaults
//! let acceptor = server::RustlsAcceptor::with_flags(config, actix_web::server::ServerFlags::HTTP1);
//!
//! // create and start server at once
//! server::new(|| {
//! App::new()
//! // register simple handler, handle all methods
//! .resource("/index.html", |r| r.f(index))
//! }).bind_with("127.0.0.1:8080", acceptor)
//! .unwrap()
//! .start();
//!
//! println!("Started http server: 127.0.0.1:8080");
//!     // Run the system so that the server starts accepting connections
//! let _ = sys.run();
//!}
//! ```
use std::net::Shutdown;
use std::{io, time};
use std::rc::Rc;
use std::{io, net, time};
use bytes::{BufMut, BytesMut};
use futures::{Async, Poll};
use futures::{Async, Future, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
pub(crate) mod accept;
@ -16,23 +125,35 @@ mod h1writer;
mod h2;
mod h2writer;
pub(crate) mod helpers;
mod http;
pub(crate) mod input;
pub(crate) mod message;
pub(crate) mod output;
mod server;
pub(crate) mod settings;
mod srv;
mod ssl;
mod worker;
use actix::Message;
pub use self::message::Request;
pub use self::http::HttpServer;
#[doc(hidden)]
pub use self::server::{
ConnectionRateTag, ConnectionTag, Connections, Server, Service, ServiceHandler,
};
pub use self::settings::ServerSettings;
pub use self::srv::HttpServer;
#[doc(hidden)]
pub use self::ssl::*;
#[doc(hidden)]
pub use self::helpers::write_content_length;
use actix::Message;
use body::Binary;
use error::Error;
use extensions::Extensions;
use header::ContentEncoding;
use httpresponse::HttpResponse;
@ -72,6 +193,17 @@ where
HttpServer::new(factory)
}
#[doc(hidden)]
bitflags! {
///Flags that can be used to configure HTTP Server.
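///
/// A sketch of combining flags when building an acceptor (assuming the
/// `rust-tls` feature is enabled and `config` is a prepared `rustls::ServerConfig`):
///
/// ```rust,ignore
/// use actix_web::server::{RustlsAcceptor, ServerFlags};
///
/// // accept both protocols; `RustlsAcceptor::new(config)` is equivalent
/// let acceptor = RustlsAcceptor::with_flags(config, ServerFlags::HTTP1 | ServerFlags::HTTP2);
/// ```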
pub struct ServerFlags: u8 {
///Use HTTP1 protocol
const HTTP1 = 0b0000_0001;
///Use HTTP2 protocol
const HTTP2 = 0b0000_0010;
}
}
#[derive(Debug, PartialEq, Clone, Copy)]
/// Server keep-alive setting
pub enum KeepAlive {
@ -124,6 +256,17 @@ impl Message for StopServer {
type Result = Result<(), ()>;
}
/// Socket id token
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct Token(usize);
impl Token {
pub(crate) fn new(val: usize) -> Token {
Token(val)
}
}
/// Low level http request handler
#[allow(unused_variables)]
pub trait HttpHandler: 'static {
@ -179,6 +322,35 @@ impl<T: HttpHandler> IntoHttpHandler for T {
}
}
pub(crate) trait IntoAsyncIo {
type Io: AsyncRead + AsyncWrite;
fn into_async_io(self) -> Result<Self::Io, io::Error>;
}
impl IntoAsyncIo for net::TcpStream {
type Io = TcpStream;
fn into_async_io(self) -> Result<Self::Io, io::Error> {
TcpStream::from_std(self, &Handle::default())
}
}
#[doc(hidden)]
/// Trait implemented by types that can accept incoming socket connections.
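///
/// The acceptors in the `ssl` module implement this trait; a sketch of plugging
/// one into the server (assuming `lst` is a bound `std::net::TcpListener` and
/// `acceptor` is e.g. a `RustlsAcceptor`):
///
/// ```rust,ignore
/// server::new(|| App::new())
///     .listen_with(lst, acceptor)
///     .start();
/// ```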
pub trait AcceptorService<Io: AsyncRead + AsyncWrite>: Clone {
/// Established connection type
type Accepted: IoStream;
/// Future describes async accept process.
type Future: Future<Item = Self::Accepted, Error = io::Error> + 'static;
/// Establish new connection
fn accept(&self, io: Io) -> Self::Future;
/// Scheme
fn scheme(&self) -> &'static str;
}
#[doc(hidden)]
#[derive(Debug)]
pub enum WriterState {
@ -218,7 +390,7 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()>;
fn read_available(&mut self, buf: &mut BytesMut) -> Poll<bool, io::Error> {
fn read_available(&mut self, buf: &mut BytesMut) -> Poll<(bool, bool), io::Error> {
let mut read_some = false;
loop {
if buf.remaining_mut() < LW_BUFFER_SIZE {
@ -228,7 +400,7 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static {
match self.read(buf.bytes_mut()) {
Ok(n) => {
if n == 0 {
return Ok(Async::Ready(!read_some));
return Ok(Async::Ready((read_some, true)));
} else {
read_some = true;
buf.advance_mut(n);
@ -237,7 +409,7 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static {
Err(e) => {
return if e.kind() == io::ErrorKind::WouldBlock {
if read_some {
Ok(Async::Ready(false))
Ok(Async::Ready((read_some, false)))
} else {
Ok(Async::NotReady)
}
@ -249,6 +421,29 @@ pub trait IoStream: AsyncRead + AsyncWrite + 'static {
}
}
}
/// Extra io stream extensions
fn extensions(&self) -> Option<Rc<Extensions>> {
None
}
}
#[cfg(all(unix, feature = "uds"))]
impl IoStream for ::tokio_uds::UnixStream {
#[inline]
fn shutdown(&mut self, how: Shutdown) -> io::Result<()> {
::tokio_uds::UnixStream::shutdown(self, how)
}
#[inline]
fn set_nodelay(&mut self, _nodelay: bool) -> io::Result<()> {
Ok(())
}
#[inline]
fn set_linger(&mut self, _dur: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
}
impl IoStream for TcpStream {
@ -267,90 +462,3 @@ impl IoStream for TcpStream {
TcpStream::set_linger(self, dur)
}
}
#[cfg(feature = "alpn")]
use tokio_openssl::SslStream;
#[cfg(feature = "alpn")]
impl IoStream for SslStream<TcpStream> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = self.get_mut().shutdown();
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
}
#[cfg(feature = "tls")]
use tokio_tls::TlsStream;
#[cfg(feature = "tls")]
impl IoStream for TlsStream<TcpStream> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = self.get_mut().shutdown();
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
}
#[cfg(feature = "rust-tls")]
use rustls::{ClientSession, ServerSession};
#[cfg(feature = "rust-tls")]
use tokio_rustls::TlsStream as RustlsStream;
#[cfg(feature = "rust-tls")]
impl IoStream for RustlsStream<TcpStream, ClientSession> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = <Self as AsyncWrite>::shutdown(self);
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().0.set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur)
}
}
#[cfg(feature = "rust-tls")]
impl IoStream for RustlsStream<TcpStream, ServerSession> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = <Self as AsyncWrite>::shutdown(self);
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().0.set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur)
}
}


@ -7,7 +7,7 @@ use std::{cmp, fmt, io, mem};
use brotli2::write::BrotliEncoder;
use bytes::BytesMut;
#[cfg(feature = "flate2")]
use flate2::write::{DeflateEncoder, GzEncoder};
use flate2::write::{GzEncoder, ZlibEncoder};
#[cfg(feature = "flate2")]
use flate2::Compression;
use http::header::{ACCEPT_ENCODING, CONTENT_LENGTH};
@ -210,7 +210,7 @@ impl Output {
let mut enc = match encoding {
#[cfg(feature = "flate2")]
ContentEncoding::Deflate => ContentEncoder::Deflate(
DeflateEncoder::new(transfer, Compression::fast()),
ZlibEncoder::new(transfer, Compression::fast()),
),
#[cfg(feature = "flate2")]
ContentEncoding::Gzip => ContentEncoder::Gzip(
@ -273,10 +273,9 @@ impl Output {
let enc = match encoding {
#[cfg(feature = "flate2")]
ContentEncoding::Deflate => ContentEncoder::Deflate(DeflateEncoder::new(
transfer,
Compression::fast(),
)),
ContentEncoding::Deflate => {
ContentEncoder::Deflate(ZlibEncoder::new(transfer, Compression::fast()))
}
#[cfg(feature = "flate2")]
ContentEncoding::Gzip => {
ContentEncoder::Gzip(GzEncoder::new(transfer, Compression::fast()))
@ -354,7 +353,7 @@ impl Output {
pub(crate) enum ContentEncoder {
#[cfg(feature = "flate2")]
Deflate(DeflateEncoder<TransferEncoding>),
Deflate(ZlibEncoder<TransferEncoding>),
#[cfg(feature = "flate2")]
Gzip(GzEncoder<TransferEncoding>),
#[cfg(feature = "brotli")]

src/server/server.rs (new file)

@ -0,0 +1,528 @@
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::time::Duration;
use std::{mem, net};
use futures::sync::{mpsc, mpsc::unbounded};
use futures::{Future, Sink, Stream};
use num_cpus;
use actix::{
fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, Context, Handler,
Response, StreamHandler, System, WrapFuture,
};
use super::accept::{AcceptLoop, AcceptNotify, Command};
use super::worker::{Conn, StopWorker, Worker, WorkerClient};
use super::{PauseServer, ResumeServer, StopServer, Token};
#[doc(hidden)]
/// Describes a service that can be used
/// with [Server](struct.Server.html)
pub trait Service: Send + 'static {
/// Clone service
fn clone(&self) -> Box<Service>;
/// Create service handler for this service
fn create(&self, conn: Connections) -> Box<ServiceHandler>;
}
impl Service for Box<Service> {
fn clone(&self) -> Box<Service> {
self.as_ref().clone()
}
fn create(&self, conn: Connections) -> Box<ServiceHandler> {
self.as_ref().create(conn)
}
}
#[doc(hidden)]
/// Describes the way a service handles incoming
/// TCP connections.
pub trait ServiceHandler {
/// Handle incoming stream
fn handle(
&mut self, token: Token, io: net::TcpStream, peer: Option<net::SocketAddr>,
);
/// Shutdown open handlers
fn shutdown(&self, _: bool) {}
}
pub(crate) enum ServerCommand {
WorkerDied(usize),
}
/// Generic server
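///
/// A sketch of configuring and starting a `Server` (here `http_srv` is assumed
/// to be something convertible into a service, e.g. an `HttpServer`):
///
/// ```rust,ignore
/// let addr = Server::new()
///     .workers(4)            // four worker threads instead of one per logical CPU
///     .maxconn(50_000)       // per-worker connection cap
///     .maxconnrate(128)      // per-worker concurrent handshake cap
///     .shutdown_timeout(10)  // seconds allowed for graceful shutdown
///     .service(http_srv)
///     .start();
/// ```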
#[doc(hidden)]
pub struct Server {
threads: usize,
workers: Vec<(usize, Addr<Worker>)>,
services: Vec<Box<Service>>,
sockets: Vec<Vec<(Token, net::TcpListener)>>,
accept: AcceptLoop,
exit: bool,
shutdown_timeout: u16,
signals: Option<Addr<signal::ProcessSignals>>,
no_signals: bool,
maxconn: usize,
maxconnrate: usize,
}
impl Default for Server {
fn default() -> Self {
Self::new()
}
}
impl Server {
/// Create new Server instance
pub fn new() -> Server {
Server {
threads: num_cpus::get(),
workers: Vec::new(),
services: Vec::new(),
sockets: Vec::new(),
accept: AcceptLoop::new(),
exit: false,
shutdown_timeout: 30,
signals: None,
no_signals: false,
maxconn: 102_400,
maxconnrate: 256,
}
}
/// Set number of workers to start.
///
/// By default the http server uses the number of available logical CPUs as the
/// worker count.
pub fn workers(mut self, num: usize) -> Self {
self.threads = num;
self
}
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is reached
/// for each worker.
///
/// By default max connections per worker is set to 102,400.
pub fn maxconn(mut self, num: usize) -> Self {
self.maxconn = num;
self
}
/// Sets the maximum number of concurrent connection-establishment processes per worker.
///
/// All listeners will stop accepting connections when this limit is reached. It
/// can be used to limit the global SSL CPU usage.
///
/// By default this limit is set to 256.
pub fn maxconnrate(mut self, num: usize) -> Self {
self.maxconnrate = num;
self
}
/// Stop actix system.
///
/// `SystemExit` message stops currently running system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
#[doc(hidden)]
/// Set alternative address for `ProcessSignals` actor.
pub fn signals(mut self, addr: Addr<signal::ProcessSignals>) -> Self {
self.signals = Some(addr);
self
}
/// Disable signal handling
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
}
/// Timeout for graceful workers shutdown.
///
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
///
/// By default the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u16) -> Self {
self.shutdown_timeout = sec;
self
}
/// Add new service to server
pub fn service<T>(mut self, srv: T) -> Self
where
T: Into<(Box<Service>, Vec<(Token, net::TcpListener)>)>,
{
let (srv, sockets) = srv.into();
self.services.push(srv);
self.sockets.push(sockets);
self
}
/// Spawn new thread and start listening for incoming connections.
///
/// This method spawns a new thread and starts a new actix system; other than
/// that it is similar to the `start()` method. This method blocks.
///
/// This method panics if no socket addresses were bound.
///
/// ```rust,ignore
/// # extern crate futures;
/// # extern crate actix_web;
/// # use futures::Future;
/// use actix_web::*;
///
/// fn main() {
/// Server::new()
/// .service(
/// HttpServer::new(|| App::new().resource("/", |r| r.h(|_| HttpResponse::Ok())))
/// .bind("127.0.0.1:0")
/// .expect("Can not bind to 127.0.0.1:0"))
/// .run();
/// }
/// ```
pub fn run(self) {
let sys = System::new("http-server");
self.start();
sys.run();
}
/// Starts Server Actor and returns its address
pub fn start(mut self) -> Addr<Server> {
if self.sockets.is_empty() {
panic!("Service should have at least one bound socket");
} else {
info!("Starting {} http workers", self.threads);
// start workers
let mut workers = Vec::new();
for idx in 0..self.threads {
let (addr, worker) = self.start_worker(idx, self.accept.get_notify());
workers.push(worker);
self.workers.push((idx, addr));
}
// start accept thread
for sock in &self.sockets {
for s in sock.iter() {
info!("Starting server on http://{}", s.1.local_addr().unwrap());
}
}
let rx = self
.accept
.start(mem::replace(&mut self.sockets, Vec::new()), workers);
// start http server actor
let signals = self.subscribe_to_signals();
let addr = Actor::create(move |ctx| {
ctx.add_stream(rx);
self
});
if let Some(signals) = signals {
signals.do_send(signal::Subscribe(addr.clone().recipient()))
}
addr
}
}
// subscribe to os signals
fn subscribe_to_signals(&self) -> Option<Addr<signal::ProcessSignals>> {
if !self.no_signals {
if let Some(ref signals) = self.signals {
Some(signals.clone())
} else {
Some(System::current().registry().get::<signal::ProcessSignals>())
}
} else {
None
}
}
fn start_worker(
&self, idx: usize, notify: AcceptNotify,
) -> (Addr<Worker>, WorkerClient) {
let (tx, rx) = unbounded::<Conn<net::TcpStream>>();
let conns = Connections::new(notify, self.maxconn, self.maxconnrate);
let worker = WorkerClient::new(idx, tx, conns.clone());
let services: Vec<_> = self.services.iter().map(|v| v.clone()).collect();
let addr = Arbiter::start(move |ctx: &mut Context<_>| {
ctx.add_message_stream(rx);
let handlers: Vec<_> = services
.into_iter()
.map(|s| s.create(conns.clone()))
.collect();
Worker::new(conns, handlers)
});
(addr, worker)
}
}
impl Actor for Server {
type Context = Context<Self>;
}
/// Signals support
/// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
/// message to `System` actor.
impl Handler<signal::Signal> for Server {
type Result = ();
fn handle(&mut self, msg: signal::Signal, ctx: &mut Context<Self>) {
match msg.0 {
signal::SignalType::Int => {
info!("SIGINT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
}
signal::SignalType::Term => {
info!("SIGTERM received, stopping");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: true }, ctx);
}
signal::SignalType::Quit => {
info!("SIGQUIT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
}
_ => (),
}
}
}
impl Handler<PauseServer> for Server {
type Result = ();
fn handle(&mut self, _: PauseServer, _: &mut Context<Self>) {
self.accept.send(Command::Pause);
}
}
impl Handler<ResumeServer> for Server {
type Result = ();
fn handle(&mut self, _: ResumeServer, _: &mut Context<Self>) {
self.accept.send(Command::Resume);
}
}
impl Handler<StopServer> for Server {
type Result = Response<(), ()>;
fn handle(&mut self, msg: StopServer, ctx: &mut Context<Self>) -> Self::Result {
// stop accept thread
self.accept.send(Command::Stop);
// stop workers
let (tx, rx) = mpsc::channel(1);
let dur = if msg.graceful {
Some(Duration::new(u64::from(self.shutdown_timeout), 0))
} else {
None
};
for worker in &self.workers {
let tx2 = tx.clone();
ctx.spawn(
worker
.1
.send(StopWorker { graceful: dur })
.into_actor(self)
.then(move |_, slf, ctx| {
slf.workers.pop();
if slf.workers.is_empty() {
let _ = tx2.send(());
// we need to stop system if server was spawned
if slf.exit {
ctx.run_later(Duration::from_millis(300), |_, _| {
System::current().stop();
});
}
}
fut::ok(())
}),
);
}
if !self.workers.is_empty() {
Response::async(rx.into_future().map(|_| ()).map_err(|_| ()))
} else {
// we need to stop system if server was spawned
if self.exit {
ctx.run_later(Duration::from_millis(300), |_, _| {
System::current().stop();
});
}
Response::reply(Ok(()))
}
}
}
/// Commands from accept threads
impl StreamHandler<ServerCommand, ()> for Server {
fn finished(&mut self, _: &mut Context<Self>) {}
fn handle(&mut self, msg: ServerCommand, _: &mut Context<Self>) {
match msg {
ServerCommand::WorkerDied(idx) => {
let mut found = false;
for i in 0..self.workers.len() {
if self.workers[i].0 == idx {
self.workers.swap_remove(i);
found = true;
break;
}
}
if found {
error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.workers.len();
'found: loop {
for i in 0..self.workers.len() {
if self.workers[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
}
break;
}
let (addr, worker) =
self.start_worker(new_idx, self.accept.get_notify());
self.workers.push((new_idx, addr));
self.accept.send(Command::Worker(worker));
}
}
}
}
}
#[derive(Clone, Default)]
///Contains information about server connections.
pub struct Connections(Arc<ConnectionsInner>);
impl Connections {
fn new(notify: AcceptNotify, maxconn: usize, maxconnrate: usize) -> Self {
let maxconn_low = if maxconn > 10 { maxconn - 10 } else { 0 };
let maxconnrate_low = if maxconnrate > 10 {
maxconnrate - 10
} else {
0
};
Connections(Arc::new(ConnectionsInner {
notify,
maxconn,
maxconnrate,
maxconn_low,
maxconnrate_low,
conn: AtomicUsize::new(0),
connrate: AtomicUsize::new(0),
}))
}
pub(crate) fn available(&self) -> bool {
self.0.available()
}
pub(crate) fn num_connections(&self) -> usize {
self.0.conn.load(Ordering::Relaxed)
}
/// Report opened connection
pub fn connection(&self) -> ConnectionTag {
ConnectionTag::new(self.0.clone())
}
/// Report a connection-establishment (rate) event; this usually corresponds to an SSL handshake
pub fn connection_rate(&self) -> ConnectionRateTag {
ConnectionRateTag::new(self.0.clone())
}
}
#[derive(Default)]
struct ConnectionsInner {
notify: AcceptNotify,
conn: AtomicUsize,
connrate: AtomicUsize,
maxconn: usize,
maxconnrate: usize,
maxconn_low: usize,
maxconnrate_low: usize,
}
impl ConnectionsInner {
fn available(&self) -> bool {
if self.maxconnrate <= self.connrate.load(Ordering::Relaxed) {
false
} else {
self.maxconn > self.conn.load(Ordering::Relaxed)
}
}
fn notify_maxconn(&self, maxconn: usize) {
if maxconn > self.maxconn_low && maxconn <= self.maxconn {
self.notify.notify();
}
}
fn notify_maxconnrate(&self, connrate: usize) {
if connrate > self.maxconnrate_low && connrate <= self.maxconnrate {
self.notify.notify();
}
}
}
/// Type responsible for the max-connections statistic.
///
/// The statistic is updated when the tag is dropped.
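///
/// A sketch of the intended usage from a service handler (here `conns` is the
/// `Connections` handle handed to the service):
///
/// ```rust,ignore
/// let tag = conns.connection(); // connection counter is incremented here
/// // ... serve the connection ...
/// drop(tag);                    // counter decremented, accept loop notified if below the limit
/// ```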
pub struct ConnectionTag(Arc<ConnectionsInner>);
impl ConnectionTag {
fn new(inner: Arc<ConnectionsInner>) -> Self {
inner.conn.fetch_add(1, Ordering::Relaxed);
ConnectionTag(inner)
}
}
impl Drop for ConnectionTag {
fn drop(&mut self) {
let conn = self.0.conn.fetch_sub(1, Ordering::Relaxed);
self.0.notify_maxconn(conn);
}
}
/// Type responsible for the max connection rate statistic.
///
/// The statistic is updated when the tag is dropped.
pub struct ConnectionRateTag(Arc<ConnectionsInner>);
impl ConnectionRateTag {
fn new(inner: Arc<ConnectionsInner>) -> Self {
inner.connrate.fetch_add(1, Ordering::Relaxed);
ConnectionRateTag(inner)
}
}
impl Drop for ConnectionRateTag {
fn drop(&mut self) {
let connrate = self.0.connrate.fetch_sub(1, Ordering::Relaxed);
self.0.notify_maxconnrate(connrate);
}
}


@ -1,18 +1,23 @@
use std::cell::{Cell, RefCell, RefMut, UnsafeCell};
use std::cell::{RefCell, RefMut, UnsafeCell};
use std::collections::VecDeque;
use std::fmt::Write;
use std::rc::Rc;
use std::time::{Duration, Instant};
use std::{env, fmt, net};
use actix::Arbiter;
use bytes::BytesMut;
use futures::Stream;
use futures_cpupool::CpuPool;
use http::StatusCode;
use lazycell::LazyCell;
use parking_lot::Mutex;
use time;
use tokio_timer::Interval;
use super::channel::Node;
use super::message::{Request, RequestPool};
use super::server::{ConnectionRateTag, ConnectionTag, Connections};
use super::KeepAlive;
use body::Body;
use httpresponse::{HttpResponse, HttpResponseBuilder, HttpResponsePool};
@ -93,21 +98,6 @@ impl ServerSettings {
}
}
pub(crate) fn parts(&self) -> (Option<net::SocketAddr>, String, bool) {
(self.addr, self.host.clone(), self.secure)
}
pub(crate) fn from_parts(parts: (Option<net::SocketAddr>, String, bool)) -> Self {
let (addr, host, secure) = parts;
ServerSettings {
addr,
host,
secure,
cpu_pool: LazyCell::new(),
responses: HttpResponsePool::get_pool(),
}
}
/// Returns the socket address of the local half of this TCP connection
pub fn local_addr(&self) -> Option<net::SocketAddr> {
self.addr
@ -145,19 +135,41 @@ impl ServerSettings {
const DATE_VALUE_LENGTH: usize = 29;
pub(crate) struct WorkerSettings<H> {
h: RefCell<Vec<H>>,
h: Vec<H>,
keep_alive: u64,
ka_enabled: bool,
bytes: Rc<SharedBytesPool>,
messages: &'static RequestPool,
channels: Cell<usize>,
conns: Connections,
node: RefCell<Node<()>>,
date: UnsafeCell<Date>,
}
impl<H: 'static> WorkerSettings<H> {
pub(crate) fn create(
apps: Vec<H>, keep_alive: KeepAlive, settings: ServerSettings,
conns: Connections,
) -> Rc<WorkerSettings<H>> {
let settings = Rc::new(Self::new(apps, keep_alive, settings, conns));
// periodic date update
let s = settings.clone();
Arbiter::spawn(
Interval::new(Instant::now(), Duration::from_secs(1))
.map_err(|_| ())
.and_then(move |_| {
s.update_date();
Ok(())
}).fold((), |(), _| Ok(())),
);
settings
}
}
impl<H> WorkerSettings<H> {
pub(crate) fn new(
h: Vec<H>, keep_alive: KeepAlive, settings: ServerSettings,
h: Vec<H>, keep_alive: KeepAlive, settings: ServerSettings, conns: Connections,
) -> WorkerSettings<H> {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
@ -166,27 +178,23 @@ impl<H> WorkerSettings<H> {
};
WorkerSettings {
h: RefCell::new(h),
h,
bytes: Rc::new(SharedBytesPool::new()),
messages: RequestPool::pool(settings),
channels: Cell::new(0),
node: RefCell::new(Node::head()),
date: UnsafeCell::new(Date::new()),
keep_alive,
ka_enabled,
conns,
}
}
pub fn num_channels(&self) -> usize {
self.channels.get()
}
pub fn head(&self) -> RefMut<Node<()>> {
self.node.borrow_mut()
}
pub fn handlers(&self) -> RefMut<Vec<H>> {
self.h.borrow_mut()
pub fn handlers(&self) -> &Vec<H> {
&self.h
}
pub fn keep_alive(&self) -> u64 {
@ -209,20 +217,11 @@ impl<H> WorkerSettings<H> {
RequestPool::get(self.messages)
}
pub fn add_channel(&self) {
self.channels.set(self.channels.get() + 1);
pub fn connection(&self) -> ConnectionTag {
self.conns.connection()
}
pub fn remove_channel(&self) {
let num = self.channels.get();
if num > 0 {
self.channels.set(num - 1);
} else {
error!("Number of removed channels is bigger than added channel. Bug in actix-web");
}
}
pub fn update_date(&self) {
fn update_date(&self) {
// Unsafe: WorkerSetting is !Sync and !Send
unsafe { &mut *self.date.get() }.update();
}
@ -240,6 +239,11 @@ impl<H> WorkerSettings<H> {
dst.extend_from_slice(date_bytes);
}
}
#[allow(dead_code)]
pub(crate) fn connection_rate(&self) -> ConnectionRateTag {
self.conns.connection_rate()
}
}
struct Date {
@ -311,6 +315,7 @@ mod tests {
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
Connections::default(),
);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1, true);


@ -1,822 +0,0 @@
use std::rc::Rc;
use std::sync::{mpsc as sync_mpsc, Arc};
use std::time::Duration;
use std::{io, net};
use actix::{
fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, Context, Handler,
Response, StreamHandler, System, WrapFuture,
};
use futures::sync::mpsc;
use futures::{Future, Sink, Stream};
use mio;
use net2::TcpBuilder;
use num_cpus;
use slab::Slab;
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "tls")]
use native_tls::TlsAcceptor;
#[cfg(feature = "alpn")]
use openssl::ssl::{AlpnError, SslAcceptorBuilder};
#[cfg(feature = "rust-tls")]
use rustls::ServerConfig;
use super::accept::{start_accept_thread, Command};
use super::channel::{HttpChannel, WrapperStream};
use super::settings::{ServerSettings, WorkerSettings};
use super::worker::{Conn, SocketInfo, StopWorker, StreamHandlerType, Worker};
use super::{IntoHttpHandler, IoStream, KeepAlive};
use super::{PauseServer, ResumeServer, StopServer};
#[cfg(feature = "alpn")]
fn configure_alpn(builder: &mut SslAcceptorBuilder) -> io::Result<()> {
builder.set_alpn_protos(b"\x02h2\x08http/1.1")?;
builder.set_alpn_select_callback(|_, protos| {
const H2: &[u8] = b"\x02h2";
if protos.windows(3).any(|window| window == H2) {
Ok(b"h2")
} else {
Err(AlpnError::NOACK)
}
});
Ok(())
}
/// An HTTP Server
pub struct HttpServer<H>
where
H: IntoHttpHandler + 'static,
{
h: Option<Rc<WorkerSettings<H::Handler>>>,
threads: usize,
backlog: i32,
host: Option<String>,
keep_alive: KeepAlive,
factory: Arc<Fn() -> Vec<H> + Send + Sync>,
#[cfg_attr(feature = "cargo-clippy", allow(type_complexity))]
workers: Vec<(usize, Addr<Worker<H::Handler>>)>,
sockets: Vec<Socket>,
accept: Option<(
mio::SetReadiness,
sync_mpsc::Sender<Command>,
Slab<SocketInfo>,
)>,
exit: bool,
shutdown_timeout: u16,
signals: Option<Addr<signal::ProcessSignals>>,
no_http2: bool,
no_signals: bool,
}
pub(crate) enum ServerCommand {
WorkerDied(usize),
}
impl<H> Actor for HttpServer<H>
where
H: IntoHttpHandler,
{
type Context = Context<Self>;
}
pub(crate) struct Socket {
pub lst: net::TcpListener,
pub addr: net::SocketAddr,
pub tp: StreamHandlerType,
}
impl<H> HttpServer<H>
where
H: IntoHttpHandler + 'static,
{
/// Create new http server with application factory
pub fn new<F, U>(factory: F) -> Self
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
{
let f = move || (factory)().into_iter().collect();
HttpServer {
h: None,
threads: num_cpus::get(),
backlog: 2048,
host: None,
keep_alive: KeepAlive::Os,
factory: Arc::new(f),
workers: Vec::new(),
sockets: Vec::new(),
accept: None,
exit: false,
shutdown_timeout: 30,
signals: None,
no_http2: false,
no_signals: false,
}
}
/// Set number of workers to start.
///
/// By default http server uses number of available logical cpu as threads
/// count.
pub fn workers(mut self, num: usize) -> Self {
self.threads = num;
self
}
#[doc(hidden)]
#[deprecated(
since = "0.6.0",
note = "please use `HttpServer::workers()` instead"
)]
pub fn threads(self, num: usize) -> Self {
self.workers(num)
}
/// Set the maximum number of pending connections.
///
/// This refers to the number of clients that can be waiting to be served.
/// Exceeding this number results in the client getting an error when
/// attempting to connect. It should only affect servers under significant
/// load.
///
/// Generally set in the 64-2048 range. Default value is 2048.
///
/// This method should be called before `bind()` method call.
pub fn backlog(mut self, num: i32) -> Self {
self.backlog = num;
self
}
/// Set server keep-alive setting.
///
/// By default keep alive is set to a `Os`.
pub fn keep_alive<T: Into<KeepAlive>>(mut self, val: T) -> Self {
self.keep_alive = val.into();
self
}
/// Set server host name.
///
/// Host name is used by the application router as a hostname for url
/// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo.
/// html#method.host) documentation for more information.
pub fn server_hostname(mut self, val: String) -> Self {
self.host = Some(val);
self
}
/// Stop actix system.
///
/// `SystemExit` message stops currently running system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
/// Set alternative address for `ProcessSignals` actor.
pub fn signals(mut self, addr: Addr<signal::ProcessSignals>) -> Self {
self.signals = Some(addr);
self
}
/// Disable signal handling
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
}
/// Timeout for graceful workers shutdown.
///
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
///
/// By default shutdown timeout sets to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u16) -> Self {
self.shutdown_timeout = sec;
self
}
/// Disable `HTTP/2` support
pub fn no_http2(mut self) -> Self {
self.no_http2 = true;
self
}
/// Get addresses of bound sockets.
pub fn addrs(&self) -> Vec<net::SocketAddr> {
self.sockets.iter().map(|s| s.addr).collect()
}
/// Get addresses of bound sockets and the scheme for it.
///
/// This is useful when the server is bound from different sources
/// with some sockets listening on http and some listening on https
/// and the user should be presented with an enumeration of which
/// socket requires which protocol.
pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> {
self.sockets
.iter()
.map(|s| (s.addr, s.tp.scheme()))
.collect()
}
/// Use listener for accepting incoming connection requests
///
/// HttpServer does not change any configuration for TcpListener,
/// it needs to be configured before passing it to listen() method.
pub fn listen(mut self, lst: net::TcpListener) -> Self {
let addr = lst.local_addr().unwrap();
self.sockets.push(Socket {
addr,
lst,
tp: StreamHandlerType::Normal,
});
self
}
#[cfg(feature = "tls")]
/// Use listener for accepting incoming tls connection requests
///
/// HttpServer does not change any configuration for TcpListener,
/// it needs to be configured before passing it to listen() method.
pub fn listen_tls(mut self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self {
let addr = lst.local_addr().unwrap();
self.sockets.push(Socket {
addr,
lst,
tp: StreamHandlerType::Tls(acceptor.clone()),
});
self
}
#[cfg(feature = "alpn")]
/// Use listener for accepting incoming tls connection requests
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn listen_ssl(
mut self, lst: net::TcpListener, mut builder: SslAcceptorBuilder,
) -> io::Result<Self> {
// alpn support
if !self.no_http2 {
configure_alpn(&mut builder)?;
}
let acceptor = builder.build();
let addr = lst.local_addr().unwrap();
self.sockets.push(Socket {
addr,
lst,
tp: StreamHandlerType::Alpn(acceptor.clone()),
});
Ok(self)
}
#[cfg(feature = "rust-tls")]
/// Use listener for accepting incoming tls connection requests
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn listen_rustls(
mut self, lst: net::TcpListener, mut builder: ServerConfig,
) -> io::Result<Self> {
// alpn support
if !self.no_http2 {
builder.set_protocols(&vec!["h2".to_string(), "http/1.1".to_string()]);
}
let addr = lst.local_addr().unwrap();
self.sockets.push(Socket {
addr,
lst,
tp: StreamHandlerType::Rustls(Arc::new(builder)),
});
Ok(self)
}
fn bind2<S: net::ToSocketAddrs>(&mut self, addr: S) -> io::Result<Vec<Socket>> {
let mut err = None;
let mut succ = false;
let mut sockets = Vec::new();
for addr in addr.to_socket_addrs()? {
match create_tcp_listener(addr, self.backlog) {
Ok(lst) => {
succ = true;
let addr = lst.local_addr().unwrap();
sockets.push(Socket {
lst,
addr,
tp: StreamHandlerType::Normal,
});
}
Err(e) => err = Some(e),
}
}
if !succ {
if let Some(e) = err.take() {
Err(e)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
}
} else {
Ok(sockets)
}
}
/// The socket address to bind
///
/// To bind multiple addresses this method can be called multiple times.
pub fn bind<S: net::ToSocketAddrs>(mut self, addr: S) -> io::Result<Self> {
let sockets = self.bind2(addr)?;
self.sockets.extend(sockets);
Ok(self)
}
#[cfg(feature = "tls")]
/// The ssl socket address to bind
///
/// To bind multiple addresses this method can be called multiple times.
pub fn bind_tls<S: net::ToSocketAddrs>(
mut self, addr: S, acceptor: TlsAcceptor,
) -> io::Result<Self> {
let sockets = self.bind2(addr)?;
self.sockets.extend(sockets.into_iter().map(|mut s| {
s.tp = StreamHandlerType::Tls(acceptor.clone());
s
}));
Ok(self)
}
#[cfg(feature = "alpn")]
/// Start listening for incoming tls connections.
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn bind_ssl<S: net::ToSocketAddrs>(
mut self, addr: S, mut builder: SslAcceptorBuilder,
) -> io::Result<Self> {
// alpn support
if !self.no_http2 {
configure_alpn(&mut builder)?;
}
let acceptor = builder.build();
let sockets = self.bind2(addr)?;
self.sockets.extend(sockets.into_iter().map(|mut s| {
s.tp = StreamHandlerType::Alpn(acceptor.clone());
s
}));
Ok(self)
}
#[cfg(feature = "rust-tls")]
/// Start listening for incoming tls connections.
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn bind_rustls<S: net::ToSocketAddrs>(
mut self, addr: S, mut builder: ServerConfig,
) -> io::Result<Self> {
// alpn support
if !self.no_http2 {
builder.set_protocols(&vec!["h2".to_string(), "http/1.1".to_string()]);
}
let builder = Arc::new(builder);
let sockets = self.bind2(addr)?;
self.sockets.extend(sockets.into_iter().map(move |mut s| {
s.tp = StreamHandlerType::Rustls(builder.clone());
s
}));
Ok(self)
}
fn start_workers(
&mut self, settings: &ServerSettings, sockets: &Slab<SocketInfo>,
) -> Vec<(usize, mpsc::UnboundedSender<Conn<net::TcpStream>>)> {
// start workers
let mut workers = Vec::new();
for idx in 0..self.threads {
let (tx, rx) = mpsc::unbounded::<Conn<net::TcpStream>>();
let ka = self.keep_alive;
let socks = sockets.clone();
let factory = Arc::clone(&self.factory);
let parts = settings.parts();
let addr = Arbiter::start(move |ctx: &mut Context<_>| {
let s = ServerSettings::from_parts(parts);
let apps: Vec<_> =
(*factory)().into_iter().map(|h| h.into_handler()).collect();
ctx.add_message_stream(rx);
Worker::new(apps, socks, ka, s)
});
workers.push((idx, tx));
self.workers.push((idx, addr));
}
info!("Starting {} http workers", self.threads);
workers
}
// subscribe to os signals
fn subscribe_to_signals(&self) -> Option<Addr<signal::ProcessSignals>> {
if !self.no_signals {
if let Some(ref signals) = self.signals {
Some(signals.clone())
} else {
Some(System::current().registry().get::<signal::ProcessSignals>())
}
} else {
None
}
}
}
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming connections.
///
/// This method starts number of http handler workers in separate threads.
/// For each address this method starts separate thread which does
/// `accept()` in a loop.
///
/// This method panics if no socket addresses were bound.
///
/// This method requires to run within properly configured `Actix` system.
///
/// ```rust
/// extern crate actix_web;
/// use actix_web::{actix, server, App, HttpResponse};
///
/// fn main() {
/// let sys = actix::System::new("example"); // <- create Actix system
///
/// server::new(|| App::new().resource("/", |r| r.h(|_: &_| HttpResponse::Ok())))
/// .bind("127.0.0.1:0")
/// .expect("Can not bind to 127.0.0.1:0")
/// .start();
/// # actix::System::current().stop();
/// sys.run(); // <- Run actix system, this method starts all async processes
/// }
/// ```
pub fn start(mut self) -> Addr<Self> {
if self.sockets.is_empty() {
panic!("HttpServer::bind() has to be called before start()");
} else {
let (tx, rx) = mpsc::unbounded();
let mut socks = Slab::new();
let mut addrs: Vec<(usize, Socket)> = Vec::new();
for socket in self.sockets.drain(..) {
let entry = socks.vacant_entry();
let token = entry.key();
entry.insert(SocketInfo {
addr: socket.addr,
htype: socket.tp.clone(),
});
addrs.push((token, socket));
}
let settings = ServerSettings::new(Some(addrs[0].1.addr), &self.host, false);
let workers = self.start_workers(&settings, &socks);
// start accept thread
for (_, sock) in &addrs {
info!("Starting server on http://{}", sock.addr);
}
let (r, cmd) = start_accept_thread(addrs, tx.clone(), workers.clone());
self.accept = Some((r, cmd, socks));
// start http server actor
let signals = self.subscribe_to_signals();
let addr = Actor::create(move |ctx| {
ctx.add_stream(rx);
self
});
if let Some(signals) = signals {
signals.do_send(signal::Subscribe(addr.clone().recipient()))
}
addr
}
}
/// Spawn new thread and start listening for incoming connections.
///
/// This method spawns new thread and starts new actix system. Other than
/// that it is similar to `start()` method. This method blocks.
///
/// This method panics if no socket addresses were bound.
///
/// ```rust,ignore
/// # extern crate futures;
/// # extern crate actix_web;
/// # use futures::Future;
/// use actix_web::*;
///
/// fn main() {
/// HttpServer::new(|| App::new().resource("/", |r| r.h(|_| HttpResponse::Ok())))
/// .bind("127.0.0.1:0")
/// .expect("Can not bind to 127.0.0.1:0")
/// .run();
/// }
/// ```
pub fn run(self) {
let sys = System::new("http-server");
self.start();
sys.run();
}
}
#[doc(hidden)]
#[cfg(feature = "tls")]
#[deprecated(
since = "0.6.0",
note = "please use `actix_web::HttpServer::bind_tls` instead"
)]
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming tls connections.
pub fn start_tls(mut self, acceptor: TlsAcceptor) -> io::Result<Addr<Self>> {
for sock in &mut self.sockets {
match sock.tp {
StreamHandlerType::Normal => (),
_ => continue,
}
sock.tp = StreamHandlerType::Tls(acceptor.clone());
}
Ok(self.start())
}
}
#[doc(hidden)]
#[cfg(feature = "alpn")]
#[deprecated(
since = "0.6.0",
note = "please use `actix_web::HttpServer::bind_ssl` instead"
)]
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming tls connections.
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn start_ssl(
mut self, mut builder: SslAcceptorBuilder,
) -> io::Result<Addr<Self>> {
// alpn support
if !self.no_http2 {
builder.set_alpn_protos(b"\x02h2\x08http/1.1")?;
builder.set_alpn_select_callback(|_, protos| {
const H2: &[u8] = b"\x02h2";
if protos.windows(3).any(|window| window == H2) {
Ok(b"h2")
} else {
Err(AlpnError::NOACK)
}
});
}
let acceptor = builder.build();
for sock in &mut self.sockets {
match sock.tp {
StreamHandlerType::Normal => (),
_ => continue,
}
sock.tp = StreamHandlerType::Alpn(acceptor.clone());
}
Ok(self.start())
}
}
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming connections from a stream.
///
/// This method uses only one thread for handling incoming connections.
pub fn start_incoming<T, S>(mut self, stream: S, secure: bool) -> Addr<Self>
where
S: Stream<Item = T, Error = io::Error> + Send + 'static,
T: AsyncRead + AsyncWrite + Send + 'static,
{
// set server settings
let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
let settings = ServerSettings::new(Some(addr), &self.host, secure);
let apps: Vec<_> = (*self.factory)()
.into_iter()
.map(|h| h.into_handler())
.collect();
self.h = Some(Rc::new(WorkerSettings::new(
apps,
self.keep_alive,
settings,
)));
// start server
let signals = self.subscribe_to_signals();
let addr = HttpServer::create(move |ctx| {
ctx.add_message_stream(stream.map_err(|_| ()).map(move |t| Conn {
io: WrapperStream::new(t),
token: 0,
peer: None,
http2: false,
}));
self
});
if let Some(signals) = signals {
signals.do_send(signal::Subscribe(addr.clone().recipient()))
}
addr
}
}
/// Signals support
/// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
/// message to `System` actor.
impl<H: IntoHttpHandler> Handler<signal::Signal> for HttpServer<H> {
type Result = ();
fn handle(&mut self, msg: signal::Signal, ctx: &mut Context<Self>) {
match msg.0 {
signal::SignalType::Int => {
info!("SIGINT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
}
signal::SignalType::Term => {
info!("SIGTERM received, stopping");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: true }, ctx);
}
signal::SignalType::Quit => {
info!("SIGQUIT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
}
_ => (),
}
}
}
/// Commands from accept threads
impl<H: IntoHttpHandler> StreamHandler<ServerCommand, ()> for HttpServer<H> {
fn finished(&mut self, _: &mut Context<Self>) {}
fn handle(&mut self, msg: ServerCommand, _: &mut Context<Self>) {
match msg {
ServerCommand::WorkerDied(idx) => {
let mut found = false;
for i in 0..self.workers.len() {
if self.workers[i].0 == idx {
self.workers.swap_remove(i);
found = true;
break;
}
}
if found {
error!("Worker has died {:?}, restarting", idx);
let (tx, rx) = mpsc::unbounded::<Conn<net::TcpStream>>();
let mut new_idx = self.workers.len();
'found: loop {
for i in 0..self.workers.len() {
if self.workers[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
}
break;
}
let ka = self.keep_alive;
let factory = Arc::clone(&self.factory);
let host = self.host.clone();
let socks = self.accept.as_ref().unwrap().2.clone();
let addr = socks[0].addr;
let addr = Arbiter::start(move |ctx: &mut Context<_>| {
let settings = ServerSettings::new(Some(addr), &host, false);
let apps: Vec<_> =
(*factory)().into_iter().map(|h| h.into_handler()).collect();
ctx.add_message_stream(rx);
Worker::new(apps, socks, ka, settings)
});
if let Some(ref item) = &self.accept {
let _ = item.1.send(Command::Worker(new_idx, tx.clone()));
let _ = item.0.set_readiness(mio::Ready::readable());
}
self.workers.push((new_idx, addr));
}
}
}
}
}
impl<T, H> Handler<Conn<T>> for HttpServer<H>
where
T: IoStream,
H: IntoHttpHandler,
{
type Result = ();
fn handle(&mut self, msg: Conn<T>, _: &mut Context<Self>) -> Self::Result {
Arbiter::spawn(HttpChannel::new(
Rc::clone(self.h.as_ref().unwrap()),
msg.io,
msg.peer,
msg.http2,
));
}
}
impl<H: IntoHttpHandler> Handler<PauseServer> for HttpServer<H> {
type Result = ();
fn handle(&mut self, _: PauseServer, _: &mut Context<Self>) {
for item in &self.accept {
let _ = item.1.send(Command::Pause);
let _ = item.0.set_readiness(mio::Ready::readable());
}
}
}
impl<H: IntoHttpHandler> Handler<ResumeServer> for HttpServer<H> {
type Result = ();
fn handle(&mut self, _: ResumeServer, _: &mut Context<Self>) {
for item in &self.accept {
let _ = item.1.send(Command::Resume);
let _ = item.0.set_readiness(mio::Ready::readable());
}
}
}
impl<H: IntoHttpHandler> Handler<StopServer> for HttpServer<H> {
type Result = Response<(), ()>;
fn handle(&mut self, msg: StopServer, ctx: &mut Context<Self>) -> Self::Result {
// stop accept threads
for item in &self.accept {
let _ = item.1.send(Command::Stop);
let _ = item.0.set_readiness(mio::Ready::readable());
}
// stop workers
let (tx, rx) = mpsc::channel(1);
let dur = if msg.graceful {
Some(Duration::new(u64::from(self.shutdown_timeout), 0))
} else {
None
};
for worker in &self.workers {
let tx2 = tx.clone();
ctx.spawn(
worker
.1
.send(StopWorker { graceful: dur })
.into_actor(self)
.then(move |_, slf, ctx| {
slf.workers.pop();
if slf.workers.is_empty() {
let _ = tx2.send(());
// we need to stop system if server was spawned
if slf.exit {
ctx.run_later(Duration::from_millis(300), |_, _| {
System::current().stop();
});
}
}
fut::ok(())
}),
);
}
if !self.workers.is_empty() {
Response::async(rx.into_future().map(|_| ()).map_err(|_| ()))
} else {
// we need to stop system if server was spawned
if self.exit {
ctx.run_later(Duration::from_millis(300), |_, _| {
System::current().stop();
});
}
Response::reply(Ok(()))
}
}
}
fn create_tcp_listener(
addr: net::SocketAddr, backlog: i32,
) -> io::Result<net::TcpListener> {
let builder = match addr {
net::SocketAddr::V4(_) => TcpBuilder::new_v4()?,
net::SocketAddr::V6(_) => TcpBuilder::new_v6()?,
};
builder.reuse_address(true)?;
builder.bind(addr)?;
Ok(builder.listen(backlog)?)
}

src/server/ssl/mod.rs (new file)

@ -0,0 +1,14 @@
#[cfg(feature = "alpn")]
mod openssl;
#[cfg(feature = "alpn")]
pub use self::openssl::OpensslAcceptor;
#[cfg(feature = "tls")]
mod nativetls;
#[cfg(feature = "tls")]
pub use self::nativetls::{NativeTlsAcceptor, TlsStream};
#[cfg(feature = "rust-tls")]
mod rustls;
#[cfg(feature = "rust-tls")]
pub use self::rustls::RustlsAcceptor;

src/server/ssl/nativetls.rs (new file)

@ -0,0 +1,143 @@
use std::net::Shutdown;
use std::{io, time};
use futures::{Async, Future, Poll};
use native_tls::{self, HandshakeError, TlsAcceptor};
use tokio_io::{AsyncRead, AsyncWrite};
use server::{AcceptorService, IoStream};
#[derive(Clone)]
/// Support `SSL` connections via native-tls package
///
/// `tls` feature enables `NativeTlsAcceptor` type
pub struct NativeTlsAcceptor {
acceptor: TlsAcceptor,
}
/// A wrapper around an underlying raw stream which implements the TLS or SSL
/// protocol.
///
/// A `TlsStream<S>` represents a handshake that has been completed successfully
/// and both the server and the client are ready for receiving and sending
/// data. Bytes read from a `TlsStream` are decrypted from `S` and bytes written
/// to a `TlsStream` are encrypted when passing through to `S`.
#[derive(Debug)]
pub struct TlsStream<S> {
inner: native_tls::TlsStream<S>,
}
/// Future returned from `NativeTlsAcceptor::accept` which will resolve
/// once the accept handshake has finished.
pub struct Accept<S> {
inner: Option<Result<native_tls::TlsStream<S>, HandshakeError<S>>>,
}
impl NativeTlsAcceptor {
/// Create `NativeTlsAcceptor` instance
pub fn new(acceptor: TlsAcceptor) -> Self {
NativeTlsAcceptor {
acceptor: acceptor.into(),
}
}
}
impl<Io: IoStream> AcceptorService<Io> for NativeTlsAcceptor {
type Accepted = TlsStream<Io>;
type Future = Accept<Io>;
fn scheme(&self) -> &'static str {
"https"
}
fn accept(&self, io: Io) -> Self::Future {
Accept {
inner: Some(self.acceptor.accept(io)),
}
}
}
impl<Io: IoStream> IoStream for TlsStream<Io> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = self.get_mut().shutdown();
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
}
impl<Io: IoStream> Future for Accept<Io> {
type Item = TlsStream<Io>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.inner.take().expect("cannot poll MidHandshake twice") {
Ok(stream) => Ok(TlsStream { inner: stream }.into()),
Err(HandshakeError::Failure(e)) => {
Err(io::Error::new(io::ErrorKind::Other, e))
}
Err(HandshakeError::WouldBlock(s)) => match s.handshake() {
Ok(stream) => Ok(TlsStream { inner: stream }.into()),
Err(HandshakeError::Failure(e)) => {
Err(io::Error::new(io::ErrorKind::Other, e))
}
Err(HandshakeError::WouldBlock(s)) => {
self.inner = Some(Err(HandshakeError::WouldBlock(s)));
Ok(Async::NotReady)
}
},
}
}
}
impl<S> TlsStream<S> {
/// Get access to the internal `native_tls::TlsStream` stream which also
/// transitively allows access to `S`.
pub fn get_ref(&self) -> &native_tls::TlsStream<S> {
&self.inner
}
/// Get mutable access to the internal `native_tls::TlsStream` stream which
/// also transitively allows mutable access to `S`.
pub fn get_mut(&mut self) -> &mut native_tls::TlsStream<S> {
&mut self.inner
}
}
impl<S: io::Read + io::Write> io::Read for TlsStream<S> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
impl<S: io::Read + io::Write> io::Write for TlsStream<S> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl<S: AsyncRead + AsyncWrite> AsyncRead for TlsStream<S> {}
impl<S: AsyncRead + AsyncWrite> AsyncWrite for TlsStream<S> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
match self.inner.shutdown() {
Ok(_) => (),
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (),
Err(e) => return Err(e),
}
self.inner.get_mut().shutdown()
}
}

src/server/ssl/openssl.rs (new file)

@ -0,0 +1,96 @@
use std::net::Shutdown;
use std::{io, time};
use futures::{Future, Poll};
use openssl::ssl::{AlpnError, SslAcceptor, SslAcceptorBuilder};
use tokio_openssl::{AcceptAsync, SslAcceptorExt, SslStream};
use server::{AcceptorService, IoStream, ServerFlags};
#[derive(Clone)]
/// Support `SSL` connections via openssl package
///
/// `alpn` feature enables `OpensslAcceptor` type
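///
/// A sketch of constructing the acceptor (assuming `builder` is a configured
/// `openssl::ssl::SslAcceptorBuilder`):
///
/// ```rust,ignore
/// use actix_web::server::{OpensslAcceptor, ServerFlags};
///
/// // HTTP/1.1 only; `OpensslAcceptor::new(builder)` enables both protocols
/// let acceptor = OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1)?;
/// ```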
pub struct OpensslAcceptor {
acceptor: SslAcceptor,
}
impl OpensslAcceptor {
/// Create an `OpensslAcceptor` with `HTTP/2` and `HTTP/1.1` support enabled.
pub fn new(builder: SslAcceptorBuilder) -> io::Result<Self> {
OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2)
}
/// Create `OpensslAcceptor` with custom server flags.
pub fn with_flags(
mut builder: SslAcceptorBuilder, flags: ServerFlags,
) -> io::Result<Self> {
let mut protos = Vec::new();
if flags.contains(ServerFlags::HTTP1) {
protos.extend(b"\x08http/1.1");
}
if flags.contains(ServerFlags::HTTP2) {
protos.extend(b"\x02h2");
builder.set_alpn_select_callback(|_, protos| {
const H2: &[u8] = b"\x02h2";
if protos.windows(3).any(|window| window == H2) {
Ok(b"h2")
} else {
Err(AlpnError::NOACK)
}
});
}
if !protos.is_empty() {
builder.set_alpn_protos(&protos)?;
}
Ok(OpensslAcceptor {
acceptor: builder.build(),
})
}
}
pub struct AcceptorFut<Io>(AcceptAsync<Io>);
impl<Io: IoStream> Future for AcceptorFut<Io> {
type Item = SslStream<Io>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.0
.poll()
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
}
}
impl<Io: IoStream> AcceptorService<Io> for OpensslAcceptor {
type Accepted = SslStream<Io>;
type Future = AcceptorFut<Io>;
fn scheme(&self) -> &'static str {
"https"
}
fn accept(&self, io: Io) -> Self::Future {
AcceptorFut(SslAcceptorExt::accept_async(&self.acceptor, io))
}
}
impl<T: IoStream> IoStream for SslStream<T> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = self.get_mut().shutdown();
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
}

src/server/ssl/rustls.rs (new file)

@ -0,0 +1,91 @@
use std::net::Shutdown;
use std::sync::Arc;
use std::{io, time};
use rustls::{ClientSession, ServerConfig, ServerSession};
use tokio_io::AsyncWrite;
use tokio_rustls::{AcceptAsync, ServerConfigExt, TlsStream};
use server::{AcceptorService, IoStream, ServerFlags};
#[derive(Clone)]
/// Support `SSL` connections via rustls package
///
/// `rust-tls` feature enables `RustlsAcceptor` type
pub struct RustlsAcceptor {
config: Arc<ServerConfig>,
}
impl RustlsAcceptor {
/// Create a `RustlsAcceptor` with `HTTP/2` and `HTTP/1.1` support enabled.
pub fn new(config: ServerConfig) -> Self {
RustlsAcceptor::with_flags(config, ServerFlags::HTTP1 | ServerFlags::HTTP2)
}
/// Create a `RustlsAcceptor` with custom server flags.
pub fn with_flags(mut config: ServerConfig, flags: ServerFlags) -> Self {
let mut protos = Vec::new();
if flags.contains(ServerFlags::HTTP2) {
protos.push("h2".to_string());
}
if flags.contains(ServerFlags::HTTP1) {
protos.push("http/1.1".to_string());
}
if !protos.is_empty() {
config.set_protocols(&protos);
}
RustlsAcceptor {
config: Arc::new(config),
}
}
}
impl<Io: IoStream> AcceptorService<Io> for RustlsAcceptor {
type Accepted = TlsStream<Io, ServerSession>;
type Future = AcceptAsync<Io>;
fn scheme(&self) -> &'static str {
"https"
}
fn accept(&self, io: Io) -> Self::Future {
ServerConfigExt::accept_async(&self.config, io)
}
}
impl<Io: IoStream> IoStream for TlsStream<Io, ClientSession> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = <Self as AsyncWrite>::shutdown(self);
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().0.set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur)
}
}
impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> {
#[inline]
fn shutdown(&mut self, _how: Shutdown) -> io::Result<()> {
let _ = <Self as AsyncWrite>::shutdown(self);
Ok(())
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().0.set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur)
}
}


@ -1,52 +1,52 @@
use std::{net, time};
use futures::sync::mpsc::{SendError, UnboundedSender};
use futures::sync::oneshot;
use futures::Future;
use net2::TcpStreamExt;
use slab::Slab;
use std::rc::Rc;
use std::{net, time};
use tokio::executor::current_thread;
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
#[cfg(any(feature = "tls", feature = "alpn", feature = "rust-tls"))]
use futures::future;
#[cfg(feature = "tls")]
use native_tls::TlsAcceptor;
#[cfg(feature = "tls")]
use tokio_tls::TlsAcceptorExt;
#[cfg(feature = "alpn")]
use openssl::ssl::SslAcceptor;
#[cfg(feature = "alpn")]
use tokio_openssl::SslAcceptorExt;
#[cfg(feature = "rust-tls")]
use rustls::{ServerConfig, Session};
#[cfg(feature = "rust-tls")]
use std::sync::Arc;
#[cfg(feature = "rust-tls")]
use tokio_rustls::ServerConfigExt;
use actix::msgs::StopArbiter;
use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message, Response};
use server::channel::HttpChannel;
use server::settings::{ServerSettings, WorkerSettings};
use server::{HttpHandler, KeepAlive};
use super::server::{Connections, ServiceHandler};
use super::Token;
#[derive(Message)]
pub(crate) struct Conn<T> {
pub io: T,
pub token: usize,
pub handler: Token,
pub token: Token,
pub peer: Option<net::SocketAddr>,
pub http2: bool,
}
pub(crate) struct Socket {
pub lst: net::TcpListener,
pub addr: net::SocketAddr,
pub token: Token,
}
#[derive(Clone)]
pub(crate) struct SocketInfo {
pub addr: net::SocketAddr,
pub htype: StreamHandlerType,
pub(crate) struct WorkerClient {
pub idx: usize,
tx: UnboundedSender<Conn<net::TcpStream>>,
conns: Connections,
}
impl WorkerClient {
pub fn new(
idx: usize, tx: UnboundedSender<Conn<net::TcpStream>>, conns: Connections,
) -> Self {
WorkerClient { idx, tx, conns }
}
pub fn send(
&self, msg: Conn<net::TcpStream>,
) -> Result<(), SendError<Conn<net::TcpStream>>> {
self.tx.unbounded_send(msg)
}
pub fn available(&self) -> bool {
self.conns.available()
}
}
/// Stop worker message. Returns `true` on successful shutdown
@ -63,44 +63,30 @@ impl Message for StopWorker {
///
/// A worker accepts `Socket` objects via an unbounded channel and starts request
/// processing.
pub(crate) struct Worker<H>
where
H: HttpHandler + 'static,
{
settings: Rc<WorkerSettings<H>>,
socks: Slab<SocketInfo>,
tcp_ka: Option<time::Duration>,
pub(crate) struct Worker {
conns: Connections,
handlers: Vec<Box<ServiceHandler>>,
}
impl<H: HttpHandler + 'static> Worker<H> {
pub(crate) fn new(
h: Vec<H>, socks: Slab<SocketInfo>, keep_alive: KeepAlive,
settings: ServerSettings,
) -> Worker<H> {
let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive {
Some(time::Duration::new(val as u64, 0))
} else {
None
};
Worker {
settings: Rc::new(WorkerSettings::new(h, keep_alive, settings)),
socks,
tcp_ka,
}
impl Actor for Worker {
type Context = Context<Self>;
}
fn update_time(&self, ctx: &mut Context<Self>) {
self.settings.update_date();
ctx.run_later(time::Duration::new(1, 0), |slf, ctx| slf.update_time(ctx));
impl Worker {
pub(crate) fn new(conns: Connections, handlers: Vec<Box<ServiceHandler>>) -> Self {
Worker { conns, handlers }
}
fn shutdown(&self, force: bool) {
self.handlers.iter().for_each(|h| h.shutdown(force));
}
fn shutdown_timeout(
&self, ctx: &mut Context<Self>, tx: oneshot::Sender<bool>, dur: time::Duration,
&self, ctx: &mut Context<Worker>, tx: oneshot::Sender<bool>, dur: time::Duration,
) {
// sleep for 1 second and then check again
ctx.run_later(time::Duration::new(1, 0), move |slf, ctx| {
let num = slf.settings.num_channels();
let num = slf.conns.num_connections();
if num == 0 {
let _ = tx.send(true);
Arbiter::current().do_send(StopArbiter(0));
@ -108,7 +94,7 @@ impl<H: HttpHandler + 'static> Worker<H> {
slf.shutdown_timeout(ctx, tx, d);
} else {
info!("Force shutdown http worker, {} connections", num);
slf.settings.head().traverse::<TcpStream, H>();
slf.shutdown(true);
let _ = tx.send(false);
Arbiter::current().do_send(StopArbiter(0));
}
@ -116,178 +102,38 @@ impl<H: HttpHandler + 'static> Worker<H> {
}
}
impl<H: 'static> Actor for Worker<H>
where
H: HttpHandler + 'static,
{
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Self::Context) {
self.update_time(ctx);
}
}
impl<H> Handler<Conn<net::TcpStream>> for Worker<H>
where
H: HttpHandler + 'static,
{
impl Handler<Conn<net::TcpStream>> for Worker {
type Result = ();
fn handle(&mut self, msg: Conn<net::TcpStream>, _: &mut Context<Self>) {
if self.tcp_ka.is_some() && msg.io.set_keepalive(self.tcp_ka).is_err() {
error!("Can not set socket keep-alive option");
}
self.socks
.get_mut(msg.token)
.unwrap()
.htype
.handle(Rc::clone(&self.settings), msg);
self.handlers[msg.handler.0].handle(msg.token, msg.io, msg.peer)
}
}
/// `StopWorker` message handler
impl<H> Handler<StopWorker> for Worker<H>
where
H: HttpHandler + 'static,
{
impl Handler<StopWorker> for Worker {
type Result = Response<bool, ()>;
fn handle(&mut self, msg: StopWorker, ctx: &mut Context<Self>) -> Self::Result {
let num = self.settings.num_channels();
let num = self.conns.num_connections();
if num == 0 {
info!("Shutting down http worker, 0 connections");
Response::reply(Ok(true))
} else if let Some(dur) = msg.graceful {
info!("Graceful http worker shutdown, {} connections", num);
self.shutdown(false);
let (tx, rx) = oneshot::channel();
let num = self.conns.num_connections();
if num != 0 {
info!("Graceful http worker shutdown, {} connections", num);
self.shutdown_timeout(ctx, tx, dur);
Response::reply(Ok(true))
} else {
Response::async(rx.map_err(|_| ()))
}
} else {
info!("Force shutdown http worker, {} connections", num);
self.settings.head().traverse::<TcpStream, H>();
self.shutdown(true);
Response::reply(Ok(false))
}
}
}
#[derive(Clone)]
pub(crate) enum StreamHandlerType {
Normal,
#[cfg(feature = "tls")]
Tls(TlsAcceptor),
#[cfg(feature = "alpn")]
Alpn(SslAcceptor),
#[cfg(feature = "rust-tls")]
Rustls(Arc<ServerConfig>),
}
impl StreamHandlerType {
fn handle<H: HttpHandler>(
&mut self, h: Rc<WorkerSettings<H>>, msg: Conn<net::TcpStream>,
) {
match *self {
StreamHandlerType::Normal => {
let _ = msg.io.set_nodelay(true);
let io = TcpStream::from_std(msg.io, &Handle::default())
.expect("failed to associate TCP stream");
current_thread::spawn(HttpChannel::new(h, io, msg.peer, msg.http2));
}
#[cfg(feature = "tls")]
StreamHandlerType::Tls(ref acceptor) => {
let Conn {
io, peer, http2, ..
} = msg;
let _ = io.set_nodelay(true);
let io = TcpStream::from_std(io, &Handle::default())
.expect("failed to associate TCP stream");
current_thread::spawn(TlsAcceptorExt::accept_async(acceptor, io).then(
move |res| {
match res {
Ok(io) => current_thread::spawn(HttpChannel::new(
h, io, peer, http2,
)),
Err(err) => {
trace!("Error during handling tls connection: {}", err)
}
};
future::result(Ok(()))
},
));
}
#[cfg(feature = "alpn")]
StreamHandlerType::Alpn(ref acceptor) => {
let Conn { io, peer, .. } = msg;
let _ = io.set_nodelay(true);
let io = TcpStream::from_std(io, &Handle::default())
.expect("failed to associate TCP stream");
current_thread::spawn(SslAcceptorExt::accept_async(acceptor, io).then(
move |res| {
match res {
Ok(io) => {
let http2 = if let Some(p) =
io.get_ref().ssl().selected_alpn_protocol()
{
p.len() == 2 && &p == b"h2"
} else {
false
};
current_thread::spawn(HttpChannel::new(
h, io, peer, http2,
));
}
Err(err) => {
trace!("Error during handling tls connection: {}", err)
}
};
future::result(Ok(()))
},
));
}
#[cfg(feature = "rust-tls")]
StreamHandlerType::Rustls(ref acceptor) => {
let Conn { io, peer, .. } = msg;
let _ = io.set_nodelay(true);
let io = TcpStream::from_std(io, &Handle::default())
.expect("failed to associate TCP stream");
current_thread::spawn(ServerConfigExt::accept_async(acceptor, io).then(
move |res| {
match res {
Ok(io) => {
let http2 = if let Some(p) =
io.get_ref().1.get_alpn_protocol()
{
p.len() == 2 && &p == &"h2"
} else {
false
};
current_thread::spawn(HttpChannel::new(
h, io, peer, http2,
));
}
Err(err) => {
trace!("Error during handling tls connection: {}", err)
}
};
future::result(Ok(()))
},
));
}
}
}
pub(crate) fn scheme(&self) -> &'static str {
match *self {
StreamHandlerType::Normal => "http",
#[cfg(feature = "tls")]
StreamHandlerType::Tls(_) => "https",
#[cfg(feature = "alpn")]
StreamHandlerType::Alpn(_) => "https",
#[cfg(feature = "rust-tls")]
StreamHandlerType::Rustls(_) => "https",
}
}
}
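
The ALPN branches above enable HTTP/2 only when the negotiated protocol is exactly `h2`. Reduced to its essence, and independent of any particular TLS library, the check looks roughly like this sketch.

// Decide whether to start the connection in HTTP/2 mode based on the
// protocol the TLS ALPN negotiation selected (None means no ALPN result).
fn is_h2(selected_alpn: Option<&[u8]>) -> bool {
    match selected_alpn {
        Some(proto) => proto == &b"h2"[..],
        None => false,
    }
}

fn main() {
    assert!(is_h2(Some(&b"h2"[..])));
    assert!(!is_h2(Some(&b"http/1.1"[..])));
    assert!(!is_h2(None));
}
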


@ -15,14 +15,18 @@ use tokio::runtime::current_thread::Runtime;
#[cfg(feature = "alpn")]
use openssl::ssl::SslAcceptorBuilder;
#[cfg(all(feature = "rust-tls"))]
#[cfg(feature = "rust-tls")]
use rustls::ServerConfig;
#[cfg(feature = "alpn")]
use server::OpensslAcceptor;
#[cfg(feature = "rust-tls")]
use server::RustlsAcceptor;
use application::{App, HttpApplication};
use body::Binary;
use client::{ClientConnector, ClientRequest, ClientRequestBuilder};
use error::Error;
use handler::{AsyncResultItem, Handler, Responder};
use handler::{AsyncResult, AsyncResultItem, Handler, Responder};
use header::{Header, IntoHeaderValue};
use httprequest::HttpRequest;
use httpresponse::HttpResponse;
@ -334,7 +338,7 @@ impl<S: 'static> TestServerBuilder<S> {
let ssl = self.ssl.take();
if let Some(ssl) = ssl {
let tcp = net::TcpListener::bind(addr).unwrap();
srv = srv.listen_ssl(tcp, ssl).unwrap();
srv = srv.listen_with(tcp, OpensslAcceptor::new(ssl).unwrap());
}
}
#[cfg(feature = "rust-tls")]
@ -342,7 +346,7 @@ impl<S: 'static> TestServerBuilder<S> {
let ssl = self.rust_ssl.take();
if let Some(ssl) = ssl {
let tcp = net::TcpListener::bind(addr).unwrap();
srv = srv.listen_rustls(tcp, ssl).unwrap();
srv = srv.listen_with(tcp, RustlsAcceptor::new(ssl));
}
}
if !has_ssl {
@ -674,8 +678,6 @@ impl<S: 'static> TestRequest<S> {
/// This method generates an `HttpRequest` instance and runs the handler
/// with the generated request.
///
/// This method panics if the handler returns an actor or async result.
pub fn run<H: Handler<S>>(self, h: &H) -> Result<HttpResponse, Error> {
let req = self.finish();
let resp = h.handle(&req);
@ -684,7 +686,10 @@ impl<S: 'static> TestRequest<S> {
Ok(resp) => match resp.into().into() {
AsyncResultItem::Ok(resp) => Ok(resp),
AsyncResultItem::Err(err) => Err(err),
AsyncResultItem::Future(_) => panic!("Async handler is not supported."),
AsyncResultItem::Future(fut) => {
let mut sys = System::new("test");
sys.block_on(fut)
}
},
Err(err) => Err(err.into()),
}
@ -704,8 +709,8 @@ impl<S: 'static> TestRequest<S> {
let req = self.finish();
let fut = h(req.clone());
let mut core = Runtime::new().unwrap();
match core.block_on(fut) {
let mut sys = System::new("test");
match sys.block_on(fut) {
Ok(r) => match r.respond_to(&req) {
Ok(reply) => match reply.into().into() {
AsyncResultItem::Ok(resp) => Ok(resp),
@ -716,4 +721,45 @@ impl<S: 'static> TestRequest<S> {
Err(err) => Err(err),
}
}
/// This method generates an `HttpRequest` instance and executes the handler, blocking on any returned future
pub fn run_async_result<F, R, I, E>(self, f: F) -> Result<I, E>
where
F: FnOnce(&HttpRequest<S>) -> R,
R: Into<AsyncResult<I, E>>,
{
let req = self.finish();
let res = f(&req);
match res.into().into() {
AsyncResultItem::Ok(resp) => Ok(resp),
AsyncResultItem::Err(err) => Err(err),
AsyncResultItem::Future(fut) => {
let mut sys = System::new("test");
sys.block_on(fut)
}
}
}
/// This method generates an `HttpRequest` instance and executes the handler, blocking on the response future if the responder is asynchronous
pub fn execute<F, R>(self, f: F) -> Result<HttpResponse, Error>
where
F: FnOnce(&HttpRequest<S>) -> R,
R: Responder + 'static,
{
let req = self.finish();
let resp = f(&req);
match resp.respond_to(&req) {
Ok(resp) => match resp.into().into() {
AsyncResultItem::Ok(resp) => Ok(resp),
AsyncResultItem::Err(err) => Err(err),
AsyncResultItem::Future(fut) => {
let mut sys = System::new("test");
sys.block_on(fut)
}
},
Err(err) => Err(err.into()),
}
}
}
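
Because `run`, `run_async_result`, and `execute` all spin up a fresh `System` and block on any returned future, synchronous and asynchronous handlers can be exercised through the same helpers. A rough usage sketch of the new `execute` method follows; the `index` handler is invented for illustration.

extern crate actix_web;

use actix_web::{http, test, HttpRequest, HttpResponse};

// A made-up synchronous handler; any `Responder` works here.
fn index(_req: &HttpRequest) -> HttpResponse {
    HttpResponse::Ok().body("hello")
}

#[test]
fn test_execute_handler() {
    let resp = test::TestRequest::default().execute(index).unwrap();
    assert_eq!(resp.status(), http::StatusCode::OK);
}
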


@ -7,24 +7,76 @@ use handler::{AsyncResult, AsyncResultItem, FromRequest, Handler, Responder};
use httprequest::HttpRequest;
use httpresponse::HttpResponse;
pub(crate) struct With<T, S, F, R>
trait FnWith<T, R>: 'static {
fn call_with(self: &Self, T) -> R;
}
impl<T, R, F: Fn(T) -> R + 'static> FnWith<T, R> for F {
#[cfg_attr(feature = "cargo-clippy", allow(boxed_local))]
fn call_with(self: &Self, arg: T) -> R {
(*self)(arg)
}
}
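
`FnWith` is what lets `With` and `WithAsync` drop the closure type `F` from their generic parameters: the handler is stored as an `Rc<FnWith<T, R>>` trait object and invoked through `call_with`. The same erasure trick in isolation, with invented names rather than the actix-web types:

use std::rc::Rc;

// A one-method trait lets us erase the concrete closure type.
trait CallWith<T, R>: 'static {
    fn call_with(&self, arg: T) -> R;
}

// Blanket impl: any 'static Fn(T) -> R can be used through the trait object.
impl<T, R, F: Fn(T) -> R + 'static> CallWith<T, R> for F {
    fn call_with(&self, arg: T) -> R {
        (*self)(arg)
    }
}

// The holder no longer carries an `F` type parameter, only `T` and `R`.
struct Holder<T, R> {
    hnd: Rc<dyn CallWith<T, R>>,
}

impl<T, R> Holder<T, R> {
    fn new<F: Fn(T) -> R + 'static>(f: F) -> Self {
        Holder { hnd: Rc::new(f) }
    }
    fn invoke(&self, arg: T) -> R {
        self.hnd.call_with(arg)
    }
}

fn main() {
    let h = Holder::new(|x: u32| x + 1);
    assert_eq!(h.invoke(41), 42);
}
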
#[doc(hidden)]
pub trait WithFactory<T, S, R>: 'static
where
T: FromRequest<S>,
R: Responder,
{
fn create(self) -> With<T, S, R>;
fn create_with_config(self, T::Config) -> With<T, S, R>;
}
#[doc(hidden)]
pub trait WithAsyncFactory<T, S, R, I, E>: 'static
where
T: FromRequest<S>,
R: Future<Item = I, Error = E>,
I: Responder,
E: Into<Error>,
{
fn create(self) -> WithAsync<T, S, R, I, E>;
fn create_with_config(self, T::Config) -> WithAsync<T, S, R, I, E>;
}
// impl<T1, T2, T3, S, F, R> WithFactory<(T1, T2, T3), S, R> for F
// where F: Fn(T1, T2, T3) -> R + 'static,
// T1: FromRequest<S> + 'static,
// T2: FromRequest<S> + 'static,
// T3: FromRequest<S> + 'static,
// R: Responder + 'static,
// S: 'static,
// {
// fn create(self) -> With<(T1, T2, T3), S, R> {
// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), (
// T1::Config::default(), T2::Config::default(), T3::Config::default()))
// }
// fn create_with_config(self, cfg: (T1::Config, T2::Config, T3::Config,)) -> With<(T1, T2, T3), S, R> {
// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), cfg)
// }
// }
#[doc(hidden)]
pub struct With<T, S, R>
where
F: Fn(T) -> R,
T: FromRequest<S>,
S: 'static,
{
hnd: Rc<F>,
hnd: Rc<FnWith<T, R>>,
cfg: Rc<T::Config>,
_s: PhantomData<S>,
}
impl<T, S, F, R> With<T, S, F, R>
impl<T, S, R> With<T, S, R>
where
F: Fn(T) -> R,
T: FromRequest<S>,
S: 'static,
{
pub fn new(f: F, cfg: T::Config) -> Self {
pub fn new<F: Fn(T) -> R + 'static>(f: F, cfg: T::Config) -> Self {
With {
cfg: Rc::new(cfg),
hnd: Rc::new(f),
@ -33,9 +85,8 @@ where
}
}
impl<T, S, F, R> Handler<S> for With<T, S, F, R>
impl<T, S, R> Handler<S> for With<T, S, R>
where
F: Fn(T) -> R + 'static,
R: Responder + 'static,
T: FromRequest<S> + 'static,
S: 'static,
@ -60,24 +111,22 @@ where
}
}
struct WithHandlerFut<T, S, F, R>
struct WithHandlerFut<T, S, R>
where
F: Fn(T) -> R,
R: Responder,
T: FromRequest<S> + 'static,
S: 'static,
{
started: bool,
hnd: Rc<F>,
hnd: Rc<FnWith<T, R>>,
cfg: Rc<T::Config>,
req: HttpRequest<S>,
fut1: Option<Box<Future<Item = T, Error = Error>>>,
fut2: Option<Box<Future<Item = HttpResponse, Error = Error>>>,
}
impl<T, S, F, R> Future for WithHandlerFut<T, S, F, R>
impl<T, S, R> Future for WithHandlerFut<T, S, R>
where
F: Fn(T) -> R,
R: Responder + 'static,
T: FromRequest<S> + 'static,
S: 'static,
@ -108,7 +157,7 @@ where
}
};
let item = match (*self.hnd)(item).respond_to(&self.req) {
let item = match self.hnd.as_ref().call_with(item).respond_to(&self.req) {
Ok(item) => item.into(),
Err(e) => return Err(e.into()),
};
@ -124,30 +173,29 @@ where
}
}
pub(crate) struct WithAsync<T, S, F, R, I, E>
#[doc(hidden)]
pub struct WithAsync<T, S, R, I, E>
where
F: Fn(T) -> R,
R: Future<Item = I, Error = E>,
I: Responder,
E: Into<Error>,
T: FromRequest<S>,
S: 'static,
{
hnd: Rc<F>,
hnd: Rc<FnWith<T, R>>,
cfg: Rc<T::Config>,
_s: PhantomData<S>,
}
impl<T, S, F, R, I, E> WithAsync<T, S, F, R, I, E>
impl<T, S, R, I, E> WithAsync<T, S, R, I, E>
where
F: Fn(T) -> R,
R: Future<Item = I, Error = E>,
I: Responder,
E: Into<Error>,
T: FromRequest<S>,
S: 'static,
{
pub fn new(f: F, cfg: T::Config) -> Self {
pub fn new<F: Fn(T) -> R + 'static>(f: F, cfg: T::Config) -> Self {
WithAsync {
cfg: Rc::new(cfg),
hnd: Rc::new(f),
@ -156,9 +204,8 @@ where
}
}
impl<T, S, F, R, I, E> Handler<S> for WithAsync<T, S, F, R, I, E>
impl<T, S, R, I, E> Handler<S> for WithAsync<T, S, R, I, E>
where
F: Fn(T) -> R + 'static,
R: Future<Item = I, Error = E> + 'static,
I: Responder + 'static,
E: Into<Error> + 'static,
@ -186,9 +233,8 @@ where
}
}
struct WithAsyncHandlerFut<T, S, F, R, I, E>
struct WithAsyncHandlerFut<T, S, R, I, E>
where
F: Fn(T) -> R,
R: Future<Item = I, Error = E> + 'static,
I: Responder + 'static,
E: Into<Error> + 'static,
@ -196,7 +242,7 @@ where
S: 'static,
{
started: bool,
hnd: Rc<F>,
hnd: Rc<FnWith<T, R>>,
cfg: Rc<T::Config>,
req: HttpRequest<S>,
fut1: Option<Box<Future<Item = T, Error = Error>>>,
@ -204,9 +250,8 @@ where
fut3: Option<Box<Future<Item = HttpResponse, Error = Error>>>,
}
impl<T, S, F, R, I, E> Future for WithAsyncHandlerFut<T, S, F, R, I, E>
impl<T, S, R, I, E> Future for WithAsyncHandlerFut<T, S, R, I, E>
where
F: Fn(T) -> R,
R: Future<Item = I, Error = E> + 'static,
I: Responder + 'static,
E: Into<Error> + 'static,
@ -257,7 +302,101 @@ where
}
};
self.fut2 = Some((*self.hnd)(item));
self.fut2 = Some(self.hnd.as_ref().call_with(item));
self.poll()
}
}
macro_rules! with_factory_tuple ({$(($n:tt, $T:ident)),+} => {
impl<$($T,)+ State, Func, Res> WithFactory<($($T,)+), State, Res> for Func
where Func: Fn($($T,)+) -> Res + 'static,
$($T: FromRequest<State> + 'static,)+
Res: Responder + 'static,
State: 'static,
{
fn create(self) -> With<($($T,)+), State, Res> {
With::new(move |($($n,)+)| (self)($($n,)+), ($($T::Config::default(),)+))
}
fn create_with_config(self, cfg: ($($T::Config,)+)) -> With<($($T,)+), State, Res> {
With::new(move |($($n,)+)| (self)($($n,)+), cfg)
}
}
});
macro_rules! with_async_factory_tuple ({$(($n:tt, $T:ident)),+} => {
impl<$($T,)+ State, Func, Res, Item, Err> WithAsyncFactory<($($T,)+), State, Res, Item, Err> for Func
where Func: Fn($($T,)+) -> Res + 'static,
$($T: FromRequest<State> + 'static,)+
Res: Future<Item=Item, Error=Err>,
Item: Responder + 'static,
Err: Into<Error>,
State: 'static,
{
fn create(self) -> WithAsync<($($T,)+), State, Res, Item, Err> {
WithAsync::new(move |($($n,)+)| (self)($($n,)+), ($($T::Config::default(),)+))
}
fn create_with_config(self, cfg: ($($T::Config,)+)) -> WithAsync<($($T,)+), State, Res, Item, Err> {
WithAsync::new(move |($($n,)+)| (self)($($n,)+), cfg)
}
}
});
with_factory_tuple!((a, A));
with_factory_tuple!((a, A), (b, B));
with_factory_tuple!((a, A), (b, B), (c, C));
with_factory_tuple!((a, A), (b, B), (c, C), (d, D));
with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E));
with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F));
with_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G));
with_factory_tuple!(
(a, A),
(b, B),
(c, C),
(d, D),
(e, E),
(f, F),
(g, G),
(h, H)
);
with_factory_tuple!(
(a, A),
(b, B),
(c, C),
(d, D),
(e, E),
(f, F),
(g, G),
(h, H),
(i, I)
);
with_async_factory_tuple!((a, A));
with_async_factory_tuple!((a, A), (b, B));
with_async_factory_tuple!((a, A), (b, B), (c, C));
with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D));
with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E));
with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F));
with_async_factory_tuple!((a, A), (b, B), (c, C), (d, D), (e, E), (f, F), (g, G));
with_async_factory_tuple!(
(a, A),
(b, B),
(c, C),
(d, D),
(e, E),
(f, F),
(g, G),
(h, H)
);
with_async_factory_tuple!(
(a, A),
(b, B),
(c, C),
(d, D),
(e, E),
(f, F),
(g, G),
(h, H),
(i, I)
);
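
Each `with_factory_tuple!` and `with_async_factory_tuple!` invocation above stamps out the factory impl for one tuple arity, which is how handlers taking up to nine extractor arguments are supported without nine hand-written impls. A stripped-down, standalone version of the same macro pattern, with the trait and macro names invented for the sketch:

use std::fmt::Debug;

// A trait we want implemented for tuples of several arities.
trait DescribeTuple {
    fn describe(&self) -> String;
}

// One macro arm generates the impl for a given list of (binding, Type) pairs.
macro_rules! impl_describe {
    ($(($n:tt, $T:ident)),+) => {
        impl<$($T: Debug,)+> DescribeTuple for ($($T,)+) {
            fn describe(&self) -> String {
                let ($(ref $n,)+) = *self;
                let mut parts: Vec<String> = Vec::new();
                $(parts.push(format!("{:?}", $n));)+
                parts.join(", ")
            }
        }
    };
}

// Invoke once per arity, exactly like with_factory_tuple! does.
impl_describe!((a, A));
impl_describe!((a, A), (b, B));
impl_describe!((a, A), (b, B), (c, C));

fn main() {
    assert_eq!((1,).describe(), "1");
    assert_eq!((1, "two").describe(), "1, \"two\"");
    assert_eq!((1, "two", 3.0).describe(), "1, \"two\", 3.0");
}
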


@ -387,8 +387,7 @@ mod tests {
.header(
header::UPGRADE,
header::HeaderValue::from_static("websocket"),
)
.finish();
).finish();
assert_eq!(
HandshakeError::NoConnectionUpgrade,
handshake(&req).err().unwrap()
@ -398,12 +397,10 @@ mod tests {
.header(
header::UPGRADE,
header::HeaderValue::from_static("websocket"),
)
.header(
).header(
header::CONNECTION,
header::HeaderValue::from_static("upgrade"),
)
.finish();
).finish();
assert_eq!(
HandshakeError::NoVersionHeader,
handshake(&req).err().unwrap()
@ -413,16 +410,13 @@ mod tests {
.header(
header::UPGRADE,
header::HeaderValue::from_static("websocket"),
)
.header(
).header(
header::CONNECTION,
header::HeaderValue::from_static("upgrade"),
)
.header(
).header(
header::SEC_WEBSOCKET_VERSION,
header::HeaderValue::from_static("5"),
)
.finish();
).finish();
assert_eq!(
HandshakeError::UnsupportedVersion,
handshake(&req).err().unwrap()
@ -432,16 +426,13 @@ mod tests {
.header(
header::UPGRADE,
header::HeaderValue::from_static("websocket"),
)
.header(
).header(
header::CONNECTION,
header::HeaderValue::from_static("upgrade"),
)
.header(
).header(
header::SEC_WEBSOCKET_VERSION,
header::HeaderValue::from_static("13"),
)
.finish();
).finish();
assert_eq!(
HandshakeError::BadWebsocketKey,
handshake(&req).err().unwrap()
@ -451,20 +442,16 @@ mod tests {
.header(
header::UPGRADE,
header::HeaderValue::from_static("websocket"),
)
.header(
).header(
header::CONNECTION,
header::HeaderValue::from_static("upgrade"),
)
.header(
).header(
header::SEC_WEBSOCKET_VERSION,
header::HeaderValue::from_static("13"),
)
.header(
).header(
header::SEC_WEBSOCKET_KEY,
header::HeaderValue::from_static("13"),
)
.finish();
).finish();
assert_eq!(
StatusCode::SWITCHING_PROTOCOLS,
handshake(&req).unwrap().finish().status()


@ -5,8 +5,11 @@ extern crate bytes;
extern crate flate2;
extern crate futures;
extern crate rand;
#[cfg(all(unix, feature = "uds"))]
extern crate tokio_uds;
use std::io::Read;
use std::io::{Read, Write};
use std::{net, thread};
use bytes::Bytes;
use flate2::read::GzDecoder;
@ -64,6 +67,16 @@ fn test_simple() {
assert_eq!(bytes, Bytes::from_static(STR.as_ref()));
}
#[test]
fn test_connection_close() {
let mut srv =
test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR)));
let request = srv.get().header("Connection", "close").finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert!(response.status().is_success());
}
#[test]
fn test_with_query_parameter() {
let mut srv = test::TestServer::new(|app| {
@ -116,8 +129,7 @@ fn test_client_gzip_encoding() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Deflate)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -146,8 +158,7 @@ fn test_client_gzip_encoding_large() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Deflate)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -179,8 +190,7 @@ fn test_client_gzip_encoding_large_random() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Deflate)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -198,6 +208,13 @@ fn test_client_gzip_encoding_large_random() {
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(all(unix, feature = "uds"))]
#[test]
fn test_compatible_with_unix_socket_stream() {
let (stream, _) = tokio_uds::UnixStream::pair().unwrap();
let _ = client::Connection::from_stream(stream);
}
#[cfg(feature = "brotli")]
#[test]
fn test_client_brotli_encoding() {
@ -208,8 +225,7 @@ fn test_client_brotli_encoding() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Gzip)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -242,8 +258,7 @@ fn test_client_brotli_encoding_large_random() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Gzip)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -272,8 +287,7 @@ fn test_client_deflate_encoding() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Br)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -306,8 +320,7 @@ fn test_client_deflate_encoding_large_random() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Br)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -336,8 +349,7 @@ fn test_client_streaming_explicit() {
.chunked()
.content_encoding(http::ContentEncoding::Identity)
.body(body))
})
.responder()
}).responder()
})
});
@ -459,3 +471,33 @@ fn test_default_headers() {
"\""
)));
}
#[test]
fn client_read_until_eof() {
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
let lst = net::TcpListener::bind(addr).unwrap();
for stream in lst.incoming() {
let mut stream = stream.unwrap();
let mut b = [0; 1000];
let _ = stream.read(&mut b).unwrap();
let _ = stream
.write_all(b"HTTP/1.1 200 OK\r\nconnection: close\r\n\r\nwelcome!");
}
});
let mut sys = actix::System::new("test");
// client request
let req = client::ClientRequest::get(format!("http://{}/", addr).as_str())
.finish()
.unwrap();
let response = sys.block_on(req.send()).unwrap();
assert!(response.status().is_success());
// read response
let bytes = sys.block_on(response.body()).unwrap();
assert_eq!(bytes, Bytes::from_static(b"welcome!"));
}


@ -191,8 +191,7 @@ fn test_form_extractor() {
.uri(srv.url("/test1/index.html"))
.form(FormData {
username: "test".to_string(),
})
.unwrap();
}).unwrap();
let response = srv.execute(request.send()).unwrap();
assert!(response.status().is_success());
@ -208,7 +207,7 @@ fn test_form_extractor2() {
r.route().with_config(
|form: Form<FormData>| format!("{}", form.username),
|cfg| {
cfg.error_handler(|err, _| {
cfg.0.error_handler(|err, _| {
error::InternalError::from_response(
err,
HttpResponse::Conflict().finish(),
@ -306,8 +305,7 @@ fn test_path_and_query_extractor2_async() {
Delay::new(Instant::now() + Duration::from_millis(10))
.and_then(move |_| {
Ok(format!("Welcome {} - {}!", p.username, data.0))
})
.responder()
}).responder()
},
)
});
@ -336,8 +334,7 @@ fn test_path_and_query_extractor3_async() {
Delay::new(Instant::now() + Duration::from_millis(10))
.and_then(move |_| {
Ok(format!("Welcome {} - {}!", p.username, data.0))
})
.responder()
}).responder()
})
});
});
@ -361,8 +358,7 @@ fn test_path_and_query_extractor4_async() {
Delay::new(Instant::now() + Duration::from_millis(10))
.and_then(move |_| {
Ok(format!("Welcome {} - {}!", p.username, data.0))
})
.responder()
}).responder()
})
});
});
@ -387,8 +383,7 @@ fn test_path_and_query_extractor2_async2() {
Delay::new(Instant::now() + Duration::from_millis(10))
.and_then(move |_| {
Ok(format!("Welcome {} - {}!", p.username, data.0))
})
.responder()
}).responder()
},
)
});
@ -422,15 +417,13 @@ fn test_path_and_query_extractor2_async2() {
fn test_path_and_query_extractor2_async3() {
let mut srv = test::TestServer::new(|app| {
app.resource("/{username}/index.html", |r| {
r.route().with(
|(data, p, _q): (Json<Value>, Path<PParam>, Query<PParam>)| {
r.route()
.with(|data: Json<Value>, p: Path<PParam>, _: Query<PParam>| {
Delay::new(Instant::now() + Duration::from_millis(10))
.and_then(move |_| {
Ok(format!("Welcome {} - {}!", p.username, data.0))
}).responder()
})
.responder()
},
)
});
});
@ -467,8 +460,7 @@ fn test_path_and_query_extractor2_async4() {
Delay::new(Instant::now() + Duration::from_millis(10))
.and_then(move |_| {
Ok(format!("Welcome {} - {}!", data.1.username, (data.0).0))
})
.responder()
}).responder()
})
});
});


@ -87,8 +87,7 @@ fn test_middleware_multiple() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.handler(|_| HttpResponse::Ok())
}).handler(|_| HttpResponse::Ok())
});
let request = srv.get().finish().unwrap();
@ -146,8 +145,7 @@ fn test_resource_middleware_multiple() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.handler(|_| HttpResponse::Ok())
}).handler(|_| HttpResponse::Ok())
});
let request = srv.get().finish().unwrap();
@ -176,8 +174,7 @@ fn test_scope_middleware() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.resource("/test", |r| r.f(|_| HttpResponse::Ok()))
}).resource("/test", |r| r.f(|_| HttpResponse::Ok()))
})
});
@ -207,13 +204,11 @@ fn test_scope_middleware_multiple() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.middleware(MiddlewareTest {
}).middleware(MiddlewareTest {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.resource("/test", |r| r.f(|_| HttpResponse::Ok()))
}).resource("/test", |r| r.f(|_| HttpResponse::Ok()))
})
});
@ -242,8 +237,7 @@ fn test_middleware_async_handler() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.resource("/", |r| {
}).resource("/", |r| {
r.route().a(|_| {
Delay::new(Instant::now() + Duration::from_millis(10))
.and_then(|_| Ok(HttpResponse::Ok()))
@ -312,8 +306,7 @@ fn test_scope_middleware_async_handler() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.resource("/test", |r| {
}).resource("/test", |r| {
r.route().a(|_| {
Delay::new(Instant::now() + Duration::from_millis(10))
.and_then(|_| Ok(HttpResponse::Ok()))
@ -379,8 +372,7 @@ fn test_scope_middleware_async_error() {
start: Arc::clone(&act_req),
response: Arc::clone(&act_resp),
finish: Arc::clone(&act_fin),
})
.resource("/test", |r| r.f(index_test_middleware_async_error))
}).resource("/test", |r| r.f(index_test_middleware_async_error))
})
});
@ -514,13 +506,11 @@ fn test_async_middleware_multiple() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.middleware(MiddlewareAsyncTest {
}).middleware(MiddlewareAsyncTest {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.resource("/test", |r| r.f(|_| HttpResponse::Ok()))
}).resource("/test", |r| r.f(|_| HttpResponse::Ok()))
});
let request = srv.get().uri(srv.url("/test")).finish().unwrap();
@ -550,13 +540,11 @@ fn test_async_sync_middleware_multiple() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.middleware(MiddlewareTest {
}).middleware(MiddlewareTest {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.resource("/test", |r| r.f(|_| HttpResponse::Ok()))
}).resource("/test", |r| r.f(|_| HttpResponse::Ok()))
});
let request = srv.get().uri(srv.url("/test")).finish().unwrap();
@ -587,8 +575,7 @@ fn test_async_scope_middleware() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.resource("/test", |r| r.f(|_| HttpResponse::Ok()))
}).resource("/test", |r| r.f(|_| HttpResponse::Ok()))
})
});
@ -620,13 +607,11 @@ fn test_async_scope_middleware_multiple() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.middleware(MiddlewareAsyncTest {
}).middleware(MiddlewareAsyncTest {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.resource("/test", |r| r.f(|_| HttpResponse::Ok()))
}).resource("/test", |r| r.f(|_| HttpResponse::Ok()))
})
});
@ -658,13 +643,11 @@ fn test_async_async_scope_middleware_multiple() {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.middleware(MiddlewareTest {
}).middleware(MiddlewareTest {
start: Arc::clone(&act_num1),
response: Arc::clone(&act_num2),
finish: Arc::clone(&act_num3),
})
.resource("/test", |r| r.f(|_| HttpResponse::Ok()))
}).resource("/test", |r| r.f(|_| HttpResponse::Ok()))
})
});
@ -1012,8 +995,7 @@ fn test_session_storage_middleware() {
App::new()
.middleware(SessionStorage::new(
CookieSessionBackend::signed(&[0; 32]).secure(false),
))
.resource("/index", move |r| {
)).resource("/index", move |r| {
r.f(|req| {
let res = req.session().set(COMPLEX_NAME, COMPLEX_PAYLOAD);
assert!(res.is_ok());
@ -1033,8 +1015,7 @@ fn test_session_storage_middleware() {
HttpResponse::Ok()
})
})
.resource("/expect_cookie", move |r| {
}).resource("/expect_cookie", move |r| {
r.f(|req| {
let _cookies = req.cookies().expect("To get cookies");


@ -13,14 +13,14 @@ extern crate tokio_reactor;
extern crate tokio_tcp;
use std::io::{Read, Write};
use std::sync::{mpsc, Arc};
use std::{net, thread, time};
use std::sync::Arc;
use std::{thread, time};
#[cfg(feature = "brotli")]
use brotli2::write::{BrotliDecoder, BrotliEncoder};
use bytes::{Bytes, BytesMut};
use flate2::read::GzDecoder;
use flate2::write::{DeflateDecoder, DeflateEncoder, GzEncoder};
use flate2::write::{GzEncoder, ZlibDecoder, ZlibEncoder};
use flate2::Compression;
use futures::stream::once;
use futures::{Future, Stream};
@ -32,7 +32,6 @@ use tokio::executor::current_thread;
use tokio::runtime::current_thread::Runtime;
use tokio_tcp::TcpStream;
use actix::System;
use actix_web::*;
const STR: &str = "Hello World Hello World Hello World Hello World Hello World \
@ -60,6 +59,9 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \
#[test]
#[cfg(unix)]
fn test_start() {
use actix::System;
use std::sync::mpsc;
let _ = test::TestServer::unused_addr();
let (tx, rx) = mpsc::channel();
@ -117,6 +119,10 @@ fn test_start() {
#[test]
#[cfg(unix)]
fn test_shutdown() {
use actix::System;
use std::net;
use std::sync::mpsc;
let _ = test::TestServer::unused_addr();
let (tx, rx) = mpsc::channel();
@ -156,6 +162,9 @@ fn test_shutdown() {
#[test]
#[cfg(unix)]
fn test_panic() {
use actix::System;
use std::sync::mpsc;
let _ = test::TestServer::unused_addr();
let (tx, rx) = mpsc::channel();
@ -167,8 +176,7 @@ fn test_panic() {
r.method(http::Method::GET).f(|_| -> &'static str {
panic!("error");
});
})
.resource("/", |r| {
}).resource("/", |r| {
r.method(http::Method::GET).f(|_| HttpResponse::Ok())
})
}).workers(1);
@ -528,7 +536,7 @@ fn test_body_chunked_explicit() {
#[test]
fn test_body_identity() {
let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
e.write_all(STR.as_ref()).unwrap();
let enc = e.finish().unwrap();
let enc2 = enc.clone();
@ -578,7 +586,7 @@ fn test_body_deflate() {
let bytes = srv.execute(response.body()).unwrap();
// decode deflate
let mut e = DeflateDecoder::new(Vec::new());
let mut e = ZlibDecoder::new(Vec::new());
e.write_all(bytes.as_ref()).unwrap();
let dec = e.finish().unwrap();
assert_eq!(Bytes::from(dec), Bytes::from_static(STR.as_ref()));
@ -619,8 +627,7 @@ fn test_gzip_encoding() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -652,8 +659,7 @@ fn test_gzip_encoding_large() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -689,8 +695,7 @@ fn test_reading_gzip_encoding_large_random() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -722,12 +727,11 @@ fn test_reading_deflate_encoding() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
})
.responder()
}).responder()
})
});
let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
e.write_all(STR.as_ref()).unwrap();
let enc = e.finish().unwrap();
@ -755,12 +759,11 @@ fn test_reading_deflate_encoding_large() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
})
.responder()
}).responder()
})
});
let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
@ -792,12 +795,11 @@ fn test_reading_deflate_encoding_large_random() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
})
.responder()
}).responder()
})
});
let mut e = DeflateEncoder::new(Vec::new(), Compression::default());
let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
@ -826,8 +828,7 @@ fn test_brotli_encoding() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -860,8 +861,7 @@ fn test_brotli_encoding_large() {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
})
.responder()
}).responder()
})
});
@ -887,6 +887,7 @@ fn test_brotli_encoding_large() {
fn test_h2() {
let srv = test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR)));
let addr = srv.addr();
thread::sleep(time::Duration::from_millis(500));
let mut core = Runtime::new().unwrap();
let tcp = TcpStream::connect(&addr);
@ -930,3 +931,80 @@ fn test_application() {
let response = srv.execute(request.send()).unwrap();
assert!(response.status().is_success());
}
#[test]
fn test_default_404_handler_response() {
let mut srv = test::TestServer::with_factory(|| {
App::new()
.prefix("/app")
.resource("", |r| r.f(|_| HttpResponse::Ok()))
.resource("/", |r| r.f(|_| HttpResponse::Ok()))
});
let addr = srv.addr();
let mut buf = [0; 24];
let request = TcpStream::connect(&addr)
.and_then(|sock| {
tokio::io::write_all(sock, "HEAD / HTTP/1.1\r\nHost: localhost\r\n\r\n")
.and_then(|(sock, _)| tokio::io::read_exact(sock, &mut buf))
.and_then(|(_, buf)| Ok(buf))
}).map_err(|e| panic!("{:?}", e));
let response = srv.execute(request).unwrap();
let rep = String::from_utf8_lossy(&response[..]);
assert!(rep.contains("HTTP/1.1 404 Not Found"));
}
#[test]
fn test_server_cookies() {
use actix_web::http;
let mut srv = test::TestServer::with_factory(|| {
App::new().resource("/", |r| {
r.f(|_| {
HttpResponse::Ok()
.cookie(
http::CookieBuilder::new("first", "first_value")
.http_only(true)
.finish(),
).cookie(http::Cookie::new("second", "first_value"))
.cookie(http::Cookie::new("second", "second_value"))
.finish()
})
})
});
let first_cookie = http::CookieBuilder::new("first", "first_value")
.http_only(true)
.finish();
let second_cookie = http::Cookie::new("second", "second_value");
let request = srv.get().finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert!(response.status().is_success());
let cookies = response.cookies().expect("To have cookies");
assert_eq!(cookies.len(), 2);
if cookies[0] == first_cookie {
assert_eq!(cookies[1], second_cookie);
} else {
assert_eq!(cookies[0], second_cookie);
assert_eq!(cookies[1], first_cookie);
}
let first_cookie = first_cookie.to_string();
let second_cookie = second_cookie.to_string();
// Check that we have exactly two instances of raw cookie headers
let cookies = response
.headers()
.get_all(http::header::SET_COOKIE)
.iter()
.map(|header| header.to_str().expect("To str").to_string())
.collect::<Vec<_>>();
assert_eq!(cookies.len(), 2);
if cookies[0] == first_cookie {
assert_eq!(cookies[1], second_cookie);
} else {
assert_eq!(cookies[0], second_cookie);
assert_eq!(cookies[1], first_cookie);
}
}


@ -103,7 +103,6 @@ fn test_simple_path() {
);
}
#[test]
fn test_empty_close_code() {
let mut srv = test::TestServer::new(|app| app.handler(|req| ws::start(req, Ws)));
@ -214,8 +213,7 @@ impl Ws2 {
act.send(ctx);
}
actix::fut::ok(())
})
.wait(ctx);
}).wait(ctx);
}
}