mirror of https://github.com/fafhrd91/actix-web synced 2025-07-14 22:17:04 +02:00

Compare commits


12 Commits

Author SHA1 Message Date
059d1671d7 prepare release beta 4 (#1659) 2020-09-09 22:14:11 +01:00
3a27580ebe awc: improve module documentation (#1656)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-09 14:24:12 +01:00
9d0534999d bump connect and tls versions (#1655) 2020-09-09 09:20:54 +01:00
c54d73e0bb Improve awc websocket docs (#1654)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-07 12:04:54 +01:00
9a9d4b182e document all remaining unsafe usages (#1642)
adds some debug assertions where appropriate
2020-09-03 10:00:24 +01:00
4e321595bc extract more config types from Data<T> as well (#1641) 2020-09-02 22:12:07 +01:00
01cbef700f Fix a small typo in a doc comment. (#1649) 2020-08-28 22:16:41 +01:00
8497b5f490 integrate with updated actix-{codec, utils} (#1634) 2020-08-24 10:13:35 +01:00
75d86a6beb Configurable trailing slash behaviour for NormalizePath (#1639)
Co-authored-by: ljoonal <ljoona@ljoonal.xyz>
2020-08-19 12:21:52 +01:00
3892a95c11 Fix actix-web version to publish 2020-08-18 01:16:18 +09:00
5802eb797f awc,web: Bump up to next beta releases (#1638) 2020-08-18 01:08:40 +09:00
ff2ca0f420 Update rustls to 0.18 (#1637) 2020-08-18 00:28:39 +09:00
48 changed files with 582 additions and 463 deletions

View File

@ -1,6 +1,26 @@
# Changes # Changes
## Unreleased ## Unreleased - 2020-xx-xx
### Added
* `middleware::NormalizePath` now has configurable behaviour for either always having a trailing
slash, or as the new addition, always trimming trailing slashes. [#1639]
### Changed
* Update actix-codec and actix-utils dependencies. [#1634]
* `FormConfig` and `JsonConfig` configurations are now also considered when set
using `App::data`. [#1641]
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. [#1655]
* `HttpServer::maxconnrate` is renamed to the more expressive
`HttpServer::max_connection_rate`. [#1655]
[#1639]: https://github.com/actix/actix-web/pull/1639
[#1641]: https://github.com/actix/actix-web/pull/1641
[#1634]: https://github.com/actix/actix-web/pull/1634
[#1655]: https://github.com/actix/actix-web/pull/1655
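Taken together, the unreleased entries above change how an application is configured. Below is a minimal sketch of an app using these betas; it assumes the `TrailingSlash::Trim` variant introduced by #1639 and the `#[actix_rt::main]` entry point, and the handler, struct, port, and numeric limits are illustrative rather than taken from this compare.

```rust
use actix_web::middleware::{normalize::TrailingSlash, NormalizePath};
use actix_web::{web, App, HttpServer};
use serde::Deserialize;

#[derive(Deserialize)]
struct Item {
    name: String,
}

// JSON extractor; its payload limit comes from the `JsonConfig` registered below.
async fn create(item: web::Json<Item>) -> String {
    format!("created {}", item.name)
}

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // #1639: trim trailing slashes instead of always appending one.
            .wrap(NormalizePath::new(TrailingSlash::Trim))
            // #1641: extractor configs are now also picked up when set via `App::data`,
            // not only via `App::app_data`.
            .data(web::JsonConfig::default().limit(4096))
            .route("/items", web::post().to(create))
    })
    // #1655: renamed from `maxconn` / `maxconnrate`.
    .max_connections(25_000)
    .max_connection_rate(256)
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```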
## 3.0.0-beta.3 - 2020-08-17
### Changed
* Update `rustls` to 0.18
## 3.0.0-beta.2 - 2020-08-17 ## 3.0.0-beta.2 - 2020-08-17

View File

@ -1,6 +1,6 @@
[package] [package]
name = "actix-web" name = "actix-web"
version = "3.0.0-beta.2" version = "3.0.0-beta.4"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md" readme = "README.md"
@ -65,20 +65,20 @@ name = "test_server"
required-features = ["compress"] required-features = ["compress"]
[dependencies] [dependencies]
actix-codec = "0.2.0" actix-codec = "0.3.0"
actix-service = "1.0.2" actix-service = "1.0.6"
actix-utils = "1.0.6" actix-utils = "2.0.0"
actix-router = "0.2.4" actix-router = "0.2.4"
actix-rt = "1.1.1" actix-rt = "1.1.1"
actix-server = "1.0.0" actix-server = "1.0.0"
actix-testing = "1.0.0" actix-testing = "1.0.0"
actix-macros = "0.1.0" actix-macros = "0.1.0"
actix-threadpool = "0.3.1" actix-threadpool = "0.3.1"
actix-tls = "2.0.0-alpha.1" actix-tls = "2.0.0"
actix-web-codegen = "0.3.0-beta.1" actix-web-codegen = "0.3.0-beta.1"
actix-http = "2.0.0-beta.3" actix-http = "2.0.0-beta.4"
awc = { version = "2.0.0-beta.1", default-features = false } awc = { version = "2.0.0-beta.4", default-features = false }
bytes = "0.5.3" bytes = "0.5.3"
derive_more = "0.99.2" derive_more = "0.99.2"
@ -98,7 +98,7 @@ serde_urlencoded = "0.6.1"
time = { version = "0.2.7", default-features = false, features = ["std"] } time = { version = "0.2.7", default-features = false, features = ["std"] }
url = "2.1" url = "2.1"
open-ssl = { package = "openssl", version = "0.10", optional = true } open-ssl = { package = "openssl", version = "0.10", optional = true }
rust-tls = { package = "rustls", version = "0.17.0", optional = true } rust-tls = { package = "rustls", version = "0.18.0", optional = true }
tinyvec = { version = "0.3", features = ["alloc"] } tinyvec = { version = "0.3", features = ["alloc"] }
[dev-dependencies] [dev-dependencies]

View File

@ -32,6 +32,14 @@
} }
``` ```
* `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::default())`.
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
* `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
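For the `NormalizePath` change, the drop-in migration (`wrap(middleware::NormalizePath::default())`) keeps the old always-append-a-trailing-slash behaviour. A sketch of how one might verify that with actix-web's `test` helpers; the route and test name are illustrative, not part of this compare.

```rust
use actix_web::{middleware::NormalizePath, test, web, App, HttpResponse};

#[actix_rt::test]
async fn default_normalize_keeps_appending_slash() {
    // `NormalizePath::default()` preserves the pre-#1639 behaviour of always
    // ensuring a trailing slash, so "/items" is routed to the "/items/" handler.
    let mut app = test::init_service(
        App::new()
            .wrap(NormalizePath::default())
            .route("/items/", web::get().to(|| async { HttpResponse::Ok().finish() })),
    )
    .await;

    let req = test::TestRequest::get().uri("/items").to_request();
    let res = test::call_service(&mut app, req).await;
    assert!(res.status().is_success());
}
```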
## 2.0.0 ## 2.0.0
* `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to * `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to

View File

@ -17,8 +17,8 @@ name = "actix_files"
path = "src/lib.rs" path = "src/lib.rs"
[dependencies] [dependencies]
actix-web = { version = "3.0.0-beta.2", default-features = false } actix-web = { version = "3.0.0-beta.4", default-features = false }
actix-http = "2.0.0-beta.3" actix-http = "2.0.0-beta.4"
actix-service = "1.0.1" actix-service = "1.0.1"
bitflags = "1" bitflags = "1"
bytes = "0.5.3" bytes = "0.5.3"
@ -33,4 +33,4 @@ v_htmlescape = "0.10"
[dev-dependencies] [dev-dependencies]
actix-rt = "1.0.0" actix-rt = "1.0.0"
actix-web = { version = "3.0.0-beta.2", features = ["openssl"] } actix-web = { version = "3.0.0-beta.4", features = ["openssl"] }

View File

@ -1,5 +1,14 @@
# Changes # Changes
## Unreleased - 2020-xx-xx
## 2.0.0-beta.4 - 2020-09-09
### Changed
* Update actix-codec and actix-utils dependencies.
* Update actix-connect and actix-tls dependencies.
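The actix-codec 0.2 → 0.3 bump behind these entries changes the `Encoder` trait: the encoded item type moves from an associated `type Item` to a generic parameter, which is why the codec diffs further down now read `impl Encoder<Message<(Response<()>, BodySize)>> for Codec`. A minimal sketch of the new trait shape, using a hypothetical `LineCodec` that is not part of this compare:

```rust
use std::io;

use actix_codec::{Decoder, Encoder};
use bytes::{BufMut, BytesMut};

/// Hypothetical newline-delimited codec, only to show the trait shapes.
struct LineCodec;

// actix-codec 0.3 (tokio-util 0.3): the item is a type parameter on `Encoder`.
impl Encoder<String> for LineCodec {
    type Error = io::Error;

    fn encode(&mut self, item: String, dst: &mut BytesMut) -> Result<(), Self::Error> {
        dst.reserve(item.len() + 1);
        dst.put_slice(item.as_bytes());
        dst.put_u8(b'\n');
        Ok(())
    }
}

// `Decoder` keeps its associated `Item` type.
impl Decoder for LineCodec {
    type Item = String;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        match src.iter().position(|&b| b == b'\n') {
            Some(pos) => {
                let line = src.split_to(pos + 1);
                let text = std::str::from_utf8(&line[..pos])
                    .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?
                    .to_owned();
                Ok(Some(text))
            }
            None => Ok(None),
        }
    }
}
```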
## [2.0.0-beta.3] - 2020-08-14 ## [2.0.0-beta.3] - 2020-08-14
### Fixed ### Fixed

View File

@ -1,6 +1,6 @@
[package] [package]
name = "actix-http" name = "actix-http"
version = "2.0.0-beta.3" version = "2.0.0-beta.4"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix HTTP primitives" description = "Actix HTTP primitives"
readme = "README.md" readme = "README.md"
@ -41,12 +41,12 @@ actors = ["actix"]
[dependencies] [dependencies]
actix-service = "1.0.5" actix-service = "1.0.5"
actix-codec = "0.2.0" actix-codec = "0.3.0"
actix-connect = "2.0.0-alpha.3" actix-connect = "2.0.0"
actix-utils = "1.0.6" actix-utils = "2.0.0"
actix-rt = "1.0.0" actix-rt = "1.0.0"
actix-threadpool = "0.3.1" actix-threadpool = "0.3.1"
actix-tls = { version = "2.0.0-alpha.1", optional = true } actix-tls = { version = "2.0.0", optional = true }
actix = { version = "0.10.0-alpha.1", optional = true } actix = { version = "0.10.0-alpha.1", optional = true }
base64 = "0.12" base64 = "0.12"
@ -87,14 +87,14 @@ flate2 = { version = "1.0.13", optional = true }
[dev-dependencies] [dev-dependencies]
actix-server = "1.0.1" actix-server = "1.0.1"
actix-connect = { version = "2.0.0-alpha.2", features = ["openssl"] } actix-connect = { version = "2.0.0", features = ["openssl"] }
actix-http-test = { version = "2.0.0-alpha.1", features = ["openssl"] } actix-http-test = { version = "2.0.0-alpha.1", features = ["openssl"] }
actix-tls = { version = "2.0.0-alpha.1", features = ["openssl"] } actix-tls = { version = "2.0.0", features = ["openssl"] }
criterion = "0.3" criterion = "0.3"
env_logger = "0.7" env_logger = "0.7"
serde_derive = "1.0" serde_derive = "1.0"
open-ssl = { version="0.10", package = "openssl" } open-ssl = { version="0.10", package = "openssl" }
rust-tls = { version="0.17", package = "rustls" } rust-tls = { version="0.18", package = "rustls" }
[[bench]] [[bench]]
name = "content-length" name = "content-length"

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

actix-http/LICENSE-APACHE Symbolic link
View File

@ -0,0 +1 @@
../LICENSE-APACHE

View File

@ -1,25 +0,0 @@
Copyright (c) 2017 Nikolay Kim
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

actix-http/LICENSE-MIT Symbolic link
View File

@ -0,0 +1 @@
../LICENSE-MIT

View File

@ -46,10 +46,10 @@ pub trait Connection {
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static { pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
/// Close connection /// Close connection
fn close(&mut self); fn close(self: Pin<&mut Self>);
/// Release connection to the connection pool /// Release connection to the connection pool
fn release(&mut self); fn release(self: Pin<&mut Self>);
} }
#[doc(hidden)] #[doc(hidden)]
@ -195,11 +195,15 @@ where
match self { match self {
EitherConnection::A(con) => con EitherConnection::A(con) => con
.open_tunnel(head) .open_tunnel(head)
.map(|res| res.map(|(head, framed)| (head, framed.map_io(EitherIo::A)))) .map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::A)))
})
.boxed_local(), .boxed_local(),
EitherConnection::B(con) => con EitherConnection::B(con) => con
.open_tunnel(head) .open_tunnel(head)
.map(|res| res.map(|(head, framed)| (head, framed.map_io(EitherIo::B)))) .map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::B)))
})
.boxed_local(), .boxed_local(),
} }
} }

View File

@ -67,17 +67,17 @@ where
}; };
// create Framed and send request // create Framed and send request
let mut framed = Framed::new(io, h1::ClientCodec::default()); let mut framed_inner = Framed::new(io, h1::ClientCodec::default());
framed.send((head, body.size()).into()).await?; framed_inner.send((head, body.size()).into()).await?;
// send request body // send request body
match body.size() { match body.size() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => (), BodySize::None | BodySize::Empty | BodySize::Sized(0) => (),
_ => send_body(body, &mut framed).await?, _ => send_body(body, Pin::new(&mut framed_inner)).await?,
}; };
// read response and init read body // read response and init read body
let res = framed.into_future().await; let res = Pin::new(&mut framed_inner).into_future().await;
let (head, framed) = if let (Some(result), framed) = res { let (head, framed) = if let (Some(result), framed) = res {
let item = result.map_err(SendRequestError::from)?; let item = result.map_err(SendRequestError::from)?;
(item, framed) (item, framed)
@ -85,14 +85,14 @@ where
return Err(SendRequestError::from(ConnectError::Disconnected)); return Err(SendRequestError::from(ConnectError::Disconnected));
}; };
match framed.get_codec().message_type() { match framed.codec_ref().message_type() {
h1::MessageType::None => { h1::MessageType::None => {
let force_close = !framed.get_codec().keepalive(); let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close); release_connection(framed, force_close);
Ok((head, Payload::None)) Ok((head, Payload::None))
} }
_ => { _ => {
let pl: PayloadStream = PlStream::new(framed).boxed_local(); let pl: PayloadStream = PlStream::new(framed_inner).boxed_local();
Ok((head, pl.into())) Ok((head, pl.into()))
} }
} }
@ -119,35 +119,36 @@ where
} }
/// send request body to the peer /// send request body to the peer
pub(crate) async fn send_body<I, B>( pub(crate) async fn send_body<T, B>(
body: B, body: B,
framed: &mut Framed<I, h1::ClientCodec>, mut framed: Pin<&mut Framed<T, h1::ClientCodec>>,
) -> Result<(), SendRequestError> ) -> Result<(), SendRequestError>
where where
I: ConnectionLifetime, T: ConnectionLifetime + Unpin,
B: MessageBody, B: MessageBody,
{ {
let mut eof = false;
pin_mut!(body); pin_mut!(body);
let mut eof = false;
while !eof { while !eof {
while !eof && !framed.is_write_buf_full() { while !eof && !framed.as_ref().is_write_buf_full() {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await { match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(result) => { Some(result) => {
framed.write(h1::Message::Chunk(Some(result?)))?; framed.as_mut().write(h1::Message::Chunk(Some(result?)))?;
} }
None => { None => {
eof = true; eof = true;
framed.write(h1::Message::Chunk(None))?; framed.as_mut().write(h1::Message::Chunk(None))?;
} }
} }
} }
if !framed.is_write_buf_empty() { if !framed.as_ref().is_write_buf_empty() {
poll_fn(|cx| match framed.flush(cx) { poll_fn(|cx| match framed.as_mut().flush(cx) {
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())), Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
Poll::Ready(Err(err)) => Poll::Ready(Err(err)), Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
Poll::Pending => { Poll::Pending => {
if !framed.is_write_buf_full() { if !framed.as_ref().is_write_buf_full() {
Poll::Ready(Ok(())) Poll::Ready(Ok(()))
} else { } else {
Poll::Pending Poll::Pending
@ -158,13 +159,14 @@ where
} }
} }
SinkExt::flush(framed).await?; SinkExt::flush(Pin::into_inner(framed)).await?;
Ok(()) Ok(())
} }
#[doc(hidden)] #[doc(hidden)]
/// HTTP client connection /// HTTP client connection
pub struct H1Connection<T> { pub struct H1Connection<T> {
/// T should be `Unpin`
io: Option<T>, io: Option<T>,
created: time::Instant, created: time::Instant,
pool: Option<Acquired<T>>, pool: Option<Acquired<T>>,
@ -175,7 +177,7 @@ where
T: AsyncRead + AsyncWrite + Unpin + 'static, T: AsyncRead + AsyncWrite + Unpin + 'static,
{ {
/// Close connection /// Close connection
fn close(&mut self) { fn close(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() { if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() { if let Some(io) = self.io.take() {
pool.close(IoConnection::new( pool.close(IoConnection::new(
@ -188,7 +190,7 @@ where
} }
/// Release this connection to the connection pool /// Release this connection to the connection pool
fn release(&mut self) { fn release(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() { if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() { if let Some(io) = self.io.take() {
pool.release(IoConnection::new( pool.release(IoConnection::new(
@ -242,14 +244,18 @@ impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncWrite for H1Connection<T>
} }
} }
#[pin_project::pin_project]
pub(crate) struct PlStream<Io> { pub(crate) struct PlStream<Io> {
#[pin]
framed: Option<Framed<Io, h1::ClientPayloadCodec>>, framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
} }
impl<Io: ConnectionLifetime> PlStream<Io> { impl<Io: ConnectionLifetime> PlStream<Io> {
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self { fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
let framed = framed.into_map_codec(|codec| codec.into_payload_codec());
PlStream { PlStream {
framed: Some(framed.map_codec(|codec| codec.into_payload_codec())), framed: Some(framed),
} }
} }
} }
@ -261,16 +267,16 @@ impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> { ) -> Poll<Option<Self::Item>> {
let this = self.get_mut(); let mut this = self.project();
match this.framed.as_mut().unwrap().next_item(cx)? { match this.framed.as_mut().as_pin_mut().unwrap().next_item(cx)? {
Poll::Pending => Poll::Pending, Poll::Pending => Poll::Pending,
Poll::Ready(Some(chunk)) => { Poll::Ready(Some(chunk)) => {
if let Some(chunk) = chunk { if let Some(chunk) = chunk {
Poll::Ready(Some(Ok(chunk))) Poll::Ready(Some(Ok(chunk)))
} else { } else {
let framed = this.framed.take().unwrap(); let framed = this.framed.as_mut().as_pin_mut().unwrap();
let force_close = !framed.get_codec().keepalive(); let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close); release_connection(framed, force_close);
Poll::Ready(None) Poll::Ready(None)
} }
@ -280,14 +286,13 @@ impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
} }
} }
fn release_connection<T, U>(framed: Framed<T, U>, force_close: bool) fn release_connection<T, U>(framed: Pin<&mut Framed<T, U>>, force_close: bool)
where where
T: ConnectionLifetime, T: ConnectionLifetime,
{ {
let mut parts = framed.into_parts(); if !force_close && framed.is_read_buf_empty() && framed.is_write_buf_empty() {
if !force_close && parts.read_buf.is_empty() && parts.write_buf.is_empty() { framed.io_pin().release()
parts.io.release()
} else { } else {
parts.io.close() framed.io_pin().close()
} }
} }

View File

@ -17,7 +17,7 @@ const DATE_VALUE_LENGTH: usize = 29;
pub enum KeepAlive { pub enum KeepAlive {
/// Keep alive in seconds /// Keep alive in seconds
Timeout(usize), Timeout(usize),
/// Relay on OS to shutdown tcp connection /// Rely on OS to shutdown tcp connection
Os, Os,
/// Disabled /// Disabled
Disabled, Disabled,
@ -209,6 +209,7 @@ impl Date {
date.update(); date.update();
date date
} }
fn update(&mut self) { fn update(&mut self) {
self.pos = 0; self.pos = 0;
write!( write!(

View File

@ -1,4 +1,5 @@
//! Error and Result module //! Error and Result module
use std::cell::RefCell; use std::cell::RefCell;
use std::io::Write; use std::io::Write;
use std::str::Utf8Error; use std::str::Utf8Error;
@ -7,7 +8,7 @@ use std::{fmt, io, result};
use actix_codec::{Decoder, Encoder}; use actix_codec::{Decoder, Encoder};
pub use actix_threadpool::BlockingError; pub use actix_threadpool::BlockingError;
use actix_utils::framed::DispatcherError as FramedDispatcherError; use actix_utils::dispatcher::DispatcherError as FramedDispatcherError;
use actix_utils::timeout::TimeoutError; use actix_utils::timeout::TimeoutError;
use bytes::BytesMut; use bytes::BytesMut;
use derive_more::{Display, From}; use derive_more::{Display, From};
@ -452,10 +453,10 @@ impl ResponseError for ContentTypeError {
} }
} }
impl<E, U: Encoder + Decoder> ResponseError for FramedDispatcherError<E, U> impl<E, U: Encoder<I> + Decoder, I> ResponseError for FramedDispatcherError<E, U, I>
where where
E: fmt::Debug + fmt::Display, E: fmt::Debug + fmt::Display,
<U as Encoder>::Error: fmt::Debug, <U as Encoder<I>>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug, <U as Decoder>::Error: fmt::Debug,
{ {
} }

View File

@ -173,13 +173,12 @@ impl Decoder for ClientPayloadCodec {
} }
} }
impl Encoder for ClientCodec { impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
type Item = Message<(RequestHeadType, BodySize)>;
type Error = io::Error; type Error = io::Error;
fn encode( fn encode(
&mut self, &mut self,
item: Self::Item, item: Message<(RequestHeadType, BodySize)>,
dst: &mut BytesMut, dst: &mut BytesMut,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
match item { match item {

View File

@ -144,13 +144,12 @@ impl Decoder for Codec {
} }
} }
impl Encoder for Codec { impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
type Item = Message<(Response<()>, BodySize)>;
type Error = io::Error; type Error = io::Error;
fn encode( fn encode(
&mut self, &mut self,
item: Self::Item, item: Message<(Response<()>, BodySize)>,
dst: &mut BytesMut, dst: &mut BytesMut,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
match item { match item {

View File

@ -76,12 +76,14 @@ pub(crate) trait MessageType: Sized {
let name = let name =
HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap(); HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
// SAFETY: httparse checks header value is valid UTF-8 // SAFETY: httparse already checks header value is only visible ASCII bytes
// from_maybe_shared_unchecked contains debug assertions so they are omitted here
let value = unsafe { let value = unsafe {
HeaderValue::from_maybe_shared_unchecked( HeaderValue::from_maybe_shared_unchecked(
slice.slice(idx.value.0..idx.value.1), slice.slice(idx.value.0..idx.value.1),
) )
}; };
match name { match name {
header::CONTENT_LENGTH => { header::CONTENT_LENGTH => {
if let Ok(s) = value.to_str() { if let Ok(s) = value.to_str() {

View File

@ -314,11 +314,15 @@ where
Poll::Ready(Err(err)) => return Err(DispatchError::Io(err)), Poll::Ready(Err(err)) => return Err(DispatchError::Io(err)),
} }
} }
if written == write_buf.len() { if written == write_buf.len() {
// SAFETY: setting length to 0 is safe
// skips one length check vs truncate
unsafe { write_buf.set_len(0) } unsafe { write_buf.set_len(0) }
} else { } else {
write_buf.advance(written); write_buf.advance(written);
} }
Ok(false) Ok(false)
} }

View File

@ -129,89 +129,133 @@ pub(crate) trait MessageType: Sized {
.chain(extra_headers.inner.iter()); .chain(extra_headers.inner.iter());
// write headers // write headers
let mut pos = 0;
let mut has_date = false; let mut has_date = false;
let mut remaining = dst.capacity() - dst.len();
let mut buf = dst.bytes_mut().as_mut_ptr() as *mut u8; let mut buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
let mut remaining = dst.capacity() - dst.len();
// tracks bytes written since last buffer resize
// since buf is a raw pointer to a bytes container storage but is written to without the
// container's knowledge, this is used to sync the containers cursor after data is written
let mut pos = 0;
for (key, value) in headers { for (key, value) in headers {
match *key { match *key {
CONNECTION => continue, CONNECTION => continue,
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue, TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue,
DATE => { DATE => has_date = true,
has_date = true;
}
_ => (), _ => (),
} }
let k = key.as_str().as_bytes(); let k = key.as_str().as_bytes();
let k_len = k.len();
match value { match value {
map::Value::One(ref val) => { map::Value::One(ref val) => {
let v = val.as_ref(); let v = val.as_ref();
let v_len = v.len(); let v_len = v.len();
let k_len = k.len();
// key length + value length + colon + space + \r\n
let len = k_len + v_len + 4; let len = k_len + v_len + 4;
if len > remaining { if len > remaining {
// not enough room in buffer for this header; reserve more space
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe { unsafe {
dst.advance_mut(pos); dst.advance_mut(pos);
} }
pos = 0; pos = 0;
dst.reserve(len * 2); dst.reserve(len * 2);
remaining = dst.capacity() - dst.len(); remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.bytes_mut().as_mut_ptr() as *mut u8; buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
} }
// use upper Camel-Case
// SAFETY: on each write, it is enough to ensure that the advancement of the
// cursor matches the number of bytes written
unsafe { unsafe {
// use upper Camel-Case
if camel_case { if camel_case {
write_camel_case(k, from_raw_parts_mut(buf, k_len)) write_camel_case(k, from_raw_parts_mut(buf, k_len))
} else { } else {
write_data(k, buf, k_len) write_data(k, buf, k_len)
} }
buf = buf.add(k_len); buf = buf.add(k_len);
write_data(b": ", buf, 2); write_data(b": ", buf, 2);
buf = buf.add(2); buf = buf.add(2);
write_data(v, buf, v_len); write_data(v, buf, v_len);
buf = buf.add(v_len); buf = buf.add(v_len);
write_data(b"\r\n", buf, 2); write_data(b"\r\n", buf, 2);
buf = buf.add(2); buf = buf.add(2);
pos += len;
remaining -= len;
} }
pos += len;
remaining -= len;
} }
map::Value::Multi(ref vec) => { map::Value::Multi(ref vec) => {
for val in vec { for val in vec {
let v = val.as_ref(); let v = val.as_ref();
let v_len = v.len(); let v_len = v.len();
let k_len = k.len();
let len = k_len + v_len + 4; let len = k_len + v_len + 4;
if len > remaining { if len > remaining {
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe { unsafe {
dst.advance_mut(pos); dst.advance_mut(pos);
} }
pos = 0; pos = 0;
dst.reserve(len * 2); dst.reserve(len * 2);
remaining = dst.capacity() - dst.len(); remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.bytes_mut().as_mut_ptr() as *mut u8; buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
} }
// use upper Camel-Case
// SAFETY: on each write, it is enough to ensure that the advancement of
// the cursor matches the number of bytes written
unsafe { unsafe {
if camel_case { if camel_case {
write_camel_case(k, from_raw_parts_mut(buf, k_len)); write_camel_case(k, from_raw_parts_mut(buf, k_len));
} else { } else {
write_data(k, buf, k_len); write_data(k, buf, k_len);
} }
buf = buf.add(k_len); buf = buf.add(k_len);
write_data(b": ", buf, 2); write_data(b": ", buf, 2);
buf = buf.add(2); buf = buf.add(2);
write_data(v, buf, v_len); write_data(v, buf, v_len);
buf = buf.add(v_len); buf = buf.add(v_len);
write_data(b"\r\n", buf, 2); write_data(b"\r\n", buf, 2);
buf = buf.add(2); buf = buf.add(2);
}; };
pos += len; pos += len;
remaining -= len; remaining -= len;
} }
} }
} }
} }
// final cursor synchronization with the bytes container
//
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe { unsafe {
dst.advance_mut(pos); dst.advance_mut(pos);
} }
@ -477,7 +521,10 @@ impl<'a> io::Write for Writer<'a> {
} }
} }
/// # Safety
/// Callers must ensure that the given length matches given value length.
unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) { unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) {
debug_assert_eq!(value.len(), len);
copy_nonoverlapping(value.as_ptr(), buf, len); copy_nonoverlapping(value.as_ptr(), buf, len);
} }

View File

@ -98,7 +98,7 @@ mod openssl {
use super::*; use super::*;
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream}; use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
use actix_tls::{openssl::HandshakeError, SslError}; use actix_tls::{openssl::HandshakeError, TlsError};
impl<S, B, X, U> H1Service<SslStream<TcpStream>, S, B, X, U> impl<S, B, X, U> H1Service<SslStream<TcpStream>, S, B, X, U>
where where
@ -126,19 +126,19 @@ mod openssl {
Config = (), Config = (),
Request = TcpStream, Request = TcpStream,
Response = (), Response = (),
Error = SslError<HandshakeError<TcpStream>, DispatchError>, Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
InitError = (), InitError = (),
> { > {
pipeline_factory( pipeline_factory(
Acceptor::new(acceptor) Acceptor::new(acceptor)
.map_err(SslError::Ssl) .map_err(TlsError::Tls)
.map_init_err(|_| panic!()), .map_init_err(|_| panic!()),
) )
.and_then(|io: SslStream<TcpStream>| { .and_then(|io: SslStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok(); let peer_addr = io.get_ref().peer_addr().ok();
ok((io, peer_addr)) ok((io, peer_addr))
}) })
.and_then(self.map_err(SslError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }
@ -147,7 +147,7 @@ mod openssl {
mod rustls { mod rustls {
use super::*; use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream}; use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream};
use actix_tls::SslError; use actix_tls::TlsError;
use std::{fmt, io}; use std::{fmt, io};
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U> impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
@ -176,19 +176,19 @@ mod rustls {
Config = (), Config = (),
Request = TcpStream, Request = TcpStream,
Response = (), Response = (),
Error = SslError<io::Error, DispatchError>, Error = TlsError<io::Error, DispatchError>,
InitError = (), InitError = (),
> { > {
pipeline_factory( pipeline_factory(
Acceptor::new(config) Acceptor::new(config)
.map_err(SslError::Ssl) .map_err(TlsError::Tls)
.map_init_err(|_| panic!()), .map_init_err(|_| panic!()),
) )
.and_then(|io: TlsStream<TcpStream>| { .and_then(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok(); let peer_addr = io.get_ref().0.peer_addr().ok();
ok((io, peer_addr)) ok((io, peer_addr))
}) })
.and_then(self.map_err(SslError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }
@ -548,10 +548,12 @@ where
} }
#[doc(hidden)] #[doc(hidden)]
#[pin_project::pin_project]
pub struct OneRequestServiceResponse<T> pub struct OneRequestServiceResponse<T>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
{ {
#[pin]
framed: Option<Framed<T, Codec>>, framed: Option<Framed<T, Codec>>,
} }
@ -562,16 +564,18 @@ where
type Output = Result<(Request, Framed<T, Codec>), ParseError>; type Output = Result<(Request, Framed<T, Codec>), ParseError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.framed.as_mut().unwrap().next_item(cx) { let this = self.as_mut().project();
Poll::Ready(Some(Ok(req))) => match req {
match ready!(this.framed.as_pin_mut().unwrap().next_item(cx)) {
Some(Ok(req)) => match req {
Message::Item(req) => { Message::Item(req) => {
Poll::Ready(Ok((req, self.framed.take().unwrap()))) let mut this = self.as_mut().project();
Poll::Ready(Ok((req, this.framed.take().unwrap())))
} }
Message::Chunk(_) => unreachable!("Something is wrong"), Message::Chunk(_) => unreachable!("Something is wrong"),
}, },
Poll::Ready(Some(Err(err))) => Poll::Ready(Err(err)), Some(Err(err)) => Poll::Ready(Err(err)),
Poll::Ready(None) => Poll::Ready(Err(ParseError::Incomplete)), None => Poll::Ready(Err(ParseError::Incomplete)),
Poll::Pending => Poll::Pending,
} }
} }
} }

View File

@ -9,12 +9,13 @@ use crate::error::Error;
use crate::h1::{Codec, Message}; use crate::h1::{Codec, Message};
use crate::response::Response; use crate::response::Response;
/// Send http/1 response /// Send HTTP/1 response
#[pin_project::pin_project] #[pin_project::pin_project]
pub struct SendResponse<T, B> { pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>, res: Option<Message<(Response<()>, BodySize)>>,
#[pin] #[pin]
body: Option<ResponseBody<B>>, body: Option<ResponseBody<B>>,
#[pin]
framed: Option<Framed<T, Codec>>, framed: Option<Framed<T, Codec>>,
} }
@ -35,23 +36,30 @@ where
impl<T, B> Future for SendResponse<T, B> impl<T, B> Future for SendResponse<T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin,
B: MessageBody + Unpin, B: MessageBody + Unpin,
{ {
type Output = Result<Framed<T, Codec>, Error>; type Output = Result<Framed<T, Codec>, Error>;
// TODO: rethink if we need loops in polls // TODO: rethink if we need loops in polls
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project(); let mut this = self.as_mut().project();
let mut body_done = this.body.is_none(); let mut body_done = this.body.is_none();
loop { loop {
let mut body_ready = !body_done; let mut body_ready = !body_done;
let framed = this.framed.as_mut().unwrap();
// send body // send body
if this.res.is_none() && body_ready { if this.res.is_none() && body_ready {
while body_ready && !body_done && !framed.is_write_buf_full() { while body_ready
&& !body_done
&& !this
.framed
.as_ref()
.as_pin_ref()
.unwrap()
.is_write_buf_full()
{
match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? { match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? {
Poll::Ready(item) => { Poll::Ready(item) => {
// body is done when item is None // body is done when item is None
@ -59,6 +67,7 @@ where
if body_done { if body_done {
let _ = this.body.take(); let _ = this.body.take();
} }
let framed = this.framed.as_mut().as_pin_mut().unwrap();
framed.write(Message::Chunk(item))?; framed.write(Message::Chunk(item))?;
} }
Poll::Pending => body_ready = false, Poll::Pending => body_ready = false,
@ -66,6 +75,8 @@ where
} }
} }
let framed = this.framed.as_mut().as_pin_mut().unwrap();
// flush write buffer // flush write buffer
if !framed.is_write_buf_empty() { if !framed.is_write_buf_empty() {
match framed.flush(cx)? { match framed.flush(cx)? {
@ -96,6 +107,9 @@ where
break; break;
} }
} }
Poll::Ready(Ok(this.framed.take().unwrap()))
let framed = this.framed.take().unwrap();
Poll::Ready(Ok(framed))
} }
} }

View File

@ -227,9 +227,11 @@ where
if !has_date { if !has_date {
let mut bytes = BytesMut::with_capacity(29); let mut bytes = BytesMut::with_capacity(29);
self.config.set_date_header(&mut bytes); self.config.set_date_header(&mut bytes);
res.headers_mut().insert(DATE, unsafe { res.headers_mut().insert(
HeaderValue::from_maybe_shared_unchecked(bytes.freeze()) DATE,
}); // SAFETY: serialized date-times are known ASCII strings
unsafe { HeaderValue::from_maybe_shared_unchecked(bytes.freeze()) },
);
} }
res res

View File

@ -97,7 +97,7 @@ where
mod openssl { mod openssl {
use actix_service::{fn_factory, fn_service}; use actix_service::{fn_factory, fn_service};
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream}; use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
use actix_tls::{openssl::HandshakeError, SslError}; use actix_tls::{openssl::HandshakeError, TlsError};
use super::*; use super::*;
@ -117,12 +117,12 @@ mod openssl {
Config = (), Config = (),
Request = TcpStream, Request = TcpStream,
Response = (), Response = (),
Error = SslError<HandshakeError<TcpStream>, DispatchError>, Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
InitError = S::InitError, InitError = S::InitError,
> { > {
pipeline_factory( pipeline_factory(
Acceptor::new(acceptor) Acceptor::new(acceptor)
.map_err(SslError::Ssl) .map_err(TlsError::Tls)
.map_init_err(|_| panic!()), .map_init_err(|_| panic!()),
) )
.and_then(fn_factory(|| { .and_then(fn_factory(|| {
@ -131,7 +131,7 @@ mod openssl {
ok((io, peer_addr)) ok((io, peer_addr))
})) }))
})) }))
.and_then(self.map_err(SslError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }
@ -140,7 +140,7 @@ mod openssl {
mod rustls { mod rustls {
use super::*; use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream}; use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream};
use actix_tls::SslError; use actix_tls::TlsError;
use std::io; use std::io;
impl<S, B> H2Service<TlsStream<TcpStream>, S, B> impl<S, B> H2Service<TlsStream<TcpStream>, S, B>
@ -159,7 +159,7 @@ mod rustls {
Config = (), Config = (),
Request = TcpStream, Request = TcpStream,
Response = (), Response = (),
Error = SslError<io::Error, DispatchError>, Error = TlsError<io::Error, DispatchError>,
InitError = S::InitError, InitError = S::InitError,
> { > {
let protos = vec!["h2".to_string().into()]; let protos = vec!["h2".to_string().into()];
@ -167,7 +167,7 @@ mod rustls {
pipeline_factory( pipeline_factory(
Acceptor::new(config) Acceptor::new(config)
.map_err(SslError::Ssl) .map_err(TlsError::Tls)
.map_init_err(|_| panic!()), .map_init_err(|_| panic!()),
) )
.and_then(fn_factory(|| { .and_then(fn_factory(|| {
@ -176,7 +176,7 @@ mod rustls {
ok((io, peer_addr)) ok((io, peer_addr))
})) }))
})) }))
.and_then(self.map_err(SslError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }

View File

@ -38,7 +38,7 @@ macro_rules! downcast {
/// Downcasts generic body to a specific type. /// Downcasts generic body to a specific type.
pub fn downcast_ref<T: $name + 'static>(&self) -> Option<&T> { pub fn downcast_ref<T: $name + 'static>(&self) -> Option<&T> {
if self.__private_get_type_id__().0 == std::any::TypeId::of::<T>() { if self.__private_get_type_id__().0 == std::any::TypeId::of::<T>() {
// Safety: external crates cannot override the default // SAFETY: external crates cannot override the default
// implementation of `__private_get_type_id__`, since // implementation of `__private_get_type_id__`, since
// it requires returning a private type. We can therefore // it requires returning a private type. We can therefore
// rely on the returned `TypeId`, which ensures that this // rely on the returned `TypeId`, which ensures that this
@ -48,10 +48,11 @@ macro_rules! downcast {
None None
} }
} }
/// Downcasts a generic body to a mutable specific type. /// Downcasts a generic body to a mutable specific type.
pub fn downcast_mut<T: $name + 'static>(&mut self) -> Option<&mut T> { pub fn downcast_mut<T: $name + 'static>(&mut self) -> Option<&mut T> {
if self.__private_get_type_id__().0 == std::any::TypeId::of::<T>() { if self.__private_get_type_id__().0 == std::any::TypeId::of::<T>() {
// Safety: external crates cannot override the default // SAFETY: external crates cannot override the default
// implementation of `__private_get_type_id__`, since // implementation of `__private_get_type_id__`, since
// it requires returning a private type. We can therefore // it requires returning a private type. We can therefore
// rely on the returned `TypeId`, which ensures that this // rely on the returned `TypeId`, which ensures that this

View File

@ -195,7 +195,7 @@ where
mod openssl { mod openssl {
use super::*; use super::*;
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream}; use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
use actix_tls::{openssl::HandshakeError, SslError}; use actix_tls::{openssl::HandshakeError, TlsError};
impl<S, B, X, U> HttpService<SslStream<TcpStream>, S, B, X, U> impl<S, B, X, U> HttpService<SslStream<TcpStream>, S, B, X, U>
where where
@ -226,12 +226,12 @@ mod openssl {
Config = (), Config = (),
Request = TcpStream, Request = TcpStream,
Response = (), Response = (),
Error = SslError<HandshakeError<TcpStream>, DispatchError>, Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
InitError = (), InitError = (),
> { > {
pipeline_factory( pipeline_factory(
Acceptor::new(acceptor) Acceptor::new(acceptor)
.map_err(SslError::Ssl) .map_err(TlsError::Tls)
.map_init_err(|_| panic!()), .map_init_err(|_| panic!()),
) )
.and_then(|io: SslStream<TcpStream>| { .and_then(|io: SslStream<TcpStream>| {
@ -247,7 +247,7 @@ mod openssl {
let peer_addr = io.get_ref().peer_addr().ok(); let peer_addr = io.get_ref().peer_addr().ok();
ok((io, proto, peer_addr)) ok((io, proto, peer_addr))
}) })
.and_then(self.map_err(SslError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }
@ -256,7 +256,7 @@ mod openssl {
mod rustls { mod rustls {
use super::*; use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, Session, TlsStream}; use actix_tls::rustls::{Acceptor, ServerConfig, Session, TlsStream};
use actix_tls::SslError; use actix_tls::TlsError;
use std::io; use std::io;
impl<S, B, X, U> HttpService<TlsStream<TcpStream>, S, B, X, U> impl<S, B, X, U> HttpService<TlsStream<TcpStream>, S, B, X, U>
@ -288,7 +288,7 @@ mod rustls {
Config = (), Config = (),
Request = TcpStream, Request = TcpStream,
Response = (), Response = (),
Error = SslError<io::Error, DispatchError>, Error = TlsError<io::Error, DispatchError>,
InitError = (), InitError = (),
> { > {
let protos = vec!["h2".to_string().into(), "http/1.1".to_string().into()]; let protos = vec!["h2".to_string().into(), "http/1.1".to_string().into()];
@ -296,7 +296,7 @@ mod rustls {
pipeline_factory( pipeline_factory(
Acceptor::new(config) Acceptor::new(config)
.map_err(SslError::Ssl) .map_err(TlsError::Tls)
.map_init_err(|_| panic!()), .map_init_err(|_| panic!()),
) )
.and_then(|io: TlsStream<TcpStream>| { .and_then(|io: TlsStream<TcpStream>| {
@ -312,7 +312,7 @@ mod rustls {
let peer_addr = io.get_ref().0.peer_addr().ok(); let peer_addr = io.get_ref().0.peer_addr().ok();
ok((io, proto, peer_addr)) ok((io, proto, peer_addr))
}) })
.and_then(self.map_err(SslError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }

View File

@ -91,8 +91,7 @@ impl Codec {
} }
} }
impl Encoder for Codec { impl Encoder<Message> for Codec {
type Item = Message;
type Error = ProtocolError; type Error = ProtocolError;
fn encode(&mut self, item: Message, dst: &mut BytesMut) -> Result<(), Self::Error> { fn encode(&mut self, item: Message, dst: &mut BytesMut) -> Result<(), Self::Error> {

View File

@ -4,16 +4,18 @@ use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Framed}; use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_service::{IntoService, Service}; use actix_service::{IntoService, Service};
use actix_utils::framed; use actix_utils::dispatcher::{Dispatcher as InnerDispatcher, DispatcherError};
use super::{Codec, Frame, Message}; use super::{Codec, Frame, Message};
#[pin_project::pin_project]
pub struct Dispatcher<S, T> pub struct Dispatcher<S, T>
where where
S: Service<Request = Frame, Response = Message> + 'static, S: Service<Request = Frame, Response = Message> + 'static,
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite,
{ {
inner: framed::Dispatcher<S, T, Codec>, #[pin]
inner: InnerDispatcher<S, T, Codec, Message>,
} }
impl<S, T> Dispatcher<S, T> impl<S, T> Dispatcher<S, T>
@ -25,13 +27,13 @@ where
{ {
pub fn new<F: IntoService<S>>(io: T, service: F) -> Self { pub fn new<F: IntoService<S>>(io: T, service: F) -> Self {
Dispatcher { Dispatcher {
inner: framed::Dispatcher::new(Framed::new(io, Codec::new()), service), inner: InnerDispatcher::new(Framed::new(io, Codec::new()), service),
} }
} }
pub fn with<F: IntoService<S>>(framed: Framed<T, Codec>, service: F) -> Self { pub fn with<F: IntoService<S>>(framed: Framed<T, Codec>, service: F) -> Self {
Dispatcher { Dispatcher {
inner: framed::Dispatcher::new(framed, service), inner: InnerDispatcher::new(framed, service),
} }
} }
} }
@ -43,9 +45,9 @@ where
S::Future: 'static, S::Future: 'static,
S::Error: 'static, S::Error: 'static,
{ {
type Output = Result<(), framed::DispatcherError<S::Error, Codec>>; type Output = Result<(), DispatcherError<S::Error, Codec, Message>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner).poll(cx) self.project().inner.poll(cx)
} }
} }

View File

@ -7,6 +7,8 @@ use std::slice;
struct ShortSlice<'a>(&'a mut [u8]); struct ShortSlice<'a>(&'a mut [u8]);
impl<'a> ShortSlice<'a> { impl<'a> ShortSlice<'a> {
/// # Safety
/// Given slice must be shorter than 8 bytes.
unsafe fn new(slice: &'a mut [u8]) -> Self { unsafe fn new(slice: &'a mut [u8]) -> Self {
// Sanity check for debug builds // Sanity check for debug builds
debug_assert!(slice.len() < 8); debug_assert!(slice.len() < 8);
@ -46,13 +48,13 @@ pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) {
} }
} }
#[inline]
// TODO: copy_nonoverlapping here compiles to call memcpy. While it is not so // TODO: copy_nonoverlapping here compiles to call memcpy. While it is not so
// inefficient, it could be done better. The compiler does not understand that // inefficient, it could be done better. The compiler does not understand that
// a `ShortSlice` must be smaller than a u64. // a `ShortSlice` must be smaller than a u64.
#[inline]
#[allow(clippy::needless_pass_by_value)] #[allow(clippy::needless_pass_by_value)]
fn xor_short(buf: ShortSlice<'_>, mask: u64) { fn xor_short(buf: ShortSlice<'_>, mask: u64) {
// Unsafe: we know that a `ShortSlice` fits in a u64 // SAFETY: we know that a `ShortSlice` fits in a u64
unsafe { unsafe {
let (ptr, len) = (buf.0.as_mut_ptr(), buf.0.len()); let (ptr, len) = (buf.0.as_mut_ptr(), buf.0.len());
let mut b: u64 = 0; let mut b: u64 = 0;
@ -64,8 +66,9 @@ fn xor_short(buf: ShortSlice<'_>, mask: u64) {
} }
} }
/// # Safety
/// Caller must ensure the buffer has the correct size and alignment.
#[inline] #[inline]
// Unsafe: caller must ensure the buffer has the correct size and alignment
unsafe fn cast_slice(buf: &mut [u8]) -> &mut [u64] { unsafe fn cast_slice(buf: &mut [u8]) -> &mut [u64] {
// Assert correct size and alignment in debug builds // Assert correct size and alignment in debug builds
debug_assert!(buf.len().trailing_zeros() >= 3); debug_assert!(buf.len().trailing_zeros() >= 3);
@ -74,9 +77,9 @@ unsafe fn cast_slice(buf: &mut [u8]) -> &mut [u64] {
slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut u64, buf.len() >> 3) slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut u64, buf.len() >> 3)
} }
#[inline]
// Splits a slice into three parts: an unaligned short head and tail, plus an aligned // Splits a slice into three parts: an unaligned short head and tail, plus an aligned
// u64 mid section. // u64 mid section.
#[inline]
fn align_buf(buf: &mut [u8]) -> (ShortSlice<'_>, &mut [u64], ShortSlice<'_>) { fn align_buf(buf: &mut [u8]) -> (ShortSlice<'_>, &mut [u64], ShortSlice<'_>) {
let start_ptr = buf.as_ptr() as usize; let start_ptr = buf.as_ptr() as usize;
let end_ptr = start_ptr + buf.len(); let end_ptr = start_ptr + buf.len();
@ -91,13 +94,13 @@ fn align_buf(buf: &mut [u8]) -> (ShortSlice<'_>, &mut [u64], ShortSlice<'_>) {
let (tmp, tail) = buf.split_at_mut(end_aligned - start_ptr); let (tmp, tail) = buf.split_at_mut(end_aligned - start_ptr);
let (head, mid) = tmp.split_at_mut(start_aligned - start_ptr); let (head, mid) = tmp.split_at_mut(start_aligned - start_ptr);
// Unsafe: we know the middle section is correctly aligned, and the outer // SAFETY: we know the middle section is correctly aligned, and the outer
// sections are smaller than 8 bytes // sections are smaller than 8 bytes
unsafe { (ShortSlice::new(head), cast_slice(mid), ShortSlice(tail)) } unsafe { (ShortSlice::new(head), cast_slice(mid), ShortSlice(tail)) }
} else { } else {
// We didn't cross even one aligned boundary! // We didn't cross even one aligned boundary!
// Unsafe: The outer sections are smaller than 8 bytes // SAFETY: The outer sections are smaller than 8 bytes
unsafe { (ShortSlice::new(buf), &mut [], ShortSlice::new(&mut [])) } unsafe { (ShortSlice::new(buf), &mut [], ShortSlice::new(&mut [])) }
} }
} }

View File

@ -8,7 +8,7 @@ use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_http::{body, h1, ws, Error, HttpService, Request, Response}; use actix_http::{body, h1, ws, Error, HttpService, Request, Response};
use actix_http_test::test_server; use actix_http_test::test_server;
use actix_service::{fn_factory, Service}; use actix_service::{fn_factory, Service};
use actix_utils::framed::Dispatcher; use actix_utils::dispatcher::Dispatcher;
use bytes::Bytes; use bytes::Bytes;
use futures_util::future; use futures_util::future;
use futures_util::task::{Context, Poll}; use futures_util::task::{Context, Poll};
@ -59,7 +59,7 @@ where
.await .await
.unwrap(); .unwrap();
Dispatcher::new(framed.into_framed(ws::Codec::new()), service) Dispatcher::new(framed.replace_codec(ws::Codec::new()), service)
.await .await
.map_err(|_| panic!()) .map_err(|_| panic!())
}; };
View File
@ -16,9 +16,9 @@ name = "actix_multipart"
path = "src/lib.rs" path = "src/lib.rs"
[dependencies] [dependencies]
actix-web = { version = "3.0.0-beta.2", default-features = false } actix-web = { version = "3.0.0-beta.4", default-features = false }
actix-service = "1.0.1" actix-service = "1.0.1"
actix-utils = "1.0.3" actix-utils = "2.0.0"
bytes = "0.5.3" bytes = "0.5.3"
derive_more = "0.99.2" derive_more = "0.99.2"
httparse = "1.3" httparse = "1.3"
@ -29,4 +29,4 @@ twoway = "0.2"
[dev-dependencies] [dev-dependencies]
actix-rt = "1.0.0" actix-rt = "1.0.0"
actix-http = "2.0.0-beta.3" actix-http = "2.0.0-beta.4"
View File
@ -17,9 +17,9 @@ path = "src/lib.rs"
[dependencies] [dependencies]
actix = "0.10.0-alpha.2" actix = "0.10.0-alpha.2"
actix-web = { version = "3.0.0-beta.2", default-features = false } actix-web = { version = "3.0.0-beta.4", default-features = false }
actix-http = "2.0.0-beta.3" actix-http = "2.0.0-beta.4"
actix-codec = "0.2.0" actix-codec = "0.3.0"
bytes = "0.5.2" bytes = "0.5.2"
futures-channel = { version = "0.3.5", default-features = false } futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false } futures-core = { version = "0.3.5", default-features = false }
View File
@ -20,5 +20,5 @@ proc-macro2 = "1"
[dev-dependencies] [dev-dependencies]
actix-rt = "1.0.0" actix-rt = "1.0.0"
actix-web = "3.0.0-beta.2" actix-web = "3.0.0-beta.4"
futures-util = { version = "0.3.5", default-features = false } futures-util = { version = "0.3.5", default-features = false }
View File
@ -3,6 +3,16 @@
## Unreleased - 2020-xx-xx ## Unreleased - 2020-xx-xx
## 2.0.0-beta.4 - 2020-09-09
### Changed
* Update actix-codec & actix-tls dependencies.
## 2.0.0-beta.3 - 2020-08-17
### Changed
* Update `rustls` to 0.18
## 2.0.0-beta.2 - 2020-07-21 ## 2.0.0-beta.2 - 2020-07-21
### Changed ### Changed
* Update `actix-http` dependency to 2.0.0-beta.2 * Update `actix-http` dependency to 2.0.0-beta.2
View File
@ -1,6 +1,6 @@
[package] [package]
name = "awc" name = "awc"
version = "2.0.0-beta.2" version = "2.0.0-beta.4"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Async HTTP client library that uses the Actix runtime." description = "Async HTTP client library that uses the Actix runtime."
readme = "README.md" readme = "README.md"
@ -37,9 +37,9 @@ rustls = ["rust-tls", "actix-http/rustls"]
compress = ["actix-http/compress"] compress = ["actix-http/compress"]
[dependencies] [dependencies]
actix-codec = "0.2.0" actix-codec = "0.3.0"
actix-service = "1.0.1" actix-service = "1.0.1"
actix-http = "2.0.0-beta.3" actix-http = "2.0.0-beta.4"
actix-rt = "1.0.0" actix-rt = "1.0.0"
base64 = "0.12" base64 = "0.12"
@ -54,16 +54,16 @@ serde = "1.0"
serde_json = "1.0" serde_json = "1.0"
serde_urlencoded = "0.6.1" serde_urlencoded = "0.6.1"
open-ssl = { version = "0.10", package = "openssl", optional = true } open-ssl = { version = "0.10", package = "openssl", optional = true }
rust-tls = { version = "0.17.0", package = "rustls", optional = true, features = ["dangerous_configuration"] } rust-tls = { version = "0.18.0", package = "rustls", optional = true, features = ["dangerous_configuration"] }
[dev-dependencies] [dev-dependencies]
actix-connect = { version = "2.0.0-alpha.2", features = ["openssl"] } actix-connect = { version = "2.0.0", features = ["openssl"] }
actix-web = { version = "3.0.0-beta.2", features = ["openssl"] } actix-web = { version = "3.0.0-beta.4", features = ["openssl"] }
actix-http = { version = "2.0.0-beta.3", features = ["openssl"] } actix-http = { version = "2.0.0-beta.4", features = ["openssl"] }
actix-http-test = { version = "2.0.0-alpha.1", features = ["openssl"] } actix-http-test = { version = "2.0.0-alpha.1", features = ["openssl"] }
actix-utils = "1.0.3" actix-utils = "2.0.0"
actix-server = "1.0.0" actix-server = "1.0.0"
actix-tls = { version = "2.0.0-alpha.1", features = ["openssl", "rustls"] } actix-tls = { version = "2.0.0", features = ["openssl", "rustls"] }
brotli2 = "0.3.2" brotli2 = "0.3.2"
flate2 = "1.0.13" flate2 = "1.0.13"
futures-util = { version = "0.3.5", default-features = false } futures-util = { version = "0.3.5", default-features = false }
View File
@ -152,7 +152,7 @@ where
let (head, framed) = let (head, framed) =
connection.open_tunnel(RequestHeadType::from(head)).await?; connection.open_tunnel(RequestHeadType::from(head)).await?;
let framed = framed.map_io(|io| BoxedSocket(Box::new(Socket(io)))); let framed = framed.into_map_io(|io| BoxedSocket(Box::new(Socket(io))));
Ok((head, framed)) Ok((head, framed))
}) })
} }
@ -186,7 +186,7 @@ where
.open_tunnel(RequestHeadType::Rc(head, extra_headers)) .open_tunnel(RequestHeadType::Rc(head, extra_headers))
.await?; .await?;
let framed = framed.map_io(|io| BoxedSocket(Box::new(Socket(io)))); let framed = framed.into_map_io(|io| BoxedSocket(Box::new(Socket(io))));
Ok((head, framed)) Ok((head, framed))
}) })
} }
View File
@ -4,24 +4,93 @@
clippy::borrow_interior_mutable_const, clippy::borrow_interior_mutable_const,
clippy::needless_doctest_main clippy::needless_doctest_main
)] )]
//! An HTTP Client
//! `awc` is an HTTP and WebSocket client library built using the Actix ecosystem.
//!
//! ## Making a GET request
//! //!
//! ```rust //! ```rust
//! use actix_rt::System; //! # #[actix_rt::main]
//! use awc::Client; //! # async fn main() -> Result<(), awc::error::SendRequestError> {
//! let mut client = awc::Client::default();
//! let response = client.get("http://www.rust-lang.org") // <- Create request builder
//! .header("User-Agent", "Actix-web")
//! .send() // <- Send http request
//! .await?;
//! //!
//! #[actix_rt::main] //! println!("Response: {:?}", response);
//! async fn main() { //! # Ok(())
//! let mut client = Client::default(); //! # }
//!
//! let response = client.get("http://www.rust-lang.org") // <- Create request builder
//! .header("User-Agent", "Actix-web")
//! .send() // <- Send http request
//! .await;
//!
//! println!("Response: {:?}", response);
//! }
//! ``` //! ```
//!
//! ## Making POST requests
//!
//! ### Raw body contents
//!
//! ```rust
//! # #[actix_rt::main]
//! # async fn main() -> Result<(), awc::error::SendRequestError> {
//! let mut client = awc::Client::default();
//! let response = client.post("http://httpbin.org/post")
//! .send_body("Raw body contents")
//! .await?;
//! # Ok(())
//! # }
//! ```
//!
//! ### Forms
//!
//! ```rust
//! # #[actix_rt::main]
//! # async fn main() -> Result<(), awc::error::SendRequestError> {
//! let params = [("foo", "bar"), ("baz", "quux")];
//!
//! let mut client = awc::Client::default();
//! let response = client.post("http://httpbin.org/post")
//! .send_form(&params)
//! .await?;
//! # Ok(())
//! # }
//! ```
//!
//! ### JSON
//!
//! ```rust
//! # #[actix_rt::main]
//! # async fn main() -> Result<(), awc::error::SendRequestError> {
//! let request = serde_json::json!({
//! "lang": "rust",
//! "body": "json"
//! });
//!
//! let mut client = awc::Client::default();
//! let response = client.post("http://httpbin.org/post")
//! .send_json(&request)
//! .await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## WebSocket support
//!
//! ```
//! # #[actix_rt::main]
//! # async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! use futures_util::{sink::SinkExt, stream::StreamExt};
//! let (_resp, mut connection) = awc::Client::new()
//! .ws("ws://echo.websocket.org")
//! .connect()
//! .await?;
//!
//! connection
//! .send(awc::ws::Message::Text("Echo".to_string()))
//! .await?;
//! let response = connection.next().await.unwrap()?;
//! # assert_eq!(response, awc::ws::Frame::Text("Echo".as_bytes().into()));
//! # Ok(())
//! # }
//! ```
use std::cell::RefCell; use std::cell::RefCell;
use std::convert::TryFrom; use std::convert::TryFrom;
use std::rc::Rc; use std::rc::Rc;
@ -51,7 +120,9 @@ pub use self::sender::SendClientRequest;
use self::connect::{Connect, ConnectorWrapper}; use self::connect::{Connect, ConnectorWrapper};
/// An HTTP Client /// An asynchronous HTTP and WebSocket client.
///
/// ## Examples
/// ///
/// ```rust /// ```rust
/// use awc::Client; /// use awc::Client;
@ -193,7 +264,8 @@ impl Client {
self.request(Method::OPTIONS, url) self.request(Method::OPTIONS, url)
} }
/// Construct WebSockets request. /// Initialize a WebSocket connection.
/// Returns a WebSocket connection builder.
pub fn ws<U>(&self, url: U) -> ws::WebsocketsRequest pub fn ws<U>(&self, url: U) -> ws::WebsocketsRequest
where where
Uri: TryFrom<U>, Uri: TryFrom<U>,
View File
@ -1,4 +1,31 @@
//! Websockets client //! Websockets client
//!
//! Type definitions required to use [`awc::Client`](../struct.Client.html) as a WebSocket client.
//!
//! # Example
//!
//! ```
//! use awc::{Client, ws};
//! use futures_util::{sink::SinkExt, stream::StreamExt};
//!
//! #[actix_rt::main]
//! async fn main() {
//! let (_resp, mut connection) = Client::new()
//! .ws("ws://echo.websocket.org")
//! .connect()
//! .await
//! .unwrap();
//!
//! connection
//! .send(ws::Message::Text("Echo".to_string()))
//! .await
//! .unwrap();
//! let response = connection.next().await.unwrap().unwrap();
//!
//! assert_eq!(response, ws::Frame::Text("Echo".as_bytes().into()));
//! }
//! ```
use std::convert::TryFrom; use std::convert::TryFrom;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::rc::Rc; use std::rc::Rc;
@ -366,7 +393,7 @@ impl WebsocketsRequest {
// response and ws framed // response and ws framed
Ok(( Ok((
ClientResponse::new(head, Payload::None), ClientResponse::new(head, Payload::None),
framed.map_codec(|_| { framed.into_map_codec(|_| {
if server_mode { if server_mode {
ws::Codec::new().max_size(max_size) ws::Codec::new().max_size(max_size)
} else { } else {
View File
@ -32,7 +32,7 @@ async fn test_simple() {
.await?; .await?;
// start websocket service // start websocket service
let framed = framed.into_framed(ws::Codec::new()); let framed = framed.replace_codec(ws::Codec::new());
ws::Dispatcher::with(framed, ws_service).await ws::Dispatcher::with(framed, ws_service).await
} }
}) })
View File
@ -17,7 +17,7 @@ digraph {
"actix-utils" -> { "actix-service" "actix-rt" "actix-codec" } "actix-utils" -> { "actix-service" "actix-rt" "actix-codec" }
"actix-tracing" -> { "actix-service" } "actix-tracing" -> { "actix-service" }
"actix-tls" -> { "actix-service" "actix-codec" "actix-utils" "actix-rt" } "actix-tls" -> { "actix-service" "actix-codec" "actix-utils" }
"actix-testing" -> { "actix-rt" "actix-macros" "actix-server" "actix-service" } "actix-testing" -> { "actix-rt" "actix-macros" "actix-server" "actix-service" }
"actix-server" -> { "actix-service" "actix-rt" "actix-codec" "actix-utils" } "actix-server" -> { "actix-service" "actix-rt" "actix-codec" "actix-utils" }
"actix-rt" -> { "actix-macros" "actix-threadpool" } "actix-rt" -> { "actix-macros" "actix-threadpool" }
View File
@ -22,7 +22,7 @@ digraph {
"actix-utils" -> { "actix-service" "actix-rt" "actix-codec" } "actix-utils" -> { "actix-service" "actix-rt" "actix-codec" }
"actix-tracing" -> { "actix-service" } "actix-tracing" -> { "actix-service" }
"actix-tls" -> { "actix-service" "actix-codec" "actix-utils" "actix-rt" } "actix-tls" -> { "actix-service" "actix-codec" "actix-utils" }
"actix-testing" -> { "actix-rt" "actix-macros" "actix-server" "actix-service" } "actix-testing" -> { "actix-rt" "actix-macros" "actix-server" "actix-service" }
"actix-server" -> { "actix-service" "actix-rt" "actix-codec" "actix-utils" } "actix-server" -> { "actix-service" "actix-rt" "actix-codec" "actix-utils" }
"actix-rt" -> { "actix-macros" "actix-threadpool" } "actix-rt" -> { "actix-macros" "actix-threadpool" }
View File
@ -17,7 +17,7 @@ use futures_util::future::{ok, Either, FutureExt, LocalBoxFuture};
/// # fn main() { /// # fn main() {
/// let enable_normalize = std::env::var("NORMALIZE_PATH") == Ok("true".into()); /// let enable_normalize = std::env::var("NORMALIZE_PATH") == Ok("true".into());
/// let app = App::new() /// let app = App::new()
/// .wrap(Condition::new(enable_normalize, NormalizePath)); /// .wrap(Condition::new(enable_normalize, NormalizePath::default()));
/// # } /// # }
/// ``` /// ```
pub struct Condition<T> { pub struct Condition<T> {
View File
@ -85,7 +85,7 @@ use crate::HttpResponse;
/// [`ConnectionInfo::realip_remote_addr()`](../dev/struct.ConnectionInfo.html#method.realip_remote_addr) /// [`ConnectionInfo::realip_remote_addr()`](../dev/struct.ConnectionInfo.html#method.realip_remote_addr)
/// ///
/// If you use this value ensure that all requests come from trusted hosts, since it is trivial /// If you use this value ensure that all requests come from trusted hosts, since it is trivial
/// for the remote client to simulate been another client. /// for the remote client to simulate being another client.
/// ///
pub struct Logger(Rc<Inner>); pub struct Logger(Rc<Inner>);
View File
@ -10,20 +10,37 @@ use regex::Regex;
use crate::service::{ServiceRequest, ServiceResponse}; use crate::service::{ServiceRequest, ServiceResponse};
use crate::Error; use crate::Error;
/// To be used when constructing `NormalizePath` to define its behavior.
#[non_exhaustive]
#[derive(Clone, Copy)]
pub enum TrailingSlash {
/// Always add a trailing slash to the end of the path.
/// This will require all routes to end in a trailing slash for them to be accessible.
Always,
/// Trim trailing slashes from the end of the path.
Trim,
}
impl Default for TrailingSlash {
fn default() -> Self {
TrailingSlash::Always
}
}
#[derive(Default, Clone, Copy)] #[derive(Default, Clone, Copy)]
/// `Middleware` to normalize request's URI in place /// `Middleware` to normalize request's URI in place
/// ///
/// Performs the following: /// Performs the following:
/// ///
/// - Merges multiple slashes into one. /// - Merges multiple slashes into one.
/// - Appends a trailing slash if one is not present. /// - Appends a trailing slash if one is not present, or removes one if present, depending on the supplied `TrailingSlash`.
/// ///
/// ```rust /// ```rust
/// use actix_web::{web, http, middleware, App, HttpResponse}; /// use actix_web::{web, http, middleware, App, HttpResponse};
/// ///
/// # fn main() { /// # fn main() {
/// let app = App::new() /// let app = App::new()
/// .wrap(middleware::NormalizePath) /// .wrap(middleware::NormalizePath::default())
/// .service( /// .service(
/// web::resource("/test") /// web::resource("/test")
/// .route(web::get().to(|| HttpResponse::Ok())) /// .route(web::get().to(|| HttpResponse::Ok()))
@ -32,7 +49,14 @@ use crate::Error;
/// # } /// # }
/// ``` /// ```
pub struct NormalizePath; pub struct NormalizePath(TrailingSlash);
impl NormalizePath {
/// Create a new `NormalizePath` middleware with the specified trailing slash style.
pub fn new(trailing_slash_style: TrailingSlash) -> Self {
NormalizePath(trailing_slash_style)
}
}
impl<S, B> Transform<S> for NormalizePath impl<S, B> Transform<S> for NormalizePath
where where
@ -50,6 +74,7 @@ where
ok(NormalizePathNormalization { ok(NormalizePathNormalization {
service, service,
merge_slash: Regex::new("//+").unwrap(), merge_slash: Regex::new("//+").unwrap(),
trailing_slash_behavior: self.0,
}) })
} }
} }
@ -57,6 +82,7 @@ where
pub struct NormalizePathNormalization<S> { pub struct NormalizePathNormalization<S> {
service: S, service: S,
merge_slash: Regex, merge_slash: Regex,
trailing_slash_behavior: TrailingSlash,
} }
impl<S, B> Service for NormalizePathNormalization<S> impl<S, B> Service for NormalizePathNormalization<S>
@ -78,8 +104,11 @@ where
let original_path = head.uri.path(); let original_path = head.uri.path();
// always add trailing slash, might be an extra one // Either appends a trailing slash (duplicate slashes are merged below) or trims all trailing slashes, depending on the configured behavior
let path = original_path.to_string() + "/"; let path = match self.trailing_slash_behavior {
TrailingSlash::Always => original_path.to_string() + "/",
TrailingSlash::Trim => original_path.trim_end_matches('/').to_string(),
};
// normalize multiple /'s to one / // normalize multiple /'s to one /
let path = self.merge_slash.replace_all(&path, "/"); let path = self.merge_slash.replace_all(&path, "/");
@ -150,6 +179,32 @@ mod tests {
assert!(res4.status().is_success()); assert!(res4.status().is_success());
} }
#[actix_rt::test]
async fn trim_trailing_slashes() {
let mut app = init_service(
App::new()
.wrap(NormalizePath(TrailingSlash::Trim))
.service(web::resource("/v1/something").to(HttpResponse::Ok)),
)
.await;
let req = TestRequest::with_uri("/v1/something////").to_request();
let res = call_service(&mut app, req).await;
assert!(res.status().is_success());
let req2 = TestRequest::with_uri("/v1/something/").to_request();
let res2 = call_service(&mut app, req2).await;
assert!(res2.status().is_success());
let req3 = TestRequest::with_uri("//v1//something//").to_request();
let res3 = call_service(&mut app, req3).await;
assert!(res3.status().is_success());
let req4 = TestRequest::with_uri("//v1//something").to_request();
let res4 = call_service(&mut app, req4).await;
assert!(res4.status().is_success());
}
#[actix_rt::test] #[actix_rt::test]
async fn test_in_place_normalization() { async fn test_in_place_normalization() {
let srv = |req: ServiceRequest| { let srv = |req: ServiceRequest| {
@ -157,7 +212,7 @@ mod tests {
ok(req.into_response(HttpResponse::Ok().finish())) ok(req.into_response(HttpResponse::Ok().finish()))
}; };
let mut normalize = NormalizePath let mut normalize = NormalizePath::default()
.new_transform(srv.into_service()) .new_transform(srv.into_service())
.await .await
.unwrap(); .unwrap();
@ -188,7 +243,7 @@ mod tests {
ok(req.into_response(HttpResponse::Ok().finish())) ok(req.into_response(HttpResponse::Ok().finish()))
}; };
let mut normalize = NormalizePath let mut normalize = NormalizePath::default()
.new_transform(srv.into_service()) .new_transform(srv.into_service())
.await .await
.unwrap(); .unwrap();
@ -207,7 +262,7 @@ mod tests {
ok(req.into_response(HttpResponse::Ok().finish())) ok(req.into_response(HttpResponse::Ok().finish()))
}; };
let mut normalize = NormalizePath let mut normalize = NormalizePath::default()
.new_transform(srv.into_service()) .new_transform(srv.into_service())
.await .await
.unwrap(); .unwrap();
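Putting the new API together, a sketch of how the configurable trailing-slash behaviour might be wired into an application (assuming `TrailingSlash` is importable from the same `middleware` module as `NormalizePath`; in this beta it may instead require the full `middleware::normalize::TrailingSlash` path):

```rust
use actix_web::middleware::{NormalizePath, TrailingSlash};
use actix_web::{web, App, HttpResponse};

fn main() {
    // Trim mode: `/v1/something/` and `//v1//something//` are both routed as `/v1/something`.
    let _trimmed = App::new()
        .wrap(NormalizePath::new(TrailingSlash::Trim))
        .service(web::resource("/v1/something").to(HttpResponse::Ok));

    // The previous always-append-a-slash behaviour remains the default.
    let _legacy = App::new()
        .wrap(NormalizePath::default())
        .service(web::resource("/test/").to(HttpResponse::Ok));
}
```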
View File
@ -122,23 +122,23 @@ where
/// Sets the maximum per-worker number of concurrent connections. /// Sets the maximum per-worker number of concurrent connections.
/// ///
/// All socket listeners will stop accepting connections when this limit is reached /// All socket listeners will stop accepting connections when this limit is reached for
/// for each worker. /// each worker.
/// ///
/// By default max connections is set to 25k. /// By default max connections is set to 25k.
pub fn maxconn(mut self, num: usize) -> Self { pub fn max_connections(mut self, num: usize) -> Self {
self.builder = self.builder.maxconn(num); self.builder = self.builder.maxconn(num);
self self
} }
/// Sets the maximum per-worker concurrent TLS connection limit. /// Sets the maximum per-worker concurrent TLS connection limit.
/// ///
/// All listeners will stop accepting connections when this limit is reached. It /// All listeners will stop accepting connections when this limit is reached. It can be used to
/// can be used to limit the global SSL CPU usage. /// limit the global TLS CPU usage.
/// ///
/// By default max connection rate is set to 256. /// By default max connection rate is set to 256.
pub fn maxconnrate(self, num: usize) -> Self { pub fn max_connection_rate(self, num: usize) -> Self {
actix_tls::max_concurrent_ssl_connect(num); actix_tls::max_concurrent_tls_connect(num);
self self
} }
@ -375,19 +375,20 @@ where
addr: A, addr: A,
) -> io::Result<Vec<net::TcpListener>> { ) -> io::Result<Vec<net::TcpListener>> {
let mut err = None; let mut err = None;
let mut succ = false; let mut success = false;
let mut sockets = Vec::new(); let mut sockets = Vec::new();
for addr in addr.to_socket_addrs()? { for addr in addr.to_socket_addrs()? {
match create_tcp_listener(addr, self.backlog) { match create_tcp_listener(addr, self.backlog) {
Ok(lst) => { Ok(lst) => {
succ = true; success = true;
sockets.push(lst); sockets.push(lst);
} }
Err(e) => err = Some(e), Err(e) => err = Some(e),
} }
} }
if !succ { if !success {
if let Some(e) = err.take() { if let Some(e) = err.take() {
Err(e) Err(e)
} else { } else {
@ -575,17 +576,19 @@ fn create_tcp_listener(
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
/// Configure `SslAcceptorBuilder` with custom server flags. /// Configure `SslAcceptorBuilder` with custom server flags.
fn openssl_acceptor(mut builder: SslAcceptorBuilder) -> io::Result<SslAcceptor> { fn openssl_acceptor(mut builder: SslAcceptorBuilder) -> io::Result<SslAcceptor> {
builder.set_alpn_select_callback(|_, protos| { builder.set_alpn_select_callback(|_, protocols| {
const H2: &[u8] = b"\x02h2"; const H2: &[u8] = b"\x02h2";
const H11: &[u8] = b"\x08http/1.1"; const H11: &[u8] = b"\x08http/1.1";
if protos.windows(3).any(|window| window == H2) {
if protocols.windows(3).any(|window| window == H2) {
Ok(b"h2") Ok(b"h2")
} else if protos.windows(9).any(|window| window == H11) { } else if protocols.windows(9).any(|window| window == H11) {
Ok(b"http/1.1") Ok(b"http/1.1")
} else { } else {
Err(AlpnError::NOACK) Err(AlpnError::NOACK)
} }
}); });
builder.set_alpn_protos(b"\x08http/1.1\x02h2")?; builder.set_alpn_protos(b"\x08http/1.1\x02h2")?;
Ok(builder.build()) Ok(builder.build())
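A short sketch of the renamed `HttpServer` builder methods in use; the numeric values simply mirror the defaults quoted in the docs above:

```rust
use actix_web::{web, App, HttpResponse, HttpServer};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().route("/", web::get().to(HttpResponse::Ok)))
        // formerly `maxconn`: per-worker cap on open connections
        .max_connections(25_000)
        // formerly `maxconnrate`: per-worker cap on connections mid TLS handshake
        .max_connection_rate(256)
        .bind("127.0.0.1:8080")?
        .run()
        .await
}
```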
View File
@ -23,7 +23,7 @@ use crate::http::{
StatusCode, StatusCode,
}; };
use crate::request::HttpRequest; use crate::request::HttpRequest;
use crate::responder::Responder; use crate::{responder::Responder, web};
/// Form data helper (`application/x-www-form-urlencoded`) /// Form data helper (`application/x-www-form-urlencoded`)
/// ///
@ -121,8 +121,12 @@ where
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future { fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
let req2 = req.clone(); let req2 = req.clone();
let (limit, err) = req let (limit, err) = req
.app_data::<FormConfig>() .app_data::<Self::Config>()
.map(|c| (c.limit, c.ehandler.clone())) .or_else(|| {
req.app_data::<web::Data<Self::Config>>()
.map(|d| d.as_ref())
})
.map(|c| (c.limit, c.err_handler.clone()))
.unwrap_or((16384, None)); .unwrap_or((16384, None));
UrlEncoded::new(req, payload) UrlEncoded::new(req, payload)
@ -200,7 +204,7 @@ impl<T: Serialize> Responder for Form<T> {
#[derive(Clone)] #[derive(Clone)]
pub struct FormConfig { pub struct FormConfig {
limit: usize, limit: usize,
ehandler: Option<Rc<dyn Fn(UrlencodedError, &HttpRequest) -> Error>>, err_handler: Option<Rc<dyn Fn(UrlencodedError, &HttpRequest) -> Error>>,
} }
impl FormConfig { impl FormConfig {
@ -215,7 +219,7 @@ impl FormConfig {
where where
F: Fn(UrlencodedError, &HttpRequest) -> Error + 'static, F: Fn(UrlencodedError, &HttpRequest) -> Error + 'static,
{ {
self.ehandler = Some(Rc::new(f)); self.err_handler = Some(Rc::new(f));
self self
} }
} }
@ -223,8 +227,8 @@ impl FormConfig {
impl Default for FormConfig { impl Default for FormConfig {
fn default() -> Self { fn default() -> Self {
FormConfig { FormConfig {
limit: 16384, limit: 16_384, // 2^14 bytes (~16kB)
ehandler: None, err_handler: None,
} }
} }
} }
@ -378,7 +382,7 @@ mod tests {
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::*; use super::*;
use crate::http::header::{HeaderValue, CONTENT_TYPE}; use crate::http::header::{HeaderValue, CONTENT_LENGTH, CONTENT_TYPE};
use crate::test::TestRequest; use crate::test::TestRequest;
#[derive(Deserialize, Serialize, Debug, PartialEq)] #[derive(Deserialize, Serialize, Debug, PartialEq)]
@ -499,4 +503,22 @@ mod tests {
use crate::responder::tests::BodyTest; use crate::responder::tests::BodyTest;
assert_eq!(resp.body().bin_ref(), b"hello=world&counter=123"); assert_eq!(resp.body().bin_ref(), b"hello=world&counter=123");
} }
#[actix_rt::test]
async fn test_with_config_in_data_wrapper() {
let ctype = HeaderValue::from_static("application/x-www-form-urlencoded");
let (req, mut pl) = TestRequest::default()
.header(CONTENT_TYPE, ctype)
.header(CONTENT_LENGTH, HeaderValue::from_static("20"))
.set_payload(Bytes::from_static(b"hello=test&counter=4"))
.app_data(web::Data::new(FormConfig::default().limit(10)))
.to_http_parts();
let s = Form::<Info>::from_request(&req, &mut pl).await;
assert!(s.is_err());
let err_str = s.err().unwrap().to_string();
assert!(err_str.contains("Urlencoded payload size is bigger"));
}
} }
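A sketch of the two registration styles the `Form` extractor now honours; the `Data`-wrapped variant corresponds to the new test above:

```rust
use actix_web::{web, App, HttpResponse};

fn main() {
    // Config stored directly with `app_data` is still found first...
    let _direct = App::new()
        .app_data(web::FormConfig::default().limit(4096))
        .route("/", web::post().to(HttpResponse::Ok));

    // ...and a `Data<FormConfig>` registered via `data` is now picked up as well.
    let _wrapped = App::new()
        .data(web::FormConfig::default().limit(4096))
        .route("/", web::post().to(HttpResponse::Ok));
}
```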
View File
@ -20,7 +20,7 @@ use crate::dev::Decompress;
use crate::error::{Error, JsonPayloadError}; use crate::error::{Error, JsonPayloadError};
use crate::extract::FromRequest; use crate::extract::FromRequest;
use crate::request::HttpRequest; use crate::request::HttpRequest;
use crate::responder::Responder; use crate::{responder::Responder, web};
/// Json helper /// Json helper
/// ///
@ -179,10 +179,11 @@ where
#[inline] #[inline]
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future { fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
let req2 = req.clone(); let req2 = req.clone();
let (limit, err, ctype) = req let config = JsonConfig::from_req(req);
.app_data::<Self::Config>()
.map(|c| (c.limit, c.ehandler.clone(), c.content_type.clone())) let limit = config.limit;
.unwrap_or((32768, None, None)); let ctype = config.content_type.clone();
let err_handler = config.err_handler.clone();
JsonBody::new(req, payload, ctype) JsonBody::new(req, payload, ctype)
.limit(limit) .limit(limit)
@ -193,7 +194,8 @@ where
Request path: {}", Request path: {}",
req2.path() req2.path()
); );
if let Some(err) = err {
if let Some(err) = err_handler {
Err((*err)(e, &req2)) Err((*err)(e, &req2))
} else { } else {
Err(e.into()) Err(e.into())
@ -255,7 +257,8 @@ where
#[derive(Clone)] #[derive(Clone)]
pub struct JsonConfig { pub struct JsonConfig {
limit: usize, limit: usize,
ehandler: Option<Arc<dyn Fn(JsonPayloadError, &HttpRequest) -> Error + Send + Sync>>, err_handler:
Option<Arc<dyn Fn(JsonPayloadError, &HttpRequest) -> Error + Send + Sync>>,
content_type: Option<Arc<dyn Fn(mime::Mime) -> bool + Send + Sync>>, content_type: Option<Arc<dyn Fn(mime::Mime) -> bool + Send + Sync>>,
} }
@ -271,7 +274,7 @@ impl JsonConfig {
where where
F: Fn(JsonPayloadError, &HttpRequest) -> Error + Send + Sync + 'static, F: Fn(JsonPayloadError, &HttpRequest) -> Error + Send + Sync + 'static,
{ {
self.ehandler = Some(Arc::new(f)); self.err_handler = Some(Arc::new(f));
self self
} }
@ -283,15 +286,26 @@ impl JsonConfig {
self.content_type = Some(Arc::new(predicate)); self.content_type = Some(Arc::new(predicate));
self self
} }
/// Extract `JsonConfig` from app data. Check both `T` and `Data<T>`, in that order, and fall
/// back to the default config.
fn from_req(req: &HttpRequest) -> &Self {
req.app_data::<Self>()
.or_else(|| req.app_data::<web::Data<Self>>().map(|d| d.as_ref()))
.unwrap_or_else(|| &DEFAULT_CONFIG)
}
} }
// Allow shared refs to default.
const DEFAULT_CONFIG: JsonConfig = JsonConfig {
limit: 32_768, // 2^15 bytes (~32kB)
err_handler: None,
content_type: None,
};
impl Default for JsonConfig { impl Default for JsonConfig {
fn default() -> Self { fn default() -> Self {
JsonConfig { DEFAULT_CONFIG.clone()
limit: 32768,
ehandler: None,
content_type: None,
}
} }
} }
@ -422,7 +436,7 @@ mod tests {
use super::*; use super::*;
use crate::error::InternalError; use crate::error::InternalError;
use crate::http::header; use crate::http::header::{self, HeaderValue, CONTENT_LENGTH, CONTENT_TYPE};
use crate::test::{load_stream, TestRequest}; use crate::test::{load_stream, TestRequest};
use crate::HttpResponse; use crate::HttpResponse;
@ -659,4 +673,20 @@ mod tests {
let s = Json::<MyObject>::from_request(&req, &mut pl).await; let s = Json::<MyObject>::from_request(&req, &mut pl).await;
assert!(s.is_err()) assert!(s.is_err())
} }
#[actix_rt::test]
async fn test_with_config_in_data_wrapper() {
let (req, mut pl) = TestRequest::default()
.header(CONTENT_TYPE, HeaderValue::from_static("application/json"))
.header(CONTENT_LENGTH, HeaderValue::from_static("16"))
.set_payload(Bytes::from_static(b"{\"name\": \"test\"}"))
.app_data(web::Data::new(JsonConfig::default().limit(10)))
.to_http_parts();
let s = Json::<MyObject>::from_request(&req, &mut pl).await;
assert!(s.is_err());
let err_str = s.err().unwrap().to_string();
assert!(err_str.contains("Json payload size is bigger than allowed"));
}
} }
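The same lookup order applies to `JsonConfig`; a sketch combining the `Data` wrapper with a custom error handler (the 4096-byte limit is illustrative):

```rust
use actix_web::{error, web, App, HttpResponse};

fn main() {
    // A `Data<JsonConfig>` set via `App::data` now configures the `Json<T>` extractor;
    // a plain `app_data` entry, if present, still takes precedence.
    let _app = App::new()
        .data(
            web::JsonConfig::default()
                .limit(4096)
                .error_handler(|err, _req| {
                    error::InternalError::from_response(err, HttpResponse::BadRequest().finish())
                        .into()
                }),
        )
        .route("/", web::post().to(HttpResponse::Ok));
}
```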
View File
@ -279,27 +279,24 @@ impl PayloadConfig {
Ok(()) Ok(())
} }
/// Allow payload config extraction from app data checking both `T` and `Data<T>`, in that /// Extract payload config from app data. Check both `T` and `Data<T>`, in that order, and fall
/// order, and falling back to the default payload config. /// back to the default payload config.
fn from_req(req: &HttpRequest) -> &PayloadConfig { fn from_req(req: &HttpRequest) -> &Self {
req.app_data::<PayloadConfig>() req.app_data::<Self>()
.or_else(|| { .or_else(|| req.app_data::<web::Data<Self>>().map(|d| d.as_ref()))
req.app_data::<web::Data<PayloadConfig>>() .unwrap_or_else(|| &DEFAULT_CONFIG)
.map(|d| d.as_ref())
})
.unwrap_or_else(|| &DEFAULT_PAYLOAD_CONFIG)
} }
} }
// Allow shared refs to default. // Allow shared refs to default.
static DEFAULT_PAYLOAD_CONFIG: PayloadConfig = PayloadConfig { const DEFAULT_CONFIG: PayloadConfig = PayloadConfig {
limit: 262_144, // 2^18 bytes (~256kB) limit: 262_144, // 2^18 bytes (~256kB)
mimetype: None, mimetype: None,
}; };
impl Default for PayloadConfig { impl Default for PayloadConfig {
fn default() -> Self { fn default() -> Self {
DEFAULT_PAYLOAD_CONFIG.clone() DEFAULT_CONFIG.clone()
} }
} }
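The raw-payload extractors (`Bytes`, `String`, `Payload`) resolve `PayloadConfig` through the same `from_req` fallback; a small sketch, with the 1 MiB limit chosen purely for illustration:

```rust
use actix_web::{web, App};

fn main() {
    let _app = App::new()
        // Works whether registered via `app_data` or, as here, wrapped in `Data<_>` via `data`.
        .data(web::PayloadConfig::new(1 << 20)) // example: 1 MiB raw-body limit
        .route(
            "/",
            web::post().to(|body: web::Bytes| async move {
                format!("received {} bytes", body.len())
            }),
        );
}
```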
View File
@ -1,5 +1,8 @@
# Changes # Changes
## Unreleased - 2020-xx-xx
* Update actix-codec and actix-utils dependencies.
## [2.0.0-alpha.1] - 2020-05-23 ## [2.0.0-alpha.1] - 2020-05-23
* Update the `time` dependency to 0.2.7 * Update the `time` dependency to 0.2.7
View File
@ -30,13 +30,13 @@ openssl = ["open-ssl", "awc/openssl"]
[dependencies] [dependencies]
actix-service = "1.0.1" actix-service = "1.0.1"
actix-codec = "0.2.0" actix-codec = "0.3.0"
actix-connect = "2.0.0-alpha.2" actix-connect = "2.0.0"
actix-utils = "1.0.3" actix-utils = "2.0.0"
actix-rt = "1.0.0" actix-rt = "1.0.0"
actix-server = "1.0.0" actix-server = "1.0.0"
actix-testing = "1.0.0" actix-testing = "1.0.0"
awc = "2.0.0-alpha.2" awc = "2.0.0-beta.4"
base64 = "0.12" base64 = "0.12"
bytes = "0.5.3" bytes = "0.5.3"
@ -52,5 +52,5 @@ time = { version = "0.2.7", default-features = false, features = ["std"] }
open-ssl = { version = "0.10", package = "openssl", optional = true } open-ssl = { version = "0.10", package = "openssl", optional = true }
[dev-dependencies] [dev-dependencies]
actix-web = "3.0.0-beta.2" actix-web = "3.0.0-beta.4"
actix-http = "2.0.0-beta.3" actix-http = "2.0.0-beta.4"
View File
@ -22,8 +22,8 @@ async fn test_start() {
}) })
.workers(1) .workers(1)
.backlog(1) .backlog(1)
.maxconn(10) .max_connections(10)
.maxconnrate(10) .max_connection_rate(10)
.keep_alive(10) .keep_alive(10)
.client_timeout(5000) .client_timeout(5000)
.client_shutdown(0) .client_shutdown(0)