Mirror of https://github.com/fafhrd91/actix-net

Compare commits


2 Commits

Author    SHA1        Message                                 Date
Rob Ede   45e3a5c0e6  Merge branch 'master' into remove-mpsc  2021-02-24 09:48:50 +00:00
Rob Ede   564acfbf3a  remove mpsc impl from -utils            2021-02-24 02:11:08 +00:00
103 changed files with 5084 additions and 3271 deletions

View File

@@ -1,23 +0,0 @@
[alias]
chk = "check --workspace --all-features --tests --examples --bins"
lint = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"
ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"
# just check the library (without dev deps)
ci-check-min = "hack --workspace check --no-default-features"
ci-check-lib = "hack --workspace --feature-powerset --exclude-features io-uring check"
ci-check-lib-linux = "hack --workspace --feature-powerset check"
# check everything
ci-check = "hack --workspace --feature-powerset --exclude-features io-uring check --tests --examples"
ci-check-linux = "hack --workspace --feature-powerset check --tests --examples"
# tests avoiding io-uring feature
ci-test = "hack test --workspace --exclude=actix-rt --exclude=actix-server --all-features --lib --tests --no-fail-fast -- --nocapture"
ci-test-rt = " hack --feature-powerset --exclude-features io-uring test --package=actix-rt --lib --tests --no-fail-fast -- --nocapture"
ci-test-server = "hack --feature-powerset --exclude-features io-uring test --package=actix-server --lib --tests --no-fail-fast -- --nocapture"
# test with io-uring feature
ci-test-rt-linux = " hack --feature-powerset test --package=actix-rt --lib --tests --no-fail-fast -- --nocapture"
ci-test-server-linux = "hack --feature-powerset test --package=actix-server --lib --tests --no-fail-fast -- --nocapture"
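The aliases defined here behave like ordinary Cargo subcommands, e.g. `cargo chk` or `cargo lint`; the `ci-*` aliases additionally assume `cargo-hack` is installed, which the CI workflow below takes care of.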

View File

@@ -25,9 +25,6 @@ jobs:
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
VCPKGRS_DYNAMIC: 1
steps:
- name: Setup Routing
if: matrix.target.os == 'macos-latest'
@@ -35,17 +32,6 @@ jobs:
- uses: actions/checkout@v2
# install OpenSSL on Windows
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc' || matrix.target.triple == 'i686-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install OpenSSL
if: matrix.target.triple == 'i686-pc-windows-msvc'
run: vcpkg install openssl:x86-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
@@ -53,21 +39,21 @@ jobs:
profile: minimal
override: true
# - name: Install MSYS2
# if: matrix.target.triple == 'x86_64-pc-windows-gnu'
# uses: msys2/setup-msys2@v2
# - name: Install MinGW Packages
# if: matrix.target.triple == 'x86_64-pc-windows-gnu'
# run: |
# msys2 -c 'pacman -Sy --noconfirm pacman'
# msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
- name: Install MSYS2
if: matrix.target.triple == 'x86_64-pc-windows-gnu'
uses: msys2/setup-msys2@v2
- name: Install MinGW Packages
if: matrix.target.triple == 'x86_64-pc-windows-gnu'
run: |
msys2 -c 'pacman -Sy --noconfirm pacman'
msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
# - name: Generate Cargo.lock
# uses: actions-rs/cargo@v1
# with:
# command: generate-lockfile
# - name: Cache Dependencies
# uses: Swatinem/rust-cache@v1.2.0
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with:
command: generate-lockfile
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
@@ -75,47 +61,24 @@ jobs:
command: install
args: cargo-hack
- name: check lib
if: >
matrix.target.os != 'ubuntu-latest'
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-lib }
- name: check lib
if: matrix.target.os == 'ubuntu-latest'
uses: actions-rs/cargo@v1
with: { command: ci-check-lib-linux }
- name: check lib
if: matrix.target.triple == 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
with:
command: hack
args: --clean-per-run check --workspace --no-default-features --tests
- name: check full
# TODO: compile OpenSSL and run tests on MinGW
if: >
matrix.target.os != 'ubuntu-latest'
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with: { command: ci-check }
- name: check all
if: matrix.target.os == 'ubuntu-latest'
uses: actions-rs/cargo@v1
with: { command: ci-check-linux }
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
if: >
matrix.target.os != 'ubuntu-latest'
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
run: |
cargo ci-test
cargo ci-test-rt
cargo ci-test-server
- name: tests
if: matrix.target.os == 'ubuntu-latest'
run: |
cargo ci-test
cargo ci-test-rt-linux
cargo ci-test-server-linux
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --all-features --no-fail-fast -- --nocapture
- name: Generate coverage file
if: >
@@ -131,40 +94,10 @@ jobs:
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
uses: codecov/codecov-action@v1
with: { file: cobertura.xml }
with:
file: cobertura.xml
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.2 --no-default-features --features ci-autoclean
cargo install cargo-cache --no-default-features --features ci-autoclean
cargo-cache
rustdoc:
name: rustdoc
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust (nightly)
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: doc tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with: { command: ci-doctest }

View File

@@ -39,4 +39,4 @@ jobs:
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --all-features --tests --examples --bins -- -Dclippy::todo
args: --workspace --tests --all-features

View File

@@ -2,6 +2,7 @@
members = [
"actix-codec",
"actix-macros",
"actix-router",
"actix-rt",
"actix-server",
"actix-service",
@@ -9,13 +10,12 @@ members = [
"actix-tracing",
"actix-utils",
"bytestring",
"local-channel",
"local-waker",
]
[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-macros = { path = "actix-macros" }
actix-router = { path = "actix-router" }
actix-rt = { path = "actix-rt" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
@@ -23,10 +23,3 @@ actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }
local-channel = { path = "local-channel" }
local-waker = { path = "local-waker" }
[profile.release]
lto = true
opt-level = 3
codegen-units = 1
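For context: the `[patch.crates-io]` table redirects the crates listed above to their in-repository paths, so workspace members resolve one another from local sources instead of the releases published on crates.io.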

View File

@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Actix Team
Copyright 2017-NOW Nikolay Kim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
Copyright (c) 2017-NOW Actix Team
Copyright (c) 2017 Nikolay Kim
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated

View File

@@ -3,10 +3,6 @@
## Unreleased - 2021-xx-xx
## 0.4.0 - 2021-04-20
* No significant changes since v0.4.0-beta.1.
## 0.4.0-beta.1 - 2020-12-28
* Replace `pin-project` with `pin-project-lite`. [#237]
* Upgrade `tokio` dependency to `1`. [#237]
@@ -27,28 +23,28 @@
## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec::encode()` performance.
* Simplify `BytesCodec::decode()`.
* Improve `BytesCodec` `.encode()` performance
* Simplify `BytesCodec` `.decode()`
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` check emptiness of read buffer.
## 0.2.0 - 2019-12-10
* Use specific futures dependencies.
* Use specific futures dependencies
## 0.2.0-alpha.4
* Fix buffer remaining capacity calculation.
* Fix buffer remaining capacity calculation
## 0.2.0-alpha.3
* Use tokio 0.2.
* Fix low/high watermark for write/read buffers.
* Use tokio 0.2
* Fix low/high watermark for write/read buffers
## 0.2.0-alpha.2
* Migrated to `std::future`.
* Migrated to `std::future`
## 0.1.2 - 2019-03-27
@@ -60,4 +56,4 @@
## 0.1.0 - 2018-12-09
* Move codec to separate crate.
* Move codec to separate crate

View File

@@ -1,10 +1,12 @@
[package]
name = "actix-codec"
version = "0.4.0"
version = "0.4.0-beta.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
repository = "https://github.com/actix/actix-net"
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"

View File

@@ -21,13 +21,14 @@ bitflags::bitflags! {
}
pin_project_lite::pin_project! {
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using the `Encoder` and
/// `Decoder` traits to encode and decode frames.
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
/// the `Encoder` and `Decoder` traits to encode and decode frames.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually wants to batch these
/// into meaningful chunks, called "frames". This method layers framing on top of an I/O object,
/// by using the `Encoder`/`Decoder` traits to handle encoding and decoding of message frames.
/// Note that the incoming and outgoing frame types may be distinct.
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Encoder`/`Decoder`
/// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
pub struct Framed<T, U> {
#[pin]
io: T,
@@ -43,9 +44,10 @@ where
T: AsyncRead + AsyncWrite,
U: Decoder,
{
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
/// a single object is often useful for layering things like gzip or TLS, which require both
/// read and write access to the underlying object.
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
pub fn new(io: T, codec: U) -> Framed<T, U> {
Framed {
io,
@@ -68,18 +70,21 @@ impl<T, U> Framed<T, U> {
&mut self.codec
}
/// Returns a reference to the underlying I/O stream wrapped by `Frame`.
/// Returns a reference to the underlying I/O stream wrapped by
/// `Frame`.
///
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
/// it may corrupt the stream of frames otherwise being worked with.
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn io_ref(&self) -> &T {
&self.io
}
/// Returns a mutable reference to the underlying I/O stream.
///
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
/// it may corrupt the stream of frames otherwise being worked with.
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn io_mut(&mut self) -> &mut T {
&mut self.io
}
@@ -179,9 +184,10 @@ impl<T, U> Framed<T, U> {
{
loop {
let mut this = self.as_mut().project();
// Repeatedly call `decode` or `decode_eof` as long as it is "readable". Readable is
// defined as not having returned `None`. If the upstream has returned EOF, and the
// decoder is no longer readable, it can be assumed that the decoder will never become
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if this.flags.contains(Flags::READABLE) {
@@ -209,7 +215,7 @@ impl<T, U> Framed<T, U> {
debug_assert!(!this.flags.contains(Flags::EOF));
// Otherwise, try to read more data and try again. Make sure we've got room.
// Otherwise, try to read more data and try again. Make sure we've got room
let remaining = this.read_buf.capacity() - this.read_buf.len();
if remaining < LW {
this.read_buf.reserve(HW - remaining)
@@ -229,7 +235,7 @@ impl<T, U> Framed<T, U> {
}
/// Flush write buffer to underlying I/O stream.
pub fn flush<I>(
pub fn poll_flush<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
@@ -265,7 +271,7 @@ impl<T, U> Framed<T, U> {
}
/// Flush write buffer and shutdown underlying I/O stream.
pub fn close<I>(
pub fn poll_close<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
@@ -313,11 +319,11 @@ where
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.flush(cx)
self.poll_flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.close(cx)
self.poll_close(cx)
}
}
@@ -335,12 +341,13 @@ where
}
impl<T, U> Framed<T, U> {
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
/// a single object is often useful for layering things like gzip or TLS, which require both
/// read and write access to the underlying object.
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// These objects take a stream, a read buffer and a write buffer. These fields can be obtained
/// from an existing `Framed` with the `into_parts` method.
/// These objects take a stream, a read buffer and a write buffer. These
/// fields can be obtained from an existing `Framed` with the `into_parts` method.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
io: parts.io,
@@ -351,11 +358,12 @@ impl<T, U> Framed<T, U> {
}
}
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer with unprocessed data,
/// and the codec.
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer
/// with unprocessed data, and the codec.
///
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
/// it may corrupt the stream of frames otherwise being worked with.
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_parts(self) -> FramedParts<T, U> {
FramedParts {
io: self.io,
@@ -368,15 +376,14 @@ impl<T, U> Framed<T, U> {
}
/// `FramedParts` contains an export of the data of a Framed transport.
///
/// It can be used to construct a new `Framed` with a different codec. It contains all current
/// buffers and the inner transport.
/// It can be used to construct a new `Framed` with a different codec.
/// It contains all current buffers and the inner transport.
#[derive(Debug)]
pub struct FramedParts<T, U> {
/// The inner transport used to read bytes to and write bytes to.
/// The inner transport used to read bytes to and write bytes to
pub io: T,
/// The codec object.
/// The codec
pub codec: U,
/// The buffer with read but unprocessed data.
@@ -389,7 +396,7 @@ pub struct FramedParts<T, U> {
}
impl<T, U> FramedParts<T, U> {
/// Creates a new default `FramedParts`.
/// Create a new, default, `FramedParts`
pub fn new(io: T, codec: U) -> FramedParts<T, U> {
FramedParts {
io,
@@ -400,7 +407,7 @@ impl<T, U> FramedParts<T, U> {
}
}
/// Creates a new `FramedParts` with read buffer.
/// Create a new `FramedParts` with read buffer
pub fn with_read_buf(io: T, codec: U, read_buf: BytesMut) -> FramedParts<T, U> {
FramedParts {
io,
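As a usage sketch (not part of this diff; it assumes actix-codec's bundled `BytesCodec` together with the `bytes`, `futures-util`, `tokio`, and `actix-rt` crates), `Framed` wraps an `AsyncRead + AsyncWrite` object and exposes it as a `Sink` of outgoing frames and a `Stream` of decoded frames:

    use actix_codec::{BytesCodec, Framed};
    use bytes::Bytes;
    use futures_util::{SinkExt, StreamExt};
    use tokio::net::TcpStream;

    #[actix_rt::main]
    async fn main() -> std::io::Result<()> {
        // Hypothetical peer address, purely for illustration.
        let stream = TcpStream::connect("127.0.0.1:8080").await?;

        // One object that is both `Stream` and `Sink`, as the docs above describe.
        let mut framed = Framed::new(stream, BytesCodec);

        // Sink side: encode and flush one outgoing frame.
        framed.send(Bytes::from_static(b"ping")).await?;

        // Stream side: wait for the next decoded frame, if the peer sends one.
        if let Some(frame) = framed.next().await {
            println!("received {} bytes", frame?.len());
        }

        Ok(())
    }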

View File

@@ -7,7 +7,7 @@
//! [`Sink`]: futures_sink::Sink
//! [`Stream`]: futures_core::Stream
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]

View File

@@ -3,12 +3,6 @@
## Unreleased - 2021-xx-xx
## 0.2.1 - 2021-02-02
* Add optional argument `system` to `main` macro which can be used to specify the path to `actix_rt::System` (useful for re-exports). [#363]
[#363]: https://github.com/actix/actix-net/pull/363
## 0.2.0 - 2021-02-02
* Update to latest `actix_rt::System::new` signature. [#261]

View File

@@ -1,12 +1,10 @@
[package]
name = "actix-macros"
version = "0.2.1"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Ibraheem Ahmed <ibrah1440@gmail.com>",
]
version = "0.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Macros for Actix system and runtime"
repository = "https://github.com/actix/actix-net"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-macros"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -21,5 +19,5 @@ syn = { version = "^1", features = ["full"] }
[dev-dependencies]
actix-rt = "2.0.0"
futures-util = { version = "0.3.7", default-features = false }
futures-util = { version = "0.3", default-features = false }
trybuild = "1"

View File

@@ -27,10 +27,8 @@ use quote::quote;
#[allow(clippy::needless_doctest_main)]
#[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let args = syn::parse_macro_input!(args as syn::AttributeArgs);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
@@ -45,47 +43,13 @@ pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
.into();
}
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
for arg in &args {
match arg {
syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue {
lit: syn::Lit::Str(lit),
path,
..
})) => match path
.get_ident()
.map(|i| i.to_string().to_lowercase())
.as_deref()
{
Some("system") => match lit.parse() {
Ok(path) => system = path,
Err(_) => {
return syn::Error::new_spanned(lit, "Expected path")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
}
}
sig.asyncness = None;
(quote! {
#(#attrs)*
#vis #sig {
<#system>::new().block_on(async move { #body })
actix_rt::System::new()
.block_on(async move { #body })
}
})
.into()
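For reference, a minimal usage sketch (not part of this diff): the macro strips `async` from the signature and wraps the body exactly as the `quote!` expansion above shows, i.e. in `::actix_rt::System::new().block_on(async move { .. })`.

    // What a caller writes; the generated `fn main` is equivalent to
    // `::actix_rt::System::new().block_on(async move { /* body */ })`.
    #[actix_rt::main]
    async fn main() {
        // Runs inside the Actix system created by the generated wrapper.
        println!("hello from the Actix runtime");
    }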

View File

@@ -4,9 +4,6 @@ fn compile_macros() {
t.pass("tests/trybuild/main-01-basic.rs");
t.compile_fail("tests/trybuild/main-02-only-async.rs");
t.pass("tests/trybuild/main-03-fn-params.rs");
t.pass("tests/trybuild/main-04-system-path.rs");
t.compile_fail("tests/trybuild/main-05-system-expect-path.rs");
t.compile_fail("tests/trybuild/main-06-unknown-attr.rs");
t.pass("tests/trybuild/test-01-basic.rs");
t.pass("tests/trybuild/test-02-keep-attrs.rs");

View File

@@ -1,8 +0,0 @@
mod system {
pub use actix_rt::System as MySystem;
}
#[actix_rt::main(system = "system::MySystem")]
async fn main() {
futures_util::future::ready(()).await
}

View File

@@ -1,4 +0,0 @@
#[actix_rt::main(system = "!@#*&")]
async fn main2() {}
fn main() {}

View File

@@ -1,5 +0,0 @@
error: Expected path
--> $DIR/main-05-system-expect-path.rs:1:27
|
1 | #[actix_rt::main(system = "!@#*&")]
| ^^^^^^^

View File

@@ -1,7 +0,0 @@
#[actix_rt::main(foo = "bar")]
async fn async_main() {}
#[actix_rt::main(bar::baz)]
async fn async_main2() {}
fn main() {}

View File

@@ -1,11 +0,0 @@
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:1:18
|
1 | #[actix_rt::main(foo = "bar")]
| ^^^^^^^^^^^
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:4:18
|
4 | #[actix_rt::main(bar::baz)]
| ^^^^^^^^

actix-router/CHANGES.md (new file, 67 lines)
View File

@@ -0,0 +1,67 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.2.7 - 2021-02-06
* Add `Router::recognize_checked` [#247]
[#247]: https://github.com/actix/actix-net/pull/247
## 0.2.6 - 2021-01-09
* Use `bytestring` version range compatible with Bytes v1.0. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 0.2.5 - 2020-09-20
* Fix `from_hex()` method
## 0.2.4 - 2019-12-31
* Add `ResourceDef::resource_path_named()` path generation method
## 0.2.3 - 2019-12-25
* Add impl `IntoPattern` for `&String`
## 0.2.2 - 2019-12-25
* Use `IntoPattern` for `RouterBuilder::path()`
## 0.2.1 - 2019-12-25
* Add `IntoPattern` trait
* Add multi-pattern resources
## 0.2.0 - 2019-12-07
* Update http to 0.2
* Update regex to 1.3
* Use bytestring instead of string
## 0.1.5 - 2019-05-15
* Remove debug prints
## 0.1.4 - 2019-05-15
* Fix checked resource match
## 0.1.3 - 2019-04-22
* Added support for `remainder match` (i.e "/path/{tail}*")
## 0.1.2 - 2019-04-07
* Export `Quoter` type
* Allow to reset `Path` instance
## 0.1.1 - 2019-04-03
* Get dynamic segment by name instead of iterator.
## 0.1.0 - 2019-03-09
* Initial release

actix-router/Cargo.toml (new file, 29 lines)
View File

@@ -0,0 +1,29 @@
[package]
name = "actix-router"
version = "0.2.7"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Resource path matching library"
keywords = ["actix", "router", "routing"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-router"
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_router"
path = "src/lib.rs"
[features]
default = ["http"]
[dependencies]
regex = "1.3.1"
serde = "1.0.104"
bytestring = ">=0.1.5, <2"
log = "0.4.8"
http = { version = "0.2.2", optional = true }
[dev-dependencies]
http = "0.2.2"
serde_derive = "1.0"

actix-router/LICENSE-APACHE (new symbolic link, 1 line)
View File

@@ -0,0 +1 @@
../LICENSE-APACHE

actix-router/LICENSE-MIT (new symbolic link, 1 line)
View File

@@ -0,0 +1 @@
../LICENSE-MIT

actix-router/src/de.rs (new file, 717 lines)
View File

@@ -0,0 +1,717 @@
use serde::de::{self, Deserializer, Error as DeError, Visitor};
use serde::forward_to_deserialize_any;
use crate::path::{Path, PathIter};
use crate::ResourcePath;
macro_rules! unsupported_type {
($trait_fn:ident, $name:expr) => {
fn $trait_fn<V>(self, _: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom(concat!(
"unsupported type: ",
$name
)))
}
};
}
macro_rules! parse_single_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() != 1 {
Err(de::value::Error::custom(
format!("wrong number of parameters: {} expected 1", self.path.len())
.as_str(),
))
} else {
let v = self.path[0].parse().map_err(|_| {
de::value::Error::custom(format!(
"can not parse {:?} to a {}",
&self.path[0], $tp
))
})?;
visitor.$visit_fn(v)
}
}
};
}
pub struct PathDeserializer<'de, T: ResourcePath> {
path: &'de Path<T>,
}
impl<'de, T: ResourcePath + 'de> PathDeserializer<'de, T> {
pub fn new(path: &'de Path<T>) -> Self {
PathDeserializer { path }
}
}
impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T> {
type Error = de::value::Error;
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_map(ParamsDeserializer {
params: self.path.iter(),
current: None,
})
}
fn deserialize_struct<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_map(visitor)
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_unit(visitor)
}
fn deserialize_newtype_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_tuple<V>(self, len: usize, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() < len {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected {}",
self.path.len(),
len
)
.as_str(),
))
} else {
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
}
fn deserialize_tuple_struct<V>(
self,
_: &'static str,
len: usize,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() < len {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected {}",
self.path.len(),
len
)
.as_str(),
))
} else {
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
}
fn deserialize_enum<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.is_empty() {
Err(de::value::Error::custom("expected at least one parameters"))
} else {
visitor.visit_enum(ValueEnum {
value: &self.path[0],
})
}
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() != 1 {
Err(de::value::Error::custom(
format!("wrong number of parameters: {} expected 1", self.path.len()).as_str(),
))
} else {
visitor.visit_str(&self.path[0])
}
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
unsupported_type!(deserialize_any, "'any'");
unsupported_type!(deserialize_bytes, "bytes");
unsupported_type!(deserialize_option, "Option<T>");
unsupported_type!(deserialize_identifier, "identifier");
unsupported_type!(deserialize_ignored_any, "ignored_any");
parse_single_value!(deserialize_bool, visit_bool, "bool");
parse_single_value!(deserialize_i8, visit_i8, "i8");
parse_single_value!(deserialize_i16, visit_i16, "i16");
parse_single_value!(deserialize_i32, visit_i32, "i32");
parse_single_value!(deserialize_i64, visit_i64, "i64");
parse_single_value!(deserialize_u8, visit_u8, "u8");
parse_single_value!(deserialize_u16, visit_u16, "u16");
parse_single_value!(deserialize_u32, visit_u32, "u32");
parse_single_value!(deserialize_u64, visit_u64, "u64");
parse_single_value!(deserialize_f32, visit_f32, "f32");
parse_single_value!(deserialize_f64, visit_f64, "f64");
parse_single_value!(deserialize_string, visit_string, "String");
parse_single_value!(deserialize_byte_buf, visit_string, "String");
parse_single_value!(deserialize_char, visit_char, "char");
}
struct ParamsDeserializer<'de, T: ResourcePath> {
params: PathIter<'de, T>,
current: Option<(&'de str, &'de str)>,
}
impl<'de, T: ResourcePath> de::MapAccess<'de> for ParamsDeserializer<'de, T> {
type Error = de::value::Error;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Self::Error>
where
K: de::DeserializeSeed<'de>,
{
self.current = self.params.next().map(|ref item| (item.0, item.1));
match self.current {
Some((key, _)) => Ok(Some(seed.deserialize(Key { key })?)),
None => Ok(None),
}
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Self::Error>
where
V: de::DeserializeSeed<'de>,
{
if let Some((_, value)) = self.current.take() {
seed.deserialize(Value { value })
} else {
Err(de::value::Error::custom("unexpected item"))
}
}
}
struct Key<'de> {
key: &'de str,
}
impl<'de> Deserializer<'de> for Key<'de> {
type Error = de::value::Error;
fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_str(self.key)
}
fn deserialize_any<V>(self, _visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("Unexpected"))
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string bytes
byte_buf option unit unit_struct newtype_struct seq tuple
tuple_struct map struct enum ignored_any
}
}
macro_rules! parse_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
let v = self.value.parse().map_err(|_| {
de::value::Error::custom(format!("can not parse {:?} to a {}", self.value, $tp))
})?;
visitor.$visit_fn(v)
}
};
}
struct Value<'de> {
value: &'de str,
}
impl<'de> Deserializer<'de> for Value<'de> {
type Error = de::value::Error;
parse_value!(deserialize_bool, visit_bool, "bool");
parse_value!(deserialize_i8, visit_i8, "i8");
parse_value!(deserialize_i16, visit_i16, "i16");
parse_value!(deserialize_i32, visit_i32, "i16");
parse_value!(deserialize_i64, visit_i64, "i64");
parse_value!(deserialize_u8, visit_u8, "u8");
parse_value!(deserialize_u16, visit_u16, "u16");
parse_value!(deserialize_u32, visit_u32, "u32");
parse_value!(deserialize_u64, visit_u64, "u64");
parse_value!(deserialize_f32, visit_f32, "f32");
parse_value!(deserialize_f64, visit_f64, "f64");
parse_value!(deserialize_string, visit_string, "String");
parse_value!(deserialize_byte_buf, visit_string, "String");
parse_value!(deserialize_char, visit_char, "char");
fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_borrowed_bytes(self.value.as_bytes())
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_borrowed_str(self.value)
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_some(self)
}
fn deserialize_enum<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_enum(ValueEnum { value: self.value })
}
fn deserialize_newtype_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_tuple<V>(self, _: usize, _: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: tuple"))
}
fn deserialize_struct<V>(
self,
_: &'static str,
_: &'static [&'static str],
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: struct"))
}
fn deserialize_tuple_struct<V>(
self,
_: &'static str,
_: usize,
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: tuple struct"))
}
unsupported_type!(deserialize_any, "any");
unsupported_type!(deserialize_seq, "seq");
unsupported_type!(deserialize_map, "map");
unsupported_type!(deserialize_identifier, "identifier");
}
struct ParamsSeq<'de, T: ResourcePath> {
params: PathIter<'de, T>,
}
impl<'de, T: ResourcePath> de::SeqAccess<'de> for ParamsSeq<'de, T> {
type Error = de::value::Error;
fn next_element_seed<U>(&mut self, seed: U) -> Result<Option<U::Value>, Self::Error>
where
U: de::DeserializeSeed<'de>,
{
match self.params.next() {
Some(item) => Ok(Some(seed.deserialize(Value { value: item.1 })?)),
None => Ok(None),
}
}
}
struct ValueEnum<'de> {
value: &'de str,
}
impl<'de> de::EnumAccess<'de> for ValueEnum<'de> {
type Error = de::value::Error;
type Variant = UnitVariant;
fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
where
V: de::DeserializeSeed<'de>,
{
Ok((seed.deserialize(Key { key: self.value })?, UnitVariant))
}
}
struct UnitVariant;
impl<'de> de::VariantAccess<'de> for UnitVariant {
type Error = de::value::Error;
fn unit_variant(self) -> Result<(), Self::Error> {
Ok(())
}
fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value, Self::Error>
where
T: de::DeserializeSeed<'de>,
{
Err(de::value::Error::custom("not supported"))
}
fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("not supported"))
}
fn struct_variant<V>(
self,
_: &'static [&'static str],
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("not supported"))
}
}
#[cfg(test)]
mod tests {
use serde::de;
use serde_derive::Deserialize;
use super::*;
use crate::path::Path;
use crate::router::Router;
#[derive(Deserialize)]
struct MyStruct {
key: String,
value: String,
}
#[derive(Deserialize)]
struct Id {
_id: String,
}
#[derive(Debug, Deserialize)]
struct Test1(String, u32);
#[derive(Debug, Deserialize)]
struct Test2 {
key: String,
value: u32,
}
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum TestEnum {
Val1,
Val2,
}
#[derive(Debug, Deserialize)]
struct Test3 {
val: TestEnum,
}
#[test]
fn test_request_extract() {
let mut router = Router::<()>::build();
router.path("/{key}/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/user1/");
assert!(router.recognize(&mut path).is_some());
let s: MyStruct = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.key, "name");
assert_eq!(s.value, "user1");
let s: (String, String) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, "user1");
let mut router = Router::<()>::build();
router.path("/{key}/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/32/");
assert!(router.recognize(&mut path).is_some());
let s: Test1 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, 32);
let s: Test2 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.key, "name");
assert_eq!(s.value, 32);
let s: (String, u8) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, 32);
let res: Vec<String> =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(res[0], "name".to_owned());
assert_eq!(res[1], "32".to_owned());
}
#[test]
fn test_extract_path_single() {
let mut router = Router::<()>::build();
router.path("/{value}/", ());
let router = router.finish();
let mut path = Path::new("/32/");
assert!(router.recognize(&mut path).is_some());
let i: i8 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, 32);
}
#[test]
fn test_extract_enum() {
let mut router = Router::<()>::build();
router.path("/{val}/", ());
let router = router.finish();
let mut path = Path::new("/val1/");
assert!(router.recognize(&mut path).is_some());
let i: TestEnum = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, TestEnum::Val1);
let mut router = Router::<()>::build();
router.path("/{val1}/{val2}/", ());
let router = router.finish();
let mut path = Path::new("/val1/val2/");
assert!(router.recognize(&mut path).is_some());
let i: (TestEnum, TestEnum) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, (TestEnum::Val1, TestEnum::Val2));
}
#[test]
fn test_extract_enum_value() {
let mut router = Router::<()>::build();
router.path("/{val}/", ());
let router = router.finish();
let mut path = Path::new("/val1/");
assert!(router.recognize(&mut path).is_some());
let i: Test3 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i.val, TestEnum::Val1);
let mut path = Path::new("/val3/");
assert!(router.recognize(&mut path).is_some());
let i: Result<Test3, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(i.is_err());
assert!(format!("{:?}", i).contains("unknown variant"));
}
#[test]
fn test_extract_errors() {
let mut router = Router::<()>::build();
router.path("/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/");
assert!(router.recognize(&mut path).is_some());
let s: Result<Test1, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("wrong number of parameters"));
let s: Result<Test2, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("can not parse"));
let s: Result<(String, String), de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("wrong number of parameters"));
let s: Result<u32, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("can not parse"));
}
// #[test]
// fn test_extract_path_decode() {
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
// macro_rules! test_single_value {
// ($value:expr, $expected:expr) => {{
// let req = TestRequest::with_uri($value).finish();
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// assert_eq!(
// *Path::<String>::from_request(&req, &PathConfig::default()).unwrap(),
// $expected
// );
// }};
// }
// test_single_value!("/%25/", "%");
// test_single_value!("/%40%C2%A3%24%25%5E%26%2B%3D/", "@£$%^&+=");
// test_single_value!("/%2B/", "+");
// test_single_value!("/%252B/", "%2B");
// test_single_value!("/%2F/", "/");
// test_single_value!("/%252F/", "%2F");
// test_single_value!(
// "/http%3A%2F%2Flocalhost%3A80%2Ffoo/",
// "http://localhost:80/foo"
// );
// test_single_value!("/%2Fvar%2Flog%2Fsyslog/", "/var/log/syslog");
// test_single_value!(
// "/http%3A%2F%2Flocalhost%3A80%2Ffile%2F%252Fvar%252Flog%252Fsyslog/",
// "http://localhost:80/file/%2Fvar%2Flog%2Fsyslog"
// );
// let req = TestRequest::with_uri("/%25/7/?id=test").finish();
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/")));
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// let s = Path::<Test2>::from_request(&req, &PathConfig::default()).unwrap();
// assert_eq!(s.key, "%");
// assert_eq!(s.value, 7);
// let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap();
// assert_eq!(s.0, "%");
// assert_eq!(s.1, "7");
// }
// #[test]
// fn test_extract_path_no_decode() {
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
// let req = TestRequest::with_uri("/%25/").finish();
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// assert_eq!(
// *Path::<String>::from_request(&req, &&PathConfig::default().disable_decoding())
// .unwrap(),
// "%25"
// );
// }
}

actix-router/src/lib.rs (new file, 152 lines)
View File

@@ -0,0 +1,152 @@
//! Resource path matching library.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod de;
mod path;
mod resource;
mod router;
pub use self::de::PathDeserializer;
pub use self::path::Path;
pub use self::resource::ResourceDef;
pub use self::router::{ResourceInfo, Router, RouterBuilder};
pub trait Resource<T: ResourcePath> {
fn resource_path(&mut self) -> &mut Path<T>;
}
pub trait ResourcePath {
fn path(&self) -> &str;
}
impl ResourcePath for String {
fn path(&self) -> &str {
self.as_str()
}
}
impl<'a> ResourcePath for &'a str {
fn path(&self) -> &str {
self
}
}
impl ResourcePath for bytestring::ByteString {
fn path(&self) -> &str {
&*self
}
}
/// Helper trait for type that could be converted to path pattern
pub trait IntoPattern {
fn is_single(&self) -> bool;
fn patterns(&self) -> Vec<String>;
}
impl IntoPattern for String {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![self.clone()]
}
}
impl<'a> IntoPattern for &'a String {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![self.as_str().to_string()]
}
}
impl<'a> IntoPattern for &'a str {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![(*self).to_string()]
}
}
impl<T: AsRef<str>> IntoPattern for Vec<T> {
fn is_single(&self) -> bool {
self.len() == 1
}
fn patterns(&self) -> Vec<String> {
self.iter().map(|v| v.as_ref().to_string()).collect()
}
}
macro_rules! array_patterns (($tp:ty, $num:tt) => {
impl IntoPattern for [$tp; $num] {
fn is_single(&self) -> bool {
$num == 1
}
fn patterns(&self) -> Vec<String> {
self.iter().map(|v| v.to_string()).collect()
}
}
});
array_patterns!(&str, 1);
array_patterns!(&str, 2);
array_patterns!(&str, 3);
array_patterns!(&str, 4);
array_patterns!(&str, 5);
array_patterns!(&str, 6);
array_patterns!(&str, 7);
array_patterns!(&str, 8);
array_patterns!(&str, 9);
array_patterns!(&str, 10);
array_patterns!(&str, 11);
array_patterns!(&str, 12);
array_patterns!(&str, 13);
array_patterns!(&str, 14);
array_patterns!(&str, 15);
array_patterns!(&str, 16);
array_patterns!(String, 1);
array_patterns!(String, 2);
array_patterns!(String, 3);
array_patterns!(String, 4);
array_patterns!(String, 5);
array_patterns!(String, 6);
array_patterns!(String, 7);
array_patterns!(String, 8);
array_patterns!(String, 9);
array_patterns!(String, 10);
array_patterns!(String, 11);
array_patterns!(String, 12);
array_patterns!(String, 13);
array_patterns!(String, 14);
array_patterns!(String, 15);
array_patterns!(String, 16);
#[cfg(feature = "http")]
mod url;
#[cfg(feature = "http")]
pub use self::url::{Quoter, Url};
#[cfg(feature = "http")]
mod http_support {
use super::ResourcePath;
use http::Uri;
impl ResourcePath for Uri {
fn path(&self) -> &str {
self.path()
}
}
}
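A usage sketch for the traits above (not part of this diff; the `Router` API names are taken from the tests in `de.rs` elsewhere in this compare): because `&str`, `String`, arrays, and `Vec<_>` all implement `IntoPattern`, a path can be registered from a single pattern or from a set of alternatives.

    use actix_router::{Path, Router};

    fn main() {
        let mut builder = Router::<&'static str>::build();
        builder.path("/user/{id}", "single pattern");             // &str pattern
        builder.path(vec!["/a/{x}", "/b/{x}"], "multi pattern");  // Vec<&str> pattern set
        let router = builder.finish();

        let mut path = Path::new("/b/42");
        // `recognize` mutates `path`, recording the matched dynamic segments.
        assert!(router.recognize(&mut path).is_some());
        assert_eq!(path.get("x"), Some("42"));
    }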

actix-router/src/path.rs (new file, 222 lines)
View File

@@ -0,0 +1,222 @@
use std::ops::Index;
use serde::de;
use crate::de::PathDeserializer;
use crate::{Resource, ResourcePath};
#[derive(Debug, Clone, Copy)]
pub(crate) enum PathItem {
Static(&'static str),
Segment(u16, u16),
}
/// Resource path match information
///
/// If resource path contains variable patterns, `Path` stores them.
#[derive(Debug)]
pub struct Path<T> {
path: T,
pub(crate) skip: u16,
pub(crate) segments: Vec<(&'static str, PathItem)>,
}
impl<T: Default> Default for Path<T> {
fn default() -> Self {
Path {
path: T::default(),
skip: 0,
segments: Vec::new(),
}
}
}
impl<T: Clone> Clone for Path<T> {
fn clone(&self) -> Self {
Path {
path: self.path.clone(),
skip: self.skip,
segments: self.segments.clone(),
}
}
}
impl<T: ResourcePath> Path<T> {
pub fn new(path: T) -> Path<T> {
Path {
path,
skip: 0,
segments: Vec::new(),
}
}
/// Get reference to inner path instance
#[inline]
pub fn get_ref(&self) -> &T {
&self.path
}
/// Get mutable reference to inner path instance
#[inline]
pub fn get_mut(&mut self) -> &mut T {
&mut self.path
}
/// Path
#[inline]
pub fn path(&self) -> &str {
let skip = self.skip as usize;
let path = self.path.path();
if skip <= path.len() {
&path[skip..]
} else {
""
}
}
/// Set new path
#[inline]
pub fn set(&mut self, path: T) {
self.skip = 0;
self.path = path;
self.segments.clear();
}
/// Reset state
#[inline]
pub fn reset(&mut self) {
self.skip = 0;
self.segments.clear();
}
/// Skip first `n` chars in path
#[inline]
pub fn skip(&mut self, n: u16) {
self.skip += n;
}
pub(crate) fn add(&mut self, name: &'static str, value: PathItem) {
match value {
PathItem::Static(s) => self.segments.push((name, PathItem::Static(s))),
PathItem::Segment(begin, end) => self
.segments
.push((name, PathItem::Segment(self.skip + begin, self.skip + end))),
}
}
#[doc(hidden)]
pub fn add_static(&mut self, name: &'static str, value: &'static str) {
self.segments.push((name, PathItem::Static(value)));
}
/// Check if there are any matched patterns
#[inline]
pub fn is_empty(&self) -> bool {
self.segments.is_empty()
}
/// Check number of extracted parameters
#[inline]
pub fn len(&self) -> usize {
self.segments.len()
}
/// Get matched parameter by name without type conversion
pub fn get(&self, key: &str) -> Option<&str> {
for item in self.segments.iter() {
if key == item.0 {
return match item.1 {
PathItem::Static(ref s) => Some(&s),
PathItem::Segment(s, e) => {
Some(&self.path.path()[(s as usize)..(e as usize)])
}
};
}
}
if key == "tail" {
Some(&self.path.path()[(self.skip as usize)..])
} else {
None
}
}
/// Get unprocessed part of the path
pub fn unprocessed(&self) -> &str {
&self.path.path()[(self.skip as usize)..]
}
/// Get matched parameter by name.
///
/// If keyed parameter is not available empty string is used as default
/// value.
pub fn query(&self, key: &str) -> &str {
if let Some(s) = self.get(key) {
s
} else {
""
}
}
/// Return iterator to items in parameter container
pub fn iter(&self) -> PathIter<'_, T> {
PathIter {
idx: 0,
params: self,
}
}
/// Try to deserialize matching parameters to a specified type `U`
pub fn load<'de, U: serde::Deserialize<'de>>(&'de self) -> Result<U, de::value::Error> {
de::Deserialize::deserialize(PathDeserializer::new(self))
}
}
#[derive(Debug)]
pub struct PathIter<'a, T> {
idx: usize,
params: &'a Path<T>,
}
impl<'a, T: ResourcePath> Iterator for PathIter<'a, T> {
type Item = (&'a str, &'a str);
#[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> {
if self.idx < self.params.len() {
let idx = self.idx;
let res = match self.params.segments[idx].1 {
PathItem::Static(ref s) => &s,
PathItem::Segment(s, e) => &self.params.path.path()[(s as usize)..(e as usize)],
};
self.idx += 1;
return Some((&self.params.segments[idx].0, res));
}
None
}
}
impl<'a, T: ResourcePath> Index<&'a str> for Path<T> {
type Output = str;
fn index(&self, name: &'a str) -> &str {
self.get(name)
.expect("Value for parameter is not available")
}
}
impl<T: ResourcePath> Index<usize> for Path<T> {
type Output = str;
fn index(&self, idx: usize) -> &str {
match self.segments[idx].1 {
PathItem::Static(ref s) => &s,
PathItem::Segment(s, e) => &self.path.path()[(s as usize)..(e as usize)],
}
}
}
impl<T: ResourcePath> Resource<T> for Path<T> {
fn resource_path(&mut self) -> &mut Self {
self
}
}
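A sketch of typed extraction (not part of this diff; it uses `serde_derive` as the tests in `de.rs` do): `Path::load` feeds the matched segments through the `PathDeserializer` defined in `de.rs`.

    use actix_router::{Path, Router};
    use serde_derive::Deserialize;

    #[derive(Deserialize)]
    struct Params {
        key: String,
        value: u32,
    }

    fn main() {
        let mut builder = Router::<()>::build();
        builder.path("/{key}/{value}/", ());
        let router = builder.finish();

        let mut path = Path::new("/name/32/");
        assert!(router.recognize(&mut path).is_some());

        // Deserialize the two matched segments into a typed struct.
        let params: Params = path.load().unwrap();
        assert_eq!(params.key, "name");
        assert_eq!(params.value, 32);
    }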

View File

@@ -0,0 +1,948 @@
use std::cmp::min;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use regex::{escape, Regex, RegexSet};
use crate::path::{Path, PathItem};
use crate::{IntoPattern, Resource, ResourcePath};
const MAX_DYNAMIC_SEGMENTS: usize = 16;
/// ResourceDef describes an entry in resources table
///
/// Resource definition can contain only 16 dynamic segments
#[derive(Clone, Debug)]
pub struct ResourceDef {
id: u16,
tp: PatternType,
name: String,
pattern: String,
elements: Vec<PatternElement>,
}
#[derive(Debug, Clone, PartialEq)]
enum PatternElement {
Str(String),
Var(String),
}
#[derive(Clone, Debug)]
#[allow(clippy::large_enum_variant)]
enum PatternType {
Static(String),
Prefix(String),
Dynamic(Regex, Vec<&'static str>, usize),
DynamicSet(RegexSet, Vec<(Regex, Vec<&'static str>, usize)>),
}
impl ResourceDef {
/// Parse path pattern and create new `Pattern` instance.
///
/// Panics if path pattern is malformed.
pub fn new<T: IntoPattern>(path: T) -> Self {
if path.is_single() {
let patterns = path.patterns();
ResourceDef::with_prefix(&patterns[0], false)
} else {
let set = path.patterns();
let mut data = Vec::new();
let mut re_set = Vec::new();
for path in set {
let (pattern, _, _, len) = ResourceDef::parse(&path, false);
let re = match Regex::new(&pattern) {
Ok(re) => re,
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
};
// actix creates one router per thread
let names: Vec<_> = re
.capture_names()
.filter_map(|name| {
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
})
.collect();
data.push((re, names, len));
re_set.push(pattern);
}
ResourceDef {
id: 0,
tp: PatternType::DynamicSet(RegexSet::new(re_set).unwrap(), data),
elements: Vec::new(),
name: String::new(),
pattern: "".to_owned(),
}
}
}
/// Parse path pattern and create new `Pattern` instance.
///
/// Use `prefix` type instead of `static`.
///
/// Panics if path regex pattern is malformed.
pub fn prefix(path: &str) -> Self {
ResourceDef::with_prefix(path, true)
}
/// Parse path pattern and create new `Pattern` instance.
/// Inserts `/` to begging of the pattern.
///
///
/// Use `prefix` type instead of `static`.
///
/// Panics if path regex pattern is malformed.
pub fn root_prefix(path: &str) -> Self {
ResourceDef::with_prefix(&insert_slash(path), true)
}
/// Resource id
pub fn id(&self) -> u16 {
self.id
}
/// Set resource id
pub fn set_id(&mut self, id: u16) {
self.id = id;
}
/// Parse path pattern and create new `Pattern` instance with custom prefix
fn with_prefix(path: &str, for_prefix: bool) -> Self {
let path = path.to_owned();
let (pattern, elements, is_dynamic, len) = ResourceDef::parse(&path, for_prefix);
let tp = if is_dynamic {
let re = match Regex::new(&pattern) {
Ok(re) => re,
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
};
// actix creates one router per thread
let names = re
.capture_names()
.filter_map(|name| {
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
})
.collect();
PatternType::Dynamic(re, names, len)
} else if for_prefix {
PatternType::Prefix(pattern)
} else {
PatternType::Static(pattern)
};
ResourceDef {
tp,
elements,
id: 0,
name: String::new(),
pattern: path,
}
}
/// Resource pattern name
pub fn name(&self) -> &str {
&self.name
}
/// Mutable reference to a name of a resource definition.
pub fn name_mut(&mut self) -> &mut String {
&mut self.name
}
/// Path pattern of the resource
pub fn pattern(&self) -> &str {
&self.pattern
}
/// Check if path matches this pattern.
#[inline]
pub fn is_match(&self, path: &str) -> bool {
match self.tp {
PatternType::Static(ref s) => s == path,
PatternType::Prefix(ref s) => path.starts_with(s),
PatternType::Dynamic(ref re, _, _) => re.is_match(path),
PatternType::DynamicSet(ref re, _) => re.is_match(path),
}
}
/// Is prefix path a match against this resource.
pub fn is_prefix_match(&self, path: &str) -> Option<usize> {
let p_len = path.len();
let path = if path.is_empty() { "/" } else { path };
match self.tp {
PatternType::Static(ref s) => {
if s == path {
Some(p_len)
} else {
None
}
}
PatternType::Dynamic(ref re, _, len) => {
if let Some(captures) = re.captures(path) {
let mut pos = 0;
let mut passed = false;
for capture in captures.iter() {
if let Some(ref m) = capture {
if !passed {
passed = true;
continue;
}
pos = m.end();
}
}
Some(pos + len)
} else {
None
}
}
PatternType::Prefix(ref s) => {
let len = if path == s {
s.len()
} else if path.starts_with(s)
&& (s.ends_with('/') || path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return None;
};
Some(min(p_len, len))
}
PatternType::DynamicSet(ref re, ref params) => {
if let Some(idx) = re.matches(path).into_iter().next() {
let (ref pattern, _, len) = params[idx];
if let Some(captures) = pattern.captures(path) {
let mut pos = 0;
let mut passed = false;
for capture in captures.iter() {
if let Some(ref m) = capture {
if !passed {
passed = true;
continue;
}
pos = m.end();
}
}
Some(pos + len)
} else {
None
}
} else {
None
}
}
}
}
/// Is the given path and parameters a match against this pattern.
pub fn match_path<T: ResourcePath>(&self, path: &mut Path<T>) -> bool {
match self.tp {
PatternType::Static(ref s) => {
if s == path.path() {
path.skip(path.len() as u16);
true
} else {
false
}
}
PatternType::Prefix(ref s) => {
let r_path = path.path();
let len = if s == r_path {
s.len()
} else if r_path.starts_with(s)
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return false;
};
let r_path_len = r_path.len();
path.skip(min(r_path_len, len) as u16);
true
}
PatternType::Dynamic(ref re, ref names, len) => {
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = re.captures(path.path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
}
PatternType::DynamicSet(ref re, ref params) => {
if let Some(idx) = re.matches(path.path()).into_iter().next() {
let (ref pattern, ref names, len) = params[idx];
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = pattern.captures(path.path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] =
PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
} else {
false
}
}
}
}
/// Is the given path and parameters a match against this pattern?
pub fn match_path_checked<R, T, F, U>(
&self,
res: &mut R,
check: &F,
user_data: &Option<U>,
) -> bool
where
T: ResourcePath,
R: Resource<T>,
F: Fn(&R, &Option<U>) -> bool,
{
match self.tp {
PatternType::Static(ref s) => {
if s == res.resource_path().path() && check(res, user_data) {
let path = res.resource_path();
path.skip(path.len() as u16);
true
} else {
false
}
}
PatternType::Prefix(ref s) => {
let len = {
let r_path = res.resource_path().path();
if s == r_path {
s.len()
} else if r_path.starts_with(s)
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return false;
}
};
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
path.skip(min(path.path().len(), len) as u16);
true
}
PatternType::Dynamic(ref re, ref names, len) => {
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = re.captures(res.resource_path().path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
}
PatternType::DynamicSet(ref re, ref params) => {
let path = res.resource_path().path();
if let Some(idx) = re.matches(path).into_iter().next() {
let (ref pattern, ref names, len) = params[idx];
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = pattern.captures(path) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] =
PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
} else {
false
}
}
}
}
/// Build resource path from elements. Returns `true` on success.
pub fn resource_path<U, I>(&self, path: &mut String, elements: &mut U) -> bool
where
U: Iterator<Item = I>,
I: AsRef<str>,
{
match self.tp {
PatternType::Prefix(ref p) => path.push_str(p),
PatternType::Static(ref p) => path.push_str(p),
PatternType::Dynamic(..) => {
for el in &self.elements {
match *el {
PatternElement::Str(ref s) => path.push_str(s),
PatternElement::Var(_) => {
if let Some(val) = elements.next() {
path.push_str(val.as_ref())
} else {
return false;
}
}
}
}
}
PatternType::DynamicSet(..) => {
return false;
}
}
true
}
/// Build resource path from elements. Returns `true` on success.
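/// Like `resource_path`, but variable segment values are taken from a map keyed by
/// segment name.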
pub fn resource_path_named<K, V, S>(
&self,
path: &mut String,
elements: &HashMap<K, V, S>,
) -> bool
where
K: std::borrow::Borrow<str> + Eq + Hash,
V: AsRef<str>,
S: std::hash::BuildHasher,
{
match self.tp {
PatternType::Prefix(ref p) => path.push_str(p),
PatternType::Static(ref p) => path.push_str(p),
PatternType::Dynamic(..) => {
for el in &self.elements {
match *el {
PatternElement::Str(ref s) => path.push_str(s),
PatternElement::Var(ref name) => {
if let Some(val) = elements.get(name) {
path.push_str(val.as_ref())
} else {
return false;
}
}
}
}
}
PatternType::DynamicSet(..) => {
return false;
}
}
true
}
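// Parse a single dynamic segment starting at `{`: returns the variable pattern element,
// the regex fragment for its named capture group, the rest of the pattern after the
// segment, and whether this is a tail (`*`) match.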
fn parse_param(pattern: &str) -> (PatternElement, String, &str, bool) {
const DEFAULT_PATTERN: &str = "[^/]+";
const DEFAULT_PATTERN_TAIL: &str = ".*";
let mut params_nesting = 0usize;
let close_idx = pattern
.find(|c| match c {
'{' => {
params_nesting += 1;
false
}
'}' => {
params_nesting -= 1;
params_nesting == 0
}
_ => false,
})
.expect("malformed dynamic segment");
let (mut param, mut rem) = pattern.split_at(close_idx + 1);
param = &param[1..param.len() - 1]; // Remove outer brackets
let tail = rem == "*";
let (name, pattern) = match param.find(':') {
Some(idx) => {
if tail {
panic!("Custom regex is not supported for remainder match");
}
let (name, pattern) = param.split_at(idx);
(name, &pattern[1..])
}
None => (
param,
if tail {
rem = &rem[1..];
DEFAULT_PATTERN_TAIL
} else {
DEFAULT_PATTERN
},
),
};
(
PatternElement::Var(name.to_string()),
format!(r"(?P<{}>{})", &name, &pattern),
rem,
tail,
)
}
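// Parse a full path pattern into the regex source used for matching, the ordered
// static/variable elements, a flag that is false only for fully static non-tail patterns
// (i.e. whether regex matching is needed), and the char length of the trailing literal part.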
fn parse(
mut pattern: &str,
mut for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
if pattern.find('{').is_none() {
// TODO: MSRV: 1.45
#[allow(clippy::manual_strip)]
return if pattern.ends_with('*') {
let path = &pattern[..pattern.len() - 1];
let re = String::from("^") + path + "(.*)";
(re, vec![PatternElement::Str(String::from(path))], true, 0)
} else {
(
String::from(pattern),
vec![PatternElement::Str(String::from(pattern))],
false,
pattern.chars().count(),
)
};
}
let mut elements = Vec::new();
let mut re = String::from("^");
let mut dyn_elements = 0;
while let Some(idx) = pattern.find('{') {
let (prefix, rem) = pattern.split_at(idx);
elements.push(PatternElement::Str(String::from(prefix)));
re.push_str(&escape(prefix));
let (param_pattern, re_part, rem, tail) = Self::parse_param(rem);
if tail {
for_prefix = true;
}
elements.push(param_pattern);
re.push_str(&re_part);
pattern = rem;
dyn_elements += 1;
}
elements.push(PatternElement::Str(String::from(pattern)));
re.push_str(&escape(pattern));
if dyn_elements > MAX_DYNAMIC_SEGMENTS {
panic!(
"Only {} dynamic segments are allowed, provided: {}",
MAX_DYNAMIC_SEGMENTS, dyn_elements
);
}
if !for_prefix {
re.push('$');
}
(re, elements, true, pattern.chars().count())
}
}
impl Eq for ResourceDef {}
impl PartialEq for ResourceDef {
fn eq(&self, other: &ResourceDef) -> bool {
self.pattern == other.pattern
}
}
impl Hash for ResourceDef {
fn hash<H: Hasher>(&self, state: &mut H) {
self.pattern.hash(state);
}
}
impl<'a> From<&'a str> for ResourceDef {
fn from(path: &'a str) -> ResourceDef {
ResourceDef::new(path)
}
}
impl From<String> for ResourceDef {
fn from(path: String) -> ResourceDef {
ResourceDef::new(path)
}
}
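// Prepend a leading `/` to the path if it does not already start with one.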
pub(crate) fn insert_slash(path: &str) -> String {
let mut path = path.to_owned();
if !path.is_empty() && !path.starts_with('/') {
path.insert(0, '/');
};
path
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_static() {
let re = ResourceDef::new("/");
assert!(re.is_match("/"));
assert!(!re.is_match("/a"));
let re = ResourceDef::new("/name");
assert!(re.is_match("/name"));
assert!(!re.is_match("/name1"));
assert!(!re.is_match("/name/"));
assert!(!re.is_match("/name~"));
assert_eq!(re.is_prefix_match("/name"), Some(5));
assert_eq!(re.is_prefix_match("/name1"), None);
assert_eq!(re.is_prefix_match("/name/"), None);
assert_eq!(re.is_prefix_match("/name~"), None);
let re = ResourceDef::new("/name/");
assert!(re.is_match("/name/"));
assert!(!re.is_match("/name"));
assert!(!re.is_match("/name/gs"));
let re = ResourceDef::new("/user/profile");
assert!(re.is_match("/user/profile"));
assert!(!re.is_match("/user/profile/profile"));
}
#[test]
fn test_parse_param() {
let re = ResourceDef::new("/user/{id}");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let mut path = Path::new("/user/profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/1245125");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "1245125");
let re = ResourceDef::new("/v{version}/resource/{id}");
assert!(re.is_match("/v1/resource/320120"));
assert!(!re.is_match("/v/resource/1"));
assert!(!re.is_match("/resource"));
let mut path = Path::new("/v151/resource/adage32");
assert!(re.match_path(&mut path));
assert_eq!(path.get("version").unwrap(), "151");
assert_eq!(path.get("id").unwrap(), "adage32");
let re = ResourceDef::new("/{id:[[:digit:]]{6}}");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let mut path = Path::new("/012345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "012345");
}
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_dynamic_set() {
let re = ResourceDef::new(vec![
"/user/{id}",
"/v{version}/resource/{id}",
"/{id:[[:digit:]]{6}}",
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let mut path = Path::new("/user/profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/1245125");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "1245125");
assert!(re.is_match("/v1/resource/320120"));
assert!(!re.is_match("/v/resource/1"));
assert!(!re.is_match("/resource"));
let mut path = Path::new("/v151/resource/adage32");
assert!(re.match_path(&mut path));
assert_eq!(path.get("version").unwrap(), "151");
assert_eq!(path.get("id").unwrap(), "adage32");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let mut path = Path::new("/012345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "012345");
let re = ResourceDef::new([
"/user/{id}",
"/v{version}/resource/{id}",
"/{id:[[:digit:]]{6}}",
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let re = ResourceDef::new([
"/user/{id}".to_string(),
"/v{version}/resource/{id}".to_string(),
"/{id:[[:digit:]]{6}}".to_string(),
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
}
#[test]
fn test_parse_tail() {
let re = ResourceDef::new("/user/-{id}*");
let mut path = Path::new("/user/-profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/-2345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let mut path = Path::new("/user/-2345/");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345/");
let mut path = Path::new("/user/-2345/sdg");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345/sdg");
}
#[test]
fn test_static_tail() {
let re = ResourceDef::new("/user*");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(re.is_match("/user/2345/"));
assert!(re.is_match("/user/2345/sdg"));
let re = ResourceDef::new("/user/*");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(re.is_match("/user/2345/"));
assert!(re.is_match("/user/2345/sdg"));
}
#[cfg(feature = "http")]
#[test]
fn test_parse_urlencoded_param() {
use std::convert::TryFrom;
let re = ResourceDef::new("/user/{id}/test");
let mut path = Path::new("/user/2345/test");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let mut path = Path::new("/user/qwe%25/test");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%25");
let uri = http::Uri::try_from("/user/qwe%25/test").unwrap();
let mut path = Path::new(uri);
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%25");
}
#[test]
fn test_resource_prefix() {
let re = ResourceDef::prefix("/name");
assert!(re.is_match("/name"));
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/test/test"));
assert!(re.is_match("/name1"));
assert!(re.is_match("/name~"));
assert_eq!(re.is_prefix_match("/name"), Some(5));
assert_eq!(re.is_prefix_match("/name/"), Some(5));
assert_eq!(re.is_prefix_match("/name/test/test"), Some(5));
assert_eq!(re.is_prefix_match("/name1"), None);
assert_eq!(re.is_prefix_match("/name~"), None);
let re = ResourceDef::prefix("/name/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
let re = ResourceDef::root_prefix("name/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
}
#[test]
fn test_resource_prefix_dynamic() {
let re = ResourceDef::prefix("/{name}/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
assert_eq!(re.is_prefix_match("/name/"), Some(6));
assert_eq!(re.is_prefix_match("/name/gs"), Some(6));
assert_eq!(re.is_prefix_match("/name"), None);
let mut path = Path::new("/test2/");
assert!(re.match_path(&mut path));
assert_eq!(&path["name"], "test2");
assert_eq!(&path[0], "test2");
let mut path = Path::new("/test2/subpath1/subpath2/index.html");
assert!(re.match_path(&mut path));
assert_eq!(&path["name"], "test2");
assert_eq!(&path[0], "test2");
}
#[test]
fn test_resource_path() {
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/test");
assert!(resource.resource_path(&mut s, &mut (&["user1"]).iter()));
assert_eq!(s, "/user/user1/test");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}/test");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/test");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}/");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/");
let mut s = String::new();
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
let mut s = String::new();
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/");
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
let mut s = String::new();
assert!(resource.resource_path(&mut s, &mut vec!["item", "item2"].into_iter()));
assert_eq!(s, "/user/item/item2/");
let mut map = HashMap::new();
map.insert("item1", "item");
let mut s = String::new();
assert!(!resource.resource_path_named(&mut s, &map));
let mut s = String::new();
map.insert("item2", "item2");
assert!(resource.resource_path_named(&mut s, &map));
assert_eq!(s, "/user/item/item2/");
}
}

259
actix-router/src/router.rs Normal file
View File

@@ -0,0 +1,259 @@
use crate::{IntoPattern, Resource, ResourceDef, ResourcePath};
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ResourceId(pub u16);
/// Information about current resource
#[derive(Clone, Debug)]
pub struct ResourceInfo {
resource: ResourceId,
}
/// Resource router.
pub struct Router<T, U = ()>(Vec<(ResourceDef, T, Option<U>)>);
impl<T, U> Router<T, U> {
pub fn build() -> RouterBuilder<T, U> {
RouterBuilder {
resources: Vec::new(),
}
}
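/// Finds the first registered resource whose pattern matches the path and returns a
/// shared reference to its value together with its `ResourceId`.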
pub fn recognize<R, P>(&self, resource: &mut R) -> Option<(&T, ResourceId)>
where
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter() {
if item.0.match_path(resource.resource_path()) {
return Some((&item.1, ResourceId(item.0.id())));
}
}
None
}
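/// Same as `recognize`, but returns a mutable reference to the matched value.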
pub fn recognize_mut<R, P>(&mut self, resource: &mut R) -> Option<(&mut T, ResourceId)>
where
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter_mut() {
if item.0.match_path(resource.resource_path()) {
return Some((&mut item.1, ResourceId(item.0.id())));
}
}
None
}
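/// Like `recognize`, but a resource only matches when the `check` closure also returns
/// `true` for it, given the optional per-resource user data.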
pub fn recognize_checked<R, P, F>(
&self,
resource: &mut R,
check: F,
) -> Option<(&T, ResourceId)>
where
F: Fn(&R, &Option<U>) -> bool,
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter() {
if item.0.match_path_checked(resource, &check, &item.2) {
return Some((&item.1, ResourceId(item.0.id())));
}
}
None
}
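/// Mutable variant of `recognize_checked`.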
pub fn recognize_mut_checked<R, P, F>(
&mut self,
resource: &mut R,
check: F,
) -> Option<(&mut T, ResourceId)>
where
F: Fn(&R, &Option<U>) -> bool,
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter_mut() {
if item.0.match_path_checked(resource, &check, &item.2) {
return Some((&mut item.1, ResourceId(item.0.id())));
}
}
None
}
}
pub struct RouterBuilder<T, U = ()> {
resources: Vec<(ResourceDef, T, Option<U>)>,
}
impl<T, U> RouterBuilder<T, U> {
/// Register resource for specified path.
pub fn path<P: IntoPattern>(
&mut self,
path: P,
resource: T,
) -> &mut (ResourceDef, T, Option<U>) {
self.resources
.push((ResourceDef::new(path), resource, None));
self.resources.last_mut().unwrap()
}
/// Register resource for specified path prefix.
pub fn prefix(&mut self, prefix: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) {
self.resources
.push((ResourceDef::prefix(prefix), resource, None));
self.resources.last_mut().unwrap()
}
/// Register resource for a `ResourceDef`.
pub fn rdef(&mut self, rdef: ResourceDef, resource: T) -> &mut (ResourceDef, T, Option<U>) {
self.resources.push((rdef, resource, None));
self.resources.last_mut().unwrap()
}
/// Finish configuration and create router instance.
pub fn finish(self) -> Router<T, U> {
Router(self.resources)
}
}
#[cfg(test)]
mod tests {
use crate::path::Path;
use crate::router::{ResourceId, Router};
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_recognizer_1() {
let mut router = Router::<usize>::build();
router.path("/name", 10).0.set_id(0);
router.path("/name/{val}", 11).0.set_id(1);
router.path("/name/{val}/index.html", 12).0.set_id(2);
router.path("/file/{file}.{ext}", 13).0.set_id(3);
router.path("/v{val}/{val2}/index.html", 14).0.set_id(4);
router.path("/v/{tail:.*}", 15).0.set_id(5);
router.path("/test2/{test}.html", 16).0.set_id(6);
router.path("/{test}/index.html", 17).0.set_id(7);
let mut router = router.finish();
let mut path = Path::new("/unknown");
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/name");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
assert_eq!(info, ResourceId(0));
assert!(path.is_empty());
let mut path = Path::new("/name/value");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(info, ResourceId(1));
assert_eq!(path.get("val").unwrap(), "value");
assert_eq!(&path["val"], "value");
let mut path = Path::new("/name/value2/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 12);
assert_eq!(info, ResourceId(2));
assert_eq!(path.get("val").unwrap(), "value2");
let mut path = Path::new("/file/file.gz");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 13);
assert_eq!(info, ResourceId(3));
assert_eq!(path.get("file").unwrap(), "file");
assert_eq!(path.get("ext").unwrap(), "gz");
let mut path = Path::new("/vtest/ttt/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 14);
assert_eq!(info, ResourceId(4));
assert_eq!(path.get("val").unwrap(), "test");
assert_eq!(path.get("val2").unwrap(), "ttt");
let mut path = Path::new("/v/blah-blah/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 15);
assert_eq!(info, ResourceId(5));
assert_eq!(path.get("tail").unwrap(), "blah-blah/index.html");
let mut path = Path::new("/test2/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 16);
assert_eq!(info, ResourceId(6));
assert_eq!(path.get("test").unwrap(), "index");
let mut path = Path::new("/bbb/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 17);
assert_eq!(info, ResourceId(7));
assert_eq!(path.get("test").unwrap(), "bbb");
}
#[test]
fn test_recognizer_2() {
let mut router = Router::<usize>::build();
router.path("/index.json", 10);
router.path("/{source}.json", 11);
let mut router = router.finish();
let mut path = Path::new("/index.json");
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test.json");
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
}
#[test]
fn test_recognizer_with_prefix() {
let mut router = Router::<usize>::build();
router.path("/name", 10).0.set_id(0);
router.path("/name/{val}", 11).0.set_id(1);
let mut router = router.finish();
let mut path = Path::new("/name");
path.skip(5);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test/name");
path.skip(5);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test/name/value");
path.skip(5);
let (h, id) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(id, ResourceId(1));
assert_eq!(path.get("val").unwrap(), "value");
assert_eq!(&path["val"], "value");
// same patterns
let mut router = Router::<usize>::build();
router.path("/name", 10);
router.path("/name/{val}", 11);
let mut router = router.finish();
let mut path = Path::new("/name");
path.skip(6);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test2/name");
path.skip(6);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test2/name-test");
path.skip(6);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test2/name/ttt");
path.skip(6);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(&path["val"], "ttt");
}
}

249
actix-router/src/url.rs Normal file
View File

@@ -0,0 +1,249 @@
use crate::ResourcePath;
#[allow(dead_code)]
const GEN_DELIMS: &[u8] = b":/?#[]@";
#[allow(dead_code)]
const SUB_DELIMS_WITHOUT_QS: &[u8] = b"!$'()*,";
#[allow(dead_code)]
const SUB_DELIMS: &[u8] = b"!$'()*,+?=;";
#[allow(dead_code)]
const RESERVED: &[u8] = b":/?#[]@!$'()*,+?=;";
#[allow(dead_code)]
const UNRESERVED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
-._~";
const ALLOWED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
-._~
!$'()*,";
const QS: &[u8] = b"+&=;b";
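// The quoter tables are 16-byte (128-bit) bitmaps indexed by ASCII byte value;
// `bit_at` tests and `set_bit` sets the corresponding bit.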
#[inline]
fn bit_at(array: &[u8], ch: u8) -> bool {
array[(ch >> 3) as usize] & (1 << (ch & 7)) != 0
}
#[inline]
fn set_bit(array: &mut [u8], ch: u8) {
array[(ch >> 3) as usize] |= 1 << (ch & 7)
}
thread_local! {
static DEFAULT_QUOTER: Quoter = Quoter::new(b"@:", b"/+");
}
#[derive(Default, Clone, Debug)]
pub struct Url {
uri: http::Uri,
path: Option<String>,
}
impl Url {
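/// Wrap an `http::Uri`, re-quoting its path with the default quoter (safe: `@:`, protected: `/+`).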
pub fn new(uri: http::Uri) -> Url {
let path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
Url { uri, path }
}
pub fn with_quoter(uri: http::Uri, quoter: &Quoter) -> Url {
Url {
path: quoter.requote(uri.path().as_bytes()),
uri,
}
}
pub fn uri(&self) -> &http::Uri {
&self.uri
}
pub fn path(&self) -> &str {
if let Some(ref s) = self.path {
s
} else {
self.uri.path()
}
}
#[inline]
pub fn update(&mut self, uri: &http::Uri) {
self.uri = uri.clone();
self.path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
}
#[inline]
pub fn update_with_quoter(&mut self, uri: &http::Uri, quoter: &Quoter) {
self.uri = uri.clone();
self.path = quoter.requote(uri.path().as_bytes());
}
}
impl ResourcePath for Url {
#[inline]
fn path(&self) -> &str {
self.path()
}
}
pub struct Quoter {
safe_table: [u8; 16],
protected_table: [u8; 16],
}
impl Quoter {
pub fn new(safe: &[u8], protected: &[u8]) -> Quoter {
let mut q = Quoter {
safe_table: [0; 16],
protected_table: [0; 16],
};
// prepare safe table
for i in 0..128 {
if ALLOWED.contains(&i) {
set_bit(&mut q.safe_table, i);
}
if QS.contains(&i) {
set_bit(&mut q.safe_table, i);
}
}
for ch in safe {
set_bit(&mut q.safe_table, *ch)
}
// prepare protected table
for ch in protected {
set_bit(&mut q.safe_table, *ch);
set_bit(&mut q.protected_table, *ch);
}
q
}
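/// Re-quote a percent-encoded path: decode percent sequences except those mapping to
/// "protected" characters, which stay encoded. Returns `None` if the input contains no
/// percent sequences and can be used as-is.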
pub fn requote(&self, val: &[u8]) -> Option<String> {
let mut has_pct = 0;
let mut pct = [b'%', 0, 0];
let mut idx = 0;
let mut cloned: Option<Vec<u8>> = None;
let len = val.len();
while idx < len {
let ch = val[idx];
if has_pct != 0 {
pct[has_pct] = val[idx];
has_pct += 1;
if has_pct == 3 {
has_pct = 0;
let buf = cloned.as_mut().unwrap();
if let Some(ch) = restore_ch(pct[1], pct[2]) {
if ch < 128 {
if bit_at(&self.protected_table, ch) {
buf.extend_from_slice(&pct);
idx += 1;
continue;
}
if bit_at(&self.safe_table, ch) {
buf.push(ch);
idx += 1;
continue;
}
}
buf.push(ch);
} else {
buf.extend_from_slice(&pct[..]);
}
}
} else if ch == b'%' {
has_pct = 1;
if cloned.is_none() {
let mut c = Vec::with_capacity(len);
c.extend_from_slice(&val[..idx]);
cloned = Some(c);
}
} else if let Some(ref mut cloned) = cloned {
cloned.push(ch)
}
idx += 1;
}
if let Some(data) = cloned {
// Unsafe: we get data from http::Uri, which does utf-8 checks already
// this code only decodes valid pct encoded values
Some(unsafe { String::from_utf8_unchecked(data) })
} else {
None
}
}
}
#[inline]
fn from_hex(v: u8) -> Option<u8> {
if (b'0'..=b'9').contains(&v) {
Some(v - 0x30) // ord('0') == 0x30
} else if (b'A'..=b'F').contains(&v) {
Some(v - 0x41 + 10) // ord('A') == 0x41
} else if (b'a'..=b'f').contains(&v) {
Some(v - 0x61 + 10) // ord('a') == 0x61
} else {
None
}
}
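// e.g. restore_ch(b'2', b'F') == Some(0x2f), the byte encoded by "%2F"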
#[inline]
fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| d1 << 4 | d2))
}
#[cfg(test)]
mod tests {
use http::Uri;
use std::convert::TryFrom;
use super::*;
use crate::{Path, ResourceDef};
#[test]
fn test_parse_url() {
let re = ResourceDef::new("/user/{id}/test");
let url = Uri::try_from("/user/2345/test").unwrap();
let mut path = Path::new(Url::new(url));
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let url = Uri::try_from("/user/qwe%25/test").unwrap();
let mut path = Path::new(Url::new(url));
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%");
let url = Uri::try_from("/user/qwe%25rty/test").unwrap();
let mut path = Path::new(Url::new(url));
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%rty");
}
#[test]
fn test_from_hex() {
let hex = b"0123456789abcdefABCDEF";
for i in 0..256 {
let c = i as u8;
if hex.contains(&c) {
assert!(from_hex(c).is_some())
} else {
assert!(from_hex(c).is_none())
}
}
let expected = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15,
];
for i in 0..hex.len() {
assert_eq!(from_hex(hex[i]).unwrap(), expected[i]);
}
}
}

View File

@@ -1,22 +1,6 @@
# Changes
## Unreleased - 2021-xx-xx
* Add `io-uring` feature for enabling async file I/O on linux. [#374]
* The `spawn` method can now resolve with non-unit outputs. [#369]
[#369]: https://github.com/actix/actix-net/pull/369
[#374]: https://github.com/actix/actix-net/pull/374
## 2.2.0 - 2021-03-29
* **BREAKING** `ActixStream::{poll_read_ready, poll_write_ready}` methods now return
a `Ready` object in the `Ok` variant. [#293]
* Breakage is acceptable since `ActixStream` was not intended to be public.
[#293]: https://github.com/actix/actix-net/pull/293
## 2.1.0 - 2021-02-24
* Add `ActixStream` extension trait to include readiness methods. [#276]
* Re-export `tokio::net::TcpSocket` in `net` module [#282]
@@ -77,7 +61,10 @@
## 2.0.0-beta.1 - 2020-12-28
### Added
* Add `System::attach_to_tokio` method. [#173]
### Changed
* Update `tokio` dependency to `1.0`. [#236]
* Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep`
to stay aligned with Tokio's naming. [#236]
@@ -85,19 +72,27 @@
* These methods now accept `&self` when calling. [#236]
* Remove `'static` lifetime requirement for `System::run` and `Builder::run`. [#236]
* `Arbiter::spawn` now panics when `System` is not in scope. [#207]
### Fixed
* Fix work load issue by removing `PENDING` thread local. [#207]
[#207]: https://github.com/actix/actix-net/pull/207
[#236]: https://github.com/actix/actix-net/pull/236
## [1.1.1] - 2020-04-30
### Fixed
## 1.1.1 - 2020-04-30
* Fix memory leak due to [#94] (see [#129] for more detail)
[#129]: https://github.com/actix/actix-net/issues/129
## [1.1.0] - 2020-04-08
**This version has been yanked.**
### Added
## 1.1.0 - 2020-04-08 (YANKED)
* Expose `System::is_set` to check if the current system has been started [#99]
* Add `Arbiter::is_running` to check if event loop is running [#124]
* Add `Arbiter::local_join` associated function
@@ -107,57 +102,96 @@
[#99]: https://github.com/actix/actix-net/pull/99
[#124]: https://github.com/actix/actix-net/pull/124
## [1.0.0] - 2019-12-11
## 1.0.0 - 2019-12-11
* Update dependencies
## [1.0.0-alpha.3] - 2019-12-07
### Fixed
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to tokio 0.2
* Fix compilation on non-unix platforms
### Changed
* Migrate to tokio 0.2
## [1.0.0-alpha.2] - 2019-12-02
Added
## 1.0.0-alpha.2 - 2019-12-02
* Export `main` and `test` attribute macros
* Export `time` module (re-export of tokio-timer)
* Export `net` module (re-export of tokio-net)
## 1.0.0-alpha.1 - 2019-11-22
## [1.0.0-alpha.1] - 2019-11-22
### Changed
* Migrate to std::future and tokio 0.2
## 0.2.6 - 2019-11-14
* Allow to join arbiter's thread. #60
## [0.2.6] - 2019-11-14
### Fixed
* Fix arbiter's thread panic message.
### Added
* Allow to join arbiter's thread. #60
## [0.2.5] - 2019-09-02
### Added
## 0.2.5 - 2019-09-02
* Add arbiter specific storage
## 0.2.4 - 2019-07-17
## [0.2.4] - 2019-07-17
### Changed
* Avoid a copy of the Future when initializing the Box. #29
## 0.2.3 - 2019-06-22
* Allow to start System using existing CurrentThread Handle #22
## [0.2.3] - 2019-06-22
### Added
* Allow to start System using existing CurrentThread Handle #22
## 0.2.2 - 2019-03-28
## [0.2.2] - 2019-03-28
### Changed
* Moved `blocking` module to `actix-threadpool` crate
## 0.2.1 - 2019-03-11
## [0.2.1] - 2019-03-11
### Added
* Added `blocking` module
* Added `Arbiter::exec_fn` - execute fn on the arbiter's thread
* Added `Arbiter::exec` - execute fn on the arbiter's thread and wait result
* Arbiter::exec_fn - execute fn on the arbiter's thread
* Arbiter::exec - execute fn on the arbiter's thread and wait result
## 0.2.0 - 2019-03-06
## [0.2.0] - 2019-03-06
* `run` method returns `io::Result<()>`
* Removed `Handle`
## 0.1.0 - 2018-12-09
## [0.1.0] - 2018-12-09
* Initial release

View File

@@ -1,6 +1,6 @@
[package]
name = "actix-rt"
version = "2.2.0"
version = "2.0.2"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
@@ -8,7 +8,7 @@ authors = [
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
keywords = ["async", "futures", "io", "runtime"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-rt"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
@@ -21,16 +21,12 @@ path = "src/lib.rs"
[features]
default = ["macros"]
macros = ["actix-macros"]
io-uring = ["tokio-uring"]
[dependencies]
actix-macros = { version = "0.2.0", optional = true }
futures-core = { version = "0.3", default-features = false }
tokio = { version = "1.3", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
[target.'cfg(target_os = "linux")'.dependencies]
tokio-uring = { version = "0.1", optional = true }
tokio = { version = "1.2", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
[dev-dependencies]
tokio = { version = "1.2", features = ["full"] }

View File

@@ -2,13 +2,4 @@
> Tokio-based single-threaded async runtime for the Actix ecosystem.
[![crates.io](https://img.shields.io/crates/v/actix-rt?label=latest)](https://crates.io/crates/actix-rt)
[![Documentation](https://docs.rs/actix-rt/badge.svg?version=2.2.0)](https://docs.rs/actix-rt/2.2.0)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-rt.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-rt/2.2.0/status.svg)](https://deps.rs/crate/actix-rt/2.2.0)
![Download](https://img.shields.io/crates/d/actix-rt.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/WghFtEH6Hb)
See crate documentation for more: https://docs.rs/actix-rt.

View File

@@ -1,60 +0,0 @@
//! An example of how to build a multi-thread Tokio runtime for an Actix System,
//! then spawn async tasks that can make use of the Tokio runtime's work stealing.
use actix_rt::System;
fn main() {
System::with_tokio_rt(|| {
// build system with a multi-thread tokio runtime.
tokio::runtime::Builder::new_multi_thread()
.worker_threads(2)
.enable_all()
.build()
.unwrap()
})
.block_on(async_main());
}
// async main function that acts like #[actix_web::main] or #[tokio::main]
async fn async_main() {
let (tx, rx) = tokio::sync::oneshot::channel();
// get a handle to system arbiter and spawn async task on it
System::current().arbiter().spawn(async {
// use tokio::spawn to get inside the context of multi thread tokio runtime
let h1 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
});
// work stealing occurs for this task spawn
let h2 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
});
h1.await.unwrap();
h2.await.unwrap();
let _ = tx.send(());
});
rx.await.unwrap();
let (tx, rx) = tokio::sync::oneshot::channel();
let now = std::time::Instant::now();
// without additional tokio::spawn, all spawned tasks run on single thread
System::current().arbiter().spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
let _ = tx.send(());
});
// the previously spawned task has blocked the system arbiter thread,
// so this task will wait for 2 seconds until it can run
System::current().arbiter().spawn(async move {
println!("thread id is {:?}", std::thread::current().id());
assert!(now.elapsed() > std::time::Duration::from_secs(2));
});
rx.await.unwrap();
}

View File

@@ -9,9 +9,12 @@ use std::{
};
use futures_core::ready;
use tokio::sync::mpsc;
use tokio::{sync::mpsc, task::LocalSet};
use crate::system::{System, SystemCommand};
use crate::{
runtime::{default_tokio_runtime, Runtime},
system::{System, SystemCommand},
};
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
@@ -95,19 +98,16 @@ impl Arbiter {
///
/// # Panics
/// Panics if a [System] is not registered on the current thread.
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
#[allow(clippy::new_without_default)]
pub fn new() -> Arbiter {
Self::with_tokio_rt(|| {
crate::runtime::default_tokio_runtime()
.expect("Cannot create new Arbiter's Runtime.")
default_tokio_runtime().expect("Cannot create new Arbiter's Runtime.")
})
}
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
where
@@ -127,7 +127,7 @@ impl Arbiter {
.spawn({
let tx = tx.clone();
move || {
let rt = crate::runtime::Runtime::from(runtime_factory());
let rt = Runtime::from(runtime_factory());
let hnd = ArbiterHandle::new(tx);
System::set_current(sys);
@@ -159,67 +159,15 @@ impl Arbiter {
Arbiter { tx, thread_handle }
}
/// Spawn a new Arbiter thread and start its event loop with `tokio-uring` runtime.
///
/// # Panics
/// Panics if a [System] is not registered on the current thread.
#[cfg(all(target_os = "linux", feature = "io-uring"))]
#[allow(clippy::new_without_default)]
pub fn new() -> Arbiter {
let sys = System::current();
let system_id = sys.id();
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
let (tx, rx) = mpsc::unbounded_channel();
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
let thread_handle = thread::Builder::new()
.name(name.clone())
.spawn({
let tx = tx.clone();
move || {
let hnd = ArbiterHandle::new(tx);
System::set_current(sys);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
// register arbiter
let _ = System::current()
.tx()
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
ready_tx.send(()).unwrap();
// run arbiter event processing loop
tokio_uring::start(ArbiterRunner { rx });
// deregister arbiter
let _ = System::current()
.tx()
.send(SystemCommand::DeregisterArbiter(arb_id));
}
})
.unwrap_or_else(|err| {
panic!("Cannot spawn Arbiter's thread: {:?}. {:?}", &name, err)
});
ready_rx.recv().unwrap();
Arbiter { tx, thread_handle }
}
/// Sets up an Arbiter runner in a new System using the environment's local set.
pub(crate) fn in_new_system() -> ArbiterHandle {
/// Sets up an Arbiter runner in a new System using the provided runtime local task set.
pub(crate) fn in_new_system(local: &LocalSet) -> ArbiterHandle {
let (tx, rx) = mpsc::unbounded_channel();
let hnd = ArbiterHandle::new(tx);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
crate::spawn(ArbiterRunner { rx });
local.spawn_local(ArbiterRunner { rx });
hnd
}

View File

@@ -32,10 +32,6 @@
//! arbiter.stop();
//! arbiter.join().unwrap();
//! ```
//!
//! # `io-uring` Support
//! There is experimental support for using io-uring with this crate by enabling the
//! `io-uring` feature. For now, it is semver exempt.
#![deny(rust_2018_idioms, nonstandard_style)]
#![allow(clippy::type_complexity)]
@@ -43,9 +39,6 @@
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
#[cfg(all(not(target_os = "linux"), feature = "io-uring"))]
compile_error!("io_uring is a linux only feature.");
use std::future::Future;
use tokio::task::JoinHandle;
@@ -53,10 +46,7 @@ use tokio::task::JoinHandle;
// Cannot define a main macro when compiled into test harness.
// Workaround for https://github.com/rust-lang/rust/issues/62127.
#[cfg(all(feature = "macros", not(test)))]
pub use actix_macros::main;
#[cfg(feature = "macros")]
pub use actix_macros::test;
pub use actix_macros::{main, test};
mod arbiter;
mod runtime;
@@ -82,14 +72,9 @@ pub mod signal {
pub mod net {
//! TCP/UDP/Unix bindings (mostly Tokio re-exports).
use std::{
future::Future,
io,
task::{Context, Poll},
};
use std::task::{Context, Poll};
pub use tokio::io::Ready;
use tokio::io::{AsyncRead, AsyncWrite, Interest};
use tokio::io::{AsyncRead, AsyncWrite};
pub use tokio::net::UdpSocket;
pub use tokio::net::{TcpListener, TcpSocket, TcpStream};
@@ -97,55 +82,36 @@ pub mod net {
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
/// Extension trait over async read+write types that can also signal readiness.
#[doc(hidden)]
pub trait ActixStream: AsyncRead + AsyncWrite + Unpin {
pub trait ActixStream: AsyncRead + AsyncWrite + Unpin + 'static {
/// Poll stream and check read readiness of Self.
///
/// See [tokio::net::TcpStream::poll_read_ready] for detail on intended use.
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>>;
/// Poll stream and check write readiness of Self.
///
/// See [tokio::net::TcpStream::poll_write_ready] for detail on intended use.
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>>;
}
impl ActixStream for TcpStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
TcpStream::poll_read_ready(self, cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
TcpStream::poll_write_ready(self, cx)
}
}
#[cfg(unix)]
impl ActixStream for UnixStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
UnixStream::poll_read_ready(self, cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
}
impl<Io: ActixStream + ?Sized> ActixStream for Box<Io> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_read_ready(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_write_ready(cx)
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
UnixStream::poll_write_ready(self, cx)
}
}
}
@@ -165,41 +131,14 @@ pub mod task {
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
}
/// Spawns a future on the current thread as a new task.
///
/// If not immediately awaited, the task can be cancelled using [`JoinHandle::abort`].
///
/// The provided future is spawned as a new task; therefore, panics are caught.
/// Spawns a future on the current thread.
///
/// # Panics
/// Panics if Actix system is not running.
///
/// # Examples
/// ```
/// # use std::time::Duration;
/// # actix_rt::Runtime::new().unwrap().block_on(async {
/// // task resolves successfully
/// assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
///
/// // task panics
/// assert!(actix_rt::spawn(async {
/// panic!("panic is caught at task boundary");
/// })
/// .await
/// .unwrap_err()
/// .is_panic());
///
/// // task is cancelled before completion
/// let handle = actix_rt::spawn(actix_rt::time::sleep(Duration::from_secs(100)));
/// handle.abort();
/// assert!(handle.await.unwrap_err().is_cancelled());
/// # });
/// ```
#[inline]
pub fn spawn<Fut>(f: Fut) -> JoinHandle<Fut::Output>
pub fn spawn<Fut>(f: Fut) -> JoinHandle<()>
where
Fut: Future + 'static,
Fut::Output: 'static,
Fut: Future<Output = ()> + 'static,
{
tokio::task::spawn_local(f)
}

View File

@@ -31,6 +31,11 @@ impl Runtime {
})
}
/// Reference to local task set.
pub(crate) fn local_set(&self) -> &LocalSet {
&self.local
}
/// Offload a future onto the single-threaded runtime.
///
/// The returned join handle can be used to await the future's result.

View File

@@ -54,7 +54,7 @@ impl System {
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
let rt = Runtime::from(runtime_factory());
let sys_arbiter = rt.block_on(async { Arbiter::in_new_system() });
let sys_arbiter = Arbiter::in_new_system(rt.local_set());
let system = System::construct(sys_tx, sys_arbiter.clone());
system

View File

@@ -1,11 +1,14 @@
use std::{
future::Future,
sync::mpsc::channel,
sync::{
atomic::{AtomicBool, Ordering},
mpsc::channel,
Arc,
},
thread,
time::{Duration, Instant},
};
use actix_rt::{task::JoinError, Arbiter, System};
use actix_rt::{Arbiter, System};
use tokio::sync::oneshot;
#[test]
@@ -217,8 +220,8 @@ fn system_stop_stops_arbiters() {
System::current().stop();
sys.run().unwrap();
// account for slightly slow thread de-spawns
thread::sleep(Duration::from_millis(500));
// account for slightly slow thread de-spawns (only observed on windows)
thread::sleep(Duration::from_millis(100));
// arbiter should be dead and return false
assert!(!Arbiter::current().spawn_fn(|| {}));
@@ -227,7 +230,6 @@ fn system_stop_stops_arbiters() {
arb.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn new_system_with_tokio() {
let (tx, rx) = channel();
@@ -260,14 +262,8 @@ fn new_system_with_tokio() {
assert_eq!(rx.recv().unwrap(), 42);
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn new_arbiter_with_tokio() {
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
let _ = System::new();
let arb = Arbiter::with_tokio_rt(|| {
@@ -290,7 +286,7 @@ fn new_arbiter_with_tokio() {
arb.join().unwrap();
assert!(!counter.load(Ordering::SeqCst));
assert_eq!(false, counter.load(Ordering::SeqCst));
}
#[test]
@@ -302,56 +298,3 @@ fn try_current_no_system() {
fn try_current_with_system() {
System::new().block_on(async { assert!(System::try_current().is_some()) });
}
#[allow(clippy::unit_cmp)]
#[test]
fn spawn_local() {
System::new().block_on(async {
// demonstrate that spawn -> R is strictly more capable than spawn -> ()
assert_eq!(actix_rt::spawn(async {}).await.unwrap(), ());
assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
assert!(actix_rt::spawn(async { panic!("") }).await.is_err());
actix_rt::spawn(async { tokio::time::sleep(Duration::from_millis(50)).await })
.await
.unwrap();
fn g<F: Future<Output = Result<(), JoinError>>>(_f: F) {}
g(actix_rt::spawn(async {}));
// g(actix_rt::spawn(async { 1 })); // compile err
fn h<F: Future<Output = Result<R, JoinError>>, R>(_f: F) {}
h(actix_rt::spawn(async {}));
h(actix_rt::spawn(async { 1 }));
})
}
#[cfg(all(target_os = "linux", feature = "io-uring"))]
#[test]
fn tokio_uring_arbiter() {
let system = System::new();
let (tx, rx) = std::sync::mpsc::channel();
Arbiter::new().spawn(async move {
let handle = actix_rt::spawn(async move {
let f = tokio_uring::fs::File::create("test.txt").await.unwrap();
let buf = b"Hello World!";
let (res, _) = f.write_at(&buf[..], 0).await;
assert!(res.is_ok());
f.sync_all().await.unwrap();
f.close().await.unwrap();
std::fs::remove_file("test.txt").unwrap();
});
handle.await.unwrap();
tx.send(true).unwrap();
});
assert!(rx.recv().unwrap());
drop(system);
}

View File

@@ -1,30 +1,6 @@
# Changes
## Unreleased - 2021-xx-xx
* Remove `config` module. `ServiceConfig`, `ServiceRuntime` public types are removed due to this change. [#349]
* Remove `ServerBuilder::configure` [#349]
* Add `io-uring` feature for enabling async file I/O on linux. [#374]
* Server no longer listens for the SIGHUP signal.
It never took any action on SIGHUP; the only effect was that the Server stopped receiving
any further signals, because the `Signals` future stops on the first signal received. [#389]
[#374]: https://github.com/actix/actix-net/pull/374
[#349]: https://github.com/actix/actix-net/pull/349
[#389]: https://github.com/actix/actix-net/pull/389
## 2.0.0-beta.5 - 2021-04-20
* Server shutdown now notifies all workers to exit regardless of whether the shutdown is graceful,
making all workers shut down immediately in the force shutdown case. [#333]
[#333]: https://github.com/actix/actix-net/pull/333
## 2.0.0-beta.4 - 2021-04-01
* Prevent panic when `shutdown_timeout` is very large. [f9262db]
[f9262db]: https://github.com/actix/actix-net/commit/f9262db
## 2.0.0-beta.3 - 2021-02-06

View File

@@ -1,13 +1,15 @@
[package]
name = "actix-server"
version = "2.0.0-beta.5"
version = "2.0.0-beta.3"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
]
description = "General purpose TCP server built for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
repository = "https://github.com/actix/actix-net"
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-server"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -18,23 +20,22 @@ path = "src/lib.rs"
[features]
default = []
io-uring = ["actix-rt/io-uring"]
[dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = { version = "2.0.0", default-features = false }
actix-service = "2.0.0"
actix-utils = "3.0.0"
actix-service = "2.0.0-beta.4"
actix-utils = "3.0.0-beta.2"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
log = "0.4"
mio = { version = "0.7.6", features = ["os-poll", "net"] }
num_cpus = "1.13"
slab = "0.4"
tokio = { version = "1.2", features = ["sync"] }
[dev-dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = "2.0.0"
bytes = "1"
env_logger = "0.8"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }

View File

@@ -9,17 +9,15 @@
//! Start typing. When you press enter the typed line will be echoed back. The server will log
//! the length of each line it echoes and the total size of data sent when the connection is closed.
use std::{
env, io,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::{env, io};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::{fn_service, ServiceFactoryExt as _};
use actix_service::pipeline_factory;
use bytes::BytesMut;
use futures_util::future::ok;
use log::{error, info};
@@ -27,7 +25,7 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "info");
env::set_var("RUST_LOG", "actix=trace,basic=trace");
env_logger::init();
let count = Arc::new(AtomicUsize::new(0));
@@ -43,7 +41,7 @@ async fn main() -> io::Result<()> {
let count = Arc::clone(&count);
let num2 = Arc::clone(&count);
fn_service(move |mut stream: TcpStream| {
pipeline_factory(move |mut stream: TcpStream| {
let count = Arc::clone(&count);
async move {

View File

@@ -2,24 +2,28 @@ use std::time::Duration;
use std::{io, thread};
use actix_rt::{
time::{sleep, Instant},
time::{sleep_until, Instant},
System,
};
use log::{error, info};
use mio::{Interest, Poll, Token as MioToken};
use slab::Slab;
use crate::server::Server;
use crate::socket::MioListener;
use crate::socket::{MioListener, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
use crate::worker::{Conn, WorkerHandleAccept};
use crate::worker::{Conn, WorkerHandle};
use crate::Token;
struct ServerSocketInfo {
token: usize,
// addr for socket. mainly used for logging.
addr: SocketAddr,
// beware: this is the crate token used to identify the socket and should not be confused with
// mio::Token
token: Token,
lst: MioListener,
/// Timeout is used to mark the deadline when this socket's listener should be registered again
/// after an error.
// timeout is used to mark the deadline when this socket's listener should be registered again
// after an error.
timeout: Option<Instant>,
}
@@ -58,8 +62,8 @@ impl AcceptLoop {
pub(crate) fn start(
&mut self,
socks: Vec<(usize, MioListener)>,
handles: Vec<WorkerHandleAccept>,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
) {
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
let poll = self.poll.take().unwrap();
@@ -73,71 +77,10 @@ impl AcceptLoop {
struct Accept {
poll: Poll,
waker: WakerQueue,
handles: Vec<WorkerHandleAccept>,
handles: Vec<WorkerHandle>,
srv: Server,
next: usize,
avail: Availability,
paused: bool,
}
/// Array of u128 with every bit as marker for a worker handle's availability.
struct Availability([u128; 4]);
impl Default for Availability {
fn default() -> Self {
Self([0; 4])
}
}
impl Availability {
/// Check if any worker handle is available
#[inline(always)]
fn available(&self) -> bool {
self.0.iter().any(|a| *a != 0)
}
/// Check if worker handle is available by index
#[inline(always)]
fn get_available(&self, idx: usize) -> bool {
let (offset, idx) = Self::offset(idx);
self.0[offset] & (1 << idx as u128) != 0
}
/// Set worker handle available state by index.
fn set_available(&mut self, idx: usize, avail: bool) {
let (offset, idx) = Self::offset(idx);
let off = 1 << idx as u128;
if avail {
self.0[offset] |= off;
} else {
self.0[offset] &= !off
}
}
/// Set all worker handles to available state.
/// This would result in a re-check on all workers' availability.
fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
handles.iter().for_each(|handle| {
self.set_available(handle.idx(), true);
})
}
/// Get offset and adjusted index of given worker handle index.
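/// e.g. index 130 maps to `(1, 2)`: the second `u128` in the array, bit 2.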
fn offset(idx: usize) -> (usize, usize) {
if idx < 128 {
(0, idx)
} else if idx < 128 * 2 {
(1, idx - 128)
} else if idx < 128 * 3 {
(2, idx - 128 * 2)
} else if idx < 128 * 4 {
(3, idx - 128 * 3)
} else {
panic!("Max WorkerHandle count is 512")
}
}
backpressure: bool,
}
/// This function defines errors that are per-connection. Which basically
@@ -157,9 +100,9 @@ impl Accept {
pub(crate) fn start(
poll: Poll,
waker: WakerQueue,
socks: Vec<(usize, MioListener)>,
socks: Vec<(Token, MioListener)>,
srv: Server,
handles: Vec<WorkerHandleAccept>,
handles: Vec<WorkerHandle>,
) {
// Accept runs in its own thread and would want to spawn additional futures to current
// actix system.
@@ -168,10 +111,9 @@ impl Accept {
.name("actix-server accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let (mut accept, mut sockets) =
let (mut accept, sockets) =
Accept::new_with_sockets(poll, waker, socks, handles, srv);
accept.poll_with(&mut sockets);
accept.poll_with(sockets);
})
.unwrap();
}
@@ -179,30 +121,29 @@ impl Accept {
fn new_with_sockets(
poll: Poll,
waker: WakerQueue,
socks: Vec<(usize, MioListener)>,
handles: Vec<WorkerHandleAccept>,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
srv: Server,
) -> (Accept, Vec<ServerSocketInfo>) {
let sockets = socks
.into_iter()
.map(|(token, mut lst)| {
// Start listening for incoming connections
poll.registry()
.register(&mut lst, MioToken(token), Interest::READABLE)
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
) -> (Accept, Slab<ServerSocketInfo>) {
let mut sockets = Slab::new();
for (hnd_token, mut lst) in socks.into_iter() {
let addr = lst.local_addr();
ServerSocketInfo {
token,
lst,
timeout: None,
}
})
.collect();
let entry = sockets.vacant_entry();
let token = entry.key();
let mut avail = Availability::default();
// Start listening for incoming connections
poll.registry()
.register(&mut lst, MioToken(token), Interest::READABLE)
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
// Assume all handles are avail at construct time.
avail.set_available_all(&handles);
entry.insert(ServerSocketInfo {
addr,
token: hnd_token,
lst,
timeout: None,
});
}
let accept = Accept {
poll,
@@ -210,383 +151,278 @@ impl Accept {
handles,
srv,
next: 0,
avail,
paused: false,
backpressure: false,
};
(accept, sockets)
}
fn poll_with(&mut self, sockets: &mut [ServerSocketInfo]) {
fn poll_with(&mut self, mut sockets: Slab<ServerSocketInfo>) {
let mut events = mio::Events::with_capacity(128);
loop {
if let Err(e) = self.poll.poll(&mut events, None) {
match e.kind() {
io::ErrorKind::Interrupted => {}
_ => panic!("Poll error: {}", e),
std::io::ErrorKind::Interrupted => {
continue;
}
_ => {
panic!("Poll error: {}", e);
}
}
}
for event in events.iter() {
let token = event.token();
match token {
WAKER_TOKEN => {
let exit = self.handle_waker(sockets);
if exit {
info!("Accept is stopped.");
return;
// This is a loop because command interest handling in the previous version was
// a loop that would try to drain the command channel. It is not yet known
// whether it's necessary/good practice to actively drain the waker queue.
WAKER_TOKEN => 'waker: loop {
// take guard with every iteration so no new interest can be added
// until the current task is done.
let mut guard = self.waker.guard();
match guard.pop_front() {
// a worker notifies that it has become available; we may want to recover
// from backpressure.
Some(WakerInterest::WorkerAvailable) => {
drop(guard);
self.maybe_backpressure(&mut sockets, false);
}
// a new worker thread was made and its handle should be added
// to Accept
Some(WakerInterest::Worker(handle)) => {
drop(guard);
// maybe we want to recover from a backpressure.
self.maybe_backpressure(&mut sockets, false);
self.handles.push(handle);
}
// got timer interest and it's time to try register socket(s)
// again.
Some(WakerInterest::Timer) => {
drop(guard);
self.process_timer(&mut sockets)
}
Some(WakerInterest::Pause) => {
drop(guard);
sockets.iter_mut().for_each(|(_, info)| {
match self.deregister(info) {
Ok(_) => info!(
"Paused accepting connections on {}",
info.addr
),
Err(e) => {
error!("Can not deregister server socket {}", e)
}
}
});
}
Some(WakerInterest::Resume) => {
drop(guard);
sockets.iter_mut().for_each(|(token, info)| {
self.register_logged(token, info);
});
}
Some(WakerInterest::Stop) => {
return self.deregister_all(&mut sockets);
}
// waker queue is drained.
None => {
// Reset the WakerQueue before break so it does not grow
// infinitely.
WakerQueue::reset(&mut guard);
break 'waker;
}
}
}
},
_ => {
let token = usize::from(token);
self.accept(sockets, token);
self.accept(&mut sockets, token);
}
}
}
}
}
fn handle_waker(&mut self, sockets: &mut [ServerSocketInfo]) -> bool {
// This is a loop because command interest handling in the previous version was
// a loop that would try to drain the command channel. It is not yet known
// whether it's necessary/good practice to actively drain the waker queue.
loop {
// take guard with every iteration so no new interest can be added
// until the current task is done.
let mut guard = self.waker.guard();
match guard.pop_front() {
// a worker notifies that it has become available.
Some(WakerInterest::WorkerAvailable(idx)) => {
drop(guard);
self.avail.set_available(idx, true);
if !self.paused {
self.accept_all(sockets);
}
}
// a new worker thread was made and its handle should be added to Accept
Some(WakerInterest::Worker(handle)) => {
drop(guard);
self.avail.set_available(handle.idx(), true);
self.handles.push(handle);
if !self.paused {
self.accept_all(sockets);
}
}
// got timer interest and it's time to try register socket(s) again
Some(WakerInterest::Timer) => {
drop(guard);
self.process_timer(sockets)
}
Some(WakerInterest::Pause) => {
drop(guard);
if !self.paused {
self.paused = true;
self.deregister_all(sockets);
}
}
Some(WakerInterest::Resume) => {
drop(guard);
if self.paused {
self.paused = false;
sockets.iter_mut().for_each(|info| {
self.register_logged(info);
});
self.accept_all(sockets);
}
}
Some(WakerInterest::Stop) => {
if !self.paused {
self.deregister_all(sockets);
}
return true;
}
// waker queue is drained
None => {
// Reset the WakerQueue before break so it does not grow infinitely
WakerQueue::reset(&mut guard);
return false;
}
}
}
}
fn process_timer(&self, sockets: &mut [ServerSocketInfo]) {
fn process_timer(&self, sockets: &mut Slab<ServerSocketInfo>) {
let now = Instant::now();
sockets
.iter_mut()
// Only sockets that had an associated timeout were deregistered.
.filter(|info| info.timeout.is_some())
.for_each(|info| {
let inst = info.timeout.take().unwrap();
if now < inst {
sockets.iter_mut().for_each(|(token, info)| {
// only ServerSocketInfo entries that have an associated timeout value were deregistered.
if let Some(inst) = info.timeout.take() {
if now > inst {
self.register_logged(token, info);
} else {
info.timeout = Some(inst);
} else if !self.paused {
self.register_logged(info);
}
// Drop the timeout if server is paused and socket timeout is expired.
// When server recovers from pause it will register all sockets without
// a timeout value so this socket register will be delayed till then.
});
}
});
}
#[cfg(not(target_os = "windows"))]
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
let token = MioToken(info.token);
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
self.poll
.registry()
.register(&mut info.lst, token, Interest::READABLE)
.register(&mut info.lst, MioToken(token), Interest::READABLE)
}
#[cfg(target_os = "windows")]
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
// On windows, calling register without deregister cause an error.
// See https://github.com/actix/actix-web/issues/905
// Calling reregister seems to fix the issue.
let token = MioToken(info.token);
self.poll
.registry()
.register(&mut info.lst, token, Interest::READABLE)
.register(&mut info.lst, mio::Token(token), Interest::READABLE)
.or_else(|_| {
self.poll
.registry()
.reregister(&mut info.lst, token, Interest::READABLE)
self.poll.registry().reregister(
&mut info.lst,
mio::Token(token),
Interest::READABLE,
)
})
}
fn register_logged(&self, info: &mut ServerSocketInfo) {
match self.register(info) {
Ok(_) => info!("Resume accepting connections on {}", info.lst.local_addr()),
fn register_logged(&self, token: usize, info: &mut ServerSocketInfo) {
match self.register(token, info) {
Ok(_) => info!("Resume accepting connections on {}", info.addr),
Err(e) => error!("Can not register server socket {}", e),
}
}
fn deregister_logged(&self, info: &mut ServerSocketInfo) {
match self.poll.registry().deregister(&mut info.lst) {
Ok(_) => info!("Paused accepting connections on {}", info.lst.local_addr()),
Err(e) => {
error!("Can not deregister server socket {}", e)
}
}
fn deregister(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
self.poll.registry().deregister(&mut info.lst)
}
fn deregister_all(&self, sockets: &mut [ServerSocketInfo]) {
// This is a best effort implementation with following limitation:
//
// Every ServerSocketInfo with an associated timeout will be skipped, and its timeout
// is removed in the process.
//
// Therefore a WakerInterest::Pause followed by a WakerInterest::Resume within a very short
// gap (less than 500ms) would cause all timing-out ServerSocketInfos to be re-registered
// before their expected time.
sockets
.iter_mut()
// Take all timeouts.
// This prevents the Accept::process_timer method from re-registering a socket afterwards.
.map(|info| (info.timeout.take(), info))
// Socket infos with a timeout are already deregistered, so skip them.
.filter(|(timeout, _)| timeout.is_none())
.for_each(|(_, info)| self.deregister_logged(info));
fn deregister_all(&self, sockets: &mut Slab<ServerSocketInfo>) {
sockets.iter_mut().for_each(|(_, info)| {
info!("Accepting connections on {} has been paused", info.addr);
let _ = self.deregister(info);
});
}
// Send connection to worker and handle error.
fn send_connection(&mut self, conn: Conn) -> Result<(), Conn> {
let next = self.next();
match next.send(conn) {
Ok(_) => {
// Increment the counter of the WorkerHandle.
// Set the worker to unavailable when it hits max (inc_counter returns false).
if !next.inc_counter() {
let idx = next.idx();
self.avail.set_available(idx, false);
}
self.set_next();
Ok(())
}
Err(conn) => {
// The worker thread errored and could be gone.
// Remove the worker handle and notify `ServerBuilder`.
self.remove_next();
if self.handles.is_empty() {
error!("No workers");
// All workers are gone and the Conn has nowhere to be sent.
// Treat this situation as Ok and drop the Conn.
return Ok(());
} else if self.handles.len() <= self.next {
self.next = 0;
}
Err(conn)
}
}
}
fn accept_one(&mut self, mut conn: Conn) {
loop {
let next = self.next();
let idx = next.idx();
if self.avail.get_available(idx) {
match self.send_connection(conn) {
Ok(_) => return,
Err(c) => conn = c,
}
} else {
self.avail.set_available(idx, false);
self.set_next();
if !self.avail.available() {
while let Err(c) = self.send_connection(conn) {
conn = c;
fn maybe_backpressure(&mut self, sockets: &mut Slab<ServerSocketInfo>, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in sockets.iter_mut() {
if info.timeout.is_some() {
// socket will attempt to re-register itself when its timeout completes
continue;
}
return;
self.register_logged(token, info);
}
}
} else if on {
self.backpressure = true;
self.deregister_all(sockets);
}
}
fn accept(&mut self, sockets: &mut [ServerSocketInfo], token: usize) {
while self.avail.available() {
let info = &mut sockets[token];
match info.lst.accept() {
Ok(io) => {
let conn = Conn { io, token };
self.accept_one(conn);
fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut msg: Conn) {
if self.backpressure {
while !self.handles.is_empty() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.set_next();
break;
}
Err(tmp) => {
// the worker lost contact and could be gone. a message is sent to the
// `ServerBuilder` future to notify it that a new worker should be made;
// after that, remove the faulty worker.
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
return;
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
// deregister the listener temporarily
self.deregister_logged(info);
// sleep after an error. write the timeout into the socket info since the
// poll will later need it to mark which socket's listener should be
// registered and when
info.timeout = Some(Instant::now() + Duration::from_millis(500));
// after the sleep a Timer interest is sent to Accept Poll
let waker = self.waker.clone();
System::current().arbiter().spawn(async move {
sleep(Duration::from_millis(510)).await;
waker.wake(WakerInterest::Timer);
});
return;
}
} else {
let mut idx = 0;
while idx < self.handles.len() {
idx += 1;
if self.handles[self.next].available() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.set_next();
return;
}
// the worker lost contact and could be gone. a message is sent to the
// `ServerBuilder` future to notify it that a new worker should be made;
// after that, remove the faulty worker and enter backpressure if necessary.
Err(tmp) => {
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
self.maybe_backpressure(sockets, true);
return;
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
};
self.set_next();
}
// enable backpressure
self.maybe_backpressure(sockets, true);
self.accept_one(sockets, msg);
}
}
fn accept_all(&mut self, sockets: &mut [ServerSocketInfo]) {
sockets
.iter_mut()
.map(|info| info.token)
.collect::<Vec<_>>()
.into_iter()
.for_each(|idx| self.accept(sockets, idx))
}
#[inline(always)]
fn next(&self) -> &WorkerHandleAccept {
&self.handles[self.next]
}
/// Set next worker handle that would accept connection.
#[inline(always)]
// set next worker handle that would accept work.
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
}
/// Remove the next worker handle that failed to accept a connection.
fn remove_next(&mut self) {
let handle = self.handles.swap_remove(self.next);
let idx = handle.idx();
// A message is sent to `ServerBuilder` future to notify it a new worker
// should be made.
self.srv.worker_faulted(idx);
self.avail.set_available(idx, false);
}
}
fn accept(&mut self, sockets: &mut Slab<ServerSocketInfo>, token: usize) {
loop {
let msg = if let Some(info) = sockets.get_mut(token) {
match info.lst.accept() {
Ok(Some((io, addr))) => Conn {
io,
token: info.token,
peer: Some(addr),
},
Ok(None) => return,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
// deregister the listener temporarily
error!("Error accepting connection: {}", e);
if let Err(err) = self.deregister(info) {
error!("Can not deregister server socket {}", err);
}
#[cfg(test)]
mod test {
use super::Availability;
// sleep after an error. write the timeout into the socket info since the poll
// will later need it to mark which socket's listener should be registered
// and when.
info.timeout = Some(Instant::now() + Duration::from_millis(500));
fn single(aval: &mut Availability, idx: usize) {
aval.set_available(idx, true);
assert!(aval.available());
// after the sleep a Timer interest is sent to Accept Poll
let waker = self.waker.clone();
System::current().arbiter().spawn(async move {
sleep_until(Instant::now() + Duration::from_millis(510)).await;
waker.wake(WakerInterest::Timer);
});
aval.set_available(idx, true);
return;
}
}
} else {
return;
};
aval.set_available(idx, false);
assert!(!aval.available());
aval.set_available(idx, false);
assert!(!aval.available());
}
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
idx.iter().for_each(|idx| aval.set_available(*idx, true));
assert!(aval.available());
while let Some(idx) = idx.pop() {
assert!(aval.available());
aval.set_available(idx, false);
self.accept_one(sockets, msg);
}
assert!(!aval.available());
}
#[test]
fn availability() {
let mut aval = Availability::default();
single(&mut aval, 1);
single(&mut aval, 128);
single(&mut aval, 256);
single(&mut aval, 511);
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
multi(&mut aval, idx);
multi(&mut aval, (0..511).collect())
}
#[test]
#[should_panic]
fn overflow() {
let mut aval = Availability::default();
single(&mut aval, 512);
}
#[test]
fn pin_point() {
let mut aval = Availability::default();
aval.set_available(438, true);
aval.set_available(479, true);
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
}
}
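The tests above pin down the intended shape of `Availability`: a newtype over four 128-bit words (one bit per worker slot, 512 slots total) where `available()` means "any bit is set". Below is a minimal, self-contained sketch reconstructed from those tests; it is an illustration of the idea, not the crate's exact implementation.
// Sketch only: reconstructed from the tests above, may differ from the real type.
#[derive(Debug, Default)]
struct Availability([u128; 4]);

impl Availability {
    /// True when at least one worker has its bit set.
    fn available(&self) -> bool {
        self.0.iter().any(|word| *word != 0)
    }

    /// Set or clear the bit for worker `idx`; an index past 511 panics.
    fn set_available(&mut self, idx: usize, avail: bool) {
        let (word, bit) = (idx / 128, idx % 128);
        if avail {
            self.0[word] |= 1 << bit;
        } else {
            self.0[word] &= !(1 << bit);
        }
    }
}

fn main() {
    let mut avail = Availability::default();
    avail.set_available(438, true);
    avail.set_available(479, true);
    assert!(avail.available());
    // mirrors the pin_point test: index 438 and 479 land in word 3 (bits 54 and 95)
    assert_eq!(avail.0[3], 1 << (438 - 384) | 1 << (479 - 384));
    avail.set_available(438, false);
    avail.set_available(479, false);
    assert!(!avail.available());
}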

View File

@@ -1,36 +1,35 @@
use std::{
future::Future,
io, mem,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::{io, mem};
use actix_rt::{self as rt, net::TcpStream, time::sleep, System};
use actix_rt::net::TcpStream;
use actix_rt::time::{sleep_until, Instant};
use actix_rt::{self as rt, System};
use log::{error, info};
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedReceiver},
oneshot,
};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::sync::oneshot;
use crate::accept::AcceptLoop;
use crate::join_all;
use crate::config::{ConfiguredService, ServiceConfig};
use crate::server::{Server, ServerCommand};
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::signals::{Signal, Signals};
use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::socket::{MioTcpListener, MioTcpSocket};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::worker::{ServerWorker, ServerWorkerConfig, WorkerHandleAccept, WorkerHandleServer};
use crate::worker::{self, ServerWorker, ServerWorkerConfig, WorkerAvailability, WorkerHandle};
use crate::{join_all, Token};
/// Server builder
pub struct ServerBuilder {
threads: usize,
token: usize,
token: Token,
backlog: u32,
handles: Vec<(usize, WorkerHandleServer)>,
handles: Vec<(usize, WorkerHandle)>,
services: Vec<Box<dyn InternalServiceFactory>>,
sockets: Vec<(usize, String, MioListener)>,
sockets: Vec<(Token, String, MioListener)>,
accept: AcceptLoop,
exit: bool,
no_signals: bool,
@@ -54,7 +53,7 @@ impl ServerBuilder {
ServerBuilder {
threads: num_cpus::get(),
token: 0,
token: Token::default(),
handles: Vec::new(),
services: Vec::new(),
sockets: Vec::new(),
@@ -118,18 +117,18 @@ impl ServerBuilder {
/// reached for each worker.
///
/// By default, max connections is set to 25k per worker.
pub fn maxconn(mut self, num: usize) -> Self {
self.worker_config.max_concurrent_connections(num);
pub fn maxconn(self, num: usize) -> Self {
worker::max_concurrent_connections(num);
self
}
/// Stop Actix system.
/// Stop actix system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
/// Disable signal handling.
/// Disable signal handling
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
@@ -137,8 +136,9 @@ impl ServerBuilder {
/// Timeout for graceful workers shutdown in seconds.
///
/// After receiving a stop signal, workers have this much time to finish serving requests.
/// Workers still alive after the timeout are force dropped.
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
///
/// By default the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u64) -> Self {
@@ -147,6 +147,33 @@ impl ServerBuilder {
self
}
/// Execute external configuration as part of the server building
/// process.
///
/// This function is useful for moving parts of configuration to a
/// different module or even library.
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
where
F: Fn(&mut ServiceConfig) -> io::Result<()>,
{
let mut cfg = ServiceConfig::new(self.threads, self.backlog);
f(&mut cfg)?;
if let Some(apply) = cfg.apply {
let mut srv = ConfiguredService::new(apply);
for (name, lst) in cfg.services {
let token = self.token.next();
srv.stream(token, name.clone(), lst.local_addr()?);
self.sockets.push((token, name, MioListener::Tcp(lst)));
}
self.services.push(Box::new(srv));
}
self.threads = cfg.threads;
Ok(self)
}
/// Add new service to the server.
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
where
@@ -156,7 +183,7 @@ impl ServerBuilder {
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
let token = self.next_token();
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
@@ -205,7 +232,7 @@ impl ServerBuilder {
{
use std::net::{IpAddr, Ipv4Addr};
lst.set_nonblocking(true)?;
let token = self.next_token();
let token = self.token.next();
let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
@@ -231,7 +258,7 @@ impl ServerBuilder {
lst.set_nonblocking(true)?;
let addr = lst.local_addr()?;
let token = self.next_token();
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
@@ -241,7 +268,6 @@ impl ServerBuilder {
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
@@ -255,11 +281,10 @@ impl ServerBuilder {
// start workers
let handles = (0..self.threads)
.map(|idx| {
let (handle_accept, handle_server) =
self.start_worker(idx, self.accept.waker_owned());
self.handles.push((idx, handle_server));
let handle = self.start_worker(idx, self.accept.waker_owned());
self.handles.push((idx, handle.clone()));
handle_accept
handle
})
.collect();
@@ -287,14 +312,11 @@ impl ServerBuilder {
}
}
fn start_worker(
&self,
idx: usize,
waker_queue: WakerQueue,
) -> (WorkerHandleAccept, WorkerHandleServer) {
fn start_worker(&self, idx: usize, waker: WakerQueue) -> WorkerHandle {
let avail = WorkerAvailability::new(waker);
let services = self.services.iter().map(|v| v.clone_factory()).collect();
ServerWorker::start(idx, services, waker_queue, self.worker_config)
ServerWorker::start(idx, services, avail, self.worker_config)
}
fn handle_cmd(&mut self, item: ServerCommand) {
@@ -335,6 +357,7 @@ impl ServerBuilder {
completion: None,
})
}
_ => (),
}
}
ServerCommand::Notify(tx) => {
@@ -351,29 +374,45 @@ impl ServerBuilder {
let notify = std::mem::take(&mut self.notify);
// stop workers
let stop = self
.handles
.iter()
.map(move |worker| worker.1.stop(graceful))
.collect();
if !self.handles.is_empty() && graceful {
let iter = self
.handles
.iter()
.map(move |worker| worker.1.stop(graceful))
.collect();
rt::spawn(async move {
if graceful {
let _ = join_all(stop).await;
let fut = join_all(iter);
rt::spawn(async move {
let _ = fut.await;
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
if exit {
rt::spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
});
}
});
} else {
// we need to stop system if server was spawned
if self.exit {
rt::spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
});
}
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
if exit {
sleep(Duration::from_millis(300)).await;
System::current().stop();
}
});
}
}
ServerCommand::WorkerFaulted(idx) => {
let mut found = false;
@@ -399,20 +438,13 @@ impl ServerBuilder {
break;
}
let (handle_accept, handle_server) =
self.start_worker(new_idx, self.accept.waker_owned());
self.handles.push((new_idx, handle_server));
self.accept.wake(WakerInterest::Worker(handle_accept));
let handle = self.start_worker(new_idx, self.accept.waker_owned());
self.handles.push((new_idx, handle.clone()));
self.accept.wake(WakerInterest::Worker(handle));
}
}
}
}
fn next_token(&mut self) -> usize {
let token = self.token;
self.token += 1;
token
}
}
impl Future for ServerBuilder {

287
actix-server/src/config.rs Normal file
View File

@@ -0,0 +1,287 @@
use std::collections::HashMap;
use std::future::Future;
use std::{fmt, io};
use actix_rt::net::TcpStream;
use actix_service::{
fn_service, IntoServiceFactory as IntoBaseServiceFactory,
ServiceFactory as BaseServiceFactory,
};
use actix_utils::counter::CounterGuard;
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::builder::bind_addr;
use crate::service::{BoxedServerService, InternalServiceFactory, StreamService};
use crate::socket::{MioStream, MioTcpListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::{ready, Token};
pub struct ServiceConfig {
pub(crate) services: Vec<(String, MioTcpListener)>,
pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
pub(crate) threads: usize,
pub(crate) backlog: u32,
}
impl ServiceConfig {
pub(super) fn new(threads: usize, backlog: u32) -> ServiceConfig {
ServiceConfig {
threads,
backlog,
services: Vec::new(),
apply: None,
}
}
/// Set the number of workers to start.
///
/// By default the server uses the number of available logical CPUs as the
/// worker count.
pub fn workers(&mut self, num: usize) {
self.threads = num;
}
/// Add new service to server
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
where
U: ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
self._listen(name.as_ref(), lst);
}
Ok(self)
}
/// Add new service to server
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: StdTcpListener) -> &mut Self {
self._listen(name, MioTcpListener::from_std(lst))
}
/// Register a service configuration function. This function gets called
/// during worker runtime configuration and is executed in the worker thread.
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
self.apply = Some(Box::new(f));
Ok(())
}
fn _listen<N: AsRef<str>>(&mut self, name: N, lst: MioTcpListener) -> &mut Self {
if self.apply.is_none() {
self.apply = Some(Box::new(not_configured));
}
self.services.push((name.as_ref().to_string(), lst));
self
}
}
pub(super) struct ConfiguredService {
rt: Box<dyn ServiceRuntimeConfiguration>,
names: HashMap<Token, (String, StdSocketAddr)>,
topics: HashMap<String, Token>,
services: Vec<Token>,
}
impl ConfiguredService {
pub(super) fn new(rt: Box<dyn ServiceRuntimeConfiguration>) -> Self {
ConfiguredService {
rt,
names: HashMap::new(),
topics: HashMap::new(),
services: Vec::new(),
}
}
pub(super) fn stream(&mut self, token: Token, name: String, addr: StdSocketAddr) {
self.names.insert(token, (name.clone(), addr));
self.topics.insert(name, token);
self.services.push(token);
}
}
impl InternalServiceFactory for ConfiguredService {
fn name(&self, token: Token) -> &str {
&self.names[&token].0
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
rt: self.rt.clone(),
names: self.names.clone(),
topics: self.topics.clone(),
services: self.services.clone(),
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
// configure services
let mut rt = ServiceRuntime::new(self.topics.clone());
self.rt.configure(&mut rt);
rt.validate();
let mut names = self.names.clone();
let tokens = self.services.clone();
// construct services
Box::pin(async move {
let mut services = rt.services;
// TODO: Proper error handling here
for f in rt.onstart.into_iter() {
f.await;
}
let mut res = vec![];
for token in tokens {
if let Some(srv) = services.remove(&token) {
let newserv = srv.new_service(());
match newserv.await {
Ok(serv) => {
res.push((token, serv));
}
Err(_) => {
error!("Can not construct service");
return Err(());
}
}
} else {
let name = names.remove(&token).unwrap().0;
res.push((
token,
Box::new(StreamService::new(fn_service(move |_: TcpStream| {
error!("Service {:?} is not configured", name);
ready::<Result<_, ()>>(Ok(()))
}))),
));
};
}
Ok(res)
})
}
}
pub(super) trait ServiceRuntimeConfiguration: Send {
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration>;
fn configure(&self, rt: &mut ServiceRuntime);
}
impl<F> ServiceRuntimeConfiguration for F
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration> {
Box::new(self.clone())
}
fn configure(&self, rt: &mut ServiceRuntime) {
(self)(rt)
}
}
fn not_configured(_: &mut ServiceRuntime) {
error!("Service is not configured");
}
pub struct ServiceRuntime {
names: HashMap<String, Token>,
services: HashMap<Token, BoxedNewService>,
onstart: Vec<LocalBoxFuture<'static, ()>>,
}
impl ServiceRuntime {
fn new(names: HashMap<String, Token>) -> Self {
ServiceRuntime {
names,
services: HashMap::new(),
onstart: Vec::new(),
}
}
fn validate(&self) {
for (name, token) in &self.names {
if !self.services.contains_key(&token) {
error!("Service {:?} is not configured", name);
}
}
}
/// Register service.
///
/// The name of the service must be registered during the configuration stage with
/// the *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
pub fn service<T, F>(&mut self, name: &str, service: F)
where
F: IntoBaseServiceFactory<T, TcpStream>,
T: BaseServiceFactory<TcpStream, Config = ()> + 'static,
T::Future: 'static,
T::Service: 'static,
T::InitError: fmt::Debug,
{
// let name = name.to_owned();
if let Some(token) = self.names.get(name) {
self.services.insert(
*token,
Box::new(ServiceFactory {
inner: service.into_factory(),
}),
);
} else {
panic!("Unknown service: {:?}", name);
}
}
/// Execute future before services initialization.
pub fn on_start<F>(&mut self, fut: F)
where
F: Future<Output = ()> + 'static,
{
self.onstart.push(Box::pin(fut))
}
}
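For orientation, here is a hedged usage sketch (not part of this diff) showing how `ServerBuilder::configure`, `ServiceConfig::bind`/`apply`, and `ServiceRuntime::service` are meant to fit together. The name "example", the loopback address, and the no-op service body are placeholders chosen for illustration.
// Usage sketch (assumption: builder APIs exactly as shown in this diff).
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

fn main() -> std::io::Result<()> {
    actix_rt::System::new().block_on(async {
        let server = Server::build()
            .configure(|cfg| {
                // bind a named listener during the configuration stage...
                cfg.bind("example", ("127.0.0.1", 8080))?;
                // ...and attach its service factory inside each worker runtime
                cfg.apply(|rt| {
                    rt.service(
                        "example",
                        fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) }),
                    );
                })
            })?
            .workers(1)
            .disable_signals()
            .run();
        server.await
    })
}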
type BoxedNewService = Box<
dyn BaseServiceFactory<
(Option<CounterGuard>, MioStream),
Response = (),
Error = (),
InitError = (),
Config = (),
Service = BoxedServerService,
Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>,
>,
>;
struct ServiceFactory<T> {
inner: T,
}
impl<T> BaseServiceFactory<(Option<CounterGuard>, MioStream)> for ServiceFactory<T>
where
T: BaseServiceFactory<TcpStream, Config = ()>,
T::Future: 'static,
T::Service: 'static,
T::Error: 'static,
T::InitError: fmt::Debug + 'static,
{
type Response = ();
type Error = ();
type Config = ();
type Service = BoxedServerService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>;
fn new_service(&self, _: ()) -> Self::Future {
let fut = self.inner.new_service(());
Box::pin(async move {
match fut.await {
Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
Err(e) => {
error!("Can not construct service: {:?}", e);
Err(())
}
}
})
}
}

View File

@@ -6,6 +6,7 @@
mod accept;
mod builder;
mod config;
mod server;
mod service;
mod signals;
@@ -15,6 +16,7 @@ mod waker_queue;
mod worker;
pub use self::builder::ServerBuilder;
pub use self::config::{ServiceConfig, ServiceRuntime};
pub use self::server::Server;
pub use self::service::ServiceFactory;
pub use self::test_server::TestServer;
@@ -26,11 +28,51 @@ use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Socket ID token
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) struct Token(usize);
impl Default for Token {
fn default() -> Self {
Self::new()
}
}
impl Token {
fn new() -> Self {
Self(0)
}
pub(crate) fn next(&mut self) -> Token {
let token = Token(self.0);
self.0 += 1;
token
}
}
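Illustration only (usable inside the crate, since `Token` is `pub(crate)`): each call to `next()` hands out the current socket ID and advances the counter.
// Sketch of Token's hand-out semantics, derived from the impl above.
let mut token = Token::default();
assert_eq!(token.next(), Token(0));
assert_eq!(token.next(), Token(1));
assert_eq!(token.next(), Token(2));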
/// Start server building process
pub fn new() -> ServerBuilder {
ServerBuilder::default()
}
// temporary Ready type for std::future::{ready, Ready}; can be removed when MSRV surpasses 1.48
#[doc(hidden)]
pub struct Ready<T>(Option<T>);
pub(crate) fn ready<T>(t: T) -> Ready<T> {
Ready(Some(t))
}
impl<T> Unpin for Ready<T> {}
impl<T> Future for Ready<T> {
type Output = T;
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(self.get_mut().0.take().unwrap())
}
}
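Illustration only: inside the crate this shim is awaited exactly like `std::future::ready`, e.g. in a unit test.
// Sketch of a crate-internal test exercising the Ready shim above.
#[actix_rt::test]
async fn ready_resolves_immediately() {
    assert_eq!(ready(42).await, 42);
}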
// a poor man's join future. the joined future is only used when starting/stopping the server;
// pin_project and pinned futures are overkill for this task.
pub(crate) struct JoinAll<T> {
@@ -90,8 +132,6 @@ impl<T> Future for JoinAll<T> {
mod test {
use super::*;
use actix_utils::future::ready;
#[actix_rt::test]
async fn test_join_all() {
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];

View File

@@ -3,12 +3,12 @@ use std::net::SocketAddr;
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory as BaseServiceFactory};
use actix_utils::future::{ready, Ready};
use actix_utils::counter::CounterGuard;
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::socket::{FromStream, MioStream};
use crate::worker::WorkerCounterGuard;
use crate::{ready, Ready, Token};
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
type Factory: BaseServiceFactory<Stream, Config = ()>;
@@ -17,16 +17,16 @@ pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
}
pub(crate) trait InternalServiceFactory: Send {
fn name(&self, token: usize) -> &str;
fn name(&self, token: Token) -> &str;
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>;
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>;
}
pub(crate) type BoxedServerService = Box<
dyn Service<
(WorkerCounterGuard, MioStream),
(Option<CounterGuard>, MioStream),
Response = (),
Error = (),
Future = Ready<Result<(), ()>>,
@@ -47,7 +47,7 @@ impl<S, I> StreamService<S, I> {
}
}
impl<S, I> Service<(WorkerCounterGuard, MioStream)> for StreamService<S, I>
impl<S, I> Service<(Option<CounterGuard>, MioStream)> for StreamService<S, I>
where
S: Service<I>,
S::Future: 'static,
@@ -62,7 +62,7 @@ where
self.service.poll_ready(ctx).map_err(|_| ())
}
fn call(&self, (guard, req): (WorkerCounterGuard, MioStream)) -> Self::Future {
fn call(&self, (guard, req): (Option<CounterGuard>, MioStream)) -> Self::Future {
ready(match FromStream::from_mio(req) {
Ok(stream) => {
let f = self.service.call(stream);
@@ -83,7 +83,7 @@ where
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
name: String,
inner: F,
token: usize,
token: Token,
addr: SocketAddr,
_t: PhantomData<Io>,
}
@@ -95,7 +95,7 @@ where
{
pub(crate) fn create(
name: String,
token: usize,
token: Token,
inner: F,
addr: SocketAddr,
) -> Box<dyn InternalServiceFactory> {
@@ -114,7 +114,7 @@ where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
fn name(&self, _: usize) -> &str {
fn name(&self, _: Token) -> &str {
&self.name
}
@@ -128,14 +128,14 @@ where
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>> {
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
let token = self.token;
let fut = self.inner.create().new_service(());
Box::pin(async move {
match fut.await {
Ok(inner) => {
let service = Box::new(StreamService::new(inner)) as _;
Ok((token, service))
Ok(vec![(token, service)])
}
Err(_) => Err(()),
}

View File

@@ -8,6 +8,8 @@ use crate::server::Server;
#[allow(dead_code)]
#[derive(PartialEq, Clone, Copy, Debug)]
pub(crate) enum Signal {
/// SIGHUP
Hup,
/// SIGINT
Int,
/// SIGTERM
@@ -39,6 +41,7 @@ impl Signals {
let sig_map = [
(unix::SignalKind::interrupt(), Signal::Int),
(unix::SignalKind::hangup(), Signal::Hup),
(unix::SignalKind::terminate(), Signal::Term),
(unix::SignalKind::quit(), Signal::Quit),
];

View File

@@ -12,7 +12,18 @@ pub(crate) use {
use std::{fmt, io};
use actix_rt::net::TcpStream;
use mio::{event::Source, Interest, Registry, Token};
use mio::event::Source;
use mio::net::TcpStream as MioTcpStream;
use mio::{Interest, Registry, Token};
#[cfg(windows)]
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
#[cfg(unix)]
use {
actix_rt::net::UnixStream,
mio::net::{SocketAddr as MioSocketAddr, UnixStream as MioUnixStream},
std::os::unix::io::{FromRawFd, IntoRawFd},
};
pub(crate) enum MioListener {
Tcp(MioTcpListener),
@@ -23,23 +34,21 @@ pub(crate) enum MioListener {
impl MioListener {
pub(crate) fn local_addr(&self) -> SocketAddr {
match *self {
MioListener::Tcp(ref lst) => lst
.local_addr()
.map(SocketAddr::Tcp)
.unwrap_or(SocketAddr::Unknown),
MioListener::Tcp(ref lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
#[cfg(unix)]
MioListener::Uds(ref lst) => lst
.local_addr()
.map(SocketAddr::Uds)
.unwrap_or(SocketAddr::Unknown),
MioListener::Uds(ref lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
}
}
pub(crate) fn accept(&self) -> io::Result<MioStream> {
pub(crate) fn accept(&self) -> io::Result<Option<(MioStream, SocketAddr)>> {
match *self {
MioListener::Tcp(ref lst) => lst.accept().map(|(stream, _)| MioStream::Tcp(stream)),
MioListener::Tcp(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Tcp(stream), SocketAddr::Tcp(addr)))),
#[cfg(unix)]
MioListener::Uds(ref lst) => lst.accept().map(|(stream, _)| MioStream::Uds(stream)),
MioListener::Uds(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Uds(stream), SocketAddr::Uds(addr)))),
}
}
}
@@ -116,27 +125,25 @@ impl fmt::Debug for MioListener {
impl fmt::Display for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
MioListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
#[cfg(unix)]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
MioListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
}
}
}
pub(crate) enum SocketAddr {
Unknown,
Tcp(StdSocketAddr),
#[cfg(unix)]
Uds(mio::net::SocketAddr),
Uds(MioSocketAddr),
}
impl fmt::Display for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Self::Unknown => write!(f, "Unknown SocketAddr"),
Self::Tcp(ref addr) => write!(f, "{}", addr),
SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
#[cfg(unix)]
Self::Uds(ref addr) => write!(f, "{:?}", addr),
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
@@ -144,19 +151,18 @@ impl fmt::Display for SocketAddr {
impl fmt::Debug for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Self::Unknown => write!(f, "Unknown SocketAddr"),
Self::Tcp(ref addr) => write!(f, "{:?}", addr),
SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
#[cfg(unix)]
Self::Uds(ref addr) => write!(f, "{:?}", addr),
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
#[derive(Debug)]
pub enum MioStream {
Tcp(mio::net::TcpStream),
Tcp(MioTcpStream),
#[cfg(unix)]
Uds(mio::net::UnixStream),
Uds(MioUnixStream),
}
/// helper trait for converting mio stream to tokio stream.
@@ -164,60 +170,47 @@ pub trait FromStream: Sized {
fn from_mio(sock: MioStream) -> io::Result<Self>;
}
#[cfg(windows)]
mod win_impl {
use super::*;
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(unix)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
}
}
}
#[cfg(unix)]
mod unix_impl {
use super::*;
use std::os::unix::io::{FromRawFd, IntoRawFd};
use actix_rt::net::UnixStream;
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(windows)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(unix)]
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
}
}

View File

@@ -92,10 +92,10 @@ impl TestServer {
let port = addr.port();
TestServerRuntime {
system,
addr,
host,
port,
system,
}
}

View File

@@ -6,7 +6,7 @@ use std::{
use mio::{Registry, Token as MioToken, Waker};
use crate::worker::WorkerHandleAccept;
use crate::worker::WorkerHandle;
/// Waker token for `mio::Poll` instance.
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);
@@ -72,7 +72,7 @@ impl WakerQueue {
pub(crate) enum WakerInterest {
/// `WorkerAvailable` is an interest from `Worker` notifying `Accept` that a worker is
/// available and can accept new tasks.
WorkerAvailable(usize),
WorkerAvailable,
/// The `Pause`, `Resume`, and `Stop` interests are from the `ServerBuilder` future. It listens to
/// `ServerCommand` and notifies `Accept` to do exactly these tasks.
Pause,
@@ -84,6 +84,6 @@ pub(crate) enum WakerInterest {
Timer,
/// `Worker` is an interest that happens after a worker runs into a faulted state (this is determined
/// by whether work can be sent to it successfully). `Accept` would be woken up to add the new
/// `WorkerHandleAccept`.
Worker(WorkerHandleAccept),
/// `WorkerHandle`.
Worker(WorkerHandle),
}

View File

@@ -1,211 +1,139 @@
use std::{
future::Future,
mem,
pin::Pin,
rc::Rc,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
task::{Context, Poll},
time::Duration,
};
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use actix_rt::{
spawn,
time::{sleep, Instant, Sleep},
Arbiter,
};
use futures_core::{future::LocalBoxFuture, ready};
use actix_rt::time::{sleep_until, Instant, Sleep};
use actix_rt::{spawn, Arbiter};
use actix_utils::counter::Counter;
use futures_core::future::LocalBoxFuture;
use log::{error, info, trace};
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
oneshot,
};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot;
use crate::join_all;
use crate::service::{BoxedServerService, InternalServiceFactory};
use crate::socket::MioStream;
use crate::socket::{MioStream, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::{join_all, Token};
/// Stop worker message. Returns `true` on successful graceful shutdown
/// and `false` if some connections are still alive when the shutdown executes.
pub(crate) struct Stop {
pub(crate) struct WorkerCommand(Conn);
/// Stop worker message. Returns `true` on successful shutdown
/// and `false` if some connections still alive.
pub(crate) struct StopCommand {
graceful: bool,
tx: oneshot::Sender<bool>,
result: oneshot::Sender<bool>,
}
#[derive(Debug)]
pub(crate) struct Conn {
pub io: MioStream,
pub token: usize,
pub token: Token,
pub peer: Option<SocketAddr>,
}
fn handle_pair(
idx: usize,
tx1: UnboundedSender<Conn>,
tx2: UnboundedSender<Stop>,
counter: Counter,
) -> (WorkerHandleAccept, WorkerHandleServer) {
let accept = WorkerHandleAccept {
idx,
tx: tx1,
counter,
};
static MAX_CONNS: AtomicUsize = AtomicUsize::new(25600);
let server = WorkerHandleServer { idx, tx: tx2 };
(accept, server)
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
///
/// By default, max connections is set to 25k per worker.
pub fn max_concurrent_connections(num: usize) {
MAX_CONNS.store(num, Ordering::Relaxed);
}
/// The counter: Arc<AtomicUsize> field is owned by both the `Accept` thread and the `ServerWorker` thread.
///
/// `Accept` would increment the counter and `ServerWorker` would decrement it.
///
/// # Atomic Ordering:
///
/// `Accept` always looks at its cached `Availability` field for `ServerWorker` state.
/// It lazily increments the counter after successfully dispatching new work to `ServerWorker`.
/// On reaching the counter limit, `Accept` updates its cached `Availability` and marks the worker as
/// unable to accept any work.
///
/// `ServerWorker` always decrements the counter when each piece of work received from `Accept` is done.
/// On reaching the counter limit, the worker uses `mio::Waker` and `WakerQueue` to wake up `Accept`
/// and notify it to update the cached `Availability` again, marking the worker as able to accept work again.
///
/// Hence, a wake-up only happens after `Accept` has incremented the counter to the limit,
/// and a decrement at the limit always wakes up `Accept`.
thread_local! {
static MAX_CONNS_COUNTER: Counter =
Counter::new(MAX_CONNS.load(Ordering::Relaxed));
}
pub(crate) fn num_connections() -> usize {
MAX_CONNS_COUNTER.with(|conns| conns.total())
}
// a handle to a worker that can send messages to the worker and share the worker's availability with
// other threads.
#[derive(Clone)]
pub(crate) struct Counter {
counter: Arc<AtomicUsize>,
limit: usize,
pub(crate) struct WorkerHandle {
pub idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
}
impl Counter {
pub(crate) fn new(limit: usize) -> Self {
Self {
counter: Arc::new(AtomicUsize::new(1)),
limit,
}
}
/// Increment counter by 1; returns `false` when this increment hits the limit.
#[inline(always)]
pub(crate) fn inc(&self) -> bool {
self.counter.fetch_add(1, Ordering::Relaxed) != self.limit
}
/// Decrement counter by 1 and return true if crossing limit.
#[inline(always)]
pub(crate) fn dec(&self) -> bool {
self.counter.fetch_sub(1, Ordering::Relaxed) == self.limit
}
pub(crate) fn total(&self) -> usize {
self.counter.load(Ordering::SeqCst) - 1
}
}
pub(crate) struct WorkerCounter {
idx: usize,
inner: Rc<(WakerQueue, Counter)>,
}
impl Clone for WorkerCounter {
fn clone(&self) -> Self {
Self {
idx: self.idx,
inner: self.inner.clone(),
}
}
}
impl WorkerCounter {
pub(crate) fn new(idx: usize, waker_queue: WakerQueue, counter: Counter) -> Self {
Self {
impl WorkerHandle {
pub fn new(
idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
) -> Self {
WorkerHandle {
idx,
inner: Rc::new((waker_queue, counter)),
tx1,
tx2,
avail,
}
}
#[inline(always)]
pub(crate) fn guard(&self) -> WorkerCounterGuard {
WorkerCounterGuard(self.clone())
pub fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx1.send(WorkerCommand(msg)).map_err(|msg| msg.0 .0)
}
fn total(&self) -> usize {
self.inner.1.total()
}
}
pub(crate) struct WorkerCounterGuard(WorkerCounter);
impl Drop for WorkerCounterGuard {
fn drop(&mut self) {
let (waker_queue, counter) = &*self.0.inner;
if counter.dec() {
waker_queue.wake(WakerInterest::WorkerAvailable(self.0.idx));
}
}
}
/// Handle to a worker that can send connection messages to the worker and share the
/// worker's availability with other threads.
///
/// Held by [Accept](crate::accept::Accept).
pub(crate) struct WorkerHandleAccept {
idx: usize,
tx: UnboundedSender<Conn>,
counter: Counter,
}
impl WorkerHandleAccept {
#[inline(always)]
pub(crate) fn idx(&self) -> usize {
self.idx
pub fn available(&self) -> bool {
self.avail.available()
}
#[inline(always)]
pub(crate) fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx.send(msg).map_err(|msg| msg.0)
}
#[inline(always)]
pub(crate) fn inc_counter(&self) -> bool {
self.counter.inc()
}
}
/// Handle to a worker that can send a stop message to the worker.
///
/// Held by [ServerBuilder](crate::builder::ServerBuilder).
#[derive(Debug)]
pub(crate) struct WorkerHandleServer {
idx: usize,
tx: UnboundedSender<Stop>,
}
impl WorkerHandleServer {
pub(crate) fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (tx, rx) = oneshot::channel();
let _ = self.tx.send(Stop { graceful, tx });
pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (result, rx) = oneshot::channel();
let _ = self.tx2.send(StopCommand { graceful, result });
rx
}
}
#[derive(Clone)]
pub(crate) struct WorkerAvailability {
waker: WakerQueue,
available: Arc<AtomicBool>,
}
impl WorkerAvailability {
pub fn new(waker: WakerQueue) -> Self {
WorkerAvailability {
waker,
available: Arc::new(AtomicBool::new(false)),
}
}
pub fn available(&self) -> bool {
self.available.load(Ordering::Acquire)
}
pub fn set(&self, val: bool) {
let old = self.available.swap(val, Ordering::Release);
// notify Accept when switching to available.
if !old && val {
self.waker.wake(WakerInterest::WorkerAvailable);
}
}
}
/// Service worker.
///
/// Worker accepts Socket objects via unbounded channel and starts stream processing.
pub(crate) struct ServerWorker {
// UnboundedReceiver<Conn> should always be the first field.
// It must be dropped as soon as ServerWorker is dropped.
rx: UnboundedReceiver<Conn>,
rx2: UnboundedReceiver<Stop>,
counter: WorkerCounter,
services: Box<[WorkerService]>,
factories: Box<[Box<dyn InternalServiceFactory>]>,
rx: UnboundedReceiver<WorkerCommand>,
rx2: UnboundedReceiver<StopCommand>,
services: Vec<WorkerService>,
availability: WorkerAvailability,
conns: Counter,
factories: Vec<Box<dyn InternalServiceFactory>>,
state: WorkerState,
shutdown_timeout: Duration,
config: ServerWorkerConfig,
}
struct WorkerService {
@@ -236,7 +164,6 @@ enum WorkerServiceStatus {
pub(crate) struct ServerWorkerConfig {
shutdown_timeout: Duration,
max_blocking_threads: usize,
max_concurrent_connections: usize,
}
impl Default for ServerWorkerConfig {
@@ -246,7 +173,6 @@ impl Default for ServerWorkerConfig {
Self {
shutdown_timeout: Duration::from_secs(30),
max_blocking_threads,
max_concurrent_connections: 25600,
}
}
}
@@ -256,10 +182,6 @@ impl ServerWorkerConfig {
self.max_blocking_threads = num;
}
pub(crate) fn max_concurrent_connections(&mut self, num: usize) {
self.max_concurrent_connections = num;
}
pub(crate) fn shutdown_timeout(&mut self, dur: Duration) {
self.shutdown_timeout = dur;
}
@@ -269,112 +191,97 @@ impl ServerWorker {
pub(crate) fn start(
idx: usize,
factories: Vec<Box<dyn InternalServiceFactory>>,
waker_queue: WakerQueue,
availability: WorkerAvailability,
config: ServerWorkerConfig,
) -> (WorkerHandleAccept, WorkerHandleServer) {
) -> WorkerHandle {
let (tx1, rx) = unbounded_channel();
let (tx2, rx2) = unbounded_channel();
let avail = availability.clone();
let counter = Counter::new(config.max_concurrent_connections);
let counter_clone = counter.clone();
// every worker runs in its own arbiter.
// use a custom tokio runtime builder to change the runtime's settings.
#[cfg(all(target_os = "linux", feature = "io-uring"))]
let arbiter = {
// TODO: pass max blocking thread config when tokio-uring enable configuration
// on building runtime.
let _ = config.max_blocking_threads;
Arbiter::new()
};
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
let arbiter = Arbiter::with_tokio_rt(move || {
Arbiter::with_tokio_rt(move || {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.max_blocking_threads(config.max_blocking_threads)
.build()
.unwrap()
});
})
.spawn(async move {
availability.set(false);
let mut wrk = MAX_CONNS_COUNTER.with(move |conns| ServerWorker {
rx,
rx2,
availability,
factories,
config,
services: Vec::new(),
conns: conns.clone(),
state: WorkerState::Unavailable,
});
arbiter.spawn(async move {
let fut = factories
let fut = wrk
.factories
.iter()
.enumerate()
.map(|(idx, factory)| {
let fut = factory.create();
async move { fut.await.map(|(t, s)| (idx, t, s)) }
async move {
fut.await.map(|r| {
r.into_iter().map(|(t, s)| (idx, t, s)).collect::<Vec<_>>()
})
}
})
.collect::<Vec<_>>();
// a second spawn to run !Send future tasks.
// a second spawn to make sure the worker future runs as a non-boxed future,
// as Arbiter::spawn would box the future before sending it to the arbiter.
spawn(async move {
let res = join_all(fut)
.await
.into_iter()
.collect::<Result<Vec<_>, _>>();
let services = match res {
Ok(res) => res
.into_iter()
.fold(Vec::new(), |mut services, (factory, token, service)| {
assert_eq!(token, services.len());
services.push(WorkerService {
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
services
})
.into_boxed_slice(),
let res: Result<Vec<_>, _> = join_all(fut).await.into_iter().collect();
match res {
Ok(services) => {
for item in services {
for (factory, token, service) in item {
assert_eq!(token.0, wrk.services.len());
wrk.services.push(WorkerService {
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
}
}
}
Err(e) => {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
return;
}
};
// a third spawn to make sure ServerWorker runs as a non-boxed future.
spawn(ServerWorker {
rx,
rx2,
services,
counter: WorkerCounter::new(idx, waker_queue, counter_clone),
factories: factories.into_boxed_slice(),
state: Default::default(),
shutdown_timeout: config.shutdown_timeout,
});
}
wrk.await
});
});
handle_pair(idx, tx1, tx2, counter)
}
fn restart_service(&mut self, idx: usize, factory_id: usize) {
let factory = &self.factories[factory_id];
trace!("Service {:?} failed, restarting", factory.name(idx));
self.services[idx].status = WorkerServiceStatus::Restarting;
self.state = WorkerState::Restarting(Restart {
factory_id,
token: idx,
fut: factory.create(),
});
WorkerHandle::new(idx, tx1, tx2, avail)
}
fn shutdown(&mut self, force: bool) {
self.services
.iter_mut()
.filter(|srv| srv.status == WorkerServiceStatus::Available)
.for_each(|srv| {
srv.status = if force {
WorkerServiceStatus::Stopped
} else {
WorkerServiceStatus::Stopping
};
if force {
self.services.iter_mut().for_each(|srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopped;
}
});
} else {
self.services.iter_mut().for_each(move |srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopping;
}
});
}
}
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (usize, usize)> {
let mut ready = true;
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (Token, usize)> {
let mut ready = self.conns.available(cx);
let mut failed = None;
for (idx, srv) in self.services.iter_mut().enumerate() {
if srv.status == WorkerServiceStatus::Available
|| srv.status == WorkerServiceStatus::Unavailable
@@ -384,7 +291,7 @@ impl ServerWorker {
if srv.status == WorkerServiceStatus::Unavailable {
trace!(
"Service {:?} is available",
self.factories[srv.factory].name(idx)
self.factories[srv.factory].name(Token(idx))
);
srv.status = WorkerServiceStatus::Available;
}
@@ -395,7 +302,7 @@ impl ServerWorker {
if srv.status == WorkerServiceStatus::Available {
trace!(
"Service {:?} is unavailable",
self.factories[srv.factory].name(idx)
self.factories[srv.factory].name(Token(idx))
);
srv.status = WorkerServiceStatus::Unavailable;
}
@@ -403,168 +310,173 @@ impl ServerWorker {
Poll::Ready(Err(_)) => {
error!(
"Service {:?} readiness check returned error, restarting",
self.factories[srv.factory].name(idx)
self.factories[srv.factory].name(Token(idx))
);
failed = Some((Token(idx), srv.factory));
srv.status = WorkerServiceStatus::Failed;
return Err((idx, srv.factory));
}
}
}
}
Ok(ready)
if let Some(idx) = failed {
Err(idx)
} else {
Ok(ready)
}
}
}
enum WorkerState {
Available,
Unavailable,
Restarting(Restart),
Shutdown(Shutdown),
}
struct Restart {
factory_id: usize,
token: usize,
fut: LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>,
}
// Shutdown keeps the state necessary for server shutdown:
// a Sleep for the interval at which to check shutdown progress,
// an Instant for the start time of the shutdown,
// and a Sender for sending the shutdown outcome (force/graceful) back to the StopCommand caller.
struct Shutdown {
timer: Pin<Box<Sleep>>,
start_from: Instant,
tx: oneshot::Sender<bool>,
}
impl Default for WorkerState {
fn default() -> Self {
Self::Unavailable
}
}
impl Drop for ServerWorker {
fn drop(&mut self) {
// Stop the Arbiter that ServerWorker runs on when dropped.
Arbiter::current().stop();
}
Restarting(
usize,
Token,
LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>,
),
Shutdown(
Pin<Box<Sleep>>,
Pin<Box<Sleep>>,
Option<oneshot::Sender<bool>>,
),
}
impl Future for ServerWorker {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().get_mut();
// `StopWorker` message handler
if let Poll::Ready(Some(Stop { graceful, tx })) = Pin::new(&mut this.rx2).poll_recv(cx)
if let Poll::Ready(Some(StopCommand { graceful, result })) =
Pin::new(&mut self.rx2).poll_recv(cx)
{
let num = this.counter.total();
self.availability.set(false);
let num = num_connections();
if num == 0 {
info!("Shutting down worker, 0 connections");
let _ = tx.send(true);
let _ = result.send(true);
return Poll::Ready(());
} else if graceful {
info!("Graceful worker shutdown, {} connections", num);
this.shutdown(false);
this.state = WorkerState::Shutdown(Shutdown {
timer: Box::pin(sleep(Duration::from_secs(1))),
start_from: Instant::now(),
tx,
});
self.shutdown(false);
let num = num_connections();
if num != 0 {
info!("Graceful worker shutdown, {} connections", num);
self.state = WorkerState::Shutdown(
Box::pin(sleep_until(Instant::now() + Duration::from_secs(1))),
Box::pin(sleep_until(Instant::now() + self.config.shutdown_timeout)),
Some(result),
);
} else {
let _ = result.send(true);
return Poll::Ready(());
}
} else {
info!("Force shutdown worker, {} connections", num);
this.shutdown(true);
let _ = tx.send(false);
self.shutdown(true);
let _ = result.send(false);
return Poll::Ready(());
}
}
match this.state {
WorkerState::Unavailable => match this.check_readiness(cx) {
match self.state {
WorkerState::Unavailable => match self.check_readiness(cx) {
Ok(true) => {
this.state = WorkerState::Available;
self.state = WorkerState::Available;
self.availability.set(true);
self.poll(cx)
}
Ok(false) => Poll::Pending,
Err((token, idx)) => {
this.restart_service(token, idx);
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
self.poll(cx)
}
},
WorkerState::Restarting(ref mut restart) => {
let factory_id = restart.factory_id;
let token = restart.token;
let (token_new, service) = ready!(restart.fut.as_mut().poll(cx))
.unwrap_or_else(|_| {
WorkerState::Restarting(idx, token, ref mut fut) => {
match fut.as_mut().poll(cx) {
Poll::Ready(Ok(item)) => {
// only interested in the first item?
if let Some((token, service)) = item.into_iter().next() {
trace!(
"Service {:?} has been restarted",
self.factories[idx].name(token)
);
self.services[token.0].created(service);
self.state = WorkerState::Unavailable;
return self.poll(cx);
}
}
Poll::Ready(Err(_)) => {
panic!(
"Can not restart {:?} service",
this.factories[factory_id].name(token)
)
});
assert_eq!(token, token_new);
trace!(
"Service {:?} has been restarted",
this.factories[factory_id].name(token)
);
this.services[token].created(service);
this.state = WorkerState::Unavailable;
self.factories[idx].name(token)
);
}
Poll::Pending => return Poll::Pending,
}
self.poll(cx)
}
WorkerState::Shutdown(ref mut shutdown) => {
// Wait for 1 second.
ready!(shutdown.timer.as_mut().poll(cx));
if this.counter.total() == 0 {
// Graceful shutdown.
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
let _ = shutdown.tx.send(true);
}
Poll::Ready(())
} else if shutdown.start_from.elapsed() >= this.shutdown_timeout {
// Timeout forceful shutdown.
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
let _ = shutdown.tx.send(false);
}
Poll::Ready(())
} else {
// Reset timer and wait for 1 second.
let time = Instant::now() + Duration::from_secs(1);
shutdown.timer.as_mut().reset(time);
shutdown.timer.as_mut().poll(cx)
WorkerState::Shutdown(ref mut t1, ref mut t2, ref mut tx) => {
let num = num_connections();
if num == 0 {
let _ = tx.take().unwrap().send(true);
Arbiter::current().stop();
return Poll::Ready(());
}
// check graceful timeout
if Pin::new(t2).poll(cx).is_ready() {
let _ = tx.take().unwrap().send(false);
self.shutdown(true);
Arbiter::current().stop();
return Poll::Ready(());
}
// sleep for 1 second and then check again
if t1.as_mut().poll(cx).is_ready() {
*t1 = Box::pin(sleep_until(Instant::now() + Duration::from_secs(1)));
let _ = t1.as_mut().poll(cx);
}
Poll::Pending
}
// actively poll stream and handle worker command
WorkerState::Available => loop {
match this.check_readiness(cx) {
Ok(true) => {}
match self.check_readiness(cx) {
Ok(true) => (),
Ok(false) => {
trace!("Worker is unavailable");
this.state = WorkerState::Unavailable;
self.availability.set(false);
self.state = WorkerState::Unavailable;
return self.poll(cx);
}
Err((token, idx)) => {
this.restart_service(token, idx);
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.availability.set(false);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
return self.poll(cx);
}
}
// handle incoming io stream
match ready!(Pin::new(&mut this.rx).poll_recv(cx)) {
Some(msg) => {
let guard = this.counter.guard();
let _ = this.services[msg.token].service.call((guard, msg.io));
match Pin::new(&mut self.rx).poll_recv(cx) {
// handle incoming io stream
Poll::Ready(Some(WorkerCommand(msg))) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.service
.call((Some(guard), msg.io));
}
None => return Poll::Ready(()),
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(()),
};
},
}

View File

@@ -1,12 +1,10 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::sync::{mpsc, Arc};
use std::{net, thread, time::Duration};
use std::{net, thread, time};
use actix_rt::{net::TcpStream, time::sleep};
use actix_server::Server;
use actix_service::fn_service;
use actix_utils::future::ok;
use futures_util::future::lazy;
use futures_util::future::{lazy, ok};
fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
@@ -32,13 +30,12 @@ fn test_bind() {
.unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(Duration::from_millis(500));
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
@@ -65,7 +62,7 @@ fn test_listen() {
});
let sys = rx.recv().unwrap();
thread::sleep(Duration::from_millis(500));
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
@@ -74,11 +71,11 @@ fn test_listen() {
#[test]
#[cfg(unix)]
fn test_start() {
use std::io::Read;
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use bytes::Bytes;
use futures_util::sink::SinkExt;
use std::io::Read;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
@@ -113,16 +110,16 @@ fn test_start() {
// pause
let _ = srv.pause();
thread::sleep(Duration::from_millis(200));
thread::sleep(time::Duration::from_millis(200));
let mut conn = net::TcpStream::connect(addr).unwrap();
conn.set_read_timeout(Some(Duration::from_millis(100)))
conn.set_read_timeout(Some(time::Duration::from_millis(100)))
.unwrap();
let res = conn.read_exact(&mut buf);
assert!(res.is_err());
// resume
let _ = srv.resume();
thread::sleep(Duration::from_millis(100));
thread::sleep(time::Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok());
@@ -134,320 +131,60 @@ fn test_start() {
// stop
let _ = srv.stop(false);
thread::sleep(Duration::from_millis(100));
thread::sleep(time::Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_err());
thread::sleep(Duration::from_millis(100));
thread::sleep(time::Duration::from_millis(100));
sys.stop();
let _ = h.join();
}
#[actix_rt::test]
async fn test_max_concurrent_connections() {
// Note:
// A TCP listener accepts connections based on its backlog setting.
//
// The limit tested here, on the other hand, only caps how many concurrent TCP streams a
// worker thread will accept.
use tokio::io::AsyncWriteExt;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();
let max_conn = 3;
let h = thread::spawn(move || {
actix_rt::System::new().block_on(async {
let server = Server::build()
// Set a relatively higher backlog.
.backlog(12)
// Max connections for a worker is 3.
.maxconn(max_conn)
.workers(1)
.disable_signals()
.bind("test", addr, move || {
let counter = counter.clone();
fn_service(move |_io: TcpStream| {
let counter = counter.clone();
async move {
counter.fetch_add(1, Ordering::SeqCst);
sleep(Duration::from_secs(20)).await;
counter.fetch_sub(1, Ordering::SeqCst);
Ok::<(), ()>(())
}
})
})?
.run();
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
});
let (srv, sys) = rx.recv().unwrap();
let mut conns = vec![];
for _ in 0..12 {
let conn = tokio::net::TcpStream::connect(addr).await.unwrap();
conns.push(conn);
}
sleep(Duration::from_secs(5)).await;
// counter remains at 3 even with 12 successful connections;
// the other 9 remain in the backlog.
assert_eq!(max_conn, counter_clone.load(Ordering::SeqCst));
for mut conn in conns {
conn.shutdown().await.unwrap();
}
srv.stop(false).await;
sys.stop();
let _ = h.join().unwrap();
}
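// The cap asserted above comes from a per-worker connection counter: each accepted stream
// holds a guard, and the worker stops accepting once `maxconn` guards are live. The type
// below is a simplified, hypothetical sketch of that idea, not the actual
// actix_utils::counter API.

use std::{cell::Cell, rc::Rc};

#[derive(Clone)]
struct ConnCounter {
    count: Rc<Cell<usize>>,
    max: usize,
}

struct ConnGuard(Rc<Cell<usize>>);

impl ConnCounter {
    fn new(max: usize) -> Self {
        Self { count: Rc::new(Cell::new(0)), max }
    }

    // Hand out a guard only while capacity remains; the caller keeps it for
    // the lifetime of the connection.
    fn acquire(&self) -> Option<ConnGuard> {
        if self.count.get() < self.max {
            self.count.set(self.count.get() + 1);
            Some(ConnGuard(Rc::clone(&self.count)))
        } else {
            None
        }
    }
}

impl Drop for ConnGuard {
    fn drop(&mut self) {
        // Dropping the guard frees a slot for the next accepted connection.
        self.0.set(self.0.get() - 1);
    }
}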
#[actix_rt::test]
async fn test_service_restart() {
use std::task::{Context, Poll};
use actix_service::{fn_factory, Service};
use futures_core::future::LocalBoxFuture;
use tokio::io::AsyncWriteExt;
struct TestService(Arc<AtomicUsize>);
impl Service<TcpStream> for TestService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let TestService(ref counter) = self;
let c = counter.fetch_add(1, Ordering::SeqCst);
// Force the service to restart on first readiness check.
if c > 0 {
Poll::Ready(Ok(()))
} else {
Poll::Ready(Err(()))
}
}
fn call(&self, _: TcpStream) -> Self::Future {
Box::pin(async { Ok(()) })
}
}
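// The Err(()) returned from poll_ready above is what triggers a restart: when a readiness
// check fails, the server drops the service instance and rebuilds it from its factory.
// The helper below is a hypothetical sketch of that contract for a single caller
// (`call_with_restart` is an illustrative name, not an actix-service API).

use actix_service::{Service, ServiceFactory};
use futures_util::future::poll_fn;

async fn call_with_restart<SF, Req>(factory: &SF, req: Req) -> Result<SF::Response, SF::Error>
where
    SF: ServiceFactory<Req, Config = ()>,
    Req: Clone,
{
    loop {
        // Build a fresh service instance; this is the "restart" point.
        let svc = match factory.new_service(()).await {
            Ok(svc) => svc,
            Err(_) => continue,
        };
        // A failed readiness check discards this instance and rebuilds.
        if poll_fn(|cx| svc.poll_ready(cx)).await.is_err() {
            continue;
        }
        return svc.call(req.clone()).await;
    }
}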
#[test]
fn test_configure() {
let addr1 = unused_addr();
let addr2 = unused_addr();
let addr3 = unused_addr();
let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = Arc::new(AtomicUsize::new(0));
let num_clone = num.clone();
let num2_clone = num2.clone();
let num2 = num.clone();
let h = thread::spawn(move || {
let num = num.clone();
actix_rt::System::new().block_on(async {
let server = Server::build()
.backlog(1)
let num = num2.clone();
let sys = actix_rt::System::new();
let srv = sys.block_on(lazy(|_| {
Server::build()
.disable_signals()
.bind("addr1", addr1, move || {
.configure(move |cfg| {
let num = num.clone();
fn_factory(move || {
let num = num.clone();
async move { Ok::<_, ()>(TestService(num)) }
})
})
.unwrap()
.bind("addr2", addr2, move || {
let num2 = num2.clone();
fn_factory(move || {
let num2 = num2.clone();
async move { Ok::<_, ()>(TestService(num2)) }
})
let lst = net::TcpListener::bind(addr3).unwrap();
cfg.bind("addr1", addr1)
.unwrap()
.bind("addr2", addr2)
.unwrap()
.listen("addr3", lst)
.apply(move |rt| {
let num = num.clone();
rt.service("addr1", fn_service(|_| ok::<_, ()>(())));
rt.service("addr3", fn_service(|_| ok::<_, ()>(())));
rt.on_start(lazy(move |_| {
let _ = num.fetch_add(1, Relaxed);
}))
})
})
.unwrap()
.workers(1)
.run();
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
let (server, sys) = rx.recv().unwrap();
for _ in 0..5 {
TcpStream::connect(addr1)
.await
.unwrap()
.shutdown()
.await
.unwrap();
TcpStream::connect(addr2)
.await
.unwrap()
.shutdown()
.await
.unwrap();
}
sleep(Duration::from_secs(3)).await;
assert!(num_clone.load(Ordering::SeqCst) > 5);
assert!(num2_clone.load(Ordering::SeqCst) > 5);
assert!(net::TcpStream::connect(addr1).is_ok());
assert!(net::TcpStream::connect(addr2).is_ok());
assert!(net::TcpStream::connect(addr3).is_ok());
assert_eq!(num.load(Relaxed), 1);
sys.stop();
let _ = server.stop(false);
let _ = h.join().unwrap();
}
#[ignore]
#[actix_rt::test]
async fn worker_restart() {
use actix_service::{Service, ServiceFactory};
use futures_core::future::LocalBoxFuture;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
struct TestServiceFactory(Arc<AtomicUsize>);
impl ServiceFactory<TcpStream> for TestServiceFactory {
type Response = ();
type Error = ();
type Config = ();
type Service = TestService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: Self::Config) -> Self::Future {
let counter = self.0.fetch_add(1, Ordering::Relaxed);
Box::pin(async move { Ok(TestService(counter)) })
}
}
struct TestService(usize);
impl Service<TcpStream> for TestService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, stream: TcpStream) -> Self::Future {
let counter = self.0;
let mut stream = stream.into_std().unwrap();
use std::io::Write;
let str = counter.to_string();
let buf = str.as_bytes();
let mut written = 0;
while written < buf.len() {
if let Ok(n) = stream.write(&buf[written..]) {
written += n;
}
}
stream.flush().unwrap();
stream.shutdown(net::Shutdown::Write).unwrap();
// Force worker 2 to restart the service once.
if counter == 2 {
panic!("panic on purpose")
} else {
Box::pin(async { Ok(()) })
}
}
}
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let counter = Arc::new(AtomicUsize::new(1));
let h = thread::spawn(move || {
let counter = counter.clone();
actix_rt::System::new().block_on(async {
let server = Server::build()
.disable_signals()
.bind("addr", addr, move || TestServiceFactory(counter.clone()))
.unwrap()
.workers(2)
.run();
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
});
let (server, sys) = rx.recv().unwrap();
sleep(Duration::from_secs(3)).await;
let mut buf = [0; 8];
// Worker 1 does not restart and returns its id consistently.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// Worker 2 dies after returning its response.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("2", id);
stream.shutdown().await.unwrap();
// request to worker 1
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// Worker 2 is restarting and the work goes to worker 1.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// Worker 2 has restarted, but worker 1 is still the next to accept a connection.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// Worker 2 accepts connections again, but its service id is now 3.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("3", id);
stream.shutdown().await.unwrap();
sys.stop();
let _ = server.stop(false);
let _ = h.join().unwrap();
let _ = h.join();
}

View File

@@ -3,24 +3,6 @@
## Unreleased - 2021-xx-xx
## 2.0.1 - 2021-10-11
* Documentation fix.
## 2.0.0 - 2021-04-16
* Removed pipeline and related structs/functions. [#335]
[#335]: https://github.com/actix/actix-net/pull/335
## 2.0.0-beta.5 - 2021-03-15
* Add default `Service` trait impl for `Rc<S: Service>` and `&S: Service`. [#288]
* Add `boxed::rc_service` function for constructing `boxed::RcService` type [#290]
[#288]: https://github.com/actix/actix-net/pull/288
[#290]: https://github.com/actix/actix-net/pull/290
## 2.0.0-beta.4 - 2021-02-04
* `Service::poll_ready` and `Service::call` receive `&self`. [#247]
* `apply_fn` and `apply_fn_factory` now receive `Fn(Req, &Service)` function type. [#247]

View File

@@ -1,6 +1,6 @@
[package]
name = "actix-service"
version = "2.0.1"
version = "2.0.0-beta.4"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
@@ -8,8 +8,11 @@ authors = [
]
description = "Service trait and combinators for representing asynchronous request/response operations."
keywords = ["network", "framework", "async", "futures", "service"]
categories = ["network-programming", "asynchronous", "no-std"]
repository = "https://github.com/actix/actix-net"
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-service"
readme = "README.md"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -19,10 +22,8 @@ path = "src/lib.rs"
[dependencies]
futures-core = { version = "0.3.7", default-features = false }
paste = "1"
pin-project-lite = "0.2"
[dev-dependencies]
actix-rt = "2.0.0"
actix-utils = "3.0.0"
futures-util = { version = "0.3.7", default-features = false }

View File

@@ -3,11 +3,11 @@
> Service trait and combinators for representing asynchronous request/response operations.
[![crates.io](https://img.shields.io/crates/v/actix-service?label=latest)](https://crates.io/crates/actix-service)
[![Documentation](https://docs.rs/actix-service/badge.svg?version=2.0.1)](https://docs.rs/actix-service/2.0.1)
[![Documentation](https://docs.rs/actix-service/badge.svg?version=2.0.0-beta.4)](https://docs.rs/actix-service/2.0.0-beta.4)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![License](https://img.shields.io/crates/l/actix-service.svg)
[![Dependency Status](https://deps.rs/crate/actix-service/2.0.1/status.svg)](https://deps.rs/crate/actix-service/2.0.1)
![Download](https://img.shields.io/crates/d/actix-service.svg)
[![Dependency Status](https://deps.rs/crate/actix-service/2.0.0-beta.4/status.svg)](https://deps.rs/crate/actix-service/2.0.0-beta.4)
[![Download](https://img.shields.io/crates/d/actix-service.svg)](https://crates.io/crates/actix-service)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
See documentation for detailed explanations of these components: https://docs.rs/actix-service.

View File

@@ -11,11 +11,11 @@ use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `and_then` combinator, chaining a computation onto the end of another service
/// which completes successfully.
/// Service for the `and_then` combinator, chaining a computation onto the end
/// of another service which completes successfully.
///
/// This is created by the `Pipeline::and_then` method.
pub struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
pub(crate) struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
impl<A, B, Req> AndThenService<A, B, Req> {
/// Create new `AndThen` combinator
@@ -64,7 +64,7 @@ where
}
pin_project! {
pub struct AndThenServiceResponse<A, B, Req>
pub(crate) struct AndThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
@@ -117,7 +117,7 @@ where
}
/// `.and_then()` service factory combinator
pub struct AndThenServiceFactory<A, B, Req>
pub(crate) struct AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
@@ -200,7 +200,7 @@ where
}
pin_project! {
pub struct AndThenServiceFactoryResponse<A, B, Req>
pub(crate) struct AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
@@ -272,9 +272,7 @@ mod tests {
use futures_util::future::lazy;
use crate::{
fn_factory, ok,
pipeline::{pipeline, pipeline_factory},
ready, Ready, Service, ServiceFactory,
fn_factory, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory,
};
struct Srv1(Rc<Cell<usize>>);

View File

@@ -214,11 +214,7 @@ mod tests {
use futures_util::future::lazy;
use super::*;
use crate::{
ok,
pipeline::{pipeline, pipeline_factory},
Ready, Service, ServiceFactory,
};
use crate::{ok, pipeline, pipeline_factory, Ready, Service, ServiceFactory};
#[derive(Clone)]
struct Srv;

View File

@@ -1,69 +1,21 @@
//! Trait object forms of services and service factories.
use alloc::{boxed::Box, rc::Rc};
use core::{future::Future, pin::Pin};
use paste::paste;
use alloc::boxed::Box;
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use crate::{Service, ServiceFactory};
/// A boxed future with no send bound or lifetime parameters.
pub type BoxFuture<T> = Pin<Box<dyn Future<Output = T>>>;
macro_rules! service_object {
($name: ident, $type: tt, $fn_name: ident) => {
paste! {
#[doc = "Type alias for service trait object using `" $type "`."]
pub type $name<Req, Res, Err> = $type<
dyn Service<Req, Response = Res, Error = Err, Future = BoxFuture<Result<Res, Err>>>,
>;
pub type BoxService<Req, Res, Err> =
Box<dyn Service<Req, Response = Res, Error = Err, Future = BoxFuture<Result<Res, Err>>>>;
#[doc = "Wraps service as a trait object using [`" $name "`]."]
pub fn $fn_name<S, Req>(service: S) -> $name<Req, S::Response, S::Error>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
$type::new(ServiceWrapper::new(service))
}
}
};
}
service_object!(BoxService, Box, service);
service_object!(RcService, Rc, rc_service);
struct ServiceWrapper<S> {
inner: S,
}
impl<S> ServiceWrapper<S> {
fn new(inner: S) -> Self {
Self { inner }
}
}
impl<S, Req, Res, Err> Service<Req> for ServiceWrapper<S>
where
S: Service<Req, Response = Res, Error = Err>,
S::Future: 'static,
{
type Response = Res;
type Error = Err;
type Future = BoxFuture<Result<Res, Err>>;
crate::forward_ready!(inner);
fn call(&self, req: Req) -> Self::Future {
Box::pin(self.inner.call(req))
}
}
/// Wrapper for a service factory that will map its services to boxed trait object services.
pub struct BoxServiceFactory<Cfg, Req, Res, Err, InitErr>(Inner<Cfg, Req, Res, Err, InitErr>);
/// Wraps a service factory that returns service trait objects.
/// Create boxed service factory
pub fn factory<SF, Req>(
factory: SF,
) -> BoxServiceFactory<SF::Config, Req, SF::Response, SF::Error, SF::InitError>
@@ -76,7 +28,20 @@ where
SF::Error: 'static,
SF::InitError: 'static,
{
BoxServiceFactory(Box::new(FactoryWrapper(factory)))
BoxServiceFactory(Box::new(FactoryWrapper {
factory,
_t: PhantomData,
}))
}
/// Create boxed service
pub fn service<S, Req>(service: S) -> BoxService<Req, S::Response, S::Error>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
Box::new(ServiceWrapper(service, PhantomData))
}
type Inner<C, Req, Res, Err, InitErr> = Box<
@@ -101,9 +66,9 @@ where
{
type Response = Res;
type Error = Err;
type InitError = InitErr;
type Config = C;
type Service = BoxService<Req, Res, Err>;
type InitError = InitErr;
type Future = BoxFuture<Result<Self::Service, InitErr>>;
@@ -112,9 +77,12 @@ where
}
}
struct FactoryWrapper<SF>(SF);
struct FactoryWrapper<SF, Req, Cfg> {
factory: SF,
_t: PhantomData<(Req, Cfg)>,
}
impl<SF, Req, Cfg, Res, Err, InitErr> ServiceFactory<Req> for FactoryWrapper<SF>
impl<SF, Req, Cfg, Res, Err, InitErr> ServiceFactory<Req> for FactoryWrapper<SF, Req, Cfg>
where
Req: 'static,
Res: 'static,
@@ -127,13 +95,47 @@ where
{
type Response = Res;
type Error = Err;
type InitError = InitErr;
type Config = Cfg;
type Service = BoxService<Req, Res, Err>;
type InitError = InitErr;
type Future = BoxFuture<Result<Self::Service, Self::InitError>>;
fn new_service(&self, cfg: Cfg) -> Self::Future {
let f = self.0.new_service(cfg);
Box::pin(async { f.await.map(|s| Box::new(ServiceWrapper::new(s)) as _) })
let fut = self.factory.new_service(cfg);
Box::pin(async {
let res = fut.await;
res.map(ServiceWrapper::boxed)
})
}
}
struct ServiceWrapper<S: Service<Req>, Req>(S, PhantomData<Req>);
impl<S, Req> ServiceWrapper<S, Req>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
fn boxed(service: S) -> BoxService<Req, S::Response, S::Error> {
Box::new(ServiceWrapper(service, PhantomData))
}
}
impl<S, Req, Res, Err> Service<Req> for ServiceWrapper<S, Req>
where
S: Service<Req, Response = Res, Error = Err>,
S::Future: 'static,
{
type Response = Res;
type Error = Err;
type Future = BoxFuture<Result<Res, Err>>;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(ctx)
}
fn call(&self, req: Req) -> Self::Future {
Box::pin(self.0.call(req))
}
}
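// A short illustration of why the boxing above is useful (assuming actix-rt as a
// dev-dependency): two services with different concrete types erase to the same
// BoxService type and can live in one collection.

use actix_service::{boxed, fn_service, Service as _};

#[actix_rt::main]
async fn main() {
    let double = fn_service(|n: u32| async move { Ok::<u32, ()>(n * 2) });
    let square = fn_service(|n: u32| async move { Ok::<u32, ()>(n * n) });

    // Both collapse to BoxService<u32, u32, ()>.
    let services = vec![boxed::service(double), boxed::service(square)];

    for svc in &services {
        assert!(svc.call(7).await.is_ok());
    }
}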

View File

@@ -1,12 +1,5 @@
use crate::{
and_then::{AndThenService, AndThenServiceFactory},
map::Map,
map_err::MapErr,
transform_err::TransformMapInitErr,
IntoService, IntoServiceFactory, Service, ServiceFactory, Transform,
};
use crate::{dev, Service, ServiceFactory};
/// An extension trait for [`Service`]s that provides a variety of convenient adapters.
pub trait ServiceExt<Req>: Service<Req> {
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
@@ -17,12 +10,12 @@ pub trait ServiceExt<Req>: Service<Req> {
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it, similar to the existing `map` methods in the
/// standard library.
fn map<F, R>(self, f: F) -> Map<Self, F, Req, R>
fn map<F, R>(self, f: F) -> dev::Map<Self, F, Req, R>
where
Self: Sized,
F: FnMut(Self::Response) -> R,
{
Map::new(self, f)
dev::Map::new(self, f)
}
/// Map this service's error to a different error, returning a new service.
@@ -33,34 +26,17 @@ pub trait ServiceExt<Req>: Service<Req> {
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it.
fn map_err<F, E>(self, f: F) -> MapErr<Self, Req, F, E>
fn map_err<F, E>(self, f: F) -> dev::MapErr<Self, Req, F, E>
where
Self: Sized,
F: Fn(Self::Error) -> E,
{
MapErr::new(self, f)
}
/// Call another service after the call to this one has resolved successfully.
///
/// This function can be used to chain two services together and ensure that the second service
/// isn't called until the call to the first service has finished. The result of the call to the first
/// service is used as an input parameter for the second service's call.
///
/// Note that this function consumes the receiving service and returns a wrapped version of it.
fn and_then<I, S1>(self, service: I) -> AndThenService<Self, S1, Req>
where
Self: Sized,
I: IntoService<S1, Self::Response>,
S1: Service<Self::Response, Error = Self::Error>,
{
AndThenService::new(self, service.into_service())
dev::MapErr::new(self, f)
}
}
impl<S, Req> ServiceExt<Req> for S where S: Service<Req> {}
/// An extension trait for [`ServiceFactory`]s that provides a variety of convenient adapters.
pub trait ServiceFactoryExt<Req>: ServiceFactory<Req> {
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
@@ -89,36 +65,6 @@ pub trait ServiceFactoryExt<Req>: ServiceFactory<Req> {
{
crate::map_init_err::MapInitErr::new(self, f)
}
/// Call another service after call to this one has resolved successfully.
fn and_then<I, SF1>(self, factory: I) -> AndThenServiceFactory<Self, SF1, Req>
where
Self: Sized,
Self::Config: Clone,
I: IntoServiceFactory<SF1, Self::Response>,
SF1: ServiceFactory<
Self::Response,
Config = Self::Config,
Error = Self::Error,
InitError = Self::InitError,
>,
{
AndThenServiceFactory::new(self, factory.into_factory())
}
}
impl<SF, Req> ServiceFactoryExt<Req> for SF where SF: ServiceFactory<Req> {}
/// An extension trait for [`Transform`]s that provides a variety of convenient adapters.
pub trait TransformExt<S, Req>: Transform<S, Req> {
/// Return a new `Transform` whose init error is mapped to to a different type.
fn map_init_err<F, E>(self, f: F) -> TransformMapInitErr<Self, S, Req, F, E>
where
Self: Sized,
F: Fn(Self::InitError) -> E + Clone,
{
TransformMapInitErr::new(self, f)
}
}
impl<T, Req> TransformExt<T, Req> for T where T: Transform<T, Req> {}
impl<S, Req> ServiceFactoryExt<Req> for S where S: ServiceFactory<Req> {}
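// A small end-to-end use of the adapters described above (assuming actix-rt as a
// dev-dependency): map converts the response type and map_err converts the error type.

use actix_service::{fn_service, Service as _, ServiceExt as _};

#[actix_rt::main]
async fn main() {
    let svc = fn_service(|n: u32| async move { Ok::<u32, u32>(n) })
        // u32 responses become Strings.
        .map(|n| n.to_string())
        // u32 errors become Strings too.
        .map_err(|e: u32| format!("error code {}", e));

    assert_eq!(svc.call(7).await, Ok("7".to_string()));
}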

View File

@@ -1,4 +1,4 @@
use core::{future::Future, marker::PhantomData};
use core::{future::Future, marker::PhantomData, task::Poll};
use crate::{ok, IntoService, IntoServiceFactory, Ready, Service, ServiceFactory};
@@ -15,7 +15,8 @@ where
/// Create `ServiceFactory` for function that can produce services
///
/// # Examples
/// # Example
///
/// ```
/// use std::io;
/// use actix_service::{fn_factory, fn_service, Service, ServiceFactory};
@@ -61,10 +62,11 @@ where
/// Create `ServiceFactory` for function that accepts config argument and can produce services
///
/// Any function that has the following form `Fn(Config) -> Future<Output = Service>` could act as
/// a `ServiceFactory`.
/// Any function that has the following form `Fn(Config) -> Future<Output = Service>` could
/// act as a `ServiceFactory`.
///
/// # Example
///
/// # Examples
/// ```
/// use std::io;
/// use actix_service::{fn_factory_with_config, fn_service, Service, ServiceFactory};

View File

@@ -1,8 +1,7 @@
//! See [`Service`] docs for information on this crate's foundational trait.
#![no_std]
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
#![warn(missing_docs)]
#![deny(rust_2018_idioms, nonstandard_style)]
#![allow(clippy::type_complexity)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
@@ -22,7 +21,6 @@ mod apply_cfg;
pub mod boxed;
mod ext;
mod fn_service;
mod macros;
mod map;
mod map_config;
mod map_err;
@@ -35,10 +33,11 @@ mod transform_err;
pub use self::apply::{apply_fn, apply_fn_factory};
pub use self::apply_cfg::{apply_cfg, apply_cfg_factory};
pub use self::ext::{ServiceExt, ServiceFactoryExt, TransformExt};
pub use self::ext::{ServiceExt, ServiceFactoryExt};
pub use self::fn_service::{fn_factory, fn_factory_with_config, fn_service};
pub use self::map_config::{map_config, unit_config};
pub use self::transform::{apply, ApplyTransform, Transform};
pub use self::pipeline::{pipeline, pipeline_factory, Pipeline, PipelineFactory};
pub use self::transform::{apply, Transform};
#[allow(unused_imports)]
use self::ready::{err, ok, ready, Ready};
@@ -53,14 +52,8 @@ use self::ready::{err, ok, ready, Ready};
/// async fn(Request) -> Result<Response, Err>
/// ```
///
/// The `Service` trait just generalizes this form. Requests are defined as a generic type parameter
/// and responses and other details are defined as associated types on the trait impl. Notice that
/// this design means that services can receive many request types and converge them to a single
/// response type.
///
/// Services can also have mutable state that influence computation by using a `Cell`, `RefCell`
/// or `Mutex`. Services intentionally do not take `&mut self` to reduce overhead in the
/// common cases.
/// The `Service` trait just generalizes this form where each parameter is described as an
/// associated type on the trait. Services can also have mutable state that influence computation.
///
/// `Service` provides a symmetric and uniform API; the same abstractions can be used to represent
/// both clients and servers. Services describe only _transformation_ operations which encourage
@@ -70,25 +63,23 @@ use self::ready::{err, ok, ready, Ready};
/// ```ignore
/// struct MyService;
///
/// impl Service<u8> for MyService {
/// impl Service for MyService {
/// type Request = u8;
/// type Response = u64;
/// type Error = MyError;
/// type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>;
/// type Future = Pin<Box<Future<Output=Result<Self::Response, Self::Error>>>>;
///
/// fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ... }
///
/// fn call(&self, req: u8) -> Self::Future { ... }
/// fn call(&self, req: Self::Request) -> Self::Future { ... }
/// }
/// ```
///
/// Sometimes it is not necessary to implement the Service trait. For example, the above service
/// could be rewritten as a simple function and passed to [`fn_service`](fn_service()).
/// could be rewritten as a simple function and passed to [fn_service](fn_service()).
///
/// ```ignore
/// async fn my_service(req: u8) -> Result<u64, MyError>;
///
/// let svc = fn_service(my_service)
/// svc.call(123)
/// ```
pub trait Service<Req> {
/// Responses given by the service.
@@ -102,12 +93,13 @@ pub trait Service<Req> {
/// Returns `Ready` when the service is able to process requests.
///
/// If the service is at capacity, then `Pending` is returned and the task is notified when the
/// service becomes ready again. This function is expected to be called while on a task.
/// If the service is at capacity, then `Pending` is returned and the task
/// is notified when the service becomes ready again. This function is
/// expected to be called while on a task.
///
/// This is a best effort implementation. False positives are permitted. It is permitted for
/// the service to return `Ready` from a `poll_ready` call and the next invocation of `call`
/// results in an error.
/// This is a **best effort** implementation. False positives are permitted.
/// It is permitted for the service to return `Ready` from a `poll_ready`
/// call and the next invocation of `call` results in an error.
///
/// # Notes
/// 1. `poll_ready` might be called on a different task to `call`.
@@ -116,26 +108,25 @@ pub trait Service<Req> {
/// Process the request and return the response asynchronously.
///
/// This function is expected to be callable off-task. As such, implementations of `call` should
/// take care to not call `poll_ready`. If the service is at capacity and the request is unable
/// to be handled, the returned `Future` should resolve to an error.
/// This function is expected to be callable off task. As such,
/// implementations should take care to not call `poll_ready`. If the
/// service is at capacity and the request is unable to be handled, the
/// returned `Future` should resolve to an error.
///
/// Invoking `call` without first invoking `poll_ready` is permitted. Implementations must be
/// resilient to this fact.
/// Calling `call` without calling `poll_ready` is permitted. The
/// implementation must be resilient to this fact.
fn call(&self, req: Req) -> Self::Future;
}
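// A small runnable companion to the doc snippet above (assuming actix-rt as a
// dev-dependency): the same u8 -> u64 service written as a plain async closure
// and wrapped with fn_service instead of a hand-written Service impl.

use actix_service::{fn_service, Service as _};

#[actix_rt::main]
async fn main() {
    let svc = fn_service(|req: u8| async move { Ok::<u64, ()>(u64::from(req) * 2) });
    assert_eq!(svc.call(21).await, Ok(42));
}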
/// Factory for creating `Service`s.
///
/// This is useful for cases where new `Service`s must be produced. One case is a TCP
/// server listener: a listener accepts new connections, constructs a new `Service` for each using
/// the `ServiceFactory` trait, and uses the new `Service` to process inbound requests on that new
/// connection.
/// Acts as a service factory. This is useful for cases where new `Service`s
/// must be produced. One case is a TCP server listener. The listener
/// accepts new TCP streams, obtains a new `Service` using the
/// `ServiceFactory` trait, and uses the new `Service` to process inbound
/// requests on that new TCP stream.
///
/// `Config` is a service factory configuration type.
///
/// Simple factories may be able to use [`fn_factory`] or [`fn_factory_with_config`] to
/// reduce boilerplate.
pub trait ServiceFactory<Req> {
/// Responses given by the created services.
type Response;
@@ -152,14 +143,13 @@ pub trait ServiceFactory<Req> {
/// Errors potentially raised while building a service.
type InitError;
/// The future of the `Service` instance.g
/// The future of the `Service` instance.
type Future: Future<Output = Result<Self::Service, Self::InitError>>;
/// Create and return a new service asynchronously.
fn new_service(&self, cfg: Self::Config) -> Self::Future;
}
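// A hedged sketch of the listener scenario described above, using fn_factory and
// assuming actix-rt as a dev-dependency: one factory, one fresh service per
// accepted "connection".

use actix_service::{fn_factory, fn_service, Service as _, ServiceFactory as _};

#[actix_rt::main]
async fn main() {
    let factory = fn_factory(|| async {
        Ok::<_, ()>(fn_service(|req: u8| async move { Ok::<u8, ()>(req) }))
    });

    // Each accepted connection gets its own service instance.
    for conn in 0..3u8 {
        let svc = factory.new_service(()).await.unwrap();
        assert_eq!(svc.call(conn).await, Ok(conn));
    }
}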
// TODO: remove implement on mut reference.
impl<'a, S, Req> Service<Req> for &'a mut S
where
S: Service<Req> + 'a,
@@ -177,23 +167,6 @@ where
}
}
impl<'a, S, Req> Service<Req> for &'a S
where
S: Service<Req> + 'a,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
impl<S, Req> Service<Req> for Box<S>
where
S: Service<Req> + ?Sized,
@@ -211,25 +184,24 @@ where
}
}
impl<S, Req> Service<Req> for Rc<S>
impl<S, Req> Service<Req> for RefCell<S>
where
S: Service<Req> + ?Sized,
S: Service<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
(**self).poll_ready(ctx)
self.borrow().poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
self.borrow().call(request)
}
}
/// This impl is deprecated since v2 because the `Service` trait now receives a shared reference.
impl<S, Req> Service<Req> for RefCell<S>
impl<S, Req> Service<Req> for Rc<RefCell<S>>
where
S: Service<Req>,
{
@@ -322,3 +294,44 @@ where
{
tp.into_service()
}
pub mod dev {
pub use crate::apply::{Apply, ApplyFactory};
pub use crate::fn_service::{
FnService, FnServiceConfig, FnServiceFactory, FnServiceNoConfig,
};
pub use crate::map::{Map, MapServiceFactory};
pub use crate::map_config::{MapConfig, UnitConfig};
pub use crate::map_err::{MapErr, MapErrServiceFactory};
pub use crate::map_init_err::MapInitErr;
pub use crate::transform::ApplyTransform;
pub use crate::transform_err::TransformMapInitErr;
}
#[macro_export]
macro_rules! always_ready {
() => {
#[inline]
fn poll_ready(
&self,
_: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
};
}
#[macro_export]
macro_rules! forward_ready {
($field:ident) => {
#[inline]
fn poll_ready(
&self,
cx: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
self.$field
.poll_ready(cx)
.map_err(::core::convert::Into::into)
}
};
}

View File

@@ -1,185 +0,0 @@
/// An implementation of [`poll_ready`]() that always signals readiness.
///
/// This should only be used for basic leaf services that have no concept of un-readiness.
/// For wrapper or other service types, use [`forward_ready!`] for simple cases or write a bespoke
/// `poll_ready` implementation.
///
/// [`poll_ready`]: crate::Service::poll_ready
///
/// # Examples
/// ```no_run
/// use actix_service::Service;
/// use futures_util::future::{ready, Ready};
///
/// struct IdentityService;
///
/// impl Service<u32> for IdentityService {
/// type Response = u32;
/// type Error = ();
/// type Future = Ready<Result<Self::Response, Self::Error>>;
///
/// actix_service::always_ready!();
///
/// fn call(&self, req: u32) -> Self::Future {
/// ready(Ok(req))
/// }
/// }
/// ```
#[macro_export]
macro_rules! always_ready {
() => {
#[inline]
fn poll_ready(
&self,
_: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
::core::task::Poll::Ready(Ok(()))
}
};
}
/// An implementation of [`poll_ready`] that forwards readiness checks to a
/// named struct field.
///
/// Tuple structs are not supported.
///
/// [`poll_ready`]: crate::Service::poll_ready
///
/// # Examples
/// ```no_run
/// use actix_service::Service;
/// use futures_util::future::{ready, Ready};
///
/// struct WrapperService<S> {
/// inner: S,
/// }
///
/// impl<S> Service<()> for WrapperService<S>
/// where
/// S: Service<()>,
/// {
/// type Response = S::Response;
/// type Error = S::Error;
/// type Future = S::Future;
///
/// actix_service::forward_ready!(inner);
///
/// fn call(&self, req: ()) -> Self::Future {
/// self.inner.call(req)
/// }
/// }
/// ```
#[macro_export]
macro_rules! forward_ready {
($field:ident) => {
#[inline]
fn poll_ready(
&self,
cx: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
self.$field
.poll_ready(cx)
.map_err(::core::convert::Into::into)
}
};
}
#[cfg(test)]
mod tests {
use core::{
cell::Cell,
convert::Infallible,
task::{self, Context, Poll},
};
use futures_util::{
future::{ready, Ready},
task::noop_waker,
};
use crate::Service;
struct IdentityService;
impl Service<u32> for IdentityService {
type Response = u32;
type Error = Infallible;
type Future = Ready<Result<Self::Response, Self::Error>>;
always_ready!();
fn call(&self, req: u32) -> Self::Future {
ready(Ok(req))
}
}
struct CountdownService(Cell<u32>);
impl Service<()> for CountdownService {
type Response = ();
type Error = Infallible;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let count = self.0.get();
if count == 0 {
Poll::Ready(Ok(()))
} else {
self.0.set(count - 1);
cx.waker().wake_by_ref();
Poll::Pending
}
}
fn call(&self, _: ()) -> Self::Future {
ready(Ok(()))
}
}
struct WrapperService<S> {
inner: S,
}
impl<S> Service<()> for WrapperService<S>
where
S: Service<()>,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
forward_ready!(inner);
fn call(&self, _: ()) -> Self::Future {
self.inner.call(())
}
}
#[test]
fn test_always_ready_macro() {
let waker = noop_waker();
let mut cx = task::Context::from_waker(&waker);
let svc = IdentityService;
assert!(svc.poll_ready(&mut cx).is_ready());
assert!(svc.poll_ready(&mut cx).is_ready());
assert!(svc.poll_ready(&mut cx).is_ready());
}
#[test]
fn test_forward_ready_macro() {
let waker = noop_waker();
let mut cx = task::Context::from_waker(&waker);
let svc = WrapperService {
inner: CountdownService(Cell::new(3)),
};
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_ready());
}
}

View File

@@ -180,7 +180,7 @@ where
F: Fn(A::Error) -> E,
{
fn new(fut: A::Future, f: F) -> Self {
MapErrServiceFuture { fut, f }
MapErrServiceFuture { f, fut }
}
}

View File

@@ -1,6 +1,3 @@
// TODO: see if pipeline is necessary
#![allow(dead_code)]
use core::{
marker::PhantomData,
task::{Context, Poll},
@@ -14,7 +11,7 @@ use crate::then::{ThenService, ThenServiceFactory};
use crate::{IntoService, IntoServiceFactory, Service, ServiceFactory};
/// Construct new pipeline with one service in pipeline chain.
pub(crate) fn pipeline<I, S, Req>(service: I) -> Pipeline<S, Req>
pub fn pipeline<I, S, Req>(service: I) -> Pipeline<S, Req>
where
I: IntoService<S, Req>,
S: Service<Req>,
@@ -26,7 +23,7 @@ where
}
/// Construct new pipeline factory with one service factory.
pub(crate) fn pipeline_factory<I, SF, Req>(factory: I) -> PipelineFactory<SF, Req>
pub fn pipeline_factory<I, SF, Req>(factory: I) -> PipelineFactory<SF, Req>
where
I: IntoServiceFactory<SF, Req>,
SF: ServiceFactory<Req>,
@@ -38,7 +35,7 @@ where
}
/// Pipeline service - pipeline allows to compose multiple service into one service.
pub(crate) struct Pipeline<S, Req> {
pub struct Pipeline<S, Req> {
service: S,
_phantom: PhantomData<Req>,
}
@@ -160,7 +157,7 @@ impl<S: Service<Req>, Req> Service<Req> for Pipeline<S, Req> {
}
/// Pipeline factory
pub(crate) struct PipelineFactory<SF, Req> {
pub struct PipelineFactory<SF, Req> {
factory: SF,
_phantom: PhantomData<Req>,
}

View File

@@ -246,11 +246,7 @@ mod tests {
use futures_util::future::lazy;
use crate::{
err, ok,
pipeline::{pipeline, pipeline_factory},
ready, Ready, Service, ServiceFactory,
};
use crate::{err, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory};
#[derive(Clone)]
struct Srv1(Rc<Cell<usize>>);

View File

@@ -9,9 +9,10 @@ use core::{
use futures_core::ready;
use pin_project_lite::pin_project;
use crate::transform_err::TransformMapInitErr;
use crate::{IntoServiceFactory, Service, ServiceFactory};
/// Apply a [`Transform`] to a [`Service`].
/// Apply transform to a service.
pub fn apply<T, S, I, Req>(t: T, factory: I) -> ApplyTransform<T, S, Req>
where
I: IntoServiceFactory<S, Req>,
@@ -21,12 +22,14 @@ where
ApplyTransform::new(t, factory.into_factory())
}
/// Defines the interface of a service factory that wraps inner service during construction.
/// The `Transform` trait defines the interface of a service factory that wraps inner service
/// during construction.
///
/// Transformers wrap an inner service and runs during inbound and/or outbound processing in the
/// service lifecycle. It may modify request and/or response.
/// Transform(middleware) wraps inner service and runs during
/// inbound and/or outbound processing in the request/response lifecycle.
/// It may modify request and/or response.
///
/// For example, a timeout service wrapper:
/// For example, timeout transform:
///
/// ```ignore
/// pub struct Timeout<S> {
@@ -34,38 +37,47 @@ where
/// timeout: Duration,
/// }
///
/// impl<S: Service<Req>, Req> Service<Req> for Timeout<S> {
/// impl<S> Service for Timeout<S>
/// where
/// S: Service,
/// {
/// type Request = S::Request;
/// type Response = S::Response;
/// type Error = TimeoutError<S::Error>;
/// type Future = TimeoutServiceResponse<S>;
///
/// actix_service::forward_ready!(service);
///
/// fn call(&self, req: Req) -> Self::Future {
/// fn call(&self, req: S::Request) -> Self::Future {
/// TimeoutServiceResponse {
/// fut: self.service.call(req),
/// sleep: Sleep::new(clock::now() + self.timeout),
/// sleep: Delay::new(clock::now() + self.timeout),
/// }
/// }
/// }
/// ```
///
/// This wrapper service is decoupled from the underlying service implementation and could be
/// applied to any service.
/// Timeout service in above example is decoupled from underlying service implementation
/// and could be applied to any service.
///
/// The `Transform` trait defines the interface of a service wrapper. `Transform` is often
/// implemented for middleware, defining how to construct a middleware Service. A Service that is
/// constructed by the factory takes the Service that follows it during execution as a parameter,
/// assuming ownership of the next Service.
/// The `Transform` trait defines the interface of a Service factory. `Transform`
/// is often implemented for middleware, defining how to construct a
/// middleware Service. A Service that is constructed by the factory takes
/// the Service that follows it during execution as a parameter, assuming
/// ownership of the next Service.
///
/// A transform for the `Timeout` middleware could look like this:
/// Factory for `Timeout` middleware from the above example could look like this:
///
/// ```ignore
/// pub struct TimeoutTransform {
/// timeout: Duration,
/// }
///
/// impl<S: Service<Req>, Req> Transform<S, Req> for TimeoutTransform {
/// impl<S> Transform<S> for TimeoutTransform
/// where
/// S: Service,
/// {
/// type Request = S::Request;
/// type Response = S::Response;
/// type Error = TimeoutError<S::Error>;
/// type InitError = S::Error;
@@ -73,15 +85,15 @@ where
/// type Future = Ready<Result<Self::Transform, Self::InitError>>;
///
/// fn new_transform(&self, service: S) -> Self::Future {
/// ready(Ok(Timeout {
/// ok(TimeoutService {
/// service,
/// timeout: self.timeout,
/// }))
/// })
/// }
/// }
/// ```
pub trait Transform<S, Req> {
/// Responses produced by the service.
/// Responses given by the service.
type Response;
/// Errors produced by the service.
@@ -98,6 +110,16 @@ pub trait Transform<S, Req> {
/// Creates and returns a new Transform component, asynchronously
fn new_transform(&self, service: S) -> Self::Future;
/// Map this transform's factory error to a different error,
/// returning a new transform service factory.
fn map_init_err<F, E>(self, f: F) -> TransformMapInitErr<Self, S, Req, F, E>
where
Self: Sized,
F: Fn(Self::InitError) -> E + Clone,
{
TransformMapInitErr::new(self, f)
}
}
impl<T, S, Req> Transform<S, Req> for Rc<T>
@@ -130,7 +152,7 @@ where
}
}
/// Apply a [`Transform`] to a [`Service`].
/// `Apply` transform to new service
pub struct ApplyTransform<T, S, Req>(Rc<(T, S)>, PhantomData<Req>);
impl<T, S, Req> ApplyTransform<T, S, Req>
@@ -218,53 +240,3 @@ where
}
}
}
#[cfg(test)]
mod tests {
use core::time::Duration;
use actix_utils::future::{ready, Ready};
use super::*;
use crate::Service;
// pseudo-doctest for Transform trait
pub struct TimeoutTransform {
timeout: Duration,
}
// pseudo-doctest for Transform trait
impl<S: Service<Req>, Req> Transform<S, Req> for TimeoutTransform {
type Response = S::Response;
type Error = S::Error;
type InitError = S::Error;
type Transform = Timeout<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(Timeout {
service,
_timeout: self.timeout,
}))
}
}
// pseudo-doctest for Transform trait
pub struct Timeout<S> {
service: S,
_timeout: Duration,
}
// pseudo-doctest for Transform trait
impl<S: Service<Req>, Req> Service<Req> for Timeout<S> {
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
crate::forward_ready!(service);
fn call(&self, req: Req) -> Self::Future {
self.service.call(req)
}
}
}
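// A compact, hypothetical middleware to show the Transform wiring described above
// (assuming actix-rt and actix-utils as dev-dependencies): the transform builds a
// wrapper service around the inner service it is given.

use actix_service::{apply, fn_factory, fn_service, Service, ServiceFactory, Transform};
use actix_utils::future::{ok, Ready};

struct Noop;

impl<S: Service<u8>> Transform<S, u8> for Noop {
    type Response = S::Response;
    type Error = S::Error;
    type Transform = NoopMiddleware<S>;
    type InitError = ();
    type Future = Ready<Result<Self::Transform, Self::InitError>>;

    fn new_transform(&self, service: S) -> Self::Future {
        // Take ownership of the next service in the chain.
        ok(NoopMiddleware { service })
    }
}

struct NoopMiddleware<S> {
    service: S,
}

impl<S: Service<u8>> Service<u8> for NoopMiddleware<S> {
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    actix_service::forward_ready!(service);

    fn call(&self, req: u8) -> Self::Future {
        // A real middleware would do work before and/or after this call.
        self.service.call(req)
    }
}

#[actix_rt::main]
async fn main() {
    let inner = fn_factory(|| async {
        Ok::<_, ()>(fn_service(|n: u8| async move { Ok::<u8, ()>(n + 1) }))
    });
    let factory = apply(Noop, inner);
    let svc = factory.new_service(()).await.unwrap();
    assert_eq!(svc.call(1).await, Ok(2));
}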

View File

@@ -9,8 +9,10 @@ use pin_project_lite::pin_project;
use super::Transform;
/// Transform for the [`TransformExt::map_init_err`] combinator, changing the type of a new
/// [`Transform`]'s initialization error.
/// Transform for the `map_init_err` combinator, changing the type of a new
/// transform's init error.
///
/// This is created by the `Transform::map_init_err` method.
pub struct TransformMapInitErr<T, S, Req, F, E> {
transform: T,
mapper: F,

View File

@@ -1,28 +1,9 @@
# Changes
## Unreleased - 2021-xx-xx
## 3.0.0-beta.5 - 2021-03-29
* Changed `connect::ssl::rustls::RustlsConnectorService` to return error when `DNSNameRef`
generation failed instead of panic. [#296]
* Remove `connect::ssl::openssl::OpensslConnectServiceFactory`. [#297]
* Remove `connect::ssl::openssl::OpensslConnectService`. [#297]
* Add `connect::ssl::native_tls` module for native tls support. [#295]
* Rename `accept::{nativetls => native_tls}`. [#295]
* Remove `connect::TcpConnectService` type. Service callers that expect a `TcpStream` should use
`connect::ConnectService` instead and call `Connection<T, TcpStream>::into_parts`. [#299]
[#295]: https://github.com/actix/actix-net/pull/295
[#296]: https://github.com/actix/actix-net/pull/296
[#297]: https://github.com/actix/actix-net/pull/297
[#299]: https://github.com/actix/actix-net/pull/299
## 3.0.0-beta.4 - 2021-02-24
* Rename `accept::openssl::{SslStream => TlsStream}`.
* Add `connect::Connect::set_local_addr` to attach local `IpAddr`. [#282]
* `connector::TcpConnector` service will try to bind to local_addr of `IpAddr` when given. [#282]
* Add `connect::Connect::set_local_addr` to attach local `Ipaddr`. [#282]
* `connector::TcpConnector` service would try to bind to local_addr of `IpAddr` when given [#282]
[#282]: https://github.com/actix/actix-net/pull/282

View File

@@ -1,6 +1,6 @@
[package]
name = "actix-tls"
version = "3.0.0-beta.5"
version = "3.0.0-beta.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "TLS acceptor and connector services for Actix ecosystem"
keywords = ["network", "tls", "ssl", "async", "transport"]
@@ -41,9 +41,9 @@ uri = ["http"]
[dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = { version = "2.2.0", default-features = false }
actix-service = "2.0.0"
actix-utils = "3.0.0"
actix-rt = { version = "2.0.0", default-features = false }
actix-service = "2.0.0-beta.4"
actix-utils = "3.0.0-beta.2"
derive_more = "0.99.5"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
@@ -62,9 +62,15 @@ webpki-roots = { version = "0.21", optional = true }
# native-tls
tokio-native-tls = { version = "0.3", optional = true }
[target.'cfg(windows)'.dependencies.tls-openssl]
version = "0.10.9"
package = "openssl"
features = ["vendored"]
optional = true
[dev-dependencies]
actix-rt = "2.2.0"
actix-server = "2.0.0-beta.5"
actix-rt = "2.0.0"
actix-server = "2.0.0-beta.3"
bytes = "1"
env_logger = "0.8"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
@@ -72,5 +78,5 @@ log = "0.4"
trust-dns-resolver = "0.20.0"
[[example]]
name = "tcp-rustls"
name = "basic"
required-features = ["accept", "rustls"]

View File

@@ -31,7 +31,7 @@ use std::{
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::ServiceFactoryExt as _;
use actix_service::pipeline_factory;
use actix_tls::accept::rustls::{Acceptor as RustlsAcceptor, TlsStream};
use futures_util::future::ok;
use log::info;
@@ -39,9 +39,14 @@ use rustls::{
internal::pemfile::certs, internal::pemfile::rsa_private_keys, NoClientAuth, ServerConfig,
};
#[derive(Debug)]
struct ServiceState {
num: Arc<AtomicUsize>,
}
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "info");
env::set_var("RUST_LOG", "actix=trace,basic=trace");
env_logger::init();
let mut tls_config = ServerConfig::new(NoClientAuth::new());
@@ -68,8 +73,7 @@ async fn main() -> io::Result<()> {
let count = Arc::clone(&count);
// Set up TLS service factory
tls_acceptor
.clone()
pipeline_factory(tls_acceptor.clone())
.map_err(|err| println!("Rustls error: {:?}", err))
.and_then(move |stream: TlsStream<TcpStream>| {
let num = count.fetch_add(1, Ordering::Relaxed);

View File

@@ -16,7 +16,7 @@ pub mod openssl;
pub mod rustls;
#[cfg(feature = "native-tls")]
pub mod native_tls;
pub mod nativetls;
pub(crate) static MAX_CONN: AtomicUsize = AtomicUsize::new(256);

View File

@@ -6,7 +6,7 @@ use std::{
};
use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
use actix_rt::net::{ActixStream, Ready};
use actix_rt::net::ActixStream;
use actix_service::{Service, ServiceFactory};
use actix_utils::counter::Counter;
use futures_core::future::LocalBoxFuture;
@@ -80,11 +80,11 @@ impl<T: ActixStream> AsyncWrite for TlsStream<T> {
}
impl<T: ActixStream> ActixStream for TlsStream<T> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
T::poll_read_ready((&**self).get_ref().get_ref().get_ref(), cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
T::poll_write_ready((&**self).get_ref().get_ref().get_ref(), cx)
}
}
@@ -113,7 +113,7 @@ impl Clone for Acceptor {
}
}
impl<T: ActixStream + 'static> ServiceFactory<T> for Acceptor {
impl<T: ActixStream> ServiceFactory<T> for Acceptor {
type Response = TlsStream<T>;
type Error = Error;
type Config = ();
@@ -138,7 +138,16 @@ pub struct NativeTlsAcceptorService {
conns: Counter,
}
impl<T: ActixStream + 'static> Service<T> for NativeTlsAcceptorService {
impl Clone for NativeTlsAcceptorService {
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
conns: self.conns.clone(),
}
}
}
impl<T: ActixStream> Service<T> for NativeTlsAcceptorService {
type Response = TlsStream<T>;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<TlsStream<T>, Error>>;
@@ -153,9 +162,9 @@ impl<T: ActixStream + 'static> Service<T> for NativeTlsAcceptorService {
fn call(&self, io: T) -> Self::Future {
let guard = self.conns.get();
let acceptor = self.acceptor.clone();
let this = self.clone();
Box::pin(async move {
let io = acceptor.accept(io).await;
let io = this.acceptor.accept(io).await;
drop(guard);
io.map(Into::into)
})

View File

@@ -7,7 +7,7 @@ use std::{
};
use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
use actix_rt::net::{ActixStream, Ready};
use actix_rt::net::ActixStream;
use actix_service::{Service, ServiceFactory};
use actix_utils::counter::{Counter, CounterGuard};
use futures_core::{future::LocalBoxFuture, ready};
@@ -82,11 +82,11 @@ impl<T: ActixStream> AsyncWrite for TlsStream<T> {
}
impl<T: ActixStream> ActixStream for TlsStream<T> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
T::poll_read_ready((&**self).get_ref(), cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
T::poll_write_ready((&**self).get_ref(), cx)
}
}

View File

@@ -8,7 +8,7 @@ use std::{
};
use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
use actix_rt::net::{ActixStream, Ready};
use actix_rt::net::ActixStream;
use actix_service::{Service, ServiceFactory};
use actix_utils::counter::{Counter, CounterGuard};
use futures_core::future::LocalBoxFuture;
@@ -82,11 +82,11 @@ impl<T: ActixStream> AsyncWrite for TlsStream<T> {
}
impl<T: ActixStream> ActixStream for TlsStream<T> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
T::poll_read_ready((&**self).get_ref().0, cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
T::poll_write_ready((&**self).get_ref().0, cx)
}
}

View File

@@ -19,7 +19,7 @@ pub trait Address: Unpin + 'static {
impl Address for String {
fn hostname(&self) -> &str {
self
&self
}
}

View File

@@ -72,7 +72,7 @@ pub enum TcpConnectorResponse<T> {
port: u16,
local_addr: Option<IpAddr>,
addrs: Option<VecDeque<SocketAddr>>,
stream: ReusableBoxFuture<Result<TcpStream, io::Error>>,
stream: Option<ReusableBoxFuture<Result<TcpStream, io::Error>>>,
},
Error(Option<ConnectError>),
}
@@ -103,22 +103,18 @@ impl<T: Address> TcpConnectorResponse<T> {
port,
local_addr,
addrs: None,
stream: ReusableBoxFuture::new(connect(addr, local_addr)),
stream: Some(ReusableBoxFuture::new(connect(addr, local_addr))),
},
// When the resolver returns multiple socket addresses for a request, they are popped from
// the front of the queue and the first successful TCP connection is returned.
ConnectAddrs::Multi(mut addrs) => {
let addr = addrs.pop_front().unwrap();
TcpConnectorResponse::Response {
req: Some(req),
port,
local_addr,
addrs: Some(addrs),
stream: ReusableBoxFuture::new(connect(addr, local_addr)),
}
}
ConnectAddrs::Multi(addrs) => TcpConnectorResponse::Response {
req: Some(req),
port,
local_addr,
addrs: Some(addrs),
stream: None,
},
}
}
}
@@ -137,31 +133,40 @@ impl<T: Address> Future for TcpConnectorResponse<T> {
addrs,
stream,
} => loop {
match ready!(stream.poll(cx)) {
Ok(sock) => {
let req = req.take().unwrap();
trace!(
"TCP connector: successfully connected to {:?} - {:?}",
req.hostname(),
sock.peer_addr()
);
return Poll::Ready(Ok(Connection::new(sock, req)));
}
if let Some(new) = stream.as_mut() {
match ready!(new.poll(cx)) {
Ok(sock) => {
let req = req.take().unwrap();
trace!(
"TCP connector: successfully connected to {:?} - {:?}",
req.hostname(),
sock.peer_addr()
);
return Poll::Ready(Ok(Connection::new(sock, req)));
}
Err(err) => {
trace!(
"TCP connector: failed to connect to {:?} port: {}",
req.as_ref().unwrap().hostname(),
port,
);
Err(err) => {
trace!(
"TCP connector: failed to connect to {:?} port: {}",
req.as_ref().unwrap().hostname(),
port,
);
if let Some(addr) = addrs.as_mut().and_then(|addrs| addrs.pop_front()) {
stream.set(connect(addr, *local_addr));
} else {
return Poll::Ready(Err(ConnectError::Io(err)));
if addrs.is_none() || addrs.as_ref().unwrap().is_empty() {
return Poll::Ready(Err(ConnectError::Io(err)));
}
}
}
}
// try to connect
let addr = addrs.as_mut().unwrap().pop_front().unwrap();
let fut = connect(addr, *local_addr);
match stream {
Some(rbf) => rbf.set(fut),
None => *stream = Some(ReusableBoxFuture::new(fut)),
}
},
}
}
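// A condensed, hypothetical sketch of the fallback behaviour implemented above: try each
// resolved address in order and return the first successful connection (or the last error).

use std::{collections::VecDeque, io, net::SocketAddr};

use actix_rt::net::TcpStream;

async fn connect_first(mut addrs: VecDeque<SocketAddr>) -> io::Result<TcpStream> {
    let mut last_err = None;
    while let Some(addr) = addrs.pop_front() {
        match TcpStream::connect(addr).await {
            Ok(stream) => return Ok(stream),
            // Remember the failure and fall through to the next address.
            Err(err) => last_err = Some(err),
        }
    }
    Err(last_err.unwrap_or_else(|| io::Error::new(io::ErrorKind::Other, "no addresses resolved")))
}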

View File

@@ -26,20 +26,20 @@ pub mod ssl;
mod uri;
use actix_rt::net::TcpStream;
use actix_service::{Service, ServiceFactory};
use actix_service::{pipeline, pipeline_factory, Service, ServiceFactory};
pub use self::connect::{Address, Connect, Connection};
pub use self::connector::{TcpConnector, TcpConnectorFactory};
pub use self::error::ConnectError;
pub use self::resolve::{Resolve, Resolver, ResolverFactory};
pub use self::service::{ConnectService, ConnectServiceFactory};
pub use self::service::{ConnectService, ConnectServiceFactory, TcpConnectService};
/// Create TCP connector service.
pub fn new_connector<T: Address + 'static>(
resolver: Resolver,
) -> impl Service<Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError> + Clone
{
ConnectServiceFactory::new(resolver).service()
pipeline(resolver).and_then(TcpConnector)
}
/// Create TCP connector service factory.
@@ -52,7 +52,7 @@ pub fn new_connector_factory<T: Address + 'static>(
Error = ConnectError,
InitError = (),
> + Clone {
ConnectServiceFactory::new(resolver)
pipeline_factory(ResolverFactory::new(resolver)).and_then(TcpConnectorFactory)
}
/// Create connector service with default parameters.

View File

@@ -56,7 +56,7 @@ pub enum Resolver {
/// An interface for custom async DNS resolvers.
///
/// # Usage
/// ```
/// ```rust
/// use std::net::SocketAddr;
///
/// use actix_tls::connect::{Resolve, Resolver};
@@ -164,7 +164,7 @@ impl<T: Address> Service<Connect<T>> for Resolver {
}
Self::Custom(resolver) => {
let resolver = Rc::clone(resolver);
let resolver = Rc::clone(&resolver);
ResolverFuture::LookupCustom(Box::pin(async move {
let addrs = resolver
.lookup(req.hostname(), req.port())

View File

@@ -34,6 +34,14 @@ impl ConnectServiceFactory {
resolver: self.resolver.service(),
}
}
/// Construct new tcp stream service
pub fn tcp_service(&self) -> TcpConnectService {
TcpConnectService {
tcp: self.tcp.service(),
resolver: self.resolver.service(),
}
}
}
impl Clone for ConnectServiceFactory {
@@ -55,7 +63,7 @@ impl<T: Address> ServiceFactory<Connect<T>> for ConnectServiceFactory {
fn new_service(&self, _: ()) -> Self::Future {
let service = self.service();
Box::pin(async { Ok(service) })
Box::pin(async move { Ok(service) })
}
}
@@ -127,3 +135,44 @@ impl<T: Address> Future for ConnectServiceResponse<T> {
}
}
}
#[derive(Clone)]
pub struct TcpConnectService {
tcp: TcpConnector,
resolver: Resolver,
}
impl<T: Address> Service<Connect<T>> for TcpConnectService {
type Response = TcpStream;
type Error = ConnectError;
type Future = TcpConnectServiceResponse<T>;
actix_service::always_ready!();
fn call(&self, req: Connect<T>) -> Self::Future {
TcpConnectServiceResponse {
fut: ConnectFuture::Resolve(self.resolver.call(req)),
tcp: self.tcp,
}
}
}
pub struct TcpConnectServiceResponse<T: Address> {
fut: ConnectFuture<T>,
tcp: TcpConnector,
}
impl<T: Address> Future for TcpConnectServiceResponse<T> {
type Output = Result<TcpStream, ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match ready!(self.fut.poll_connect(cx))? {
ConnectOutput::Resolved(res) => {
self.fut = ConnectFuture::Connect(self.tcp.call(res));
}
ConnectOutput::Connected(conn) => return Poll::Ready(Ok(conn.into_parts().0)),
}
}
}
}

View File

@@ -5,6 +5,3 @@ pub mod openssl;
#[cfg(feature = "rustls")]
pub mod rustls;
#[cfg(feature = "native-tls")]
pub mod native_tls;

View File

@@ -1,88 +0,0 @@
use std::io;
use actix_rt::net::ActixStream;
use actix_service::{Service, ServiceFactory};
use futures_core::future::LocalBoxFuture;
use log::trace;
use tokio_native_tls::{TlsConnector as TokioNativetlsConnector, TlsStream};
pub use tokio_native_tls::native_tls::TlsConnector;
use crate::connect::{Address, Connection};
/// Native-tls connector factory and service
pub struct NativetlsConnector {
connector: TokioNativetlsConnector,
}
impl NativetlsConnector {
pub fn new(connector: TlsConnector) -> Self {
Self {
connector: TokioNativetlsConnector::from(connector),
}
}
}
impl NativetlsConnector {
pub fn service(connector: TlsConnector) -> Self {
Self::new(connector)
}
}
impl Clone for NativetlsConnector {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
}
}
}
impl<T: Address, U> ServiceFactory<Connection<T, U>> for NativetlsConnector
where
U: ActixStream + 'static,
{
type Response = Connection<T, TlsStream<U>>;
type Error = io::Error;
type Config = ();
type Service = Self;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let connector = self.clone();
Box::pin(async { Ok(connector) })
}
}
// NativetlsConnector is both its ServiceFactory and Service impl type,
// as the factory and service share the same type and state.
impl<T, U> Service<Connection<T, U>> for NativetlsConnector
where
T: Address,
U: ActixStream + 'static,
{
type Response = Connection<T, TlsStream<U>>;
type Error = io::Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, stream: Connection<T, U>) -> Self::Future {
let (io, stream) = stream.replace_io(());
let connector = self.connector.clone();
Box::pin(async move {
trace!("SSL Handshake start for: {:?}", stream.host());
connector
.connect(stream.host(), io)
.await
.map(|res| {
trace!("SSL Handshake success: {:?}", stream.host());
stream.replace_io(res).1
})
.map_err(|e| {
trace!("SSL Handshake error: {:?}", e);
io::Error::new(io::ErrorKind::Other, format!("{}", e))
})
})
}
}
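A brief construction sketch for this connector: the module path is an assumption, while the `TlsConnector` re-export and `NativetlsConnector::new` come from this file and `TlsConnector::builder()` is the standard `native-tls` builder.

```rust
// Hedged sketch: building the native-tls connector service defined above.
// The module path `actix_tls::connect::ssl::native_tls` is an assumption.
use actix_tls::connect::ssl::native_tls::{NativetlsConnector, TlsConnector};

fn native_tls_service() -> NativetlsConnector {
    let backend = TlsConnector::builder()
        .build()
        .expect("failed to build native-tls connector");
    // The same value acts as both the `ServiceFactory` and the `Service`, as noted above.
    NativetlsConnector::new(backend)
}
```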

View File

@@ -1,11 +1,13 @@
use std::{
fmt,
future::Future,
io,
pin::Pin,
task::{Context, Poll},
};
use actix_rt::net::ActixStream;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::net::TcpStream;
use actix_service::{Service, ServiceFactory};
use futures_core::{future::LocalBoxFuture, ready};
use log::trace;
@@ -13,7 +15,10 @@ use log::trace;
pub use openssl::ssl::{Error as SslError, HandshakeError, SslConnector, SslMethod};
pub use tokio_openssl::SslStream;
use crate::connect::{Address, Connection};
use crate::connect::resolve::Resolve;
use crate::connect::{
Address, Connect, ConnectError, ConnectService, ConnectServiceFactory, Connection, Resolver,
};
/// OpenSSL connector factory
pub struct OpensslConnector {
@@ -40,8 +45,8 @@ impl Clone for OpensslConnector {
impl<T, U> ServiceFactory<Connection<T, U>> for OpensslConnector
where
T: Address,
U: ActixStream + 'static,
T: Address + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
type Response = Connection<T, SslStream<U>>;
type Error = io::Error;
@@ -70,8 +75,8 @@ impl Clone for OpensslConnectorService {
impl<T, U> Service<Connection<T, U>> for OpensslConnectorService
where
T: Address,
U: ActixStream,
T: Address + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
type Response = Connection<T, SslStream<U>>;
type Error = io::Error;
@@ -107,8 +112,7 @@ pub struct ConnectAsyncExt<T, U> {
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
where
T: Address,
U: ActixStream,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
type Output = Result<Connection<T, SslStream<U>>, io::Error>;
@@ -128,3 +132,115 @@ where
}
}
}
pub struct OpensslConnectServiceFactory {
tcp: ConnectServiceFactory,
openssl: OpensslConnector,
}
impl OpensslConnectServiceFactory {
/// Construct new OpensslConnectService factory
pub fn new(connector: SslConnector) -> Self {
OpensslConnectServiceFactory {
tcp: ConnectServiceFactory::new(Resolver::Default),
openssl: OpensslConnector::new(connector),
}
}
/// Construct new connect service with custom DNS resolver
pub fn with_resolver(connector: SslConnector, resolver: impl Resolve + 'static) -> Self {
OpensslConnectServiceFactory {
tcp: ConnectServiceFactory::new(Resolver::new_custom(resolver)),
openssl: OpensslConnector::new(connector),
}
}
/// Construct OpenSSL connect service
pub fn service(&self) -> OpensslConnectService {
OpensslConnectService {
tcp: self.tcp.service(),
openssl: OpensslConnectorService {
connector: self.openssl.connector.clone(),
},
}
}
}
impl Clone for OpensslConnectServiceFactory {
fn clone(&self) -> Self {
OpensslConnectServiceFactory {
tcp: self.tcp.clone(),
openssl: self.openssl.clone(),
}
}
}
impl<T: Address + 'static> ServiceFactory<Connect<T>> for OpensslConnectServiceFactory {
type Response = SslStream<TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = OpensslConnectService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let service = self.service();
Box::pin(async { Ok(service) })
}
}
#[derive(Clone)]
pub struct OpensslConnectService {
tcp: ConnectService,
openssl: OpensslConnectorService,
}
impl<T: Address + 'static> Service<Connect<T>> for OpensslConnectService {
type Response = SslStream<TcpStream>;
type Error = ConnectError;
type Future = OpensslConnectServiceResponse<T>;
actix_service::always_ready!();
fn call(&self, req: Connect<T>) -> Self::Future {
OpensslConnectServiceResponse {
fut1: Some(self.tcp.call(req)),
fut2: None,
openssl: self.openssl.clone(),
}
}
}
pub struct OpensslConnectServiceResponse<T: Address + 'static> {
fut1: Option<<ConnectService as Service<Connect<T>>>::Future>,
fut2: Option<<OpensslConnectorService as Service<Connection<T, TcpStream>>>::Future>,
openssl: OpensslConnectorService,
}
impl<T: Address> Future for OpensslConnectServiceResponse<T> {
type Output = Result<SslStream<TcpStream>, ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if let Some(ref mut fut) = self.fut1 {
match ready!(Pin::new(fut).poll(cx)) {
Ok(res) => {
let _ = self.fut1.take();
self.fut2 = Some(self.openssl.call(res));
}
Err(e) => return Poll::Ready(Err(e)),
}
}
if let Some(ref mut fut) = self.fut2 {
match ready!(Pin::new(fut).poll(cx)) {
Ok(connect) => Poll::Ready(Ok(connect.into_parts().0)),
Err(e) => Poll::Ready(Err(ConnectError::Io(io::Error::new(
io::ErrorKind::Other,
e,
)))),
}
} else {
Poll::Pending
}
}
}
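Putting the pieces of this hunk together, a hedged construction sketch (module path assumed; `SslConnector::builder(SslMethod::tls())` is the usual `openssl` API re-exported at the top of this file) might look like:

```rust
// Hedged sketch: constructing the OpenSSL connect service added in this hunk.
use actix_tls::connect::ssl::openssl::{
    OpensslConnectService, OpensslConnectServiceFactory, SslConnector, SslMethod,
};

fn openssl_connect_service() -> OpensslConnectService {
    let ssl = SslConnector::builder(SslMethod::tls())
        .expect("failed to create SSL connector builder")
        .build();
    // Resolve the hostname, open the TCP connection, then run the TLS handshake.
    OpensslConnectServiceFactory::new(ssl).service()
}
```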

View File

@@ -1,6 +1,6 @@
use std::{
fmt,
future::Future,
io,
pin::Pin,
sync::Arc,
task::{Context, Poll},
@@ -10,7 +10,7 @@ pub use tokio_rustls::rustls::Session;
pub use tokio_rustls::{client::TlsStream, rustls::ClientConfig};
pub use webpki_roots::TLS_SERVER_ROOTS;
use actix_rt::net::ActixStream;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_service::{Service, ServiceFactory};
use futures_core::{future::LocalBoxFuture, ready};
use log::trace;
@@ -44,13 +44,12 @@ impl Clone for RustlsConnector {
}
}
impl<T, U> ServiceFactory<Connection<T, U>> for RustlsConnector
impl<T: Address, U> ServiceFactory<Connection<T, U>> for RustlsConnector
where
T: Address,
U: ActixStream + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
type Response = Connection<T, TlsStream<U>>;
type Error = io::Error;
type Error = std::io::Error;
type Config = ();
type Service = RustlsConnectorService;
type InitError = ();
@@ -77,55 +76,43 @@ impl Clone for RustlsConnectorService {
impl<T, U> Service<Connection<T, U>> for RustlsConnectorService
where
T: Address,
U: ActixStream,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
type Response = Connection<T, TlsStream<U>>;
type Error = io::Error;
type Future = RustlsConnectorServiceFuture<T, U>;
type Error = std::io::Error;
type Future = ConnectAsyncExt<T, U>;
actix_service::always_ready!();
fn call(&self, connection: Connection<T, U>) -> Self::Future {
trace!("SSL Handshake start for: {:?}", connection.host());
let (stream, connection) = connection.replace_io(());
match DNSNameRef::try_from_ascii_str(connection.host()) {
Ok(host) => RustlsConnectorServiceFuture::Future {
connect: TlsConnector::from(self.connector.clone()).connect(host, stream),
connection: Some(connection),
},
Err(_) => RustlsConnectorServiceFuture::InvalidDns,
fn call(&self, stream: Connection<T, U>) -> Self::Future {
trace!("SSL Handshake start for: {:?}", stream.host());
let (io, stream) = stream.replace_io(());
let host = DNSNameRef::try_from_ascii_str(stream.host())
.expect("rustls currently only handles hostname-based connections. See https://github.com/briansmith/webpki/issues/54");
ConnectAsyncExt {
fut: TlsConnector::from(self.connector.clone()).connect(host, io),
stream: Some(stream),
}
}
}
pub enum RustlsConnectorServiceFuture<T, U> {
/// See issue https://github.com/briansmith/webpki/issues/54
InvalidDns,
Future {
connect: Connect<U>,
connection: Option<Connection<T, ()>>,
},
pub struct ConnectAsyncExt<T, U> {
fut: Connect<U>,
stream: Option<Connection<T, ()>>,
}
impl<T, U> Future for RustlsConnectorServiceFuture<T, U>
impl<T, U> Future for ConnectAsyncExt<T, U>
where
T: Address,
U: ActixStream,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
type Output = Result<Connection<T, TlsStream<U>>, io::Error>;
type Output = Result<Connection<T, TlsStream<U>>, std::io::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.get_mut() {
Self::InvalidDns => Poll::Ready(Err(
io::Error::new(io::ErrorKind::Other, "rustls currently only handles hostname-based connections. See https://github.com/briansmith/webpki/issues/54")
)),
Self::Future { connect, connection } => {
let stream = ready!(Pin::new(connect).poll(cx))?;
let connection = connection.take().unwrap();
trace!("SSL Handshake success: {:?}", connection.host());
Poll::Ready(Ok(connection.replace_io(stream).1))
}
}
let this = self.get_mut();
let stream = ready!(Pin::new(&mut this.fut).poll(cx))?;
let s = this.stream.take().unwrap();
trace!("SSL Handshake success: {:?}", s.host());
Poll::Ready(Ok(s.replace_io(stream).1))
}
}
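The `InvalidDns` variant above exists because webpki DNS names reject IP-address literals; a tiny hedged sketch of that distinction follows (the `tokio_rustls::webpki` re-export path is an assumption).

```rust
// Hedged sketch: why the hostname conversion above can fail. webpki DNS names
// reject IP-address literals (see the issue linked in the error message).
use tokio_rustls::webpki::DNSNameRef;

fn main() {
    assert!(DNSNameRef::try_from_ascii_str("example.com").is_ok());
    assert!(DNSNameRef::try_from_ascii_str("127.0.0.1").is_err()); // IPs are not DNS names
}
```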

View File

@@ -5,7 +5,6 @@
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
#[cfg(feature = "openssl")]
#[allow(unused_extern_crates)]
extern crate tls_openssl as openssl;
#[cfg(feature = "accept")]

View File

@@ -16,9 +16,9 @@ name = "actix_tracing"
path = "src/lib.rs"
[dependencies]
actix-service = "2.0.0"
actix-utils = "3.0.0"
actix-service = "2.0.0-beta.4"
futures-util = { version = "0.3.4", default-features = false }
tracing = "0.1"
tracing-futures = "0.2"

View File

@@ -7,9 +7,9 @@
use core::marker::PhantomData;
use actix_service::{
apply, ApplyTransform, IntoServiceFactory, Service, ServiceFactory, Transform,
apply, dev::ApplyTransform, IntoServiceFactory, Service, ServiceFactory, Transform,
};
use actix_utils::future::{ok, Either, Ready};
use futures_util::future::{ok, Either, Ready};
use tracing_futures::{Instrument, Instrumented};
/// A `Service` implementation that automatically enters/exits tracing spans
@@ -48,9 +48,9 @@ where
.clone()
.map(|span| tracing::span!(parent: &span, tracing::Level::INFO, "future"))
{
Either::right(fut.instrument(span))
Either::Right(fut.instrument(span))
} else {
Either::left(fut)
Either::Left(fut)
}
}
}

View File

@@ -3,26 +3,6 @@
## Unreleased - 2021-xx-xx
## 3.0.0 - 2021-04-16
* No significant changes from `3.0.0-beta.4`.
## 3.0.0-beta.4 - 2021-04-01
* Add `future::Either` type. [#305]
[#305]: https://github.com/actix/actix-net/pull/305
## 3.0.0-beta.3 - 2021-04-01
* Moved `mpsc` to its own crate, `local-channel`. [#301]
* Moved `task::LocalWaker` to its own crate, `local-waker`. [#301]
* Remove `timeout` module. [#301]
* Remove `dispatcher` module. [#301]
* Expose `future` mod with `ready` and `poll_fn` helpers. [#301]
[#301]: https://github.com/actix/actix-net/pull/301
## 3.0.0-beta.2 - 2021-02-06
* Update `actix-rt` to `2.0.0`. [#273]

View File

@@ -1,14 +1,13 @@
[package]
name = "actix-utils"
version = "3.0.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Various utilities used in the Actix ecosystem"
version = "3.0.0-beta.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various network related services and utilities for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-utils"
categories = ["network-programming", "asynchronous"]
repository = "https://github.com/actix/actix-net"
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -17,8 +16,15 @@ name = "actix_utils"
path = "src/lib.rs"
[dependencies]
pin-project-lite = "0.2"
local-waker = "0.1"
actix-codec = "0.4.0-beta.1"
actix-rt = { version = "2.0.0", default-features = false }
actix-service = "2.0.0-beta.4"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
log = "0.4"
pin-project-lite = "0.2.0"
tokio = { version = "1", features = ["sync"] }
[dev-dependencies]
actix-rt = "2.0.0"

View File

@@ -1,18 +1,24 @@
//! Task-notifying counter.
use core::cell::Cell;
use core::task;
use core::{cell::Cell, fmt, task};
use std::rc::Rc;
use local_waker::LocalWaker;
use crate::task::LocalWaker;
#[derive(Clone)]
/// Simple counter with the ability to notify a task on reaching a specific number.
///
/// The counter can be cloned; the total count is shared across all clones.
#[derive(Debug, Clone)]
pub struct Counter(Rc<CounterInner>);
struct CounterInner {
count: Cell<usize>,
capacity: usize,
task: LocalWaker,
}
impl Counter {
/// Create `Counter` instance with max value.
/// Create `Counter` instance and set max value.
pub fn new(capacity: usize) -> Self {
Counter(Rc::new(CounterInner {
capacity,
@@ -21,26 +27,38 @@ impl Counter {
}))
}
/// Create new counter guard, incrementing the counter.
/// Get counter guard.
pub fn get(&self) -> CounterGuard {
CounterGuard::new(self.0.clone())
}
/// Notify current task and return true if counter is at capacity.
/// Check if the counter is not at capacity. If the counter is at capacity,
/// this registers a notification for the current task.
pub fn available(&self, cx: &mut task::Context<'_>) -> bool {
self.0.available(cx)
}
/// Get total number of acquired guards.
/// Get total number of acquired counts
pub fn total(&self) -> usize {
self.0.count.get()
}
}
struct CounterInner {
count: Cell<usize>,
capacity: usize,
task: LocalWaker,
pub struct CounterGuard(Rc<CounterInner>);
impl CounterGuard {
fn new(inner: Rc<CounterInner>) -> Self {
inner.inc();
CounterGuard(inner)
}
}
impl Unpin for CounterGuard {}
impl Drop for CounterGuard {
fn drop(&mut self) {
self.0.dec();
}
}
impl CounterInner {
@@ -65,32 +83,3 @@ impl CounterInner {
}
}
}
impl fmt::Debug for CounterInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Counter")
.field("count", &self.count.get())
.field("capacity", &self.capacity)
.field("task", &self.task)
.finish()
}
}
/// An RAII structure that keeps the underlying counter incremented until this guard is dropped.
#[derive(Debug)]
pub struct CounterGuard(Rc<CounterInner>);
impl CounterGuard {
fn new(inner: Rc<CounterInner>) -> Self {
inner.inc();
CounterGuard(inner)
}
}
impl Unpin for CounterGuard {}
impl Drop for CounterGuard {
fn drop(&mut self) {
self.0.dec();
}
}
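A minimal usage sketch for the counter, restricted to the API shown in this file (`new`, `get`, `total`, and guard drop):

```rust
// Minimal usage sketch for `Counter`, using only the API visible above.
use actix_utils::counter::Counter;

fn main() {
    let counter = Counter::new(2);      // capacity of 2
    let g1 = counter.get();             // acquiring a guard increments the shared count
    let g2 = counter.get();
    assert_eq!(counter.total(), 2);
    drop(g1);                           // dropping a guard decrements it again
    assert_eq!(counter.total(), 1);
    drop(g2);
    assert_eq!(counter.total(), 0);
}
```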

View File

@@ -0,0 +1,319 @@
//! Framed dispatcher service and related utilities.
use core::{
fmt,
future::Future,
mem,
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use actix_service::{IntoService, Service};
use log::debug;
use pin_project_lite::pin_project;
use tokio::sync::mpsc;
/// Framed transport errors.
pub enum DispatcherError<E, U: Encoder<I> + Decoder, I> {
Service(E),
Encoder(<U as Encoder<I>>::Error),
Decoder(<U as Decoder>::Error),
}
impl<E, U: Encoder<I> + Decoder, I> From<E> for DispatcherError<E, U, I> {
fn from(err: E) -> Self {
DispatcherError::Service(err)
}
}
impl<E, U: Encoder<I> + Decoder, I> fmt::Debug for DispatcherError<E, U, I>
where
E: fmt::Debug,
<U as Encoder<I>>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
DispatcherError::Service(ref e) => write!(fmt, "DispatcherError::Service({:?})", e),
DispatcherError::Encoder(ref e) => write!(fmt, "DispatcherError::Encoder({:?})", e),
DispatcherError::Decoder(ref e) => write!(fmt, "DispatcherError::Decoder({:?})", e),
}
}
}
impl<E, U: Encoder<I> + Decoder, I> fmt::Display for DispatcherError<E, U, I>
where
E: fmt::Display,
<U as Encoder<I>>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
DispatcherError::Service(ref e) => write!(fmt, "{}", e),
DispatcherError::Encoder(ref e) => write!(fmt, "{:?}", e),
DispatcherError::Decoder(ref e) => write!(fmt, "{:?}", e),
}
}
}
pub enum Message<T> {
Item(T),
Close,
}
pin_project! {
/// Dispatcher is a future that reads frames from a `Framed` object and passes them to the service.
pub struct Dispatcher<S, T, U, I>
where
S: Service<<U as Decoder>::Item, Response = I>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead,
T: AsyncWrite,
U: Encoder<I>,
U: Decoder,
I: 'static,
<U as Encoder<I>>::Error: fmt::Debug,
{
service: S,
state: State<S, U, I>,
#[pin]
framed: Framed<T, U>,
rx: mpsc::UnboundedReceiver<Result<Message<I>, S::Error>>,
tx: mpsc::UnboundedSender<Result<Message<I>, S::Error>>,
}
}
enum State<S, U, I>
where
S: Service<<U as Decoder>::Item>,
U: Encoder<I> + Decoder,
{
Processing,
Error(DispatcherError<S::Error, U, I>),
FramedError(DispatcherError<S::Error, U, I>),
FlushAndStop,
Stopping,
}
impl<S, U, I> State<S, U, I>
where
S: Service<<U as Decoder>::Item>,
U: Encoder<I> + Decoder,
{
fn take_error(&mut self) -> DispatcherError<S::Error, U, I> {
match mem::replace(self, State::Processing) {
State::Error(err) => err,
_ => panic!(),
}
}
fn take_framed_error(&mut self) -> DispatcherError<S::Error, U, I> {
match mem::replace(self, State::Processing) {
State::FramedError(err) => err,
_ => panic!(),
}
}
}
impl<S, T, U, I> Dispatcher<S, T, U, I>
where
S: Service<<U as Decoder>::Item, Response = I>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder<I>,
I: 'static,
<U as Decoder>::Error: fmt::Debug,
<U as Encoder<I>>::Error: fmt::Debug,
{
pub fn new<F>(framed: Framed<T, U>, service: F) -> Self
where
F: IntoService<S, <U as Decoder>::Item>,
{
let (tx, rx) = mpsc::unbounded_channel();
Dispatcher {
framed,
rx,
tx,
service: service.into_service(),
state: State::Processing,
}
}
/// Get the sender side of the dispatcher's internal message channel.
pub fn tx(&self) -> mpsc::UnboundedSender<Result<Message<I>, S::Error>> {
self.tx.clone()
}
/// Get reference to a service wrapped by `Dispatcher` instance.
pub fn service(&self) -> &S {
&self.service
}
/// Get mutable reference to a service wrapped by `Dispatcher` instance.
pub fn service_mut(&mut self) -> &mut S {
&mut self.service
}
/// Get reference to a framed instance wrapped by `Dispatcher` instance.
pub fn framed(&self) -> &Framed<T, U> {
&self.framed
}
/// Get mutable reference to a framed instance wrapped by `Dispatcher` instance.
pub fn framed_mut(&mut self) -> &mut Framed<T, U> {
&mut self.framed
}
fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> bool
where
S: Service<<U as Decoder>::Item, Response = I>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder<I>,
I: 'static,
<U as Encoder<I>>::Error: fmt::Debug,
{
loop {
let this = self.as_mut().project();
match this.service.poll_ready(cx) {
Poll::Ready(Ok(_)) => {
let item = match this.framed.next_item(cx) {
Poll::Ready(Some(Ok(el))) => el,
Poll::Ready(Some(Err(err))) => {
*this.state = State::FramedError(DispatcherError::Decoder(err));
return true;
}
Poll::Pending => return false,
Poll::Ready(None) => {
*this.state = State::Stopping;
return true;
}
};
let tx = this.tx.clone();
let fut = this.service.call(item);
actix_rt::spawn(async move {
let item = fut.await;
let _ = tx.send(item.map(Message::Item));
});
}
Poll::Pending => return false,
Poll::Ready(Err(err)) => {
*this.state = State::Error(DispatcherError::Service(err));
return true;
}
}
}
}
/// Write frames to the framed object.
fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> bool
where
S: Service<<U as Decoder>::Item, Response = I>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder<I>,
I: 'static,
<U as Encoder<I>>::Error: fmt::Debug,
{
loop {
let mut this = self.as_mut().project();
while !this.framed.is_write_buf_full() {
match this.rx.poll_recv(cx) {
Poll::Ready(Some(Ok(Message::Item(msg)))) => {
if let Err(err) = this.framed.as_mut().write(msg) {
*this.state = State::FramedError(DispatcherError::Encoder(err));
return true;
}
}
Poll::Ready(Some(Ok(Message::Close))) => {
*this.state = State::FlushAndStop;
return true;
}
Poll::Ready(Some(Err(err))) => {
*this.state = State::Error(DispatcherError::Service(err));
return true;
}
Poll::Ready(None) | Poll::Pending => break,
}
}
if !this.framed.is_write_buf_empty() {
match this.framed.poll_flush(cx) {
Poll::Pending => break,
Poll::Ready(Ok(_)) => (),
Poll::Ready(Err(err)) => {
debug!("Error sending data: {:?}", err);
*this.state = State::FramedError(DispatcherError::Encoder(err));
return true;
}
}
} else {
break;
}
}
false
}
}
impl<S, T, U, I> Future for Dispatcher<S, T, U, I>
where
S: Service<<U as Decoder>::Item, Response = I>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder<I>,
I: 'static,
<U as Encoder<I>>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
type Output = Result<(), DispatcherError<S::Error, U, I>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
match this.state {
State::Processing => {
if self.as_mut().poll_read(cx) || self.as_mut().poll_write(cx) {
self.poll(cx)
} else {
Poll::Pending
}
}
State::Error(_) => {
// flush write buffer
if !this.framed.is_write_buf_empty() && this.framed.poll_flush(cx).is_pending()
{
return Poll::Pending;
}
Poll::Ready(Err(this.state.take_error()))
}
State::FlushAndStop => {
if !this.framed.is_write_buf_empty() {
this.framed.poll_flush(cx).map(|res| {
if let Err(err) = res {
debug!("Error sending data: {:?}", err);
}
Ok(())
})
} else {
Poll::Ready(Ok(()))
}
}
State::FramedError(_) => Poll::Ready(Err(this.state.take_framed_error())),
State::Stopping => Poll::Ready(Ok(())),
}
}
}
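A rough echo sketch of driving this dispatcher over a TCP stream; `BytesCodec` and its item/error types are assumptions about `actix-codec` and are not shown in this diff.

```rust
// Hedged sketch: an echo dispatcher over a TCP stream. `BytesCodec` decoding to
// `BytesMut` and encoding `Bytes` with `io::Error` errors is an assumption.
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use actix_service::fn_service;
use actix_utils::dispatcher::Dispatcher;
use bytes::{Bytes, BytesMut};

async fn echo(stream: TcpStream) {
    let framed = Framed::new(stream, BytesCodec);
    // Each decoded frame is passed to the service; its response is written back out.
    let service =
        fn_service(|msg: BytesMut| async move { Ok::<Bytes, std::io::Error>(msg.freeze()) });
    if let Err(err) = Dispatcher::new(framed, service).await {
        eprintln!("dispatcher error: {:?}", err);
    }
}
```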

View File

@@ -1,91 +0,0 @@
//! A symmetric either future.
use core::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
pin_project! {
/// Combines two different futures that have the same output type.
///
/// Construct variants with [`Either::left`] and [`Either::right`].
///
/// # Examples
/// ```
/// use actix_utils::future::{ready, Ready, Either};
///
/// # async fn run() {
/// let res = Either::<_, Ready<usize>>::left(ready(42));
/// assert_eq!(res.await, 42);
///
/// let res = Either::<Ready<usize>, _>::right(ready(43));
/// assert_eq!(res.await, 43);
/// # }
/// ```
#[project = EitherProj]
#[derive(Debug, Clone)]
pub enum Either<L, R> {
/// A value of type `L`.
#[allow(missing_docs)]
Left { #[pin] value: L },
/// A value of type `R`.
#[allow(missing_docs)]
Right { #[pin] value: R },
}
}
impl<L, R> Either<L, R> {
/// Creates new `Either` using left variant.
pub fn left(value: L) -> Either<L, R> {
Either::Left { value }
}
/// Creates new `Either` using right variant.
pub fn right(value: R) -> Either<L, R> {
Either::Right { value }
}
}
impl<T> Either<T, T> {
/// Unwraps into inner value when left and right have a common type.
pub fn into_inner(self) -> T {
match self {
Either::Left { value } => value,
Either::Right { value } => value,
}
}
}
impl<L, R> Future for Either<L, R>
where
L: Future,
R: Future<Output = L::Output>,
{
type Output = L::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.project() {
EitherProj::Left { value } => value.poll(cx),
EitherProj::Right { value } => value.poll(cx),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::future::{ready, Ready};
#[actix_rt::test]
async fn test_either() {
let res = Either::<_, Ready<usize>>::left(ready(42));
assert_eq!(res.await, 42);
let res = Either::<Ready<usize>, _>::right(ready(43));
assert_eq!(res.await, 43);
}
}

View File

@@ -1,9 +0,0 @@
//! Asynchronous values.
mod either;
mod poll_fn;
mod ready;
pub use self::either::Either;
pub use self::poll_fn::{poll_fn, PollFn};
pub use self::ready::{err, ok, ready, Ready};

View File

@@ -1,65 +0,0 @@
//! Simple "poll function" future and factory.
use core::{
fmt,
future::Future,
pin::Pin,
task::{Context, Poll},
};
/// Creates a future driven by the provided function that receives a task context.
pub fn poll_fn<F, T>(f: F) -> PollFn<F>
where
F: FnMut(&mut Context<'_>) -> Poll<T>,
{
PollFn { f }
}
/// A Future driven by the inner function.
pub struct PollFn<F> {
f: F,
}
impl<F> Unpin for PollFn<F> {}
impl<F> fmt::Debug for PollFn<F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PollFn").finish()
}
}
impl<F, T> Future for PollFn<F>
where
F: FnMut(&mut Context<'_>) -> Poll<T>,
{
type Output = T;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
(self.f)(cx)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[actix_rt::test]
async fn test_poll_fn() {
let res = poll_fn(|_| Poll::Ready(42)).await;
assert_eq!(res, 42);
let mut i = 5;
let res = poll_fn(|cx| {
i -= 1;
if i > 0 {
cx.waker().wake_by_ref();
Poll::Pending
} else {
Poll::Ready(42)
}
})
.await;
assert_eq!(res, 42);
}
}

View File

@@ -1,122 +0,0 @@
//! When MSRV is 1.48, replace with `core::future::Ready` and `core::future::ready()`.
use core::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
/// Future for the [`ready`](ready()) function.
///
/// Panic will occur if polled more than once.
///
/// # Examples
/// ```
/// use actix_utils::future::ready;
///
/// // async
/// # async fn run() {
/// let a = ready(1);
/// assert_eq!(a.await, 1);
/// # }
///
/// // sync
/// let a = ready(1);
/// assert_eq!(a.into_inner(), 1);
/// ```
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Ready<T> {
val: Option<T>,
}
impl<T> Ready<T> {
/// Unwraps the value from this immediately ready future.
#[inline]
pub fn into_inner(mut self) -> T {
self.val.take().unwrap()
}
}
impl<T> Unpin for Ready<T> {}
impl<T> Future for Ready<T> {
type Output = T;
#[inline]
fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<T> {
let val = self.val.take().expect("Ready polled after completion");
Poll::Ready(val)
}
}
/// Creates a future that is immediately ready with a value.
///
/// # Examples
/// ```no_run
/// use actix_utils::future::ready;
///
/// # async fn run() {
/// let a = ready(1);
/// assert_eq!(a.await, 1);
/// # }
///
/// // sync
/// let a = ready(1);
/// assert_eq!(a.into_inner(), 1);
/// ```
pub fn ready<T>(val: T) -> Ready<T> {
Ready { val: Some(val) }
}
/// Creates a future that is immediately ready with a success value.
///
/// # Examples
/// ```no_run
/// use actix_utils::future::ok;
///
/// # async fn run() {
/// let a = ok::<_, ()>(1);
/// assert_eq!(a.await, Ok(1));
/// # }
/// ```
pub fn ok<T, E>(val: T) -> Ready<Result<T, E>> {
Ready { val: Some(Ok(val)) }
}
/// Creates a future that is immediately ready with an error value.
///
/// # Examples
/// ```no_run
/// use actix_utils::future::err;
///
/// # async fn run() {
/// let a = err::<(), _>(1);
/// assert_eq!(a.await, Err(1));
/// # }
/// ```
pub fn err<T, E>(err: E) -> Ready<Result<T, E>> {
Ready {
val: Some(Err(err)),
}
}
#[cfg(test)]
mod tests {
use futures_util::task::noop_waker;
use super::*;
#[test]
#[should_panic]
fn multiple_poll_panics() {
let waker = noop_waker();
let mut cx = Context::from_waker(&waker);
let mut ready = ready(1);
assert_eq!(Pin::new(&mut ready).poll(&mut cx), Poll::Ready(1));
// panic!
let _ = Pin::new(&mut ready).poll(&mut cx);
}
}

View File

@@ -1,9 +1,11 @@
//! Various utilities used in the Actix ecosystem.
//! Various network related services and utilities for the Actix ecosystem.
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(missing_docs)]
#![allow(clippy::type_complexity)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
pub mod counter;
pub mod future;
pub mod dispatcher;
pub mod task;
pub mod timeout;

View File

@@ -1,9 +1,3 @@
//! A synchronization primitive for thread-local task wakeup.
//!
//! See docs for [`LocalWaker`].
#![no_std]
use core::{cell::Cell, fmt, marker::PhantomData, task::Waker};
/// A synchronization primitive for task wakeup.
@@ -15,14 +9,11 @@ use core::{cell::Cell, fmt, marker::PhantomData, task::Waker};
/// logical task.
///
/// Consumers should call [`register`] before checking the result of a computation and producers
/// should call [`wake`] after producing the computation (this differs from the usual `thread::park`
/// should call `wake` after producing the computation (this differs from the usual `thread::park`
/// pattern). It is also permitted for [`wake`] to be called _before_ [`register`]. This results in
/// a no-op.
///
/// A single `LocalWaker` may be reused for any number of calls to [`register`] or [`wake`].
///
/// [`register`]: LocalWaker::register
/// [`wake`]: LocalWaker::wake
#[derive(Default)]
pub struct LocalWaker {
pub(crate) waker: Cell<Option<Waker>>,

actix-utils/src/timeout.rs (new file, 256 lines)
View File

@@ -0,0 +1,256 @@
//! Service that applies a timeout to requests.
//!
//! If the response does not complete within the specified timeout, the response will be aborted.
use core::future::Future;
use core::marker::PhantomData;
use core::pin::Pin;
use core::task::{Context, Poll};
use core::{fmt, time};
use actix_rt::time::{sleep, Sleep};
use actix_service::{IntoService, Service, Transform};
use pin_project_lite::pin_project;
/// Applies a timeout to requests.
#[derive(Debug)]
pub struct Timeout<E = ()> {
timeout: time::Duration,
_t: PhantomData<E>,
}
/// Timeout error
pub enum TimeoutError<E> {
/// Service error
Service(E),
/// Service call timeout
Timeout,
}
impl<E> From<E> for TimeoutError<E> {
fn from(err: E) -> Self {
TimeoutError::Service(err)
}
}
impl<E: fmt::Debug> fmt::Debug for TimeoutError<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimeoutError::Service(e) => write!(f, "TimeoutError::Service({:?})", e),
TimeoutError::Timeout => write!(f, "TimeoutError::Timeout"),
}
}
}
impl<E: fmt::Display> fmt::Display for TimeoutError<E> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimeoutError::Service(e) => e.fmt(f),
TimeoutError::Timeout => write!(f, "Service call timeout"),
}
}
}
impl<E: PartialEq> PartialEq for TimeoutError<E> {
fn eq(&self, other: &TimeoutError<E>) -> bool {
match self {
TimeoutError::Service(e1) => match other {
TimeoutError::Service(e2) => e1 == e2,
TimeoutError::Timeout => false,
},
TimeoutError::Timeout => matches!(other, TimeoutError::Timeout),
}
}
}
impl<E> Timeout<E> {
pub fn new(timeout: time::Duration) -> Self {
Timeout {
timeout,
_t: PhantomData,
}
}
}
impl<E> Clone for Timeout<E> {
fn clone(&self) -> Self {
Timeout::new(self.timeout)
}
}
impl<S, E, Req> Transform<S, Req> for Timeout<E>
where
S: Service<Req>,
{
type Response = S::Response;
type Error = TimeoutError<S::Error>;
type Transform = TimeoutService<S, Req>;
type InitError = E;
type Future = TimeoutFuture<Self::Transform, Self::InitError>;
fn new_transform(&self, service: S) -> Self::Future {
let service = TimeoutService {
service,
timeout: self.timeout,
_phantom: PhantomData,
};
TimeoutFuture {
service: Some(service),
_err: PhantomData,
}
}
}
pub struct TimeoutFuture<T, E> {
service: Option<T>,
_err: PhantomData<E>,
}
impl<T, E> Unpin for TimeoutFuture<T, E> {}
impl<T, E> Future for TimeoutFuture<T, E> {
type Output = Result<T, E>;
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(Ok(self.get_mut().service.take().unwrap()))
}
}
/// Applies a timeout to requests.
#[derive(Debug, Clone)]
pub struct TimeoutService<S, Req> {
service: S,
timeout: time::Duration,
_phantom: PhantomData<Req>,
}
impl<S, Req> TimeoutService<S, Req>
where
S: Service<Req>,
{
pub fn new<U>(timeout: time::Duration, service: U) -> Self
where
U: IntoService<S, Req>,
{
TimeoutService {
timeout,
service: service.into_service(),
_phantom: PhantomData,
}
}
}
impl<S, Req> Service<Req> for TimeoutService<S, Req>
where
S: Service<Req>,
{
type Response = S::Response;
type Error = TimeoutError<S::Error>;
type Future = TimeoutServiceResponse<S, Req>;
actix_service::forward_ready!(service);
fn call(&self, request: Req) -> Self::Future {
TimeoutServiceResponse {
fut: self.service.call(request),
sleep: sleep(self.timeout),
}
}
}
pin_project! {
/// `TimeoutService` response future
#[derive(Debug)]
pub struct TimeoutServiceResponse<S, Req>
where
S: Service<Req>
{
#[pin]
fut: S::Future,
#[pin]
sleep: Sleep,
}
}
impl<S, Req> Future for TimeoutServiceResponse<S, Req>
where
S: Service<Req>,
{
type Output = Result<S::Response, TimeoutError<S::Error>>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
// First, try polling the future
if let Poll::Ready(res) = this.fut.poll(cx) {
return match res {
Ok(v) => Poll::Ready(Ok(v)),
Err(e) => Poll::Ready(Err(TimeoutError::Service(e))),
};
}
// Now check the sleep
this.sleep.poll(cx).map(|_| Err(TimeoutError::Timeout))
}
}
#[cfg(test)]
mod tests {
use core::task::Poll;
use core::time::Duration;
use super::*;
use actix_service::{apply, fn_factory, Service, ServiceFactory};
use futures_core::future::LocalBoxFuture;
struct SleepService(Duration);
impl Service<()> for SleepService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<(), ()>>;
actix_service::always_ready!();
fn call(&self, _: ()) -> Self::Future {
let sleep = actix_rt::time::sleep(self.0);
Box::pin(async move {
sleep.await;
Ok(())
})
}
}
#[actix_rt::test]
async fn test_success() {
let resolution = Duration::from_millis(100);
let wait_time = Duration::from_millis(50);
let timeout = TimeoutService::new(resolution, SleepService(wait_time));
assert_eq!(timeout.call(()).await, Ok(()));
}
#[actix_rt::test]
async fn test_timeout() {
let resolution = Duration::from_millis(100);
let wait_time = Duration::from_millis(500);
let timeout = TimeoutService::new(resolution, SleepService(wait_time));
assert_eq!(timeout.call(()).await, Err(TimeoutError::Timeout));
}
#[actix_rt::test]
async fn test_timeout_new_service() {
let resolution = Duration::from_millis(100);
let wait_time = Duration::from_millis(500);
let timeout = apply(
Timeout::new(resolution),
fn_factory(|| async { Ok::<_, ()>(SleepService(wait_time)) }),
);
let srv = timeout.new_service(&()).await.unwrap();
assert_eq!(srv.call(()).await, Err(TimeoutError::Timeout));
}
}

View File

@@ -1,7 +0,0 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.1.1 - 2021-03-29
* Move local mpsc channel to its own crate.

View File

@@ -1,21 +0,0 @@
[package]
name = "local-channel"
version = "0.1.2"
description = "A non-threadsafe multi-producer, single-consumer, futures-aware, FIFO queue"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
repository = "https://github.com/actix/actix-net.git"
keywords = ["channel", "local", "futures"]
license = "MIT OR Apache-2.0"
edition = "2018"
[dependencies]
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
local-waker = "0.1"
[dev-dependencies]
tokio = { version = "1", features = ["rt", "macros"] }

View File

@@ -1,3 +0,0 @@
//! Non-thread-safe channels.
pub mod mpsc;

Some files were not shown because too many files have changed in this diff.