Mirror of https://github.com/fafhrd91/actix-net, synced 2025-08-12 05:17:14 +02:00.

Compare commits: server-v2. ... codec-v0.4 (62 commits)
Commits (SHA1):
2080f4c149, b2cef8fcdb, 15279eaf3d, 7d98247cb0, 5b537c7b10, 81d7295486, 581e599209, 1c8fcaebbc,
a1d15f2e08, 70ea5322ab, 303666278a, 669e868370, 47f278b17a, ca77d8d835, 00775884f8, 4ff8a2cf68,
5c555a9408, ca435b2575, 9fa8d7fc5a, b03fe7c5b6, 6fed1c3e7d, c3d697df97, 80a362712f, 2b1edb95ea,
4644fa41cf, 98c37fe47d, b9455d2ca9, 0183b0f8cc, b122a1ae1a, 4303058243, 48b2e11509, 5379a46a99,
f8f1ac94bc, 82cd5b8290, c65e8524b2, a83dfaa162, e4ec956001, 95cba659ff, 5687e81d9f, a0fe2a9b2e,
ad22a93466, c2d5b2398a, 5b1ff30dd9, e1317bb3a0, dcea009158, 13c18b8a51, 06b17d6a43, 605ec25143,
3824493fd3, 3be3e11aa5, 6a5ea0342b, 23b1f63345, 3aa037d07d, cf21df14f2, a1bf8662c9, 6f4d2220fa,
54b22f9fce, 983abec77d, e4d4ae21ee, 8ad5f58d38, 613b2be51f, b2e9640952
@@ -1,3 +1,26 @@
[alias]
chk = "hack check --workspace --all-features --tests --examples"
lint = "hack --clean-per-run clippy --workspace --tests --examples"
lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo"
lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"

ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"

# just check the library (without dev deps)
ci-check-min = "hack --workspace check --no-default-features"
ci-check-lib = "hack --workspace --feature-powerset --exclude-features=io-uring check"
ci-check-lib-linux = "hack --workspace --feature-powerset check"

# check everything
ci-check = "hack --workspace --feature-powerset --exclude-features=io-uring check --tests --examples"
ci-check-linux = "hack --workspace --feature-powerset check --tests --examples"

# tests avoiding io-uring feature
ci-test = "hack test --workspace --exclude=actix-rt --exclude=actix-server --all-features --lib --tests --no-fail-fast -- --nocapture"
ci-test-rt = " hack --feature-powerset --exclude-features=io-uring test --package=actix-rt --lib --tests --no-fail-fast -- --nocapture"
ci-test-server = "hack --feature-powerset --exclude-features=io-uring test --package=actix-server --lib --tests --no-fail-fast -- --nocapture"

# test with io-uring feature
ci-test-rt-linux = " hack --feature-powerset test --package=actix-rt --lib --tests --no-fail-fast -- --nocapture"
ci-test-server-linux = "hack --feature-powerset test --package=actix-server --lib --tests --no-fail-fast -- --nocapture"

# test lower msrv
ci-test-lower-msrv = "hack --workspace --exclude=actix-server --exclude=actix-tls --feature-powerset test --lib --tests --no-fail-fast -- --nocapture"
.github/workflows/ci.yml (vendored, 155 changed lines)
@@ -18,7 +18,7 @@ jobs:
|
||||
- { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
|
||||
- { name: Windows (32-bit), os: windows-latest, triple: i686-pc-windows-msvc }
|
||||
version:
|
||||
- 1.46.0 # MSRV
|
||||
- 1.52.0 # MSRV for -server and -tls
|
||||
- stable
|
||||
- nightly
|
||||
|
||||
@@ -64,8 +64,7 @@ jobs:
|
||||
|
||||
# - name: Generate Cargo.lock
|
||||
# uses: actions-rs/cargo@v1
|
||||
# with:
|
||||
# command: generate-lockfile
|
||||
# with: { command: generate-lockfile }
|
||||
# - name: Cache Dependencies
|
||||
# uses: Swatinem/rust-cache@v1.2.0
|
||||
|
||||
@@ -75,57 +74,135 @@ jobs:
|
||||
command: install
|
||||
args: cargo-hack
|
||||
|
||||
- name: check minimal
|
||||
- name: check lib
|
||||
if: >
|
||||
matrix.target.os != 'ubuntu-latest'
|
||||
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: hack
|
||||
args: check --workspace --no-default-features
|
||||
|
||||
- name: check minimal + tests
|
||||
with: { command: ci-check-lib }
|
||||
- name: check lib
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: hack
|
||||
args: check --workspace --no-default-features --tests --examples
|
||||
|
||||
- name: check default
|
||||
with: { command: ci-check-lib-linux }
|
||||
- name: check lib
|
||||
if: matrix.target.triple == 'x86_64-pc-windows-gnu'
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --workspace --tests --examples
|
||||
|
||||
with: { command: ci-check-min }
|
||||
|
||||
- name: check full
|
||||
# TODO: compile OpenSSL and run tests on MinGW
|
||||
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
|
||||
if: >
|
||||
matrix.target.os != 'ubuntu-latest'
|
||||
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --workspace --all-features --tests --examples
|
||||
with: { command: ci-check }
|
||||
- name: check all
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: ci-check-linux }
|
||||
|
||||
- name: tests
|
||||
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
|
||||
if: >
|
||||
matrix.target.os != 'ubuntu-latest'
|
||||
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
|
||||
run: |
|
||||
cargo ci-test
|
||||
cargo ci-test-rt
|
||||
cargo ci-test-server
|
||||
- name: tests
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
run: |
|
||||
sudo bash -c "ulimit -Sl 512 && ulimit -Hl 512 && PATH=$PATH:/usr/share/rust/.cargo/bin && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-rt-linux && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-server-linux"
|
||||
|
||||
- name: Clear the cargo caches
|
||||
run: |
|
||||
cargo install cargo-cache --version 0.6.2 --no-default-features --features ci-autoclean
|
||||
cargo-cache
|
||||
|
||||
build_and_test_lower_msrv:
|
||||
name: Linux / 1.46 (lower MSRV)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install 1.46.0 # MSRV for all but -server and -tls
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: 1.46.0-x86_64-unknown-linux-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Install cargo-hack
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: test
|
||||
args: --workspace --all-features --no-fail-fast -- --nocapture
|
||||
command: install
|
||||
args: cargo-hack
|
||||
|
||||
- name: tests
|
||||
run: |
|
||||
sudo bash -c "ulimit -Sl 512 && ulimit -Hl 512 && PATH=$PATH:/usr/share/rust/.cargo/bin && RUSTUP_TOOLCHAIN=1.46 cargo ci-test-lower-msrv"
|
||||
|
||||
- name: Clear the cargo caches
|
||||
run: |
|
||||
cargo install cargo-cache --version 0.6.2 --no-default-features --features ci-autoclean
|
||||
cargo-cache
|
||||
|
||||
coverage:
|
||||
name: coverage
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust (nightly)
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable-x86_64-unknown-linux-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: generate-lockfile }
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.3.0
|
||||
|
||||
- name: Generate coverage file
|
||||
if: >
|
||||
matrix.target.os == 'ubuntu-latest'
|
||||
&& matrix.version == 'stable'
|
||||
&& github.ref == 'refs/heads/master'
|
||||
if: github.ref == 'refs/heads/master'
|
||||
run: |
|
||||
cargo install cargo-tarpaulin
|
||||
cargo tarpaulin --out Xml --verbose
|
||||
- name: Upload to Codecov
|
||||
if: >
|
||||
matrix.target.os == 'ubuntu-latest'
|
||||
&& matrix.version == 'stable'
|
||||
&& github.ref == 'refs/heads/master'
|
||||
if: github.ref == 'refs/heads/master'
|
||||
uses: codecov/codecov-action@v1
|
||||
with:
|
||||
file: cobertura.xml
|
||||
with: { file: cobertura.xml }
|
||||
|
||||
- name: Clear the cargo caches
|
||||
run: |
|
||||
cargo install cargo-cache --no-default-features --features ci-autoclean
|
||||
cargo-cache
|
||||
rustdoc:
|
||||
name: rustdoc
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust (nightly)
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: nightly-x86_64-unknown-linux-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: generate-lockfile }
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.3.0
|
||||
|
||||
- name: Install cargo-hack
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: install
|
||||
args: cargo-hack
|
||||
|
||||
- name: doc tests
|
||||
uses: actions-rs/cargo@v1
|
||||
timeout-minutes: 40
|
||||
with: { command: ci-doctest }
|
||||
|
.github/workflows/clippy-fmt.yml (vendored, 2 changed lines)
@@ -39,4 +39,4 @@ jobs:
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --tests --all-features
args: --workspace --all-features --tests --examples --bins -- -Dclippy::todo
@@ -2,7 +2,6 @@
members = [
    "actix-codec",
    "actix-macros",
    "actix-router",
    "actix-rt",
    "actix-server",
    "actix-service",
@@ -17,7 +16,6 @@ members = [
[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-macros = { path = "actix-macros" }
actix-router = { path = "actix-router" }
actix-rt = { path = "actix-rt" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
@@ -27,3 +25,8 @@ actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }
local-channel = { path = "local-channel" }
local-waker = { path = "local-waker" }

[profile.release]
lto = true
opt-level = 3
codegen-units = 1
@@ -1,6 +1,15 @@
# Changes

## Unreleased - 2021-xx-xx
* Added `LinesCodec`. [#338]
* `Framed::poll_ready` flushes when the buffer is full. [#409]

[#338]: https://github.com/actix/actix-net/pull/338
[#409]: https://github.com/actix/actix-net/pull/409


## 0.4.0 - 2021-04-20
* No significant changes since v0.4.0-beta.1.


## 0.4.0-beta.1 - 2020-12-28
@@ -23,28 +32,28 @@
## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec` `.encode()` performance
* Simplify `BytesCodec` `.decode()`
* Improve `BytesCodec::encode()` performance.
* Simplify `BytesCodec::decode()`.
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` check emptiness of read buffer.


## 0.2.0 - 2019-12-10
* Use specific futures dependencies
* Use specific futures dependencies.


## 0.2.0-alpha.4
* Fix buffer remaining capacity calculation
* Fix buffer remaining capacity calculation.


## 0.2.0-alpha.3
* Use tokio 0.2
* Fix low/high watermark for write/read buffers
* Use tokio 0.2.
* Fix low/high watermark for write/read buffers.


## 0.2.0-alpha.2
* Migrated to `std::future`
* Migrated to `std::future`.


## 0.1.2 - 2019-03-27
@@ -56,4 +65,4 @@


## 0.1.0 - 2018-12-09
* Move codec to separate crate
* Move codec to separate crate.
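The unreleased entries above add `LinesCodec` [#338] and make `Framed::poll_ready` flush when the write buffer is full [#409]. As a quick illustration, here is a minimal sketch (not part of the diff) that drives the new `LinesCodec` directly through the re-exported `Decoder`/`Encoder` traits; it mirrors the behaviour exercised by the `lines_decoder`/`lines_encoder` tests added later in this compare: encode appends an LF, decode splits on LF or CRLF and drops the trailing carriage return.

// Minimal LinesCodec sketch; mirrors the tests in actix-codec/src/lines.rs from this diff.
use actix_codec::{Decoder, Encoder, LinesCodec};
use bytes::BytesMut;

fn main() -> std::io::Result<()> {
    let mut codec = LinesCodec::default();

    // Encoding appends the item plus a trailing LF.
    let mut out = BytesMut::new();
    codec.encode("hello", &mut out)?;
    assert_eq!(&out[..], b"hello\n");

    // Decoding splits on LF or CRLF and strips the carriage return.
    let mut input = BytesMut::from("line 1\nline 2\r\npartial");
    assert_eq!(codec.decode(&mut input)?, Some("line 1".to_owned()));
    assert_eq!(codec.decode(&mut input)?, Some("line 2".to_owned()));

    // No newline buffered yet, so `decode` yields nothing; `decode_eof` drains the rest.
    assert_eq!(codec.decode(&mut input)?, None);
    assert_eq!(codec.decode_eof(&mut input)?, Some("partial".to_owned()));
    Ok(())
}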
@@ -1,12 +1,13 @@
[package]
name = "actix-codec"
version = "0.4.0-beta.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
version = "0.4.0"
authors = [
    "Nikolay Kim <fafhrd91@gmail.com>",
    "Rob Ede <robjtede@icloud.com>",
]
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec"
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -21,6 +22,15 @@ bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
log = "0.4"
memchr = "2.3"
pin-project-lite = "0.2"
tokio = "1"
tokio = "1.5.1"
tokio-util = { version = "0.6", features = ["codec", "io"] }

[dev-dependencies]
criterion = { version = "0.3", features = ["html_reports"] }
tokio-test = "0.4.2"

[[bench]]
name = "lines"
harness = false
actix-codec/benches/lines.rs (new file, 57 lines)
@@ -0,0 +1,57 @@
use bytes::BytesMut;
use criterion::{criterion_group, criterion_main, Criterion};

const INPUT: &[u8] = include_bytes!("./lorem.txt");

fn bench_lines_codec(c: &mut Criterion) {
    let mut decode_group = c.benchmark_group("lines decode");

    decode_group.bench_function("actix", |b| {
        b.iter(|| {
            use actix_codec::Decoder as _;

            let mut codec = actix_codec::LinesCodec::default();
            let mut buf = BytesMut::from(INPUT);
            while let Ok(Some(_bytes)) = codec.decode_eof(&mut buf) {}
        });
    });

    decode_group.bench_function("tokio", |b| {
        b.iter(|| {
            use tokio_util::codec::Decoder as _;

            let mut codec = tokio_util::codec::LinesCodec::new();
            let mut buf = BytesMut::from(INPUT);
            while let Ok(Some(_bytes)) = codec.decode_eof(&mut buf) {}
        });
    });

    decode_group.finish();

    let mut encode_group = c.benchmark_group("lines encode");

    encode_group.bench_function("actix", |b| {
        b.iter(|| {
            use actix_codec::Encoder as _;

            let mut codec = actix_codec::LinesCodec::default();
            let mut buf = BytesMut::new();
            codec.encode("123", &mut buf).unwrap();
        });
    });

    encode_group.bench_function("tokio", |b| {
        b.iter(|| {
            use tokio_util::codec::Encoder as _;

            let mut codec = tokio_util::codec::LinesCodec::new();
            let mut buf = BytesMut::new();
            codec.encode("123", &mut buf).unwrap();
        });
    });

    encode_group.finish();
}

criterion_group!(benches, bench_lines_codec);
criterion_main!(benches);
actix-codec/benches/lorem.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
Lorem ipsum dolor sit amet, consectetur adipiscing elit. In tortor quam, pulvinar sit amet vestibulum eget, tincidunt non urna. Sed eu sem in felis malesuada venenatis. Suspendisse volutpat aliquet nisi, in condimentum nibh convallis id. Quisque gravida felis scelerisque ipsum aliquam consequat. Praesent libero odio, malesuada vitae odio quis, aliquam aliquet enim. In fringilla ut turpis nec pharetra. Duis eu posuere metus. Sed a aliquet massa. Mauris non tempus mi, quis mattis libero. Vivamus ornare ex at semper cursus. Vestibulum sed facilisis erat, aliquet mollis est. In interdum, magna iaculis ultricies elementum, mi ante vestibulum mauris, nec viverra turpis lorem quis ante. Proin in auctor erat. Vivamus dictum congue massa, fermentum bibendum leo pretium quis. Integer dapibus sodales ligula, sit amet imperdiet felis suscipit eu. Phasellus non ornare enim.
Nam feugiat neque sit amet hendrerit rhoncus. Nunc suscipit molestie vehicula. Aenean vulputate porttitor augue, sit amet molestie dolor volutpat vitae. Nulla vitae condimentum eros. Aliquam tristique purus at metus lacinia egestas. Cras euismod lorem eu orci lobortis, sed tincidunt nisl laoreet. Ut suscipit fermentum mi, et euismod tortor. Pellentesque vitae tempor quam, sed dignissim mi. Suspendisse luctus lacus vitae ligula blandit vehicula. Quisque interdum iaculis tincidunt. Nunc elementum mi vitae tempor placerat. Suspendisse potenti. Donec blandit laoreet ipsum, quis rhoncus velit vulputate sed.
Aliquam suscipit lectus eros, at maximus dolor efficitur quis. Integer blandit tortor orci, nec mattis nunc eleifend ac. Mauris pharetra vel quam quis lacinia. Duis lobortis condimentum nunc ut facilisis. Praesent arcu nisi, porta sit amet viverra sit amet, pellentesque ut nisi. Nunc gravida tortor eu ligula tempus, in interdum magna pretium. Fusce eu ornare sapien. Nullam pellentesque cursus eros. Nam orci massa, faucibus eget leo eget, elementum vulputate erat. Fusce vehicula augue et dui hendrerit vulputate. Mauris neque lacus, porttitor ut condimentum id, efficitur ac neque. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Donec accumsan, lectus fermentum elementum tristique, ipsum tortor mollis ante, non lacinia nibh ex quis sapien.
Donec pharetra, elit eget rutrum luctus, urna ligula facilisis lorem, sit amet rhoncus ante est eu mi. Vestibulum vestibulum ultricies interdum. Nulla tincidunt ante non hendrerit venenatis. Curabitur vestibulum turpis erat, id efficitur quam venenatis eu. Fusce nulla sem, dapibus vel quam feugiat, ornare fermentum ligula. Praesent tempus tincidunt mauris, non pellentesque felis varius in. Aenean eu arcu ligula. Morbi dapibus maximus nulla a pharetra. Fusce leo metus, luctus ut cursus non, sollicitudin non lectus. Integer pellentesque eleifend erat, vel gravida purus tempus a. Mauris id vestibulum quam. Nunc vitae ullamcorper metus, pharetra placerat enim. Fusce in ultrices nisl. Curabitur justo mauris, dignissim in aliquam sit amet, sollicitudin ut risus. Cras tempor rutrum justo, non tincidunt est maximus at.
Aliquam ac velit tincidunt, ullamcorper velit sit amet, pulvinar nisi. Nullam rhoncus rhoncus egestas. Cras ac luctus nisi. Mauris sit amet risus at magna volutpat ultrices quis ac dui. Aliquam condimentum tellus purus, vel sagittis odio vulputate at. Sed ut finibus tellus. Aliquam tincidunt vehicula diam.
@@ -1,11 +1,10 @@
use bytes::{Buf, Bytes, BytesMut};
use std::io;

use bytes::{Buf, Bytes, BytesMut};

use super::{Decoder, Encoder};

/// Bytes codec.
///
/// Reads/Writes chunks of bytes from a stream.
/// Bytes codec. Reads/writes chunks of bytes from a stream.
#[derive(Debug, Copy, Clone)]
pub struct BytesCodec;
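The only change to `BytesCodec` in this range is the condensed doc comment above. For context, a minimal standalone sketch (assumed usage, not taken from the diff) of the pass-through behaviour that comment describes, using the `Decoder`/`Encoder` traits re-exported by actix-codec: encode appends the chunk to the write buffer, decode hands back whatever bytes are currently buffered.

// Hedged BytesCodec sketch; buffer contents are illustrative.
use actix_codec::{BytesCodec, Decoder, Encoder};
use bytes::{Bytes, BytesMut};

fn main() -> std::io::Result<()> {
    let mut codec = BytesCodec;

    // Encoding copies the chunk into the write buffer as-is.
    let mut write_buf = BytesMut::new();
    codec.encode(Bytes::from_static(b"ping"), &mut write_buf)?;
    assert_eq!(&write_buf[..], b"ping");

    // Decoding returns all currently buffered bytes as one frame (None when empty).
    let mut read_buf = BytesMut::from(&b"pong"[..]);
    let frame = codec.decode(&mut read_buf)?;
    assert_eq!(frame.as_deref(), Some(&b"pong"[..]));
    Ok(())
}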
@@ -21,14 +21,13 @@ bitflags::bitflags! {
|
||||
}
|
||||
|
||||
pin_project_lite::pin_project! {
|
||||
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
|
||||
/// the `Encoder` and `Decoder` traits to encode and decode frames.
|
||||
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using the `Encoder` and
|
||||
/// `Decoder` traits to encode and decode frames.
|
||||
///
|
||||
/// Raw I/O objects work with byte sequences, but higher-level code usually
|
||||
/// wants to batch these into meaningful chunks, called "frames". This
|
||||
/// method layers framing on top of an I/O object, by using the `Encoder`/`Decoder`
|
||||
/// traits to handle encoding and decoding of message frames. Note that
|
||||
/// the incoming and outgoing frame types may be distinct.
|
||||
/// Raw I/O objects work with byte sequences, but higher-level code usually wants to batch these
|
||||
/// into meaningful chunks, called "frames". This method layers framing on top of an I/O object,
|
||||
/// by using the `Encoder`/`Decoder` traits to handle encoding and decoding of message frames.
|
||||
/// Note that the incoming and outgoing frame types may be distinct.
|
||||
pub struct Framed<T, U> {
|
||||
#[pin]
|
||||
io: T,
|
||||
@@ -44,10 +43,9 @@ where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
U: Decoder,
|
||||
{
|
||||
/// This function returns a *single* object that is both `Stream` and
|
||||
/// `Sink`; grouping this into a single object is often useful for layering
|
||||
/// things like gzip or TLS, which require both read and write access to the
|
||||
/// underlying object.
|
||||
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
|
||||
/// a single object is often useful for layering things like gzip or TLS, which require both
|
||||
/// read and write access to the underlying object.
|
||||
pub fn new(io: T, codec: U) -> Framed<T, U> {
|
||||
Framed {
|
||||
io,
|
||||
@@ -70,21 +68,18 @@ impl<T, U> Framed<T, U> {
|
||||
&mut self.codec
|
||||
}
|
||||
|
||||
/// Returns a reference to the underlying I/O stream wrapped by
|
||||
/// `Frame`.
|
||||
/// Returns a reference to the underlying I/O stream wrapped by `Frame`.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn io_ref(&self) -> &T {
|
||||
&self.io
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying I/O stream.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn io_mut(&mut self) -> &mut T {
|
||||
&mut self.io
|
||||
}
|
||||
@@ -183,16 +178,15 @@ impl<T, U> Framed<T, U> {
|
||||
U: Decoder,
|
||||
{
|
||||
loop {
|
||||
let mut this = self.as_mut().project();
|
||||
// Repeatedly call `decode` or `decode_eof` as long as it is
|
||||
// "readable". Readable is defined as not having returned `None`. If
|
||||
// the upstream has returned EOF, and the decoder is no longer
|
||||
// readable, it can be assumed that the decoder will never become
|
||||
let this = self.as_mut().project();
|
||||
// Repeatedly call `decode` or `decode_eof` as long as it is "readable". Readable is
|
||||
// defined as not having returned `None`. If the upstream has returned EOF, and the
|
||||
// decoder is no longer readable, it can be assumed that the decoder will never become
|
||||
// readable again, at which point the stream is terminated.
|
||||
|
||||
if this.flags.contains(Flags::READABLE) {
|
||||
if this.flags.contains(Flags::EOF) {
|
||||
match this.codec.decode_eof(&mut this.read_buf) {
|
||||
match this.codec.decode_eof(this.read_buf) {
|
||||
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
|
||||
Ok(None) => return Poll::Ready(None),
|
||||
Err(e) => return Poll::Ready(Some(Err(e))),
|
||||
@@ -201,7 +195,7 @@ impl<T, U> Framed<T, U> {
|
||||
|
||||
log::trace!("attempting to decode a frame");
|
||||
|
||||
match this.codec.decode(&mut this.read_buf) {
|
||||
match this.codec.decode(this.read_buf) {
|
||||
Ok(Some(frame)) => {
|
||||
log::trace!("frame decoded from buffer");
|
||||
return Poll::Ready(Some(Ok(frame)));
|
||||
@@ -215,7 +209,7 @@ impl<T, U> Framed<T, U> {
|
||||
|
||||
debug_assert!(!this.flags.contains(Flags::EOF));
|
||||
|
||||
// Otherwise, try to read more data and try again. Make sure we've got room
|
||||
// Otherwise, try to read more data and try again. Make sure we've got room.
|
||||
let remaining = this.read_buf.capacity() - this.read_buf.len();
|
||||
if remaining < LW {
|
||||
this.read_buf.reserve(HW - remaining)
|
||||
@@ -306,11 +300,11 @@ where
|
||||
{
|
||||
type Error = U::Error;
|
||||
|
||||
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
if self.is_write_ready() {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Poll::Pending
|
||||
self.flush(cx)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -341,13 +335,12 @@ where
|
||||
}
|
||||
|
||||
impl<T, U> Framed<T, U> {
|
||||
/// This function returns a *single* object that is both `Stream` and
|
||||
/// `Sink`; grouping this into a single object is often useful for layering
|
||||
/// things like gzip or TLS, which require both read and write access to the
|
||||
/// underlying object.
|
||||
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
|
||||
/// a single object is often useful for layering things like gzip or TLS, which require both
|
||||
/// read and write access to the underlying object.
|
||||
///
|
||||
/// These objects take a stream, a read buffer and a write buffer. These
|
||||
/// fields can be obtained from an existing `Framed` with the `into_parts` method.
|
||||
/// These objects take a stream, a read buffer and a write buffer. These fields can be obtained
|
||||
/// from an existing `Framed` with the `into_parts` method.
|
||||
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
|
||||
Framed {
|
||||
io: parts.io,
|
||||
@@ -358,12 +351,11 @@ impl<T, U> Framed<T, U> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer
|
||||
/// with unprocessed data, and the codec.
|
||||
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer with unprocessed data,
|
||||
/// and the codec.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn into_parts(self) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io: self.io,
|
||||
@@ -376,14 +368,15 @@ impl<T, U> Framed<T, U> {
|
||||
}
|
||||
|
||||
/// `FramedParts` contains an export of the data of a Framed transport.
|
||||
/// It can be used to construct a new `Framed` with a different codec.
|
||||
/// It contains all current buffers and the inner transport.
|
||||
///
|
||||
/// It can be used to construct a new `Framed` with a different codec. It contains all current
|
||||
/// buffers and the inner transport.
|
||||
#[derive(Debug)]
|
||||
pub struct FramedParts<T, U> {
|
||||
/// The inner transport used to read bytes to and write bytes to
|
||||
/// The inner transport used to read bytes to and write bytes to.
|
||||
pub io: T,
|
||||
|
||||
/// The codec
|
||||
/// The codec object.
|
||||
pub codec: U,
|
||||
|
||||
/// The buffer with read but unprocessed data.
|
||||
@@ -396,7 +389,7 @@ pub struct FramedParts<T, U> {
|
||||
}
|
||||
|
||||
impl<T, U> FramedParts<T, U> {
|
||||
/// Create a new, default, `FramedParts`
|
||||
/// Creates a new default `FramedParts`.
|
||||
pub fn new(io: T, codec: U) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io,
|
||||
@@ -407,7 +400,7 @@ impl<T, U> FramedParts<T, U> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `FramedParts` with read buffer
|
||||
/// Creates a new `FramedParts` with read buffer.
|
||||
pub fn with_read_buf(io: T, codec: U, read_buf: BytesMut) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io,
|
||||
|
@@ -14,9 +14,11 @@

mod bcodec;
mod framed;
mod lines;

pub use self::bcodec::BytesCodec;
pub use self::framed::{Framed, FramedParts};
pub use self::lines::LinesCodec;

pub use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
pub use tokio_util::codec::{Decoder, Encoder};
actix-codec/src/lines.rs (new file, 158 lines)
@@ -0,0 +1,158 @@
|
||||
use std::io;
|
||||
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use memchr::memchr;
|
||||
|
||||
use super::{Decoder, Encoder};
|
||||
|
||||
/// Lines codec. Reads/writes line delimited strings.
|
||||
///
|
||||
/// Will split input up by LF or CRLF delimiters. I.e. carriage return characters at the end of
|
||||
/// lines are not preserved.
|
||||
#[derive(Debug, Copy, Clone, Default)]
|
||||
#[non_exhaustive]
|
||||
pub struct LinesCodec;
|
||||
|
||||
impl<T: AsRef<str>> Encoder<T> for LinesCodec {
|
||||
type Error = io::Error;
|
||||
|
||||
#[inline]
|
||||
fn encode(&mut self, item: T, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
let item = item.as_ref();
|
||||
dst.reserve(item.len() + 1);
|
||||
dst.put_slice(item.as_bytes());
|
||||
dst.put_u8(b'\n');
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for LinesCodec {
|
||||
type Item = String;
|
||||
type Error = io::Error;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
if src.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let len = match memchr(b'\n', src) {
|
||||
Some(n) => n,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
// split up to new line char
|
||||
let mut buf = src.split_to(len);
|
||||
debug_assert_eq!(len, buf.len());
|
||||
|
||||
// remove new line char from source
|
||||
src.advance(1);
|
||||
|
||||
match buf.last() {
|
||||
// remove carriage returns at the end of buf
|
||||
Some(b'\r') => buf.truncate(len - 1),
|
||||
|
||||
// line is empty
|
||||
None => return Ok(Some(String::new())),
|
||||
|
||||
_ => {}
|
||||
}
|
||||
|
||||
try_into_utf8(buf.freeze())
|
||||
}
|
||||
|
||||
fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
match self.decode(src)? {
|
||||
Some(frame) => Ok(Some(frame)),
|
||||
None if src.is_empty() => Ok(None),
|
||||
None => {
|
||||
let buf = match src.last() {
|
||||
// if last line ends in a CR then take everything up to it
|
||||
Some(b'\r') => src.split_to(src.len() - 1),
|
||||
|
||||
// take all bytes from source
|
||||
_ => src.split(),
|
||||
};
|
||||
|
||||
if buf.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
try_into_utf8(buf.freeze())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Attempts to convert bytes into a `String`.
|
||||
fn try_into_utf8(buf: Bytes) -> io::Result<Option<String>> {
|
||||
String::from_utf8(buf.to_vec())
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
|
||||
.map(Some)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bytes::BufMut as _;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn lines_decoder() {
|
||||
let mut codec = LinesCodec::default();
|
||||
let mut buf = BytesMut::from("\nline 1\nline 2\r\nline 3\n\r\n\r");
|
||||
|
||||
assert_eq!("", codec.decode(&mut buf).unwrap().unwrap());
|
||||
assert_eq!("line 1", codec.decode(&mut buf).unwrap().unwrap());
|
||||
assert_eq!("line 2", codec.decode(&mut buf).unwrap().unwrap());
|
||||
assert_eq!("line 3", codec.decode(&mut buf).unwrap().unwrap());
|
||||
assert_eq!("", codec.decode(&mut buf).unwrap().unwrap());
|
||||
assert!(codec.decode(&mut buf).unwrap().is_none());
|
||||
assert!(codec.decode_eof(&mut buf).unwrap().is_none());
|
||||
|
||||
buf.put_slice(b"k");
|
||||
assert!(codec.decode(&mut buf).unwrap().is_none());
|
||||
assert_eq!("\rk", codec.decode_eof(&mut buf).unwrap().unwrap());
|
||||
|
||||
assert!(codec.decode(&mut buf).unwrap().is_none());
|
||||
assert!(codec.decode_eof(&mut buf).unwrap().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lines_encoder() {
|
||||
let mut codec = LinesCodec::default();
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
|
||||
codec.encode("", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"\n");
|
||||
|
||||
codec.encode("test", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"\ntest\n");
|
||||
|
||||
codec.encode("a\nb", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"\ntest\na\nb\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lines_encoder_no_overflow() {
|
||||
let mut codec = LinesCodec::default();
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
codec.encode("1234567", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"1234567\n");
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
codec.encode("12345678", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"12345678\n");
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
codec.encode("123456789111213", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"123456789111213\n");
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
codec.encode("1234567891112131", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"1234567891112131\n");
|
||||
}
|
||||
}
|
actix-codec/tests/test_framed_sink.rs (new file, 221 lines)
@@ -0,0 +1,221 @@
|
||||
use actix_codec::*;
|
||||
use bytes::Buf;
|
||||
use bytes::{BufMut, BytesMut};
|
||||
use futures_sink::Sink;
|
||||
use std::collections::VecDeque;
|
||||
use std::io::{self, Write};
|
||||
use std::pin::Pin;
|
||||
use std::task::Poll::{Pending, Ready};
|
||||
use std::task::{Context, Poll};
|
||||
use tokio_test::{assert_ready, task};
|
||||
|
||||
macro_rules! bilateral {
|
||||
($($x:expr,)*) => {{
|
||||
let mut v = VecDeque::new();
|
||||
v.extend(vec![$($x),*]);
|
||||
Bilateral { calls: v }
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! assert_ready {
|
||||
($e:expr) => {{
|
||||
use core::task::Poll::*;
|
||||
match $e {
|
||||
Ready(v) => v,
|
||||
Pending => panic!("pending"),
|
||||
}
|
||||
}};
|
||||
($e:expr, $($msg:tt),+) => {{
|
||||
use core::task::Poll::*;
|
||||
match $e {
|
||||
Ready(v) => v,
|
||||
Pending => {
|
||||
let msg = format_args!($($msg),+);
|
||||
panic!("pending; {}", msg)
|
||||
}
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Bilateral {
|
||||
pub calls: VecDeque<io::Result<Vec<u8>>>,
|
||||
}
|
||||
|
||||
impl Write for Bilateral {
|
||||
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
|
||||
match self.calls.pop_front() {
|
||||
Some(Ok(data)) => {
|
||||
assert!(src.len() >= data.len());
|
||||
assert_eq!(&data[..], &src[..data.len()]);
|
||||
Ok(data.len())
|
||||
}
|
||||
Some(Err(e)) => Err(e),
|
||||
None => panic!("unexpected write; {:?}", src),
|
||||
}
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncWrite for Bilateral {
|
||||
fn poll_write(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<Result<usize, io::Error>> {
|
||||
match Pin::get_mut(self).write(buf) {
|
||||
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Pending,
|
||||
other => Ready(other),
|
||||
}
|
||||
}
|
||||
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
|
||||
match Pin::get_mut(self).flush() {
|
||||
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Pending,
|
||||
other => Ready(other),
|
||||
}
|
||||
}
|
||||
fn poll_shutdown(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Result<(), io::Error>> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncRead for Bilateral {
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<Result<(), std::io::Error>> {
|
||||
use io::ErrorKind::WouldBlock;
|
||||
|
||||
match self.calls.pop_front() {
|
||||
Some(Ok(data)) => {
|
||||
debug_assert!(buf.remaining() >= data.len());
|
||||
buf.put_slice(&data);
|
||||
Ready(Ok(()))
|
||||
}
|
||||
Some(Err(ref e)) if e.kind() == WouldBlock => Pending,
|
||||
Some(Err(e)) => Ready(Err(e)),
|
||||
None => Ready(Ok(())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct U32;
|
||||
|
||||
impl Encoder<u32> for U32 {
|
||||
type Error = io::Error;
|
||||
|
||||
fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> {
|
||||
// Reserve space
|
||||
dst.reserve(4);
|
||||
dst.put_u32(item);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for U32 {
|
||||
type Item = u32;
|
||||
type Error = io::Error;
|
||||
|
||||
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u32>> {
|
||||
if buf.len() < 4 {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let n = buf.split_to(4).get_u32();
|
||||
Ok(Some(n))
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_hits_highwater_mark() {
|
||||
// see here for what this test is based on:
|
||||
// https://github.com/tokio-rs/tokio/blob/75c07770bfbfea4e5fd914af819c741ed9c3fc36/tokio-util/tests/framed_write.rs#L69
|
||||
|
||||
const ITER: usize = 2 * 1024;
|
||||
|
||||
let mut bi = bilateral! {
|
||||
Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")),
|
||||
Ok(b"".to_vec()),
|
||||
};
|
||||
|
||||
for i in 0..=ITER {
|
||||
let mut b = BytesMut::with_capacity(4);
|
||||
b.put_u32(i as u32);
|
||||
|
||||
// Append to the end
|
||||
match bi.calls.back_mut().unwrap() {
|
||||
Ok(ref mut data) => {
|
||||
// Write in 2kb chunks
|
||||
if data.len() < ITER {
|
||||
data.extend_from_slice(&b[..]);
|
||||
continue;
|
||||
} // else fall through and create a new buffer
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
// Push a new new chunk
|
||||
bi.calls.push_back(Ok(b[..].to_vec()));
|
||||
}
|
||||
|
||||
assert_eq!(bi.calls.len(), 6);
|
||||
let mut framed = Framed::new(bi, U32);
|
||||
// Send 8KB. This fills up FramedWrite2 buffer
|
||||
let mut task = task::spawn(());
|
||||
task.enter(|cx, _| {
|
||||
// Send 8KB. This fills up Framed buffer
|
||||
for i in 0..ITER {
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
|
||||
}
|
||||
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// write the buffer
|
||||
assert!(framed.start_send(i as u32).is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
|
||||
// Now we poll_ready which forces a flush. The bilateral pops the front message
|
||||
// and decides to block.
|
||||
assert!(framed.poll_ready(cx).is_pending());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// We poll again, forcing another flush, which this time succeeds
|
||||
// The whole 8KB buffer is flushed
|
||||
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// Send more data. This matches the final message expected by the bilateral
|
||||
assert!(framed.start_send(ITER as u32).is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// Flush the rest of the buffer
|
||||
assert!(assert_ready!(framed.poll_flush(cx)).is_ok());
|
||||
}
|
||||
|
||||
// Ensure the mock is empty
|
||||
assert_eq!(0, Pin::new(&framed).get_ref().io_ref().calls.len());
|
||||
});
|
||||
}
|
@@ -3,6 +3,25 @@
## Unreleased - 2021-xx-xx


## 0.2.3 - 2021-10-19
* Fix test macro in presence of other imports named "test". [#399]

[#399]: https://github.com/actix/actix-net/pull/399


## 0.2.2 - 2021-10-14
* Improve error recovery potential when macro input is invalid. [#391]
* Allow custom `System`s on test macro. [#391]

[#391]: https://github.com/actix/actix-net/pull/391


## 0.2.1 - 2021-02-02
* Add optional argument `system` to `main` macro which can be used to specify the path to `actix_rt::System` (useful for re-exports). [#363]

[#363]: https://github.com/actix/actix-net/pull/363


## 0.2.0 - 2021-02-02
* Update to latest `actix_rt::System::new` signature. [#261]
@@ -1,10 +1,13 @@
[package]
name = "actix-macros"
version = "0.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
version = "0.2.3"
authors = [
    "Nikolay Kim <fafhrd91@gmail.com>",
    "Ibraheem Ahmed <ibrah1440@gmail.com>",
    "Rob Ede <robjtede@icloud.com>",
]
description = "Macros for Actix system and runtime"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-macros"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -20,4 +23,5 @@ syn = { version = "^1", features = ["full"] }
actix-rt = "2.0.0"

futures-util = { version = "0.3.7", default-features = false }
rustversion = "1"
trybuild = "1"
@@ -27,8 +27,15 @@ use quote::quote;
|
||||
#[allow(clippy::needless_doctest_main)]
|
||||
#[proc_macro_attribute]
|
||||
#[cfg(not(test))] // Work around for rust-lang/rust#62127
|
||||
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
|
||||
pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
|
||||
Ok(input) => input,
|
||||
// on parse err, make IDEs happy; see fn docs
|
||||
Err(err) => return input_and_compile_error(item, err),
|
||||
};
|
||||
|
||||
let args = syn::parse_macro_input!(args as syn::AttributeArgs);
|
||||
|
||||
let attrs = &input.attrs;
|
||||
let vis = &input.vis;
|
||||
let sig = &mut input.sig;
|
||||
@@ -43,13 +50,47 @@ pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
.into();
|
||||
}
|
||||
|
||||
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
|
||||
|
||||
for arg in &args {
|
||||
match arg {
|
||||
syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue {
|
||||
lit: syn::Lit::Str(lit),
|
||||
path,
|
||||
..
|
||||
})) => match path
|
||||
.get_ident()
|
||||
.map(|i| i.to_string().to_lowercase())
|
||||
.as_deref()
|
||||
{
|
||||
Some("system") => match lit.parse() {
|
||||
Ok(path) => system = path,
|
||||
Err(_) => {
|
||||
return syn::Error::new_spanned(lit, "Expected path")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sig.asyncness = None;
|
||||
|
||||
(quote! {
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
actix_rt::System::new()
|
||||
.block_on(async move { #body })
|
||||
<#system>::new().block_on(async move { #body })
|
||||
}
|
||||
})
|
||||
.into()
|
||||
@@ -65,8 +106,15 @@ pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
/// }
|
||||
/// ```
|
||||
#[proc_macro_attribute]
|
||||
pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
|
||||
pub fn test(args: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
|
||||
Ok(input) => input,
|
||||
// on parse err, make IDEs happy; see fn docs
|
||||
Err(err) => return input_and_compile_error(item, err),
|
||||
};
|
||||
|
||||
let args = syn::parse_macro_input!(args as syn::AttributeArgs);
|
||||
|
||||
let attrs = &input.attrs;
|
||||
let vis = &input.vis;
|
||||
let sig = &mut input.sig;
|
||||
@@ -91,18 +139,64 @@ pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
sig.asyncness = None;
|
||||
|
||||
let missing_test_attr = if has_test_attr {
|
||||
quote!()
|
||||
quote! {}
|
||||
} else {
|
||||
quote!(#[test])
|
||||
quote! { #[::core::prelude::v1::test] }
|
||||
};
|
||||
|
||||
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
|
||||
|
||||
for arg in &args {
|
||||
match arg {
|
||||
syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue {
|
||||
lit: syn::Lit::Str(lit),
|
||||
path,
|
||||
..
|
||||
})) => match path
|
||||
.get_ident()
|
||||
.map(|i| i.to_string().to_lowercase())
|
||||
.as_deref()
|
||||
{
|
||||
Some("system") => match lit.parse() {
|
||||
Ok(path) => system = path,
|
||||
Err(_) => {
|
||||
return syn::Error::new_spanned(lit, "Expected path")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
(quote! {
|
||||
#missing_test_attr
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
actix_rt::System::new()
|
||||
.block_on(async { #body })
|
||||
<#system>::new().block_on(async { #body })
|
||||
}
|
||||
})
|
||||
.into()
|
||||
}
|
||||
|
||||
/// Converts the error to a token stream and appends it to the original input.
|
||||
///
|
||||
/// Returning the original input in addition to the error is good for IDEs which can gracefully
|
||||
/// recover and show more precise errors within the macro body.
|
||||
///
|
||||
/// See <https://github.com/rust-analyzer/rust-analyzer/issues/10468> for more info.
|
||||
fn input_and_compile_error(mut item: TokenStream, err: syn::Error) -> TokenStream {
|
||||
let compile_err = TokenStream::from(err.to_compile_error());
|
||||
item.extend(compile_err);
|
||||
item
|
||||
}
|
||||
|
@@ -1,11 +1,18 @@
#[rustversion::stable(1.46)] // MSRV
#[test]
fn compile_macros() {
    let t = trybuild::TestCases::new();
    t.pass("tests/trybuild/main-01-basic.rs");
    t.compile_fail("tests/trybuild/main-02-only-async.rs");
    t.pass("tests/trybuild/main-03-fn-params.rs");
    t.pass("tests/trybuild/main-04-system-path.rs");
    t.compile_fail("tests/trybuild/main-05-system-expect-path.rs");
    t.compile_fail("tests/trybuild/main-06-unknown-attr.rs");

    t.pass("tests/trybuild/test-01-basic.rs");
    t.pass("tests/trybuild/test-02-keep-attrs.rs");
    t.compile_fail("tests/trybuild/test-03-only-async.rs");
    t.pass("tests/trybuild/test-04-system-path.rs");
    t.compile_fail("tests/trybuild/test-05-system-expect-path.rs");
    t.compile_fail("tests/trybuild/test-06-unknown-attr.rs");
}
actix-macros/tests/trybuild/main-04-system-path.rs (new file, 8 lines)
@@ -0,0 +1,8 @@
mod system {
    pub use actix_rt::System as MySystem;
}

#[actix_rt::main(system = "system::MySystem")]
async fn main() {
    futures_util::future::ready(()).await
}
actix-macros/tests/trybuild/main-05-system-expect-path.rs (new file, 4 lines)
@@ -0,0 +1,4 @@
#[actix_rt::main(system = "!@#*&")]
async fn main2() {}

fn main() {}
actix-macros/tests/trybuild/main-05-system-expect-path.stderr (new file, 5 lines)
@@ -0,0 +1,5 @@
error: Expected path
 --> $DIR/main-05-system-expect-path.rs:1:27
  |
1 | #[actix_rt::main(system = "!@#*&")]
  |                           ^^^^^^^
actix-macros/tests/trybuild/main-06-unknown-attr.rs (new file, 7 lines)
@@ -0,0 +1,7 @@
#[actix_rt::main(foo = "bar")]
async fn async_main() {}

#[actix_rt::main(bar::baz)]
async fn async_main2() {}

fn main() {}
actix-macros/tests/trybuild/main-06-unknown-attr.stderr (new file, 11 lines)
@@ -0,0 +1,11 @@
error: Unknown attribute specified
 --> $DIR/main-06-unknown-attr.rs:1:18
  |
1 | #[actix_rt::main(foo = "bar")]
  |                  ^^^^^^^^^^^

error: Unknown attribute specified
 --> $DIR/main-06-unknown-attr.rs:4:18
  |
4 | #[actix_rt::main(bar::baz)]
  |                  ^^^^^^^^
actix-macros/tests/trybuild/test-04-system-path.rs (new file, 10 lines)
@@ -0,0 +1,10 @@
mod system {
    pub use actix_rt::System as MySystem;
}

#[actix_rt::test(system = "system::MySystem")]
async fn my_test() {
    futures_util::future::ready(()).await
}

fn main() {}
actix-macros/tests/trybuild/test-05-system-expect-path.rs (new file, 4 lines)
@@ -0,0 +1,4 @@
#[actix_rt::test(system = "!@#*&")]
async fn my_test() {}

fn main() {}
actix-macros/tests/trybuild/test-05-system-expect-path.stderr (new file, 5 lines)
@@ -0,0 +1,5 @@
error: Expected path
 --> $DIR/test-05-system-expect-path.rs:1:27
  |
1 | #[actix_rt::test(system = "!@#*&")]
  |                           ^^^^^^^
actix-macros/tests/trybuild/test-06-unknown-attr.rs (new file, 7 lines)
@@ -0,0 +1,7 @@
#[actix_rt::test(foo = "bar")]
async fn my_test_1() {}

#[actix_rt::test(bar::baz)]
async fn my_test_2() {}

fn main() {}
actix-macros/tests/trybuild/test-06-unknown-attr.stderr (new file, 11 lines)
@@ -0,0 +1,11 @@
error: Unknown attribute specified
 --> $DIR/test-06-unknown-attr.rs:1:18
  |
1 | #[actix_rt::test(foo = "bar")]
  |                  ^^^^^^^^^^^

error: Unknown attribute specified
 --> $DIR/test-06-unknown-attr.rs:4:18
  |
4 | #[actix_rt::test(bar::baz)]
  |                  ^^^^^^^^
@@ -1,67 +0,0 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2021-xx-xx
|
||||
|
||||
|
||||
## 0.2.7 - 2021-02-06
|
||||
* Add `Router::recognize_checked` [#247]
|
||||
|
||||
[#247]: https://github.com/actix/actix-net/pull/247
|
||||
|
||||
|
||||
## 0.2.6 - 2021-01-09
|
||||
* Use `bytestring` version range compatible with Bytes v1.0. [#246]
|
||||
|
||||
[#246]: https://github.com/actix/actix-net/pull/246
|
||||
|
||||
|
||||
## 0.2.5 - 2020-09-20
|
||||
* Fix `from_hex()` method
|
||||
|
||||
|
||||
## 0.2.4 - 2019-12-31
|
||||
* Add `ResourceDef::resource_path_named()` path generation method
|
||||
|
||||
|
||||
## 0.2.3 - 2019-12-25
|
||||
* Add impl `IntoPattern` for `&String`
|
||||
|
||||
|
||||
## 0.2.2 - 2019-12-25
|
||||
* Use `IntoPattern` for `RouterBuilder::path()`
|
||||
|
||||
|
||||
## 0.2.1 - 2019-12-25
|
||||
* Add `IntoPattern` trait
|
||||
* Add multi-pattern resources
|
||||
|
||||
|
||||
## 0.2.0 - 2019-12-07
|
||||
* Update http to 0.2
|
||||
* Update regex to 1.3
|
||||
* Use bytestring instead of string
|
||||
|
||||
|
||||
## 0.1.5 - 2019-05-15
|
||||
* Remove debug prints
|
||||
|
||||
|
||||
## 0.1.4 - 2019-05-15
|
||||
* Fix checked resource match
|
||||
|
||||
|
||||
## 0.1.3 - 2019-04-22
|
||||
* Added support for `remainder match` (i.e "/path/{tail}*")
|
||||
|
||||
|
||||
## 0.1.2 - 2019-04-07
|
||||
* Export `Quoter` type
|
||||
* Allow to reset `Path` instance
|
||||
|
||||
|
||||
## 0.1.1 - 2019-04-03
|
||||
* Get dynamic segment by name instead of iterator.
|
||||
|
||||
|
||||
## 0.1.0 - 2019-03-09
|
||||
* Initial release
|
@@ -1,29 +0,0 @@
|
||||
[package]
|
||||
name = "actix-router"
|
||||
version = "0.2.7"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "Resource path matching library"
|
||||
keywords = ["actix", "router", "routing"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-router"
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[lib]
|
||||
name = "actix_router"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[features]
|
||||
default = ["http"]
|
||||
|
||||
[dependencies]
|
||||
regex = "1.3.1"
|
||||
serde = "1.0.104"
|
||||
bytestring = ">=0.1.5, <2"
|
||||
log = "0.4.8"
|
||||
http = { version = "0.2.2", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
http = "0.2.2"
|
||||
serde_derive = "1.0"
|
@@ -1 +0,0 @@
../LICENSE-APACHE
@@ -1 +0,0 @@
../LICENSE-MIT
@@ -1,717 +0,0 @@
|
||||
use serde::de::{self, Deserializer, Error as DeError, Visitor};
|
||||
use serde::forward_to_deserialize_any;
|
||||
|
||||
use crate::path::{Path, PathIter};
|
||||
use crate::ResourcePath;
|
||||
|
||||
macro_rules! unsupported_type {
|
||||
($trait_fn:ident, $name:expr) => {
|
||||
fn $trait_fn<V>(self, _: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom(concat!(
|
||||
"unsupported type: ",
|
||||
$name
|
||||
)))
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! parse_single_value {
|
||||
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
|
||||
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
if self.path.len() != 1 {
|
||||
Err(de::value::Error::custom(
|
||||
format!("wrong number of parameters: {} expected 1", self.path.len())
|
||||
.as_str(),
|
||||
))
|
||||
} else {
|
||||
let v = self.path[0].parse().map_err(|_| {
|
||||
de::value::Error::custom(format!(
|
||||
"can not parse {:?} to a {}",
|
||||
&self.path[0], $tp
|
||||
))
|
||||
})?;
|
||||
visitor.$visit_fn(v)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub struct PathDeserializer<'de, T: ResourcePath> {
|
||||
path: &'de Path<T>,
|
||||
}
|
||||
|
||||
impl<'de, T: ResourcePath + 'de> PathDeserializer<'de, T> {
|
||||
pub fn new(path: &'de Path<T>) -> Self {
|
||||
PathDeserializer { path }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T> {
|
||||
type Error = de::value::Error;
|
||||
|
||||
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_map(ParamsDeserializer {
|
||||
params: self.path.iter(),
|
||||
current: None,
|
||||
})
|
||||
}
|
||||
|
||||
fn deserialize_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
_: &'static [&'static str],
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
self.deserialize_map(visitor)
|
||||
}
|
||||
|
||||
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_unit()
|
||||
}
|
||||
|
||||
fn deserialize_unit_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
self.deserialize_unit(visitor)
|
||||
}
|
||||
|
||||
fn deserialize_newtype_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_newtype_struct(self)
|
||||
}
|
||||
|
||||
fn deserialize_tuple<V>(self, len: usize, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
if self.path.len() < len {
|
||||
Err(de::value::Error::custom(
|
||||
format!(
|
||||
"wrong number of parameters: {} expected {}",
|
||||
self.path.len(),
|
||||
len
|
||||
)
|
||||
.as_str(),
|
||||
))
|
||||
} else {
|
||||
visitor.visit_seq(ParamsSeq {
|
||||
params: self.path.iter(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn deserialize_tuple_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
len: usize,
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
if self.path.len() < len {
|
||||
Err(de::value::Error::custom(
|
||||
format!(
|
||||
"wrong number of parameters: {} expected {}",
|
||||
self.path.len(),
|
||||
len
|
||||
)
|
||||
.as_str(),
|
||||
))
|
||||
} else {
|
||||
visitor.visit_seq(ParamsSeq {
|
||||
params: self.path.iter(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn deserialize_enum<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
_: &'static [&'static str],
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
if self.path.is_empty() {
|
||||
Err(de::value::Error::custom("expected at least one parameter"))
|
||||
} else {
|
||||
visitor.visit_enum(ValueEnum {
|
||||
value: &self.path[0],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
if self.path.len() != 1 {
|
||||
Err(de::value::Error::custom(
|
||||
format!("wrong number of parameters: {} expected 1", self.path.len()).as_str(),
|
||||
))
|
||||
} else {
|
||||
visitor.visit_str(&self.path[0])
|
||||
}
|
||||
}
|
||||
|
||||
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_seq(ParamsSeq {
|
||||
params: self.path.iter(),
|
||||
})
|
||||
}
|
||||
|
||||
unsupported_type!(deserialize_any, "'any'");
|
||||
unsupported_type!(deserialize_bytes, "bytes");
|
||||
unsupported_type!(deserialize_option, "Option<T>");
|
||||
unsupported_type!(deserialize_identifier, "identifier");
|
||||
unsupported_type!(deserialize_ignored_any, "ignored_any");
|
||||
|
||||
parse_single_value!(deserialize_bool, visit_bool, "bool");
|
||||
parse_single_value!(deserialize_i8, visit_i8, "i8");
|
||||
parse_single_value!(deserialize_i16, visit_i16, "i16");
|
||||
parse_single_value!(deserialize_i32, visit_i32, "i32");
|
||||
parse_single_value!(deserialize_i64, visit_i64, "i64");
|
||||
parse_single_value!(deserialize_u8, visit_u8, "u8");
|
||||
parse_single_value!(deserialize_u16, visit_u16, "u16");
|
||||
parse_single_value!(deserialize_u32, visit_u32, "u32");
|
||||
parse_single_value!(deserialize_u64, visit_u64, "u64");
|
||||
parse_single_value!(deserialize_f32, visit_f32, "f32");
|
||||
parse_single_value!(deserialize_f64, visit_f64, "f64");
|
||||
parse_single_value!(deserialize_string, visit_string, "String");
|
||||
parse_single_value!(deserialize_byte_buf, visit_string, "String");
|
||||
parse_single_value!(deserialize_char, visit_char, "char");
|
||||
}
|
||||
|
||||
struct ParamsDeserializer<'de, T: ResourcePath> {
|
||||
params: PathIter<'de, T>,
|
||||
current: Option<(&'de str, &'de str)>,
|
||||
}
|
||||
|
||||
impl<'de, T: ResourcePath> de::MapAccess<'de> for ParamsDeserializer<'de, T> {
|
||||
type Error = de::value::Error;
|
||||
|
||||
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Self::Error>
|
||||
where
|
||||
K: de::DeserializeSeed<'de>,
|
||||
{
|
||||
self.current = self.params.next().map(|ref item| (item.0, item.1));
|
||||
match self.current {
|
||||
Some((key, _)) => Ok(Some(seed.deserialize(Key { key })?)),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: de::DeserializeSeed<'de>,
|
||||
{
|
||||
if let Some((_, value)) = self.current.take() {
|
||||
seed.deserialize(Value { value })
|
||||
} else {
|
||||
Err(de::value::Error::custom("unexpected item"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct Key<'de> {
|
||||
key: &'de str,
|
||||
}
|
||||
|
||||
impl<'de> Deserializer<'de> for Key<'de> {
|
||||
type Error = de::value::Error;
|
||||
|
||||
fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_str(self.key)
|
||||
}
|
||||
|
||||
fn deserialize_any<V>(self, _visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("Unexpected"))
|
||||
}
|
||||
|
||||
forward_to_deserialize_any! {
|
||||
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string bytes
|
||||
byte_buf option unit unit_struct newtype_struct seq tuple
|
||||
tuple_struct map struct enum ignored_any
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! parse_value {
|
||||
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
|
||||
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
let v = self.value.parse().map_err(|_| {
|
||||
de::value::Error::custom(format!("can not parse {:?} to a {}", self.value, $tp))
|
||||
})?;
|
||||
visitor.$visit_fn(v)
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
struct Value<'de> {
|
||||
value: &'de str,
|
||||
}
|
||||
|
||||
impl<'de> Deserializer<'de> for Value<'de> {
|
||||
type Error = de::value::Error;
|
||||
|
||||
parse_value!(deserialize_bool, visit_bool, "bool");
|
||||
parse_value!(deserialize_i8, visit_i8, "i8");
|
||||
parse_value!(deserialize_i16, visit_i16, "i16");
|
||||
parse_value!(deserialize_i32, visit_i32, "i32");
|
||||
parse_value!(deserialize_i64, visit_i64, "i64");
|
||||
parse_value!(deserialize_u8, visit_u8, "u8");
|
||||
parse_value!(deserialize_u16, visit_u16, "u16");
|
||||
parse_value!(deserialize_u32, visit_u32, "u32");
|
||||
parse_value!(deserialize_u64, visit_u64, "u64");
|
||||
parse_value!(deserialize_f32, visit_f32, "f32");
|
||||
parse_value!(deserialize_f64, visit_f64, "f64");
|
||||
parse_value!(deserialize_string, visit_string, "String");
|
||||
parse_value!(deserialize_byte_buf, visit_string, "String");
|
||||
parse_value!(deserialize_char, visit_char, "char");
|
||||
|
||||
fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_unit()
|
||||
}
|
||||
|
||||
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_unit()
|
||||
}
|
||||
|
||||
fn deserialize_unit_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_unit()
|
||||
}
|
||||
|
||||
fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_borrowed_bytes(self.value.as_bytes())
|
||||
}
|
||||
|
||||
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_borrowed_str(self.value)
|
||||
}
|
||||
|
||||
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_some(self)
|
||||
}
|
||||
|
||||
fn deserialize_enum<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
_: &'static [&'static str],
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_enum(ValueEnum { value: self.value })
|
||||
}
|
||||
|
||||
fn deserialize_newtype_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_newtype_struct(self)
|
||||
}
|
||||
|
||||
fn deserialize_tuple<V>(self, _: usize, _: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("unsupported type: tuple"))
|
||||
}
|
||||
|
||||
fn deserialize_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
_: &'static [&'static str],
|
||||
_: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("unsupported type: struct"))
|
||||
}
|
||||
|
||||
fn deserialize_tuple_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
_: usize,
|
||||
_: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("unsupported type: tuple struct"))
|
||||
}
|
||||
|
||||
unsupported_type!(deserialize_any, "any");
|
||||
unsupported_type!(deserialize_seq, "seq");
|
||||
unsupported_type!(deserialize_map, "map");
|
||||
unsupported_type!(deserialize_identifier, "identifier");
|
||||
}
|
||||
|
||||
struct ParamsSeq<'de, T: ResourcePath> {
|
||||
params: PathIter<'de, T>,
|
||||
}
|
||||
|
||||
impl<'de, T: ResourcePath> de::SeqAccess<'de> for ParamsSeq<'de, T> {
|
||||
type Error = de::value::Error;
|
||||
|
||||
fn next_element_seed<U>(&mut self, seed: U) -> Result<Option<U::Value>, Self::Error>
|
||||
where
|
||||
U: de::DeserializeSeed<'de>,
|
||||
{
|
||||
match self.params.next() {
|
||||
Some(item) => Ok(Some(seed.deserialize(Value { value: item.1 })?)),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ValueEnum<'de> {
|
||||
value: &'de str,
|
||||
}
|
||||
|
||||
impl<'de> de::EnumAccess<'de> for ValueEnum<'de> {
|
||||
type Error = de::value::Error;
|
||||
type Variant = UnitVariant;
|
||||
|
||||
fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
|
||||
where
|
||||
V: de::DeserializeSeed<'de>,
|
||||
{
|
||||
Ok((seed.deserialize(Key { key: self.value })?, UnitVariant))
|
||||
}
|
||||
}
|
||||
|
||||
struct UnitVariant;
|
||||
|
||||
impl<'de> de::VariantAccess<'de> for UnitVariant {
|
||||
type Error = de::value::Error;
|
||||
|
||||
fn unit_variant(self) -> Result<(), Self::Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value, Self::Error>
|
||||
where
|
||||
T: de::DeserializeSeed<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("not supported"))
|
||||
}
|
||||
|
||||
fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("not supported"))
|
||||
}
|
||||
|
||||
fn struct_variant<V>(
|
||||
self,
|
||||
_: &'static [&'static str],
|
||||
_: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("not supported"))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use serde::de;
|
||||
use serde_derive::Deserialize;
|
||||
|
||||
use super::*;
|
||||
use crate::path::Path;
|
||||
use crate::router::Router;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct MyStruct {
|
||||
key: String,
|
||||
value: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct Id {
|
||||
_id: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct Test1(String, u32);
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct Test2 {
|
||||
key: String,
|
||||
value: u32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum TestEnum {
|
||||
Val1,
|
||||
Val2,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct Test3 {
|
||||
val: TestEnum,
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_request_extract() {
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{key}/{value}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/name/user1/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
|
||||
let s: MyStruct = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(s.key, "name");
|
||||
assert_eq!(s.value, "user1");
|
||||
|
||||
let s: (String, String) =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(s.0, "name");
|
||||
assert_eq!(s.1, "user1");
|
||||
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{key}/{value}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/name/32/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
|
||||
let s: Test1 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(s.0, "name");
|
||||
assert_eq!(s.1, 32);
|
||||
|
||||
let s: Test2 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(s.key, "name");
|
||||
assert_eq!(s.value, 32);
|
||||
|
||||
let s: (String, u8) =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(s.0, "name");
|
||||
assert_eq!(s.1, 32);
|
||||
|
||||
let res: Vec<String> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(res[0], "name".to_owned());
|
||||
assert_eq!(res[1], "32".to_owned());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_path_single() {
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{value}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/32/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
let i: i8 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(i, 32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_enum() {
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{val}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/val1/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
let i: TestEnum = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(i, TestEnum::Val1);
|
||||
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{val1}/{val2}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/val1/val2/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
let i: (TestEnum, TestEnum) =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(i, (TestEnum::Val1, TestEnum::Val2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_enum_value() {
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{val}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/val1/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
let i: Test3 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(i.val, TestEnum::Val1);
|
||||
|
||||
let mut path = Path::new("/val3/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
let i: Result<Test3, de::value::Error> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path));
|
||||
assert!(i.is_err());
|
||||
assert!(format!("{:?}", i).contains("unknown variant"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_errors() {
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{value}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/name/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
|
||||
let s: Result<Test1, de::value::Error> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path));
|
||||
assert!(s.is_err());
|
||||
assert!(format!("{:?}", s).contains("wrong number of parameters"));
|
||||
|
||||
let s: Result<Test2, de::value::Error> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path));
|
||||
assert!(s.is_err());
|
||||
assert!(format!("{:?}", s).contains("can not parse"));
|
||||
|
||||
let s: Result<(String, String), de::value::Error> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path));
|
||||
assert!(s.is_err());
|
||||
assert!(format!("{:?}", s).contains("wrong number of parameters"));
|
||||
|
||||
let s: Result<u32, de::value::Error> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path));
|
||||
assert!(s.is_err());
|
||||
assert!(format!("{:?}", s).contains("can not parse"));
|
||||
}
|
||||
|
||||
// #[test]
|
||||
// fn test_extract_path_decode() {
|
||||
// let mut router = Router::<()>::default();
|
||||
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
|
||||
|
||||
// macro_rules! test_single_value {
|
||||
// ($value:expr, $expected:expr) => {{
|
||||
// let req = TestRequest::with_uri($value).finish();
|
||||
// let info = router.recognize(&req, &(), 0);
|
||||
// let req = req.with_route_info(info);
|
||||
// assert_eq!(
|
||||
// *Path::<String>::from_request(&req, &PathConfig::default()).unwrap(),
|
||||
// $expected
|
||||
// );
|
||||
// }};
|
||||
// }
|
||||
|
||||
// test_single_value!("/%25/", "%");
|
||||
// test_single_value!("/%40%C2%A3%24%25%5E%26%2B%3D/", "@£$%^&+=");
|
||||
// test_single_value!("/%2B/", "+");
|
||||
// test_single_value!("/%252B/", "%2B");
|
||||
// test_single_value!("/%2F/", "/");
|
||||
// test_single_value!("/%252F/", "%2F");
|
||||
// test_single_value!(
|
||||
// "/http%3A%2F%2Flocalhost%3A80%2Ffoo/",
|
||||
// "http://localhost:80/foo"
|
||||
// );
|
||||
// test_single_value!("/%2Fvar%2Flog%2Fsyslog/", "/var/log/syslog");
|
||||
// test_single_value!(
|
||||
// "/http%3A%2F%2Flocalhost%3A80%2Ffile%2F%252Fvar%252Flog%252Fsyslog/",
|
||||
// "http://localhost:80/file/%2Fvar%2Flog%2Fsyslog"
|
||||
// );
|
||||
|
||||
// let req = TestRequest::with_uri("/%25/7/?id=test").finish();
|
||||
|
||||
// let mut router = Router::<()>::default();
|
||||
// router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/")));
|
||||
// let info = router.recognize(&req, &(), 0);
|
||||
// let req = req.with_route_info(info);
|
||||
|
||||
// let s = Path::<Test2>::from_request(&req, &PathConfig::default()).unwrap();
|
||||
// assert_eq!(s.key, "%");
|
||||
// assert_eq!(s.value, 7);
|
||||
|
||||
// let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap();
|
||||
// assert_eq!(s.0, "%");
|
||||
// assert_eq!(s.1, "7");
|
||||
// }
|
||||
|
||||
// #[test]
|
||||
// fn test_extract_path_no_decode() {
|
||||
// let mut router = Router::<()>::default();
|
||||
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
|
||||
|
||||
// let req = TestRequest::with_uri("/%25/").finish();
|
||||
// let info = router.recognize(&req, &(), 0);
|
||||
// let req = req.with_route_info(info);
|
||||
// assert_eq!(
|
||||
// *Path::<String>::from_request(&req, &&PathConfig::default().disable_decoding())
|
||||
// .unwrap(),
|
||||
// "%25"
|
||||
// );
|
||||
// }
|
||||
}
|
@@ -1,152 +0,0 @@
|
||||
//! Resource path matching library.
|
||||
|
||||
#![deny(rust_2018_idioms, nonstandard_style)]
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
mod de;
|
||||
mod path;
|
||||
mod resource;
|
||||
mod router;
|
||||
|
||||
pub use self::de::PathDeserializer;
|
||||
pub use self::path::Path;
|
||||
pub use self::resource::ResourceDef;
|
||||
pub use self::router::{ResourceInfo, Router, RouterBuilder};
|
||||
|
||||
pub trait Resource<T: ResourcePath> {
|
||||
fn resource_path(&mut self) -> &mut Path<T>;
|
||||
}
|
||||
|
||||
pub trait ResourcePath {
|
||||
fn path(&self) -> &str;
|
||||
}
|
||||
|
||||
impl ResourcePath for String {
|
||||
fn path(&self) -> &str {
|
||||
self.as_str()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> ResourcePath for &'a str {
|
||||
fn path(&self) -> &str {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourcePath for bytestring::ByteString {
|
||||
fn path(&self) -> &str {
|
||||
&*self
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper trait for types that can be converted to one or more path patterns.
|
||||
pub trait IntoPattern {
|
||||
fn is_single(&self) -> bool;
|
||||
|
||||
fn patterns(&self) -> Vec<String>;
|
||||
}
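// Example sketch: `IntoPattern` is what allows `ResourceDef::new` to accept either a
// single pattern or a set of alternative patterns. This is a minimal usage sketch;
// the patterns below are illustrative only.
#[cfg(test)]
#[test]
fn into_pattern_usage_sketch() {
    // A single string is a single pattern...
    assert!("/user/{id}".is_single());

    // ...while a `Vec` of strings is a pattern set.
    let set = vec!["/user/{id}", "/profile/{id}"];
    assert!(!set.is_single());
    assert_eq!(
        set.patterns(),
        vec!["/user/{id}".to_string(), "/profile/{id}".to_string()]
    );

    // Both forms are accepted by `ResourceDef::new`.
    let _single = ResourceDef::new("/user/{id}");
    let _set = ResourceDef::new(set);
}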
|
||||
|
||||
impl IntoPattern for String {
|
||||
fn is_single(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn patterns(&self) -> Vec<String> {
|
||||
vec![self.clone()]
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> IntoPattern for &'a String {
|
||||
fn is_single(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn patterns(&self) -> Vec<String> {
|
||||
vec![self.as_str().to_string()]
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> IntoPattern for &'a str {
|
||||
fn is_single(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn patterns(&self) -> Vec<String> {
|
||||
vec![(*self).to_string()]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsRef<str>> IntoPattern for Vec<T> {
|
||||
fn is_single(&self) -> bool {
|
||||
self.len() == 1
|
||||
}
|
||||
|
||||
fn patterns(&self) -> Vec<String> {
|
||||
self.iter().map(|v| v.as_ref().to_string()).collect()
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! array_patterns (($tp:ty, $num:tt) => {
|
||||
impl IntoPattern for [$tp; $num] {
|
||||
fn is_single(&self) -> bool {
|
||||
$num == 1
|
||||
}
|
||||
|
||||
fn patterns(&self) -> Vec<String> {
|
||||
self.iter().map(|v| v.to_string()).collect()
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
array_patterns!(&str, 1);
|
||||
array_patterns!(&str, 2);
|
||||
array_patterns!(&str, 3);
|
||||
array_patterns!(&str, 4);
|
||||
array_patterns!(&str, 5);
|
||||
array_patterns!(&str, 6);
|
||||
array_patterns!(&str, 7);
|
||||
array_patterns!(&str, 8);
|
||||
array_patterns!(&str, 9);
|
||||
array_patterns!(&str, 10);
|
||||
array_patterns!(&str, 11);
|
||||
array_patterns!(&str, 12);
|
||||
array_patterns!(&str, 13);
|
||||
array_patterns!(&str, 14);
|
||||
array_patterns!(&str, 15);
|
||||
array_patterns!(&str, 16);
|
||||
|
||||
array_patterns!(String, 1);
|
||||
array_patterns!(String, 2);
|
||||
array_patterns!(String, 3);
|
||||
array_patterns!(String, 4);
|
||||
array_patterns!(String, 5);
|
||||
array_patterns!(String, 6);
|
||||
array_patterns!(String, 7);
|
||||
array_patterns!(String, 8);
|
||||
array_patterns!(String, 9);
|
||||
array_patterns!(String, 10);
|
||||
array_patterns!(String, 11);
|
||||
array_patterns!(String, 12);
|
||||
array_patterns!(String, 13);
|
||||
array_patterns!(String, 14);
|
||||
array_patterns!(String, 15);
|
||||
array_patterns!(String, 16);
|
||||
|
||||
#[cfg(feature = "http")]
|
||||
mod url;
|
||||
|
||||
#[cfg(feature = "http")]
|
||||
pub use self::url::{Quoter, Url};
|
||||
|
||||
#[cfg(feature = "http")]
|
||||
mod http_support {
|
||||
use super::ResourcePath;
|
||||
use http::Uri;
|
||||
|
||||
impl ResourcePath for Uri {
|
||||
fn path(&self) -> &str {
|
||||
self.path()
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,222 +0,0 @@
|
||||
use std::ops::Index;
|
||||
|
||||
use serde::de;
|
||||
|
||||
use crate::de::PathDeserializer;
|
||||
use crate::{Resource, ResourcePath};
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub(crate) enum PathItem {
|
||||
Static(&'static str),
|
||||
Segment(u16, u16),
|
||||
}
|
||||
|
||||
/// Resource path match information
|
||||
///
|
||||
/// If the resource path contains variable patterns, `Path` stores them.
|
||||
#[derive(Debug)]
|
||||
pub struct Path<T> {
|
||||
path: T,
|
||||
pub(crate) skip: u16,
|
||||
pub(crate) segments: Vec<(&'static str, PathItem)>,
|
||||
}
|
||||
|
||||
impl<T: Default> Default for Path<T> {
|
||||
fn default() -> Self {
|
||||
Path {
|
||||
path: T::default(),
|
||||
skip: 0,
|
||||
segments: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone> Clone for Path<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Path {
|
||||
path: self.path.clone(),
|
||||
skip: self.skip,
|
||||
segments: self.segments.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ResourcePath> Path<T> {
|
||||
pub fn new(path: T) -> Path<T> {
|
||||
Path {
|
||||
path,
|
||||
skip: 0,
|
||||
segments: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get reference to inner path instance
|
||||
#[inline]
|
||||
pub fn get_ref(&self) -> &T {
|
||||
&self.path
|
||||
}
|
||||
|
||||
/// Get mutable reference to inner path instance
|
||||
#[inline]
|
||||
pub fn get_mut(&mut self) -> &mut T {
|
||||
&mut self.path
|
||||
}
|
||||
|
||||
/// Returns the path, skipping the already-processed prefix.
|
||||
#[inline]
|
||||
pub fn path(&self) -> &str {
|
||||
let skip = self.skip as usize;
|
||||
let path = self.path.path();
|
||||
if skip <= path.len() {
|
||||
&path[skip..]
|
||||
} else {
|
||||
""
|
||||
}
|
||||
}
|
||||
|
||||
/// Set new path
|
||||
#[inline]
|
||||
pub fn set(&mut self, path: T) {
|
||||
self.skip = 0;
|
||||
self.path = path;
|
||||
self.segments.clear();
|
||||
}
|
||||
|
||||
/// Reset state
|
||||
#[inline]
|
||||
pub fn reset(&mut self) {
|
||||
self.skip = 0;
|
||||
self.segments.clear();
|
||||
}
|
||||
|
||||
/// Skip first `n` chars in path
|
||||
#[inline]
|
||||
pub fn skip(&mut self, n: u16) {
|
||||
self.skip += n;
|
||||
}
|
||||
|
||||
pub(crate) fn add(&mut self, name: &'static str, value: PathItem) {
|
||||
match value {
|
||||
PathItem::Static(s) => self.segments.push((name, PathItem::Static(s))),
|
||||
PathItem::Segment(begin, end) => self
|
||||
.segments
|
||||
.push((name, PathItem::Segment(self.skip + begin, self.skip + end))),
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn add_static(&mut self, name: &'static str, value: &'static str) {
|
||||
self.segments.push((name, PathItem::Static(value)));
|
||||
}
|
||||
|
||||
/// Returns `true` if there are no matched parameters.
|
||||
#[inline]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.segments.is_empty()
|
||||
}
|
||||
|
||||
/// Returns the number of extracted parameters.
|
||||
#[inline]
|
||||
pub fn len(&self) -> usize {
|
||||
self.segments.len()
|
||||
}
|
||||
|
||||
/// Get matched parameter by name without type conversion
|
||||
pub fn get(&self, key: &str) -> Option<&str> {
|
||||
for item in self.segments.iter() {
|
||||
if key == item.0 {
|
||||
return match item.1 {
|
||||
PathItem::Static(ref s) => Some(&s),
|
||||
PathItem::Segment(s, e) => {
|
||||
Some(&self.path.path()[(s as usize)..(e as usize)])
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
if key == "tail" {
|
||||
Some(&self.path.path()[(self.skip as usize)..])
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Get unprocessed part of the path
|
||||
pub fn unprocessed(&self) -> &str {
|
||||
&self.path.path()[(self.skip as usize)..]
|
||||
}
|
||||
|
||||
/// Get matched parameter by name.
|
||||
///
|
||||
/// If the keyed parameter is not available, an empty string is used as the
/// default value.
|
||||
pub fn query(&self, key: &str) -> &str {
|
||||
if let Some(s) = self.get(key) {
|
||||
s
|
||||
} else {
|
||||
""
|
||||
}
|
||||
}
|
||||
|
||||
/// Return iterator to items in parameter container
|
||||
pub fn iter(&self) -> PathIter<'_, T> {
|
||||
PathIter {
|
||||
idx: 0,
|
||||
params: self,
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to deserialize matching parameters to a specified type `U`
|
||||
pub fn load<'de, U: serde::Deserialize<'de>>(&'de self) -> Result<U, de::value::Error> {
|
||||
de::Deserialize::deserialize(PathDeserializer::new(self))
|
||||
}
|
||||
}
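// Example sketch: once a `ResourceDef` has matched, captured segments can be read
// by name, by index, or deserialized via `load`. This is a minimal usage sketch;
// the route and values below are illustrative only.
#[cfg(test)]
#[test]
fn path_access_usage_sketch() {
    use crate::ResourceDef;

    let re = ResourceDef::new("/user/{id}/{name}");
    let mut path = Path::new("/user/32/alice");
    assert!(re.match_path(&mut path));

    assert_eq!(path.get("id"), Some("32"));
    assert_eq!(&path["name"], "alice");
    assert_eq!(path.query("missing"), "");

    // serde integration: deserialize the captured segments in the order they
    // appear in the pattern.
    let (id, name): (u32, String) = path.load().unwrap();
    assert_eq!(id, 32);
    assert_eq!(name, "alice");
}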
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PathIter<'a, T> {
|
||||
idx: usize,
|
||||
params: &'a Path<T>,
|
||||
}
|
||||
|
||||
impl<'a, T: ResourcePath> Iterator for PathIter<'a, T> {
|
||||
type Item = (&'a str, &'a str);
|
||||
|
||||
#[inline]
|
||||
fn next(&mut self) -> Option<(&'a str, &'a str)> {
|
||||
if self.idx < self.params.len() {
|
||||
let idx = self.idx;
|
||||
let res = match self.params.segments[idx].1 {
|
||||
PathItem::Static(ref s) => &s,
|
||||
PathItem::Segment(s, e) => &self.params.path.path()[(s as usize)..(e as usize)],
|
||||
};
|
||||
self.idx += 1;
|
||||
return Some((&self.params.segments[idx].0, res));
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: ResourcePath> Index<&'a str> for Path<T> {
|
||||
type Output = str;
|
||||
|
||||
fn index(&self, name: &'a str) -> &str {
|
||||
self.get(name)
|
||||
.expect("Value for parameter is not available")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ResourcePath> Index<usize> for Path<T> {
|
||||
type Output = str;
|
||||
|
||||
fn index(&self, idx: usize) -> &str {
|
||||
match self.segments[idx].1 {
|
||||
PathItem::Static(ref s) => &s,
|
||||
PathItem::Segment(s, e) => &self.path.path()[(s as usize)..(e as usize)],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ResourcePath> Resource<T> for Path<T> {
|
||||
fn resource_path(&mut self) -> &mut Self {
|
||||
self
|
||||
}
|
||||
}
|
@@ -1,945 +0,0 @@
|
||||
use std::cmp::min;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
use regex::{escape, Regex, RegexSet};
|
||||
|
||||
use crate::path::{Path, PathItem};
|
||||
use crate::{IntoPattern, Resource, ResourcePath};
|
||||
|
||||
const MAX_DYNAMIC_SEGMENTS: usize = 16;
|
||||
|
||||
/// ResourceDef describes an entry in the resources table.
|
||||
///
|
||||
/// A resource definition can contain at most 16 dynamic segments.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ResourceDef {
|
||||
id: u16,
|
||||
tp: PatternType,
|
||||
name: String,
|
||||
pattern: String,
|
||||
elements: Vec<PatternElement>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
enum PatternElement {
|
||||
Str(String),
|
||||
Var(String),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
enum PatternType {
|
||||
Static(String),
|
||||
Prefix(String),
|
||||
Dynamic(Regex, Vec<&'static str>, usize),
|
||||
DynamicSet(RegexSet, Vec<(Regex, Vec<&'static str>, usize)>),
|
||||
}
|
||||
|
||||
impl ResourceDef {
|
||||
/// Parse path pattern and create a new `ResourceDef` instance.
|
||||
///
|
||||
/// Panics if path pattern is malformed.
|
||||
pub fn new<T: IntoPattern>(path: T) -> Self {
|
||||
if path.is_single() {
|
||||
let patterns = path.patterns();
|
||||
ResourceDef::with_prefix(&patterns[0], false)
|
||||
} else {
|
||||
let set = path.patterns();
|
||||
let mut data = Vec::new();
|
||||
let mut re_set = Vec::new();
|
||||
|
||||
for path in set {
|
||||
let (pattern, _, _, len) = ResourceDef::parse(&path, false);
|
||||
|
||||
let re = match Regex::new(&pattern) {
|
||||
Ok(re) => re,
|
||||
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
|
||||
};
|
||||
// Capture names are deliberately leaked to obtain `&'static str`s; actix creates
// one router per thread and resource definitions are long-lived.
|
||||
let names: Vec<_> = re
|
||||
.capture_names()
|
||||
.filter_map(|name| {
|
||||
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
|
||||
})
|
||||
.collect();
|
||||
data.push((re, names, len));
|
||||
re_set.push(pattern);
|
||||
}
|
||||
|
||||
ResourceDef {
|
||||
id: 0,
|
||||
tp: PatternType::DynamicSet(RegexSet::new(re_set).unwrap(), data),
|
||||
elements: Vec::new(),
|
||||
name: String::new(),
|
||||
pattern: "".to_owned(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse path pattern and create a new `ResourceDef` instance.
///
/// The pattern is matched as a prefix rather than requiring an exact (static) match.
///
/// Panics if the path regex pattern is malformed.
|
||||
pub fn prefix(path: &str) -> Self {
|
||||
ResourceDef::with_prefix(path, true)
|
||||
}
|
||||
|
||||
/// Parse path pattern and create a new `ResourceDef` instance.
///
/// Inserts `/` at the beginning of the pattern if it is not already present.
///
/// The pattern is matched as a prefix rather than requiring an exact (static) match.
///
/// Panics if the path regex pattern is malformed.
|
||||
pub fn root_prefix(path: &str) -> Self {
|
||||
ResourceDef::with_prefix(&insert_slash(path), true)
|
||||
}
|
||||
|
||||
/// Resource id
|
||||
pub fn id(&self) -> u16 {
|
||||
self.id
|
||||
}
|
||||
|
||||
/// Set resource id
|
||||
pub fn set_id(&mut self, id: u16) {
|
||||
self.id = id;
|
||||
}
|
||||
|
||||
/// Parse path pattern and create a new `ResourceDef` instance, optionally treating the pattern as a prefix.
|
||||
fn with_prefix(path: &str, for_prefix: bool) -> Self {
|
||||
let path = path.to_owned();
|
||||
let (pattern, elements, is_dynamic, len) = ResourceDef::parse(&path, for_prefix);
|
||||
|
||||
let tp = if is_dynamic {
|
||||
let re = match Regex::new(&pattern) {
|
||||
Ok(re) => re,
|
||||
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
|
||||
};
|
||||
// Capture names are deliberately leaked to obtain `&'static str`s; actix creates
// one router per thread and resource definitions are long-lived.
|
||||
let names = re
|
||||
.capture_names()
|
||||
.filter_map(|name| {
|
||||
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
|
||||
})
|
||||
.collect();
|
||||
PatternType::Dynamic(re, names, len)
|
||||
} else if for_prefix {
|
||||
PatternType::Prefix(pattern)
|
||||
} else {
|
||||
PatternType::Static(pattern)
|
||||
};
|
||||
|
||||
ResourceDef {
|
||||
tp,
|
||||
elements,
|
||||
id: 0,
|
||||
name: String::new(),
|
||||
pattern: path,
|
||||
}
|
||||
}
|
||||
|
||||
/// Resource pattern name
|
||||
pub fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
/// Mutable reference to a name of a resource definition.
|
||||
pub fn name_mut(&mut self) -> &mut String {
|
||||
&mut self.name
|
||||
}
|
||||
|
||||
/// Path pattern of the resource
|
||||
pub fn pattern(&self) -> &str {
|
||||
&self.pattern
|
||||
}
|
||||
|
||||
/// Check if path matches this pattern.
|
||||
#[inline]
|
||||
pub fn is_match(&self, path: &str) -> bool {
|
||||
match self.tp {
|
||||
PatternType::Static(ref s) => s == path,
|
||||
PatternType::Prefix(ref s) => path.starts_with(s),
|
||||
PatternType::Dynamic(ref re, _, _) => re.is_match(path),
|
||||
PatternType::DynamicSet(ref re, _) => re.is_match(path),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check whether `path` is a prefix match for this resource, returning the length of the matched prefix.
|
||||
pub fn is_prefix_match(&self, path: &str) -> Option<usize> {
|
||||
let p_len = path.len();
|
||||
let path = if path.is_empty() { "/" } else { path };
|
||||
|
||||
match self.tp {
|
||||
PatternType::Static(ref s) => {
|
||||
if s == path {
|
||||
Some(p_len)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
PatternType::Dynamic(ref re, _, len) => {
|
||||
if let Some(captures) = re.captures(path) {
|
||||
let mut pos = 0;
|
||||
let mut passed = false;
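// The first capture is the implicit whole-match group; skip it and track the
// end position of the last matched group.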
|
||||
for capture in captures.iter() {
|
||||
if let Some(ref m) = capture {
|
||||
if !passed {
|
||||
passed = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
pos = m.end();
|
||||
}
|
||||
}
|
||||
Some(pos + len)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
PatternType::Prefix(ref s) => {
|
||||
let len = if path == s {
|
||||
s.len()
|
||||
} else if path.starts_with(s)
|
||||
&& (s.ends_with('/') || path.split_at(s.len()).1.starts_with('/'))
|
||||
{
|
||||
if s.ends_with('/') {
|
||||
s.len() - 1
|
||||
} else {
|
||||
s.len()
|
||||
}
|
||||
} else {
|
||||
return None;
|
||||
};
|
||||
Some(min(p_len, len))
|
||||
}
|
||||
PatternType::DynamicSet(ref re, ref params) => {
|
||||
if let Some(idx) = re.matches(path).into_iter().next() {
|
||||
let (ref pattern, _, len) = params[idx];
|
||||
if let Some(captures) = pattern.captures(path) {
|
||||
let mut pos = 0;
|
||||
let mut passed = false;
|
||||
for capture in captures.iter() {
|
||||
if let Some(ref m) = capture {
|
||||
if !passed {
|
||||
passed = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
pos = m.end();
|
||||
}
|
||||
}
|
||||
Some(pos + len)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check whether the given path matches this pattern, populating the matched parameters on success.
|
||||
pub fn match_path<T: ResourcePath>(&self, path: &mut Path<T>) -> bool {
|
||||
match self.tp {
|
||||
PatternType::Static(ref s) => {
|
||||
if s == path.path() {
|
||||
path.skip(path.len() as u16);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
PatternType::Prefix(ref s) => {
|
||||
let r_path = path.path();
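// Determine how many characters of the path this prefix consumes. A trailing '/'
// in the prefix is not consumed, so the remaining (unprocessed) path still starts
// at a segment boundary.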
|
||||
let len = if s == r_path {
|
||||
s.len()
|
||||
} else if r_path.starts_with(s)
|
||||
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
|
||||
{
|
||||
if s.ends_with('/') {
|
||||
s.len() - 1
|
||||
} else {
|
||||
s.len()
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
};
|
||||
let r_path_len = r_path.len();
|
||||
path.skip(min(r_path_len, len) as u16);
|
||||
true
|
||||
}
|
||||
PatternType::Dynamic(ref re, ref names, len) => {
|
||||
let mut idx = 0;
|
||||
let mut pos = 0;
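// Matched segment positions are buffered in a fixed-size array first, because
// `captures` borrows `path` immutably while `path.add` requires a mutable borrow.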
|
||||
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
|
||||
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
|
||||
|
||||
if let Some(captures) = re.captures(path.path()) {
|
||||
for (no, name) in names.iter().enumerate() {
|
||||
if let Some(m) = captures.name(&name) {
|
||||
idx += 1;
|
||||
pos = m.end();
|
||||
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
|
||||
} else {
|
||||
log::error!(
|
||||
"Dynamic path match but not all segments found: {}",
|
||||
name
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
for idx in 0..idx {
|
||||
path.add(names[idx], segments[idx]);
|
||||
}
|
||||
path.skip((pos + len) as u16);
|
||||
true
|
||||
}
|
||||
PatternType::DynamicSet(ref re, ref params) => {
|
||||
if let Some(idx) = re.matches(path.path()).into_iter().next() {
|
||||
let (ref pattern, ref names, len) = params[idx];
|
||||
let mut idx = 0;
|
||||
let mut pos = 0;
|
||||
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
|
||||
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
|
||||
|
||||
if let Some(captures) = pattern.captures(path.path()) {
|
||||
for (no, name) in names.iter().enumerate() {
|
||||
if let Some(m) = captures.name(&name) {
|
||||
idx += 1;
|
||||
pos = m.end();
|
||||
segments[no] =
|
||||
PathItem::Segment(m.start() as u16, m.end() as u16);
|
||||
} else {
|
||||
log::error!(
|
||||
"Dynamic path match but not all segments found: {}",
|
||||
name
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
for idx in 0..idx {
|
||||
path.add(names[idx], segments[idx]);
|
||||
}
|
||||
path.skip((pos + len) as u16);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check whether the given path matches this pattern and passes the `check` guard, populating the matched parameters on success.
|
||||
pub fn match_path_checked<R, T, F, U>(
|
||||
&self,
|
||||
res: &mut R,
|
||||
check: &F,
|
||||
user_data: &Option<U>,
|
||||
) -> bool
|
||||
where
|
||||
T: ResourcePath,
|
||||
R: Resource<T>,
|
||||
F: Fn(&R, &Option<U>) -> bool,
|
||||
{
|
||||
match self.tp {
|
||||
PatternType::Static(ref s) => {
|
||||
if s == res.resource_path().path() && check(res, user_data) {
|
||||
let path = res.resource_path();
|
||||
path.skip(path.len() as u16);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
PatternType::Prefix(ref s) => {
|
||||
let len = {
|
||||
let r_path = res.resource_path().path();
|
||||
if s == r_path {
|
||||
s.len()
|
||||
} else if r_path.starts_with(s)
|
||||
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
|
||||
{
|
||||
if s.ends_with('/') {
|
||||
s.len() - 1
|
||||
} else {
|
||||
s.len()
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
if !check(res, user_data) {
|
||||
return false;
|
||||
}
|
||||
let path = res.resource_path();
|
||||
path.skip(min(path.path().len(), len) as u16);
|
||||
true
|
||||
}
|
||||
PatternType::Dynamic(ref re, ref names, len) => {
|
||||
let mut idx = 0;
|
||||
let mut pos = 0;
|
||||
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
|
||||
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
|
||||
|
||||
if let Some(captures) = re.captures(res.resource_path().path()) {
|
||||
for (no, name) in names.iter().enumerate() {
|
||||
if let Some(m) = captures.name(&name) {
|
||||
idx += 1;
|
||||
pos = m.end();
|
||||
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
|
||||
} else {
|
||||
log::error!(
|
||||
"Dynamic path match but not all segments found: {}",
|
||||
name
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
if !check(res, user_data) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let path = res.resource_path();
|
||||
for idx in 0..idx {
|
||||
path.add(names[idx], segments[idx]);
|
||||
}
|
||||
path.skip((pos + len) as u16);
|
||||
true
|
||||
}
|
||||
PatternType::DynamicSet(ref re, ref params) => {
|
||||
let path = res.resource_path().path();
|
||||
if let Some(idx) = re.matches(path).into_iter().next() {
|
||||
let (ref pattern, ref names, len) = params[idx];
|
||||
let mut idx = 0;
|
||||
let mut pos = 0;
|
||||
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
|
||||
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
|
||||
|
||||
if let Some(captures) = pattern.captures(path) {
|
||||
for (no, name) in names.iter().enumerate() {
|
||||
if let Some(m) = captures.name(&name) {
|
||||
idx += 1;
|
||||
pos = m.end();
|
||||
segments[no] =
|
||||
PathItem::Segment(m.start() as u16, m.end() as u16);
|
||||
} else {
|
||||
log::error!(
|
||||
"Dynamic path match but not all segments found: {}",
|
||||
name
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
if !check(res, user_data) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let path = res.resource_path();
|
||||
for idx in 0..idx {
|
||||
path.add(names[idx], segments[idx]);
|
||||
}
|
||||
path.skip((pos + len) as u16);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Build resource path from elements. Returns `true` on success.
|
||||
pub fn resource_path<U, I>(&self, path: &mut String, elements: &mut U) -> bool
|
||||
where
|
||||
U: Iterator<Item = I>,
|
||||
I: AsRef<str>,
|
||||
{
|
||||
match self.tp {
|
||||
PatternType::Prefix(ref p) => path.push_str(p),
|
||||
PatternType::Static(ref p) => path.push_str(p),
|
||||
PatternType::Dynamic(..) => {
|
||||
for el in &self.elements {
|
||||
match *el {
|
||||
PatternElement::Str(ref s) => path.push_str(s),
|
||||
PatternElement::Var(_) => {
|
||||
if let Some(val) = elements.next() {
|
||||
path.push_str(val.as_ref())
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
PatternType::DynamicSet(..) => {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
/// Build resource path from elements. Returns `true` on success.
|
||||
pub fn resource_path_named<K, V, S>(
|
||||
&self,
|
||||
path: &mut String,
|
||||
elements: &HashMap<K, V, S>,
|
||||
) -> bool
|
||||
where
|
||||
K: std::borrow::Borrow<str> + Eq + Hash,
|
||||
V: AsRef<str>,
|
||||
S: std::hash::BuildHasher,
|
||||
{
|
||||
match self.tp {
|
||||
PatternType::Prefix(ref p) => path.push_str(p),
|
||||
PatternType::Static(ref p) => path.push_str(p),
|
||||
PatternType::Dynamic(..) => {
|
||||
for el in &self.elements {
|
||||
match *el {
|
||||
PatternElement::Str(ref s) => path.push_str(s),
|
||||
PatternElement::Var(ref name) => {
|
||||
if let Some(val) = elements.get(name) {
|
||||
path.push_str(val.as_ref())
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
PatternType::DynamicSet(..) => {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
fn parse_param(pattern: &str) -> (PatternElement, String, &str, bool) {
|
||||
const DEFAULT_PATTERN: &str = "[^/]+";
|
||||
const DEFAULT_PATTERN_TAIL: &str = ".*";
|
||||
let mut params_nesting = 0usize;
|
||||
let close_idx = pattern
|
||||
.find(|c| match c {
|
||||
'{' => {
|
||||
params_nesting += 1;
|
||||
false
|
||||
}
|
||||
'}' => {
|
||||
params_nesting -= 1;
|
||||
params_nesting == 0
|
||||
}
|
||||
_ => false,
|
||||
})
|
||||
.expect("malformed dynamic segment");
|
||||
let (mut param, mut rem) = pattern.split_at(close_idx + 1);
|
||||
param = ¶m[1..param.len() - 1]; // Remove outer brackets
|
||||
let tail = rem == "*";
|
||||
|
||||
let (name, pattern) = match param.find(':') {
|
||||
Some(idx) => {
|
||||
if tail {
|
||||
panic!("Custom regex is not supported for remainder match");
|
||||
}
|
||||
let (name, pattern) = param.split_at(idx);
|
||||
(name, &pattern[1..])
|
||||
}
|
||||
None => (
|
||||
param,
|
||||
if tail {
|
||||
rem = &rem[1..];
|
||||
DEFAULT_PATTERN_TAIL
|
||||
} else {
|
||||
DEFAULT_PATTERN
|
||||
},
|
||||
),
|
||||
};
|
||||
(
|
||||
PatternElement::Var(name.to_string()),
|
||||
format!(r"(?P<{}>{})", &name, &pattern),
|
||||
rem,
|
||||
tail,
|
||||
)
|
||||
}
|
||||
|
||||
fn parse(
|
||||
mut pattern: &str,
|
||||
mut for_prefix: bool,
|
||||
) -> (String, Vec<PatternElement>, bool, usize) {
|
||||
if pattern.find('{').is_none() {
|
||||
return if let Some(path) = pattern.strip_suffix('*') {
|
||||
let re = String::from("^") + path + "(.*)";
|
||||
(re, vec![PatternElement::Str(String::from(path))], true, 0)
|
||||
} else {
|
||||
(
|
||||
String::from(pattern),
|
||||
vec![PatternElement::Str(String::from(pattern))],
|
||||
false,
|
||||
pattern.chars().count(),
|
||||
)
|
||||
};
|
||||
}
|
||||
|
||||
let mut elements = Vec::new();
|
||||
let mut re = String::from("^");
|
||||
let mut dyn_elements = 0;
|
||||
|
||||
while let Some(idx) = pattern.find('{') {
|
||||
let (prefix, rem) = pattern.split_at(idx);
|
||||
elements.push(PatternElement::Str(String::from(prefix)));
|
||||
re.push_str(&escape(prefix));
|
||||
let (param_pattern, re_part, rem, tail) = Self::parse_param(rem);
|
||||
if tail {
|
||||
for_prefix = true;
|
||||
}
|
||||
|
||||
elements.push(param_pattern);
|
||||
re.push_str(&re_part);
|
||||
pattern = rem;
|
||||
dyn_elements += 1;
|
||||
}
|
||||
|
||||
elements.push(PatternElement::Str(String::from(pattern)));
|
||||
re.push_str(&escape(pattern));
|
||||
|
||||
if dyn_elements > MAX_DYNAMIC_SEGMENTS {
|
||||
panic!(
|
||||
"Only {} dynamic segments are allowed, provided: {}",
|
||||
MAX_DYNAMIC_SEGMENTS, dyn_elements
|
||||
);
|
||||
}
|
||||
|
||||
if !for_prefix {
|
||||
re.push('$');
|
||||
}
|
||||
(re, elements, true, pattern.chars().count())
|
||||
}
|
||||
}
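// Example sketch: the pattern syntax accepted by `ResourceDef`, in one place —
// static segments, `{name}` dynamic segments, `{name:regex}` custom regexes and a
// trailing `*` remainder match. This is a minimal sketch; the routes are illustrative.
#[cfg(test)]
#[test]
fn pattern_syntax_usage_sketch() {
    // Static path: must match exactly.
    assert!(ResourceDef::new("/ping").is_match("/ping"));

    // Dynamic segment: matches any single path segment.
    assert!(ResourceDef::new("/user/{id}").is_match("/user/42"));

    // Custom regex for a segment.
    assert!(ResourceDef::new("/{id:\\d+}").is_match("/42"));
    assert!(!ResourceDef::new("/{id:\\d+}").is_match("/abc"));

    // Trailing `*` matches the remainder of the path.
    assert!(ResourceDef::new("/static/*").is_match("/static/css/app.css"));
}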
|
||||
|
||||
impl Eq for ResourceDef {}
|
||||
|
||||
impl PartialEq for ResourceDef {
|
||||
fn eq(&self, other: &ResourceDef) -> bool {
|
||||
self.pattern == other.pattern
|
||||
}
|
||||
}
|
||||
|
||||
impl Hash for ResourceDef {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.pattern.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a str> for ResourceDef {
|
||||
fn from(path: &'a str) -> ResourceDef {
|
||||
ResourceDef::new(path)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for ResourceDef {
|
||||
fn from(path: String) -> ResourceDef {
|
||||
ResourceDef::new(path)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn insert_slash(path: &str) -> String {
|
||||
let mut path = path.to_owned();
|
||||
if !path.is_empty() && !path.starts_with('/') {
|
||||
path.insert(0, '/');
|
||||
};
|
||||
path
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_static() {
|
||||
let re = ResourceDef::new("/");
|
||||
assert!(re.is_match("/"));
|
||||
assert!(!re.is_match("/a"));
|
||||
|
||||
let re = ResourceDef::new("/name");
|
||||
assert!(re.is_match("/name"));
|
||||
assert!(!re.is_match("/name1"));
|
||||
assert!(!re.is_match("/name/"));
|
||||
assert!(!re.is_match("/name~"));
|
||||
|
||||
assert_eq!(re.is_prefix_match("/name"), Some(5));
|
||||
assert_eq!(re.is_prefix_match("/name1"), None);
|
||||
assert_eq!(re.is_prefix_match("/name/"), None);
|
||||
assert_eq!(re.is_prefix_match("/name~"), None);
|
||||
|
||||
let re = ResourceDef::new("/name/");
|
||||
assert!(re.is_match("/name/"));
|
||||
assert!(!re.is_match("/name"));
|
||||
assert!(!re.is_match("/name/gs"));
|
||||
|
||||
let re = ResourceDef::new("/user/profile");
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(!re.is_match("/user/profile/profile"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_param() {
|
||||
let re = ResourceDef::new("/user/{id}");
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(!re.is_match("/user/2345/"));
|
||||
assert!(!re.is_match("/user/2345/sdg"));
|
||||
|
||||
let mut path = Path::new("/user/profile");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "profile");
|
||||
|
||||
let mut path = Path::new("/user/1245125");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "1245125");
|
||||
|
||||
let re = ResourceDef::new("/v{version}/resource/{id}");
|
||||
assert!(re.is_match("/v1/resource/320120"));
|
||||
assert!(!re.is_match("/v/resource/1"));
|
||||
assert!(!re.is_match("/resource"));
|
||||
|
||||
let mut path = Path::new("/v151/resource/adage32");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("version").unwrap(), "151");
|
||||
assert_eq!(path.get("id").unwrap(), "adage32");
|
||||
|
||||
let re = ResourceDef::new("/{id:[[:digit:]]{6}}");
|
||||
assert!(re.is_match("/012345"));
|
||||
assert!(!re.is_match("/012"));
|
||||
assert!(!re.is_match("/01234567"));
|
||||
assert!(!re.is_match("/XXXXXX"));
|
||||
|
||||
let mut path = Path::new("/012345");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "012345");
|
||||
}
|
||||
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
#[test]
|
||||
fn test_dynamic_set() {
|
||||
let re = ResourceDef::new(vec![
|
||||
"/user/{id}",
|
||||
"/v{version}/resource/{id}",
|
||||
"/{id:[[:digit:]]{6}}",
|
||||
]);
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(!re.is_match("/user/2345/"));
|
||||
assert!(!re.is_match("/user/2345/sdg"));
|
||||
|
||||
let mut path = Path::new("/user/profile");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "profile");
|
||||
|
||||
let mut path = Path::new("/user/1245125");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "1245125");
|
||||
|
||||
assert!(re.is_match("/v1/resource/320120"));
|
||||
assert!(!re.is_match("/v/resource/1"));
|
||||
assert!(!re.is_match("/resource"));
|
||||
|
||||
let mut path = Path::new("/v151/resource/adage32");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("version").unwrap(), "151");
|
||||
assert_eq!(path.get("id").unwrap(), "adage32");
|
||||
|
||||
assert!(re.is_match("/012345"));
|
||||
assert!(!re.is_match("/012"));
|
||||
assert!(!re.is_match("/01234567"));
|
||||
assert!(!re.is_match("/XXXXXX"));
|
||||
|
||||
let mut path = Path::new("/012345");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "012345");
|
||||
|
||||
let re = ResourceDef::new([
|
||||
"/user/{id}",
|
||||
"/v{version}/resource/{id}",
|
||||
"/{id:[[:digit:]]{6}}",
|
||||
]);
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(!re.is_match("/user/2345/"));
|
||||
assert!(!re.is_match("/user/2345/sdg"));
|
||||
|
||||
let re = ResourceDef::new([
|
||||
"/user/{id}".to_string(),
|
||||
"/v{version}/resource/{id}".to_string(),
|
||||
"/{id:[[:digit:]]{6}}".to_string(),
|
||||
]);
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(!re.is_match("/user/2345/"));
|
||||
assert!(!re.is_match("/user/2345/sdg"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_tail() {
|
||||
let re = ResourceDef::new("/user/-{id}*");
|
||||
|
||||
let mut path = Path::new("/user/-profile");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "profile");
|
||||
|
||||
let mut path = Path::new("/user/-2345");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "2345");
|
||||
|
||||
let mut path = Path::new("/user/-2345/");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "2345/");
|
||||
|
||||
let mut path = Path::new("/user/-2345/sdg");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "2345/sdg");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_static_tail() {
|
||||
let re = ResourceDef::new("/user*");
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(re.is_match("/user/2345/"));
|
||||
assert!(re.is_match("/user/2345/sdg"));
|
||||
|
||||
let re = ResourceDef::new("/user/*");
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(re.is_match("/user/2345/"));
|
||||
assert!(re.is_match("/user/2345/sdg"));
|
||||
}
|
||||
|
||||
#[cfg(feature = "http")]
|
||||
#[test]
|
||||
fn test_parse_urlencoded_param() {
|
||||
use std::convert::TryFrom;
|
||||
|
||||
let re = ResourceDef::new("/user/{id}/test");
|
||||
|
||||
let mut path = Path::new("/user/2345/test");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "2345");
|
||||
|
||||
let mut path = Path::new("/user/qwe%25/test");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "qwe%25");
|
||||
|
||||
let uri = http::Uri::try_from("/user/qwe%25/test").unwrap();
|
||||
let mut path = Path::new(uri);
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "qwe%25");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resource_prefix() {
|
||||
let re = ResourceDef::prefix("/name");
|
||||
assert!(re.is_match("/name"));
|
||||
assert!(re.is_match("/name/"));
|
||||
assert!(re.is_match("/name/test/test"));
|
||||
assert!(re.is_match("/name1"));
|
||||
assert!(re.is_match("/name~"));
|
||||
|
||||
assert_eq!(re.is_prefix_match("/name"), Some(5));
|
||||
assert_eq!(re.is_prefix_match("/name/"), Some(5));
|
||||
assert_eq!(re.is_prefix_match("/name/test/test"), Some(5));
|
||||
assert_eq!(re.is_prefix_match("/name1"), None);
|
||||
assert_eq!(re.is_prefix_match("/name~"), None);
|
||||
|
||||
let re = ResourceDef::prefix("/name/");
|
||||
assert!(re.is_match("/name/"));
|
||||
assert!(re.is_match("/name/gs"));
|
||||
assert!(!re.is_match("/name"));
|
||||
|
||||
let re = ResourceDef::root_prefix("name/");
|
||||
assert!(re.is_match("/name/"));
|
||||
assert!(re.is_match("/name/gs"));
|
||||
assert!(!re.is_match("/name"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resource_prefix_dynamic() {
|
||||
let re = ResourceDef::prefix("/{name}/");
|
||||
assert!(re.is_match("/name/"));
|
||||
assert!(re.is_match("/name/gs"));
|
||||
assert!(!re.is_match("/name"));
|
||||
|
||||
assert_eq!(re.is_prefix_match("/name/"), Some(6));
|
||||
assert_eq!(re.is_prefix_match("/name/gs"), Some(6));
|
||||
assert_eq!(re.is_prefix_match("/name"), None);
|
||||
|
||||
let mut path = Path::new("/test2/");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(&path["name"], "test2");
|
||||
assert_eq!(&path[0], "test2");
|
||||
|
||||
let mut path = Path::new("/test2/subpath1/subpath2/index.html");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(&path["name"], "test2");
|
||||
assert_eq!(&path[0], "test2");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resource_path() {
|
||||
let mut s = String::new();
|
||||
let resource = ResourceDef::new("/user/{item1}/test");
|
||||
assert!(resource.resource_path(&mut s, &mut (&["user1"]).iter()));
|
||||
assert_eq!(s, "/user/user1/test");
|
||||
|
||||
let mut s = String::new();
|
||||
let resource = ResourceDef::new("/user/{item1}/{item2}/test");
|
||||
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
|
||||
assert_eq!(s, "/user/item/item2/test");
|
||||
|
||||
let mut s = String::new();
|
||||
let resource = ResourceDef::new("/user/{item1}/{item2}");
|
||||
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
|
||||
assert_eq!(s, "/user/item/item2");
|
||||
|
||||
let mut s = String::new();
|
||||
let resource = ResourceDef::new("/user/{item1}/{item2}/");
|
||||
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
|
||||
assert_eq!(s, "/user/item/item2/");
|
||||
|
||||
let mut s = String::new();
|
||||
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
|
||||
|
||||
let mut s = String::new();
|
||||
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
|
||||
assert_eq!(s, "/user/item/item2/");
|
||||
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
|
||||
|
||||
let mut s = String::new();
|
||||
assert!(resource.resource_path(&mut s, &mut vec!["item", "item2"].into_iter()));
|
||||
assert_eq!(s, "/user/item/item2/");
|
||||
|
||||
let mut map = HashMap::new();
|
||||
map.insert("item1", "item");
|
||||
|
||||
let mut s = String::new();
|
||||
assert!(!resource.resource_path_named(&mut s, &map));
|
||||
|
||||
let mut s = String::new();
|
||||
map.insert("item2", "item2");
|
||||
assert!(resource.resource_path_named(&mut s, &map));
|
||||
assert_eq!(s, "/user/item/item2/");
|
||||
}
|
||||
}
|
@@ -1,259 +0,0 @@
|
||||
use crate::{IntoPattern, Resource, ResourceDef, ResourcePath};
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub struct ResourceId(pub u16);
|
||||
|
||||
/// Information about the current resource.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ResourceInfo {
|
||||
resource: ResourceId,
|
||||
}
|
||||
|
||||
/// Resource router.
|
||||
pub struct Router<T, U = ()>(Vec<(ResourceDef, T, Option<U>)>);
|
||||
|
||||
impl<T, U> Router<T, U> {
|
||||
pub fn build() -> RouterBuilder<T, U> {
|
||||
RouterBuilder {
|
||||
resources: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn recognize<R, P>(&self, resource: &mut R) -> Option<(&T, ResourceId)>
|
||||
where
|
||||
R: Resource<P>,
|
||||
P: ResourcePath,
|
||||
{
|
||||
for item in self.0.iter() {
|
||||
if item.0.match_path(resource.resource_path()) {
|
||||
return Some((&item.1, ResourceId(item.0.id())));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn recognize_mut<R, P>(&mut self, resource: &mut R) -> Option<(&mut T, ResourceId)>
|
||||
where
|
||||
R: Resource<P>,
|
||||
P: ResourcePath,
|
||||
{
|
||||
for item in self.0.iter_mut() {
|
||||
if item.0.match_path(resource.resource_path()) {
|
||||
return Some((&mut item.1, ResourceId(item.0.id())));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn recognize_checked<R, P, F>(
|
||||
&self,
|
||||
resource: &mut R,
|
||||
check: F,
|
||||
) -> Option<(&T, ResourceId)>
|
||||
where
|
||||
F: Fn(&R, &Option<U>) -> bool,
|
||||
R: Resource<P>,
|
||||
P: ResourcePath,
|
||||
{
|
||||
for item in self.0.iter() {
|
||||
if item.0.match_path_checked(resource, &check, &item.2) {
|
||||
return Some((&item.1, ResourceId(item.0.id())));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn recognize_mut_checked<R, P, F>(
|
||||
&mut self,
|
||||
resource: &mut R,
|
||||
check: F,
|
||||
) -> Option<(&mut T, ResourceId)>
|
||||
where
|
||||
F: Fn(&R, &Option<U>) -> bool,
|
||||
R: Resource<P>,
|
||||
P: ResourcePath,
|
||||
{
|
||||
for item in self.0.iter_mut() {
|
||||
if item.0.match_path_checked(resource, &check, &item.2) {
|
||||
return Some((&mut item.1, ResourceId(item.0.id())));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RouterBuilder<T, U = ()> {
|
||||
resources: Vec<(ResourceDef, T, Option<U>)>,
|
||||
}
|
||||
|
||||
impl<T, U> RouterBuilder<T, U> {
|
||||
/// Register resource for specified path.
|
||||
pub fn path<P: IntoPattern>(
|
||||
&mut self,
|
||||
path: P,
|
||||
resource: T,
|
||||
) -> &mut (ResourceDef, T, Option<U>) {
|
||||
self.resources
|
||||
.push((ResourceDef::new(path), resource, None));
|
||||
self.resources.last_mut().unwrap()
|
||||
}
|
||||
|
||||
/// Register resource for specified path prefix.
|
||||
pub fn prefix(&mut self, prefix: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) {
|
||||
self.resources
|
||||
.push((ResourceDef::prefix(prefix), resource, None));
|
||||
self.resources.last_mut().unwrap()
|
||||
}
|
||||
|
||||
/// Register resource for ResourceDef
|
||||
pub fn rdef(&mut self, rdef: ResourceDef, resource: T) -> &mut (ResourceDef, T, Option<U>) {
|
||||
self.resources.push((rdef, resource, None));
|
||||
self.resources.last_mut().unwrap()
|
||||
}
|
||||
|
||||
/// Finish configuration and create router instance.
|
||||
pub fn finish(self) -> Router<T, U> {
|
||||
Router(self.resources)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::path::Path;
|
||||
use crate::router::{ResourceId, Router};
|
||||
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
#[test]
|
||||
fn test_recognizer_1() {
|
||||
let mut router = Router::<usize>::build();
|
||||
router.path("/name", 10).0.set_id(0);
|
||||
router.path("/name/{val}", 11).0.set_id(1);
|
||||
router.path("/name/{val}/index.html", 12).0.set_id(2);
|
||||
router.path("/file/{file}.{ext}", 13).0.set_id(3);
|
||||
router.path("/v{val}/{val2}/index.html", 14).0.set_id(4);
|
||||
router.path("/v/{tail:.*}", 15).0.set_id(5);
|
||||
router.path("/test2/{test}.html", 16).0.set_id(6);
|
||||
router.path("/{test}/index.html", 17).0.set_id(7);
|
||||
let mut router = router.finish();
|
||||
|
||||
let mut path = Path::new("/unknown");
|
||||
assert!(router.recognize_mut(&mut path).is_none());
|
||||
|
||||
let mut path = Path::new("/name");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 10);
|
||||
assert_eq!(info, ResourceId(0));
|
||||
assert!(path.is_empty());
|
||||
|
||||
let mut path = Path::new("/name/value");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 11);
|
||||
assert_eq!(info, ResourceId(1));
|
||||
assert_eq!(path.get("val").unwrap(), "value");
|
||||
assert_eq!(&path["val"], "value");
|
||||
|
||||
let mut path = Path::new("/name/value2/index.html");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 12);
|
||||
assert_eq!(info, ResourceId(2));
|
||||
assert_eq!(path.get("val").unwrap(), "value2");
|
||||
|
||||
let mut path = Path::new("/file/file.gz");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 13);
|
||||
assert_eq!(info, ResourceId(3));
|
||||
assert_eq!(path.get("file").unwrap(), "file");
|
||||
assert_eq!(path.get("ext").unwrap(), "gz");
|
||||
|
||||
let mut path = Path::new("/vtest/ttt/index.html");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 14);
|
||||
assert_eq!(info, ResourceId(4));
|
||||
assert_eq!(path.get("val").unwrap(), "test");
|
||||
assert_eq!(path.get("val2").unwrap(), "ttt");
|
||||
|
||||
let mut path = Path::new("/v/blah-blah/index.html");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 15);
|
||||
assert_eq!(info, ResourceId(5));
|
||||
assert_eq!(path.get("tail").unwrap(), "blah-blah/index.html");
|
||||
|
||||
let mut path = Path::new("/test2/index.html");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 16);
|
||||
assert_eq!(info, ResourceId(6));
|
||||
assert_eq!(path.get("test").unwrap(), "index");
|
||||
|
||||
let mut path = Path::new("/bbb/index.html");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 17);
|
||||
assert_eq!(info, ResourceId(7));
|
||||
assert_eq!(path.get("test").unwrap(), "bbb");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_recognizer_2() {
|
||||
let mut router = Router::<usize>::build();
|
||||
router.path("/index.json", 10);
|
||||
router.path("/{source}.json", 11);
|
||||
let mut router = router.finish();
|
||||
|
||||
let mut path = Path::new("/index.json");
|
||||
let (h, _) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 10);
|
||||
|
||||
let mut path = Path::new("/test.json");
|
||||
let (h, _) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 11);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_recognizer_with_prefix() {
|
||||
let mut router = Router::<usize>::build();
|
||||
router.path("/name", 10).0.set_id(0);
|
||||
router.path("/name/{val}", 11).0.set_id(1);
|
||||
let mut router = router.finish();
|
||||
|
||||
let mut path = Path::new("/name");
|
||||
path.skip(5);
|
||||
assert!(router.recognize_mut(&mut path).is_none());
|
||||
|
||||
let mut path = Path::new("/test/name");
|
||||
path.skip(5);
|
||||
let (h, _) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 10);
|
||||
|
||||
let mut path = Path::new("/test/name/value");
|
||||
path.skip(5);
|
||||
let (h, id) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 11);
|
||||
assert_eq!(id, ResourceId(1));
|
||||
assert_eq!(path.get("val").unwrap(), "value");
|
||||
assert_eq!(&path["val"], "value");
|
||||
|
||||
// same patterns
|
||||
let mut router = Router::<usize>::build();
|
||||
router.path("/name", 10);
|
||||
router.path("/name/{val}", 11);
|
||||
let mut router = router.finish();
|
||||
|
||||
let mut path = Path::new("/name");
|
||||
path.skip(6);
|
||||
assert!(router.recognize_mut(&mut path).is_none());
|
||||
|
||||
let mut path = Path::new("/test2/name");
|
||||
path.skip(6);
|
||||
let (h, _) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 10);
|
||||
|
||||
let mut path = Path::new("/test2/name-test");
|
||||
path.skip(6);
|
||||
assert!(router.recognize_mut(&mut path).is_none());
|
||||
|
||||
let mut path = Path::new("/test2/name/ttt");
|
||||
path.skip(6);
|
||||
let (h, _) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 11);
|
||||
assert_eq!(&path["val"], "ttt");
|
||||
}
|
||||
}
|
@@ -1,247 +0,0 @@
|
||||
use crate::ResourcePath;
|
||||
|
||||
#[allow(dead_code)]
|
||||
const GEN_DELIMS: &[u8] = b":/?#[]@";
|
||||
#[allow(dead_code)]
|
||||
const SUB_DELIMS_WITHOUT_QS: &[u8] = b"!$'()*,";
|
||||
#[allow(dead_code)]
|
||||
const SUB_DELIMS: &[u8] = b"!$'()*,+?=;";
|
||||
#[allow(dead_code)]
|
||||
const RESERVED: &[u8] = b":/?#[]@!$'()*,+?=;";
|
||||
#[allow(dead_code)]
|
||||
const UNRESERVED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
1234567890
|
||||
-._~";
|
||||
const ALLOWED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
1234567890
|
||||
-._~
|
||||
!$'()*,";
|
||||
const QS: &[u8] = b"+&=;b";
|
||||
|
||||
#[inline]
|
||||
fn bit_at(array: &[u8], ch: u8) -> bool {
|
||||
array[(ch >> 3) as usize] & (1 << (ch & 7)) != 0
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn set_bit(array: &mut [u8], ch: u8) {
|
||||
array[(ch >> 3) as usize] |= 1 << (ch & 7)
|
||||
}
|
||||
|
||||
thread_local! {
|
||||
static DEFAULT_QUOTER: Quoter = Quoter::new(b"@:", b"/+");
|
||||
}
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct Url {
|
||||
uri: http::Uri,
|
||||
path: Option<String>,
|
||||
}
|
||||
|
||||
impl Url {
|
||||
pub fn new(uri: http::Uri) -> Url {
|
||||
let path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
|
||||
|
||||
Url { uri, path }
|
||||
}
|
||||
|
||||
pub fn with_quoter(uri: http::Uri, quoter: &Quoter) -> Url {
|
||||
Url {
|
||||
path: quoter.requote(uri.path().as_bytes()),
|
||||
uri,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uri(&self) -> &http::Uri {
|
||||
&self.uri
|
||||
}
|
||||
|
||||
pub fn path(&self) -> &str {
|
||||
if let Some(ref s) = self.path {
|
||||
s
|
||||
} else {
|
||||
self.uri.path()
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn update(&mut self, uri: &http::Uri) {
|
||||
self.uri = uri.clone();
|
||||
self.path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn update_with_quoter(&mut self, uri: &http::Uri, quoter: &Quoter) {
|
||||
self.uri = uri.clone();
|
||||
self.path = quoter.requote(uri.path().as_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourcePath for Url {
|
||||
#[inline]
|
||||
fn path(&self) -> &str {
|
||||
self.path()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Quoter {
|
||||
safe_table: [u8; 16],
|
||||
protected_table: [u8; 16],
|
||||
}
|
||||
|
||||
impl Quoter {
|
||||
pub fn new(safe: &[u8], protected: &[u8]) -> Quoter {
|
||||
let mut q = Quoter {
|
||||
safe_table: [0; 16],
|
||||
protected_table: [0; 16],
|
||||
};
|
||||
|
||||
// prepare safe table
|
||||
for i in 0..128 {
|
||||
if ALLOWED.contains(&i) {
|
||||
set_bit(&mut q.safe_table, i);
|
||||
}
|
||||
if QS.contains(&i) {
|
||||
set_bit(&mut q.safe_table, i);
|
||||
}
|
||||
}
|
||||
|
||||
for ch in safe {
|
||||
set_bit(&mut q.safe_table, *ch)
|
||||
}
|
||||
|
||||
// prepare protected table
|
||||
for ch in protected {
|
||||
set_bit(&mut q.safe_table, *ch);
|
||||
set_bit(&mut q.protected_table, *ch);
|
||||
}
|
||||
|
||||
q
|
||||
}
|
||||
|
||||
pub fn requote(&self, val: &[u8]) -> Option<String> {
|
||||
let mut has_pct = 0;
|
||||
let mut pct = [b'%', 0, 0];
|
||||
let mut idx = 0;
|
||||
let mut cloned: Option<Vec<u8>> = None;
|
||||
|
||||
let len = val.len();
|
||||
while idx < len {
|
||||
let ch = val[idx];
|
||||
|
||||
if has_pct != 0 {
|
||||
pct[has_pct] = val[idx];
|
||||
has_pct += 1;
|
||||
if has_pct == 3 {
|
||||
has_pct = 0;
|
||||
let buf = cloned.as_mut().unwrap();
|
||||
|
||||
if let Some(ch) = restore_ch(pct[1], pct[2]) {
|
||||
if ch < 128 {
|
||||
if bit_at(&self.protected_table, ch) {
|
||||
buf.extend_from_slice(&pct);
|
||||
idx += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if bit_at(&self.safe_table, ch) {
|
||||
buf.push(ch);
|
||||
idx += 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
buf.push(ch);
|
||||
} else {
|
||||
buf.extend_from_slice(&pct[..]);
|
||||
}
|
||||
}
|
||||
} else if ch == b'%' {
|
||||
has_pct = 1;
|
||||
if cloned.is_none() {
|
||||
let mut c = Vec::with_capacity(len);
|
||||
c.extend_from_slice(&val[..idx]);
|
||||
cloned = Some(c);
|
||||
}
|
||||
} else if let Some(ref mut cloned) = cloned {
|
||||
cloned.push(ch)
|
||||
}
|
||||
idx += 1;
|
||||
}
|
||||
|
||||
cloned.map(|data| {
|
||||
// SAFETY: we get data from http::Uri, which already does UTF-8 checks;
// this code only decodes valid pct-encoded values
|
||||
unsafe { String::from_utf8_unchecked(data) }
|
||||
})
|
||||
}
|
||||
}
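
// Illustrative sketch (not part of the original file): `requote` decodes percent-escapes except
// for protected characters (here '/' and '+'), which stay encoded so path segmentation is
// preserved; it returns `None` when nothing needed decoding.
#[cfg(test)]
mod requote_sketch {
    use super::Quoter;

    #[test]
    fn selective_decoding() {
        let q = Quoter::new(b"@:", b"/+");

        // "%25" is an encoded '%', which is not protected, so it is decoded
        assert_eq!(q.requote(b"/a%25b").as_deref(), Some("/a%b"));

        // "%2F" is an encoded '/', which is protected, so it stays percent-encoded
        assert_eq!(q.requote(b"/a%2Fb").as_deref(), Some("/a%2Fb"));

        // no percent signs at all -> no re-allocation, `None` is returned
        assert!(q.requote(b"/plain").is_none());
    }
}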
|
||||
|
||||
#[inline]
|
||||
fn from_hex(v: u8) -> Option<u8> {
|
||||
if (b'0'..=b'9').contains(&v) {
|
||||
Some(v - 0x30) // ord('0') == 0x30
|
||||
} else if (b'A'..=b'F').contains(&v) {
|
||||
Some(v - 0x41 + 10) // ord('A') == 0x41
|
||||
} else if (b'a'..=b'f').contains(&v) {
|
||||
Some(v - 0x61 + 10) // ord('a') == 0x61
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
|
||||
from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| d1 << 4 | d2))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use http::Uri;
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use super::*;
|
||||
use crate::{Path, ResourceDef};
|
||||
|
||||
#[test]
|
||||
fn test_parse_url() {
|
||||
let re = ResourceDef::new("/user/{id}/test");
|
||||
|
||||
let url = Uri::try_from("/user/2345/test").unwrap();
|
||||
let mut path = Path::new(Url::new(url));
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "2345");
|
||||
|
||||
let url = Uri::try_from("/user/qwe%25/test").unwrap();
|
||||
let mut path = Path::new(Url::new(url));
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "qwe%");
|
||||
|
||||
let url = Uri::try_from("/user/qwe%25rty/test").unwrap();
|
||||
let mut path = Path::new(Url::new(url));
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "qwe%rty");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_hex() {
|
||||
let hex = b"0123456789abcdefABCDEF";
|
||||
|
||||
for i in 0..256 {
|
||||
let c = i as u8;
|
||||
if hex.contains(&c) {
|
||||
assert!(from_hex(c).is_some())
|
||||
} else {
|
||||
assert!(from_hex(c).is_none())
|
||||
}
|
||||
}
|
||||
|
||||
let expected = [
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15,
|
||||
];
|
||||
for i in 0..hex.len() {
|
||||
assert_eq!(from_hex(hex[i]).unwrap(), expected[i]);
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,6 +1,19 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2021-xx-xx
|
||||
* Add `Arbiter::try_current` for situations where thread may or may not have Arbiter context. [#408]
|
||||
* Start io-uring with `System::new` when feature is enabled. [#395]
|
||||
|
||||
[#395]: https://github.com/actix/actix-net/pull/395
|
||||
[#408]: https://github.com/actix/actix-net/pull/408
|
||||
|
||||
|
||||
## 2.3.0 - 2021-10-11
|
||||
* The `spawn` method can now resolve with non-unit outputs. [#369]
|
||||
* Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux. [#374]
|
||||
|
||||
[#369]: https://github.com/actix/actix-net/pull/369
|
||||
[#374]: https://github.com/actix/actix-net/pull/374
|
||||
|
||||
|
||||
## 2.2.0 - 2021-03-29
|
||||
@@ -8,7 +21,7 @@
|
||||
`Ready` object in ok variant. [#293]
|
||||
* Breakage is acceptable since `ActixStream` was not intended to be public.
|
||||
|
||||
[#293] https://github.com/actix/actix-net/pull/293
|
||||
[#293]: https://github.com/actix/actix-net/pull/293
|
||||
|
||||
|
||||
## 2.1.0 - 2021-02-24
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "actix-rt"
|
||||
version = "2.2.0"
|
||||
version = "2.3.0"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"Rob Ede <robjtede@icloud.com>",
|
||||
@@ -8,8 +8,7 @@ authors = [
|
||||
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
|
||||
keywords = ["async", "futures", "io", "runtime"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net"
|
||||
documentation = "https://docs.rs/actix-rt"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
@@ -21,13 +20,17 @@ path = "src/lib.rs"
|
||||
[features]
|
||||
default = ["macros"]
|
||||
macros = ["actix-macros"]
|
||||
io-uring = ["tokio-uring"]
|
||||
|
||||
[dependencies]
|
||||
actix-macros = { version = "0.2.0", optional = true }
|
||||
actix-macros = { version = "0.2.3", optional = true }
|
||||
|
||||
futures-core = { version = "0.3", default-features = false }
|
||||
tokio = { version = "1.3", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
|
||||
tokio = { version = "1.5.1", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
|
||||
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
tokio-uring = { version = "0.1", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "1.2", features = ["full"] }
|
||||
tokio = { version = "1.5.1", features = ["full"] }
|
||||
hyper = { version = "0.14", default-features = false, features = ["server", "tcp", "http1"] }
|
||||
|
@@ -3,11 +3,11 @@
|
||||
> Tokio-based single-threaded async runtime for the Actix ecosystem.
|
||||
|
||||
[](https://crates.io/crates/actix-rt)
|
||||
[](https://docs.rs/actix-rt/2.2.0)
|
||||
[](https://docs.rs/actix-rt/2.3.0)
|
||||
[](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
|
||||

|
||||
<br />
|
||||
[](https://deps.rs/crate/actix-rt/2.2.0)
|
||||
[](https://deps.rs/crate/actix-rt/2.3.0)
|
||||

|
||||
[](https://discord.gg/WghFtEH6Hb)
|
||||
|
||||
|
@@ -9,12 +9,9 @@ use std::{
|
||||
};
|
||||
|
||||
use futures_core::ready;
|
||||
use tokio::{sync::mpsc, task::LocalSet};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::{
|
||||
runtime::{default_tokio_runtime, Runtime},
|
||||
system::{System, SystemCommand},
|
||||
};
|
||||
use crate::system::{System, SystemCommand};
|
||||
|
||||
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
@@ -98,16 +95,19 @@ impl Arbiter {
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if a [System] is not registered on the current thread.
|
||||
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Arbiter {
|
||||
Self::with_tokio_rt(|| {
|
||||
default_tokio_runtime().expect("Cannot create new Arbiter's Runtime.")
|
||||
crate::runtime::default_tokio_runtime()
|
||||
.expect("Cannot create new Arbiter's Runtime.")
|
||||
})
|
||||
}
|
||||
|
||||
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
|
||||
///
|
||||
/// [tokio-runtime]: tokio::runtime::Runtime
|
||||
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
|
||||
#[doc(hidden)]
|
||||
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
|
||||
where
|
||||
@@ -127,7 +127,7 @@ impl Arbiter {
|
||||
.spawn({
|
||||
let tx = tx.clone();
|
||||
move || {
|
||||
let rt = Runtime::from(runtime_factory());
|
||||
let rt = crate::runtime::Runtime::from(runtime_factory());
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
System::set_current(sys);
|
||||
@@ -159,15 +159,67 @@ impl Arbiter {
|
||||
Arbiter { tx, thread_handle }
|
||||
}
|
||||
|
||||
/// Sets up an Arbiter runner in a new System using the provided runtime local task set.
|
||||
pub(crate) fn in_new_system(local: &LocalSet) -> ArbiterHandle {
|
||||
/// Spawn a new Arbiter thread and start its event loop with `tokio-uring` runtime.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if a [System] is not registered on the current thread.
|
||||
#[cfg(all(target_os = "linux", feature = "io-uring"))]
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Arbiter {
|
||||
let sys = System::current();
|
||||
let system_id = sys.id();
|
||||
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
|
||||
|
||||
let thread_handle = thread::Builder::new()
|
||||
.name(name.clone())
|
||||
.spawn({
|
||||
let tx = tx.clone();
|
||||
move || {
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
System::set_current(sys);
|
||||
|
||||
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
|
||||
|
||||
// register arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
|
||||
|
||||
ready_tx.send(()).unwrap();
|
||||
|
||||
// run arbiter event processing loop
|
||||
tokio_uring::start(ArbiterRunner { rx });
|
||||
|
||||
// deregister arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::DeregisterArbiter(arb_id));
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
panic!("Cannot spawn Arbiter's thread: {:?}. {:?}", &name, err)
|
||||
});
|
||||
|
||||
ready_rx.recv().unwrap();
|
||||
|
||||
Arbiter { tx, thread_handle }
|
||||
}
|
||||
|
||||
/// Sets up an Arbiter runner in a new System using the environment's local set.
|
||||
pub(crate) fn in_new_system() -> ArbiterHandle {
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
|
||||
|
||||
local.spawn_local(ArbiterRunner { rx });
|
||||
crate::spawn(ArbiterRunner { rx });
|
||||
|
||||
hnd
|
||||
}
|
||||
@@ -188,6 +240,15 @@ impl Arbiter {
|
||||
})
|
||||
}
|
||||
|
||||
/// Try to get current running arbiter handle.
|
||||
///
|
||||
/// Returns `None` if no Arbiter has been started.
|
||||
///
|
||||
/// Unlike [`current`](Self::current), this never panics.
|
||||
pub fn try_current() -> Option<ArbiterHandle> {
|
||||
HANDLE.with(|cell| cell.borrow().clone())
|
||||
}
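
// Hedged usage sketch (not part of this diff): `try_current` lets callers branch on whether an
// Arbiter is registered on the current thread instead of panicking like `current` does. The
// helper name below is hypothetical.
fn spawn_on_arbiter_if_any() -> bool {
    match Arbiter::try_current() {
        // an Arbiter handle exists for this thread; send the task to it
        Some(hnd) => hnd.spawn(async { /* task body */ }),
        // no Arbiter context on this thread; caller decides how to fall back
        None => false,
    }
}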
|
||||
|
||||
/// Stop Arbiter from continuing its event loop.
|
||||
///
|
||||
/// Returns true if the stop message was sent successfully and false if the Arbiter has been dropped.
|
||||
|
@@ -15,7 +15,7 @@
|
||||
//! blocking task thread-pool using [`task::spawn_blocking`].
|
||||
//!
|
||||
//! # Examples
|
||||
//! ```
|
||||
//! ```no_run
|
||||
//! use std::sync::mpsc;
|
||||
//! use actix_rt::{Arbiter, System};
|
||||
//!
|
||||
@@ -32,6 +32,10 @@
|
||||
//! arbiter.stop();
|
||||
//! arbiter.join().unwrap();
|
||||
//! ```
|
||||
//!
|
||||
//! # `io-uring` Support
|
||||
//! There is experimental support for using io-uring with this crate by enabling the
|
||||
//! `io-uring` feature. For now, it is semver exempt.
|
||||
|
||||
#![deny(rust_2018_idioms, nonstandard_style)]
|
||||
#![allow(clippy::type_complexity)]
|
||||
@@ -39,6 +43,9 @@
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
#[cfg(all(not(target_os = "linux"), feature = "io-uring"))]
|
||||
compile_error!("io_uring is a linux only feature.");
|
||||
|
||||
use std::future::Future;
|
||||
|
||||
use tokio::task::JoinHandle;
|
||||
@@ -46,7 +53,10 @@ use tokio::task::JoinHandle;
|
||||
// Cannot define a main macro when compiled into test harness.
|
||||
// Workaround for https://github.com/rust-lang/rust/issues/62127.
|
||||
#[cfg(all(feature = "macros", not(test)))]
|
||||
pub use actix_macros::{main, test};
|
||||
pub use actix_macros::main;
|
||||
|
||||
#[cfg(feature = "macros")]
|
||||
pub use actix_macros::test;
|
||||
|
||||
mod arbiter;
|
||||
mod runtime;
|
||||
@@ -155,14 +165,41 @@ pub mod task {
|
||||
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
|
||||
}
|
||||
|
||||
/// Spawns a future on the current thread.
|
||||
/// Spawns a future on the current thread as a new task.
|
||||
///
|
||||
/// If not immediately awaited, the task can be cancelled using [`JoinHandle::abort`].
|
||||
///
|
||||
/// The provided future is spawned as a new task; therefore, panics are caught.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if Actix system is not running.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// # use std::time::Duration;
|
||||
/// # actix_rt::Runtime::new().unwrap().block_on(async {
|
||||
/// // task resolves successfully
|
||||
/// assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
|
||||
///
|
||||
/// // task panics
|
||||
/// assert!(actix_rt::spawn(async {
|
||||
/// panic!("panic is caught at task boundary");
|
||||
/// })
|
||||
/// .await
|
||||
/// .unwrap_err()
|
||||
/// .is_panic());
|
||||
///
|
||||
/// // task is cancelled before completion
|
||||
/// let handle = actix_rt::spawn(actix_rt::time::sleep(Duration::from_secs(100)));
|
||||
/// handle.abort();
|
||||
/// assert!(handle.await.unwrap_err().is_cancelled());
|
||||
/// # });
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn spawn<Fut>(f: Fut) -> JoinHandle<()>
|
||||
pub fn spawn<Fut>(f: Fut) -> JoinHandle<Fut::Output>
|
||||
where
|
||||
Fut: Future<Output = ()> + 'static,
|
||||
Fut: Future + 'static,
|
||||
Fut::Output: 'static,
|
||||
{
|
||||
tokio::task::spawn_local(f)
|
||||
}
|
||||
|
@@ -31,11 +31,6 @@ impl Runtime {
|
||||
})
|
||||
}
|
||||
|
||||
/// Reference to local task set.
|
||||
pub(crate) fn local_set(&self) -> &LocalSet {
|
||||
&self.local
|
||||
}
|
||||
|
||||
/// Offload a future onto the single-threaded runtime.
|
||||
///
|
||||
/// The returned join handle can be used to await the future's result.
|
||||
|
@@ -11,7 +11,7 @@ use std::{
|
||||
use futures_core::ready;
|
||||
use tokio::sync::{mpsc, oneshot};
|
||||
|
||||
use crate::{arbiter::ArbiterHandle, runtime::default_tokio_runtime, Arbiter, Runtime};
|
||||
use crate::{arbiter::ArbiterHandle, Arbiter};
|
||||
|
||||
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
@@ -29,6 +29,7 @@ pub struct System {
|
||||
arbiter_handle: ArbiterHandle,
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
impl System {
|
||||
/// Create a new system.
|
||||
///
|
||||
@@ -37,7 +38,7 @@ impl System {
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new() -> SystemRunner {
|
||||
Self::with_tokio_rt(|| {
|
||||
default_tokio_runtime()
|
||||
crate::runtime::default_tokio_runtime()
|
||||
.expect("Default Actix (Tokio) runtime could not be created.")
|
||||
})
|
||||
}
|
||||
@@ -53,8 +54,8 @@ impl System {
|
||||
let (stop_tx, stop_rx) = oneshot::channel();
|
||||
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
|
||||
|
||||
let rt = Runtime::from(runtime_factory());
|
||||
let sys_arbiter = Arbiter::in_new_system(rt.local_set());
|
||||
let rt = crate::runtime::Runtime::from(runtime_factory());
|
||||
let sys_arbiter = rt.block_on(async { Arbiter::in_new_system() });
|
||||
let system = System::construct(sys_tx, sys_arbiter.clone());
|
||||
|
||||
system
|
||||
@@ -72,7 +73,32 @@ impl System {
|
||||
system,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "io-uring")]
|
||||
impl System {
|
||||
/// Create a new system.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if underlying Tokio runtime can not be created.
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new() -> SystemRunner {
|
||||
SystemRunner
|
||||
}
|
||||
|
||||
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
|
||||
///
|
||||
/// [tokio-runtime]: tokio::runtime::Runtime
|
||||
#[doc(hidden)]
|
||||
pub fn with_tokio_rt<F>(_: F) -> SystemRunner
|
||||
where
|
||||
F: Fn() -> tokio::runtime::Runtime,
|
||||
{
|
||||
unimplemented!("System::with_tokio_rt is not implemented yet")
|
||||
}
|
||||
}
|
||||
|
||||
impl System {
|
||||
/// Constructs new system and registers it on the current thread.
|
||||
pub(crate) fn construct(
|
||||
sys_tx: mpsc::UnboundedSender<SystemCommand>,
|
||||
@@ -104,7 +130,7 @@ impl System {
|
||||
///
|
||||
/// Returns `None` if no System has been started.
|
||||
///
|
||||
/// Contrary to `current`, this never panics.
|
||||
/// Unlike [`current`](Self::current), this never panics.
|
||||
pub fn try_current() -> Option<System> {
|
||||
CURRENT.with(|cell| cell.borrow().clone())
|
||||
}
|
||||
@@ -149,15 +175,18 @@ impl System {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
/// Runner that keeps a [System]'s event loop alive until stop message is received.
|
||||
#[must_use = "A SystemRunner does nothing unless `run` is called."]
|
||||
#[derive(Debug)]
|
||||
pub struct SystemRunner {
|
||||
rt: Runtime,
|
||||
rt: crate::runtime::Runtime,
|
||||
stop_rx: oneshot::Receiver<i32>,
|
||||
#[allow(dead_code)]
|
||||
system: System,
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
impl SystemRunner {
|
||||
/// Starts event loop and will return once [System] is [stopped](System::stop).
|
||||
pub fn run(self) -> io::Result<()> {
|
||||
@@ -187,6 +216,45 @@ impl SystemRunner {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "io-uring")]
|
||||
/// Runner that keeps a [System]'s event loop alive until stop message is received.
|
||||
#[must_use = "A SystemRunner does nothing unless `run` is called."]
|
||||
#[derive(Debug)]
|
||||
pub struct SystemRunner;
|
||||
|
||||
#[cfg(feature = "io-uring")]
|
||||
impl SystemRunner {
|
||||
/// Starts event loop and will return once [System] is [stopped](System::stop).
|
||||
pub fn run(self) -> io::Result<()> {
|
||||
unimplemented!("SystemRunner::run is not implemented yet")
|
||||
}
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future completes.
|
||||
#[inline]
|
||||
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
|
||||
tokio_uring::start(async move {
|
||||
let (stop_tx, stop_rx) = oneshot::channel();
|
||||
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
|
||||
|
||||
let sys_arbiter = Arbiter::in_new_system();
|
||||
let system = System::construct(sys_tx, sys_arbiter.clone());
|
||||
|
||||
system
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
|
||||
.unwrap();
|
||||
|
||||
// init background system arbiter
|
||||
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
|
||||
tokio_uring::spawn(sys_ctrl);
|
||||
|
||||
let res = fut.await;
|
||||
drop(stop_rx);
|
||||
res
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum SystemCommand {
|
||||
Exit(i32),
|
||||
|
actix-rt/tests/test-macro-import-conflict.rs
@@ -0,0 +1,17 @@
|
||||
//! Checks that test macro does not cause problems in the presence of imports named "test" that
|
||||
//! could be either a module with test items or the "test with runtime" macro itself.
|
||||
//!
|
||||
//! Before actix/actix-net#399 was implemented, this macro was run twice. The first run output
//! `#[test]` and it got run again since it was in scope.
|
||||
//!
|
||||
//! Prevented by using the fully-qualified test marker (`#[::core::prelude::v1::test]`).
|
||||
|
||||
#![cfg(feature = "macros")]
|
||||
|
||||
use actix_rt::time as test;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_naming_conflict() {
|
||||
use test as time;
|
||||
time::sleep(std::time::Duration::from_millis(2)).await;
|
||||
}
|
@@ -1,15 +1,15 @@
|
||||
use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::channel,
|
||||
Arc,
|
||||
},
|
||||
thread,
|
||||
future::Future,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use actix_rt::{Arbiter, System};
|
||||
use tokio::sync::oneshot;
|
||||
use actix_rt::{task::JoinError, Arbiter, System};
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
use {
|
||||
std::{sync::mpsc::channel, thread},
|
||||
tokio::sync::oneshot,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn await_for_timer() {
|
||||
@@ -106,6 +106,10 @@ fn wait_for_spawns() {
|
||||
assert!(rt.block_on(handle).is_err());
|
||||
}
|
||||
|
||||
// Temporarily disabled tests for the io-uring feature.
|
||||
// They should be enabled when possible.
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_spawn_fn_runs() {
|
||||
let _ = System::new();
|
||||
@@ -122,6 +126,7 @@ fn arbiter_spawn_fn_runs() {
|
||||
arbiter.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_handle_spawn_fn_runs() {
|
||||
let sys = System::new();
|
||||
@@ -144,6 +149,7 @@ fn arbiter_handle_spawn_fn_runs() {
|
||||
sys.run().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_drop_no_panic_fn() {
|
||||
let _ = System::new();
|
||||
@@ -155,6 +161,7 @@ fn arbiter_drop_no_panic_fn() {
|
||||
arbiter.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_drop_no_panic_fut() {
|
||||
let _ = System::new();
|
||||
@@ -166,18 +173,7 @@ fn arbiter_drop_no_panic_fut() {
|
||||
arbiter.join().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn no_system_current_panic() {
|
||||
System::current();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn no_system_arbiter_new_panic() {
|
||||
Arbiter::new();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn system_arbiter_spawn() {
|
||||
let runner = System::new();
|
||||
@@ -208,6 +204,7 @@ fn system_arbiter_spawn() {
|
||||
thread.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn system_stop_stops_arbiters() {
|
||||
let sys = System::new();
|
||||
@@ -220,8 +217,8 @@ fn system_stop_stops_arbiters() {
|
||||
System::current().stop();
|
||||
sys.run().unwrap();
|
||||
|
||||
// account for slightly slow thread de-spawns (only observed on windows)
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
// account for slightly slow thread de-spawns
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
|
||||
// arbiter should be dead and return false
|
||||
assert!(!Arbiter::current().spawn_fn(|| {}));
|
||||
@@ -230,6 +227,7 @@ fn system_stop_stops_arbiters() {
|
||||
arb.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn new_system_with_tokio() {
|
||||
let (tx, rx) = channel();
|
||||
@@ -262,8 +260,14 @@ fn new_system_with_tokio() {
|
||||
assert_eq!(rx.recv().unwrap(), 42);
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn new_arbiter_with_tokio() {
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
};
|
||||
|
||||
let _ = System::new();
|
||||
|
||||
let arb = Arbiter::with_tokio_rt(|| {
|
||||
@@ -286,7 +290,19 @@ fn new_arbiter_with_tokio() {
|
||||
|
||||
arb.join().unwrap();
|
||||
|
||||
assert_eq!(false, counter.load(Ordering::SeqCst));
|
||||
assert!(!counter.load(Ordering::SeqCst));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn no_system_current_panic() {
|
||||
System::current();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn no_system_arbiter_new_panic() {
|
||||
Arbiter::new();
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -298,3 +314,55 @@ fn try_current_no_system() {
|
||||
fn try_current_with_system() {
|
||||
System::new().block_on(async { assert!(System::try_current().is_some()) });
|
||||
}
|
||||
|
||||
#[allow(clippy::unit_cmp)]
|
||||
#[test]
|
||||
fn spawn_local() {
|
||||
System::new().block_on(async {
|
||||
// demonstrate that spawn -> R is strictly more capable than spawn -> ()
|
||||
|
||||
assert_eq!(actix_rt::spawn(async {}).await.unwrap(), ());
|
||||
assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
|
||||
assert!(actix_rt::spawn(async { panic!("") }).await.is_err());
|
||||
|
||||
actix_rt::spawn(async { tokio::time::sleep(Duration::from_millis(50)).await })
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
fn g<F: Future<Output = Result<(), JoinError>>>(_f: F) {}
|
||||
g(actix_rt::spawn(async {}));
|
||||
// g(actix_rt::spawn(async { 1 })); // compile err
|
||||
|
||||
fn h<F: Future<Output = Result<R, JoinError>>, R>(_f: F) {}
|
||||
h(actix_rt::spawn(async {}));
|
||||
h(actix_rt::spawn(async { 1 }));
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(all(target_os = "linux", feature = "io-uring"))]
|
||||
#[test]
|
||||
fn tokio_uring_arbiter() {
|
||||
System::new().block_on(async {
|
||||
let (tx, rx) = std::sync::mpsc::channel();
|
||||
|
||||
Arbiter::new().spawn(async move {
|
||||
let handle = actix_rt::spawn(async move {
|
||||
let f = tokio_uring::fs::File::create("test.txt").await.unwrap();
|
||||
let buf = b"Hello World!";
|
||||
|
||||
let (res, _) = f.write_at(&buf[..], 0).await;
|
||||
assert!(res.is_ok());
|
||||
|
||||
f.sync_all().await.unwrap();
|
||||
f.close().await.unwrap();
|
||||
|
||||
std::fs::remove_file("test.txt").unwrap();
|
||||
});
|
||||
|
||||
handle.await.unwrap();
|
||||
tx.send(true).unwrap();
|
||||
});
|
||||
|
||||
assert!(rx.recv().unwrap());
|
||||
})
|
||||
}
|
||||
|
@@ -1,12 +1,35 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2021-xx-xx
|
||||
* Server can be started in regular Tokio runtime. [#408]
|
||||
* Expose new `Server` type whose `Future` impl resolves when server stops. [#408]
|
||||
* Rename `Server` to `ServerHandle`. [#407]
|
||||
* Add `Server::handle` to obtain handle to server. [#408]
|
||||
* Rename `ServerBuilder::{maxconn => max_concurrent_connections}`. [#407]
|
||||
* Deprecate crate-level `new` shortcut for server builder. [#408]
|
||||
* Minimum supported Rust version (MSRV) is now 1.52.
|
||||
|
||||
[#407]: https://github.com/actix/actix-net/pull/407
|
||||
[#408]: https://github.com/actix/actix-net/pull/408
|
||||
|
||||
|
||||
## 2.0.0-beta.6 - 2021-10-11
|
||||
* Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux. [#374]
|
||||
* Server no longer listens to the `SIGHUP` signal. Previously, the received signal was not used but
  did block subsequent exit signals from working. [#389]
|
||||
* Remove `config` module. `ServiceConfig`, `ServiceRuntime` public types are removed due to
|
||||
this change. [#349]
|
||||
* Remove `ServerBuilder::configure` [#349]
|
||||
|
||||
[#374]: https://github.com/actix/actix-net/pull/374
|
||||
[#349]: https://github.com/actix/actix-net/pull/349
|
||||
[#389]: https://github.com/actix/actix-net/pull/389
|
||||
|
||||
|
||||
## 2.0.0-beta.5 - 2021-04-20
|
||||
* Server shutdown would notify all workers to exit regardless if shutdown is graceful.
|
||||
This would make all worker shutdown immediately in force shutdown case. [#333]
|
||||
|
||||
* Server shutdown notifies all workers to exit regardless if shutdown is graceful. This causes all
|
||||
workers to shutdown immediately in force shutdown case. [#333]
|
||||
|
||||
[#333]: https://github.com/actix/actix-net/pull/333
|
||||
|
||||
|
||||
|
@@ -1,13 +1,12 @@
|
||||
[package]
|
||||
name = "actix-server"
|
||||
version = "2.0.0-beta.5"
|
||||
version = "2.0.0-beta.6"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"fakeshadow <24548779@qq.com>",
|
||||
]
|
||||
description = "General purpose TCP server built for the Actix ecosystem"
|
||||
keywords = ["network", "framework", "async", "futures"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
@@ -19,6 +18,7 @@ path = "src/lib.rs"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
io-uring = ["actix-rt/io-uring"]
|
||||
|
||||
[dependencies]
|
||||
actix-rt = { version = "2.0.0", default-features = false }
|
||||
@@ -29,14 +29,13 @@ futures-core = { version = "0.3.7", default-features = false, features = ["alloc
|
||||
log = "0.4"
|
||||
mio = { version = "0.7.6", features = ["os-poll", "net"] }
|
||||
num_cpus = "1.13"
|
||||
slab = "0.4"
|
||||
tokio = { version = "1.2", features = ["sync"] }
|
||||
tokio = { version = "1.5.1", features = ["sync"] }
|
||||
|
||||
[dev-dependencies]
|
||||
actix-codec = "0.4.0-beta.1"
|
||||
actix-codec = "0.4.0"
|
||||
actix-rt = "2.0.0"
|
||||
|
||||
bytes = "1"
|
||||
env_logger = "0.8"
|
||||
env_logger = "0.9"
|
||||
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
|
||||
tokio = { version = "1", features = ["io-util"] }
|
||||
tokio = { version = "1.5.1", features = ["io-util", "rt-multi-thread", "macros"] }
|
||||
|
@@ -10,7 +10,7 @@
|
||||
//! the length of each line it echoes and the total size of data sent when the connection is closed.
|
||||
|
||||
use std::{
|
||||
env, io,
|
||||
io,
|
||||
sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
@@ -23,12 +23,10 @@ use actix_service::{fn_service, ServiceFactoryExt as _};
|
||||
use bytes::BytesMut;
|
||||
use futures_util::future::ok;
|
||||
use log::{error, info};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env::set_var("RUST_LOG", "info");
|
||||
env_logger::init();
|
||||
async fn run() -> io::Result<()> {
|
||||
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
|
||||
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
@@ -88,3 +86,16 @@ async fn main() -> io::Result<()> {
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
run().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// alternatively:
|
||||
// #[actix_rt::main]
|
||||
// async fn main() -> io::Result<()> {
|
||||
// run().await?;
|
||||
// Ok(())
|
||||
// }
|
||||
|
@@ -1,226 +1,135 @@
|
||||
use std::time::Duration;
|
||||
use std::{io, thread};
|
||||
use std::{io, thread, time::Duration};
|
||||
|
||||
use actix_rt::{
|
||||
time::{sleep, Instant},
|
||||
System,
|
||||
};
|
||||
use log::{error, info};
|
||||
use actix_rt::time::Instant;
|
||||
use log::{debug, error, info};
|
||||
use mio::{Interest, Poll, Token as MioToken};
|
||||
use slab::Slab;
|
||||
|
||||
use crate::server::Server;
|
||||
use crate::socket::{MioListener, SocketAddr};
|
||||
use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
|
||||
use crate::worker::{Conn, WorkerHandleAccept};
|
||||
use crate::Token;
|
||||
use crate::{
|
||||
availability::Availability,
|
||||
socket::MioListener,
|
||||
waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN},
|
||||
worker::{Conn, ServerWorker, WorkerHandleAccept, WorkerHandleServer},
|
||||
ServerBuilder, ServerHandle,
|
||||
};
|
||||
|
||||
const TIMEOUT_DURATION_ON_ERROR: Duration = Duration::from_millis(510);
|
||||
|
||||
struct ServerSocketInfo {
|
||||
/// Address of socket. Mainly used for logging.
|
||||
addr: SocketAddr,
|
||||
|
||||
/// Beware this is the crate-level token used to identify the socket and should not be confused
/// with `mio::Token`.
|
||||
token: Token,
|
||||
token: usize,
|
||||
|
||||
lst: MioListener,
|
||||
|
||||
/// Timeout is used to mark the deadline when this socket's listener should be registered again
|
||||
/// after an error.
|
||||
timeout: Option<Instant>,
|
||||
}
|
||||
|
||||
/// Accept loop that lives with `ServerBuilder`.
///
/// It's tasked with constructing the `Poll` instance and `WakerQueue`, which are distributed to
/// `Accept` and `Worker`.
///
/// It also listens for `ServerCommand` and pushes interests to the `WakerQueue`.
|
||||
pub(crate) struct AcceptLoop {
|
||||
srv: Option<Server>,
|
||||
poll: Option<Poll>,
|
||||
waker: WakerQueue,
|
||||
}
|
||||
|
||||
impl AcceptLoop {
|
||||
pub fn new(srv: Server) -> Self {
|
||||
let poll = Poll::new().unwrap_or_else(|e| panic!("Can not create `mio::Poll`: {}", e));
|
||||
let waker = WakerQueue::new(poll.registry())
|
||||
.unwrap_or_else(|e| panic!("Can not create `mio::Waker`: {}", e));
|
||||
|
||||
Self {
|
||||
srv: Some(srv),
|
||||
poll: Some(poll),
|
||||
waker,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn waker_owned(&self) -> WakerQueue {
|
||||
self.waker.clone()
|
||||
}
|
||||
|
||||
pub fn wake(&self, i: WakerInterest) {
|
||||
self.waker.wake(i);
|
||||
}
|
||||
|
||||
pub(crate) fn start(
|
||||
&mut self,
|
||||
socks: Vec<(Token, MioListener)>,
|
||||
handles: Vec<WorkerHandleAccept>,
|
||||
) {
|
||||
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
|
||||
let poll = self.poll.take().unwrap();
|
||||
let waker = self.waker.clone();
|
||||
|
||||
Accept::start(poll, waker, socks, srv, handles);
|
||||
}
|
||||
timeout: Option<actix_rt::time::Instant>,
|
||||
}
|
||||
|
||||
/// poll instance of the server.
|
||||
struct Accept {
|
||||
pub(crate) struct Accept {
|
||||
poll: Poll,
|
||||
waker: WakerQueue,
|
||||
waker_queue: WakerQueue,
|
||||
handles: Vec<WorkerHandleAccept>,
|
||||
srv: Server,
|
||||
srv: ServerHandle,
|
||||
next: usize,
|
||||
avail: Availability,
|
||||
backpressure: bool,
|
||||
}
|
||||
|
||||
/// Array of u128 with every bit as marker for a worker handle's availability.
|
||||
struct Availability([u128; 4]);
|
||||
|
||||
impl Default for Availability {
|
||||
fn default() -> Self {
|
||||
Self([0; 4])
|
||||
}
|
||||
}
|
||||
|
||||
impl Availability {
|
||||
/// Check if any worker handle is available
|
||||
fn available(&self) -> bool {
|
||||
self.0.iter().any(|a| *a != 0)
|
||||
}
|
||||
|
||||
/// Set a worker handle's available state by index.
|
||||
fn set_available(&mut self, idx: usize, avail: bool) {
|
||||
let (offset, idx) = if idx < 128 {
|
||||
(0, idx)
|
||||
} else if idx < 128 * 2 {
|
||||
(1, idx - 128)
|
||||
} else if idx < 128 * 3 {
|
||||
(2, idx - 128 * 2)
|
||||
} else if idx < 128 * 4 {
|
||||
(3, idx - 128 * 3)
|
||||
} else {
|
||||
panic!("Max WorkerHandle count is 512")
|
||||
};
|
||||
|
||||
let off = 1 << idx as u128;
|
||||
if avail {
|
||||
self.0[offset] |= off;
|
||||
} else {
|
||||
self.0[offset] &= !off
|
||||
}
|
||||
}
|
||||
|
||||
/// Set all worker handles to the available state.
/// This results in a re-check of all workers' availability.
|
||||
fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
|
||||
handles.iter().for_each(|handle| {
|
||||
self.set_available(handle.idx(), true);
|
||||
})
|
||||
}
|
||||
}
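
// Illustrative sketch (not part of the original file): each of the four u128 words tracks 128
// worker indexes, so index 130 maps to word 1 (130 / 128) at bit 2 (130 % 128).
#[cfg(test)]
mod availability_sketch {
    use super::Availability;

    #[test]
    fn high_index_maps_to_second_word() {
        let mut avail = Availability::default();
        assert!(!avail.available());

        avail.set_available(130, true);
        assert!(avail.available());

        avail.set_available(130, false);
        assert!(!avail.available());
    }
}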
|
||||
|
||||
/// This function defines errors that are per-connection; if we get this error from the `accept()`
/// system call, the next connection might still be ready to be accepted.
///
/// All other errors will incur a timeout before the next `accept()` is performed.
/// The timeout is useful to handle resource exhaustion errors like ENFILE and EMFILE.
/// Otherwise, the accept loop could enter a tight loop.
|
||||
fn connection_error(e: &io::Error) -> bool {
|
||||
e.kind() == io::ErrorKind::ConnectionRefused
|
||||
|| e.kind() == io::ErrorKind::ConnectionAborted
|
||||
|| e.kind() == io::ErrorKind::ConnectionReset
|
||||
/// Use the smallest duration across all sockets' timeouts.
|
||||
timeout: Option<Duration>,
|
||||
paused: bool,
|
||||
}
|
||||
|
||||
impl Accept {
|
||||
pub(crate) fn start(
|
||||
poll: Poll,
|
||||
waker: WakerQueue,
|
||||
socks: Vec<(Token, MioListener)>,
|
||||
srv: Server,
|
||||
handles: Vec<WorkerHandleAccept>,
|
||||
) {
|
||||
// Accept runs in its own thread and may want to spawn additional futures onto the current
// Actix system.
|
||||
let sys = System::current();
|
||||
thread::Builder::new()
|
||||
.name("actix-server accept loop".to_owned())
|
||||
.spawn(move || {
|
||||
System::set_current(sys);
|
||||
let (mut accept, sockets) =
|
||||
Accept::new_with_sockets(poll, waker, socks, handles, srv);
|
||||
sockets: Vec<(usize, MioListener)>,
|
||||
builder: &ServerBuilder,
|
||||
) -> io::Result<(WakerQueue, Vec<WorkerHandleServer>)> {
|
||||
let handle_server = ServerHandle::new(builder.cmd_tx.clone());
|
||||
|
||||
accept.poll_with(sockets);
|
||||
// construct poll instance and its waker
|
||||
let poll = Poll::new()?;
|
||||
let waker_queue = WakerQueue::new(poll.registry())?;
|
||||
|
||||
// start workers and collect handles
|
||||
let (handles_accept, handles_server) = (0..builder.threads)
|
||||
.map(|idx| {
|
||||
// clone service factories
|
||||
let factories = builder
|
||||
.factories
|
||||
.iter()
|
||||
.map(|f| f.clone_factory())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// start worker using service factories
|
||||
ServerWorker::start(idx, factories, waker_queue.clone(), builder.worker_config)
|
||||
})
|
||||
.unwrap();
|
||||
.collect::<io::Result<Vec<_>>>()?
|
||||
.into_iter()
|
||||
.unzip();
|
||||
|
||||
let (mut accept, mut sockets) = Accept::new_with_sockets(
|
||||
poll,
|
||||
waker_queue.clone(),
|
||||
sockets,
|
||||
handles_accept,
|
||||
handle_server,
|
||||
)?;
|
||||
|
||||
thread::Builder::new()
|
||||
.name("actix-server acceptor".to_owned())
|
||||
.spawn(move || accept.poll_with(&mut sockets))
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
|
||||
|
||||
Ok((waker_queue, handles_server))
|
||||
}
|
||||
|
||||
fn new_with_sockets(
|
||||
poll: Poll,
|
||||
waker: WakerQueue,
|
||||
socks: Vec<(Token, MioListener)>,
|
||||
handles: Vec<WorkerHandleAccept>,
|
||||
srv: Server,
|
||||
) -> (Accept, Slab<ServerSocketInfo>) {
|
||||
let mut sockets = Slab::new();
|
||||
for (hnd_token, mut lst) in socks.into_iter() {
|
||||
let addr = lst.local_addr();
|
||||
waker_queue: WakerQueue,
|
||||
sockets: Vec<(usize, MioListener)>,
|
||||
accept_handles: Vec<WorkerHandleAccept>,
|
||||
server_handle: ServerHandle,
|
||||
) -> io::Result<(Accept, Box<[ServerSocketInfo]>)> {
|
||||
let sockets = sockets
|
||||
.into_iter()
|
||||
.map(|(token, mut lst)| {
|
||||
// Start listening for incoming connections
|
||||
poll.registry()
|
||||
.register(&mut lst, MioToken(token), Interest::READABLE)?;
|
||||
|
||||
let entry = sockets.vacant_entry();
|
||||
let token = entry.key();
|
||||
|
||||
// Start listening for incoming connections
|
||||
poll.registry()
|
||||
.register(&mut lst, MioToken(token), Interest::READABLE)
|
||||
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
|
||||
|
||||
entry.insert(ServerSocketInfo {
|
||||
addr,
|
||||
token: hnd_token,
|
||||
lst,
|
||||
timeout: None,
|
||||
});
|
||||
}
|
||||
Ok(ServerSocketInfo {
|
||||
token,
|
||||
lst,
|
||||
timeout: None,
|
||||
})
|
||||
})
|
||||
.collect::<io::Result<_>>()?;
|
||||
|
||||
let mut avail = Availability::default();
|
||||
|
||||
// Assume all handles are avail at construct time.
|
||||
avail.set_available_all(&handles);
|
||||
avail.set_available_all(&accept_handles);
|
||||
|
||||
let accept = Accept {
|
||||
poll,
|
||||
waker,
|
||||
handles,
|
||||
srv,
|
||||
waker_queue,
|
||||
handles: accept_handles,
|
||||
srv: server_handle,
|
||||
next: 0,
|
||||
avail,
|
||||
backpressure: false,
|
||||
timeout: None,
|
||||
paused: false,
|
||||
};
|
||||
|
||||
(accept, sockets)
|
||||
Ok((accept, sockets))
|
||||
}
|
||||
|
||||
fn poll_with(&mut self, mut sockets: Slab<ServerSocketInfo>) {
|
||||
let mut events = mio::Events::with_capacity(128);
|
||||
/// blocking wait for readiness events triggered by mio
|
||||
fn poll_with(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
let mut events = mio::Events::with_capacity(256);
|
||||
|
||||
loop {
|
||||
if let Err(e) = self.poll.poll(&mut events, None) {
|
||||
match e.kind() {
|
||||
std::io::ErrorKind::Interrupted => continue,
|
||||
io::ErrorKind::Interrupted => {}
|
||||
_ => panic!("Poll error: {}", e),
|
||||
}
|
||||
}
|
||||
@@ -228,206 +137,210 @@ impl Accept {
|
||||
for event in events.iter() {
|
||||
let token = event.token();
|
||||
match token {
|
||||
// This is a loop because the interest handling for commands in the previous version was
// a loop that would try to drain the command channel. It's not yet known
// whether it's necessary/good practice to actively drain the waker queue.
|
||||
WAKER_TOKEN => 'waker: loop {
|
||||
// take guard with every iteration so no new interest can be added
|
||||
// until the current task is done.
|
||||
let mut guard = self.waker.guard();
|
||||
match guard.pop_front() {
|
||||
// worker notifies that it has become available; we may want to recover
// from backpressure.
|
||||
Some(WakerInterest::WorkerAvailable(idx)) => {
|
||||
drop(guard);
|
||||
self.maybe_backpressure(&mut sockets, false);
|
||||
self.avail.set_available(idx, true);
|
||||
}
|
||||
// a new worker thread has been spawned and its handle is added to Accept
|
||||
Some(WakerInterest::Worker(handle)) => {
|
||||
drop(guard);
|
||||
// maybe we want to recover from a backpressure.
|
||||
self.maybe_backpressure(&mut sockets, false);
|
||||
self.avail.set_available(handle.idx(), true);
|
||||
self.handles.push(handle);
|
||||
}
|
||||
// got timer interest; it's time to try registering the socket(s) again
|
||||
Some(WakerInterest::Timer) => {
|
||||
drop(guard);
|
||||
self.process_timer(&mut sockets)
|
||||
}
|
||||
Some(WakerInterest::Pause) => {
|
||||
drop(guard);
|
||||
self.deregister_all(&mut sockets);
|
||||
}
|
||||
Some(WakerInterest::Resume) => {
|
||||
drop(guard);
|
||||
sockets.iter_mut().for_each(|(token, info)| {
|
||||
self.register_logged(token, info);
|
||||
});
|
||||
}
|
||||
Some(WakerInterest::Stop) => {
|
||||
return self.deregister_all(&mut sockets);
|
||||
}
|
||||
// waker queue is drained
|
||||
None => {
|
||||
// Reset the WakerQueue before break so it does not grow infinitely
|
||||
WakerQueue::reset(&mut guard);
|
||||
break 'waker;
|
||||
}
|
||||
WAKER_TOKEN => {
|
||||
let exit = self.handle_waker(sockets);
|
||||
if exit {
|
||||
info!("Accept thread stopped");
|
||||
return;
|
||||
}
|
||||
},
|
||||
}
|
||||
_ => {
|
||||
let token = usize::from(token);
|
||||
self.accept(&mut sockets, token);
|
||||
self.accept(sockets, token);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check for timeout and re-register sockets
|
||||
self.process_timeout(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_waker(&mut self, sockets: &mut [ServerSocketInfo]) -> bool {
|
||||
// This is a loop because the interest handling for commands in the previous version was
// a loop that would try to drain the command channel. It's not yet known
// whether it's necessary/good practice to actively drain the waker queue.
|
||||
loop {
|
||||
// take guard with every iteration so no new interest can be added
|
||||
// until the current task is done.
|
||||
let mut guard = self.waker_queue.guard();
|
||||
match guard.pop_front() {
|
||||
// worker notifies that it has become available.
|
||||
Some(WakerInterest::WorkerAvailable(idx)) => {
|
||||
drop(guard);
|
||||
|
||||
self.avail.set_available(idx, true);
|
||||
|
||||
if !self.paused {
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
// a new worker thread has been spawned and its handle is added to Accept
|
||||
Some(WakerInterest::Worker(handle)) => {
|
||||
drop(guard);
|
||||
|
||||
self.avail.set_available(handle.idx(), true);
|
||||
self.handles.push(handle);
|
||||
|
||||
if !self.paused {
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
Some(WakerInterest::Pause) => {
|
||||
drop(guard);
|
||||
|
||||
if !self.paused {
|
||||
self.paused = true;
|
||||
|
||||
self.deregister_all(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
Some(WakerInterest::Resume) => {
|
||||
drop(guard);
|
||||
|
||||
if self.paused {
|
||||
self.paused = false;
|
||||
|
||||
sockets.iter_mut().for_each(|info| {
|
||||
self.register_logged(info);
|
||||
});
|
||||
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
Some(WakerInterest::Stop) => {
|
||||
if !self.paused {
|
||||
self.deregister_all(sockets);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// waker queue is drained
|
||||
None => {
|
||||
// Reset the WakerQueue before break so it does not grow infinitely
|
||||
WakerQueue::reset(&mut guard);
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_timer(&self, sockets: &mut Slab<ServerSocketInfo>) {
|
||||
let now = Instant::now();
|
||||
sockets
|
||||
.iter_mut()
|
||||
// Only sockets that had an associated timeout were deregistered.
|
||||
.filter(|(_, info)| info.timeout.is_some())
|
||||
.for_each(|(token, info)| {
|
||||
let inst = info.timeout.take().unwrap();
|
||||
fn process_timeout(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
// always remove old timeouts
|
||||
if self.timeout.take().is_some() {
|
||||
let now = Instant::now();
|
||||
|
||||
if now < inst {
|
||||
info.timeout = Some(inst);
|
||||
} else if !self.backpressure {
|
||||
self.register_logged(token, info);
|
||||
sockets
|
||||
.iter_mut()
|
||||
// Only sockets that had an associated timeout were deregistered.
|
||||
.filter(|info| info.timeout.is_some())
|
||||
.for_each(|info| {
|
||||
let inst = info.timeout.take().unwrap();
|
||||
|
||||
if now < inst {
|
||||
// still timed out; try to set new timeout
|
||||
info.timeout = Some(inst);
|
||||
self.set_timeout(inst - now);
|
||||
} else if !self.paused {
|
||||
// timeout expired; register socket again
|
||||
self.register_logged(info);
|
||||
}
|
||||
|
||||
// Drop the timeout if server is paused and socket timeout is expired.
|
||||
// When server recovers from pause it will register all sockets without
|
||||
// a timeout value so this socket register will be delayed till then.
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Update accept timeout with `duration` if it is shorter than current timeout.
|
||||
fn set_timeout(&mut self, duration: Duration) {
|
||||
match self.timeout {
|
||||
Some(ref mut timeout) => {
|
||||
if *timeout > duration {
|
||||
*timeout = duration;
|
||||
}
|
||||
|
||||
// Drop the timeout if server is in backpressure and socket timeout is expired.
|
||||
// When server recovers from backpressure it will register all sockets without
|
||||
// a timeout value so this socket register will be delayed till then.
|
||||
});
|
||||
}
|
||||
None => self.timeout = Some(duration),
|
||||
}
|
||||
}
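// Illustrative sketch: `set_timeout` keeps the *shortest* pending timeout so the accept loop
// wakes in time for the earliest deregistered socket. `merge_timeout` below is a hypothetical
// free-standing version of that rule.

use std::time::Duration;

fn merge_timeout(current: &mut Option<Duration>, duration: Duration) {
    match *current {
        // An existing, shorter (or equal) timeout wins.
        Some(existing) if existing <= duration => {}
        // Otherwise adopt the new, shorter duration (or set the first one).
        _ => *current = Some(duration),
    }
}

fn main() {
    let mut timeout = None;
    merge_timeout(&mut timeout, Duration::from_millis(500));
    merge_timeout(&mut timeout, Duration::from_millis(100));
    merge_timeout(&mut timeout, Duration::from_millis(300));
    assert_eq!(timeout, Some(Duration::from_millis(100)));
}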
|
||||
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
let token = MioToken(info.token);
|
||||
self.poll
|
||||
.registry()
|
||||
.register(&mut info.lst, MioToken(token), Interest::READABLE)
|
||||
.register(&mut info.lst, token, Interest::READABLE)
|
||||
}
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
// On windows, calling register without deregister cause an error.
|
||||
// See https://github.com/actix/actix-web/issues/905
|
||||
// Calling reregister seems to fix the issue.
|
||||
let token = MioToken(info.token);
|
||||
self.poll
|
||||
.registry()
|
||||
.register(&mut info.lst, mio::Token(token), Interest::READABLE)
|
||||
.register(&mut info.lst, token, Interest::READABLE)
|
||||
.or_else(|_| {
|
||||
self.poll.registry().reregister(
|
||||
&mut info.lst,
|
||||
mio::Token(token),
|
||||
Interest::READABLE,
|
||||
)
|
||||
self.poll
|
||||
.registry()
|
||||
.reregister(&mut info.lst, token, Interest::READABLE)
|
||||
})
|
||||
}
|
||||
|
||||
fn register_logged(&self, token: usize, info: &mut ServerSocketInfo) {
|
||||
match self.register(token, info) {
|
||||
Ok(_) => info!("Resume accepting connections on {}", info.addr),
|
||||
fn register_logged(&self, info: &mut ServerSocketInfo) {
|
||||
match self.register(info) {
|
||||
Ok(_) => debug!("Resume accepting connections on {}", info.lst.local_addr()),
|
||||
Err(e) => error!("Can not register server socket {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
fn deregister(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
self.poll.registry().deregister(&mut info.lst)
|
||||
}
|
||||
|
||||
fn deregister_logged(&self, info: &mut ServerSocketInfo) {
|
||||
match self.deregister(info) {
|
||||
Ok(_) => info!("Paused accepting connections on {}", info.addr),
|
||||
match self.poll.registry().deregister(&mut info.lst) {
|
||||
Ok(_) => debug!("Paused accepting connections on {}", info.lst.local_addr()),
|
||||
Err(e) => {
|
||||
error!("Can not deregister server socket {}", e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn deregister_all(&self, sockets: &mut Slab<ServerSocketInfo>) {
|
||||
fn deregister_all(&self, sockets: &mut [ServerSocketInfo]) {
|
||||
// This is a best effort implementation with following limitation:
|
||||
//
|
||||
// Every ServerSocketInfo with associate timeout will be skipped and it's timeout
|
||||
// is removed in the process.
|
||||
// Every ServerSocketInfo with associated timeout will be skipped and its timeout is
|
||||
// removed in the process.
|
||||
//
|
||||
// Therefore WakerInterest::Pause followed by WakerInterest::Resume in a very short
|
||||
// gap (less than 500ms) would cause all timing out ServerSocketInfos be reregistered
|
||||
// before expected timing.
|
||||
// Therefore WakerInterest::Pause followed by WakerInterest::Resume in a very short gap
|
||||
// (less than 500ms) would cause all timing-out ServerSocketInfos to be re-registered before
|
||||
// expected timing.
|
||||
sockets
|
||||
.iter_mut()
|
||||
// Take all timeout.
|
||||
// This is to prevent Accept::process_timer method re-register a socket afterwards.
|
||||
.map(|(_, info)| (info.timeout.take(), info))
|
||||
.map(|info| (info.timeout.take(), info))
|
||||
// Socket info with a timeout is already deregistered so skip them.
|
||||
.filter(|(timeout, _)| timeout.is_none())
|
||||
.for_each(|(_, info)| self.deregister_logged(info));
|
||||
}
|
||||
|
||||
fn maybe_backpressure(&mut self, sockets: &mut Slab<ServerSocketInfo>, on: bool) {
|
||||
// Only operate when server is in a different backpressure than the given flag.
|
||||
if self.backpressure != on {
|
||||
self.backpressure = on;
|
||||
sockets
|
||||
.iter_mut()
|
||||
// Only operate on sockets without associated timeout.
|
||||
// Sockets with it should be handled by `accept` and `process_timer` methods.
|
||||
// They are already deregistered or need to be reregister in the future.
|
||||
.filter(|(_, info)| info.timeout.is_none())
|
||||
.for_each(|(token, info)| {
|
||||
if on {
|
||||
self.deregister_logged(info);
|
||||
} else {
|
||||
self.register_logged(token, info);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut conn: Conn) {
|
||||
if self.backpressure {
|
||||
// send_connection would remove fault worker from handles.
|
||||
// worst case here is conn get dropped after all handles are gone.
|
||||
while let Err(c) = self.send_connection(sockets, conn) {
|
||||
conn = c
|
||||
}
|
||||
} else {
|
||||
while self.avail.available() {
|
||||
let next = self.next();
|
||||
let idx = next.idx();
|
||||
if next.available() {
|
||||
self.avail.set_available(idx, true);
|
||||
match self.send_connection(sockets, conn) {
|
||||
Ok(_) => return,
|
||||
Err(c) => conn = c,
|
||||
}
|
||||
} else {
|
||||
self.avail.set_available(idx, false);
|
||||
self.set_next();
|
||||
}
|
||||
}
|
||||
|
||||
// Sending Conn failed due to either all workers are in error or not available.
|
||||
// Enter backpressure state and try again.
|
||||
self.maybe_backpressure(sockets, true);
|
||||
self.accept_one(sockets, conn);
|
||||
}
|
||||
}
|
||||
|
||||
// Send connection to worker and handle error.
|
||||
fn send_connection(
|
||||
&mut self,
|
||||
sockets: &mut Slab<ServerSocketInfo>,
|
||||
conn: Conn,
|
||||
) -> Result<(), Conn> {
|
||||
match self.next().send(conn) {
|
||||
fn send_connection(&mut self, conn: Conn) -> Result<(), Conn> {
|
||||
let next = self.next();
|
||||
match next.send(conn) {
|
||||
Ok(_) => {
|
||||
// Increment counter of WorkerHandle.
// Set the worker to unavailable when its counter hits max (`inc_counter` returns false).
|
||||
if !next.inc_counter() {
|
||||
let idx = next.idx();
|
||||
self.avail.set_available(idx, false);
|
||||
}
|
||||
self.set_next();
|
||||
Ok(())
|
||||
}
|
||||
@@ -438,7 +351,6 @@ impl Accept {
|
||||
|
||||
if self.handles.is_empty() {
|
||||
error!("No workers");
|
||||
self.maybe_backpressure(sockets, true);
|
||||
// All workers are gone and Conn is nowhere to be sent.
|
||||
// Treat this situation as Ok and drop Conn.
|
||||
return Ok(());
|
||||
@@ -451,19 +363,38 @@ impl Accept {
|
||||
}
|
||||
}
|
||||
|
||||
fn accept(&mut self, sockets: &mut Slab<ServerSocketInfo>, token: usize) {
|
||||
fn accept_one(&mut self, mut conn: Conn) {
|
||||
loop {
|
||||
let info = sockets
|
||||
.get_mut(token)
|
||||
.expect("ServerSocketInfo is removed from Slab");
|
||||
let next = self.next();
|
||||
let idx = next.idx();
|
||||
|
||||
if self.avail.get_available(idx) {
|
||||
match self.send_connection(conn) {
|
||||
Ok(_) => return,
|
||||
Err(c) => conn = c,
|
||||
}
|
||||
} else {
|
||||
self.avail.set_available(idx, false);
|
||||
self.set_next();
|
||||
|
||||
if !self.avail.available() {
|
||||
while let Err(c) = self.send_connection(conn) {
|
||||
conn = c;
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn accept(&mut self, sockets: &mut [ServerSocketInfo], token: usize) {
|
||||
while self.avail.available() {
|
||||
let info = &mut sockets[token];
|
||||
|
||||
match info.lst.accept() {
|
||||
Ok(io) => {
|
||||
let msg = Conn {
|
||||
io,
|
||||
token: info.token,
|
||||
};
|
||||
self.accept_one(sockets, msg);
|
||||
let conn = Conn { io, token };
|
||||
self.accept_one(conn);
|
||||
}
|
||||
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
|
||||
Err(ref e) if connection_error(e) => continue,
|
||||
@@ -477,13 +408,7 @@ impl Accept {
|
||||
// the poll would need it mark which socket and when it's
|
||||
// listener should be registered
|
||||
info.timeout = Some(Instant::now() + Duration::from_millis(500));
|
||||
|
||||
// after the sleep a Timer interest is sent to Accept Poll
|
||||
let waker = self.waker.clone();
|
||||
System::current().arbiter().spawn(async move {
|
||||
sleep(Duration::from_millis(510)).await;
|
||||
waker.wake(WakerInterest::Timer);
|
||||
});
|
||||
self.set_timeout(TIMEOUT_DURATION_ON_ERROR);
|
||||
|
||||
return;
|
||||
}
|
||||
@@ -491,11 +416,22 @@ impl Accept {
|
||||
}
|
||||
}
|
||||
|
||||
fn accept_all(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
sockets
|
||||
.iter_mut()
|
||||
.map(|info| info.token)
|
||||
.collect::<Vec<_>>()
|
||||
.into_iter()
|
||||
.for_each(|idx| self.accept(sockets, idx))
|
||||
}
|
||||
|
||||
#[inline(always)]
fn next(&self) -> &WorkerHandleAccept {
    &self.handles[self.next]
}

/// Set next worker handle that would accept connection.
#[inline(always)]
fn set_next(&mut self) {
    self.next = (self.next + 1) % self.handles.len();
}
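// Illustrative sketch: `set_next` advances the round-robin cursor with wrap-around;
// `next_index` below is a hypothetical standalone version of the same index rule.

fn next_index(current: usize, len: usize) -> usize {
    (current + 1) % len
}

fn main() {
    let (len, mut idx) = (3, 0);
    // Starting from worker 0, the cursor visits 1, 2, 0, 1, 2, 0, ...
    let order: Vec<usize> = (0..6).map(|_| { idx = next_index(idx, len); idx }).collect();
    assert_eq!(order, vec![1, 2, 0, 1, 2, 0]);
}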
@@ -511,67 +447,14 @@ impl Accept {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::Availability;
|
||||
|
||||
fn single(aval: &mut Availability, idx: usize) {
|
||||
aval.set_available(idx, true);
|
||||
assert!(aval.available());
|
||||
|
||||
aval.set_available(idx, true);
|
||||
|
||||
aval.set_available(idx, false);
|
||||
assert!(!aval.available());
|
||||
|
||||
aval.set_available(idx, false);
|
||||
assert!(!aval.available());
|
||||
}
|
||||
|
||||
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
|
||||
idx.iter().for_each(|idx| aval.set_available(*idx, true));
|
||||
|
||||
assert!(aval.available());
|
||||
|
||||
while let Some(idx) = idx.pop() {
|
||||
assert!(aval.available());
|
||||
aval.set_available(idx, false);
|
||||
}
|
||||
|
||||
assert!(!aval.available());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn availability() {
|
||||
let mut aval = Availability::default();
|
||||
|
||||
single(&mut aval, 1);
|
||||
single(&mut aval, 128);
|
||||
single(&mut aval, 256);
|
||||
single(&mut aval, 511);
|
||||
|
||||
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
|
||||
|
||||
multi(&mut aval, idx);
|
||||
|
||||
multi(&mut aval, (0..511).collect())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn overflow() {
|
||||
let mut aval = Availability::default();
|
||||
single(&mut aval, 512);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pin_point() {
|
||||
let mut aval = Availability::default();
|
||||
|
||||
aval.set_available(438, true);
|
||||
|
||||
aval.set_available(479, true);
|
||||
|
||||
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
|
||||
}
|
||||
/// This function defines errors that are per-connection; if we get this error from the `accept()`
/// system call it means the next connection might be ready to be accepted.
///
/// All other errors will incur a timeout before the next `accept()` call is attempted. The timeout
/// is useful to handle resource exhaustion errors like `ENFILE` and `EMFILE`. Otherwise, it could
/// enter a temporary spin loop.
fn connection_error(e: &io::Error) -> bool {
    e.kind() == io::ErrorKind::ConnectionRefused
        || e.kind() == io::ErrorKind::ConnectionAborted
        || e.kind() == io::ErrorKind::ConnectionReset
}
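// Illustrative check: `WouldBlock` and resource-exhaustion errors such as EMFILE are *not*
// per-connection, so they fall through to the timeout path described above. `is_per_connection`
// below is a hypothetical helper mirroring the same classification.

use std::io;

fn is_per_connection(e: &io::Error) -> bool {
    matches!(
        e.kind(),
        io::ErrorKind::ConnectionRefused
            | io::ErrorKind::ConnectionAborted
            | io::ErrorKind::ConnectionReset
    )
}

fn main() {
    assert!(is_per_connection(&io::Error::from(io::ErrorKind::ConnectionReset)));
    assert!(!is_per_connection(&io::Error::from(io::ErrorKind::WouldBlock)));
}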
|
||||
|
actix-server/src/availability.rs (new file, 121 lines)
@@ -0,0 +1,121 @@
|
||||
use crate::worker::WorkerHandleAccept;
|
||||
|
||||
/// Array of u128 with every bit as marker for a worker handle's availability.
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct Availability([u128; 4]);
|
||||
|
||||
impl Availability {
|
||||
/// Check if any worker handle is available
|
||||
#[inline(always)]
|
||||
pub(crate) fn available(&self) -> bool {
|
||||
self.0.iter().any(|a| *a != 0)
|
||||
}
|
||||
|
||||
/// Check if worker handle is available by index
|
||||
#[inline(always)]
|
||||
pub(crate) fn get_available(&self, idx: usize) -> bool {
|
||||
let (offset, idx) = Self::offset(idx);
|
||||
|
||||
self.0[offset] & (1 << idx as u128) != 0
|
||||
}
|
||||
|
||||
/// Set worker handle available state by index.
|
||||
pub(crate) fn set_available(&mut self, idx: usize, avail: bool) {
|
||||
let (offset, idx) = Self::offset(idx);
|
||||
|
||||
let off = 1 << idx as u128;
|
||||
if avail {
|
||||
self.0[offset] |= off;
|
||||
} else {
|
||||
self.0[offset] &= !off
|
||||
}
|
||||
}
|
||||
|
||||
/// Set all worker handle to available state.
|
||||
/// This would result in a re-check on all workers' availability.
|
||||
pub(crate) fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
|
||||
handles.iter().for_each(|handle| {
|
||||
self.set_available(handle.idx(), true);
|
||||
})
|
||||
}
|
||||
|
||||
/// Get offset and adjusted index of given worker handle index.
|
||||
pub(crate) fn offset(idx: usize) -> (usize, usize) {
|
||||
if idx < 128 {
|
||||
(0, idx)
|
||||
} else if idx < 128 * 2 {
|
||||
(1, idx - 128)
|
||||
} else if idx < 128 * 3 {
|
||||
(2, idx - 128 * 2)
|
||||
} else if idx < 128 * 4 {
|
||||
(3, idx - 128 * 3)
|
||||
} else {
|
||||
panic!("Max WorkerHandle count is 512")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn single(aval: &mut Availability, idx: usize) {
|
||||
aval.set_available(idx, true);
|
||||
assert!(aval.available());
|
||||
|
||||
aval.set_available(idx, true);
|
||||
|
||||
aval.set_available(idx, false);
|
||||
assert!(!aval.available());
|
||||
|
||||
aval.set_available(idx, false);
|
||||
assert!(!aval.available());
|
||||
}
|
||||
|
||||
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
|
||||
idx.iter().for_each(|idx| aval.set_available(*idx, true));
|
||||
|
||||
assert!(aval.available());
|
||||
|
||||
while let Some(idx) = idx.pop() {
|
||||
assert!(aval.available());
|
||||
aval.set_available(idx, false);
|
||||
}
|
||||
|
||||
assert!(!aval.available());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn availability() {
|
||||
let mut aval = Availability::default();
|
||||
|
||||
single(&mut aval, 1);
|
||||
single(&mut aval, 128);
|
||||
single(&mut aval, 256);
|
||||
single(&mut aval, 511);
|
||||
|
||||
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
|
||||
|
||||
multi(&mut aval, idx);
|
||||
|
||||
multi(&mut aval, (0..511).collect())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn overflow() {
|
||||
let mut aval = Availability::default();
|
||||
single(&mut aval, 512);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pin_point() {
|
||||
let mut aval = Availability::default();
|
||||
|
||||
aval.set_available(438, true);
|
||||
|
||||
aval.set_available(479, true);
|
||||
|
||||
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
|
||||
}
|
||||
}
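The bit-mask layout is easiest to see with the index math on its own. The following is a minimal, hypothetical sketch of the same index-to-(slot, bit) mapping that `Availability::offset` uses for its `[u128; 4]`, i.e. up to 4 * 128 = 512 worker handles:

fn offset(idx: usize) -> (usize, usize) {
    assert!(idx < 512, "max worker handle count is 512");
    (idx / 128, idx % 128)
}

fn main() {
    // Worker 438 lands in the fourth u128 (slot 3), bit 438 - 384 = 54, matching `pin_point`.
    assert_eq!(offset(438), (3, 438 - 384));
    assert_eq!(offset(0), (0, 0));
    assert_eq!(offset(127), (0, 127));
    assert_eq!(offset(128), (1, 0));
}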
|
@@ -1,45 +1,31 @@
|
||||
use std::{
|
||||
future::Future,
|
||||
io, mem,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
time::Duration,
|
||||
use std::{io, time::Duration};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use log::{info, trace};
|
||||
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
|
||||
|
||||
use crate::{
|
||||
server::ServerCommand,
|
||||
service::{InternalServiceFactory, ServiceFactory, StreamNewService},
|
||||
socket::{
|
||||
MioListener, MioTcpListener, MioTcpSocket, StdSocketAddr, StdTcpListener, ToSocketAddrs,
|
||||
},
|
||||
worker::ServerWorkerConfig,
|
||||
Server,
|
||||
};
|
||||
|
||||
use actix_rt::{self as rt, net::TcpStream, time::sleep, System};
|
||||
use log::{error, info};
|
||||
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
use crate::accept::AcceptLoop;
|
||||
use crate::config::{ConfiguredService, ServiceConfig};
|
||||
use crate::server::{Server, ServerCommand};
|
||||
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
|
||||
use crate::signals::{Signal, Signals};
|
||||
use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
|
||||
use crate::socket::{MioTcpListener, MioTcpSocket};
|
||||
use crate::waker_queue::{WakerInterest, WakerQueue};
|
||||
use crate::worker::{
|
||||
ServerWorker, ServerWorkerConfig, WorkerAvailability, WorkerHandleAccept,
|
||||
WorkerHandleServer,
|
||||
};
|
||||
use crate::{join_all, Token};
|
||||
|
||||
/// Server builder
|
||||
/// [Server] builder.
|
||||
pub struct ServerBuilder {
|
||||
threads: usize,
|
||||
token: Token,
|
||||
backlog: u32,
|
||||
handles: Vec<(usize, WorkerHandleServer)>,
|
||||
services: Vec<Box<dyn InternalServiceFactory>>,
|
||||
sockets: Vec<(Token, String, MioListener)>,
|
||||
accept: AcceptLoop,
|
||||
exit: bool,
|
||||
no_signals: bool,
|
||||
cmd: UnboundedReceiver<ServerCommand>,
|
||||
server: Server,
|
||||
notify: Vec<oneshot::Sender<()>>,
|
||||
worker_config: ServerWorkerConfig,
|
||||
pub(crate) threads: usize,
|
||||
pub(crate) token: usize,
|
||||
pub(crate) backlog: u32,
|
||||
pub(crate) factories: Vec<Box<dyn InternalServiceFactory>>,
|
||||
pub(crate) sockets: Vec<(usize, String, MioListener)>,
|
||||
pub(crate) exit: bool,
|
||||
pub(crate) listen_os_signals: bool,
|
||||
pub(crate) cmd_tx: UnboundedSender<ServerCommand>,
|
||||
pub(crate) cmd_rx: UnboundedReceiver<ServerCommand>,
|
||||
pub(crate) worker_config: ServerWorkerConfig,
|
||||
}
|
||||
|
||||
impl Default for ServerBuilder {
|
||||
@@ -51,30 +37,26 @@ impl Default for ServerBuilder {
|
||||
impl ServerBuilder {
|
||||
/// Create new Server builder instance
|
||||
pub fn new() -> ServerBuilder {
|
||||
let (tx, rx) = unbounded_channel();
|
||||
let server = Server::new(tx);
|
||||
let (cmd_tx, cmd_rx) = unbounded_channel();
|
||||
|
||||
ServerBuilder {
|
||||
threads: num_cpus::get(),
|
||||
token: Token::default(),
|
||||
handles: Vec::new(),
|
||||
services: Vec::new(),
|
||||
token: 0,
|
||||
factories: Vec::new(),
|
||||
sockets: Vec::new(),
|
||||
accept: AcceptLoop::new(server.clone()),
|
||||
backlog: 2048,
|
||||
exit: false,
|
||||
no_signals: false,
|
||||
cmd: rx,
|
||||
notify: Vec::new(),
|
||||
server,
|
||||
listen_os_signals: true,
|
||||
cmd_tx,
|
||||
cmd_rx,
|
||||
worker_config: ServerWorkerConfig::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set number of workers to start.
|
||||
///
|
||||
/// By default server uses number of available logical cpu as workers
|
||||
/// count. Workers must be greater than 0.
|
||||
/// By default server uses number of available logical CPU as workers count. Workers must be
|
||||
/// greater than 0.
|
||||
pub fn workers(mut self, num: usize) -> Self {
|
||||
assert_ne!(num, 0, "workers must be greater than 0");
|
||||
self.threads = num;
|
||||
@@ -101,10 +83,9 @@ impl ServerBuilder {
|
||||
|
||||
/// Set the maximum number of pending connections.
|
||||
///
|
||||
/// This refers to the number of clients that can be waiting to be served.
|
||||
/// Exceeding this number results in the client getting an error when
|
||||
/// attempting to connect. It should only affect servers under significant
|
||||
/// load.
|
||||
/// This refers to the number of clients that can be waiting to be served. Exceeding this number
|
||||
/// results in the client getting an error when attempting to connect. It should only affect
|
||||
/// servers under significant load.
|
||||
///
|
||||
/// Generally set in the 64-2048 range. Default value is 2048.
|
||||
///
|
||||
@@ -116,24 +97,30 @@ impl ServerBuilder {
|
||||
|
||||
/// Sets the maximum per-worker number of concurrent connections.
|
||||
///
|
||||
/// All socket listeners will stop accepting connections when this limit is
|
||||
/// reached for each worker.
|
||||
/// All socket listeners will stop accepting connections when this limit is reached for
|
||||
/// each worker.
|
||||
///
|
||||
/// By default max connections is set to 25k per worker.
|
||||
pub fn maxconn(mut self, num: usize) -> Self {
|
||||
pub fn max_concurrent_connections(mut self, num: usize) -> Self {
|
||||
self.worker_config.max_concurrent_connections(num);
|
||||
self
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "2.0.0", note = "Renamed to `max_concurrent_connections`.")]
|
||||
pub fn maxconn(self, num: usize) -> Self {
|
||||
self.max_concurrent_connections(num)
|
||||
}
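// Minimal usage sketch, assuming the actix-server, actix-service and actix-rt crates:
// `max_concurrent_connections` caps in-flight connections per worker, while `backlog`
// only bounds the OS-level queue of not-yet-accepted connections.

use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .workers(2)
        .backlog(1024)
        .max_concurrent_connections(10_000)
        .bind("echo", ("127.0.0.1", 8080), || {
            // Accept the stream and immediately finish; a real service would read/write here.
            fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run()
        .await
}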
|
||||
|
||||
/// Stop Actix system.
|
||||
pub fn system_exit(mut self) -> Self {
|
||||
self.exit = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Disable signal handling.
|
||||
/// Disable OS signal handling.
|
||||
pub fn disable_signals(mut self) -> Self {
|
||||
self.no_signals = true;
|
||||
self.listen_os_signals = false;
|
||||
self
|
||||
}
|
||||
|
||||
@@ -149,32 +136,6 @@ impl ServerBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Execute external configuration as part of the server building process.
|
||||
///
|
||||
/// This function is useful for moving parts of configuration to a different module or
|
||||
/// even library.
|
||||
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
|
||||
where
|
||||
F: Fn(&mut ServiceConfig) -> io::Result<()>,
|
||||
{
|
||||
let mut cfg = ServiceConfig::new(self.threads, self.backlog);
|
||||
|
||||
f(&mut cfg)?;
|
||||
|
||||
if let Some(apply) = cfg.apply {
|
||||
let mut srv = ConfiguredService::new(apply);
|
||||
for (name, lst) in cfg.services {
|
||||
let token = self.token.next();
|
||||
srv.stream(token, name.clone(), lst.local_addr()?);
|
||||
self.sockets.push((token, name, MioListener::Tcp(lst)));
|
||||
}
|
||||
self.services.push(Box::new(srv));
|
||||
}
|
||||
self.threads = cfg.threads;
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add new service to the server.
|
||||
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
|
||||
where
|
||||
@@ -183,9 +144,11 @@ impl ServerBuilder {
|
||||
{
|
||||
let sockets = bind_addr(addr, self.backlog)?;
|
||||
|
||||
trace!("binding server to: {:?}", &sockets);
|
||||
|
||||
for lst in sockets {
|
||||
let token = self.token.next();
|
||||
self.services.push(StreamNewService::create(
|
||||
let token = self.next_token();
|
||||
self.factories.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory.clone(),
|
||||
@@ -194,11 +157,57 @@ impl ServerBuilder {
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add new service to the server.
|
||||
pub fn listen<F, N: AsRef<str>>(
|
||||
mut self,
|
||||
name: N,
|
||||
lst: StdTcpListener,
|
||||
factory: F,
|
||||
) -> io::Result<Self>
|
||||
where
|
||||
F: ServiceFactory<TcpStream>,
|
||||
{
|
||||
lst.set_nonblocking(true)?;
|
||||
let addr = lst.local_addr()?;
|
||||
|
||||
let token = self.next_token();
|
||||
self.factories.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory,
|
||||
addr,
|
||||
));
|
||||
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Starts processing incoming connections and return server controller.
|
||||
pub fn run(self) -> Server {
|
||||
if self.sockets.is_empty() {
|
||||
panic!("Server should have at least one bound socket");
|
||||
} else {
|
||||
info!("Starting {} workers", self.threads);
|
||||
Server::new(self)
|
||||
}
|
||||
}
|
||||
|
||||
fn next_token(&mut self) -> usize {
|
||||
let token = self.token;
|
||||
self.token += 1;
|
||||
token
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
impl ServerBuilder {
|
||||
/// Add new unix domain service to the server.
|
||||
#[cfg(unix)]
|
||||
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
|
||||
where
|
||||
F: ServiceFactory<actix_rt::net::UnixStream>,
|
||||
@@ -219,9 +228,8 @@ impl ServerBuilder {
|
||||
}
|
||||
|
||||
/// Add new unix domain service to the server.
|
||||
/// Useful when running as a systemd service and
|
||||
/// a socket FD can be acquired using the systemd crate.
|
||||
#[cfg(unix)]
|
||||
///
|
||||
/// Useful when running as a systemd service and a socket FD is acquired externally.
|
||||
pub fn listen_uds<F, N: AsRef<str>>(
|
||||
mut self,
|
||||
name: N,
|
||||
@@ -233,9 +241,9 @@ impl ServerBuilder {
|
||||
{
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
lst.set_nonblocking(true)?;
|
||||
let token = self.token.next();
|
||||
let token = self.next_token();
|
||||
let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
|
||||
self.services.push(StreamNewService::create(
|
||||
self.factories.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory,
|
||||
@@ -245,211 +253,6 @@ impl ServerBuilder {
|
||||
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add new service to the server.
|
||||
pub fn listen<F, N: AsRef<str>>(
|
||||
mut self,
|
||||
name: N,
|
||||
lst: StdTcpListener,
|
||||
factory: F,
|
||||
) -> io::Result<Self>
|
||||
where
|
||||
F: ServiceFactory<TcpStream>,
|
||||
{
|
||||
lst.set_nonblocking(true)?;
|
||||
let addr = lst.local_addr()?;
|
||||
|
||||
let token = self.token.next();
|
||||
self.services.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory,
|
||||
addr,
|
||||
));
|
||||
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Starts processing incoming connections and return server controller.
|
||||
pub fn run(mut self) -> Server {
|
||||
if self.sockets.is_empty() {
|
||||
panic!("Server should have at least one bound socket");
|
||||
} else {
|
||||
info!("Starting {} workers", self.threads);
|
||||
|
||||
// start workers
|
||||
let handles = (0..self.threads)
|
||||
.map(|idx| {
|
||||
let (handle_accept, handle_server) =
|
||||
self.start_worker(idx, self.accept.waker_owned());
|
||||
self.handles.push((idx, handle_server));
|
||||
|
||||
handle_accept
|
||||
})
|
||||
.collect();
|
||||
|
||||
// start accept thread
|
||||
for sock in &self.sockets {
|
||||
info!("Starting \"{}\" service on {}", sock.1, sock.2);
|
||||
}
|
||||
self.accept.start(
|
||||
mem::take(&mut self.sockets)
|
||||
.into_iter()
|
||||
.map(|t| (t.0, t.2))
|
||||
.collect(),
|
||||
handles,
|
||||
);
|
||||
|
||||
// handle signals
|
||||
if !self.no_signals {
|
||||
Signals::start(self.server.clone());
|
||||
}
|
||||
|
||||
// start http server actor
|
||||
let server = self.server.clone();
|
||||
rt::spawn(self);
|
||||
server
|
||||
}
|
||||
}
|
||||
|
||||
fn start_worker(
|
||||
&self,
|
||||
idx: usize,
|
||||
waker: WakerQueue,
|
||||
) -> (WorkerHandleAccept, WorkerHandleServer) {
|
||||
let avail = WorkerAvailability::new(idx, waker);
|
||||
let services = self.services.iter().map(|v| v.clone_factory()).collect();
|
||||
|
||||
ServerWorker::start(idx, services, avail, self.worker_config)
|
||||
}
|
||||
|
||||
fn handle_cmd(&mut self, item: ServerCommand) {
|
||||
match item {
|
||||
ServerCommand::Pause(tx) => {
|
||||
self.accept.wake(WakerInterest::Pause);
|
||||
let _ = tx.send(());
|
||||
}
|
||||
ServerCommand::Resume(tx) => {
|
||||
self.accept.wake(WakerInterest::Resume);
|
||||
let _ = tx.send(());
|
||||
}
|
||||
ServerCommand::Signal(sig) => {
|
||||
// Signals support
|
||||
// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
|
||||
match sig {
|
||||
Signal::Int => {
|
||||
info!("SIGINT received, exiting");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
Signal::Term => {
|
||||
info!("SIGTERM received, stopping");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: true,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
Signal::Quit => {
|
||||
info!("SIGQUIT received, exiting");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
ServerCommand::Notify(tx) => {
|
||||
self.notify.push(tx);
|
||||
}
|
||||
ServerCommand::Stop {
|
||||
graceful,
|
||||
completion,
|
||||
} => {
|
||||
let exit = self.exit;
|
||||
|
||||
// stop accept thread
|
||||
self.accept.wake(WakerInterest::Stop);
|
||||
let notify = std::mem::take(&mut self.notify);
|
||||
|
||||
// stop workers
|
||||
let stop = self
|
||||
.handles
|
||||
.iter()
|
||||
.map(move |worker| worker.1.stop(graceful))
|
||||
.collect();
|
||||
|
||||
rt::spawn(async move {
|
||||
if graceful {
|
||||
let _ = join_all(stop).await;
|
||||
}
|
||||
|
||||
if let Some(tx) = completion {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
for tx in notify {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
|
||||
if exit {
|
||||
sleep(Duration::from_millis(300)).await;
|
||||
System::current().stop();
|
||||
}
|
||||
});
|
||||
}
|
||||
ServerCommand::WorkerFaulted(idx) => {
|
||||
let mut found = false;
|
||||
for i in 0..self.handles.len() {
|
||||
if self.handles[i].0 == idx {
|
||||
self.handles.swap_remove(i);
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
error!("Worker has died {:?}, restarting", idx);
|
||||
|
||||
let mut new_idx = self.handles.len();
|
||||
'found: loop {
|
||||
for i in 0..self.handles.len() {
|
||||
if self.handles[i].0 == new_idx {
|
||||
new_idx += 1;
|
||||
continue 'found;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
let (handle_accept, handle_server) =
|
||||
self.start_worker(new_idx, self.accept.waker_owned());
|
||||
self.handles.push((new_idx, handle_server));
|
||||
self.accept.wake(WakerInterest::Worker(handle_accept));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for ServerBuilder {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
loop {
|
||||
match Pin::new(&mut self.cmd).poll_recv(cx) {
|
||||
Poll::Ready(Some(it)) => self.as_mut().get_mut().handle_cmd(it),
|
||||
_ => return Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn bind_addr<S: ToSocketAddrs>(
|
||||
@@ -457,29 +260,27 @@ pub(super) fn bind_addr<S: ToSocketAddrs>(
|
||||
backlog: u32,
|
||||
) -> io::Result<Vec<MioTcpListener>> {
|
||||
let mut err = None;
|
||||
let mut succ = false;
|
||||
let mut success = false;
|
||||
let mut sockets = Vec::new();
|
||||
for addr in addr.to_socket_addrs()? {
|
||||
match create_tcp_listener(addr, backlog) {
|
||||
Ok(lst) => {
|
||||
succ = true;
|
||||
success = true;
|
||||
sockets.push(lst);
|
||||
}
|
||||
Err(e) => err = Some(e),
|
||||
}
|
||||
}
|
||||
|
||||
if !succ {
|
||||
if let Some(e) = err.take() {
|
||||
Err(e)
|
||||
} else {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Can not bind to address.",
|
||||
))
|
||||
}
|
||||
} else {
|
||||
if success {
|
||||
Ok(sockets)
|
||||
} else if let Some(err) = err.take() {
|
||||
Err(err)
|
||||
} else {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Can not bind to address.",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,287 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::future::Future;
|
||||
use std::{fmt, io};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{
|
||||
fn_service, IntoServiceFactory as IntoBaseServiceFactory,
|
||||
ServiceFactory as BaseServiceFactory,
|
||||
};
|
||||
use actix_utils::{counter::CounterGuard, future::ready};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use log::error;
|
||||
|
||||
use crate::builder::bind_addr;
|
||||
use crate::service::{BoxedServerService, InternalServiceFactory, StreamService};
|
||||
use crate::socket::{MioStream, MioTcpListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
|
||||
use crate::Token;
|
||||
|
||||
pub struct ServiceConfig {
|
||||
pub(crate) services: Vec<(String, MioTcpListener)>,
|
||||
pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
|
||||
pub(crate) threads: usize,
|
||||
pub(crate) backlog: u32,
|
||||
}
|
||||
|
||||
impl ServiceConfig {
|
||||
pub(super) fn new(threads: usize, backlog: u32) -> ServiceConfig {
|
||||
ServiceConfig {
|
||||
threads,
|
||||
backlog,
|
||||
services: Vec::new(),
|
||||
apply: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set number of workers to start.
|
||||
///
|
||||
/// By default server uses number of available logical cpu as workers
|
||||
/// count.
|
||||
pub fn workers(&mut self, num: usize) {
|
||||
self.threads = num;
|
||||
}
|
||||
|
||||
/// Add new service to server
|
||||
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
|
||||
where
|
||||
U: ToSocketAddrs,
|
||||
{
|
||||
let sockets = bind_addr(addr, self.backlog)?;
|
||||
|
||||
for lst in sockets {
|
||||
self._listen(name.as_ref(), lst);
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add new service to server
|
||||
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: StdTcpListener) -> &mut Self {
|
||||
self._listen(name, MioTcpListener::from_std(lst))
|
||||
}
|
||||
|
||||
/// Register service configuration function. This function get called
|
||||
/// during worker runtime configuration. It get executed in worker thread.
|
||||
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
|
||||
where
|
||||
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
|
||||
{
|
||||
self.apply = Some(Box::new(f));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn _listen<N: AsRef<str>>(&mut self, name: N, lst: MioTcpListener) -> &mut Self {
|
||||
if self.apply.is_none() {
|
||||
self.apply = Some(Box::new(not_configured));
|
||||
}
|
||||
self.services.push((name.as_ref().to_string(), lst));
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) struct ConfiguredService {
|
||||
rt: Box<dyn ServiceRuntimeConfiguration>,
|
||||
names: HashMap<Token, (String, StdSocketAddr)>,
|
||||
topics: HashMap<String, Token>,
|
||||
services: Vec<Token>,
|
||||
}
|
||||
|
||||
impl ConfiguredService {
|
||||
pub(super) fn new(rt: Box<dyn ServiceRuntimeConfiguration>) -> Self {
|
||||
ConfiguredService {
|
||||
rt,
|
||||
names: HashMap::new(),
|
||||
topics: HashMap::new(),
|
||||
services: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn stream(&mut self, token: Token, name: String, addr: StdSocketAddr) {
|
||||
self.names.insert(token, (name.clone(), addr));
|
||||
self.topics.insert(name, token);
|
||||
self.services.push(token);
|
||||
}
|
||||
}
|
||||
|
||||
impl InternalServiceFactory for ConfiguredService {
|
||||
fn name(&self, token: Token) -> &str {
|
||||
&self.names[&token].0
|
||||
}
|
||||
|
||||
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
|
||||
Box::new(Self {
|
||||
rt: self.rt.clone(),
|
||||
names: self.names.clone(),
|
||||
topics: self.topics.clone(),
|
||||
services: self.services.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
|
||||
// configure services
|
||||
let mut rt = ServiceRuntime::new(self.topics.clone());
|
||||
self.rt.configure(&mut rt);
|
||||
rt.validate();
|
||||
let mut names = self.names.clone();
|
||||
let tokens = self.services.clone();
|
||||
|
||||
// construct services
|
||||
Box::pin(async move {
|
||||
let mut services = rt.services;
|
||||
// TODO: Proper error handling here
|
||||
for f in rt.onstart.into_iter() {
|
||||
f.await;
|
||||
}
|
||||
let mut res = vec![];
|
||||
for token in tokens {
|
||||
if let Some(srv) = services.remove(&token) {
|
||||
let newserv = srv.new_service(());
|
||||
match newserv.await {
|
||||
Ok(serv) => {
|
||||
res.push((token, serv));
|
||||
}
|
||||
Err(_) => {
|
||||
error!("Can not construct service");
|
||||
return Err(());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let name = names.remove(&token).unwrap().0;
|
||||
res.push((
|
||||
token,
|
||||
Box::new(StreamService::new(fn_service(move |_: TcpStream| {
|
||||
error!("Service {:?} is not configured", name);
|
||||
ready::<Result<_, ()>>(Ok(()))
|
||||
}))),
|
||||
));
|
||||
};
|
||||
}
|
||||
Ok(res)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) trait ServiceRuntimeConfiguration: Send {
|
||||
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration>;
|
||||
|
||||
fn configure(&self, rt: &mut ServiceRuntime);
|
||||
}
|
||||
|
||||
impl<F> ServiceRuntimeConfiguration for F
|
||||
where
|
||||
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
|
||||
{
|
||||
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
|
||||
fn configure(&self, rt: &mut ServiceRuntime) {
|
||||
(self)(rt)
|
||||
}
|
||||
}
|
||||
|
||||
fn not_configured(_: &mut ServiceRuntime) {
|
||||
error!("Service is not configured");
|
||||
}
|
||||
|
||||
pub struct ServiceRuntime {
|
||||
names: HashMap<String, Token>,
|
||||
services: HashMap<Token, BoxedNewService>,
|
||||
onstart: Vec<LocalBoxFuture<'static, ()>>,
|
||||
}
|
||||
|
||||
impl ServiceRuntime {
|
||||
fn new(names: HashMap<String, Token>) -> Self {
|
||||
ServiceRuntime {
|
||||
names,
|
||||
services: HashMap::new(),
|
||||
onstart: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn validate(&self) {
|
||||
for (name, token) in &self.names {
|
||||
if !self.services.contains_key(&token) {
|
||||
error!("Service {:?} is not configured", name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Register service.
|
||||
///
|
||||
/// Name of the service must be registered during configuration stage with
|
||||
/// *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
|
||||
pub fn service<T, F>(&mut self, name: &str, service: F)
|
||||
where
|
||||
F: IntoBaseServiceFactory<T, TcpStream>,
|
||||
T: BaseServiceFactory<TcpStream, Config = ()> + 'static,
|
||||
T::Future: 'static,
|
||||
T::Service: 'static,
|
||||
T::InitError: fmt::Debug,
|
||||
{
|
||||
// let name = name.to_owned();
|
||||
if let Some(token) = self.names.get(name) {
|
||||
self.services.insert(
|
||||
*token,
|
||||
Box::new(ServiceFactory {
|
||||
inner: service.into_factory(),
|
||||
}),
|
||||
);
|
||||
} else {
|
||||
panic!("Unknown service: {:?}", name);
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute future before services initialization.
|
||||
pub fn on_start<F>(&mut self, fut: F)
|
||||
where
|
||||
F: Future<Output = ()> + 'static,
|
||||
{
|
||||
self.onstart.push(Box::pin(fut))
|
||||
}
|
||||
}
|
||||
|
||||
type BoxedNewService = Box<
|
||||
dyn BaseServiceFactory<
|
||||
(CounterGuard, MioStream),
|
||||
Response = (),
|
||||
Error = (),
|
||||
InitError = (),
|
||||
Config = (),
|
||||
Service = BoxedServerService,
|
||||
Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>,
|
||||
>,
|
||||
>;
|
||||
|
||||
struct ServiceFactory<T> {
|
||||
inner: T,
|
||||
}
|
||||
|
||||
impl<T> BaseServiceFactory<(CounterGuard, MioStream)> for ServiceFactory<T>
|
||||
where
|
||||
T: BaseServiceFactory<TcpStream, Config = ()>,
|
||||
T::Future: 'static,
|
||||
T::Service: 'static,
|
||||
T::Error: 'static,
|
||||
T::InitError: fmt::Debug + 'static,
|
||||
{
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type Config = ();
|
||||
type Service = BoxedServerService;
|
||||
type InitError = ();
|
||||
type Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
let fut = self.inner.new_service(());
|
||||
Box::pin(async move {
|
||||
match fut.await {
|
||||
Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
|
||||
Err(e) => {
|
||||
error!("Can not construct service: {:?}", e);
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
actix-server/src/handle.rs (new file, 53 lines)
@@ -0,0 +1,53 @@
|
||||
use std::future::Future;
|
||||
|
||||
use tokio::sync::{mpsc::UnboundedSender, oneshot};
|
||||
|
||||
use crate::server::ServerCommand;
|
||||
|
||||
/// Server handle.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ServerHandle {
|
||||
cmd_tx: UnboundedSender<ServerCommand>,
|
||||
}
|
||||
|
||||
impl ServerHandle {
|
||||
pub(crate) fn new(cmd_tx: UnboundedSender<ServerCommand>) -> Self {
|
||||
ServerHandle { cmd_tx }
|
||||
}
|
||||
|
||||
pub(crate) fn worker_faulted(&self, idx: usize) {
|
||||
let _ = self.cmd_tx.send(ServerCommand::WorkerFaulted(idx));
|
||||
}
|
||||
|
||||
/// Pause accepting incoming connections.
|
||||
///
|
||||
/// May drop socket pending connection. All open connections remain active.
|
||||
pub fn pause(&self) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.cmd_tx.send(ServerCommand::Pause(tx));
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Resume accepting incoming connections.
|
||||
pub fn resume(&self) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.cmd_tx.send(ServerCommand::Resume(tx));
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop incoming connection processing, stop all workers and exit.
|
||||
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.cmd_tx.send(ServerCommand::Stop {
|
||||
graceful,
|
||||
completion: Some(tx),
|
||||
});
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
}
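A minimal usage sketch for the handle API above, assuming the actix-server, actix-service and actix-rt crates: the handle is obtained from the built server, is cheap to clone, and can pause, resume, or stop the server from any task.

use std::time::Duration;

use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    let srv = Server::build()
        .bind("echo", ("127.0.0.1", 0), || {
            fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run();

    let handle = srv.handle();

    actix_rt::spawn(async move {
        actix_rt::time::sleep(Duration::from_secs(1)).await;
        handle.pause().await;
        handle.resume().await;
        // `true` requests a graceful stop that waits for workers to finish.
        handle.stop(true).await;
    });

    // Awaiting the server resolves once shutdown completes.
    srv.await
}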
|
actix-server/src/join_all.rs (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
use std::{
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures_core::future::{BoxFuture, LocalBoxFuture};
|
||||
|
||||
// a poor man's join future. joined future is only used when starting/stopping the server.
|
||||
// pin_project and pinned futures are overkill for this task.
|
||||
pub(crate) struct JoinAll<T> {
|
||||
fut: Vec<JoinFuture<T>>,
|
||||
}
|
||||
|
||||
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + Send + 'static>) -> JoinAll<T> {
|
||||
let fut = fut
|
||||
.into_iter()
|
||||
.map(|f| JoinFuture::Future(Box::pin(f)))
|
||||
.collect();
|
||||
|
||||
JoinAll { fut }
|
||||
}
|
||||
|
||||
enum JoinFuture<T> {
|
||||
Future(BoxFuture<'static, T>),
|
||||
Result(Option<T>),
|
||||
}
|
||||
|
||||
impl<T> Unpin for JoinAll<T> {}
|
||||
|
||||
impl<T> Future for JoinAll<T> {
|
||||
type Output = Vec<T>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let mut ready = true;
|
||||
|
||||
let this = self.get_mut();
|
||||
for fut in this.fut.iter_mut() {
|
||||
if let JoinFuture::Future(f) = fut {
|
||||
match f.as_mut().poll(cx) {
|
||||
Poll::Ready(t) => {
|
||||
*fut = JoinFuture::Result(Some(t));
|
||||
}
|
||||
Poll::Pending => ready = false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ready {
|
||||
let mut res = Vec::new();
|
||||
for fut in this.fut.iter_mut() {
|
||||
if let JoinFuture::Result(f) = fut {
|
||||
res.push(f.take().unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Ready(res)
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn join_all_local<T>(
|
||||
fut: Vec<impl Future<Output = T> + 'static>,
|
||||
) -> JoinAllLocal<T> {
|
||||
let fut = fut
|
||||
.into_iter()
|
||||
.map(|f| JoinLocalFuture::LocalFuture(Box::pin(f)))
|
||||
.collect();
|
||||
|
||||
JoinAllLocal { fut }
|
||||
}
|
||||
|
||||
// a poor man's join future. joined future is only used when starting/stopping the server.
|
||||
// pin_project and pinned futures are overkill for this task.
|
||||
pub(crate) struct JoinAllLocal<T> {
|
||||
fut: Vec<JoinLocalFuture<T>>,
|
||||
}
|
||||
|
||||
enum JoinLocalFuture<T> {
|
||||
LocalFuture(LocalBoxFuture<'static, T>),
|
||||
Result(Option<T>),
|
||||
}
|
||||
|
||||
impl<T> Unpin for JoinAllLocal<T> {}
|
||||
|
||||
impl<T> Future for JoinAllLocal<T> {
|
||||
type Output = Vec<T>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let mut ready = true;
|
||||
|
||||
let this = self.get_mut();
|
||||
for fut in this.fut.iter_mut() {
|
||||
if let JoinLocalFuture::LocalFuture(f) = fut {
|
||||
match f.as_mut().poll(cx) {
|
||||
Poll::Ready(t) => {
|
||||
*fut = JoinLocalFuture::Result(Some(t));
|
||||
}
|
||||
Poll::Pending => ready = false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ready {
|
||||
let mut res = Vec::new();
|
||||
for fut in this.fut.iter_mut() {
|
||||
if let JoinLocalFuture::Result(f) = fut {
|
||||
res.push(f.take().unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Ready(res)
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
use actix_utils::future::ready;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_join_all() {
|
||||
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
|
||||
let mut res = join_all(futs).await.into_iter();
|
||||
assert_eq!(Ok(1), res.next().unwrap());
|
||||
assert_eq!(Err(3), res.next().unwrap());
|
||||
assert_eq!(Ok(9), res.next().unwrap());
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_join_all_local() {
|
||||
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
|
||||
let mut res = join_all_local(futs).await.into_iter();
|
||||
assert_eq!(Ok(1), res.next().unwrap());
|
||||
assert_eq!(Err(3), res.next().unwrap());
|
||||
assert_eq!(Ok(9), res.next().unwrap());
|
||||
}
|
||||
}
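The crate-private `join_all` above behaves like the public `futures` combinator: every future is driven to completion and results come back in input order, which is what the server relies on when awaiting worker stop futures. A minimal sketch of the same behaviour, assuming the `futures-util` and `actix-rt` crates (the in-crate type itself is not exported):

use std::time::Duration;

use actix_rt::time::sleep;

async fn stop_worker(idx: usize) -> usize {
    // Workers finish at different times...
    sleep(Duration::from_millis(10 * idx as u64)).await;
    idx
}

#[actix_rt::main]
async fn main() {
    let stops = vec![stop_worker(2), stop_worker(0), stop_worker(1)];
    // ...but the joined results still arrive in the order the futures were submitted.
    let finished = futures_util::future::join_all(stops).await;
    assert_eq!(finished, vec![2, 0, 1]);
}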
|
@@ -5,8 +5,10 @@
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
mod accept;
|
||||
mod availability;
|
||||
mod builder;
|
||||
mod config;
|
||||
mod handle;
|
||||
mod join_all;
|
||||
mod server;
|
||||
mod service;
|
||||
mod signals;
|
||||
@@ -16,7 +18,7 @@ mod waker_queue;
|
||||
mod worker;
|
||||
|
||||
pub use self::builder::ServerBuilder;
|
||||
pub use self::config::{ServiceConfig, ServiceRuntime};
|
||||
pub use self::handle::ServerHandle;
|
||||
pub use self::server::Server;
|
||||
pub use self::service::ServiceFactory;
|
||||
pub use self::test_server::TestServer;
|
||||
@@ -24,104 +26,9 @@ pub use self::test_server::TestServer;
|
||||
#[doc(hidden)]
|
||||
pub use self::socket::FromStream;
|
||||
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
/// Socket ID token
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
|
||||
pub(crate) struct Token(usize);
|
||||
|
||||
impl Default for Token {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Token {
|
||||
fn new() -> Self {
|
||||
Self(0)
|
||||
}
|
||||
|
||||
pub(crate) fn next(&mut self) -> Token {
|
||||
let token = Token(self.0);
|
||||
self.0 += 1;
|
||||
token
|
||||
}
|
||||
}
|
||||
|
||||
/// Start server building process
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "2.0.0", note = "Use `Server::build()`.")]
|
||||
pub fn new() -> ServerBuilder {
|
||||
ServerBuilder::default()
|
||||
}
|
||||
|
||||
// a poor man's join future. joined future is only used when starting/stopping the server.
|
||||
// pin_project and pinned futures are overkill for this task.
|
||||
pub(crate) struct JoinAll<T> {
|
||||
fut: Vec<JoinFuture<T>>,
|
||||
}
|
||||
|
||||
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + 'static>) -> JoinAll<T> {
|
||||
let fut = fut
|
||||
.into_iter()
|
||||
.map(|f| JoinFuture::Future(Box::pin(f)))
|
||||
.collect();
|
||||
|
||||
JoinAll { fut }
|
||||
}
|
||||
|
||||
enum JoinFuture<T> {
|
||||
Future(Pin<Box<dyn Future<Output = T>>>),
|
||||
Result(Option<T>),
|
||||
}
|
||||
|
||||
impl<T> Unpin for JoinAll<T> {}
|
||||
|
||||
impl<T> Future for JoinAll<T> {
|
||||
type Output = Vec<T>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let mut ready = true;
|
||||
|
||||
let this = self.get_mut();
|
||||
for fut in this.fut.iter_mut() {
|
||||
if let JoinFuture::Future(f) = fut {
|
||||
match f.as_mut().poll(cx) {
|
||||
Poll::Ready(t) => {
|
||||
*fut = JoinFuture::Result(Some(t));
|
||||
}
|
||||
Poll::Pending => ready = false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ready {
|
||||
let mut res = Vec::new();
|
||||
for fut in this.fut.iter_mut() {
|
||||
if let JoinFuture::Result(f) = fut {
|
||||
res.push(f.take().unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Ready(res)
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
use actix_utils::future::ready;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_join_all() {
|
||||
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
|
||||
let mut res = join_all(futs).await.into_iter();
|
||||
assert_eq!(Ok(1), res.next().unwrap());
|
||||
assert_eq!(Err(3), res.next().unwrap());
|
||||
assert_eq!(Ok(9), res.next().unwrap());
|
||||
}
|
||||
}
|
||||
|
@@ -1,112 +1,359 @@
|
||||
use std::future::Future;
|
||||
use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{
|
||||
future::Future,
|
||||
io, mem,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio::sync::oneshot;
|
||||
use actix_rt::{time::sleep, System};
|
||||
use futures_core::future::BoxFuture;
|
||||
use log::{error, info};
|
||||
use tokio::sync::{
|
||||
mpsc::{UnboundedReceiver, UnboundedSender},
|
||||
oneshot,
|
||||
};
|
||||
|
||||
use crate::builder::ServerBuilder;
|
||||
use crate::signals::Signal;
|
||||
use crate::{
|
||||
accept::Accept,
|
||||
builder::ServerBuilder,
|
||||
join_all::join_all,
|
||||
service::InternalServiceFactory,
|
||||
signals::{Signal, Signals},
|
||||
waker_queue::{WakerInterest, WakerQueue},
|
||||
worker::{ServerWorker, ServerWorkerConfig, WorkerHandleServer},
|
||||
ServerHandle,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum ServerCommand {
|
||||
/// TODO
|
||||
WorkerFaulted(usize),
|
||||
|
||||
/// Contains return channel to notify caller of successful state change.
|
||||
Pause(oneshot::Sender<()>),
|
||||
|
||||
/// Contains return channel to notify caller of successful state change.
|
||||
Resume(oneshot::Sender<()>),
|
||||
Signal(Signal),
|
||||
/// Whether to try and shut down gracefully
|
||||
|
||||
/// TODO
|
||||
Stop {
|
||||
/// True if shut down should be graceful.
|
||||
graceful: bool,
|
||||
|
||||
/// Return channel to notify caller that shutdown is complete.
|
||||
completion: Option<oneshot::Sender<()>>,
|
||||
},
|
||||
/// Notify of server stop
|
||||
Notify(oneshot::Sender<()>),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Server(
|
||||
UnboundedSender<ServerCommand>,
|
||||
Option<oneshot::Receiver<()>>,
|
||||
);
|
||||
/// General purpose TCP server that runs services receiving Tokio `TcpStream`s.
|
||||
///
|
||||
/// Handles creating worker threads, restarting faulted workers, connection accepting, and
|
||||
/// back-pressure logic.
|
||||
///
|
||||
/// Creates a worker per CPU core (or the number specified in [`ServerBuilder::workers`]) and
|
||||
/// distributes connections with a round-robin strategy.
|
||||
///
|
||||
/// The [Server] must be awaited to process stop commands and listen for OS signals. It will resolve
|
||||
/// when the server has fully shut down.
|
||||
///
|
||||
/// # Shutdown Signals
|
||||
/// On UNIX systems, `SIGTERM` will start a graceful shutdown while `SIGINT` or `SIGQUIT` will
/// start a forced shutdown. On Windows, a Ctrl-C signal will start a forced shutdown.
|
||||
///
|
||||
/// A graceful shutdown will wait for all workers to stop first.
|
||||
///
|
||||
/// # Examples
|
||||
/// The following is a TCP echo server. Test using `telnet 127.0.0.1 8080`.
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io;
|
||||
///
|
||||
/// use actix_rt::net::TcpStream;
|
||||
/// use actix_server::Server;
|
||||
/// use actix_service::{fn_service, ServiceFactoryExt as _};
|
||||
/// use bytes::BytesMut;
|
||||
/// use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
|
||||
///
|
||||
/// #[actix_rt::main]
|
||||
/// async fn main() -> io::Result<()> {
|
||||
/// let bind_addr = ("127.0.0.1", 8080);
|
||||
///
|
||||
/// Server::build()
|
||||
/// .bind("echo", bind_addr, move || {
|
||||
/// fn_service(move |mut stream: TcpStream| {
|
||||
/// async move {
|
||||
/// let mut size = 0;
|
||||
/// let mut buf = BytesMut::new();
|
||||
///
|
||||
/// loop {
|
||||
/// match stream.read_buf(&mut buf).await {
|
||||
/// // end of stream; bail from loop
|
||||
/// Ok(0) => break,
|
||||
///
|
||||
/// // write bytes back to stream
|
||||
/// Ok(bytes_read) => {
|
||||
/// stream.write_all(&buf[size..]).await.unwrap();
|
||||
/// size += bytes_read;
|
||||
/// }
|
||||
///
|
||||
/// Err(err) => {
|
||||
/// eprintln!("Stream Error: {:?}", err);
|
||||
/// return Err(());
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// })
|
||||
/// .map_err(|err| eprintln!("Service Error: {:?}", err))
|
||||
/// })?
|
||||
/// .run()
|
||||
/// .await
|
||||
/// }
|
||||
/// ```
|
||||
#[must_use = "futures do nothing unless you `.await` or poll them"]
|
||||
pub enum Server {
|
||||
Server(ServerInner),
|
||||
Error(Option<io::Error>),
|
||||
}
|
||||
|
||||
impl Server {
|
||||
pub(crate) fn new(tx: UnboundedSender<ServerCommand>) -> Self {
|
||||
Server(tx, None)
|
||||
}
|
||||
|
||||
/// Start server building process
|
||||
/// Create server build.
|
||||
pub fn build() -> ServerBuilder {
|
||||
ServerBuilder::default()
|
||||
}
|
||||
|
||||
pub(crate) fn signal(&self, sig: Signal) {
|
||||
let _ = self.0.send(ServerCommand::Signal(sig));
|
||||
pub(crate) fn new(mut builder: ServerBuilder) -> Self {
|
||||
let sockets = mem::take(&mut builder.sockets)
|
||||
.into_iter()
|
||||
.map(|t| (t.0, t.2))
|
||||
.collect();
|
||||
|
||||
// Give log information on what runtime will be used.
|
||||
let is_tokio = tokio::runtime::Handle::try_current().is_ok();
|
||||
let is_actix = actix_rt::System::try_current().is_some();
|
||||
|
||||
match (is_tokio, is_actix) {
|
||||
(true, false) => info!("Tokio runtime found. Starting in existing Tokio runtime"),
|
||||
(_, true) => info!("Actix runtime found. Starting in Actix runtime"),
|
||||
(_, _) => info!(
|
||||
"Actix/Tokio runtime not found. Starting in newt Tokio current-thread runtime"
|
||||
),
|
||||
}
|
||||
|
||||
for (_, name, lst) in &builder.sockets {
|
||||
info!(
|
||||
r#"Starting service: "{}", workers: {}, listening on: {}"#,
|
||||
name,
|
||||
builder.threads,
|
||||
lst.local_addr()
|
||||
);
|
||||
}
|
||||
|
||||
match Accept::start(sockets, &builder) {
|
||||
Ok((waker_queue, worker_handles)) => {
|
||||
// construct OS signals listener future
|
||||
let signals = (builder.listen_os_signals).then(Signals::new);
|
||||
|
||||
Self::Server(ServerInner {
|
||||
cmd_tx: builder.cmd_tx.clone(),
|
||||
cmd_rx: builder.cmd_rx,
|
||||
signals,
|
||||
waker_queue,
|
||||
worker_handles,
|
||||
worker_config: builder.worker_config,
|
||||
services: builder.factories,
|
||||
exit: builder.exit,
|
||||
stop_task: None,
|
||||
})
|
||||
}
|
||||
|
||||
Err(err) => Self::Error(Some(err)),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn worker_faulted(&self, idx: usize) {
|
||||
let _ = self.0.send(ServerCommand::WorkerFaulted(idx));
|
||||
}
|
||||
|
||||
/// Pause accepting incoming connections
|
||||
/// Get a handle for ServerFuture that can be used to change state of actix server.
|
||||
///
|
||||
/// If the socket contains pending connections, they might be dropped.
|
||||
/// All open connections remain active.
|
||||
pub fn pause(&self) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.0.send(ServerCommand::Pause(tx));
|
||||
async {
|
||||
let _ = rx.await;
|
||||
/// See [ServerHandle](ServerHandle) for usage.
|
||||
pub fn handle(&self) -> ServerHandle {
|
||||
match self {
|
||||
Server::Server(inner) => ServerHandle::new(inner.cmd_tx.clone()),
|
||||
Server::Error(err) => {
|
||||
// TODO: I don't think this is the best way to handle server startup failure
|
||||
panic!(
|
||||
"server handle can not be obtained because server failed to start up: {}",
|
||||
err.as_ref().unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Resume accepting incoming connections
|
||||
pub fn resume(&self) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.0.send(ServerCommand::Resume(tx));
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop incoming connection processing, stop all workers and exit.
|
||||
///
|
||||
/// If the server was started with the `spawn()` method, the spawned thread gets terminated.
|
||||
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.0.send(ServerCommand::Stop {
|
||||
graceful,
|
||||
completion: Some(tx),
|
||||
});
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Server {
|
||||
fn clone(&self) -> Self {
|
||||
Self(self.0.clone(), None)
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for Server {
|
||||
type Output = io::Result<()>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
match self.as_mut().get_mut() {
|
||||
Server::Error(err) => Poll::Ready(Err(err
|
||||
.take()
|
||||
.expect("Server future cannot be polled after error"))),
|
||||
|
||||
if this.1.is_none() {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
if this.0.send(ServerCommand::Notify(tx)).is_err() {
|
||||
return Poll::Ready(Ok(()));
|
||||
Server::Server(inner) => {
|
||||
// poll Signals
|
||||
if let Some(ref mut signals) = inner.signals {
|
||||
if let Poll::Ready(signal) = Pin::new(signals).poll(cx) {
|
||||
inner.stop_task = inner.handle_signal(signal);
|
||||
// drop signals listener
|
||||
inner.signals = None;
|
||||
}
|
||||
}
|
||||
|
||||
// handle stop tasks and eagerly drain the command channel
|
||||
loop {
|
||||
if let Some(ref mut fut) = inner.stop_task {
|
||||
// only resolve stop task and exit
|
||||
return fut.as_mut().poll(cx).map(|_| Ok(()));
|
||||
}
|
||||
|
||||
match Pin::new(&mut inner.cmd_rx).poll_recv(cx) {
|
||||
Poll::Ready(Some(cmd)) => {
|
||||
// if stop task is required, set it and loop
|
||||
inner.stop_task = inner.handle_cmd(cmd);
|
||||
}
|
||||
_ => return Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ServerInner {
|
||||
worker_handles: Vec<WorkerHandleServer>,
|
||||
worker_config: ServerWorkerConfig,
|
||||
services: Vec<Box<dyn InternalServiceFactory>>,
|
||||
exit: bool,
|
||||
cmd_tx: UnboundedSender<ServerCommand>,
|
||||
cmd_rx: UnboundedReceiver<ServerCommand>,
|
||||
signals: Option<Signals>,
|
||||
waker_queue: WakerQueue,
|
||||
stop_task: Option<BoxFuture<'static, ()>>,
|
||||
}
|
||||
|
||||
impl ServerInner {
|
||||
fn handle_cmd(&mut self, item: ServerCommand) -> Option<BoxFuture<'static, ()>> {
|
||||
match item {
|
||||
ServerCommand::Pause(tx) => {
|
||||
self.waker_queue.wake(WakerInterest::Pause);
|
||||
let _ = tx.send(());
|
||||
None
|
||||
}
|
||||
|
||||
ServerCommand::Resume(tx) => {
|
||||
self.waker_queue.wake(WakerInterest::Resume);
|
||||
let _ = tx.send(());
|
||||
None
|
||||
}
|
||||
|
||||
ServerCommand::Stop {
|
||||
graceful,
|
||||
completion,
|
||||
} => {
|
||||
let exit = self.exit;
|
||||
|
||||
// stop accept thread
|
||||
self.waker_queue.wake(WakerInterest::Stop);
|
||||
|
||||
// stop workers
|
||||
let workers_stop = self
|
||||
.worker_handles
|
||||
.iter()
|
||||
.map(|worker| worker.stop(graceful))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Some(Box::pin(async move {
|
||||
if graceful {
|
||||
// wait for all workers to shut down
|
||||
let _ = join_all(workers_stop).await;
|
||||
}
|
||||
|
||||
if let Some(tx) = completion {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
|
||||
if exit {
|
||||
sleep(Duration::from_millis(300)).await;
|
||||
System::try_current().as_ref().map(System::stop);
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
ServerCommand::WorkerFaulted(idx) => {
|
||||
// TODO: maybe just return with a warning log if not found?
|
||||
assert!(self.worker_handles.iter().any(|wrk| wrk.idx == idx));
|
||||
|
||||
error!("Worker {} has died; restarting", idx);
|
||||
|
||||
let factories = self
|
||||
.services
|
||||
.iter()
|
||||
.map(|service| service.clone_factory())
|
||||
.collect();
|
||||
|
||||
match ServerWorker::start(
|
||||
idx,
|
||||
factories,
|
||||
self.waker_queue.clone(),
|
||||
self.worker_config,
|
||||
) {
|
||||
Ok((handle_accept, handle_server)) => {
|
||||
*self
|
||||
.worker_handles
|
||||
.iter_mut()
|
||||
.find(|wrk| wrk.idx == idx)
|
||||
.unwrap() = handle_server;
|
||||
|
||||
self.waker_queue.wake(WakerInterest::Worker(handle_accept));
|
||||
}
|
||||
|
||||
Err(err) => error!("can not restart worker {}: {}", idx, err),
|
||||
};
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_signal(&mut self, signal: Signal) -> Option<BoxFuture<'static, ()>> {
|
||||
match signal {
|
||||
Signal::Int => {
|
||||
info!("SIGINT received; starting forced shutdown");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
|
||||
Signal::Term => {
|
||||
info!("SIGTERM received; starting graceful shutdown");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: true,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
|
||||
Signal::Quit => {
|
||||
info!("SIGQUIT received; starting forced shutdown");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
this.1 = Some(rx);
|
||||
}
|
||||
|
||||
match Pin::new(this.1.as_mut().unwrap()).poll(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(_) => Poll::Ready(Ok(())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
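The hunk above splits the old command-channel `Server` into a `Server` future and a `ServerHandle` that can pause, resume, or stop it from elsewhere. A minimal sketch of how the two pieces are meant to be used together; it assumes only the `handle()`, `stop(graceful)`, and builder APIs shown in this diff, and the no-op service is purely illustrative.

```rust
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // `run()` now returns a future that drives the server to completion.
    let srv = Server::build()
        .workers(1)
        .bind("noop", ("127.0.0.1", 0), || {
            // accept a connection and immediately finish with it
            fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run();

    // Grab a handle before awaiting; it can control the server from any task.
    let handle = srv.handle();

    actix_rt::spawn(async move {
        // ... some time later: trigger a graceful shutdown
        handle.stop(true).await;
    });

    srv.await
}
```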
@@ -3,15 +3,12 @@ use std::net::SocketAddr;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_service::{Service, ServiceFactory as BaseServiceFactory};
|
||||
use actix_utils::{
|
||||
counter::CounterGuard,
|
||||
future::{ready, Ready},
|
||||
};
|
||||
use actix_utils::future::{ready, Ready};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use log::error;
|
||||
|
||||
use crate::socket::{FromStream, MioStream};
|
||||
use crate::Token;
|
||||
use crate::worker::WorkerCounterGuard;
|
||||
|
||||
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
|
||||
type Factory: BaseServiceFactory<Stream, Config = ()>;
|
||||
@@ -20,16 +17,16 @@ pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
|
||||
}
|
||||
|
||||
pub(crate) trait InternalServiceFactory: Send {
|
||||
fn name(&self, token: Token) -> &str;
|
||||
fn name(&self, token: usize) -> &str;
|
||||
|
||||
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
|
||||
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>;
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>;
|
||||
}
|
||||
|
||||
pub(crate) type BoxedServerService = Box<
|
||||
dyn Service<
|
||||
(CounterGuard, MioStream),
|
||||
(WorkerCounterGuard, MioStream),
|
||||
Response = (),
|
||||
Error = (),
|
||||
Future = Ready<Result<(), ()>>,
|
||||
@@ -50,7 +47,7 @@ impl<S, I> StreamService<S, I> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, I> Service<(CounterGuard, MioStream)> for StreamService<S, I>
|
||||
impl<S, I> Service<(WorkerCounterGuard, MioStream)> for StreamService<S, I>
|
||||
where
|
||||
S: Service<I>,
|
||||
S::Future: 'static,
|
||||
@@ -65,7 +62,7 @@ where
|
||||
self.service.poll_ready(ctx).map_err(|_| ())
|
||||
}
|
||||
|
||||
fn call(&self, (guard, req): (CounterGuard, MioStream)) -> Self::Future {
|
||||
fn call(&self, (guard, req): (WorkerCounterGuard, MioStream)) -> Self::Future {
|
||||
ready(match FromStream::from_mio(req) {
|
||||
Ok(stream) => {
|
||||
let f = self.service.call(stream);
|
||||
@@ -86,7 +83,7 @@ where
|
||||
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
|
||||
name: String,
|
||||
inner: F,
|
||||
token: Token,
|
||||
token: usize,
|
||||
addr: SocketAddr,
|
||||
_t: PhantomData<Io>,
|
||||
}
|
||||
@@ -98,7 +95,7 @@ where
|
||||
{
|
||||
pub(crate) fn create(
|
||||
name: String,
|
||||
token: Token,
|
||||
token: usize,
|
||||
inner: F,
|
||||
addr: SocketAddr,
|
||||
) -> Box<dyn InternalServiceFactory> {
|
||||
@@ -117,7 +114,7 @@ where
|
||||
F: ServiceFactory<Io>,
|
||||
Io: FromStream + Send + 'static,
|
||||
{
|
||||
fn name(&self, _: Token) -> &str {
|
||||
fn name(&self, _: usize) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
@@ -131,14 +128,14 @@ where
|
||||
})
|
||||
}
|
||||
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>> {
|
||||
let token = self.token;
|
||||
let fut = self.inner.create().new_service(());
|
||||
Box::pin(async move {
|
||||
match fut.await {
|
||||
Ok(inner) => {
|
||||
let service = Box::new(StreamService::new(inner)) as _;
|
||||
Ok(vec![(token, service)])
|
||||
Ok((token, service))
|
||||
}
|
||||
Err(_) => Err(()),
|
||||
}
|
||||
|
@@ -1,47 +1,64 @@
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{
|
||||
fmt,
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use crate::server::Server;
|
||||
use log::trace;
|
||||
|
||||
/// Different types of process signals
|
||||
#[allow(dead_code)]
|
||||
#[derive(PartialEq, Clone, Copy, Debug)]
|
||||
/// Types of process signals.
|
||||
// #[allow(dead_code)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
#[allow(dead_code)] // variants are never constructed on non-unix
|
||||
pub(crate) enum Signal {
|
||||
/// SIGHUP
|
||||
Hup,
|
||||
/// SIGINT
|
||||
/// `SIGINT`
|
||||
Int,
|
||||
/// SIGTERM
|
||||
|
||||
/// `SIGTERM`
|
||||
Term,
|
||||
/// SIGQUIT
|
||||
|
||||
/// `SIGQUIT`
|
||||
Quit,
|
||||
}
|
||||
|
||||
impl fmt::Display for Signal {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str(match self {
|
||||
Signal::Int => "SIGINT",
|
||||
Signal::Term => "SIGTERM",
|
||||
Signal::Quit => "SIGQUIT",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Process signal listener.
|
||||
pub(crate) struct Signals {
|
||||
srv: Server,
|
||||
#[cfg(not(unix))]
|
||||
signals: futures_core::future::LocalBoxFuture<'static, std::io::Result<()>>,
|
||||
|
||||
#[cfg(unix)]
|
||||
signals: Vec<(Signal, actix_rt::signal::unix::Signal)>,
|
||||
}
|
||||
|
||||
impl Signals {
|
||||
pub(crate) fn start(srv: Server) {
|
||||
/// Constructs an OS signal listening future.
|
||||
pub(crate) fn new() -> Self {
|
||||
trace!("setting up OS signal listener");
|
||||
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
actix_rt::spawn(Signals {
|
||||
srv,
|
||||
Signals {
|
||||
signals: Box::pin(actix_rt::signal::ctrl_c()),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use actix_rt::signal::unix;
|
||||
|
||||
let sig_map = [
|
||||
(unix::SignalKind::interrupt(), Signal::Int),
|
||||
(unix::SignalKind::hangup(), Signal::Hup),
|
||||
(unix::SignalKind::terminate(), Signal::Term),
|
||||
(unix::SignalKind::quit(), Signal::Quit),
|
||||
];
|
||||
@@ -62,32 +79,30 @@ impl Signals {
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
actix_rt::spawn(Signals { srv, signals });
|
||||
Signals { signals }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for Signals {
|
||||
type Output = ();
|
||||
type Output = Signal;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
#[cfg(not(unix))]
|
||||
match self.signals.as_mut().poll(cx) {
|
||||
Poll::Ready(_) => {
|
||||
self.srv.signal(Signal::Int);
|
||||
Poll::Ready(())
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
{
|
||||
self.signals.as_mut().poll(cx).map(|_| Signal::Int)
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
for (sig, fut) in self.signals.iter_mut() {
|
||||
// TODO: match on if let Some ?
|
||||
if Pin::new(fut).poll_recv(cx).is_ready() {
|
||||
let sig = *sig;
|
||||
self.srv.signal(sig);
|
||||
return Poll::Ready(());
|
||||
trace!("{} received", sig);
|
||||
return Poll::Ready(*sig);
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
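The rewritten `Signals` future resolves with whichever signal fired instead of pushing commands into the server. Below is a rough, Unix-only standalone sketch of the same idea using plain Tokio (assumes the `rt`, `macros`, and `signal` features); the real implementation above polls a `Vec` of `actix_rt::signal::unix::Signal` streams instead of using `select!`.

```rust
use tokio::signal::unix::{signal, SignalKind};

#[derive(Debug, Clone, Copy)]
enum Sig {
    Int,
    Term,
    Quit,
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut int = signal(SignalKind::interrupt())?;
    let mut term = signal(SignalKind::terminate())?;
    let mut quit = signal(SignalKind::quit())?;

    // Resolve with the first signal to arrive, mirroring `Signals: Future<Output = Signal>`.
    let sig = tokio::select! {
        _ = int.recv() => Sig::Int,
        _ = term.recv() => Sig::Term,
        _ = quit.recv() => Sig::Quit,
    };

    // The server future then turns this into a stop command (graceful for SIGTERM).
    println!("{:?} received; starting shutdown", sig);
    Ok(())
}
```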
@@ -23,9 +23,15 @@ pub(crate) enum MioListener {
|
||||
impl MioListener {
|
||||
pub(crate) fn local_addr(&self) -> SocketAddr {
|
||||
match *self {
|
||||
MioListener::Tcp(ref lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
|
||||
MioListener::Tcp(ref lst) => lst
|
||||
.local_addr()
|
||||
.map(SocketAddr::Tcp)
|
||||
.unwrap_or(SocketAddr::Unknown),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
|
||||
MioListener::Uds(ref lst) => lst
|
||||
.local_addr()
|
||||
.map(SocketAddr::Uds)
|
||||
.unwrap_or(SocketAddr::Unknown),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -110,14 +116,15 @@ impl fmt::Debug for MioListener {
|
||||
impl fmt::Display for MioListener {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
MioListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
|
||||
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
|
||||
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) enum SocketAddr {
|
||||
Unknown,
|
||||
Tcp(StdSocketAddr),
|
||||
#[cfg(unix)]
|
||||
Uds(mio::net::SocketAddr),
|
||||
@@ -126,9 +133,10 @@ pub(crate) enum SocketAddr {
|
||||
impl fmt::Display for SocketAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
|
||||
Self::Unknown => write!(f, "Unknown SocketAddr"),
|
||||
Self::Tcp(ref addr) => write!(f, "{}", addr),
|
||||
#[cfg(unix)]
|
||||
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
Self::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -136,9 +144,10 @@ impl fmt::Display for SocketAddr {
|
||||
impl fmt::Debug for SocketAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
|
||||
Self::Unknown => write!(f, "Unknown SocketAddr"),
|
||||
Self::Tcp(ref addr) => write!(f, "{:?}", addr),
|
||||
#[cfg(unix)]
|
||||
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
Self::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,17 +1,16 @@
|
||||
use std::sync::mpsc;
|
||||
use std::{net, thread};
|
||||
use std::{io, net, thread};
|
||||
|
||||
use actix_rt::{net::TcpStream, System};
|
||||
|
||||
use crate::{Server, ServerBuilder, ServiceFactory};
|
||||
use crate::{Server, ServerBuilder, ServerHandle, ServiceFactory};
|
||||
|
||||
/// The `TestServer` type.
|
||||
/// A testing server.
|
||||
///
|
||||
/// `TestServer` is very simple test server that simplify process of writing
|
||||
/// integration tests for actix-net applications.
|
||||
/// `TestServer` is a very simple test server that simplifies the process of writing integration tests for
|
||||
/// network applications.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use actix_service::fn_service;
|
||||
/// use actix_server::TestServer;
|
||||
@@ -35,11 +34,12 @@ pub struct TestServerRuntime {
|
||||
addr: net::SocketAddr,
|
||||
host: String,
|
||||
port: u16,
|
||||
system: System,
|
||||
server_handle: ServerHandle,
|
||||
thread_handle: Option<thread::JoinHandle<io::Result<()>>>,
|
||||
}
|
||||
|
||||
impl TestServer {
|
||||
/// Start new server with server builder
|
||||
/// Start new server with server builder.
|
||||
pub fn start<F>(mut factory: F) -> TestServerRuntime
|
||||
where
|
||||
F: FnMut(ServerBuilder) -> ServerBuilder + Send + 'static,
|
||||
@@ -47,46 +47,49 @@ impl TestServer {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
// run server in separate thread
|
||||
thread::spawn(move || {
|
||||
let sys = System::new();
|
||||
factory(Server::build()).workers(1).disable_signals().run();
|
||||
|
||||
tx.send(System::current()).unwrap();
|
||||
sys.run()
|
||||
let thread_handle = thread::spawn(move || {
|
||||
System::new().block_on(async {
|
||||
let server = factory(Server::build()).workers(1).disable_signals().run();
|
||||
tx.send(server.handle()).unwrap();
|
||||
server.await
|
||||
})
|
||||
});
|
||||
let system = rx.recv().unwrap();
|
||||
|
||||
let server_handle = rx.recv().unwrap();
|
||||
|
||||
TestServerRuntime {
|
||||
system,
|
||||
addr: "127.0.0.1:0".parse().unwrap(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: 0,
|
||||
server_handle,
|
||||
thread_handle: Some(thread_handle),
|
||||
}
|
||||
}
|
||||
|
||||
/// Start new test server with application factory
|
||||
/// Start new test server with application factory.
|
||||
pub fn with<F: ServiceFactory<TcpStream>>(factory: F) -> TestServerRuntime {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
// run server in separate thread
|
||||
thread::spawn(move || {
|
||||
let thread_handle = thread::spawn(move || {
|
||||
let sys = System::new();
|
||||
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
|
||||
let local_addr = tcp.local_addr().unwrap();
|
||||
|
||||
sys.block_on(async {
|
||||
Server::build()
|
||||
let server = Server::build()
|
||||
.listen("test", tcp, factory)
|
||||
.unwrap()
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.run();
|
||||
tx.send((System::current(), local_addr)).unwrap();
|
||||
});
|
||||
sys.run()
|
||||
|
||||
tx.send((server.handle(), local_addr)).unwrap();
|
||||
server.await
|
||||
})
|
||||
});
|
||||
|
||||
let (system, addr) = rx.recv().unwrap();
|
||||
let (server_handle, addr) = rx.recv().unwrap();
|
||||
|
||||
let host = format!("{}", addr.ip());
|
||||
let port = addr.port();
|
||||
@@ -95,11 +98,12 @@ impl TestServer {
|
||||
addr,
|
||||
host,
|
||||
port,
|
||||
system,
|
||||
server_handle,
|
||||
thread_handle: Some(thread_handle),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get first available unused local address
|
||||
/// Get first available unused local address.
|
||||
pub fn unused_addr() -> net::SocketAddr {
|
||||
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
|
||||
let socket = mio::net::TcpSocket::new_v4().unwrap();
|
||||
@@ -111,27 +115,28 @@ impl TestServer {
|
||||
}
|
||||
|
||||
impl TestServerRuntime {
|
||||
/// Test server host
|
||||
/// Test server host.
|
||||
pub fn host(&self) -> &str {
|
||||
&self.host
|
||||
}
|
||||
|
||||
/// Test server port
|
||||
/// Test server port.
|
||||
pub fn port(&self) -> u16 {
|
||||
self.port
|
||||
}
|
||||
|
||||
/// Get test server address
|
||||
/// Get test server address.
|
||||
pub fn addr(&self) -> net::SocketAddr {
|
||||
self.addr
|
||||
}
|
||||
|
||||
/// Stop http server
|
||||
/// Stop server.
|
||||
fn stop(&mut self) {
|
||||
self.system.stop();
|
||||
let _ = self.server_handle.stop(false);
|
||||
self.thread_handle.take().unwrap().join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
/// Connect to server, return tokio TcpStream
|
||||
/// Connect to server, returning a Tokio `TcpStream`.
|
||||
pub fn connect(&self) -> std::io::Result<TcpStream> {
|
||||
TcpStream::from_std(net::TcpStream::connect(self.addr)?)
|
||||
}
|
||||
|
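With the changes above, `TestServer` keeps a `ServerHandle` plus the worker thread's `JoinHandle` instead of a `System`, so stopping it can propagate the server's exit result. A short usage sketch in the spirit of the crate's doc example; the no-op service is illustrative only.

```rust
use actix_server::TestServer;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() {
    // Boot a no-op service on an unused local port in a background thread.
    let srv = TestServer::with(|| fn_service(|_sock| async { Ok::<_, ()>(()) }));

    // `connect()` hands back a Tokio `TcpStream` pointed at the test server.
    let stream = srv.connect().expect("connect to test server");
    println!("connected from {:?} to port {}", stream.local_addr(), srv.port());
}
```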
@@ -78,12 +78,7 @@ pub(crate) enum WakerInterest {
|
||||
Pause,
|
||||
Resume,
|
||||
Stop,
|
||||
/// `Timer` is an interest sent as a delayed future. When an error happens on accepting
|
||||
/// a connection, `Accept` temporarily deregisters the socket listener, wakes up the poll, and
|
||||
/// registers it again after the delayed future resolves.
|
||||
Timer,
|
||||
/// `Worker` is an interest happen after a worker runs into faulted state(This is determined
|
||||
/// by if work can be sent to it successfully).`Accept` would be waked up and add the new
|
||||
/// `WorkerHandleAccept`.
|
||||
/// `Worker` is an interest that is triggered after a worker faults. This is determined by
|
||||
/// trying to send work to it. `Accept` is then woken up and adds the new `WorkerHandleAccept`.
|
||||
Worker(WorkerHandleAccept),
|
||||
}
|
||||
|
@@ -1,9 +1,10 @@
|
||||
use std::{
|
||||
future::Future,
|
||||
mem,
|
||||
io, mem,
|
||||
pin::Pin,
|
||||
rc::Rc,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
},
|
||||
task::{Context, Poll},
|
||||
@@ -13,9 +14,8 @@ use std::{
|
||||
use actix_rt::{
|
||||
spawn,
|
||||
time::{sleep, Instant, Sleep},
|
||||
Arbiter,
|
||||
Arbiter, ArbiterHandle, System,
|
||||
};
|
||||
use actix_utils::counter::Counter;
|
||||
use futures_core::{future::LocalBoxFuture, ready};
|
||||
use log::{error, info, trace};
|
||||
use tokio::sync::{
|
||||
@@ -23,12 +23,14 @@ use tokio::sync::{
|
||||
oneshot,
|
||||
};
|
||||
|
||||
use crate::service::{BoxedServerService, InternalServiceFactory};
|
||||
use crate::socket::MioStream;
|
||||
use crate::waker_queue::{WakerInterest, WakerQueue};
|
||||
use crate::{join_all, Token};
|
||||
use crate::{
|
||||
join_all::join_all_local,
|
||||
service::{BoxedServerService, InternalServiceFactory},
|
||||
socket::MioStream,
|
||||
waker_queue::{WakerInterest, WakerQueue},
|
||||
};
|
||||
|
||||
/// Stop worker message. Returns `true` on successful graceful shutdown.
|
||||
/// Stop worker message. Returns `true` on successful graceful shutdown
|
||||
/// and `false` if some connections are still alive when the shutdown executes.
|
||||
pub(crate) struct Stop {
|
||||
graceful: bool,
|
||||
@@ -38,98 +40,162 @@ pub(crate) struct Stop {
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct Conn {
|
||||
pub io: MioStream,
|
||||
pub token: Token,
|
||||
pub token: usize,
|
||||
}
|
||||
|
||||
/// Create accept and server worker handles.
|
||||
fn handle_pair(
|
||||
idx: usize,
|
||||
tx1: UnboundedSender<Conn>,
|
||||
tx2: UnboundedSender<Stop>,
|
||||
avail: WorkerAvailability,
|
||||
conn_tx: UnboundedSender<Conn>,
|
||||
stop_tx: UnboundedSender<Stop>,
|
||||
counter: Counter,
|
||||
) -> (WorkerHandleAccept, WorkerHandleServer) {
|
||||
let accept = WorkerHandleAccept { tx: tx1, avail };
|
||||
let accept = WorkerHandleAccept {
|
||||
idx,
|
||||
conn_tx,
|
||||
counter,
|
||||
};
|
||||
|
||||
let server = WorkerHandleServer { idx, tx: tx2 };
|
||||
let server = WorkerHandleServer { idx, stop_tx };
|
||||
|
||||
(accept, server)
|
||||
}
|
||||
|
||||
/// Handle to worker that can send connection message to worker and share the
|
||||
/// availability of worker to other thread.
|
||||
/// The `counter: Arc<AtomicUsize>` field is shared by the `Accept` thread and the `ServerWorker` thread.
|
||||
///
|
||||
/// `Accept` would increment the counter and `ServerWorker` would decrement it.
|
||||
///
|
||||
/// # Atomic Ordering:
|
||||
///
|
||||
/// `Accept` always looks at its cached `Availability` field for the `ServerWorker` state.
|
||||
/// It lazily increments the counter after successfully dispatching new work to `ServerWorker`.
|
||||
/// On reaching the counter limit, `Accept` updates its cached `Availability` and marks the worker as
|
||||
/// unable to accept any work.
|
||||
///
|
||||
/// `ServerWorker` always decrements the counter when a piece of work received from `Accept` is done.
|
||||
/// On reaching the counter limit, the worker uses `mio::Waker` and `WakerQueue` to wake up `Accept`
|
||||
/// and notify it to update its cached `Availability` again, marking the worker as able to accept work again.
|
||||
///
|
||||
/// Hence, a wake-up only happens after `Accept` increments the counter to the limit,
|
||||
/// and a decrement down to the limit always wakes up `Accept`.
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct Counter {
|
||||
counter: Arc<AtomicUsize>,
|
||||
limit: usize,
|
||||
}
|
||||
|
||||
impl Counter {
|
||||
pub(crate) fn new(limit: usize) -> Self {
|
||||
Self {
|
||||
counter: Arc::new(AtomicUsize::new(1)),
|
||||
limit,
|
||||
}
|
||||
}
|
||||
|
||||
/// Increment counter by 1 and return true when hitting limit
|
||||
#[inline(always)]
|
||||
pub(crate) fn inc(&self) -> bool {
|
||||
self.counter.fetch_add(1, Ordering::Relaxed) != self.limit
|
||||
}
|
||||
|
||||
/// Decrement counter by 1 and return true if crossing limit.
|
||||
#[inline(always)]
|
||||
pub(crate) fn dec(&self) -> bool {
|
||||
self.counter.fetch_sub(1, Ordering::Relaxed) == self.limit
|
||||
}
|
||||
|
||||
pub(crate) fn total(&self) -> usize {
|
||||
self.counter.load(Ordering::SeqCst) - 1
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct WorkerCounter {
|
||||
idx: usize,
|
||||
inner: Rc<(WakerQueue, Counter)>,
|
||||
}
|
||||
|
||||
impl Clone for WorkerCounter {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
idx: self.idx,
|
||||
inner: self.inner.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WorkerCounter {
|
||||
pub(crate) fn new(idx: usize, waker_queue: WakerQueue, counter: Counter) -> Self {
|
||||
Self {
|
||||
idx,
|
||||
inner: Rc::new((waker_queue, counter)),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn guard(&self) -> WorkerCounterGuard {
|
||||
WorkerCounterGuard(self.clone())
|
||||
}
|
||||
|
||||
fn total(&self) -> usize {
|
||||
self.inner.1.total()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct WorkerCounterGuard(WorkerCounter);
|
||||
|
||||
impl Drop for WorkerCounterGuard {
|
||||
fn drop(&mut self) {
|
||||
let (waker_queue, counter) = &*self.0.inner;
|
||||
if counter.dec() {
|
||||
waker_queue.wake(WakerInterest::WorkerAvailable(self.0.idx));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle to a worker that can send connection messages to the worker and share the worker's availability
|
||||
/// with other threads.
|
||||
///
|
||||
/// Held by [Accept](crate::accept::Accept).
|
||||
pub(crate) struct WorkerHandleAccept {
|
||||
tx: UnboundedSender<Conn>,
|
||||
avail: WorkerAvailability,
|
||||
idx: usize,
|
||||
conn_tx: UnboundedSender<Conn>,
|
||||
counter: Counter,
|
||||
}
|
||||
|
||||
impl WorkerHandleAccept {
|
||||
#[inline(always)]
|
||||
pub(crate) fn idx(&self) -> usize {
|
||||
self.avail.idx
|
||||
self.idx
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn send(&self, msg: Conn) -> Result<(), Conn> {
|
||||
self.tx.send(msg).map_err(|msg| msg.0)
|
||||
pub(crate) fn send(&self, conn: Conn) -> Result<(), Conn> {
|
||||
self.conn_tx.send(conn).map_err(|msg| msg.0)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn available(&self) -> bool {
|
||||
self.avail.available()
|
||||
pub(crate) fn inc_counter(&self) -> bool {
|
||||
self.counter.inc()
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle to a worker that can send a stop message to the worker.
|
||||
///
|
||||
/// Held by [ServerBuilder](crate::builder::ServerBuilder).
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct WorkerHandleServer {
|
||||
pub idx: usize,
|
||||
tx: UnboundedSender<Stop>,
|
||||
pub(crate) idx: usize,
|
||||
stop_tx: UnboundedSender<Stop>,
|
||||
}
|
||||
|
||||
impl WorkerHandleServer {
|
||||
pub(crate) fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.tx.send(Stop { graceful, tx });
|
||||
let _ = self.stop_tx.send(Stop { graceful, tx });
|
||||
rx
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct WorkerAvailability {
|
||||
idx: usize,
|
||||
waker: WakerQueue,
|
||||
available: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl WorkerAvailability {
|
||||
pub fn new(idx: usize, waker: WakerQueue) -> Self {
|
||||
WorkerAvailability {
|
||||
idx,
|
||||
waker,
|
||||
available: Arc::new(AtomicBool::new(false)),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn available(&self) -> bool {
|
||||
self.available.load(Ordering::Acquire)
|
||||
}
|
||||
|
||||
pub fn set(&self, val: bool) {
|
||||
// Ordering:
|
||||
//
|
||||
// There could be multiple set calls happen in one <ServerWorker as Future>::poll.
|
||||
// Order is important between them.
|
||||
let old = self.available.swap(val, Ordering::AcqRel);
|
||||
// Notify the accept on switched to available.
|
||||
if !old && val {
|
||||
self.waker.wake(WakerInterest::WorkerAvailable(self.idx));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Service worker.
|
||||
///
|
||||
/// Worker accepts Socket objects via unbounded channel and starts stream processing.
|
||||
@@ -138,9 +204,8 @@ pub(crate) struct ServerWorker {
|
||||
// It must be dropped as soon as ServerWorker dropping.
|
||||
rx: UnboundedReceiver<Conn>,
|
||||
rx2: UnboundedReceiver<Stop>,
|
||||
counter: WorkerCounter,
|
||||
services: Box<[WorkerService]>,
|
||||
availability: WorkerAvailability,
|
||||
conns: Counter,
|
||||
factories: Box<[Box<dyn InternalServiceFactory>]>,
|
||||
state: WorkerState,
|
||||
shutdown_timeout: Duration,
|
||||
@@ -159,7 +224,7 @@ impl WorkerService {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
enum WorkerServiceStatus {
|
||||
Available,
|
||||
Unavailable,
|
||||
@@ -170,7 +235,7 @@ enum WorkerServiceStatus {
|
||||
}
|
||||
|
||||
/// Config for worker behavior passed down from server builder.
|
||||
#[derive(Copy, Clone)]
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub(crate) struct ServerWorkerConfig {
|
||||
shutdown_timeout: Duration,
|
||||
max_blocking_threads: usize,
|
||||
@@ -207,89 +272,119 @@ impl ServerWorker {
|
||||
pub(crate) fn start(
|
||||
idx: usize,
|
||||
factories: Vec<Box<dyn InternalServiceFactory>>,
|
||||
availability: WorkerAvailability,
|
||||
waker_queue: WakerQueue,
|
||||
config: ServerWorkerConfig,
|
||||
) -> (WorkerHandleAccept, WorkerHandleServer) {
|
||||
assert!(!availability.available());
|
||||
) -> io::Result<(WorkerHandleAccept, WorkerHandleServer)> {
|
||||
trace!("starting server worker {}", idx);
|
||||
|
||||
let (tx1, rx) = unbounded_channel();
|
||||
let (tx2, rx2) = unbounded_channel();
|
||||
let avail = availability.clone();
|
||||
|
||||
let counter = Counter::new(config.max_concurrent_connections);
|
||||
|
||||
let counter_clone = counter.clone();
|
||||
// every worker runs in its own arbiter.
|
||||
// use a custom tokio runtime builder to change the settings of the runtime.
|
||||
Arbiter::with_tokio_rt(move || {
|
||||
tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.max_blocking_threads(config.max_blocking_threads)
|
||||
.build()
|
||||
.unwrap()
|
||||
})
|
||||
.spawn(async move {
|
||||
let fut = factories
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, factory)| {
|
||||
let fut = factory.create();
|
||||
async move {
|
||||
fut.await.map(|r| {
|
||||
r.into_iter().map(|(t, s)| (idx, t, s)).collect::<Vec<_>>()
|
||||
})
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
#[cfg(all(target_os = "linux", feature = "io-uring"))]
|
||||
let arbiter = {
|
||||
// TODO: pass max blocking thread config when tokio-uring enables configuration
|
||||
// on building the runtime.
|
||||
let _ = config.max_blocking_threads;
|
||||
Arbiter::new()
|
||||
};
|
||||
|
||||
// a second spawn to run !Send future tasks.
|
||||
spawn(async move {
|
||||
let res = join_all(fut)
|
||||
// get actix system context if it is set
|
||||
let sys = System::try_current();
|
||||
|
||||
// service factories initialization channel
|
||||
let (factory_tx, factory_rx) = std::sync::mpsc::sync_channel(1);
|
||||
|
||||
std::thread::Builder::new()
|
||||
.name(format!("actix-server worker {}", idx))
|
||||
.spawn(move || {
|
||||
// forward existing actix system context
|
||||
if let Some(sys) = sys {
|
||||
System::set_current(sys);
|
||||
}
|
||||
|
||||
let rt = tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.max_blocking_threads(config.max_blocking_threads)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
rt.block_on(tokio::task::LocalSet::new().run_until(async move {
|
||||
let fut = factories
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, factory)| {
|
||||
let fut = factory.create();
|
||||
async move { fut.await.map(|(t, s)| (idx, t, s)) }
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// a second spawn to run !Send future tasks.
|
||||
spawn(async move {
|
||||
let res = join_all_local(fut)
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>, _>>();
|
||||
|
||||
let services = match res {
|
||||
Ok(res) => res
|
||||
.into_iter()
|
||||
.fold(Vec::new(), |mut services, (factory, token, service)| {
|
||||
assert_eq!(token, services.len());
|
||||
services.push(WorkerService {
|
||||
factory,
|
||||
service,
|
||||
status: WorkerServiceStatus::Unavailable,
|
||||
});
|
||||
services
|
||||
})
|
||||
.into_boxed_slice(),
|
||||
|
||||
Err(e) => {
|
||||
error!("Can not start worker: {:?}", e);
|
||||
Arbiter::try_current().as_ref().map(ArbiterHandle::stop);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
factory_tx.send(()).unwrap();
|
||||
|
||||
// a third spawn to make sure ServerWorker runs as non boxed future.
|
||||
spawn(ServerWorker {
|
||||
rx,
|
||||
rx2,
|
||||
services,
|
||||
counter: WorkerCounter::new(idx, waker_queue, counter_clone),
|
||||
factories: factories.into_boxed_slice(),
|
||||
state: Default::default(),
|
||||
shutdown_timeout: config.shutdown_timeout,
|
||||
})
|
||||
.await
|
||||
.expect("task 3 panic");
|
||||
})
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>, _>>();
|
||||
let services = match res {
|
||||
Ok(res) => res
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.fold(Vec::new(), |mut services, (factory, token, service)| {
|
||||
assert_eq!(token.0, services.len());
|
||||
services.push(WorkerService {
|
||||
factory,
|
||||
service,
|
||||
status: WorkerServiceStatus::Unavailable,
|
||||
});
|
||||
services
|
||||
})
|
||||
.into_boxed_slice(),
|
||||
Err(e) => {
|
||||
error!("Can not start worker: {:?}", e);
|
||||
Arbiter::current().stop();
|
||||
return;
|
||||
}
|
||||
};
|
||||
.expect("task 2 panic");
|
||||
}))
|
||||
})
|
||||
.expect("worker thread error/panic");
|
||||
|
||||
// a third spawn to make sure ServerWorker runs as non boxed future.
|
||||
spawn(ServerWorker {
|
||||
rx,
|
||||
rx2,
|
||||
services,
|
||||
availability,
|
||||
conns: Counter::new(config.max_concurrent_connections),
|
||||
factories: factories.into_boxed_slice(),
|
||||
state: Default::default(),
|
||||
shutdown_timeout: config.shutdown_timeout,
|
||||
});
|
||||
});
|
||||
});
|
||||
// wait for service factories initialization
|
||||
factory_rx.recv().unwrap();
|
||||
|
||||
handle_pair(idx, tx1, tx2, avail)
|
||||
Ok(handle_pair(idx, tx1, tx2, counter))
|
||||
}
|
||||
|
||||
fn restart_service(&mut self, token: Token, factory_id: usize) {
|
||||
fn restart_service(&mut self, idx: usize, factory_id: usize) {
|
||||
let factory = &self.factories[factory_id];
|
||||
trace!("Service {:?} failed, restarting", factory.name(token));
|
||||
self.services[token.0].status = WorkerServiceStatus::Restarting;
|
||||
trace!("Service {:?} failed, restarting", factory.name(idx));
|
||||
self.services[idx].status = WorkerServiceStatus::Restarting;
|
||||
self.state = WorkerState::Restarting(Restart {
|
||||
factory_id,
|
||||
token,
|
||||
token: idx,
|
||||
fut: factory.create(),
|
||||
});
|
||||
}
|
||||
@@ -307,8 +402,8 @@ impl ServerWorker {
|
||||
});
|
||||
}
|
||||
|
||||
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (Token, usize)> {
|
||||
let mut ready = self.conns.available(cx);
|
||||
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (usize, usize)> {
|
||||
let mut ready = true;
|
||||
for (idx, srv) in self.services.iter_mut().enumerate() {
|
||||
if srv.status == WorkerServiceStatus::Available
|
||||
|| srv.status == WorkerServiceStatus::Unavailable
|
||||
@@ -318,7 +413,7 @@ impl ServerWorker {
|
||||
if srv.status == WorkerServiceStatus::Unavailable {
|
||||
trace!(
|
||||
"Service {:?} is available",
|
||||
self.factories[srv.factory].name(Token(idx))
|
||||
self.factories[srv.factory].name(idx)
|
||||
);
|
||||
srv.status = WorkerServiceStatus::Available;
|
||||
}
|
||||
@@ -329,7 +424,7 @@ impl ServerWorker {
|
||||
if srv.status == WorkerServiceStatus::Available {
|
||||
trace!(
|
||||
"Service {:?} is unavailable",
|
||||
self.factories[srv.factory].name(Token(idx))
|
||||
self.factories[srv.factory].name(idx)
|
||||
);
|
||||
srv.status = WorkerServiceStatus::Unavailable;
|
||||
}
|
||||
@@ -337,10 +432,10 @@ impl ServerWorker {
|
||||
Poll::Ready(Err(_)) => {
|
||||
error!(
|
||||
"Service {:?} readiness check returned error, restarting",
|
||||
self.factories[srv.factory].name(Token(idx))
|
||||
self.factories[srv.factory].name(idx)
|
||||
);
|
||||
srv.status = WorkerServiceStatus::Failed;
|
||||
return Err((Token(idx), srv.factory));
|
||||
return Err((idx, srv.factory));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -359,17 +454,19 @@ enum WorkerState {
|
||||
|
||||
struct Restart {
|
||||
factory_id: usize,
|
||||
token: Token,
|
||||
fut: LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>,
|
||||
token: usize,
|
||||
fut: LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>,
|
||||
}
|
||||
|
||||
// Shutdown keep states necessary for server shutdown:
|
||||
// Sleep for interval check the shutdown progress.
|
||||
// Instant for the start time of shutdown.
|
||||
// Sender for send back the shutdown outcome(force/grace) to StopCommand caller.
|
||||
/// State necessary for server shutdown.
|
||||
struct Shutdown {
|
||||
// Interval for checking the shutdown progress.
|
||||
timer: Pin<Box<Sleep>>,
|
||||
|
||||
/// Start time of shutdown.
|
||||
start_from: Instant,
|
||||
|
||||
/// Notify caller of the shutdown outcome (graceful/force).
|
||||
tx: oneshot::Sender<bool>,
|
||||
}
|
||||
|
||||
@@ -381,12 +478,8 @@ impl Default for WorkerState {
|
||||
|
||||
impl Drop for ServerWorker {
|
||||
fn drop(&mut self) {
|
||||
// Set availability to true so if accept try to send connection to this worker
|
||||
// it would find worker is gone and remove it.
|
||||
// This is helpful when worker is dropped unexpected.
|
||||
self.availability.set(true);
|
||||
// Stop the Arbiter ServerWorker runs on on drop.
|
||||
Arbiter::current().stop();
|
||||
trace!("stopping ServerWorker Arbiter");
|
||||
Arbiter::try_current().as_ref().map(ArbiterHandle::stop);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -399,14 +492,13 @@ impl Future for ServerWorker {
|
||||
// `StopWorker` message handler
|
||||
if let Poll::Ready(Some(Stop { graceful, tx })) = Pin::new(&mut this.rx2).poll_recv(cx)
|
||||
{
|
||||
this.availability.set(false);
|
||||
let num = this.conns.total();
|
||||
let num = this.counter.total();
|
||||
if num == 0 {
|
||||
info!("Shutting down worker, 0 connections");
|
||||
info!("Shutting down idle worker");
|
||||
let _ = tx.send(true);
|
||||
return Poll::Ready(());
|
||||
} else if graceful {
|
||||
info!("Graceful worker shutdown, {} connections", num);
|
||||
info!("Graceful worker shutdown; finishing {} connections", num);
|
||||
this.shutdown(false);
|
||||
|
||||
this.state = WorkerState::Shutdown(Shutdown {
|
||||
@@ -415,7 +507,7 @@ impl Future for ServerWorker {
|
||||
tx,
|
||||
});
|
||||
} else {
|
||||
info!("Force shutdown worker, {} connections", num);
|
||||
info!("Force shutdown worker, closing {} connections", num);
|
||||
this.shutdown(true);
|
||||
|
||||
let _ = tx.send(false);
|
||||
@@ -427,7 +519,6 @@ impl Future for ServerWorker {
|
||||
WorkerState::Unavailable => match this.check_readiness(cx) {
|
||||
Ok(true) => {
|
||||
this.state = WorkerState::Available;
|
||||
this.availability.set(true);
|
||||
self.poll(cx)
|
||||
}
|
||||
Ok(false) => Poll::Pending,
|
||||
@@ -440,48 +531,46 @@ impl Future for ServerWorker {
|
||||
let factory_id = restart.factory_id;
|
||||
let token = restart.token;
|
||||
|
||||
let service = ready!(restart.fut.as_mut().poll(cx))
|
||||
let (token_new, service) = ready!(restart.fut.as_mut().poll(cx))
|
||||
.unwrap_or_else(|_| {
|
||||
panic!(
|
||||
"Can not restart {:?} service",
|
||||
this.factories[factory_id].name(token)
|
||||
)
|
||||
})
|
||||
.into_iter()
|
||||
// Find the same token from vector. There should be only one
|
||||
// So the first match would be enough.
|
||||
.find(|(t, _)| *t == token)
|
||||
.map(|(_, service)| service)
|
||||
.expect("No BoxedServerService found");
|
||||
});
|
||||
|
||||
assert_eq!(token, token_new);
|
||||
|
||||
trace!(
|
||||
"Service {:?} has been restarted",
|
||||
this.factories[factory_id].name(token)
|
||||
);
|
||||
|
||||
this.services[token.0].created(service);
|
||||
this.services[token].created(service);
|
||||
this.state = WorkerState::Unavailable;
|
||||
|
||||
self.poll(cx)
|
||||
}
|
||||
WorkerState::Shutdown(ref mut shutdown) => {
|
||||
// Wait for 1 second.
|
||||
// wait for 1 second
|
||||
ready!(shutdown.timer.as_mut().poll(cx));
|
||||
|
||||
if this.conns.total() == 0 {
|
||||
// Graceful shutdown.
|
||||
if this.counter.total() == 0 {
|
||||
// graceful shutdown
|
||||
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
|
||||
let _ = shutdown.tx.send(true);
|
||||
}
|
||||
|
||||
Poll::Ready(())
|
||||
} else if shutdown.start_from.elapsed() >= this.shutdown_timeout {
|
||||
// Timeout forceful shutdown.
|
||||
// timeout forceful shutdown
|
||||
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
|
||||
let _ = shutdown.tx.send(false);
|
||||
}
|
||||
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
// Reset timer and wait for 1 second.
|
||||
// reset timer and wait for 1 second
|
||||
let time = Instant::now() + Duration::from_secs(1);
|
||||
shutdown.timer.as_mut().reset(time);
|
||||
shutdown.timer.as_mut().poll(cx)
|
||||
@@ -493,22 +582,20 @@ impl Future for ServerWorker {
|
||||
Ok(true) => {}
|
||||
Ok(false) => {
|
||||
trace!("Worker is unavailable");
|
||||
this.availability.set(false);
|
||||
this.state = WorkerState::Unavailable;
|
||||
return self.poll(cx);
|
||||
}
|
||||
Err((token, idx)) => {
|
||||
this.restart_service(token, idx);
|
||||
this.availability.set(false);
|
||||
return self.poll(cx);
|
||||
}
|
||||
}
|
||||
|
||||
// handle incoming io stream
|
||||
match ready!(Pin::new(&mut this.rx).poll_recv(cx)) {
|
||||
// handle incoming io stream
|
||||
Some(msg) => {
|
||||
let guard = this.conns.get();
|
||||
let _ = this.services[msg.token.0].service.call((guard, msg.io));
|
||||
let guard = this.counter.guard();
|
||||
let _ = this.services[msg.token].service.call((guard, msg.io));
|
||||
}
|
||||
None => return Poll::Ready(()),
|
||||
};
|
||||
|
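The worker hunk above replaces the `WorkerAvailability` flag with a shared counter driven by an RAII guard: each accepted connection holds a guard, and dropping it decrements the count, waking the accept loop once the worker has spare capacity. A simplified, self-contained sketch of that guard pattern follows; the `println!` stands in for the real `WakerQueue::wake(WakerInterest::WorkerAvailable(idx))` call, and the exact limit bookkeeping here is illustrative rather than the crate's.

```rust
use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc,
};

struct Counter {
    count: Arc<AtomicUsize>,
    limit: usize,
}

/// Held for the lifetime of one accepted connection.
struct Guard {
    count: Arc<AtomicUsize>,
    limit: usize,
}

impl Counter {
    fn guard(&self) -> Guard {
        self.count.fetch_add(1, Ordering::Relaxed);
        Guard {
            count: Arc::clone(&self.count),
            limit: self.limit,
        }
    }
}

impl Drop for Guard {
    fn drop(&mut self) {
        // `fetch_sub` returns the previous value; crossing back below the
        // limit is the moment the accept loop should be notified.
        if self.count.fetch_sub(1, Ordering::Relaxed) == self.limit {
            println!("worker has spare capacity again; wake the accept loop");
        }
    }
}

fn main() {
    let counter = Counter {
        count: Arc::new(AtomicUsize::new(0)),
        limit: 2,
    };

    let g1 = counter.guard();
    let g2 = counter.guard(); // at the limit: accept stops dispatching here
    drop(g2); // previous value == limit, so the accept loop would be woken
    drop(g1);
}
```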
@@ -5,8 +5,6 @@ use std::{net, thread, time::Duration};
|
||||
use actix_rt::{net::TcpStream, time::sleep};
|
||||
use actix_server::Server;
|
||||
use actix_service::fn_service;
|
||||
use actix_utils::future::ok;
|
||||
use futures_util::future::lazy;
|
||||
|
||||
fn unused_addr() -> net::SocketAddr {
|
||||
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
|
||||
@@ -23,25 +21,28 @@ fn test_bind() {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
let sys = actix_rt::System::new();
|
||||
let srv = sys.block_on(lazy(|_| {
|
||||
Server::build()
|
||||
actix_rt::System::new().block_on(async {
|
||||
let srv = Server::build()
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.bind("test", addr, move || fn_service(|_| ok::<_, ()>(())))
|
||||
.unwrap()
|
||||
.run()
|
||||
}));
|
||||
.bind("test", addr, move || {
|
||||
fn_service(|_| async { Ok::<_, ()>(()) })
|
||||
})?
|
||||
.run();
|
||||
|
||||
let _ = tx.send((srv, actix_rt::System::current()));
|
||||
let _ = sys.run();
|
||||
let _ = tx.send((srv.handle(), actix_rt::System::current()));
|
||||
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
let (_, sys) = rx.recv().unwrap();
|
||||
let (srv, sys) = rx.recv().unwrap();
|
||||
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
|
||||
let _ = srv.stop(true);
|
||||
sys.stop();
|
||||
let _ = h.join();
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -50,25 +51,30 @@ fn test_listen() {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
let sys = actix_rt::System::new();
|
||||
let lst = net::TcpListener::bind(addr).unwrap();
|
||||
sys.block_on(async {
|
||||
Server::build()
|
||||
let lst = net::TcpListener::bind(addr)?;
|
||||
actix_rt::System::new().block_on(async {
|
||||
let srv = Server::build()
|
||||
.disable_signals()
|
||||
.workers(1)
|
||||
.listen("test", lst, move || fn_service(|_| ok::<_, ()>(())))
|
||||
.unwrap()
|
||||
.listen("test", lst, move || {
|
||||
fn_service(|_| async { Ok::<_, ()>(()) })
|
||||
})?
|
||||
.run();
|
||||
let _ = tx.send(actix_rt::System::current());
|
||||
});
|
||||
let _ = sys.run();
|
||||
|
||||
let _ = tx.send((srv.handle(), actix_rt::System::current()));
|
||||
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
let sys = rx.recv().unwrap();
|
||||
|
||||
let (srv, sys) = rx.recv().unwrap();
|
||||
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
|
||||
let _ = srv.stop(true);
|
||||
sys.stop();
|
||||
let _ = h.join();
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -84,9 +90,8 @@ fn test_start() {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
let sys = actix_rt::System::new();
|
||||
let srv = sys.block_on(lazy(|_| {
|
||||
Server::build()
|
||||
actix_rt::System::new().block_on(async {
|
||||
let srv = Server::build()
|
||||
.backlog(100)
|
||||
.disable_signals()
|
||||
.bind("test", addr, move || {
|
||||
@@ -95,13 +100,13 @@ fn test_start() {
|
||||
f.send(Bytes::from_static(b"test")).await.unwrap();
|
||||
Ok::<_, ()>(())
|
||||
})
|
||||
})
|
||||
.unwrap()
|
||||
.run()
|
||||
}));
|
||||
})?
|
||||
.run();
|
||||
|
||||
let _ = tx.send((srv, actix_rt::System::current()));
|
||||
let _ = sys.run();
|
||||
let _ = tx.send((srv.handle(), actix_rt::System::current()));
|
||||
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
let (srv, sys) = rx.recv().unwrap();
|
||||
@@ -134,63 +139,11 @@ fn test_start() {
|
||||
|
||||
// stop
|
||||
let _ = srv.stop(false);
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
sys.stop();
|
||||
h.join().unwrap().unwrap();
|
||||
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
assert!(net::TcpStream::connect(addr).is_err());
|
||||
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
sys.stop();
|
||||
let _ = h.join();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_configure() {
|
||||
let addr1 = unused_addr();
|
||||
let addr2 = unused_addr();
|
||||
let addr3 = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let num = Arc::new(AtomicUsize::new(0));
|
||||
let num2 = num.clone();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
let num = num2.clone();
|
||||
let sys = actix_rt::System::new();
|
||||
let srv = sys.block_on(lazy(|_| {
|
||||
Server::build()
|
||||
.disable_signals()
|
||||
.configure(move |cfg| {
|
||||
let num = num.clone();
|
||||
let lst = net::TcpListener::bind(addr3).unwrap();
|
||||
cfg.bind("addr1", addr1)
|
||||
.unwrap()
|
||||
.bind("addr2", addr2)
|
||||
.unwrap()
|
||||
.listen("addr3", lst)
|
||||
.apply(move |rt| {
|
||||
let num = num.clone();
|
||||
rt.service("addr1", fn_service(|_| ok::<_, ()>(())));
|
||||
rt.service("addr3", fn_service(|_| ok::<_, ()>(())));
|
||||
rt.on_start(lazy(move |_| {
|
||||
let _ = num.fetch_add(1, Ordering::Relaxed);
|
||||
}))
|
||||
})
|
||||
})
|
||||
.unwrap()
|
||||
.workers(1)
|
||||
.run()
|
||||
}));
|
||||
|
||||
let _ = tx.send((srv, actix_rt::System::current()));
|
||||
let _ = sys.run();
|
||||
});
|
||||
let (_, sys) = rx.recv().unwrap();
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
|
||||
assert!(net::TcpStream::connect(addr1).is_ok());
|
||||
assert!(net::TcpStream::connect(addr2).is_ok());
|
||||
assert!(net::TcpStream::connect(addr3).is_ok());
|
||||
assert_eq!(num.load(Ordering::Relaxed), 1);
|
||||
sys.stop();
|
||||
let _ = h.join();
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -213,11 +166,11 @@ async fn test_max_concurrent_connections() {
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
actix_rt::System::new().block_on(async {
|
||||
let server = Server::build()
|
||||
let srv = Server::build()
|
||||
// Set a relatively higher backlog.
|
||||
.backlog(12)
|
||||
// max connection for a worker is 3.
|
||||
.maxconn(max_conn)
|
||||
.max_concurrent_connections(max_conn)
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.bind("test", addr, move || {
|
||||
@@ -234,9 +187,9 @@ async fn test_max_concurrent_connections() {
|
||||
})?
|
||||
.run();
|
||||
|
||||
let _ = tx.send((server.clone(), actix_rt::System::current()));
|
||||
let _ = tx.send((srv.handle(), actix_rt::System::current()));
|
||||
|
||||
server.await
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
@@ -260,9 +213,8 @@ async fn test_max_concurrent_connections() {
|
||||
}
|
||||
|
||||
srv.stop(false).await;
|
||||
|
||||
sys.stop();
|
||||
let _ = h.join().unwrap();
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
@@ -305,85 +257,10 @@ async fn test_service_restart() {
|
||||
let num_clone = num.clone();
|
||||
let num2_clone = num2.clone();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
actix_rt::System::new().block_on(async {
|
||||
let server = Server::build()
|
||||
.backlog(1)
|
||||
.disable_signals()
|
||||
.configure(move |cfg| {
|
||||
let num = num.clone();
|
||||
let num2 = num2.clone();
|
||||
cfg.bind("addr1", addr1)
|
||||
.unwrap()
|
||||
.bind("addr2", addr2)
|
||||
.unwrap()
|
||||
.apply(move |rt| {
|
||||
let num = num.clone();
|
||||
let num2 = num2.clone();
|
||||
rt.service(
|
||||
"addr1",
|
||||
fn_factory(move || {
|
||||
let num = num.clone();
|
||||
async move { Ok::<_, ()>(TestService(num)) }
|
||||
}),
|
||||
);
|
||||
rt.service(
|
||||
"addr2",
|
||||
fn_factory(move || {
|
||||
let num2 = num2.clone();
|
||||
async move { Ok::<_, ()>(TestService(num2)) }
|
||||
}),
|
||||
);
|
||||
})
|
||||
})
|
||||
.unwrap()
|
||||
.workers(1)
|
||||
.run();
|
||||
|
||||
let _ = tx.send((server.clone(), actix_rt::System::current()));
|
||||
server.await
|
||||
})
|
||||
});
|
||||
|
||||
let (server, sys) = rx.recv().unwrap();
|
||||
|
||||
for _ in 0..5 {
|
||||
TcpStream::connect(addr1)
|
||||
.await
|
||||
.unwrap()
|
||||
.shutdown()
|
||||
.await
|
||||
.unwrap();
|
||||
TcpStream::connect(addr2)
|
||||
.await
|
||||
.unwrap()
|
||||
.shutdown()
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
assert!(num_clone.load(Ordering::SeqCst) > 5);
|
||||
assert!(num2_clone.load(Ordering::SeqCst) > 5);
|
||||
|
||||
sys.stop();
|
||||
let _ = server.stop(false);
|
||||
let _ = h.join().unwrap();
|
||||
|
||||
let addr1 = unused_addr();
|
||||
let addr2 = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let num = Arc::new(AtomicUsize::new(0));
|
||||
let num2 = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
let num_clone = num.clone();
|
||||
let num2_clone = num2.clone();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
let num = num.clone();
|
||||
actix_rt::System::new().block_on(async {
|
||||
let server = Server::build()
|
||||
let srv = Server::build()
|
||||
.backlog(1)
|
||||
.disable_signals()
|
||||
.bind("addr1", addr1, move || {
|
||||
@@ -392,25 +269,23 @@ async fn test_service_restart() {
|
||||
let num = num.clone();
|
||||
async move { Ok::<_, ()>(TestService(num)) }
|
||||
})
|
||||
})
|
||||
.unwrap()
|
||||
})?
|
||||
.bind("addr2", addr2, move || {
|
||||
let num2 = num2.clone();
|
||||
fn_factory(move || {
|
||||
let num2 = num2.clone();
|
||||
async move { Ok::<_, ()>(TestService(num2)) }
|
||||
})
|
||||
})
|
||||
.unwrap()
|
||||
})?
|
||||
.workers(1)
|
||||
.run();
|
||||
|
||||
let _ = tx.send((server.clone(), actix_rt::System::current()));
|
||||
server.await
|
||||
let _ = tx.send((srv.handle(), actix_rt::System::current()));
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
let (server, sys) = rx.recv().unwrap();
|
||||
let (srv, sys) = rx.recv().unwrap();
|
||||
|
||||
for _ in 0..5 {
|
||||
TcpStream::connect(addr1)
|
||||
@@ -432,12 +307,12 @@ async fn test_service_restart() {
|
||||
assert!(num_clone.load(Ordering::SeqCst) > 5);
|
||||
assert!(num2_clone.load(Ordering::SeqCst) > 5);
|
||||
|
||||
let _ = srv.stop(false);
|
||||
sys.stop();
|
||||
let _ = server.stop(false);
|
||||
let _ = h.join().unwrap();
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
#[ignore]
|
||||
#[ignore] // non-deterministic on CI
|
||||
#[actix_rt::test]
|
||||
async fn worker_restart() {
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
@@ -504,19 +379,19 @@ async fn worker_restart() {
|
||||
let h = thread::spawn(move || {
|
||||
let counter = counter.clone();
|
||||
actix_rt::System::new().block_on(async {
|
||||
let server = Server::build()
|
||||
let srv = Server::build()
|
||||
.disable_signals()
|
||||
.bind("addr", addr, move || TestServiceFactory(counter.clone()))
|
||||
.unwrap()
|
||||
.bind("addr", addr, move || TestServiceFactory(counter.clone()))?
|
||||
.workers(2)
|
||||
.run();
|
||||
|
||||
let _ = tx.send((server.clone(), actix_rt::System::current()));
|
||||
server.await
|
||||
let _ = tx.send((srv.handle(), actix_rt::System::current()));
|
||||
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
let (server, sys) = rx.recv().unwrap();
|
||||
let (srv, sys) = rx.recv().unwrap();
|
||||
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
@@ -573,7 +448,7 @@ async fn worker_restart() {
|
||||
assert_eq!("3", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
let _ = srv.stop(false);
|
||||
sys.stop();
|
||||
let _ = server.stop(false);
|
||||
let _ = h.join().unwrap();
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
@@ -3,6 +3,10 @@
|
||||
## Unreleased - 2021-xx-xx
|
||||
|
||||
|
||||
## 2.0.1 - 2021-10-11
|
||||
* Documentation fix.
|
||||
|
||||
|
||||
## 2.0.0 - 2021-04-16
|
||||
* Removed pipeline and related structs/functions. [#335]
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "actix-service"
|
||||
version = "2.0.0"
|
||||
version = "2.0.1"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"Rob Ede <robjtede@icloud.com>",
|
||||
@@ -8,7 +8,7 @@ authors = [
|
||||
]
|
||||
description = "Service trait and combinators for representing asynchronous request/response operations."
|
||||
keywords = ["network", "framework", "async", "futures", "service"]
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
categories = ["network-programming", "asynchronous", "no-std"]
|
||||
repository = "https://github.com/actix/actix-net"
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
|
@@ -3,10 +3,10 @@
|
||||
> Service trait and combinators for representing asynchronous request/response operations.
|
||||
|
||||
[](https://crates.io/crates/actix-service)
|
||||
[](https://docs.rs/actix-service/2.0.0)
|
||||
[](https://docs.rs/actix-service/2.0.1)
|
||||
[](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
|
||||

|
||||
[](https://deps.rs/crate/actix-service/2.0.0)
|
||||
[](https://deps.rs/crate/actix-service/2.0.1)
|
||||

|
||||
[](https://discord.gg/NWpN5mmg3x)
|
||||
|
||||
|
actix-service/examples/clone.rs (new file, 33 lines)
@@ -0,0 +1,33 @@
use std::{future::Future, sync::mpsc, time::Duration};

async fn oracle<F, Fut>(f: F) -> (u32, u32)
where
    F: FnOnce() -> Fut + Clone + Send + 'static,
    Fut: Future<Output = u32> + 'static,
{
    let f1 = actix_rt::spawn(f.clone()());
    let f2 = actix_rt::spawn(f());

    (f1.await.unwrap(), f2.await.unwrap())
}

#[actix_rt::main]
async fn main() {
    let (tx, rx) = mpsc::channel();

    let (r1, r2) = oracle({
        let tx = tx.clone();

        || async move {
            tx.send(()).unwrap();
            4 * 4
        }
    })
    .await;
    assert_eq!(r1, r2);

    tx.send(()).unwrap();

    rx.recv_timeout(Duration::from_millis(100)).unwrap();
    rx.recv_timeout(Duration::from_millis(100)).unwrap();
}

@@ -52,9 +52,9 @@ pub fn fn_factory<F, Cfg, Srv, Req, Fut, Err>(
f: F,
) -> FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
Srv: Service<Req>,
F: Fn() -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
FnServiceNoConfig::new(f)
}

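The reordering of the `where` bounds above is purely cosmetic; `fn_factory` behaves the same. As a rough usage sketch (not taken from the repo; assumes an `actix-rt` runtime and may need adjustment):

```rust
use actix_service::{fn_factory, fn_service, Service, ServiceFactory};

#[actix_rt::main]
async fn main() {
    // Build a fresh service instance on every `new_service` call; config is ignored here.
    let factory = fn_factory(|| async {
        Ok::<_, ()>(fn_service(|req: u8| async move { Ok::<_, ()>(req * 2) }))
    });

    // Passing `()` fixes the otherwise-generic config type.
    let svc = factory.new_service(()).await.unwrap();
    assert_eq!(svc.call(21).await.unwrap(), 42);
}
```
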
@@ -77,7 +77,7 @@ use self::ready::{err, ok, ready, Ready};
///
/// fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ... }
///
/// fn call(&self, req: Self::Request) -> Self::Future { ... }
/// fn call(&self, req: u8) -> Self::Future { ... }
/// }
/// ```
///

@@ -1,5 +1,9 @@
/// An implementation of [`poll_ready`]() that always signals readiness.
///
/// This should only be used for basic leaf services that have no concept of un-readiness.
/// For wrapper or other service types, use [`forward_ready!`] for simple cases or write a bespoke
/// `poll_ready` implementation.
///
/// [`poll_ready`]: crate::Service::poll_ready
///
/// # Examples

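To illustrate the doc comment above, a leaf service with no concept of un-readiness could use the macro roughly like this (a minimal sketch, not one of the crate's own examples):

```rust
use std::future::{ready, Ready};

use actix_service::{always_ready, Service};

/// Leaf service that upper-cases its input and can never be "not ready".
struct Shout;

impl Service<String> for Shout {
    type Response = String;
    type Error = std::convert::Infallible;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    // Expands to a `poll_ready` that always returns `Poll::Ready(Ok(()))`.
    always_ready!();

    fn call(&self, req: String) -> Self::Future {
        ready(Ok(req.to_uppercase()))
    }
}
```
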
@@ -9,26 +9,25 @@ use pin_project_lite::pin_project;

use super::{Service, ServiceFactory};

/// Service for the `map_err` combinator, changing the type of a service's
/// error.
/// Service for the `map_err` combinator, changing the type of a service's error.
///
/// This is created by the `ServiceExt::map_err` method.
pub struct MapErr<S, Req, F, E> {
service: S,
f: F,
mapper: F,
_t: PhantomData<(E, Req)>,
}

impl<S, Req, F, E> MapErr<S, Req, F, E> {
/// Create new `MapErr` combinator
pub(crate) fn new(service: S, f: F) -> Self
pub(crate) fn new(service: S, mapper: F) -> Self
where
S: Service<Req>,
F: Fn(S::Error) -> E,
{
Self {
service,
f,
mapper,
_t: PhantomData,
}
}
@@ -42,7 +41,7 @@ where
fn clone(&self) -> Self {
MapErr {
service: self.service.clone(),
f: self.f.clone(),
mapper: self.mapper.clone(),
_t: PhantomData,
}
}
@@ -58,11 +57,11 @@ where
type Future = MapErrFuture<A, Req, F, E>;

fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(&self.f)
self.service.poll_ready(ctx).map_err(&self.mapper)
}

fn call(&self, req: Req) -> Self::Future {
MapErrFuture::new(self.service.call(req), self.f.clone())
MapErrFuture::new(self.service.call(req), self.mapper.clone())
}
}

@@ -105,23 +104,23 @@ where
/// service's error.
///
/// This is created by the `NewServiceExt::map_err` method.
pub struct MapErrServiceFactory<A, Req, F, E>
pub struct MapErrServiceFactory<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E + Clone,
{
a: A,
a: SF,
f: F,
e: PhantomData<(E, Req)>,
}

impl<A, Req, F, E> MapErrServiceFactory<A, Req, F, E>
impl<SF, Req, F, E> MapErrServiceFactory<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E + Clone,
{
/// Create new `MapErr` new service instance
pub(crate) fn new(a: A, f: F) -> Self {
pub(crate) fn new(a: SF, f: F) -> Self {
Self {
a,
f,
@@ -130,10 +129,10 @@ where
}
}

impl<A, Req, F, E> Clone for MapErrServiceFactory<A, Req, F, E>
impl<SF, Req, F, E> Clone for MapErrServiceFactory<SF, Req, F, E>
where
A: ServiceFactory<Req> + Clone,
F: Fn(A::Error) -> E + Clone,
SF: ServiceFactory<Req> + Clone,
F: Fn(SF::Error) -> E + Clone,
{
fn clone(&self) -> Self {
Self {
@@ -144,57 +143,57 @@ where
}
}

impl<A, Req, F, E> ServiceFactory<Req> for MapErrServiceFactory<A, Req, F, E>
impl<SF, Req, F, E> ServiceFactory<Req> for MapErrServiceFactory<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E + Clone,
{
type Response = A::Response;
type Response = SF::Response;
type Error = E;

type Config = A::Config;
type Service = MapErr<A::Service, Req, F, E>;
type InitError = A::InitError;
type Future = MapErrServiceFuture<A, Req, F, E>;
type Config = SF::Config;
type Service = MapErr<SF::Service, Req, F, E>;
type InitError = SF::InitError;
type Future = MapErrServiceFuture<SF, Req, F, E>;

fn new_service(&self, cfg: A::Config) -> Self::Future {
fn new_service(&self, cfg: SF::Config) -> Self::Future {
MapErrServiceFuture::new(self.a.new_service(cfg), self.f.clone())
}
}

pin_project! {
pub struct MapErrServiceFuture<A, Req, F, E>
pub struct MapErrServiceFuture<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E,
{
#[pin]
fut: A::Future,
f: F,
fut: SF::Future,
mapper: F,
}
}

impl<A, Req, F, E> MapErrServiceFuture<A, Req, F, E>
impl<SF, Req, F, E> MapErrServiceFuture<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E,
{
fn new(fut: A::Future, f: F) -> Self {
MapErrServiceFuture { fut, f }
fn new(fut: SF::Future, mapper: F) -> Self {
MapErrServiceFuture { fut, mapper }
}
}

impl<A, Req, F, E> Future for MapErrServiceFuture<A, Req, F, E>
impl<SF, Req, F, E> Future for MapErrServiceFuture<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E + Clone,
{
type Output = Result<MapErr<A::Service, Req, F, E>, A::InitError>;
type Output = Result<MapErr<SF::Service, Req, F, E>, SF::InitError>;

fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if let Poll::Ready(svc) = this.fut.poll(cx)? {
Poll::Ready(Ok(MapErr::new(svc, this.f.clone())))
Poll::Ready(Ok(MapErr::new(svc, this.mapper.clone())))
} else {
Poll::Pending
}

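The field and parameter renames above are internal; the public `ServiceExt::map_err` combinator is unchanged. A hedged sketch of typical usage (assuming `actix-rt` is available for the async test attribute):

```rust
use std::io;

use actix_service::{fn_service, Service, ServiceExt as _};

#[actix_rt::test]
async fn map_err_to_string() {
    let svc = fn_service(|name: &'static str| async move {
        if name.is_empty() {
            Err(io::Error::new(io::ErrorKind::InvalidInput, "empty name"))
        } else {
            Ok(format!("hello {}", name))
        }
    })
    // Convert the io::Error into a plain String without touching the success type.
    .map_err(|err| err.to_string());

    assert_eq!(svc.call("actix").await.unwrap(), "hello actix");
    assert_eq!(svc.call("").await.unwrap_err(), "empty name");
}
```
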
@@ -41,7 +41,7 @@ where
///
/// actix_service::forward_ready!(service);
///
/// fn call(&self, req: S::Request) -> Self::Future {
/// fn call(&self, req: Req) -> Self::Future {
/// TimeoutServiceResponse {
/// fut: self.service.call(req),
/// sleep: Sleep::new(clock::now() + self.timeout),

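For comparison with the doc example above, a tiny wrapper that delegates readiness via `forward_ready!` might look like the following sketch (`AddOne` is a hypothetical name, not part of the crate):

```rust
use actix_service::{forward_ready, Service};

/// Hypothetical wrapper that bumps the request before delegating to the inner service.
struct AddOne<S> {
    service: S,
}

impl<S> Service<u8> for AddOne<S>
where
    S: Service<u8>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    // Expands to a `poll_ready` that forwards to `self.service.poll_ready(cx)`.
    forward_ready!(service);

    fn call(&self, req: u8) -> Self::Future {
        self.service.call(req.wrapping_add(1))
    }
}
```
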
@@ -3,14 +3,29 @@
## Unreleased - 2021-xx-xx


## 3.0.0-beta.7 - 2021-10-20
* Add `webpki_roots_cert_store()` to get rustls compatible webpki roots cert store. [#401]
* Alias `connect::ssl` to `connect::tls`. [#401]

[#401]: https://github.com/actix/actix-net/pull/401


## 3.0.0-beta.6 - 2021-10-19
* Update `tokio-rustls` to `0.23` which uses `rustls` `0.20`. [#396]
* Removed a re-export of `Session` from `rustls` as it no longer exists. [#396]
* Minimum supported Rust version (MSRV) is now 1.52.

[#396]: https://github.com/actix/actix-net/pull/396


## 3.0.0-beta.5 - 2021-03-29
* Changed `connect::ssl::rustls::RustlsConnectorService` to return error when `DNSNameRef`
  generation failed instead of panic. [#296]
* Remove `connect::ssl::openssl::OpensslConnectServiceFactory`. [#297]
* Remove `connect::ssl::openssl::OpensslConnectService`. [#297]
* Add `connect::ssl::native_tls` module for native tls support. [#295]
* Rename `accept::{nativetls => native_tls}`. [#295]
* Remove `connect::TcpConnectService` type. Service callers expecting a `TcpStream` should use
  `connect::ConnectService` instead and call `Connection<T, TcpStream>::into_parts`. [#299]

[#295]: https://github.com/actix/actix-net/pull/295

@@ -1,12 +1,10 @@
[package]
name = "actix-tls"
version = "3.0.0-beta.5"
version = "3.0.0-beta.7"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "TLS acceptor and connector services for Actix ecosystem"
keywords = ["network", "tls", "ssl", "async", "transport"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-tls"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -40,7 +38,7 @@ native-tls = ["tokio-native-tls"]
uri = ["http"]

[dependencies]
actix-codec = "0.4.0-beta.1"
actix-codec = "0.4.0"
actix-rt = { version = "2.2.0", default-features = false }
actix-service = "2.0.0"
actix-utils = "3.0.0"
@@ -56,19 +54,20 @@ tls-openssl = { package = "openssl", version = "0.10.9", optional = true }
tokio-openssl = { version = "0.6", optional = true }

# rustls
tokio-rustls = { version = "0.22", optional = true }
webpki-roots = { version = "0.21", optional = true }
tokio-rustls = { version = "0.23", optional = true }
webpki-roots = { version = "0.22", optional = true }

# native-tls
tokio-native-tls = { version = "0.3", optional = true }

[dev-dependencies]
actix-rt = "2.2.0"
actix-server = "2.0.0-beta.5"
actix-server = "2.0.0-beta.6"
bytes = "1"
env_logger = "0.8"
env_logger = "0.9"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
log = "0.4"
rustls-pemfile = "0.2.1"
trust-dns-resolver = "0.20.0"

[[example]]

@@ -35,25 +35,29 @@ use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::rustls::{Acceptor as RustlsAcceptor, TlsStream};
use futures_util::future::ok;
use log::info;
use rustls::{
internal::pemfile::certs, internal::pemfile::rsa_private_keys, NoClientAuth, ServerConfig,
};
use rustls::{server::ServerConfig, Certificate, PrivateKey};
use rustls_pemfile::{certs, rsa_private_keys};

#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "info");
env_logger::init();

let mut tls_config = ServerConfig::new(NoClientAuth::new());

// Load TLS key and cert files
let cert_file = &mut BufReader::new(File::open("./examples/cert.pem").unwrap());
let key_file = &mut BufReader::new(File::open("./examples/key.pem").unwrap());

let cert_chain = certs(cert_file).unwrap();
let cert_chain = certs(cert_file)
.unwrap()
.into_iter()
.map(Certificate)
.collect();
let mut keys = rsa_private_keys(key_file).unwrap();
tls_config
.set_single_cert(cert_chain, keys.remove(0))

let tls_config = ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(cert_chain, PrivateKey(keys.remove(0)))
.unwrap();

let tls_acceptor = RustlsAcceptor::new(tls_config);

@@ -14,7 +14,7 @@ use actix_utils::counter::{Counter, CounterGuard};
use futures_core::future::LocalBoxFuture;
use tokio_rustls::{Accept, TlsAcceptor};

pub use tokio_rustls::rustls::{ServerConfig, Session};
pub use tokio_rustls::rustls::ServerConfig;

use super::MAX_CONN_COUNTER;

@@ -19,7 +19,7 @@ pub trait Address: Unpin + 'static {

impl Address for String {
fn hostname(&self) -> &str {
&self
self
}
}

@@ -21,7 +21,9 @@ mod connector;
mod error;
mod resolve;
mod service;
pub mod ssl;
pub mod tls;
#[doc(hidden)]
pub use tls as ssl;
#[cfg(feature = "uri")]
mod uri;

@@ -164,7 +164,7 @@ impl<T: Address> Service<Connect<T>> for Resolver {
}

Self::Custom(resolver) => {
let resolver = Rc::clone(&resolver);
let resolver = Rc::clone(resolver);
ResolverFuture::LookupCustom(Box::pin(async move {
let addrs = resolver
.lookup(req.hostname(), req.port())

|
||||
//! SSL Services
|
||||
//! TLS Services
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
pub mod openssl;
|
@@ -1,4 +1,5 @@
use std::{
convert::TryFrom,
future::Future,
io,
pin::Pin,
@@ -6,7 +7,6 @@ use std::{
task::{Context, Poll},
};

pub use tokio_rustls::rustls::Session;
pub use tokio_rustls::{client::TlsStream, rustls::ClientConfig};
pub use webpki_roots::TLS_SERVER_ROOTS;

@@ -14,11 +14,26 @@ use actix_rt::net::ActixStream;
use actix_service::{Service, ServiceFactory};
use futures_core::{future::LocalBoxFuture, ready};
use log::trace;
use tokio_rustls::webpki::DNSNameRef;
use tokio_rustls::rustls::{client::ServerName, OwnedTrustAnchor, RootCertStore};
use tokio_rustls::{Connect, TlsConnector};

use crate::connect::{Address, Connection};

/// Returns standard root certificates from `webpki-roots` crate as a rustls certificate store.
pub fn webpki_roots_cert_store() -> RootCertStore {
let mut root_certs = RootCertStore::empty();
for cert in TLS_SERVER_ROOTS.0 {
let cert = OwnedTrustAnchor::from_subject_spki_name_constraints(
cert.subject,
cert.spki,
cert.name_constraints,
);
let certs = vec![cert].into_iter();
root_certs.add_server_trust_anchors(certs);
}
root_certs
}

/// Rustls connector factory
pub struct RustlsConnector {
connector: Arc<ClientConfig>,
@@ -89,7 +104,7 @@ where
trace!("SSL Handshake start for: {:?}", connection.host());
let (stream, connection) = connection.replace_io(());

match DNSNameRef::try_from_ascii_str(connection.host()) {
match ServerName::try_from(connection.host()) {
Ok(host) => RustlsConnectorServiceFuture::Future {
connect: TlsConnector::from(self.connector.clone()).connect(host, stream),
connection: Some(connection),

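Building on the new `webpki_roots_cert_store()` helper above, a client-side config might be assembled roughly like this (a sketch; the exact module path follows this diff's `tls`/`ssl` alias, and the `RustlsConnector::new` constructor is an assumption):

```rust
use std::sync::Arc;

use actix_tls::connect::tls::rustls::{webpki_roots_cert_store, ClientConfig, RustlsConnector};

fn rustls_connector() -> RustlsConnector {
    // rustls 0.20-style client config seeded with the webpki root store helper.
    let config = ClientConfig::builder()
        .with_safe_defaults()
        .with_root_certificates(webpki_roots_cert_store())
        .with_no_client_auth();

    // Assumed constructor taking an `Arc<ClientConfig>`, per the struct shown in this diff.
    RustlsConnector::new(Arc::new(config))
}
```
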
@@ -5,6 +5,7 @@
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]

#[cfg(feature = "openssl")]
#[allow(unused_extern_crates)]
extern crate tls_openssl as openssl;

#[cfg(feature = "accept")]

@@ -24,4 +24,5 @@ serde = { version = "1.0", optional = true }

[dev-dependencies]
serde_json = "1.0"
ahash = { version = "0.7", default-features = false }
# TODO: remove when ahash MSRV is restored
ahash = { version = "=0.7.4", default-features = false }

1
clippy.toml
Normal file
@@ -0,0 +1 @@
msrv = "1.48"

@@ -18,4 +18,4 @@ futures-util = { version = "0.3.7", default-features = false }
local-waker = "0.1"

[dev-dependencies]
tokio = { version = "1", features = ["rt", "macros"] }
tokio = { version = "1.5.1", features = ["rt", "macros"] }