Mirror of https://github.com/fafhrd91/actix-net (synced 2025-08-14 03:08:24 +02:00)

Compare commits: 132 commits, rt-v2.0.1 ... tls-v3.0.0
Author | SHA1 | Date: only the abbreviated SHA1 column survives in this compare view. The 132 commits are:

303666278a 669e868370 47f278b17a ca77d8d835 00775884f8 4ff8a2cf68
5c555a9408 ca435b2575 9fa8d7fc5a b03fe7c5b6 6fed1c3e7d c3d697df97
80a362712f 2b1edb95ea 4644fa41cf 98c37fe47d b9455d2ca9 0183b0f8cc
b122a1ae1a 4303058243 48b2e11509 5379a46a99 f8f1ac94bc 82cd5b8290
c65e8524b2 a83dfaa162 e4ec956001 95cba659ff 5687e81d9f a0fe2a9b2e
ad22a93466 c2d5b2398a 5b1ff30dd9 e1317bb3a0 dcea009158 13c18b8a51
06b17d6a43 605ec25143 3824493fd3 3be3e11aa5 6a5ea0342b 23b1f63345
3aa037d07d cf21df14f2 a1bf8662c9 6f4d2220fa 54b22f9fce 983abec77d
e4d4ae21ee 8ad5f58d38 613b2be51f b2e9640952 76338a5822 978e4f25fb
1c4e965366 2435520e67 19468feef8 bd48908792 20c2da17ed fdafc1dd65
7749dfe46a aeb81ad3fd 47fba25d67 7a82288066 4e6d88d143 ef206f40fb
8e98d9168c 3c1f57706a d49ecf7203 e0fb67f646 ddce2d6d12 0a11cf5cba
859f45868d d4829b046d 5961eb892e 995efcf427 f1573931dd 3859e91799
8aade720ed 8079c50ddb 05689b86d9 fd3e5fba02 39d1f282f7 d8889c63ef
fdac52aa11 6d66cfb06a fb27ffc525 b068ea16f8 4eebdf4070 b09e7cd417
2c5c9167a5 ee3a548a85 f21eaa954f 8becb0db70 26a5af70cb 0ee8d032b6
3cf1c548fd 4544562e1b bb27bac216 f9262dbec0 12d3942b98 a3c9ebc7fa
b7bfff2b32 0c73f13c8b 945479e0c3 746cc2ab89 91ea8c5dad 0a705b1023
9e2bcec226 382830a37e 493a1a32c0 50a195e9ce 06ddad0051 789e6a8a46
6e590fd042 fa8ded3a34 841c611233 81a2b6a425 a6e79453d0 17f711a9d6
c3be839a69 8d74cf387d 7e483cc356 75d7ae3139 2cfe1d88ad cb07ead392
32543809f9 eb4d29e15e 7ee42b50b4 0da848e4ae 5f80d85010 16ba77c4c8
.cargo/config.toml (new file, 25 lines)
@@ -0,0 +1,25 @@
[alias]
lint = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"

ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"

# just check the library (without dev deps)
ci-check-min = "hack --workspace check --no-default-features"
ci-check-lib = "hack --workspace --feature-powerset --exclude-features=io-uring check"
ci-check-lib-linux = "hack --workspace --feature-powerset check"

# check everything
ci-check = "hack --workspace --feature-powerset --exclude-features=io-uring check --tests --examples"
ci-check-linux = "hack --workspace --feature-powerset check --tests --examples"

# tests avoiding io-uring feature
ci-test = "hack test --workspace --exclude=actix-rt --exclude=actix-server --all-features --lib --tests --no-fail-fast -- --nocapture"
ci-test-rt = " hack --feature-powerset --exclude-features=io-uring test --package=actix-rt --lib --tests --no-fail-fast -- --nocapture"
ci-test-server = "hack --feature-powerset --exclude-features=io-uring test --package=actix-server --lib --tests --no-fail-fast -- --nocapture"

# test with io-uring feature
ci-test-rt-linux = " hack --feature-powerset test --package=actix-rt --lib --tests --no-fail-fast -- --nocapture"
ci-test-server-linux = "hack --feature-powerset test --package=actix-server --lib --tests --no-fail-fast -- --nocapture"

# test lower msrv
ci-test-lower-msrv = "hack --workspace --exclude=actix-server --exclude=actix-tls --feature-powerset test --lib --tests --no-fail-fast -- --nocapture"
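These aliases are invoked like built-in Cargo subcommands, for example `cargo lint` or `cargo ci-check-lib`; the `ci-*` entries expand to `cargo hack ...` invocations and therefore assume cargo-hack is installed, which the CI workflow below does as an explicit step.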
.github/workflows/ci.yml (vendored, new file, 209 lines)
@@ -0,0 +1,209 @@
name: CI

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches: [master]

jobs:
  build_and_test:
    strategy:
      fail-fast: false
      matrix:
        target:
          - { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
          - { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
          - { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
          - { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
          - { name: Windows (32-bit), os: windows-latest, triple: i686-pc-windows-msvc }
        version:
          - 1.52.0 # MSRV for -server and -tls
          - stable
          - nightly

    name: ${{ matrix.target.name }} / ${{ matrix.version }}
    runs-on: ${{ matrix.target.os }}

    env:
      VCPKGRS_DYNAMIC: 1

    steps:
      - name: Setup Routing
        if: matrix.target.os == 'macos-latest'
        run: sudo ifconfig lo0 alias 127.0.0.3

      - uses: actions/checkout@v2

      # install OpenSSL on Windows
      - name: Set vcpkg root
        if: matrix.target.triple == 'x86_64-pc-windows-msvc' || matrix.target.triple == 'i686-pc-windows-msvc'
        run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
      - name: Install OpenSSL
        if: matrix.target.triple == 'x86_64-pc-windows-msvc'
        run: vcpkg install openssl:x64-windows
      - name: Install OpenSSL
        if: matrix.target.triple == 'i686-pc-windows-msvc'
        run: vcpkg install openssl:x86-windows

      - name: Install ${{ matrix.version }}
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
          profile: minimal
          override: true

      # - name: Install MSYS2
      #   if: matrix.target.triple == 'x86_64-pc-windows-gnu'
      #   uses: msys2/setup-msys2@v2
      # - name: Install MinGW Packages
      #   if: matrix.target.triple == 'x86_64-pc-windows-gnu'
      #   run: |
      #     msys2 -c 'pacman -Sy --noconfirm pacman'
      #     msys2 -c 'pacman --noconfirm -S base-devel pkg-config'

      # - name: Generate Cargo.lock
      #   uses: actions-rs/cargo@v1
      #   with: { command: generate-lockfile }
      # - name: Cache Dependencies
      #   uses: Swatinem/rust-cache@v1.2.0

      - name: Install cargo-hack
        uses: actions-rs/cargo@v1
        with:
          command: install
          args: cargo-hack

      - name: check lib
        if: >
          matrix.target.os != 'ubuntu-latest'
          && matrix.target.triple != 'x86_64-pc-windows-gnu'
        uses: actions-rs/cargo@v1
        with: { command: ci-check-lib }
      - name: check lib
        if: matrix.target.os == 'ubuntu-latest'
        uses: actions-rs/cargo@v1
        with: { command: ci-check-lib-linux }
      - name: check lib
        if: matrix.target.triple == 'x86_64-pc-windows-gnu'
        uses: actions-rs/cargo@v1
        with: { command: ci-check-min }

      - name: check full
        # TODO: compile OpenSSL and run tests on MinGW
        if: >
          matrix.target.os != 'ubuntu-latest'
          && matrix.target.triple != 'x86_64-pc-windows-gnu'
        uses: actions-rs/cargo@v1
        with: { command: ci-check }
      - name: check all
        if: matrix.target.os == 'ubuntu-latest'
        uses: actions-rs/cargo@v1
        with: { command: ci-check-linux }

      - name: tests
        if: >
          matrix.target.os != 'ubuntu-latest'
          && matrix.target.triple != 'x86_64-pc-windows-gnu'
        run: |
          cargo ci-test
          cargo ci-test-rt
          cargo ci-test-server
      - name: tests
        if: matrix.target.os == 'ubuntu-latest'
        run: |
          cargo ci-test
          cargo ci-test-rt-linux
          cargo ci-test-server-linux

      - name: Clear the cargo caches
        run: |
          cargo install cargo-cache --version 0.6.2 --no-default-features --features ci-autoclean
          cargo-cache

  build_and_test_lower_msrv:
    name: Linux / 1.46 (lower MSRV)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Install 1.46.0 # MSRV for all but -server and -tls
        uses: actions-rs/toolchain@v1
        with:
          toolchain: 1.46.0-x86_64-unknown-linux-gnu
          profile: minimal
          override: true

      - name: Install cargo-hack
        uses: actions-rs/cargo@v1
        with:
          command: install
          args: cargo-hack

      - name: tests
        run: cargo ci-test-lower-msrv

      - name: Clear the cargo caches
        run: |
          cargo install cargo-cache --version 0.6.2 --no-default-features --features ci-autoclean
          cargo-cache

  coverage:
    name: coverage
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Install Rust (nightly)
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable-x86_64-unknown-linux-gnu
          profile: minimal
          override: true

      - name: Generate Cargo.lock
        uses: actions-rs/cargo@v1
        with: { command: generate-lockfile }
      - name: Cache Dependencies
        uses: Swatinem/rust-cache@v1.3.0

      - name: Generate coverage file
        if: github.ref == 'refs/heads/master'
        run: |
          cargo install cargo-tarpaulin
          cargo tarpaulin --out Xml --verbose
      - name: Upload to Codecov
        if: github.ref == 'refs/heads/master'
        uses: codecov/codecov-action@v1
        with: { file: cobertura.xml }

  rustdoc:
    name: rustdoc
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install Rust (nightly)
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-x86_64-unknown-linux-gnu
          profile: minimal
          override: true

      - name: Generate Cargo.lock
        uses: actions-rs/cargo@v1
        with: { command: generate-lockfile }
      - name: Cache Dependencies
        uses: Swatinem/rust-cache@v1.3.0

      - name: Install cargo-hack
        uses: actions-rs/cargo@v1
        with:
          command: install
          args: cargo-hack

      - name: doc tests
        uses: actions-rs/cargo@v1
        timeout-minutes: 40
        with: { command: ci-doctest }
.github/workflows/clippy-fmt.yml (vendored, 28 changed lines)
@@ -1,34 +1,42 @@
name: Lint

on:
  pull_request:
    types: [opened, synchronize, reopened]

name: Clippy and rustfmt Check
jobs:
  clippy_check:
  fmt:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - uses: actions-rs/toolchain@v1
      - name: Install Rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          components: rustfmt
          profile: minimal
          components: rustfmt
          override: true
      - name: Check with rustfmt
      - name: Rustfmt Check
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check

      - uses: actions-rs/toolchain@v1
  clippy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Install Rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          components: clippy
          toolchain: stable
          profile: minimal
          components: clippy
          override: true
      - name: Check with Clippy
      - name: Clippy Check
        uses: actions-rs/clippy-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --workspace --tests
          args: --workspace --all-features --tests --examples --bins -- -Dclippy::todo
.github/workflows/linux.yml (vendored, deleted, 82 lines)
@@ -1,82 +0,0 @@
|
||||
name: CI (Linux)
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- '1.0'
|
||||
|
||||
jobs:
|
||||
build_and_test:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
version:
|
||||
- 1.46.0
|
||||
- stable
|
||||
- nightly
|
||||
|
||||
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install ${{ matrix.version }}
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: generate-lockfile
|
||||
- name: Cache cargo dirs
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path:
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
~/.cargo/bin
|
||||
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-trimmed-${{ hashFiles('**/Cargo.lock') }}
|
||||
- name: Cache cargo build
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: target
|
||||
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-build-trimmed-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- name: check build
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --workspace --bins --examples --tests
|
||||
|
||||
- name: tests
|
||||
uses: actions-rs/cargo@v1
|
||||
timeout-minutes: 40
|
||||
with:
|
||||
command: test
|
||||
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture
|
||||
|
||||
- name: Generate coverage file
|
||||
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
|
||||
run: |
|
||||
cargo install cargo-tarpaulin
|
||||
cargo tarpaulin --out Xml --workspace
|
||||
|
||||
- name: Upload to Codecov
|
||||
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
|
||||
uses: codecov/codecov-action@v1
|
||||
with:
|
||||
file: cobertura.xml
|
||||
|
||||
- name: Clear the cargo caches
|
||||
run: |
|
||||
rustup update stable
|
||||
rustup override set stable
|
||||
cargo install cargo-cache --no-default-features --features ci-autoclean
|
||||
cargo-cache
|
.github/workflows/macos.yml (vendored, deleted, 43 lines)
@@ -1,43 +0,0 @@
|
||||
name: CI (macOS)
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- '1.0'
|
||||
|
||||
jobs:
|
||||
build_and_test:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
version:
|
||||
- stable
|
||||
- nightly
|
||||
|
||||
name: ${{ matrix.version }} - x86_64-apple-darwin
|
||||
runs-on: macos-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install ${{ matrix.version }}
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: check build
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --workspace --bins --examples --tests
|
||||
|
||||
- name: tests
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: test
|
||||
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture
|
.github/workflows/upload-doc.yml (vendored, new file, 35 lines)
@@ -0,0 +1,35 @@
name: Upload documentation

on:
  push:
    branches: [master]

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install Rust
        uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-x86_64-unknown-linux-gnu
          profile: minimal
          override: true

      - name: Build Docs
        uses: actions-rs/cargo@v1
        with:
          command: doc
          args: --workspace --all-features --no-deps

      - name: Tweak HTML
        run: echo '<meta http-equiv="refresh" content="0;url=actix_server/index.html">' > target/doc/index.html

      - name: Deploy to GitHub Pages
        uses: JamesIves/github-pages-deploy-action@3.7.1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          BRANCH: gh-pages
          FOLDER: target/doc
.github/workflows/windows-mingw.yml (vendored, deleted, 45 lines)
@@ -1,45 +0,0 @@
|
||||
name: CI (Windows-mingw)
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- '1.0'
|
||||
|
||||
jobs:
|
||||
build_and_test:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
version:
|
||||
- stable
|
||||
- nightly
|
||||
|
||||
name: ${{ matrix.version }} - x86_64-pc-windows-gnu
|
||||
runs-on: windows-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install ${{ matrix.version }}
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ matrix.version }}-x86_64-pc-windows-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Install MSYS2
|
||||
uses: msys2/setup-msys2@v2
|
||||
|
||||
- name: Install packages
|
||||
run: |
|
||||
msys2 -c 'pacman -Sy --noconfirm pacman'
|
||||
msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
|
||||
|
||||
- name: check build
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --workspace --bins --examples --tests
|
.github/workflows/windows.yml (vendored, deleted, 69 lines)
@@ -1,69 +0,0 @@
|
||||
name: CI (Windows)
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- '1.0'
|
||||
|
||||
env:
|
||||
VCPKGRS_DYNAMIC: 1
|
||||
|
||||
jobs:
|
||||
build_and_test:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
version:
|
||||
- stable
|
||||
- nightly
|
||||
target:
|
||||
- x86_64-pc-windows-msvc
|
||||
- i686-pc-windows-msvc
|
||||
|
||||
name: ${{ matrix.version }} - ${{ matrix.target }}
|
||||
runs-on: windows-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install ${{ matrix.version }}
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ matrix.version }}-${{ matrix.target }}
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Install OpenSSL (x64)
|
||||
if: matrix.target == 'x86_64-pc-windows-msvc'
|
||||
run: |
|
||||
vcpkg integrate install
|
||||
vcpkg install openssl:x64-windows
|
||||
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
|
||||
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
|
||||
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
|
||||
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
|
||||
|
||||
- name: Install OpenSSL (x86)
|
||||
if: matrix.target == 'i686-pc-windows-msvc'
|
||||
run: |
|
||||
vcpkg integrate install
|
||||
vcpkg install openssl:x86-windows
|
||||
Get-ChildItem C:\vcpkg\installed\x86-windows\bin
|
||||
Get-ChildItem C:\vcpkg\installed\x86-windows\lib
|
||||
Copy-Item C:\vcpkg\installed\x86-windows\bin\libcrypto-1_1.dll C:\vcpkg\installed\x86-windows\bin\libcrypto.dll
|
||||
Copy-Item C:\vcpkg\installed\x86-windows\bin\libssl-1_1.dll C:\vcpkg\installed\x86-windows\bin\libssl.dll
|
||||
|
||||
- name: check build
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --workspace --bins --examples --tests
|
||||
|
||||
- name: tests
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: test
|
||||
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture
|
Cargo.toml (11 changed lines)
@@ -2,7 +2,6 @@
members = [
    "actix-codec",
    "actix-macros",
    "actix-router",
    "actix-rt",
    "actix-server",
    "actix-service",
@@ -10,12 +9,13 @@ members = [
    "actix-tracing",
    "actix-utils",
    "bytestring",
    "local-channel",
    "local-waker",
]

[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-macros = { path = "actix-macros" }
actix-router = { path = "actix-router" }
actix-rt = { path = "actix-rt" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
@@ -23,3 +23,10 @@ actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }
local-channel = { path = "local-channel" }
local-waker = { path = "local-waker" }

[profile.release]
lto = true
opt-level = 3
codegen-units = 1
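For context, the `[patch.crates-io]` table above makes any crates.io dependency on these crate names resolve to the local workspace copies when building inside this repository, so cross-crate changes can be tested before a release is published.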
|
||||
|
LICENSE-APACHE
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2017-NOW Nikolay Kim
Copyright 2017-NOW Actix Team

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

LICENSE-MIT
@@ -1,4 +1,4 @@
Copyright (c) 2017 Nikolay Kim
Copyright (c) 2017-NOW Actix Team

Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
actix-codec/CHANGES.md
@@ -3,6 +3,10 @@
## Unreleased - 2021-xx-xx


## 0.4.0 - 2021-04-20
* No significant changes since v0.4.0-beta.1.


## 0.4.0-beta.1 - 2020-12-28
* Replace `pin-project` with `pin-project-lite`. [#237]
* Upgrade `tokio` dependency to `1`. [#237]
@@ -23,28 +27,28 @@
## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec` `.encode()` performance
* Simplify `BytesCodec` `.decode()`
* Improve `BytesCodec::encode()` performance.
* Simplify `BytesCodec::decode()`.
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` check emptiness of read buffer.


## 0.2.0 - 2019-12-10
* Use specific futures dependencies
* Use specific futures dependencies.


## 0.2.0-alpha.4
* Fix buffer remaining capacity calculation
* Fix buffer remaining capacity calculation.


## 0.2.0-alpha.3
* Use tokio 0.2
* Fix low/high watermark for write/read buffers
* Use tokio 0.2.
* Fix low/high watermark for write/read buffers.


## 0.2.0-alpha.2
* Migrated to `std::future`
* Migrated to `std::future`.


## 0.1.2 - 2019-03-27
@@ -56,4 +60,4 @@


## 0.1.0 - 2018-12-09
* Move codec to separate crate
* Move codec to separate crate.
actix-codec/Cargo.toml
@@ -1,12 +1,10 @@
[package]
name = "actix-codec"
version = "0.4.0-beta.1"
version = "0.4.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec"
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -22,5 +20,5 @@ futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
log = "0.4"
pin-project-lite = "0.2"
tokio = "1"
tokio = "1.5.1"
tokio-util = { version = "0.6", features = ["codec", "io"] }
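The source diff that follows mostly rewraps the `Framed` doc comments. As background, a minimal usage sketch of `Framed` as a combined `Stream`/`Sink` (illustrative only; assumes a Tokio `TcpStream`, this crate's `BytesCodec`, and futures-util for the `StreamExt`/`SinkExt` adapters):

use actix_codec::{BytesCodec, Framed};
use futures_util::{SinkExt as _, StreamExt as _};
use tokio::net::TcpStream;

async fn echo_one_frame(io: TcpStream) -> std::io::Result<()> {
    // Framed layers the Encoder/Decoder pair over the raw I/O object,
    // yielding decoded frames as a Stream and accepting frames as a Sink.
    let mut framed = Framed::new(io, BytesCodec);
    if let Some(frame) = framed.next().await {
        let bytes = frame?;                 // BytesMut produced by the decoder
        framed.send(bytes.freeze()).await?; // encoded and flushed back out
    }
    Ok(())
}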
|
||||
|
@@ -21,14 +21,13 @@ bitflags::bitflags! {
|
||||
}
|
||||
|
||||
pin_project_lite::pin_project! {
|
||||
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
|
||||
/// the `Encoder` and `Decoder` traits to encode and decode frames.
|
||||
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using the `Encoder` and
|
||||
/// `Decoder` traits to encode and decode frames.
|
||||
///
|
||||
/// Raw I/O objects work with byte sequences, but higher-level code usually
|
||||
/// wants to batch these into meaningful chunks, called "frames". This
|
||||
/// method layers framing on top of an I/O object, by using the `Encoder`/`Decoder`
|
||||
/// traits to handle encoding and decoding of message frames. Note that
|
||||
/// the incoming and outgoing frame types may be distinct.
|
||||
/// Raw I/O objects work with byte sequences, but higher-level code usually wants to batch these
|
||||
/// into meaningful chunks, called "frames". This method layers framing on top of an I/O object,
|
||||
/// by using the `Encoder`/`Decoder` traits to handle encoding and decoding of message frames.
|
||||
/// Note that the incoming and outgoing frame types may be distinct.
|
||||
pub struct Framed<T, U> {
|
||||
#[pin]
|
||||
io: T,
|
||||
@@ -44,10 +43,9 @@ where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
U: Decoder,
|
||||
{
|
||||
/// This function returns a *single* object that is both `Stream` and
|
||||
/// `Sink`; grouping this into a single object is often useful for layering
|
||||
/// things like gzip or TLS, which require both read and write access to the
|
||||
/// underlying object.
|
||||
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
|
||||
/// a single object is often useful for layering things like gzip or TLS, which require both
|
||||
/// read and write access to the underlying object.
|
||||
pub fn new(io: T, codec: U) -> Framed<T, U> {
|
||||
Framed {
|
||||
io,
|
||||
@@ -70,21 +68,18 @@ impl<T, U> Framed<T, U> {
|
||||
&mut self.codec
|
||||
}
|
||||
|
||||
/// Returns a reference to the underlying I/O stream wrapped by
|
||||
/// `Frame`.
|
||||
/// Returns a reference to the underlying I/O stream wrapped by `Frame`.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn io_ref(&self) -> &T {
|
||||
&self.io
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying I/O stream.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn io_mut(&mut self) -> &mut T {
|
||||
&mut self.io
|
||||
}
|
||||
@@ -183,16 +178,15 @@ impl<T, U> Framed<T, U> {
|
||||
U: Decoder,
|
||||
{
|
||||
loop {
|
||||
let mut this = self.as_mut().project();
|
||||
// Repeatedly call `decode` or `decode_eof` as long as it is
|
||||
// "readable". Readable is defined as not having returned `None`. If
|
||||
// the upstream has returned EOF, and the decoder is no longer
|
||||
// readable, it can be assumed that the decoder will never become
|
||||
let this = self.as_mut().project();
|
||||
// Repeatedly call `decode` or `decode_eof` as long as it is "readable". Readable is
|
||||
// defined as not having returned `None`. If the upstream has returned EOF, and the
|
||||
// decoder is no longer readable, it can be assumed that the decoder will never become
|
||||
// readable again, at which point the stream is terminated.
|
||||
|
||||
if this.flags.contains(Flags::READABLE) {
|
||||
if this.flags.contains(Flags::EOF) {
|
||||
match this.codec.decode_eof(&mut this.read_buf) {
|
||||
match this.codec.decode_eof(this.read_buf) {
|
||||
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
|
||||
Ok(None) => return Poll::Ready(None),
|
||||
Err(e) => return Poll::Ready(Some(Err(e))),
|
||||
@@ -201,7 +195,7 @@ impl<T, U> Framed<T, U> {
|
||||
|
||||
log::trace!("attempting to decode a frame");
|
||||
|
||||
match this.codec.decode(&mut this.read_buf) {
|
||||
match this.codec.decode(this.read_buf) {
|
||||
Ok(Some(frame)) => {
|
||||
log::trace!("frame decoded from buffer");
|
||||
return Poll::Ready(Some(Ok(frame)));
|
||||
@@ -215,7 +209,7 @@ impl<T, U> Framed<T, U> {
|
||||
|
||||
debug_assert!(!this.flags.contains(Flags::EOF));
|
||||
|
||||
// Otherwise, try to read more data and try again. Make sure we've got room
|
||||
// Otherwise, try to read more data and try again. Make sure we've got room.
|
||||
let remaining = this.read_buf.capacity() - this.read_buf.len();
|
||||
if remaining < LW {
|
||||
this.read_buf.reserve(HW - remaining)
|
||||
@@ -341,13 +335,12 @@ where
|
||||
}
|
||||
|
||||
impl<T, U> Framed<T, U> {
|
||||
/// This function returns a *single* object that is both `Stream` and
|
||||
/// `Sink`; grouping this into a single object is often useful for layering
|
||||
/// things like gzip or TLS, which require both read and write access to the
|
||||
/// underlying object.
|
||||
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
|
||||
/// a single object is often useful for layering things like gzip or TLS, which require both
|
||||
/// read and write access to the underlying object.
|
||||
///
|
||||
/// These objects take a stream, a read buffer and a write buffer. These
|
||||
/// fields can be obtained from an existing `Framed` with the `into_parts` method.
|
||||
/// These objects take a stream, a read buffer and a write buffer. These fields can be obtained
|
||||
/// from an existing `Framed` with the `into_parts` method.
|
||||
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
|
||||
Framed {
|
||||
io: parts.io,
|
||||
@@ -358,12 +351,11 @@ impl<T, U> Framed<T, U> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer
|
||||
/// with unprocessed data, and the codec.
|
||||
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer with unprocessed data,
|
||||
/// and the codec.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn into_parts(self) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io: self.io,
|
||||
@@ -376,14 +368,15 @@ impl<T, U> Framed<T, U> {
|
||||
}
|
||||
|
||||
/// `FramedParts` contains an export of the data of a Framed transport.
|
||||
/// It can be used to construct a new `Framed` with a different codec.
|
||||
/// It contains all current buffers and the inner transport.
|
||||
///
|
||||
/// It can be used to construct a new `Framed` with a different codec. It contains all current
|
||||
/// buffers and the inner transport.
|
||||
#[derive(Debug)]
|
||||
pub struct FramedParts<T, U> {
|
||||
/// The inner transport used to read bytes to and write bytes to
|
||||
/// The inner transport used to read bytes to and write bytes to.
|
||||
pub io: T,
|
||||
|
||||
/// The codec
|
||||
/// The codec object.
|
||||
pub codec: U,
|
||||
|
||||
/// The buffer with read but unprocessed data.
|
||||
@@ -396,7 +389,7 @@ pub struct FramedParts<T, U> {
|
||||
}
|
||||
|
||||
impl<T, U> FramedParts<T, U> {
|
||||
/// Create a new, default, `FramedParts`
|
||||
/// Creates a new default `FramedParts`.
|
||||
pub fn new(io: T, codec: U) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io,
|
||||
@@ -407,7 +400,7 @@ impl<T, U> FramedParts<T, U> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `FramedParts` with read buffer
|
||||
/// Creates a new `FramedParts` with read buffer.
|
||||
pub fn with_read_buf(io: T, codec: U, read_buf: BytesMut) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io,
|
||||
|
@@ -7,7 +7,7 @@
|
||||
//! [`Sink`]: futures_sink::Sink
|
||||
//! [`Stream`]: futures_core::Stream
|
||||
|
||||
#![deny(rust_2018_idioms, nonstandard_style)]
|
||||
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
|
||||
#![warn(missing_docs)]
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
actix-macros/CHANGES.md
@@ -3,6 +3,25 @@
## Unreleased - 2021-xx-xx


## 0.2.3 - 2021-10-19
* Fix test macro in presence of other imports named "test". [#399]

[#399]: https://github.com/actix/actix-net/pull/399


## 0.2.2 - 2021-10-14
* Improve error recovery potential when macro input is invalid. [#391]
* Allow custom `System`s on test macro. [#391]

[#391]: https://github.com/actix/actix-net/pull/391


## 0.2.1 - 2021-02-02
* Add optional argument `system` to `main` macro which can be used to specify the path to `actix_rt::System` (useful for re-exports). [#363]

[#363]: https://github.com/actix/actix-net/pull/363


## 0.2.0 - 2021-02-02
* Update to latest `actix_rt::System::new` signature. [#261]
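To illustrate the 0.2.1 entry above (the path shown is a hypothetical re-export; the real compile tests for this live under tests/trybuild/ further down in this diff):

// `my_crate::rt` is a hypothetical re-export of actix-rt.
#[actix_rt::main(system = "my_crate::rt::System")]
async fn main() {
    // The generated main() builds the runtime through the given path
    // instead of the default `::actix_rt::System`.
}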
|
||||
|
||||
|
actix-macros/Cargo.toml
@@ -1,10 +1,13 @@
[package]
name = "actix-macros"
version = "0.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
version = "0.2.3"
authors = [
    "Nikolay Kim <fafhrd91@gmail.com>",
    "Ibraheem Ahmed <ibrah1440@gmail.com>",
    "Rob Ede <robjtede@icloud.com>",
]
description = "Macros for Actix system and runtime"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-macros"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -19,5 +22,6 @@ syn = { version = "^1", features = ["full"] }
[dev-dependencies]
actix-rt = "2.0.0"

futures-util = { version = "0.3", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
rustversion = "1"
trybuild = "1"
@@ -27,8 +27,15 @@ use quote::quote;
|
||||
#[allow(clippy::needless_doctest_main)]
|
||||
#[proc_macro_attribute]
|
||||
#[cfg(not(test))] // Work around for rust-lang/rust#62127
|
||||
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
|
||||
pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
|
||||
Ok(input) => input,
|
||||
// on parse err, make IDEs happy; see fn docs
|
||||
Err(err) => return input_and_compile_error(item, err),
|
||||
};
|
||||
|
||||
let args = syn::parse_macro_input!(args as syn::AttributeArgs);
|
||||
|
||||
let attrs = &input.attrs;
|
||||
let vis = &input.vis;
|
||||
let sig = &mut input.sig;
|
||||
@@ -43,13 +50,47 @@ pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
.into();
|
||||
}
|
||||
|
||||
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
|
||||
|
||||
for arg in &args {
|
||||
match arg {
|
||||
syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue {
|
||||
lit: syn::Lit::Str(lit),
|
||||
path,
|
||||
..
|
||||
})) => match path
|
||||
.get_ident()
|
||||
.map(|i| i.to_string().to_lowercase())
|
||||
.as_deref()
|
||||
{
|
||||
Some("system") => match lit.parse() {
|
||||
Ok(path) => system = path,
|
||||
Err(_) => {
|
||||
return syn::Error::new_spanned(lit, "Expected path")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sig.asyncness = None;
|
||||
|
||||
(quote! {
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
actix_rt::System::new()
|
||||
.block_on(async move { #body })
|
||||
<#system>::new().block_on(async move { #body })
|
||||
}
|
||||
})
|
||||
.into()
|
||||
@@ -65,8 +106,15 @@ pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
/// }
|
||||
/// ```
|
||||
#[proc_macro_attribute]
|
||||
pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
|
||||
pub fn test(args: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
|
||||
Ok(input) => input,
|
||||
// on parse err, make IDEs happy; see fn docs
|
||||
Err(err) => return input_and_compile_error(item, err),
|
||||
};
|
||||
|
||||
let args = syn::parse_macro_input!(args as syn::AttributeArgs);
|
||||
|
||||
let attrs = &input.attrs;
|
||||
let vis = &input.vis;
|
||||
let sig = &mut input.sig;
|
||||
@@ -91,18 +139,64 @@ pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
sig.asyncness = None;
|
||||
|
||||
let missing_test_attr = if has_test_attr {
|
||||
quote!()
|
||||
quote! {}
|
||||
} else {
|
||||
quote!(#[test])
|
||||
quote! { #[::core::prelude::v1::test] }
|
||||
};
|
||||
|
||||
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
|
||||
|
||||
for arg in &args {
|
||||
match arg {
|
||||
syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue {
|
||||
lit: syn::Lit::Str(lit),
|
||||
path,
|
||||
..
|
||||
})) => match path
|
||||
.get_ident()
|
||||
.map(|i| i.to_string().to_lowercase())
|
||||
.as_deref()
|
||||
{
|
||||
Some("system") => match lit.parse() {
|
||||
Ok(path) => system = path,
|
||||
Err(_) => {
|
||||
return syn::Error::new_spanned(lit, "Expected path")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
(quote! {
|
||||
#missing_test_attr
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
actix_rt::System::new()
|
||||
.block_on(async { #body })
|
||||
<#system>::new().block_on(async { #body })
|
||||
}
|
||||
})
|
||||
.into()
|
||||
}
|
||||
|
||||
/// Converts the error to a token stream and appends it to the original input.
|
||||
///
|
||||
/// Returning the original input in addition to the error is good for IDEs which can gracefully
|
||||
/// recover and show more precise errors within the macro body.
|
||||
///
|
||||
/// See <https://github.com/rust-analyzer/rust-analyzer/issues/10468> for more info.
|
||||
fn input_and_compile_error(mut item: TokenStream, err: syn::Error) -> TokenStream {
|
||||
let compile_err = TokenStream::from(err.to_compile_error());
|
||||
item.extend(compile_err);
|
||||
item
|
||||
}
|
||||
|
@@ -1,11 +1,18 @@
#[rustversion::stable(1.46)] // MSRV
#[test]
fn compile_macros() {
    let t = trybuild::TestCases::new();
    t.pass("tests/trybuild/main-01-basic.rs");
    t.compile_fail("tests/trybuild/main-02-only-async.rs");
    t.pass("tests/trybuild/main-03-fn-params.rs");
    t.pass("tests/trybuild/main-04-system-path.rs");
    t.compile_fail("tests/trybuild/main-05-system-expect-path.rs");
    t.compile_fail("tests/trybuild/main-06-unknown-attr.rs");

    t.pass("tests/trybuild/test-01-basic.rs");
    t.pass("tests/trybuild/test-02-keep-attrs.rs");
    t.compile_fail("tests/trybuild/test-03-only-async.rs");
    t.pass("tests/trybuild/test-04-system-path.rs");
    t.compile_fail("tests/trybuild/test-05-system-expect-path.rs");
    t.compile_fail("tests/trybuild/test-06-unknown-attr.rs");
}
actix-macros/tests/trybuild/main-04-system-path.rs (new file, 8 lines)
@@ -0,0 +1,8 @@
mod system {
    pub use actix_rt::System as MySystem;
}

#[actix_rt::main(system = "system::MySystem")]
async fn main() {
    futures_util::future::ready(()).await
}
actix-macros/tests/trybuild/main-05-system-expect-path.rs (new file, 4 lines)
@@ -0,0 +1,4 @@
#[actix_rt::main(system = "!@#*&")]
async fn main2() {}

fn main() {}
actix-macros/tests/trybuild/main-05-system-expect-path.stderr (new file, 5 lines)
@@ -0,0 +1,5 @@
error: Expected path
 --> $DIR/main-05-system-expect-path.rs:1:27
  |
1 | #[actix_rt::main(system = "!@#*&")]
  |                           ^^^^^^^
actix-macros/tests/trybuild/main-06-unknown-attr.rs (new file, 7 lines)
@@ -0,0 +1,7 @@
#[actix_rt::main(foo = "bar")]
async fn async_main() {}

#[actix_rt::main(bar::baz)]
async fn async_main2() {}

fn main() {}
actix-macros/tests/trybuild/main-06-unknown-attr.stderr (new file, 11 lines)
@@ -0,0 +1,11 @@
error: Unknown attribute specified
 --> $DIR/main-06-unknown-attr.rs:1:18
  |
1 | #[actix_rt::main(foo = "bar")]
  |                  ^^^^^^^^^^^

error: Unknown attribute specified
 --> $DIR/main-06-unknown-attr.rs:4:18
  |
4 | #[actix_rt::main(bar::baz)]
  |                  ^^^^^^^^
actix-macros/tests/trybuild/test-04-system-path.rs (new file, 10 lines)
@@ -0,0 +1,10 @@
mod system {
    pub use actix_rt::System as MySystem;
}

#[actix_rt::test(system = "system::MySystem")]
async fn my_test() {
    futures_util::future::ready(()).await
}

fn main() {}
actix-macros/tests/trybuild/test-05-system-expect-path.rs (new file, 4 lines)
@@ -0,0 +1,4 @@
#[actix_rt::test(system = "!@#*&")]
async fn my_test() {}

fn main() {}
actix-macros/tests/trybuild/test-05-system-expect-path.stderr (new file, 5 lines)
@@ -0,0 +1,5 @@
error: Expected path
 --> $DIR/test-05-system-expect-path.rs:1:27
  |
1 | #[actix_rt::test(system = "!@#*&")]
  |                           ^^^^^^^
actix-macros/tests/trybuild/test-06-unknown-attr.rs (new file, 7 lines)
@@ -0,0 +1,7 @@
#[actix_rt::test(foo = "bar")]
async fn my_test_1() {}

#[actix_rt::test(bar::baz)]
async fn my_test_2() {}

fn main() {}
actix-macros/tests/trybuild/test-06-unknown-attr.stderr (new file, 11 lines)
@@ -0,0 +1,11 @@
error: Unknown attribute specified
 --> $DIR/test-06-unknown-attr.rs:1:18
  |
1 | #[actix_rt::test(foo = "bar")]
  |                  ^^^^^^^^^^^

error: Unknown attribute specified
 --> $DIR/test-06-unknown-attr.rs:4:18
  |
4 | #[actix_rt::test(bar::baz)]
  |                  ^^^^^^^^
@@ -1,64 +0,0 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2021-xx-xx
|
||||
* Add `Router::recognize_checked` [#247]
|
||||
|
||||
[#247]: https://github.com/actix/actix-net/pull/247
|
||||
|
||||
|
||||
## 0.2.6 - 2021-01-09
|
||||
* Use `bytestring` version range compatible with Bytes v1.0. [#246]
|
||||
|
||||
[#246]: https://github.com/actix/actix-net/pull/246
|
||||
|
||||
|
||||
## 0.2.5 - 2020-09-20
|
||||
* Fix `from_hex()` method
|
||||
|
||||
|
||||
## 0.2.4 - 2019-12-31
|
||||
* Add `ResourceDef::resource_path_named()` path generation method
|
||||
|
||||
|
||||
## 0.2.3 - 2019-12-25
|
||||
* Add impl `IntoPattern` for `&String`
|
||||
|
||||
|
||||
## 0.2.2 - 2019-12-25
|
||||
* Use `IntoPattern` for `RouterBuilder::path()`
|
||||
|
||||
|
||||
## 0.2.1 - 2019-12-25
|
||||
* Add `IntoPattern` trait
|
||||
* Add multi-pattern resources
|
||||
|
||||
|
||||
## 0.2.0 - 2019-12-07
|
||||
* Update http to 0.2
|
||||
* Update regex to 1.3
|
||||
* Use bytestring instead of string
|
||||
|
||||
|
||||
## 0.1.5 - 2019-05-15
|
||||
* Remove debug prints
|
||||
|
||||
|
||||
## 0.1.4 - 2019-05-15
|
||||
* Fix checked resource match
|
||||
|
||||
|
||||
## 0.1.3 - 2019-04-22
|
||||
* Added support for `remainder match` (i.e "/path/{tail}*")
|
||||
|
||||
|
||||
## 0.1.2 - 2019-04-07
|
||||
* Export `Quoter` type
|
||||
* Allow to reset `Path` instance
|
||||
|
||||
|
||||
## 0.1.1 - 2019-04-03
|
||||
* Get dynamic segment by name instead of iterator.
|
||||
|
||||
|
||||
## 0.1.0 - 2019-03-09
|
||||
* Initial release
|
@@ -1,29 +0,0 @@
|
||||
[package]
|
||||
name = "actix-router"
|
||||
version = "0.2.6"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "Resource path matching library"
|
||||
keywords = ["actix"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-router"
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[lib]
|
||||
name = "actix_router"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[features]
|
||||
default = ["http"]
|
||||
|
||||
[dependencies]
|
||||
regex = "1.3.1"
|
||||
serde = "1.0.104"
|
||||
bytestring = ">=0.1.5, <2"
|
||||
log = "0.4.8"
|
||||
http = { version = "0.2.2", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
http = "0.2.2"
|
||||
serde_derive = "1.0"
|
LICENSE-APACHE symlink (deleted)
@@ -1 +0,0 @@
../LICENSE-APACHE

LICENSE-MIT symlink (deleted)
@@ -1 +0,0 @@
../LICENSE-MIT
@@ -1,717 +0,0 @@
|
||||
use serde::de::{self, Deserializer, Error as DeError, Visitor};
|
||||
use serde::forward_to_deserialize_any;
|
||||
|
||||
use crate::path::{Path, PathIter};
|
||||
use crate::ResourcePath;
|
||||
|
||||
macro_rules! unsupported_type {
|
||||
($trait_fn:ident, $name:expr) => {
|
||||
fn $trait_fn<V>(self, _: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom(concat!(
|
||||
"unsupported type: ",
|
||||
$name
|
||||
)))
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! parse_single_value {
|
||||
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
|
||||
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
if self.path.len() != 1 {
|
||||
Err(de::value::Error::custom(
|
||||
format!("wrong number of parameters: {} expected 1", self.path.len())
|
||||
.as_str(),
|
||||
))
|
||||
} else {
|
||||
let v = self.path[0].parse().map_err(|_| {
|
||||
de::value::Error::custom(format!(
|
||||
"can not parse {:?} to a {}",
|
||||
&self.path[0], $tp
|
||||
))
|
||||
})?;
|
||||
visitor.$visit_fn(v)
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub struct PathDeserializer<'de, T: ResourcePath> {
|
||||
path: &'de Path<T>,
|
||||
}
|
||||
|
||||
impl<'de, T: ResourcePath + 'de> PathDeserializer<'de, T> {
|
||||
pub fn new(path: &'de Path<T>) -> Self {
|
||||
PathDeserializer { path }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T> {
|
||||
type Error = de::value::Error;
|
||||
|
||||
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_map(ParamsDeserializer {
|
||||
params: self.path.iter(),
|
||||
current: None,
|
||||
})
|
||||
}
|
||||
|
||||
fn deserialize_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
_: &'static [&'static str],
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
self.deserialize_map(visitor)
|
||||
}
|
||||
|
||||
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_unit()
|
||||
}
|
||||
|
||||
fn deserialize_unit_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
self.deserialize_unit(visitor)
|
||||
}
|
||||
|
||||
fn deserialize_newtype_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_newtype_struct(self)
|
||||
}
|
||||
|
||||
fn deserialize_tuple<V>(self, len: usize, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
if self.path.len() < len {
|
||||
Err(de::value::Error::custom(
|
||||
format!(
|
||||
"wrong number of parameters: {} expected {}",
|
||||
self.path.len(),
|
||||
len
|
||||
)
|
||||
.as_str(),
|
||||
))
|
||||
} else {
|
||||
visitor.visit_seq(ParamsSeq {
|
||||
params: self.path.iter(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn deserialize_tuple_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
len: usize,
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
if self.path.len() < len {
|
||||
Err(de::value::Error::custom(
|
||||
format!(
|
||||
"wrong number of parameters: {} expected {}",
|
||||
self.path.len(),
|
||||
len
|
||||
)
|
||||
.as_str(),
|
||||
))
|
||||
} else {
|
||||
visitor.visit_seq(ParamsSeq {
|
||||
params: self.path.iter(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn deserialize_enum<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
_: &'static [&'static str],
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
if self.path.is_empty() {
|
||||
Err(de::value::Error::custom("expected at least one parameters"))
|
||||
} else {
|
||||
visitor.visit_enum(ValueEnum {
|
||||
value: &self.path[0],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
if self.path.len() != 1 {
|
||||
Err(de::value::Error::custom(
|
||||
format!("wrong number of parameters: {} expected 1", self.path.len()).as_str(),
|
||||
))
|
||||
} else {
|
||||
visitor.visit_str(&self.path[0])
|
||||
}
|
||||
}
|
||||
|
||||
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_seq(ParamsSeq {
|
||||
params: self.path.iter(),
|
||||
})
|
||||
}
|
||||
|
||||
unsupported_type!(deserialize_any, "'any'");
|
||||
unsupported_type!(deserialize_bytes, "bytes");
|
||||
unsupported_type!(deserialize_option, "Option<T>");
|
||||
unsupported_type!(deserialize_identifier, "identifier");
|
||||
unsupported_type!(deserialize_ignored_any, "ignored_any");
|
||||
|
||||
parse_single_value!(deserialize_bool, visit_bool, "bool");
|
||||
parse_single_value!(deserialize_i8, visit_i8, "i8");
|
||||
parse_single_value!(deserialize_i16, visit_i16, "i16");
|
||||
parse_single_value!(deserialize_i32, visit_i32, "i32");
|
||||
parse_single_value!(deserialize_i64, visit_i64, "i64");
|
||||
parse_single_value!(deserialize_u8, visit_u8, "u8");
|
||||
parse_single_value!(deserialize_u16, visit_u16, "u16");
|
||||
parse_single_value!(deserialize_u32, visit_u32, "u32");
|
||||
parse_single_value!(deserialize_u64, visit_u64, "u64");
|
||||
parse_single_value!(deserialize_f32, visit_f32, "f32");
|
||||
parse_single_value!(deserialize_f64, visit_f64, "f64");
|
||||
parse_single_value!(deserialize_string, visit_string, "String");
|
||||
parse_single_value!(deserialize_byte_buf, visit_string, "String");
|
||||
parse_single_value!(deserialize_char, visit_char, "char");
|
||||
}
|
||||
|
||||
struct ParamsDeserializer<'de, T: ResourcePath> {
|
||||
params: PathIter<'de, T>,
|
||||
current: Option<(&'de str, &'de str)>,
|
||||
}
|
||||
|
||||
impl<'de, T: ResourcePath> de::MapAccess<'de> for ParamsDeserializer<'de, T> {
|
||||
type Error = de::value::Error;
|
||||
|
||||
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Self::Error>
|
||||
where
|
||||
K: de::DeserializeSeed<'de>,
|
||||
{
|
||||
self.current = self.params.next().map(|ref item| (item.0, item.1));
|
||||
match self.current {
|
||||
Some((key, _)) => Ok(Some(seed.deserialize(Key { key })?)),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: de::DeserializeSeed<'de>,
|
||||
{
|
||||
if let Some((_, value)) = self.current.take() {
|
||||
seed.deserialize(Value { value })
|
||||
} else {
|
||||
Err(de::value::Error::custom("unexpected item"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct Key<'de> {
|
||||
key: &'de str,
|
||||
}
|
||||
|
||||
impl<'de> Deserializer<'de> for Key<'de> {
|
||||
type Error = de::value::Error;
|
||||
|
||||
fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_str(self.key)
|
||||
}
|
||||
|
||||
fn deserialize_any<V>(self, _visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("Unexpected"))
|
||||
}
|
||||
|
||||
forward_to_deserialize_any! {
|
||||
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string bytes
|
||||
byte_buf option unit unit_struct newtype_struct seq tuple
|
||||
tuple_struct map struct enum ignored_any
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! parse_value {
|
||||
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
|
||||
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
let v = self.value.parse().map_err(|_| {
|
||||
de::value::Error::custom(format!("can not parse {:?} to a {}", self.value, $tp))
|
||||
})?;
|
||||
visitor.$visit_fn(v)
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
struct Value<'de> {
|
||||
value: &'de str,
|
||||
}
|
||||
|
||||
impl<'de> Deserializer<'de> for Value<'de> {
|
||||
type Error = de::value::Error;
|
||||
|
||||
parse_value!(deserialize_bool, visit_bool, "bool");
|
||||
parse_value!(deserialize_i8, visit_i8, "i8");
|
||||
parse_value!(deserialize_i16, visit_i16, "i16");
|
||||
parse_value!(deserialize_i32, visit_i32, "i16");
|
||||
parse_value!(deserialize_i64, visit_i64, "i64");
|
||||
parse_value!(deserialize_u8, visit_u8, "u8");
|
||||
parse_value!(deserialize_u16, visit_u16, "u16");
|
||||
parse_value!(deserialize_u32, visit_u32, "u32");
|
||||
parse_value!(deserialize_u64, visit_u64, "u64");
|
||||
parse_value!(deserialize_f32, visit_f32, "f32");
|
||||
parse_value!(deserialize_f64, visit_f64, "f64");
|
||||
parse_value!(deserialize_string, visit_string, "String");
|
||||
parse_value!(deserialize_byte_buf, visit_string, "String");
|
||||
parse_value!(deserialize_char, visit_char, "char");
|
||||
|
||||
fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_unit()
|
||||
}
|
||||
|
||||
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_unit()
|
||||
}
|
||||
|
||||
fn deserialize_unit_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_unit()
|
||||
}
|
||||
|
||||
fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_borrowed_bytes(self.value.as_bytes())
|
||||
}
|
||||
|
||||
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_borrowed_str(self.value)
|
||||
}
|
||||
|
||||
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_some(self)
|
||||
}
|
||||
|
||||
fn deserialize_enum<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
_: &'static [&'static str],
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_enum(ValueEnum { value: self.value })
|
||||
}
|
||||
|
||||
fn deserialize_newtype_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
visitor: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
visitor.visit_newtype_struct(self)
|
||||
}
|
||||
|
||||
fn deserialize_tuple<V>(self, _: usize, _: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("unsupported type: tuple"))
|
||||
}
|
||||
|
||||
fn deserialize_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
_: &'static [&'static str],
|
||||
_: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("unsupported type: struct"))
|
||||
}
|
||||
|
||||
fn deserialize_tuple_struct<V>(
|
||||
self,
|
||||
_: &'static str,
|
||||
_: usize,
|
||||
_: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("unsupported type: tuple struct"))
|
||||
}
|
||||
|
||||
unsupported_type!(deserialize_any, "any");
|
||||
unsupported_type!(deserialize_seq, "seq");
|
||||
unsupported_type!(deserialize_map, "map");
|
||||
unsupported_type!(deserialize_identifier, "identifier");
|
||||
}
|
||||
|
||||
struct ParamsSeq<'de, T: ResourcePath> {
|
||||
params: PathIter<'de, T>,
|
||||
}
|
||||
|
||||
impl<'de, T: ResourcePath> de::SeqAccess<'de> for ParamsSeq<'de, T> {
|
||||
type Error = de::value::Error;
|
||||
|
||||
fn next_element_seed<U>(&mut self, seed: U) -> Result<Option<U::Value>, Self::Error>
|
||||
where
|
||||
U: de::DeserializeSeed<'de>,
|
||||
{
|
||||
match self.params.next() {
|
||||
Some(item) => Ok(Some(seed.deserialize(Value { value: item.1 })?)),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ValueEnum<'de> {
|
||||
value: &'de str,
|
||||
}
|
||||
|
||||
impl<'de> de::EnumAccess<'de> for ValueEnum<'de> {
|
||||
type Error = de::value::Error;
|
||||
type Variant = UnitVariant;
|
||||
|
||||
fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
|
||||
where
|
||||
V: de::DeserializeSeed<'de>,
|
||||
{
|
||||
Ok((seed.deserialize(Key { key: self.value })?, UnitVariant))
|
||||
}
|
||||
}
|
||||
|
||||
struct UnitVariant;
|
||||
|
||||
impl<'de> de::VariantAccess<'de> for UnitVariant {
|
||||
type Error = de::value::Error;
|
||||
|
||||
fn unit_variant(self) -> Result<(), Self::Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value, Self::Error>
|
||||
where
|
||||
T: de::DeserializeSeed<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("not supported"))
|
||||
}
|
||||
|
||||
fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("not supported"))
|
||||
}
|
||||
|
||||
fn struct_variant<V>(
|
||||
self,
|
||||
_: &'static [&'static str],
|
||||
_: V,
|
||||
) -> Result<V::Value, Self::Error>
|
||||
where
|
||||
V: Visitor<'de>,
|
||||
{
|
||||
Err(de::value::Error::custom("not supported"))
|
||||
}
|
||||
}
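// Note (added for clarity): a matched path segment is a single borrowed
// string, so enums can only be deserialized as unit variants here; the
// newtype, tuple, and struct variant methods above intentionally return
// "not supported" errors.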
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use serde::de;
|
||||
use serde_derive::Deserialize;
|
||||
|
||||
use super::*;
|
||||
use crate::path::Path;
|
||||
use crate::router::Router;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct MyStruct {
|
||||
key: String,
|
||||
value: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct Id {
|
||||
_id: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct Test1(String, u32);
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct Test2 {
|
||||
key: String,
|
||||
value: u32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum TestEnum {
|
||||
Val1,
|
||||
Val2,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct Test3 {
|
||||
val: TestEnum,
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_request_extract() {
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{key}/{value}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/name/user1/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
|
||||
let s: MyStruct = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(s.key, "name");
|
||||
assert_eq!(s.value, "user1");
|
||||
|
||||
let s: (String, String) =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(s.0, "name");
|
||||
assert_eq!(s.1, "user1");
|
||||
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{key}/{value}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/name/32/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
|
||||
let s: Test1 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(s.0, "name");
|
||||
assert_eq!(s.1, 32);
|
||||
|
||||
let s: Test2 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(s.key, "name");
|
||||
assert_eq!(s.value, 32);
|
||||
|
||||
let s: (String, u8) =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(s.0, "name");
|
||||
assert_eq!(s.1, 32);
|
||||
|
||||
let res: Vec<String> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(res[0], "name".to_owned());
|
||||
assert_eq!(res[1], "32".to_owned());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_path_single() {
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{value}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/32/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
let i: i8 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(i, 32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_enum() {
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{val}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/val1/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
let i: TestEnum = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(i, TestEnum::Val1);
|
||||
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{val1}/{val2}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/val1/val2/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
let i: (TestEnum, TestEnum) =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(i, (TestEnum::Val1, TestEnum::Val2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_enum_value() {
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{val}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/val1/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
let i: Test3 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
|
||||
assert_eq!(i.val, TestEnum::Val1);
|
||||
|
||||
let mut path = Path::new("/val3/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
let i: Result<Test3, de::value::Error> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path));
|
||||
assert!(i.is_err());
|
||||
assert!(format!("{:?}", i).contains("unknown variant"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_errors() {
|
||||
let mut router = Router::<()>::build();
|
||||
router.path("/{value}/", ());
|
||||
let router = router.finish();
|
||||
|
||||
let mut path = Path::new("/name/");
|
||||
assert!(router.recognize(&mut path).is_some());
|
||||
|
||||
let s: Result<Test1, de::value::Error> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path));
|
||||
assert!(s.is_err());
|
||||
assert!(format!("{:?}", s).contains("wrong number of parameters"));
|
||||
|
||||
let s: Result<Test2, de::value::Error> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path));
|
||||
assert!(s.is_err());
|
||||
assert!(format!("{:?}", s).contains("can not parse"));
|
||||
|
||||
let s: Result<(String, String), de::value::Error> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path));
|
||||
assert!(s.is_err());
|
||||
assert!(format!("{:?}", s).contains("wrong number of parameters"));
|
||||
|
||||
let s: Result<u32, de::value::Error> =
|
||||
de::Deserialize::deserialize(PathDeserializer::new(&path));
|
||||
assert!(s.is_err());
|
||||
assert!(format!("{:?}", s).contains("can not parse"));
|
||||
}
|
||||
|
||||
// #[test]
|
||||
// fn test_extract_path_decode() {
|
||||
// let mut router = Router::<()>::default();
|
||||
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
|
||||
|
||||
// macro_rules! test_single_value {
|
||||
// ($value:expr, $expected:expr) => {{
|
||||
// let req = TestRequest::with_uri($value).finish();
|
||||
// let info = router.recognize(&req, &(), 0);
|
||||
// let req = req.with_route_info(info);
|
||||
// assert_eq!(
|
||||
// *Path::<String>::from_request(&req, &PathConfig::default()).unwrap(),
|
||||
// $expected
|
||||
// );
|
||||
// }};
|
||||
// }
|
||||
|
||||
// test_single_value!("/%25/", "%");
|
||||
// test_single_value!("/%40%C2%A3%24%25%5E%26%2B%3D/", "@£$%^&+=");
|
||||
// test_single_value!("/%2B/", "+");
|
||||
// test_single_value!("/%252B/", "%2B");
|
||||
// test_single_value!("/%2F/", "/");
|
||||
// test_single_value!("/%252F/", "%2F");
|
||||
// test_single_value!(
|
||||
// "/http%3A%2F%2Flocalhost%3A80%2Ffoo/",
|
||||
// "http://localhost:80/foo"
|
||||
// );
|
||||
// test_single_value!("/%2Fvar%2Flog%2Fsyslog/", "/var/log/syslog");
|
||||
// test_single_value!(
|
||||
// "/http%3A%2F%2Flocalhost%3A80%2Ffile%2F%252Fvar%252Flog%252Fsyslog/",
|
||||
// "http://localhost:80/file/%2Fvar%2Flog%2Fsyslog"
|
||||
// );
|
||||
|
||||
// let req = TestRequest::with_uri("/%25/7/?id=test").finish();
|
||||
|
||||
// let mut router = Router::<()>::default();
|
||||
// router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/")));
|
||||
// let info = router.recognize(&req, &(), 0);
|
||||
// let req = req.with_route_info(info);
|
||||
|
||||
// let s = Path::<Test2>::from_request(&req, &PathConfig::default()).unwrap();
|
||||
// assert_eq!(s.key, "%");
|
||||
// assert_eq!(s.value, 7);
|
||||
|
||||
// let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap();
|
||||
// assert_eq!(s.0, "%");
|
||||
// assert_eq!(s.1, "7");
|
||||
// }
|
||||
|
||||
// #[test]
|
||||
// fn test_extract_path_no_decode() {
|
||||
// let mut router = Router::<()>::default();
|
||||
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
|
||||
|
||||
// let req = TestRequest::with_uri("/%25/").finish();
|
||||
// let info = router.recognize(&req, &(), 0);
|
||||
// let req = req.with_route_info(info);
|
||||
// assert_eq!(
|
||||
// *Path::<String>::from_request(&req, &&PathConfig::default().disable_decoding())
|
||||
// .unwrap(),
|
||||
// "%25"
|
||||
// );
|
||||
// }
|
||||
}
|
@@ -1,152 +0,0 @@
|
||||
//! Resource path matching library.
|
||||
|
||||
#![deny(rust_2018_idioms, nonstandard_style)]
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
mod de;
|
||||
mod path;
|
||||
mod resource;
|
||||
mod router;
|
||||
|
||||
pub use self::de::PathDeserializer;
|
||||
pub use self::path::Path;
|
||||
pub use self::resource::ResourceDef;
|
||||
pub use self::router::{ResourceInfo, Router, RouterBuilder};
|
||||
|
||||
pub trait Resource<T: ResourcePath> {
    fn resource_path(&mut self) -> &mut Path<T>;
}

pub trait ResourcePath {
    fn path(&self) -> &str;
}

impl ResourcePath for String {
    fn path(&self) -> &str {
        self.as_str()
    }
}

impl<'a> ResourcePath for &'a str {
    fn path(&self) -> &str {
        self
    }
}

impl ResourcePath for bytestring::ByteString {
    fn path(&self) -> &str {
        &*self
    }
}
|
||||
|
||||
/// Helper trait for types that can be converted into a path pattern.
|
||||
pub trait IntoPattern {
|
||||
fn is_single(&self) -> bool;
|
||||
|
||||
fn patterns(&self) -> Vec<String>;
|
||||
}
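// Example sketch (added for illustration, not part of the original API): a
// helper generic over `IntoPattern`, mirroring how `ResourceDef::new` accepts
// either a single pattern or a set of alternative patterns.
#[allow(dead_code)]
fn describe_patterns<T: IntoPattern>(path: T) -> String {
    if path.is_single() {
        // e.g. "/user/{id}" or any other single pattern
        format!("single pattern: {}", path.patterns()[0])
    } else {
        // e.g. vec!["/index.json", "/{source}.json"]
        format!("{} alternative patterns", path.patterns().len())
    }
}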
|
||||
|
||||
impl IntoPattern for String {
|
||||
fn is_single(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn patterns(&self) -> Vec<String> {
|
||||
vec![self.clone()]
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> IntoPattern for &'a String {
|
||||
fn is_single(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn patterns(&self) -> Vec<String> {
|
||||
vec![self.as_str().to_string()]
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> IntoPattern for &'a str {
|
||||
fn is_single(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn patterns(&self) -> Vec<String> {
|
||||
vec![(*self).to_string()]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsRef<str>> IntoPattern for Vec<T> {
|
||||
fn is_single(&self) -> bool {
|
||||
self.len() == 1
|
||||
}
|
||||
|
||||
fn patterns(&self) -> Vec<String> {
|
||||
self.iter().map(|v| v.as_ref().to_string()).collect()
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! array_patterns (($tp:ty, $num:tt) => {
|
||||
impl IntoPattern for [$tp; $num] {
|
||||
fn is_single(&self) -> bool {
|
||||
$num == 1
|
||||
}
|
||||
|
||||
fn patterns(&self) -> Vec<String> {
|
||||
self.iter().map(|v| v.to_string()).collect()
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
array_patterns!(&str, 1);
|
||||
array_patterns!(&str, 2);
|
||||
array_patterns!(&str, 3);
|
||||
array_patterns!(&str, 4);
|
||||
array_patterns!(&str, 5);
|
||||
array_patterns!(&str, 6);
|
||||
array_patterns!(&str, 7);
|
||||
array_patterns!(&str, 8);
|
||||
array_patterns!(&str, 9);
|
||||
array_patterns!(&str, 10);
|
||||
array_patterns!(&str, 11);
|
||||
array_patterns!(&str, 12);
|
||||
array_patterns!(&str, 13);
|
||||
array_patterns!(&str, 14);
|
||||
array_patterns!(&str, 15);
|
||||
array_patterns!(&str, 16);
|
||||
|
||||
array_patterns!(String, 1);
|
||||
array_patterns!(String, 2);
|
||||
array_patterns!(String, 3);
|
||||
array_patterns!(String, 4);
|
||||
array_patterns!(String, 5);
|
||||
array_patterns!(String, 6);
|
||||
array_patterns!(String, 7);
|
||||
array_patterns!(String, 8);
|
||||
array_patterns!(String, 9);
|
||||
array_patterns!(String, 10);
|
||||
array_patterns!(String, 11);
|
||||
array_patterns!(String, 12);
|
||||
array_patterns!(String, 13);
|
||||
array_patterns!(String, 14);
|
||||
array_patterns!(String, 15);
|
||||
array_patterns!(String, 16);
|
||||
|
||||
#[cfg(feature = "http")]
|
||||
mod url;
|
||||
|
||||
#[cfg(feature = "http")]
|
||||
pub use self::url::{Quoter, Url};
|
||||
|
||||
#[cfg(feature = "http")]
|
||||
mod http_support {
|
||||
use super::ResourcePath;
|
||||
use http::Uri;
|
||||
|
||||
impl ResourcePath for Uri {
|
||||
fn path(&self) -> &str {
|
||||
self.path()
|
||||
}
|
||||
}
|
||||
}
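// Usage sketch (illustration only; requires the `http` feature): because of
// the `ResourcePath` impl above, an `http::Uri` can be wrapped in `Path`
// directly, so only its path component takes part in matching.
#[cfg(all(test, feature = "http"))]
mod uri_path_sketch {
    use std::convert::TryFrom;

    use crate::Path;

    #[test]
    fn uri_as_resource_path() {
        let uri = http::Uri::try_from("/user/123?debug=true").unwrap();
        let path = Path::new(uri);
        // The query string is not part of the matched path.
        assert_eq!(path.path(), "/user/123");
    }
}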
|
@@ -1,222 +0,0 @@
|
||||
use std::ops::Index;
|
||||
|
||||
use serde::de;
|
||||
|
||||
use crate::de::PathDeserializer;
|
||||
use crate::{Resource, ResourcePath};
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub(crate) enum PathItem {
|
||||
Static(&'static str),
|
||||
Segment(u16, u16),
|
||||
}
|
||||
|
||||
/// Resource path match information
|
||||
///
|
||||
/// If resource path contains variable patterns, `Path` stores them.
|
||||
#[derive(Debug)]
|
||||
pub struct Path<T> {
|
||||
path: T,
|
||||
pub(crate) skip: u16,
|
||||
pub(crate) segments: Vec<(&'static str, PathItem)>,
|
||||
}
|
||||
|
||||
impl<T: Default> Default for Path<T> {
|
||||
fn default() -> Self {
|
||||
Path {
|
||||
path: T::default(),
|
||||
skip: 0,
|
||||
segments: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone> Clone for Path<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Path {
|
||||
path: self.path.clone(),
|
||||
skip: self.skip,
|
||||
segments: self.segments.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ResourcePath> Path<T> {
|
||||
pub fn new(path: T) -> Path<T> {
|
||||
Path {
|
||||
path,
|
||||
skip: 0,
|
||||
segments: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get reference to inner path instance
|
||||
#[inline]
|
||||
pub fn get_ref(&self) -> &T {
|
||||
&self.path
|
||||
}
|
||||
|
||||
/// Get mutable reference to inner path instance
|
||||
#[inline]
|
||||
pub fn get_mut(&mut self) -> &mut T {
|
||||
&mut self.path
|
||||
}
|
||||
|
||||
/// Path
|
||||
#[inline]
|
||||
pub fn path(&self) -> &str {
|
||||
let skip = self.skip as usize;
|
||||
let path = self.path.path();
|
||||
if skip <= path.len() {
|
||||
&path[skip..]
|
||||
} else {
|
||||
""
|
||||
}
|
||||
}
|
||||
|
||||
/// Set new path
|
||||
#[inline]
|
||||
pub fn set(&mut self, path: T) {
|
||||
self.skip = 0;
|
||||
self.path = path;
|
||||
self.segments.clear();
|
||||
}
|
||||
|
||||
/// Reset state
|
||||
#[inline]
|
||||
pub fn reset(&mut self) {
|
||||
self.skip = 0;
|
||||
self.segments.clear();
|
||||
}
|
||||
|
||||
/// Skip first `n` chars in path
|
||||
#[inline]
|
||||
pub fn skip(&mut self, n: u16) {
|
||||
self.skip += n;
|
||||
}
|
||||
|
||||
pub(crate) fn add(&mut self, name: &'static str, value: PathItem) {
|
||||
match value {
|
||||
PathItem::Static(s) => self.segments.push((name, PathItem::Static(s))),
|
||||
PathItem::Segment(begin, end) => self
|
||||
.segments
|
||||
.push((name, PathItem::Segment(self.skip + begin, self.skip + end))),
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn add_static(&mut self, name: &'static str, value: &'static str) {
|
||||
self.segments.push((name, PathItem::Static(value)));
|
||||
}
|
||||
|
||||
/// Check if there are any matched patterns
|
||||
#[inline]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.segments.is_empty()
|
||||
}
|
||||
|
||||
/// Check number of extracted parameters
|
||||
#[inline]
|
||||
pub fn len(&self) -> usize {
|
||||
self.segments.len()
|
||||
}
|
||||
|
||||
/// Get matched parameter by name without type conversion
|
||||
pub fn get(&self, key: &str) -> Option<&str> {
|
||||
for item in self.segments.iter() {
|
||||
if key == item.0 {
|
||||
return match item.1 {
|
||||
PathItem::Static(ref s) => Some(&s),
|
||||
PathItem::Segment(s, e) => {
|
||||
Some(&self.path.path()[(s as usize)..(e as usize)])
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
if key == "tail" {
|
||||
Some(&self.path.path()[(self.skip as usize)..])
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Get unprocessed part of the path
|
||||
pub fn unprocessed(&self) -> &str {
|
||||
&self.path.path()[(self.skip as usize)..]
|
||||
}
|
||||
|
||||
/// Get matched parameter by name.
|
||||
///
|
||||
/// If the keyed parameter is not available, an empty string is used as the default value.
|
||||
pub fn query(&self, key: &str) -> &str {
|
||||
if let Some(s) = self.get(key) {
|
||||
s
|
||||
} else {
|
||||
""
|
||||
}
|
||||
}
|
||||
|
||||
/// Return iterator to items in parameter container
|
||||
pub fn iter(&self) -> PathIter<'_, T> {
|
||||
PathIter {
|
||||
idx: 0,
|
||||
params: self,
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to deserialize matching parameters to a specified type `U`
|
||||
pub fn load<'de, U: serde::Deserialize<'de>>(&'de self) -> Result<U, de::value::Error> {
|
||||
de::Deserialize::deserialize(PathDeserializer::new(self))
|
||||
}
|
||||
}
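// Usage sketch (added for illustration; mirrors the crate's own tests): once a
// `ResourceDef` has populated the segments, captured values can be read back
// by name or deserialized in one step with `load`.
#[cfg(test)]
mod usage_sketch {
    use crate::{Path, ResourceDef};

    #[test]
    fn read_back_matched_segments() {
        let resource = ResourceDef::new("/user/{id}/{action}");
        let mut path = Path::new("/user/42/edit");
        assert!(resource.match_path(&mut path));

        // Access by name; `query` falls back to "" for unknown keys.
        assert_eq!(path.get("id"), Some("42"));
        assert_eq!(path.query("missing"), "");

        // Deserialize all captured segments into a typed tuple.
        let (id, action): (u32, String) = path.load().unwrap();
        assert_eq!(id, 42);
        assert_eq!(action, "edit");
    }
}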
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PathIter<'a, T> {
|
||||
idx: usize,
|
||||
params: &'a Path<T>,
|
||||
}
|
||||
|
||||
impl<'a, T: ResourcePath> Iterator for PathIter<'a, T> {
|
||||
type Item = (&'a str, &'a str);
|
||||
|
||||
#[inline]
|
||||
fn next(&mut self) -> Option<(&'a str, &'a str)> {
|
||||
if self.idx < self.params.len() {
|
||||
let idx = self.idx;
|
||||
let res = match self.params.segments[idx].1 {
|
||||
PathItem::Static(ref s) => &s,
|
||||
PathItem::Segment(s, e) => &self.params.path.path()[(s as usize)..(e as usize)],
|
||||
};
|
||||
self.idx += 1;
|
||||
return Some((&self.params.segments[idx].0, res));
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: ResourcePath> Index<&'a str> for Path<T> {
|
||||
type Output = str;
|
||||
|
||||
fn index(&self, name: &'a str) -> &str {
|
||||
self.get(name)
|
||||
.expect("Value for parameter is not available")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ResourcePath> Index<usize> for Path<T> {
|
||||
type Output = str;
|
||||
|
||||
fn index(&self, idx: usize) -> &str {
|
||||
match self.segments[idx].1 {
|
||||
PathItem::Static(ref s) => &s,
|
||||
PathItem::Segment(s, e) => &self.path.path()[(s as usize)..(e as usize)],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ResourcePath> Resource<T> for Path<T> {
|
||||
fn resource_path(&mut self) -> &mut Self {
|
||||
self
|
||||
}
|
||||
}
|
@@ -1,947 +0,0 @@
|
||||
use std::cmp::min;
|
||||
use std::collections::HashMap;
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
use regex::{escape, Regex, RegexSet};
|
||||
|
||||
use crate::path::{Path, PathItem};
|
||||
use crate::{IntoPattern, Resource, ResourcePath};
|
||||
|
||||
const MAX_DYNAMIC_SEGMENTS: usize = 16;
|
||||
|
||||
/// ResourceDef describes an entry in the resources table.
///
/// A resource definition can contain at most 16 dynamic segments.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ResourceDef {
|
||||
id: u16,
|
||||
tp: PatternType,
|
||||
name: String,
|
||||
pattern: String,
|
||||
elements: Vec<PatternElement>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
enum PatternElement {
|
||||
Str(String),
|
||||
Var(String),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
enum PatternType {
|
||||
Static(String),
|
||||
Prefix(String),
|
||||
Dynamic(Regex, Vec<&'static str>, usize),
|
||||
DynamicSet(RegexSet, Vec<(Regex, Vec<&'static str>, usize)>),
|
||||
}
|
||||
|
||||
impl ResourceDef {
|
||||
/// Parse path pattern and create a new `Pattern` instance.
|
||||
///
|
||||
/// Panics if path pattern is malformed.
|
||||
pub fn new<T: IntoPattern>(path: T) -> Self {
|
||||
if path.is_single() {
|
||||
let patterns = path.patterns();
|
||||
ResourceDef::with_prefix(&patterns[0], false)
|
||||
} else {
|
||||
let set = path.patterns();
|
||||
let mut data = Vec::new();
|
||||
let mut re_set = Vec::new();
|
||||
|
||||
for path in set {
|
||||
let (pattern, _, _, len) = ResourceDef::parse(&path, false);
|
||||
|
||||
let re = match Regex::new(&pattern) {
|
||||
Ok(re) => re,
|
||||
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
|
||||
};
|
||||
// Capture names must be 'static, so they are deliberately leaked; this is bounded because actix creates one router per thread.
|
||||
let names: Vec<_> = re
|
||||
.capture_names()
|
||||
.filter_map(|name| {
|
||||
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
|
||||
})
|
||||
.collect();
|
||||
data.push((re, names, len));
|
||||
re_set.push(pattern);
|
||||
}
|
||||
|
||||
ResourceDef {
|
||||
id: 0,
|
||||
tp: PatternType::DynamicSet(RegexSet::new(re_set).unwrap(), data),
|
||||
elements: Vec::new(),
|
||||
name: String::new(),
|
||||
pattern: "".to_owned(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse path pattern and create a new `Pattern` instance.
|
||||
///
|
||||
/// Use `prefix` type instead of `static`.
|
||||
///
|
||||
/// Panics if path regex pattern is malformed.
|
||||
pub fn prefix(path: &str) -> Self {
|
||||
ResourceDef::with_prefix(path, true)
|
||||
}
|
||||
|
||||
/// Parse path pattern and create a new `Pattern` instance.
/// Inserts `/` at the beginning of the pattern.
///
|
||||
/// Use `prefix` type instead of `static`.
|
||||
///
|
||||
/// Panics if path regex pattern is malformed.
|
||||
pub fn root_prefix(path: &str) -> Self {
|
||||
ResourceDef::with_prefix(&insert_slash(path), true)
|
||||
}
|
||||
|
||||
/// Resource id
|
||||
pub fn id(&self) -> u16 {
|
||||
self.id
|
||||
}
|
||||
|
||||
/// Set resource id
|
||||
pub fn set_id(&mut self, id: u16) {
|
||||
self.id = id;
|
||||
}
|
||||
|
||||
/// Parse path pattern and create a new `Pattern` instance with a custom prefix.
|
||||
fn with_prefix(path: &str, for_prefix: bool) -> Self {
|
||||
let path = path.to_owned();
|
||||
let (pattern, elements, is_dynamic, len) = ResourceDef::parse(&path, for_prefix);
|
||||
|
||||
let tp = if is_dynamic {
|
||||
let re = match Regex::new(&pattern) {
|
||||
Ok(re) => re,
|
||||
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
|
||||
};
|
||||
// Capture names must be 'static, so they are deliberately leaked; this is bounded because actix creates one router per thread.
|
||||
let names = re
|
||||
.capture_names()
|
||||
.filter_map(|name| {
|
||||
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
|
||||
})
|
||||
.collect();
|
||||
PatternType::Dynamic(re, names, len)
|
||||
} else if for_prefix {
|
||||
PatternType::Prefix(pattern)
|
||||
} else {
|
||||
PatternType::Static(pattern)
|
||||
};
|
||||
|
||||
ResourceDef {
|
||||
tp,
|
||||
elements,
|
||||
id: 0,
|
||||
name: String::new(),
|
||||
pattern: path,
|
||||
}
|
||||
}
|
||||
|
||||
/// Resource pattern name
|
||||
pub fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
/// Mutable reference to a name of a resource definition.
|
||||
pub fn name_mut(&mut self) -> &mut String {
|
||||
&mut self.name
|
||||
}
|
||||
|
||||
/// Path pattern of the resource
|
||||
pub fn pattern(&self) -> &str {
|
||||
&self.pattern
|
||||
}
|
||||
|
||||
/// Check if path matches this pattern.
|
||||
#[inline]
|
||||
pub fn is_match(&self, path: &str) -> bool {
|
||||
match self.tp {
|
||||
PatternType::Static(ref s) => s == path,
|
||||
PatternType::Prefix(ref s) => path.starts_with(s),
|
||||
PatternType::Dynamic(ref re, _, _) => re.is_match(path),
|
||||
PatternType::DynamicSet(ref re, _) => re.is_match(path),
|
||||
}
|
||||
}
|
||||
|
||||
/// Is the given path a prefix match against this resource? Returns the length of the matched prefix on success.
|
||||
pub fn is_prefix_match(&self, path: &str) -> Option<usize> {
|
||||
let p_len = path.len();
|
||||
let path = if path.is_empty() { "/" } else { path };
|
||||
|
||||
match self.tp {
|
||||
PatternType::Static(ref s) => {
|
||||
if s == path {
|
||||
Some(p_len)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
PatternType::Dynamic(ref re, _, len) => {
|
||||
if let Some(captures) = re.captures(path) {
|
||||
let mut pos = 0;
|
||||
let mut passed = false;
|
||||
for capture in captures.iter() {
|
||||
if let Some(ref m) = capture {
|
||||
if !passed {
|
||||
passed = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
pos = m.end();
|
||||
}
|
||||
}
|
||||
Some(pos + len)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
PatternType::Prefix(ref s) => {
|
||||
let len = if path == s {
|
||||
s.len()
|
||||
} else if path.starts_with(s)
|
||||
&& (s.ends_with('/') || path.split_at(s.len()).1.starts_with('/'))
|
||||
{
|
||||
if s.ends_with('/') {
|
||||
s.len() - 1
|
||||
} else {
|
||||
s.len()
|
||||
}
|
||||
} else {
|
||||
return None;
|
||||
};
|
||||
Some(min(p_len, len))
|
||||
}
|
||||
PatternType::DynamicSet(ref re, ref params) => {
|
||||
if let Some(idx) = re.matches(path).into_iter().next() {
|
||||
let (ref pattern, _, len) = params[idx];
|
||||
if let Some(captures) = pattern.captures(path) {
|
||||
let mut pos = 0;
|
||||
let mut passed = false;
|
||||
for capture in captures.iter() {
|
||||
if let Some(ref m) = capture {
|
||||
if !passed {
|
||||
passed = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
pos = m.end();
|
||||
}
|
||||
}
|
||||
Some(pos + len)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Is the given path and parameters a match against this pattern?
|
||||
pub fn match_path<T: ResourcePath>(&self, path: &mut Path<T>) -> bool {
|
||||
match self.tp {
|
||||
PatternType::Static(ref s) => {
|
||||
if s == path.path() {
|
||||
path.skip(path.len() as u16);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
PatternType::Prefix(ref s) => {
|
||||
let r_path = path.path();
|
||||
let len = if s == r_path {
|
||||
s.len()
|
||||
} else if r_path.starts_with(s)
|
||||
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
|
||||
{
|
||||
if s.ends_with('/') {
|
||||
s.len() - 1
|
||||
} else {
|
||||
s.len()
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
};
|
||||
let r_path_len = r_path.len();
|
||||
path.skip(min(r_path_len, len) as u16);
|
||||
true
|
||||
}
|
||||
PatternType::Dynamic(ref re, ref names, len) => {
|
||||
let mut idx = 0;
|
||||
let mut pos = 0;
|
||||
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
|
||||
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
|
||||
|
||||
if let Some(captures) = re.captures(path.path()) {
|
||||
for (no, name) in names.iter().enumerate() {
|
||||
if let Some(m) = captures.name(&name) {
|
||||
idx += 1;
|
||||
pos = m.end();
|
||||
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
|
||||
} else {
|
||||
log::error!(
|
||||
"Dynamic path match but not all segments found: {}",
|
||||
name
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
for idx in 0..idx {
|
||||
path.add(names[idx], segments[idx]);
|
||||
}
|
||||
path.skip((pos + len) as u16);
|
||||
true
|
||||
}
|
||||
PatternType::DynamicSet(ref re, ref params) => {
|
||||
if let Some(idx) = re.matches(path.path()).into_iter().next() {
|
||||
let (ref pattern, ref names, len) = params[idx];
|
||||
let mut idx = 0;
|
||||
let mut pos = 0;
|
||||
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
|
||||
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
|
||||
|
||||
if let Some(captures) = pattern.captures(path.path()) {
|
||||
for (no, name) in names.iter().enumerate() {
|
||||
if let Some(m) = captures.name(&name) {
|
||||
idx += 1;
|
||||
pos = m.end();
|
||||
segments[no] =
|
||||
PathItem::Segment(m.start() as u16, m.end() as u16);
|
||||
} else {
|
||||
log::error!(
|
||||
"Dynamic path match but not all segments found: {}",
|
||||
name
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
for idx in 0..idx {
|
||||
path.add(names[idx], segments[idx]);
|
||||
}
|
||||
path.skip((pos + len) as u16);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Is the given path and parameters a match against this pattern?
|
||||
pub fn match_path_checked<R, T, F, U>(
|
||||
&self,
|
||||
res: &mut R,
|
||||
check: &F,
|
||||
user_data: &Option<U>,
|
||||
) -> bool
|
||||
where
|
||||
T: ResourcePath,
|
||||
R: Resource<T>,
|
||||
F: Fn(&R, &Option<U>) -> bool,
|
||||
{
|
||||
match self.tp {
|
||||
PatternType::Static(ref s) => {
|
||||
if s == res.resource_path().path() && check(res, user_data) {
|
||||
let path = res.resource_path();
|
||||
path.skip(path.len() as u16);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
PatternType::Prefix(ref s) => {
|
||||
let len = {
|
||||
let r_path = res.resource_path().path();
|
||||
if s == r_path {
|
||||
s.len()
|
||||
} else if r_path.starts_with(s)
|
||||
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
|
||||
{
|
||||
if s.ends_with('/') {
|
||||
s.len() - 1
|
||||
} else {
|
||||
s.len()
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
if !check(res, user_data) {
|
||||
return false;
|
||||
}
|
||||
let path = res.resource_path();
|
||||
path.skip(min(path.path().len(), len) as u16);
|
||||
true
|
||||
}
|
||||
PatternType::Dynamic(ref re, ref names, len) => {
|
||||
let mut idx = 0;
|
||||
let mut pos = 0;
|
||||
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
|
||||
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
|
||||
|
||||
if let Some(captures) = re.captures(res.resource_path().path()) {
|
||||
for (no, name) in names.iter().enumerate() {
|
||||
if let Some(m) = captures.name(&name) {
|
||||
idx += 1;
|
||||
pos = m.end();
|
||||
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
|
||||
} else {
|
||||
log::error!(
|
||||
"Dynamic path match but not all segments found: {}",
|
||||
name
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
if !check(res, user_data) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let path = res.resource_path();
|
||||
for idx in 0..idx {
|
||||
path.add(names[idx], segments[idx]);
|
||||
}
|
||||
path.skip((pos + len) as u16);
|
||||
true
|
||||
}
|
||||
PatternType::DynamicSet(ref re, ref params) => {
|
||||
let path = res.resource_path().path();
|
||||
if let Some(idx) = re.matches(path).into_iter().next() {
|
||||
let (ref pattern, ref names, len) = params[idx];
|
||||
let mut idx = 0;
|
||||
let mut pos = 0;
|
||||
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
|
||||
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
|
||||
|
||||
if let Some(captures) = pattern.captures(path) {
|
||||
for (no, name) in names.iter().enumerate() {
|
||||
if let Some(m) = captures.name(&name) {
|
||||
idx += 1;
|
||||
pos = m.end();
|
||||
segments[no] =
|
||||
PathItem::Segment(m.start() as u16, m.end() as u16);
|
||||
} else {
|
||||
log::error!(
|
||||
"Dynamic path match but not all segments found: {}",
|
||||
name
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
if !check(res, user_data) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let path = res.resource_path();
|
||||
for idx in 0..idx {
|
||||
path.add(names[idx], segments[idx]);
|
||||
}
|
||||
path.skip((pos + len) as u16);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Build resource path from elements. Returns `true` on success.
|
||||
pub fn resource_path<U, I>(&self, path: &mut String, elements: &mut U) -> bool
|
||||
where
|
||||
U: Iterator<Item = I>,
|
||||
I: AsRef<str>,
|
||||
{
|
||||
match self.tp {
|
||||
PatternType::Prefix(ref p) => path.push_str(p),
|
||||
PatternType::Static(ref p) => path.push_str(p),
|
||||
PatternType::Dynamic(..) => {
|
||||
for el in &self.elements {
|
||||
match *el {
|
||||
PatternElement::Str(ref s) => path.push_str(s),
|
||||
PatternElement::Var(_) => {
|
||||
if let Some(val) = elements.next() {
|
||||
path.push_str(val.as_ref())
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
PatternType::DynamicSet(..) => {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
/// Build resource path from elements. Returns `true` on success.
|
||||
pub fn resource_path_named<K, V, S>(
|
||||
&self,
|
||||
path: &mut String,
|
||||
elements: &HashMap<K, V, S>,
|
||||
) -> bool
|
||||
where
|
||||
K: std::borrow::Borrow<str> + Eq + Hash,
|
||||
V: AsRef<str>,
|
||||
S: std::hash::BuildHasher,
|
||||
{
|
||||
match self.tp {
|
||||
PatternType::Prefix(ref p) => path.push_str(p),
|
||||
PatternType::Static(ref p) => path.push_str(p),
|
||||
PatternType::Dynamic(..) => {
|
||||
for el in &self.elements {
|
||||
match *el {
|
||||
PatternElement::Str(ref s) => path.push_str(s),
|
||||
PatternElement::Var(ref name) => {
|
||||
if let Some(val) = elements.get(name) {
|
||||
path.push_str(val.as_ref())
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
PatternType::DynamicSet(..) => {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
fn parse_param(pattern: &str) -> (PatternElement, String, &str, bool) {
|
||||
const DEFAULT_PATTERN: &str = "[^/]+";
|
||||
const DEFAULT_PATTERN_TAIL: &str = ".*";
|
||||
let mut params_nesting = 0usize;
|
||||
let close_idx = pattern
|
||||
.find(|c| match c {
|
||||
'{' => {
|
||||
params_nesting += 1;
|
||||
false
|
||||
}
|
||||
'}' => {
|
||||
params_nesting -= 1;
|
||||
params_nesting == 0
|
||||
}
|
||||
_ => false,
|
||||
})
|
||||
.expect("malformed dynamic segment");
|
||||
let (mut param, mut rem) = pattern.split_at(close_idx + 1);
|
||||
param = ¶m[1..param.len() - 1]; // Remove outer brackets
|
||||
let tail = rem == "*";
|
||||
|
||||
let (name, pattern) = match param.find(':') {
|
||||
Some(idx) => {
|
||||
if tail {
|
||||
panic!("Custom regex is not supported for remainder match");
|
||||
}
|
||||
let (name, pattern) = param.split_at(idx);
|
||||
(name, &pattern[1..])
|
||||
}
|
||||
None => (
|
||||
param,
|
||||
if tail {
|
||||
rem = &rem[1..];
|
||||
DEFAULT_PATTERN_TAIL
|
||||
} else {
|
||||
DEFAULT_PATTERN
|
||||
},
|
||||
),
|
||||
};
|
||||
(
|
||||
PatternElement::Var(name.to_string()),
|
||||
format!(r"(?P<{}>{})", &name, &pattern),
|
||||
rem,
|
||||
tail,
|
||||
)
|
||||
}
|
||||
|
||||
fn parse(
|
||||
mut pattern: &str,
|
||||
mut for_prefix: bool,
|
||||
) -> (String, Vec<PatternElement>, bool, usize) {
|
||||
if pattern.find('{').is_none() {
|
||||
// TODO: MSRV: 1.45
|
||||
#[allow(clippy::manual_strip)]
|
||||
return if pattern.ends_with('*') {
|
||||
let path = &pattern[..pattern.len() - 1];
|
||||
let re = String::from("^") + path + "(.*)";
|
||||
(re, vec![PatternElement::Str(String::from(path))], true, 0)
|
||||
} else {
|
||||
(
|
||||
String::from(pattern),
|
||||
vec![PatternElement::Str(String::from(pattern))],
|
||||
false,
|
||||
pattern.chars().count(),
|
||||
)
|
||||
};
|
||||
}
|
||||
|
||||
let mut elements = Vec::new();
|
||||
let mut re = String::from("^");
|
||||
let mut dyn_elements = 0;
|
||||
|
||||
while let Some(idx) = pattern.find('{') {
|
||||
let (prefix, rem) = pattern.split_at(idx);
|
||||
elements.push(PatternElement::Str(String::from(prefix)));
|
||||
re.push_str(&escape(prefix));
|
||||
let (param_pattern, re_part, rem, tail) = Self::parse_param(rem);
|
||||
if tail {
|
||||
for_prefix = true;
|
||||
}
|
||||
|
||||
elements.push(param_pattern);
|
||||
re.push_str(&re_part);
|
||||
pattern = rem;
|
||||
dyn_elements += 1;
|
||||
}
|
||||
|
||||
elements.push(PatternElement::Str(String::from(pattern)));
|
||||
re.push_str(&escape(pattern));
|
||||
|
||||
if dyn_elements > MAX_DYNAMIC_SEGMENTS {
|
||||
panic!(
|
||||
"Only {} dynamic segments are allowed, provided: {}",
|
||||
MAX_DYNAMIC_SEGMENTS, dyn_elements
|
||||
);
|
||||
}
|
||||
|
||||
if !for_prefix {
|
||||
re.push('$');
|
||||
}
|
||||
(re, elements, true, pattern.chars().count())
|
||||
}
|
||||
}
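// Sketch of the pattern-to-regex translation performed by `parse` above
// (illustrative; the literal regex is derived from the default segment
// pattern `[^/]+` and the `(?P<name>...)` capture emitted by `parse_param`).
#[cfg(test)]
mod regex_translation_sketch {
    use regex::Regex;

    #[test]
    fn default_segment_regex() {
        // "/user/{id}" compiles to roughly this anchored regex:
        let re = Regex::new("^/user/(?P<id>[^/]+)$").unwrap();
        let caps = re.captures("/user/42").unwrap();
        assert_eq!(&caps["id"], "42");
        // The default segment pattern never crosses a `/` boundary.
        assert!(!re.is_match("/user/42/extra"));
    }
}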
|
||||
|
||||
impl Eq for ResourceDef {}
|
||||
|
||||
impl PartialEq for ResourceDef {
|
||||
fn eq(&self, other: &ResourceDef) -> bool {
|
||||
self.pattern == other.pattern
|
||||
}
|
||||
}
|
||||
|
||||
impl Hash for ResourceDef {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.pattern.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a str> for ResourceDef {
|
||||
fn from(path: &'a str) -> ResourceDef {
|
||||
ResourceDef::new(path)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for ResourceDef {
|
||||
fn from(path: String) -> ResourceDef {
|
||||
ResourceDef::new(path)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn insert_slash(path: &str) -> String {
|
||||
let mut path = path.to_owned();
|
||||
if !path.is_empty() && !path.starts_with('/') {
|
||||
path.insert(0, '/');
|
||||
};
|
||||
path
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use http::Uri;
|
||||
use std::convert::TryFrom;
|
||||
|
||||
#[test]
|
||||
fn test_parse_static() {
|
||||
let re = ResourceDef::new("/");
|
||||
assert!(re.is_match("/"));
|
||||
assert!(!re.is_match("/a"));
|
||||
|
||||
let re = ResourceDef::new("/name");
|
||||
assert!(re.is_match("/name"));
|
||||
assert!(!re.is_match("/name1"));
|
||||
assert!(!re.is_match("/name/"));
|
||||
assert!(!re.is_match("/name~"));
|
||||
|
||||
assert_eq!(re.is_prefix_match("/name"), Some(5));
|
||||
assert_eq!(re.is_prefix_match("/name1"), None);
|
||||
assert_eq!(re.is_prefix_match("/name/"), None);
|
||||
assert_eq!(re.is_prefix_match("/name~"), None);
|
||||
|
||||
let re = ResourceDef::new("/name/");
|
||||
assert!(re.is_match("/name/"));
|
||||
assert!(!re.is_match("/name"));
|
||||
assert!(!re.is_match("/name/gs"));
|
||||
|
||||
let re = ResourceDef::new("/user/profile");
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(!re.is_match("/user/profile/profile"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_param() {
|
||||
let re = ResourceDef::new("/user/{id}");
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(!re.is_match("/user/2345/"));
|
||||
assert!(!re.is_match("/user/2345/sdg"));
|
||||
|
||||
let mut path = Path::new("/user/profile");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "profile");
|
||||
|
||||
let mut path = Path::new("/user/1245125");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "1245125");
|
||||
|
||||
let re = ResourceDef::new("/v{version}/resource/{id}");
|
||||
assert!(re.is_match("/v1/resource/320120"));
|
||||
assert!(!re.is_match("/v/resource/1"));
|
||||
assert!(!re.is_match("/resource"));
|
||||
|
||||
let mut path = Path::new("/v151/resource/adage32");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("version").unwrap(), "151");
|
||||
assert_eq!(path.get("id").unwrap(), "adage32");
|
||||
|
||||
let re = ResourceDef::new("/{id:[[:digit:]]{6}}");
|
||||
assert!(re.is_match("/012345"));
|
||||
assert!(!re.is_match("/012"));
|
||||
assert!(!re.is_match("/01234567"));
|
||||
assert!(!re.is_match("/XXXXXX"));
|
||||
|
||||
let mut path = Path::new("/012345");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "012345");
|
||||
}
|
||||
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
#[test]
|
||||
fn test_dynamic_set() {
|
||||
let re = ResourceDef::new(vec![
|
||||
"/user/{id}",
|
||||
"/v{version}/resource/{id}",
|
||||
"/{id:[[:digit:]]{6}}",
|
||||
]);
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(!re.is_match("/user/2345/"));
|
||||
assert!(!re.is_match("/user/2345/sdg"));
|
||||
|
||||
let mut path = Path::new("/user/profile");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "profile");
|
||||
|
||||
let mut path = Path::new("/user/1245125");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "1245125");
|
||||
|
||||
assert!(re.is_match("/v1/resource/320120"));
|
||||
assert!(!re.is_match("/v/resource/1"));
|
||||
assert!(!re.is_match("/resource"));
|
||||
|
||||
let mut path = Path::new("/v151/resource/adage32");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("version").unwrap(), "151");
|
||||
assert_eq!(path.get("id").unwrap(), "adage32");
|
||||
|
||||
assert!(re.is_match("/012345"));
|
||||
assert!(!re.is_match("/012"));
|
||||
assert!(!re.is_match("/01234567"));
|
||||
assert!(!re.is_match("/XXXXXX"));
|
||||
|
||||
let mut path = Path::new("/012345");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "012345");
|
||||
|
||||
let re = ResourceDef::new([
|
||||
"/user/{id}",
|
||||
"/v{version}/resource/{id}",
|
||||
"/{id:[[:digit:]]{6}}",
|
||||
]);
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(!re.is_match("/user/2345/"));
|
||||
assert!(!re.is_match("/user/2345/sdg"));
|
||||
|
||||
let re = ResourceDef::new([
|
||||
"/user/{id}".to_string(),
|
||||
"/v{version}/resource/{id}".to_string(),
|
||||
"/{id:[[:digit:]]{6}}".to_string(),
|
||||
]);
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(!re.is_match("/user/2345/"));
|
||||
assert!(!re.is_match("/user/2345/sdg"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_tail() {
|
||||
let re = ResourceDef::new("/user/-{id}*");
|
||||
|
||||
let mut path = Path::new("/user/-profile");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "profile");
|
||||
|
||||
let mut path = Path::new("/user/-2345");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "2345");
|
||||
|
||||
let mut path = Path::new("/user/-2345/");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "2345/");
|
||||
|
||||
let mut path = Path::new("/user/-2345/sdg");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "2345/sdg");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_static_tail() {
|
||||
let re = ResourceDef::new("/user*");
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(re.is_match("/user/2345/"));
|
||||
assert!(re.is_match("/user/2345/sdg"));
|
||||
|
||||
let re = ResourceDef::new("/user/*");
|
||||
assert!(re.is_match("/user/profile"));
|
||||
assert!(re.is_match("/user/2345"));
|
||||
assert!(re.is_match("/user/2345/"));
|
||||
assert!(re.is_match("/user/2345/sdg"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_urlencoded_param() {
|
||||
let re = ResourceDef::new("/user/{id}/test");
|
||||
|
||||
let mut path = Path::new("/user/2345/test");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "2345");
|
||||
|
||||
let mut path = Path::new("/user/qwe%25/test");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "qwe%25");
|
||||
|
||||
let uri = Uri::try_from("/user/qwe%25/test").unwrap();
|
||||
let mut path = Path::new(uri);
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "qwe%25");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resource_prefix() {
|
||||
let re = ResourceDef::prefix("/name");
|
||||
assert!(re.is_match("/name"));
|
||||
assert!(re.is_match("/name/"));
|
||||
assert!(re.is_match("/name/test/test"));
|
||||
assert!(re.is_match("/name1"));
|
||||
assert!(re.is_match("/name~"));
|
||||
|
||||
assert_eq!(re.is_prefix_match("/name"), Some(5));
|
||||
assert_eq!(re.is_prefix_match("/name/"), Some(5));
|
||||
assert_eq!(re.is_prefix_match("/name/test/test"), Some(5));
|
||||
assert_eq!(re.is_prefix_match("/name1"), None);
|
||||
assert_eq!(re.is_prefix_match("/name~"), None);
|
||||
|
||||
let re = ResourceDef::prefix("/name/");
|
||||
assert!(re.is_match("/name/"));
|
||||
assert!(re.is_match("/name/gs"));
|
||||
assert!(!re.is_match("/name"));
|
||||
|
||||
let re = ResourceDef::root_prefix("name/");
|
||||
assert!(re.is_match("/name/"));
|
||||
assert!(re.is_match("/name/gs"));
|
||||
assert!(!re.is_match("/name"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resource_prefix_dynamic() {
|
||||
let re = ResourceDef::prefix("/{name}/");
|
||||
assert!(re.is_match("/name/"));
|
||||
assert!(re.is_match("/name/gs"));
|
||||
assert!(!re.is_match("/name"));
|
||||
|
||||
assert_eq!(re.is_prefix_match("/name/"), Some(6));
|
||||
assert_eq!(re.is_prefix_match("/name/gs"), Some(6));
|
||||
assert_eq!(re.is_prefix_match("/name"), None);
|
||||
|
||||
let mut path = Path::new("/test2/");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(&path["name"], "test2");
|
||||
assert_eq!(&path[0], "test2");
|
||||
|
||||
let mut path = Path::new("/test2/subpath1/subpath2/index.html");
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(&path["name"], "test2");
|
||||
assert_eq!(&path[0], "test2");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resource_path() {
|
||||
let mut s = String::new();
|
||||
let resource = ResourceDef::new("/user/{item1}/test");
|
||||
assert!(resource.resource_path(&mut s, &mut (&["user1"]).iter()));
|
||||
assert_eq!(s, "/user/user1/test");
|
||||
|
||||
let mut s = String::new();
|
||||
let resource = ResourceDef::new("/user/{item1}/{item2}/test");
|
||||
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
|
||||
assert_eq!(s, "/user/item/item2/test");
|
||||
|
||||
let mut s = String::new();
|
||||
let resource = ResourceDef::new("/user/{item1}/{item2}");
|
||||
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
|
||||
assert_eq!(s, "/user/item/item2");
|
||||
|
||||
let mut s = String::new();
|
||||
let resource = ResourceDef::new("/user/{item1}/{item2}/");
|
||||
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
|
||||
assert_eq!(s, "/user/item/item2/");
|
||||
|
||||
let mut s = String::new();
|
||||
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
|
||||
|
||||
let mut s = String::new();
|
||||
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
|
||||
assert_eq!(s, "/user/item/item2/");
|
||||
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
|
||||
|
||||
let mut s = String::new();
|
||||
assert!(resource.resource_path(&mut s, &mut vec!["item", "item2"].into_iter()));
|
||||
assert_eq!(s, "/user/item/item2/");
|
||||
|
||||
let mut map = HashMap::new();
|
||||
map.insert("item1", "item");
|
||||
|
||||
let mut s = String::new();
|
||||
assert!(!resource.resource_path_named(&mut s, &map));
|
||||
|
||||
let mut s = String::new();
|
||||
map.insert("item2", "item2");
|
||||
assert!(resource.resource_path_named(&mut s, &map));
|
||||
assert_eq!(s, "/user/item/item2/");
|
||||
}
|
||||
}
|
@@ -1,259 +0,0 @@
|
||||
use crate::{IntoPattern, Resource, ResourceDef, ResourcePath};
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub struct ResourceId(pub u16);
|
||||
|
||||
/// Information about current resource
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ResourceInfo {
|
||||
resource: ResourceId,
|
||||
}
|
||||
|
||||
/// Resource router.
|
||||
pub struct Router<T, U = ()>(Vec<(ResourceDef, T, Option<U>)>);
|
||||
|
||||
impl<T, U> Router<T, U> {
|
||||
pub fn build() -> RouterBuilder<T, U> {
|
||||
RouterBuilder {
|
||||
resources: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn recognize<R, P>(&self, resource: &mut R) -> Option<(&T, ResourceId)>
|
||||
where
|
||||
R: Resource<P>,
|
||||
P: ResourcePath,
|
||||
{
|
||||
for item in self.0.iter() {
|
||||
if item.0.match_path(resource.resource_path()) {
|
||||
return Some((&item.1, ResourceId(item.0.id())));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn recognize_mut<R, P>(&mut self, resource: &mut R) -> Option<(&mut T, ResourceId)>
|
||||
where
|
||||
R: Resource<P>,
|
||||
P: ResourcePath,
|
||||
{
|
||||
for item in self.0.iter_mut() {
|
||||
if item.0.match_path(resource.resource_path()) {
|
||||
return Some((&mut item.1, ResourceId(item.0.id())));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn recognize_checked<R, P, F>(
|
||||
&self,
|
||||
resource: &mut R,
|
||||
check: F,
|
||||
) -> Option<(&T, ResourceId)>
|
||||
where
|
||||
F: Fn(&R, &Option<U>) -> bool,
|
||||
R: Resource<P>,
|
||||
P: ResourcePath,
|
||||
{
|
||||
for item in self.0.iter() {
|
||||
if item.0.match_path_checked(resource, &check, &item.2) {
|
||||
return Some((&item.1, ResourceId(item.0.id())));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn recognize_mut_checked<R, P, F>(
|
||||
&mut self,
|
||||
resource: &mut R,
|
||||
check: F,
|
||||
) -> Option<(&mut T, ResourceId)>
|
||||
where
|
||||
F: Fn(&R, &Option<U>) -> bool,
|
||||
R: Resource<P>,
|
||||
P: ResourcePath,
|
||||
{
|
||||
for item in self.0.iter_mut() {
|
||||
if item.0.match_path_checked(resource, &check, &item.2) {
|
||||
return Some((&mut item.1, ResourceId(item.0.id())));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
}
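// Usage sketch (illustration only): the `*_checked` variants run a caller
// supplied guard against the matched resource and its optional user data
// before accepting the match.
#[cfg(test)]
mod checked_recognition_sketch {
    use crate::path::Path;
    use crate::router::Router;

    #[test]
    fn guard_rejects_match() {
        let mut router = Router::<usize, bool>::build();
        router.path("/name", 10);
        let router = router.finish();

        let mut path = Path::new("/name");
        // Guard that only accepts resources whose user data is `Some(true)`;
        // none was attached here, so recognition fails even though the path matches.
        let matched = router.recognize_checked(&mut path, |_res, data| *data == Some(true));
        assert!(matched.is_none());
    }
}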
|
||||
|
||||
pub struct RouterBuilder<T, U = ()> {
|
||||
resources: Vec<(ResourceDef, T, Option<U>)>,
|
||||
}
|
||||
|
||||
impl<T, U> RouterBuilder<T, U> {
|
||||
/// Register resource for the specified path.
|
||||
pub fn path<P: IntoPattern>(
|
||||
&mut self,
|
||||
path: P,
|
||||
resource: T,
|
||||
) -> &mut (ResourceDef, T, Option<U>) {
|
||||
self.resources
|
||||
.push((ResourceDef::new(path), resource, None));
|
||||
self.resources.last_mut().unwrap()
|
||||
}
|
||||
|
||||
/// Register resource for the specified path prefix.
|
||||
pub fn prefix(&mut self, prefix: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) {
|
||||
self.resources
|
||||
.push((ResourceDef::prefix(prefix), resource, None));
|
||||
self.resources.last_mut().unwrap()
|
||||
}
|
||||
|
||||
/// Register resource for a `ResourceDef`.
|
||||
pub fn rdef(&mut self, rdef: ResourceDef, resource: T) -> &mut (ResourceDef, T, Option<U>) {
|
||||
self.resources.push((rdef, resource, None));
|
||||
self.resources.last_mut().unwrap()
|
||||
}
|
||||
|
||||
/// Finish configuration and create the router instance.
|
||||
pub fn finish(self) -> Router<T, U> {
|
||||
Router(self.resources)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::path::Path;
|
||||
use crate::router::{ResourceId, Router};
|
||||
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
#[test]
|
||||
fn test_recognizer_1() {
|
||||
let mut router = Router::<usize>::build();
|
||||
router.path("/name", 10).0.set_id(0);
|
||||
router.path("/name/{val}", 11).0.set_id(1);
|
||||
router.path("/name/{val}/index.html", 12).0.set_id(2);
|
||||
router.path("/file/{file}.{ext}", 13).0.set_id(3);
|
||||
router.path("/v{val}/{val2}/index.html", 14).0.set_id(4);
|
||||
router.path("/v/{tail:.*}", 15).0.set_id(5);
|
||||
router.path("/test2/{test}.html", 16).0.set_id(6);
|
||||
router.path("/{test}/index.html", 17).0.set_id(7);
|
||||
let mut router = router.finish();
|
||||
|
||||
let mut path = Path::new("/unknown");
|
||||
assert!(router.recognize_mut(&mut path).is_none());
|
||||
|
||||
let mut path = Path::new("/name");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 10);
|
||||
assert_eq!(info, ResourceId(0));
|
||||
assert!(path.is_empty());
|
||||
|
||||
let mut path = Path::new("/name/value");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 11);
|
||||
assert_eq!(info, ResourceId(1));
|
||||
assert_eq!(path.get("val").unwrap(), "value");
|
||||
assert_eq!(&path["val"], "value");
|
||||
|
||||
let mut path = Path::new("/name/value2/index.html");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 12);
|
||||
assert_eq!(info, ResourceId(2));
|
||||
assert_eq!(path.get("val").unwrap(), "value2");
|
||||
|
||||
let mut path = Path::new("/file/file.gz");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 13);
|
||||
assert_eq!(info, ResourceId(3));
|
||||
assert_eq!(path.get("file").unwrap(), "file");
|
||||
assert_eq!(path.get("ext").unwrap(), "gz");
|
||||
|
||||
let mut path = Path::new("/vtest/ttt/index.html");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 14);
|
||||
assert_eq!(info, ResourceId(4));
|
||||
assert_eq!(path.get("val").unwrap(), "test");
|
||||
assert_eq!(path.get("val2").unwrap(), "ttt");
|
||||
|
||||
let mut path = Path::new("/v/blah-blah/index.html");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 15);
|
||||
assert_eq!(info, ResourceId(5));
|
||||
assert_eq!(path.get("tail").unwrap(), "blah-blah/index.html");
|
||||
|
||||
let mut path = Path::new("/test2/index.html");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 16);
|
||||
assert_eq!(info, ResourceId(6));
|
||||
assert_eq!(path.get("test").unwrap(), "index");
|
||||
|
||||
let mut path = Path::new("/bbb/index.html");
|
||||
let (h, info) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 17);
|
||||
assert_eq!(info, ResourceId(7));
|
||||
assert_eq!(path.get("test").unwrap(), "bbb");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_recognizer_2() {
|
||||
let mut router = Router::<usize>::build();
|
||||
router.path("/index.json", 10);
|
||||
router.path("/{source}.json", 11);
|
||||
let mut router = router.finish();
|
||||
|
||||
let mut path = Path::new("/index.json");
|
||||
let (h, _) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 10);
|
||||
|
||||
let mut path = Path::new("/test.json");
|
||||
let (h, _) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 11);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_recognizer_with_prefix() {
|
||||
let mut router = Router::<usize>::build();
|
||||
router.path("/name", 10).0.set_id(0);
|
||||
router.path("/name/{val}", 11).0.set_id(1);
|
||||
let mut router = router.finish();
|
||||
|
||||
let mut path = Path::new("/name");
|
||||
path.skip(5);
|
||||
assert!(router.recognize_mut(&mut path).is_none());
|
||||
|
||||
let mut path = Path::new("/test/name");
|
||||
path.skip(5);
|
||||
let (h, _) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 10);
|
||||
|
||||
let mut path = Path::new("/test/name/value");
|
||||
path.skip(5);
|
||||
let (h, id) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 11);
|
||||
assert_eq!(id, ResourceId(1));
|
||||
assert_eq!(path.get("val").unwrap(), "value");
|
||||
assert_eq!(&path["val"], "value");
|
||||
|
||||
// same patterns
|
||||
let mut router = Router::<usize>::build();
|
||||
router.path("/name", 10);
|
||||
router.path("/name/{val}", 11);
|
||||
let mut router = router.finish();
|
||||
|
||||
let mut path = Path::new("/name");
|
||||
path.skip(6);
|
||||
assert!(router.recognize_mut(&mut path).is_none());
|
||||
|
||||
let mut path = Path::new("/test2/name");
|
||||
path.skip(6);
|
||||
let (h, _) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 10);
|
||||
|
||||
let mut path = Path::new("/test2/name-test");
|
||||
path.skip(6);
|
||||
assert!(router.recognize_mut(&mut path).is_none());
|
||||
|
||||
let mut path = Path::new("/test2/name/ttt");
|
||||
path.skip(6);
|
||||
let (h, _) = router.recognize_mut(&mut path).unwrap();
|
||||
assert_eq!(*h, 11);
|
||||
assert_eq!(&path["val"], "ttt");
|
||||
}
|
||||
}
|
@@ -1,249 +0,0 @@
|
||||
use crate::ResourcePath;
|
||||
|
||||
#[allow(dead_code)]
|
||||
const GEN_DELIMS: &[u8] = b":/?#[]@";
|
||||
#[allow(dead_code)]
|
||||
const SUB_DELIMS_WITHOUT_QS: &[u8] = b"!$'()*,";
|
||||
#[allow(dead_code)]
|
||||
const SUB_DELIMS: &[u8] = b"!$'()*,+?=;";
|
||||
#[allow(dead_code)]
|
||||
const RESERVED: &[u8] = b":/?#[]@!$'()*,+?=;";
|
||||
#[allow(dead_code)]
|
||||
const UNRESERVED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
1234567890
|
||||
-._~";
|
||||
const ALLOWED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
1234567890
|
||||
-._~
|
||||
!$'()*,";
|
||||
const QS: &[u8] = b"+&=;b";
|
||||
|
||||
#[inline]
|
||||
fn bit_at(array: &[u8], ch: u8) -> bool {
|
||||
array[(ch >> 3) as usize] & (1 << (ch & 7)) != 0
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn set_bit(array: &mut [u8], ch: u8) {
|
||||
array[(ch >> 3) as usize] |= 1 << (ch & 7)
|
||||
}
|
||||
|
||||
thread_local! {
|
||||
static DEFAULT_QUOTER: Quoter = Quoter::new(b"@:", b"/+");
|
||||
}
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct Url {
|
||||
uri: http::Uri,
|
||||
path: Option<String>,
|
||||
}
|
||||
|
||||
impl Url {
|
||||
pub fn new(uri: http::Uri) -> Url {
|
||||
let path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
|
||||
|
||||
Url { uri, path }
|
||||
}
|
||||
|
||||
pub fn with_quoter(uri: http::Uri, quoter: &Quoter) -> Url {
|
||||
Url {
|
||||
path: quoter.requote(uri.path().as_bytes()),
|
||||
uri,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uri(&self) -> &http::Uri {
|
||||
&self.uri
|
||||
}
|
||||
|
||||
pub fn path(&self) -> &str {
|
||||
if let Some(ref s) = self.path {
|
||||
s
|
||||
} else {
|
||||
self.uri.path()
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn update(&mut self, uri: &http::Uri) {
|
||||
self.uri = uri.clone();
|
||||
self.path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn update_with_quoter(&mut self, uri: &http::Uri, quoter: &Quoter) {
|
||||
self.uri = uri.clone();
|
||||
self.path = quoter.requote(uri.path().as_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourcePath for Url {
|
||||
#[inline]
|
||||
fn path(&self) -> &str {
|
||||
self.path()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Quoter {
|
||||
safe_table: [u8; 16],
|
||||
protected_table: [u8; 16],
|
||||
}
|
||||
|
||||
impl Quoter {
|
||||
pub fn new(safe: &[u8], protected: &[u8]) -> Quoter {
|
||||
let mut q = Quoter {
|
||||
safe_table: [0; 16],
|
||||
protected_table: [0; 16],
|
||||
};
|
||||
|
||||
// prepare safe table
|
||||
for i in 0..128 {
|
||||
if ALLOWED.contains(&i) {
|
||||
set_bit(&mut q.safe_table, i);
|
||||
}
|
||||
if QS.contains(&i) {
|
||||
set_bit(&mut q.safe_table, i);
|
||||
}
|
||||
}
|
||||
|
||||
for ch in safe {
|
||||
set_bit(&mut q.safe_table, *ch)
|
||||
}
|
||||
|
||||
// prepare protected table
|
||||
for ch in protected {
|
||||
set_bit(&mut q.safe_table, *ch);
|
||||
set_bit(&mut q.protected_table, *ch);
|
||||
}
|
||||
|
||||
q
|
||||
}
|
||||
|
||||
pub fn requote(&self, val: &[u8]) -> Option<String> {
|
||||
let mut has_pct = 0;
|
||||
let mut pct = [b'%', 0, 0];
|
||||
let mut idx = 0;
|
||||
let mut cloned: Option<Vec<u8>> = None;
|
||||
|
||||
let len = val.len();
|
||||
while idx < len {
|
||||
let ch = val[idx];
|
||||
|
||||
if has_pct != 0 {
|
||||
pct[has_pct] = val[idx];
|
||||
has_pct += 1;
|
||||
if has_pct == 3 {
|
||||
has_pct = 0;
|
||||
let buf = cloned.as_mut().unwrap();
|
||||
|
||||
if let Some(ch) = restore_ch(pct[1], pct[2]) {
|
||||
if ch < 128 {
|
||||
if bit_at(&self.protected_table, ch) {
|
||||
buf.extend_from_slice(&pct);
|
||||
idx += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if bit_at(&self.safe_table, ch) {
|
||||
buf.push(ch);
|
||||
idx += 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
buf.push(ch);
|
||||
} else {
|
||||
buf.extend_from_slice(&pct[..]);
|
||||
}
|
||||
}
|
||||
} else if ch == b'%' {
|
||||
has_pct = 1;
|
||||
if cloned.is_none() {
|
||||
let mut c = Vec::with_capacity(len);
|
||||
c.extend_from_slice(&val[..idx]);
|
||||
cloned = Some(c);
|
||||
}
|
||||
} else if let Some(ref mut cloned) = cloned {
|
||||
cloned.push(ch)
|
||||
}
|
||||
idx += 1;
|
||||
}
|
||||
|
||||
if let Some(data) = cloned {
|
||||
// Unsafe: we get data from http::Uri, which does utf-8 checks already
|
||||
// this code only decodes valid pct encoded values
|
||||
Some(unsafe { String::from_utf8_unchecked(data) })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn from_hex(v: u8) -> Option<u8> {
|
||||
if (b'0'..=b'9').contains(&v) {
|
||||
Some(v - 0x30) // ord('0') == 0x30
|
||||
} else if (b'A'..=b'F').contains(&v) {
|
||||
Some(v - 0x41 + 10) // ord('A') == 0x41
|
||||
} else if (b'a'..=b'f').contains(&v) {
|
||||
Some(v - 0x61 + 10) // ord('a') == 0x61
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
|
||||
from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| d1 << 4 | d2))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use http::Uri;
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use super::*;
|
||||
use crate::{Path, ResourceDef};
|
||||
|
||||
#[test]
|
||||
fn test_parse_url() {
|
||||
let re = ResourceDef::new("/user/{id}/test");
|
||||
|
||||
let url = Uri::try_from("/user/2345/test").unwrap();
|
||||
let mut path = Path::new(Url::new(url));
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "2345");
|
||||
|
||||
let url = Uri::try_from("/user/qwe%25/test").unwrap();
|
||||
let mut path = Path::new(Url::new(url));
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "qwe%");
|
||||
|
||||
let url = Uri::try_from("/user/qwe%25rty/test").unwrap();
|
||||
let mut path = Path::new(Url::new(url));
|
||||
assert!(re.match_path(&mut path));
|
||||
assert_eq!(path.get("id").unwrap(), "qwe%rty");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_hex() {
|
||||
let hex = b"0123456789abcdefABCDEF";
|
||||
|
||||
for i in 0..256 {
|
||||
let c = i as u8;
|
||||
if hex.contains(&c) {
|
||||
assert!(from_hex(c).is_some())
|
||||
} else {
|
||||
assert!(from_hex(c).is_none())
|
||||
}
|
||||
}
|
||||
|
||||
let expected = [
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15,
|
||||
];
|
||||
for i in 0..hex.len() {
|
||||
assert_eq!(from_hex(hex[i]).unwrap(), expected[i]);
|
||||
}
|
||||
}
|
||||
}
|
@@ -3,6 +3,38 @@
|
||||
## Unreleased - 2021-xx-xx


## 2.3.0 - 2021-10-11
* The `spawn` method can now resolve with non-unit outputs. [#369]
* Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux. [#374]

[#369]: https://github.com/actix/actix-net/pull/369
[#374]: https://github.com/actix/actix-net/pull/374
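For illustration, a minimal sketch of what the non-unit `spawn` output enables (assumes a running Actix System; the values here are only for the example):

```rust
fn main() {
    actix_rt::System::new().block_on(async {
        // Since 2.3.0 the join handle yields the task's output instead of `()`.
        let answer = actix_rt::spawn(async { 40 + 2 }).await.unwrap();
        assert_eq!(answer, 42);
    });
}
```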
|
||||
|
||||
|
||||
## 2.2.0 - 2021-03-29
|
||||
* **BREAKING** `ActixStream::{poll_read_ready, poll_write_ready}` methods now return
|
||||
`Ready` object in ok variant. [#293]
|
||||
* Breakage is acceptable since `ActixStream` was not intended to be public.
|
||||
|
||||
[#293]: https://github.com/actix/actix-net/pull/293
|
||||
|
||||
|
||||
## 2.1.0 - 2021-02-24
|
||||
* Add `ActixStream` extension trait to include readiness methods. [#276]
|
||||
* Re-export `tokio::net::TcpSocket` in `net` module [#282]
|
||||
|
||||
[#276]: https://github.com/actix/actix-net/pull/276
|
||||
[#282]: https://github.com/actix/actix-net/pull/282
|
||||
|
||||
|
||||
## 2.0.2 - 2021-02-06
|
||||
* Add `Arbiter::handle` to get a handle of an owned Arbiter. [#274]
|
||||
* Add `System::try_current` for situations where actix may or may not be running a System. [#275]
|
||||
|
||||
[#274]: https://github.com/actix/actix-net/pull/274
|
||||
[#275]: https://github.com/actix/actix-net/pull/275
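As a sketch of the `try_current` addition (grounded in the crate's own tests further down this diff), it returns `None` outside a running System rather than panicking:

```rust
fn main() {
    // No System has been started on this thread yet.
    assert!(actix_rt::System::try_current().is_none());

    // Inside a running System, a handle is available.
    actix_rt::System::new().block_on(async {
        assert!(actix_rt::System::try_current().is_some());
    });
}
```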
|
||||
|
||||
|
||||
## 2.0.1 - 2021-02-06
|
||||
* Expose `JoinError` from Tokio. [#271]
|
||||
|
||||
@@ -48,10 +80,7 @@
|
||||
|
||||
|
||||
## 2.0.0-beta.1 - 2020-12-28
|
||||
### Added
|
||||
* Add `System::attach_to_tokio` method. [#173]
|
||||
|
||||
### Changed
|
||||
* Update `tokio` dependency to `1.0`. [#236]
|
||||
* Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep`
|
||||
to stay aligned with Tokio's naming. [#236]
|
||||
@@ -59,27 +88,19 @@
|
||||
* These methods now accept `&self` when calling. [#236]
|
||||
* Remove `'static` lifetime requirement for `System::run` and `Builder::run`. [#236]
|
||||
* `Arbiter::spawn` now panics when `System` is not in scope. [#207]
|
||||
|
||||
### Fixed
|
||||
* Fix work load issue by removing `PENDING` thread local. [#207]
|
||||
|
||||
[#207]: https://github.com/actix/actix-net/pull/207
|
||||
[#236]: https://github.com/actix/actix-net/pull/236
|
||||
|
||||
## [1.1.1] - 2020-04-30
|
||||
|
||||
### Fixed
|
||||
|
||||
## 1.1.1 - 2020-04-30
|
||||
* Fix memory leak due to [#94] (see [#129] for more detail)
|
||||
|
||||
[#129]: https://github.com/actix/actix-net/issues/129
|
||||
|
||||
## [1.1.0] - 2020-04-08
|
||||
|
||||
**This version has been yanked.**
|
||||
|
||||
### Added
|
||||
|
||||
## 1.1.0 - 2020-04-08 (YANKED)
|
||||
* Expose `System::is_set` to check if current system has been started [#99]
|
||||
* Add `Arbiter::is_running` to check if event loop is running [#124]
|
||||
* Add `Arbiter::local_join` associated function
|
||||
@@ -89,96 +110,57 @@
|
||||
[#99]: https://github.com/actix/actix-net/pull/99
|
||||
[#124]: https://github.com/actix/actix-net/pull/124
|
||||
|
||||
## [1.0.0] - 2019-12-11
|
||||
|
||||
## 1.0.0 - 2019-12-11
|
||||
* Update dependencies
|
||||
|
||||
## [1.0.0-alpha.3] - 2019-12-07
|
||||
|
||||
### Fixed
|
||||
|
||||
## 1.0.0-alpha.3 - 2019-12-07
|
||||
* Migrate to tokio 0.2
|
||||
* Fix compilation on non-unix platforms
|
||||
|
||||
### Changed
|
||||
|
||||
* Migrate to tokio 0.2
|
||||
|
||||
|
||||
## [1.0.0-alpha.2] - 2019-12-02
|
||||
|
||||
Added
|
||||
|
||||
## 1.0.0-alpha.2 - 2019-12-02
|
||||
* Export `main` and `test` attribute macros
|
||||
|
||||
* Export `time` module (re-export of tokio-timer)
|
||||
|
||||
* Export `net` module (re-export of tokio-net)
|
||||
|
||||
|
||||
## [1.0.0-alpha.1] - 2019-11-22
|
||||
|
||||
### Changed
|
||||
|
||||
## 1.0.0-alpha.1 - 2019-11-22
|
||||
* Migrate to std::future and tokio 0.2
|
||||
|
||||
|
||||
## [0.2.6] - 2019-11-14
|
||||
|
||||
### Fixed
|
||||
|
||||
## 0.2.6 - 2019-11-14
|
||||
* Allow to join arbiter's thread. #60
|
||||
* Fix arbiter's thread panic message.
|
||||
|
||||
### Added
|
||||
|
||||
* Allow to join arbiter's thread. #60
|
||||
|
||||
|
||||
## [0.2.5] - 2019-09-02
|
||||
|
||||
### Added
|
||||
|
||||
## 0.2.5 - 2019-09-02
|
||||
* Add arbiter specific storage
|
||||
|
||||
|
||||
## [0.2.4] - 2019-07-17
|
||||
|
||||
### Changed
|
||||
|
||||
## 0.2.4 - 2019-07-17
|
||||
* Avoid a copy of the Future when initializing the Box. #29
|
||||
|
||||
|
||||
## [0.2.3] - 2019-06-22
|
||||
|
||||
### Added
|
||||
|
||||
* Allow to start System using exsiting CurrentThread Handle #22
|
||||
## 0.2.3 - 2019-06-22
|
||||
* Allow to start System using existing CurrentThread Handle #22
|
||||
|
||||
|
||||
## [0.2.2] - 2019-03-28
|
||||
|
||||
### Changed
|
||||
|
||||
## 0.2.2 - 2019-03-28
|
||||
* Moved `blocking` module to `actix-threadpool` crate
|
||||
|
||||
|
||||
## [0.2.1] - 2019-03-11
|
||||
|
||||
### Added
|
||||
|
||||
## 0.2.1 - 2019-03-11
|
||||
* Added `blocking` module
|
||||
|
||||
* Arbiter::exec_fn - execute fn on the arbiter's thread
|
||||
|
||||
* Arbiter::exec - execute fn on the arbiter's thread and wait result
|
||||
* Added `Arbiter::exec_fn` - execute fn on the arbiter's thread
|
||||
* Added `Arbiter::exec` - execute fn on the arbiter's thread and wait result
|
||||
|
||||
|
||||
## [0.2.0] - 2019-03-06
|
||||
|
||||
## 0.2.0 - 2019-03-06
|
||||
* `run` method returns `io::Result<()>`
|
||||
|
||||
* Removed `Handle`
|
||||
|
||||
|
||||
## [0.1.0] - 2018-12-09
|
||||
|
||||
## 0.1.0 - 2018-12-09
|
||||
* Initial release
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "actix-rt"
|
||||
version = "2.0.1"
|
||||
version = "2.3.0"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"Rob Ede <robjtede@icloud.com>",
|
||||
@@ -9,7 +9,6 @@ description = "Tokio-based single-threaded async runtime for the Actix ecosystem
|
||||
keywords = ["async", "futures", "io", "runtime"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-rt"
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
@@ -21,13 +20,17 @@ path = "src/lib.rs"
|
||||
[features]
|
||||
default = ["macros"]
|
||||
macros = ["actix-macros"]
|
||||
io-uring = ["tokio-uring"]
|
||||
|
||||
[dependencies]
|
||||
actix-macros = { version = "0.2.0", optional = true }
|
||||
actix-macros = { version = "0.2.3", optional = true }
|
||||
|
||||
futures-core = { version = "0.3", default-features = false }
|
||||
tokio = { version = "1.2", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
|
||||
tokio = { version = "1.5.1", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
|
||||
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
tokio-uring = { version = "0.1", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "1.2", features = ["full"] }
|
||||
tokio = { version = "1.5.1", features = ["full"] }
|
||||
hyper = { version = "0.14", default-features = false, features = ["server", "tcp", "http1"] }
|
||||
|
@@ -2,4 +2,13 @@
|
||||
|
||||
> Tokio-based single-threaded async runtime for the Actix ecosystem.
|
||||
|
||||
[](https://crates.io/crates/actix-rt)
|
||||
[](https://docs.rs/actix-rt/2.3.0)
|
||||
[](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
|
||||

|
||||
<br />
|
||||
[](https://deps.rs/crate/actix-rt/2.3.0)
|
||||

|
||||
[](https://discord.gg/WghFtEH6Hb)
|
||||
|
||||
See crate documentation for more: https://docs.rs/actix-rt.
|
||||
|
60
actix-rt/examples/multi_thread_system.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
//! An example of how to build a multi-threaded Tokio runtime for an Actix System.
//! It then spawns async tasks that can make use of the Tokio runtime's work stealing.
|
||||
|
||||
use actix_rt::System;
|
||||
|
||||
fn main() {
|
||||
System::with_tokio_rt(|| {
|
||||
// build system with a multi-thread tokio runtime.
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.worker_threads(2)
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap()
|
||||
})
|
||||
.block_on(async_main());
|
||||
}
|
||||
|
||||
// async main function that acts like #[actix_web::main] or #[tokio::main]
|
||||
async fn async_main() {
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
|
||||
// get a handle to system arbiter and spawn async task on it
|
||||
System::current().arbiter().spawn(async {
|
||||
// use tokio::spawn to get inside the context of multi thread tokio runtime
|
||||
let h1 = tokio::spawn(async {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
});
|
||||
|
||||
// work stealing occurs for this task spawn
|
||||
let h2 = tokio::spawn(async {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
});
|
||||
|
||||
h1.await.unwrap();
|
||||
h2.await.unwrap();
|
||||
let _ = tx.send(());
|
||||
});
|
||||
|
||||
rx.await.unwrap();
|
||||
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
let now = std::time::Instant::now();
|
||||
|
||||
// without additional tokio::spawn, all spawned tasks run on single thread
|
||||
System::current().arbiter().spawn(async {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
let _ = tx.send(());
|
||||
});
|
||||
|
||||
// the previously spawned task has blocked the system arbiter thread
|
||||
// so this task will wait for 2 seconds until it can be run
|
||||
System::current().arbiter().spawn(async move {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
assert!(now.elapsed() > std::time::Duration::from_secs(2));
|
||||
});
|
||||
|
||||
rx.await.unwrap();
|
||||
}
|
@@ -9,12 +9,9 @@ use std::{
|
||||
};
|
||||
|
||||
use futures_core::ready;
|
||||
use tokio::{sync::mpsc, task::LocalSet};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::{
|
||||
runtime::{default_tokio_runtime, Runtime},
|
||||
system::{System, SystemCommand},
|
||||
};
|
||||
use crate::system::{System, SystemCommand};
|
||||
|
||||
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
@@ -98,16 +95,19 @@ impl Arbiter {
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if a [System] is not registered on the current thread.
|
||||
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Arbiter {
|
||||
Self::with_tokio_rt(|| {
|
||||
default_tokio_runtime().expect("Cannot create new Arbiter's Runtime.")
|
||||
crate::runtime::default_tokio_runtime()
|
||||
.expect("Cannot create new Arbiter's Runtime.")
|
||||
})
|
||||
}
|
||||
|
||||
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
|
||||
///
|
||||
/// [tokio-runtime]: tokio::runtime::Runtime
|
||||
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
|
||||
#[doc(hidden)]
|
||||
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
|
||||
where
|
||||
@@ -127,7 +127,7 @@ impl Arbiter {
|
||||
.spawn({
|
||||
let tx = tx.clone();
|
||||
move || {
|
||||
let rt = Runtime::from(runtime_factory());
|
||||
let rt = crate::runtime::Runtime::from(runtime_factory());
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
System::set_current(sys);
|
||||
@@ -159,26 +159,83 @@ impl Arbiter {
|
||||
Arbiter { tx, thread_handle }
|
||||
}
|
||||
|
||||
/// Sets up an Arbiter runner in a new System using the provided runtime local task set.
|
||||
pub(crate) fn in_new_system(local: &LocalSet) -> ArbiterHandle {
|
||||
/// Spawn a new Arbiter thread and start its event loop with `tokio-uring` runtime.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if a [System] is not registered on the current thread.
|
||||
#[cfg(all(target_os = "linux", feature = "io-uring"))]
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Arbiter {
|
||||
let sys = System::current();
|
||||
let system_id = sys.id();
|
||||
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
|
||||
|
||||
let thread_handle = thread::Builder::new()
|
||||
.name(name.clone())
|
||||
.spawn({
|
||||
let tx = tx.clone();
|
||||
move || {
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
System::set_current(sys);
|
||||
|
||||
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
|
||||
|
||||
// register arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
|
||||
|
||||
ready_tx.send(()).unwrap();
|
||||
|
||||
// run arbiter event processing loop
|
||||
tokio_uring::start(ArbiterRunner { rx });
|
||||
|
||||
// deregister arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::DeregisterArbiter(arb_id));
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
panic!("Cannot spawn Arbiter's thread: {:?}. {:?}", &name, err)
|
||||
});
|
||||
|
||||
ready_rx.recv().unwrap();
|
||||
|
||||
Arbiter { tx, thread_handle }
|
||||
}
|
||||
|
||||
/// Sets up an Arbiter runner in a new System using the environment's local set.
|
||||
pub(crate) fn in_new_system() -> ArbiterHandle {
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
|
||||
|
||||
local.spawn_local(ArbiterRunner { rx });
|
||||
crate::spawn(ArbiterRunner { rx });
|
||||
|
||||
hnd
|
||||
}
|
||||
|
||||
/// Return a handle to this Arbiter's message sender.
|
||||
pub fn handle(&self) -> ArbiterHandle {
|
||||
ArbiterHandle::new(self.tx.clone())
|
||||
}
|
||||
|
||||
/// Return a handle to the current thread's Arbiter's message sender.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if no Arbiter is running on the current thread.
|
||||
pub fn current() -> ArbiterHandle {
|
||||
HANDLE.with(|cell| match *cell.borrow() {
|
||||
Some(ref addr) => addr.clone(),
|
||||
Some(ref hnd) => hnd.clone(),
|
||||
None => panic!("Arbiter is not running."),
|
||||
})
|
||||
}
|
||||
|
@@ -32,6 +32,10 @@
|
||||
//! arbiter.stop();
|
||||
//! arbiter.join().unwrap();
|
||||
//! ```
|
||||
//!
|
||||
//! # `io-uring` Support
|
||||
//! There is experimental support for using io-uring with this crate by enabling the
|
||||
//! `io-uring` feature. For now, it is semver exempt.
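As a sketch only, a downstream crate would opt in via Cargo features roughly like this (the feature is semver-exempt and Linux-only, as noted above; the version number is illustrative):

```toml
[dependencies]
actix-rt = { version = "2.3", features = ["io-uring"] }
```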
|
||||
|
||||
#![deny(rust_2018_idioms, nonstandard_style)]
|
||||
#![allow(clippy::type_complexity)]
|
||||
@@ -39,6 +43,9 @@
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
#[cfg(all(not(target_os = "linux"), feature = "io-uring"))]
|
||||
compile_error!("io_uring is a linux only feature.");
|
||||
|
||||
use std::future::Future;
|
||||
|
||||
use tokio::task::JoinHandle;
|
||||
@@ -46,7 +53,10 @@ use tokio::task::JoinHandle;
|
||||
// Cannot define a main macro when compiled into test harness.
|
||||
// Workaround for https://github.com/rust-lang/rust/issues/62127.
|
||||
#[cfg(all(feature = "macros", not(test)))]
|
||||
pub use actix_macros::{main, test};
|
||||
pub use actix_macros::main;
|
||||
|
||||
#[cfg(feature = "macros")]
|
||||
pub use actix_macros::test;
|
||||
|
||||
mod arbiter;
|
||||
mod runtime;
|
||||
@@ -70,13 +80,74 @@ pub mod signal {
|
||||
}
|
||||
|
||||
pub mod net {
|
||||
//! TCP/UDP/Unix bindings (Tokio re-exports).
|
||||
//! TCP/UDP/Unix bindings (mostly Tokio re-exports).
|
||||
|
||||
use std::{
|
||||
future::Future,
|
||||
io,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
pub use tokio::io::Ready;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, Interest};
|
||||
pub use tokio::net::UdpSocket;
|
||||
pub use tokio::net::{TcpListener, TcpStream};
|
||||
pub use tokio::net::{TcpListener, TcpSocket, TcpStream};
|
||||
|
||||
#[cfg(unix)]
|
||||
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
|
||||
|
||||
/// Extension trait over async read+write types that can also signal readiness.
|
||||
#[doc(hidden)]
|
||||
pub trait ActixStream: AsyncRead + AsyncWrite + Unpin {
|
||||
/// Poll stream and check read readiness of Self.
|
||||
///
|
||||
/// See [tokio::net::TcpStream::poll_read_ready] for detail on intended use.
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
|
||||
|
||||
/// Poll stream and check write readiness of Self.
|
||||
///
|
||||
/// See [tokio::net::TcpStream::poll_write_ready] for detail on intended use.
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
|
||||
}
|
||||
|
||||
impl ActixStream for TcpStream {
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
let ready = self.ready(Interest::READABLE);
|
||||
tokio::pin!(ready);
|
||||
ready.poll(cx)
|
||||
}
|
||||
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
let ready = self.ready(Interest::WRITABLE);
|
||||
tokio::pin!(ready);
|
||||
ready.poll(cx)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
impl ActixStream for UnixStream {
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
let ready = self.ready(Interest::READABLE);
|
||||
tokio::pin!(ready);
|
||||
ready.poll(cx)
|
||||
}
|
||||
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
let ready = self.ready(Interest::WRITABLE);
|
||||
tokio::pin!(ready);
|
||||
ready.poll(cx)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io: ActixStream + ?Sized> ActixStream for Box<Io> {
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
(**self).poll_read_ready(cx)
|
||||
}
|
||||
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
(**self).poll_write_ready(cx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub mod time {
|
||||
@@ -94,14 +165,41 @@ pub mod task {
|
||||
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
|
||||
}
|
||||
|
||||
/// Spawns a future on the current thread.
|
||||
/// Spawns a future on the current thread as a new task.
|
||||
///
|
||||
/// If not immediately awaited, the task can be cancelled using [`JoinHandle::abort`].
|
||||
///
|
||||
/// The provided future is spawned as a new task; therefore, panics are caught.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if Actix system is not running.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// # use std::time::Duration;
|
||||
/// # actix_rt::Runtime::new().unwrap().block_on(async {
|
||||
/// // task resolves successfully
|
||||
/// assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
|
||||
///
|
||||
/// // task panics
|
||||
/// assert!(actix_rt::spawn(async {
|
||||
/// panic!("panic is caught at task boundary");
|
||||
/// })
|
||||
/// .await
|
||||
/// .unwrap_err()
|
||||
/// .is_panic());
|
||||
///
|
||||
/// // task is cancelled before completion
|
||||
/// let handle = actix_rt::spawn(actix_rt::time::sleep(Duration::from_secs(100)));
|
||||
/// handle.abort();
|
||||
/// assert!(handle.await.unwrap_err().is_cancelled());
|
||||
/// # });
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn spawn<Fut>(f: Fut) -> JoinHandle<()>
|
||||
pub fn spawn<Fut>(f: Fut) -> JoinHandle<Fut::Output>
|
||||
where
|
||||
Fut: Future<Output = ()> + 'static,
|
||||
Fut: Future + 'static,
|
||||
Fut::Output: 'static,
|
||||
{
|
||||
tokio::task::spawn_local(f)
|
||||
}
|
||||
|
@@ -31,11 +31,6 @@ impl Runtime {
|
||||
})
|
||||
}
|
||||
|
||||
/// Reference to local task set.
|
||||
pub(crate) fn local_set(&self) -> &LocalSet {
|
||||
&self.local
|
||||
}
|
||||
|
||||
/// Offload a future onto the single-threaded runtime.
|
||||
///
|
||||
/// The returned join handle can be used to await the future's result.
|
||||
|
@@ -54,7 +54,7 @@ impl System {
|
||||
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
|
||||
|
||||
let rt = Runtime::from(runtime_factory());
|
||||
let sys_arbiter = Arbiter::in_new_system(rt.local_set());
|
||||
let sys_arbiter = rt.block_on(async { Arbiter::in_new_system() });
|
||||
let system = System::construct(sys_tx, sys_arbiter.clone());
|
||||
|
||||
system
|
||||
@@ -100,6 +100,15 @@ impl System {
|
||||
})
|
||||
}
|
||||
|
||||
/// Try to get current running system.
|
||||
///
|
||||
/// Returns `None` if no System has been started.
|
||||
///
|
||||
/// Contrary to `current`, this never panics.
|
||||
pub fn try_current() -> Option<System> {
|
||||
CURRENT.with(|cell| cell.borrow().clone())
|
||||
}
|
||||
|
||||
/// Get a handle to the System's initial [Arbiter].
|
||||
pub fn arbiter(&self) -> &ArbiterHandle {
|
||||
&self.arbiter_handle
|
||||
@@ -146,6 +155,7 @@ impl System {
|
||||
pub struct SystemRunner {
|
||||
rt: Runtime,
|
||||
stop_rx: oneshot::Receiver<i32>,
|
||||
#[allow(dead_code)]
|
||||
system: System,
|
||||
}
|
||||
|
||||
|
17
actix-rt/tests/test-macro-import-conflict.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
//! Checks that test macro does not cause problems in the presence of imports named "test" that
|
||||
//! could be either a module with test items or the "test with runtime" macro itself.
|
||||
//!
|
||||
//! Before actix/actix-net#399 was implemented, this macro was running twice. The first run output
//! `#[test]`, which then got run again since it was in scope.
|
||||
//!
|
||||
//! Prevented by using the fully-qualified test marker (`#[::core::prelude::v1::test]`).
|
||||
|
||||
#![cfg(feature = "macros")]
|
||||
|
||||
use actix_rt::time as test;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_naming_conflict() {
|
||||
use test as time;
|
||||
time::sleep(std::time::Duration::from_millis(2)).await;
|
||||
}
|
@@ -1,14 +1,11 @@
|
||||
use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::channel,
|
||||
Arc,
|
||||
},
|
||||
future::Future,
|
||||
sync::mpsc::channel,
|
||||
thread,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use actix_rt::{Arbiter, System};
|
||||
use actix_rt::{task::JoinError, Arbiter, System};
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
#[test]
|
||||
@@ -122,6 +119,28 @@ fn arbiter_spawn_fn_runs() {
|
||||
arbiter.join().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn arbiter_handle_spawn_fn_runs() {
|
||||
let sys = System::new();
|
||||
|
||||
let (tx, rx) = channel::<u32>();
|
||||
|
||||
let arbiter = Arbiter::new();
|
||||
let handle = arbiter.handle();
|
||||
drop(arbiter);
|
||||
|
||||
handle.spawn_fn(move || {
|
||||
tx.send(42).unwrap();
|
||||
System::current().stop()
|
||||
});
|
||||
|
||||
let num = rx.recv_timeout(Duration::from_secs(2)).unwrap();
|
||||
assert_eq!(num, 42);
|
||||
|
||||
handle.stop();
|
||||
sys.run().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn arbiter_drop_no_panic_fn() {
|
||||
let _ = System::new();
|
||||
@@ -198,8 +217,8 @@ fn system_stop_stops_arbiters() {
|
||||
System::current().stop();
|
||||
sys.run().unwrap();
|
||||
|
||||
// account for slightly slow thread de-spawns (only observed on windows)
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
// account for slightly slow thread de-spawns
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
|
||||
// arbiter should be dead and return false
|
||||
assert!(!Arbiter::current().spawn_fn(|| {}));
|
||||
@@ -208,6 +227,7 @@ fn system_stop_stops_arbiters() {
|
||||
arb.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn new_system_with_tokio() {
|
||||
let (tx, rx) = channel();
|
||||
@@ -240,8 +260,14 @@ fn new_system_with_tokio() {
|
||||
assert_eq!(rx.recv().unwrap(), 42);
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn new_arbiter_with_tokio() {
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
};
|
||||
|
||||
let _ = System::new();
|
||||
|
||||
let arb = Arbiter::with_tokio_rt(|| {
|
||||
@@ -264,5 +290,68 @@ fn new_arbiter_with_tokio() {
|
||||
|
||||
arb.join().unwrap();
|
||||
|
||||
assert_eq!(false, counter.load(Ordering::SeqCst));
|
||||
assert!(!counter.load(Ordering::SeqCst));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn try_current_no_system() {
|
||||
assert!(System::try_current().is_none())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn try_current_with_system() {
|
||||
System::new().block_on(async { assert!(System::try_current().is_some()) });
|
||||
}
|
||||
|
||||
#[allow(clippy::unit_cmp)]
|
||||
#[test]
|
||||
fn spawn_local() {
|
||||
System::new().block_on(async {
|
||||
// demonstrate that spawn -> R is strictly more capable than spawn -> ()
|
||||
|
||||
assert_eq!(actix_rt::spawn(async {}).await.unwrap(), ());
|
||||
assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
|
||||
assert!(actix_rt::spawn(async { panic!("") }).await.is_err());
|
||||
|
||||
actix_rt::spawn(async { tokio::time::sleep(Duration::from_millis(50)).await })
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
fn g<F: Future<Output = Result<(), JoinError>>>(_f: F) {}
|
||||
g(actix_rt::spawn(async {}));
|
||||
// g(actix_rt::spawn(async { 1 })); // compile err
|
||||
|
||||
fn h<F: Future<Output = Result<R, JoinError>>, R>(_f: F) {}
|
||||
h(actix_rt::spawn(async {}));
|
||||
h(actix_rt::spawn(async { 1 }));
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(all(target_os = "linux", feature = "io-uring"))]
|
||||
#[test]
|
||||
fn tokio_uring_arbiter() {
|
||||
let system = System::new();
|
||||
let (tx, rx) = std::sync::mpsc::channel();
|
||||
|
||||
Arbiter::new().spawn(async move {
|
||||
let handle = actix_rt::spawn(async move {
|
||||
let f = tokio_uring::fs::File::create("test.txt").await.unwrap();
|
||||
let buf = b"Hello World!";
|
||||
|
||||
let (res, _) = f.write_at(&buf[..], 0).await;
|
||||
assert!(res.is_ok());
|
||||
|
||||
f.sync_all().await.unwrap();
|
||||
f.close().await.unwrap();
|
||||
|
||||
std::fs::remove_file("test.txt").unwrap();
|
||||
});
|
||||
|
||||
handle.await.unwrap();
|
||||
tx.send(true).unwrap();
|
||||
});
|
||||
|
||||
assert!(rx.recv().unwrap());
|
||||
|
||||
drop(system);
|
||||
}
|
||||
|
@@ -1,13 +1,45 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2021-xx-xx
* Minimum supported Rust version (MSRV) is now 1.52.


## 2.0.0-beta.6 - 2021-10-11
* Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux. [#374]
* Server no longer listens to the `SIGHUP` signal. Previously, the signal received was not used but
  did block subsequent exit signals from working. [#389]
* Remove `config` module. `ServiceConfig`, `ServiceRuntime` public types are removed due to
  this change. [#349]
* Remove `ServerBuilder::configure`. [#349]

[#374]: https://github.com/actix/actix-net/pull/374
[#349]: https://github.com/actix/actix-net/pull/349
[#389]: https://github.com/actix/actix-net/pull/389
|
||||
|
||||
|
||||
## 2.0.0-beta.5 - 2021-04-20
* Server shutdown notifies all workers to exit regardless of whether the shutdown is graceful. This
  causes all workers to shut down immediately in the force-shutdown case. [#333]

[#333]: https://github.com/actix/actix-net/pull/333
|
||||
|
||||
|
||||
## 2.0.0-beta.4 - 2021-04-01
|
||||
* Prevent panic when `shutdown_timeout` is very large. [f9262db]
|
||||
|
||||
[f9262db]: https://github.com/actix/actix-net/commit/f9262db
|
||||
|
||||
|
||||
## 2.0.0-beta.3 - 2021-02-06
|
||||
* Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`. [#246]
|
||||
* Add retry for EINTR(`io::Interrupted`) in `Accept`'s poll loop. [#264]
|
||||
* Add `ServerBuilder::worker_max_blocking_threads` for customize blocking thread pool. [#265]
|
||||
* Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop. [#264]
|
||||
* Add `ServerBuilder::worker_max_blocking_threads` to customize blocking thread pool size. [#265]
|
||||
* Update `actix-rt` to `2.0.0`. [#273]
|
||||
|
||||
[#246]: https://github.com/actix/actix-net/pull/246
|
||||
[#264]: https://github.com/actix/actix-net/pull/264
|
||||
[#265]: https://github.com/actix/actix-net/pull/265
|
||||
[#273]: https://github.com/actix/actix-net/pull/273
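A rough, hypothetical sketch of how the builder knobs mentioned above might be used together (the service, name, and address are placeholders, not taken from this diff):

```rust
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        // number of worker threads
        .workers(2)
        // knob added in #265: cap on each worker's blocking thread pool
        .worker_max_blocking_threads(64)
        .bind("example", ("127.0.0.1", 8080), || {
            // trivial service that drops the connection immediately
            fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run()
        .await
}
```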
|
||||
|
||||
|
||||
## 2.0.0-beta.2 - 2021-01-03
|
||||
|
@@ -1,18 +1,15 @@
|
||||
[package]
|
||||
name = "actix-server"
|
||||
version = "2.0.0-beta.2"
|
||||
version = "2.0.0-beta.6"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"fakeshadow <24548779@qq.com>",
|
||||
]
|
||||
description = "General purpose TCP server built for the Actix ecosystem"
|
||||
keywords = ["network", "framework", "async", "futures"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-server"
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
exclude = [".gitignore", ".cargo/config"]
|
||||
edition = "2018"
|
||||
|
||||
[lib]
|
||||
@@ -21,23 +18,24 @@ path = "src/lib.rs"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
io-uring = ["actix-rt/io-uring"]
|
||||
|
||||
[dependencies]
|
||||
actix-codec = "0.4.0-beta.1"
|
||||
actix-rt = { version = "2.0.0", default-features = false }
|
||||
actix-service = "2.0.0-beta.4"
|
||||
actix-utils = "3.0.0-beta.1"
|
||||
actix-service = "2.0.0"
|
||||
actix-utils = "3.0.0"
|
||||
|
||||
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
|
||||
log = "0.4"
|
||||
mio = { version = "0.7.6", features = ["os-poll", "net"] }
|
||||
num_cpus = "1.13"
|
||||
slab = "0.4"
|
||||
tokio = { version = "1", features = ["sync"] }
|
||||
tokio = { version = "1.5.1", features = ["sync"] }
|
||||
|
||||
[dev-dependencies]
|
||||
actix-codec = "0.4.0"
|
||||
actix-rt = "2.0.0"
|
||||
|
||||
bytes = "1"
|
||||
env_logger = "0.8"
|
||||
env_logger = "0.9"
|
||||
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
|
||||
tokio = { version = "1", features = ["io-util"] }
|
||||
tokio = { version = "1.5.1", features = ["io-util"] }
|
||||
|
@@ -9,15 +9,17 @@
|
||||
//! Start typing. When you press enter, the typed line will be echoed back. The server will log
//! the length of each line it echoes and the total size of data sent when the connection is closed.
|
||||
|
||||
use std::sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
use std::{
|
||||
env, io,
|
||||
sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
use std::{env, io};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_server::Server;
|
||||
use actix_service::pipeline_factory;
|
||||
use actix_service::{fn_service, ServiceFactoryExt as _};
|
||||
use bytes::BytesMut;
|
||||
use futures_util::future::ok;
|
||||
use log::{error, info};
|
||||
@@ -25,7 +27,7 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env::set_var("RUST_LOG", "actix=trace,basic=trace");
|
||||
env::set_var("RUST_LOG", "info");
|
||||
env_logger::init();
|
||||
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
@@ -41,7 +43,7 @@ async fn main() -> io::Result<()> {
|
||||
let count = Arc::clone(&count);
|
||||
let num2 = Arc::clone(&count);
|
||||
|
||||
pipeline_factory(move |mut stream: TcpStream| {
|
||||
fn_service(move |mut stream: TcpStream| {
|
||||
let count = Arc::clone(&count);
|
||||
|
||||
async move {
|
@@ -2,28 +2,24 @@ use std::time::Duration;
|
||||
use std::{io, thread};
|
||||
|
||||
use actix_rt::{
|
||||
time::{sleep_until, Instant},
|
||||
time::{sleep, Instant},
|
||||
System,
|
||||
};
|
||||
use log::{error, info};
|
||||
use mio::{Interest, Poll, Token as MioToken};
|
||||
use slab::Slab;
|
||||
|
||||
use crate::server::Server;
|
||||
use crate::socket::{MioListener, SocketAddr};
|
||||
use crate::socket::MioListener;
|
||||
use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
|
||||
use crate::worker::{Conn, WorkerHandle};
|
||||
use crate::Token;
|
||||
use crate::worker::{Conn, WorkerHandleAccept};
|
||||
|
||||
struct ServerSocketInfo {
|
||||
// addr for socket. mainly used for logging.
|
||||
addr: SocketAddr,
|
||||
// be ware this is the crate token for identify socket and should not be confused with
|
||||
// mio::Token
|
||||
token: Token,
|
||||
token: usize,
|
||||
|
||||
lst: MioListener,
|
||||
// timeout is used to mark the deadline when this socket's listener should be registered again
|
||||
// after an error.
|
||||
|
||||
/// Timeout is used to mark the deadline when this socket's listener should be registered again
|
||||
/// after an error.
|
||||
timeout: Option<Instant>,
|
||||
}
|
||||
|
||||
@@ -62,8 +58,8 @@ impl AcceptLoop {
|
||||
|
||||
pub(crate) fn start(
|
||||
&mut self,
|
||||
socks: Vec<(Token, MioListener)>,
|
||||
handles: Vec<WorkerHandle>,
|
||||
socks: Vec<(usize, MioListener)>,
|
||||
handles: Vec<WorkerHandleAccept>,
|
||||
) {
|
||||
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
|
||||
let poll = self.poll.take().unwrap();
|
||||
@@ -77,10 +73,66 @@ impl AcceptLoop {
|
||||
struct Accept {
|
||||
poll: Poll,
|
||||
waker: WakerQueue,
|
||||
handles: Vec<WorkerHandle>,
|
||||
handles: Vec<WorkerHandleAccept>,
|
||||
srv: Server,
|
||||
next: usize,
|
||||
backpressure: bool,
|
||||
avail: Availability,
|
||||
paused: bool,
|
||||
}
|
||||
|
||||
/// Array of u128 with every bit as marker for a worker handle's availability.
|
||||
#[derive(Debug, Default)]
|
||||
struct Availability([u128; 4]);
|
||||
|
||||
impl Availability {
|
||||
/// Check if any worker handle is available
|
||||
#[inline(always)]
|
||||
fn available(&self) -> bool {
|
||||
self.0.iter().any(|a| *a != 0)
|
||||
}
|
||||
|
||||
/// Check if worker handle is available by index
|
||||
#[inline(always)]
|
||||
fn get_available(&self, idx: usize) -> bool {
|
||||
let (offset, idx) = Self::offset(idx);
|
||||
|
||||
self.0[offset] & (1 << idx as u128) != 0
|
||||
}
|
||||
|
||||
/// Set worker handle available state by index.
|
||||
fn set_available(&mut self, idx: usize, avail: bool) {
|
||||
let (offset, idx) = Self::offset(idx);
|
||||
|
||||
let off = 1 << idx as u128;
|
||||
if avail {
|
||||
self.0[offset] |= off;
|
||||
} else {
|
||||
self.0[offset] &= !off
|
||||
}
|
||||
}
|
||||
|
||||
/// Set all worker handles to available state.
|
||||
/// This would result in a re-check on all workers' availability.
|
||||
fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
|
||||
handles.iter().for_each(|handle| {
|
||||
self.set_available(handle.idx(), true);
|
||||
})
|
||||
}
|
||||
|
||||
/// Get offset and adjusted index of given worker handle index.
|
||||
fn offset(idx: usize) -> (usize, usize) {
|
||||
if idx < 128 {
|
||||
(0, idx)
|
||||
} else if idx < 128 * 2 {
|
||||
(1, idx - 128)
|
||||
} else if idx < 128 * 3 {
|
||||
(2, idx - 128 * 2)
|
||||
} else if idx < 128 * 4 {
|
||||
(3, idx - 128 * 3)
|
||||
} else {
|
||||
panic!("Max WorkerHandle count is 512")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// This function defines errors that are per-connection. Which basically
|
||||
@@ -100,9 +152,9 @@ impl Accept {
|
||||
pub(crate) fn start(
|
||||
poll: Poll,
|
||||
waker: WakerQueue,
|
||||
socks: Vec<(Token, MioListener)>,
|
||||
socks: Vec<(usize, MioListener)>,
|
||||
srv: Server,
|
||||
handles: Vec<WorkerHandle>,
|
||||
handles: Vec<WorkerHandleAccept>,
|
||||
) {
|
||||
// Accept runs in its own thread and would want to spawn additional futures to current
|
||||
// actix system.
|
||||
@@ -111,9 +163,10 @@ impl Accept {
|
||||
.name("actix-server accept loop".to_owned())
|
||||
.spawn(move || {
|
||||
System::set_current(sys);
|
||||
let (mut accept, sockets) =
|
||||
let (mut accept, mut sockets) =
|
||||
Accept::new_with_sockets(poll, waker, socks, handles, srv);
|
||||
accept.poll_with(sockets);
|
||||
|
||||
accept.poll_with(&mut sockets);
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
@@ -121,29 +174,30 @@ impl Accept {
|
||||
fn new_with_sockets(
|
||||
poll: Poll,
|
||||
waker: WakerQueue,
|
||||
socks: Vec<(Token, MioListener)>,
|
||||
handles: Vec<WorkerHandle>,
|
||||
socks: Vec<(usize, MioListener)>,
|
||||
handles: Vec<WorkerHandleAccept>,
|
||||
srv: Server,
|
||||
) -> (Accept, Slab<ServerSocketInfo>) {
|
||||
let mut sockets = Slab::new();
|
||||
for (hnd_token, mut lst) in socks.into_iter() {
|
||||
let addr = lst.local_addr();
|
||||
) -> (Accept, Vec<ServerSocketInfo>) {
|
||||
let sockets = socks
|
||||
.into_iter()
|
||||
.map(|(token, mut lst)| {
|
||||
// Start listening for incoming connections
|
||||
poll.registry()
|
||||
.register(&mut lst, MioToken(token), Interest::READABLE)
|
||||
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
|
||||
|
||||
let entry = sockets.vacant_entry();
|
||||
let token = entry.key();
|
||||
ServerSocketInfo {
|
||||
token,
|
||||
lst,
|
||||
timeout: None,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Start listening for incoming connections
|
||||
poll.registry()
|
||||
.register(&mut lst, MioToken(token), Interest::READABLE)
|
||||
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
|
||||
let mut avail = Availability::default();
|
||||
|
||||
entry.insert(ServerSocketInfo {
|
||||
addr,
|
||||
token: hnd_token,
|
||||
lst,
|
||||
timeout: None,
|
||||
});
|
||||
}
|
||||
// Assume all handles are avail at construct time.
|
||||
avail.set_available_all(&handles);
|
||||
|
||||
let accept = Accept {
|
||||
poll,
|
||||
@@ -151,278 +205,383 @@ impl Accept {
|
||||
handles,
|
||||
srv,
|
||||
next: 0,
|
||||
backpressure: false,
|
||||
avail,
|
||||
paused: false,
|
||||
};
|
||||
|
||||
(accept, sockets)
|
||||
}
|
||||
|
||||
fn poll_with(&mut self, mut sockets: Slab<ServerSocketInfo>) {
|
||||
fn poll_with(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
let mut events = mio::Events::with_capacity(128);
|
||||
|
||||
loop {
|
||||
if let Err(e) = self.poll.poll(&mut events, None) {
|
||||
match e.kind() {
|
||||
std::io::ErrorKind::Interrupted => {
|
||||
continue;
|
||||
}
|
||||
_ => {
|
||||
panic!("Poll error: {}", e);
|
||||
}
|
||||
io::ErrorKind::Interrupted => {}
|
||||
_ => panic!("Poll error: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
for event in events.iter() {
|
||||
let token = event.token();
|
||||
match token {
|
||||
// This is a loop because interests for command from previous version was
|
||||
// a loop that would try to drain the command channel. It's yet unknown
|
||||
// if it's necessary/good practice to actively drain the waker queue.
|
||||
WAKER_TOKEN => 'waker: loop {
|
||||
// take guard with every iteration so no new interest can be added
|
||||
// until the current task is done.
|
||||
let mut guard = self.waker.guard();
|
||||
match guard.pop_front() {
|
||||
// worker notify it becomes available. we may want to recover
|
||||
// from backpressure.
|
||||
Some(WakerInterest::WorkerAvailable) => {
|
||||
drop(guard);
|
||||
self.maybe_backpressure(&mut sockets, false);
|
||||
}
|
||||
// a new worker thread is made and it's handle would be added
|
||||
// to Accept
|
||||
Some(WakerInterest::Worker(handle)) => {
|
||||
drop(guard);
|
||||
// maybe we want to recover from a backpressure.
|
||||
self.maybe_backpressure(&mut sockets, false);
|
||||
self.handles.push(handle);
|
||||
}
|
||||
// got timer interest and it's time to try register socket(s)
|
||||
// again.
|
||||
Some(WakerInterest::Timer) => {
|
||||
drop(guard);
|
||||
self.process_timer(&mut sockets)
|
||||
}
|
||||
Some(WakerInterest::Pause) => {
|
||||
drop(guard);
|
||||
sockets.iter_mut().for_each(|(_, info)| {
|
||||
match self.deregister(info) {
|
||||
Ok(_) => info!(
|
||||
"Paused accepting connections on {}",
|
||||
info.addr
|
||||
),
|
||||
Err(e) => {
|
||||
error!("Can not deregister server socket {}", e)
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
Some(WakerInterest::Resume) => {
|
||||
drop(guard);
|
||||
sockets.iter_mut().for_each(|(token, info)| {
|
||||
self.register_logged(token, info);
|
||||
});
|
||||
}
|
||||
Some(WakerInterest::Stop) => {
|
||||
return self.deregister_all(&mut sockets);
|
||||
}
|
||||
// waker queue is drained.
|
||||
None => {
|
||||
// Reset the WakerQueue before break so it does not grow
|
||||
// infinitely.
|
||||
WakerQueue::reset(&mut guard);
|
||||
break 'waker;
|
||||
}
|
||||
WAKER_TOKEN => {
|
||||
let exit = self.handle_waker(sockets);
|
||||
if exit {
|
||||
info!("Accept is stopped.");
|
||||
return;
|
||||
}
|
||||
},
|
||||
}
|
||||
_ => {
|
||||
let token = usize::from(token);
|
||||
self.accept(&mut sockets, token);
|
||||
self.accept(sockets, token);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_timer(&self, sockets: &mut Slab<ServerSocketInfo>) {
|
||||
let now = Instant::now();
|
||||
sockets.iter_mut().for_each(|(token, info)| {
|
||||
// only the ServerSocketInfo have an associate timeout value was de registered.
|
||||
if let Some(inst) = info.timeout.take() {
|
||||
if now > inst {
|
||||
self.register_logged(token, info);
|
||||
} else {
|
||||
info.timeout = Some(inst);
|
||||
fn handle_waker(&mut self, sockets: &mut [ServerSocketInfo]) -> bool {
|
||||
// This is a loop because, in the previous version, interest in commands was handled by a loop
// that would try to drain the command channel. It's yet unknown if it's necessary/good practice
// to actively drain the waker queue.
|
||||
loop {
|
||||
// take guard with every iteration so no new interest can be added
|
||||
// until the current task is done.
|
||||
let mut guard = self.waker.guard();
|
||||
match guard.pop_front() {
|
||||
// worker notifies that it has become available.
|
||||
Some(WakerInterest::WorkerAvailable(idx)) => {
|
||||
drop(guard);
|
||||
|
||||
self.avail.set_available(idx, true);
|
||||
|
||||
if !self.paused {
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
}
|
||||
// a new worker thread is made and it's handle would be added to Accept
|
||||
Some(WakerInterest::Worker(handle)) => {
|
||||
drop(guard);
|
||||
|
||||
self.avail.set_available(handle.idx(), true);
|
||||
self.handles.push(handle);
|
||||
|
||||
if !self.paused {
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
}
|
||||
// got timer interest and it's time to try register socket(s) again
|
||||
Some(WakerInterest::Timer) => {
|
||||
drop(guard);
|
||||
|
||||
self.process_timer(sockets)
|
||||
}
|
||||
Some(WakerInterest::Pause) => {
|
||||
drop(guard);
|
||||
|
||||
if !self.paused {
|
||||
self.paused = true;
|
||||
|
||||
self.deregister_all(sockets);
|
||||
}
|
||||
}
|
||||
Some(WakerInterest::Resume) => {
|
||||
drop(guard);
|
||||
|
||||
if self.paused {
|
||||
self.paused = false;
|
||||
|
||||
sockets.iter_mut().for_each(|info| {
|
||||
self.register_logged(info);
|
||||
});
|
||||
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
}
|
||||
Some(WakerInterest::Stop) => {
|
||||
if !self.paused {
|
||||
self.deregister_all(sockets);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
// waker queue is drained
|
||||
None => {
|
||||
// Reset the WakerQueue before break so it does not grow infinitely
|
||||
WakerQueue::reset(&mut guard);
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn process_timer(&self, sockets: &mut [ServerSocketInfo]) {
|
||||
let now = Instant::now();
|
||||
sockets
|
||||
.iter_mut()
|
||||
// Only sockets that had an associated timeout were deregistered.
|
||||
.filter(|info| info.timeout.is_some())
|
||||
.for_each(|info| {
|
||||
let inst = info.timeout.take().unwrap();
|
||||
|
||||
if now < inst {
|
||||
info.timeout = Some(inst);
|
||||
} else if !self.paused {
|
||||
self.register_logged(info);
|
||||
}
|
||||
|
||||
// Drop the timeout if server is paused and socket timeout is expired.
|
||||
// When server recovers from pause it will register all sockets without
|
||||
// a timeout value so this socket register will be delayed till then.
|
||||
});
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
let token = MioToken(info.token);
|
||||
self.poll
|
||||
.registry()
|
||||
.register(&mut info.lst, MioToken(token), Interest::READABLE)
|
||||
.register(&mut info.lst, token, Interest::READABLE)
|
||||
}
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
// On windows, calling register without deregister cause an error.
|
||||
// See https://github.com/actix/actix-web/issues/905
|
||||
// Calling reregister seems to fix the issue.
|
||||
let token = MioToken(info.token);
|
||||
self.poll
|
||||
.registry()
|
||||
.register(&mut info.lst, mio::Token(token), Interest::READABLE)
|
||||
.register(&mut info.lst, token, Interest::READABLE)
|
||||
.or_else(|_| {
|
||||
self.poll.registry().reregister(
|
||||
&mut info.lst,
|
||||
mio::Token(token),
|
||||
Interest::READABLE,
|
||||
)
|
||||
self.poll
|
||||
.registry()
|
||||
.reregister(&mut info.lst, token, Interest::READABLE)
|
||||
})
|
||||
}
|
||||
|
||||
fn register_logged(&self, token: usize, info: &mut ServerSocketInfo) {
|
||||
match self.register(token, info) {
|
||||
Ok(_) => info!("Resume accepting connections on {}", info.addr),
|
||||
fn register_logged(&self, info: &mut ServerSocketInfo) {
|
||||
match self.register(info) {
|
||||
Ok(_) => info!("Resume accepting connections on {}", info.lst.local_addr()),
|
||||
Err(e) => error!("Can not register server socket {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
fn deregister(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
self.poll.registry().deregister(&mut info.lst)
|
||||
}
|
||||
|
||||
fn deregister_all(&self, sockets: &mut Slab<ServerSocketInfo>) {
|
||||
sockets.iter_mut().for_each(|(_, info)| {
|
||||
info!("Accepting connections on {} has been paused", info.addr);
|
||||
let _ = self.deregister(info);
|
||||
});
|
||||
}
|
||||
|
||||
fn maybe_backpressure(&mut self, sockets: &mut Slab<ServerSocketInfo>, on: bool) {
|
||||
if self.backpressure {
|
||||
if !on {
|
||||
self.backpressure = false;
|
||||
for (token, info) in sockets.iter_mut() {
|
||||
if info.timeout.is_some() {
|
||||
// socket will attempt to re-register itself when its timeout completes
|
||||
continue;
|
||||
}
|
||||
self.register_logged(token, info);
|
||||
}
|
||||
fn deregister_logged(&self, info: &mut ServerSocketInfo) {
|
||||
match self.poll.registry().deregister(&mut info.lst) {
|
||||
Ok(_) => info!("Paused accepting connections on {}", info.lst.local_addr()),
|
||||
Err(e) => {
|
||||
error!("Can not deregister server socket {}", e)
|
||||
}
|
||||
} else if on {
|
||||
self.backpressure = true;
|
||||
self.deregister_all(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut msg: Conn) {
|
||||
if self.backpressure {
|
||||
while !self.handles.is_empty() {
|
||||
match self.handles[self.next].send(msg) {
|
||||
Ok(_) => {
|
||||
self.set_next();
|
||||
break;
|
||||
}
|
||||
Err(tmp) => {
|
||||
// worker lost contact and could be gone. a message is sent to
|
||||
// `ServerBuilder` future to notify it a new worker should be made.
|
||||
// after that remove the fault worker.
|
||||
self.srv.worker_faulted(self.handles[self.next].idx);
|
||||
msg = tmp;
|
||||
self.handles.swap_remove(self.next);
|
||||
if self.handles.is_empty() {
|
||||
error!("No workers");
|
||||
return;
|
||||
} else if self.handles.len() <= self.next {
|
||||
self.next = 0;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let mut idx = 0;
|
||||
while idx < self.handles.len() {
|
||||
idx += 1;
|
||||
if self.handles[self.next].available() {
|
||||
match self.handles[self.next].send(msg) {
|
||||
Ok(_) => {
|
||||
self.set_next();
|
||||
return;
|
||||
}
|
||||
// worker lost contact and could be gone. a message is sent to
|
||||
// `ServerBuilder` future to notify it a new worker should be made.
|
||||
// after that remove the fault worker and enter backpressure if necessary.
|
||||
Err(tmp) => {
|
||||
self.srv.worker_faulted(self.handles[self.next].idx);
|
||||
msg = tmp;
|
||||
self.handles.swap_remove(self.next);
|
||||
if self.handles.is_empty() {
|
||||
error!("No workers");
|
||||
self.maybe_backpressure(sockets, true);
|
||||
return;
|
||||
} else if self.handles.len() <= self.next {
|
||||
self.next = 0;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
fn deregister_all(&self, sockets: &mut [ServerSocketInfo]) {
|
||||
// This is a best-effort implementation with the following limitation:
//
// Every ServerSocketInfo with an associated timeout will be skipped and its
// timeout is removed in the process.
//
// Therefore a WakerInterest::Pause followed by a WakerInterest::Resume within a
// very short gap (less than 500ms) would cause all timing-out ServerSocketInfos
// to be re-registered before the expected timing.
sockets
.iter_mut()
// Take all timeouts.
// This prevents the Accept::process_timer method from re-registering a socket afterwards.
.map(|info| (info.timeout.take(), info))
// Socket info with a timeout is already deregistered, so skip it.
.filter(|(timeout, _)| timeout.is_none())
.for_each(|(_, info)| self.deregister_logged(info));
}
|
||||
|
||||
// Send connection to worker and handle error.
|
||||
fn send_connection(&mut self, conn: Conn) -> Result<(), Conn> {
|
||||
let next = self.next();
|
||||
match next.send(conn) {
|
||||
Ok(_) => {
|
||||
// Increment counter of WorkerHandle.
// Set the worker to unavailable when it hits max (i.e. when `inc_counter` returns false).
|
||||
if !next.inc_counter() {
|
||||
let idx = next.idx();
|
||||
self.avail.set_available(idx, false);
|
||||
}
|
||||
self.set_next();
|
||||
Ok(())
|
||||
}
|
||||
Err(conn) => {
|
||||
// Worker thread has errored and could be gone.
// Remove worker handle and notify `ServerBuilder`.
|
||||
self.remove_next();
|
||||
|
||||
if self.handles.is_empty() {
|
||||
error!("No workers");
|
||||
// All workers are gone and Conn is nowhere to be sent.
|
||||
// Treat this situation as Ok and drop Conn.
|
||||
return Ok(());
|
||||
} else if self.handles.len() <= self.next {
|
||||
self.next = 0;
|
||||
}
|
||||
|
||||
Err(conn)
|
||||
}
|
||||
// enable backpressure
|
||||
self.maybe_backpressure(sockets, true);
|
||||
self.accept_one(sockets, msg);
|
||||
}
|
||||
}
|
||||
|
||||
// set next worker handle that would accept work.
|
||||
fn accept_one(&mut self, mut conn: Conn) {
|
||||
loop {
|
||||
let next = self.next();
|
||||
let idx = next.idx();
|
||||
|
||||
if self.avail.get_available(idx) {
|
||||
match self.send_connection(conn) {
|
||||
Ok(_) => return,
|
||||
Err(c) => conn = c,
|
||||
}
|
||||
} else {
|
||||
self.avail.set_available(idx, false);
|
||||
self.set_next();
|
||||
|
||||
if !self.avail.available() {
|
||||
while let Err(c) = self.send_connection(conn) {
|
||||
conn = c;
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn accept(&mut self, sockets: &mut [ServerSocketInfo], token: usize) {
|
||||
while self.avail.available() {
|
||||
let info = &mut sockets[token];
|
||||
|
||||
match info.lst.accept() {
|
||||
Ok(io) => {
|
||||
let conn = Conn { io, token };
|
||||
self.accept_one(conn);
|
||||
}
|
||||
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
|
||||
Err(ref e) if connection_error(e) => continue,
|
||||
Err(e) => {
|
||||
error!("Error accepting connection: {}", e);
|
||||
|
||||
// deregister listener temporary
|
||||
self.deregister_logged(info);
|
||||
|
||||
// Sleep after the error. Write the timeout to the socket info, as the poll will
// later need it to mark which socket's listener should be re-registered and when.
|
||||
info.timeout = Some(Instant::now() + Duration::from_millis(500));
|
||||
|
||||
// after the sleep a Timer interest is sent to Accept Poll
|
||||
let waker = self.waker.clone();
|
||||
System::current().arbiter().spawn(async move {
|
||||
sleep(Duration::from_millis(510)).await;
|
||||
waker.wake(WakerInterest::Timer);
|
||||
});
|
||||
|
||||
return;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
fn accept_all(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
sockets
|
||||
.iter_mut()
|
||||
.map(|info| info.token)
|
||||
.collect::<Vec<_>>()
|
||||
.into_iter()
|
||||
.for_each(|idx| self.accept(sockets, idx))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn next(&self) -> &WorkerHandleAccept {
|
||||
&self.handles[self.next]
|
||||
}
|
||||
|
||||
/// Set next worker handle that would accept connection.
|
||||
#[inline(always)]
|
||||
fn set_next(&mut self) {
|
||||
self.next = (self.next + 1) % self.handles.len();
|
||||
}
|
||||
|
||||
fn accept(&mut self, sockets: &mut Slab<ServerSocketInfo>, token: usize) {
|
||||
loop {
|
||||
let msg = if let Some(info) = sockets.get_mut(token) {
|
||||
match info.lst.accept() {
|
||||
Ok(Some((io, addr))) => Conn {
|
||||
io,
|
||||
token: info.token,
|
||||
peer: Some(addr),
|
||||
},
|
||||
Ok(None) => return,
|
||||
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
|
||||
Err(ref e) if connection_error(e) => continue,
|
||||
Err(e) => {
|
||||
// deregister listener temporary
|
||||
error!("Error accepting connection: {}", e);
|
||||
if let Err(err) = self.deregister(info) {
|
||||
error!("Can not deregister server socket {}", err);
|
||||
}
|
||||
|
||||
// Sleep after the error. Write the timeout to the socket info, as the poll will later
// need it to mark which socket's listener should be re-registered and when.
|
||||
info.timeout = Some(Instant::now() + Duration::from_millis(500));
|
||||
|
||||
// after the sleep a Timer interest is sent to Accept Poll
|
||||
let waker = self.waker.clone();
|
||||
System::current().arbiter().spawn(async move {
|
||||
sleep_until(Instant::now() + Duration::from_millis(510)).await;
|
||||
waker.wake(WakerInterest::Timer);
|
||||
});
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return;
|
||||
};
|
||||
|
||||
self.accept_one(sockets, msg);
|
||||
}
|
||||
/// Remove the next worker handle that failed to accept a connection.
|
||||
fn remove_next(&mut self) {
|
||||
let handle = self.handles.swap_remove(self.next);
|
||||
let idx = handle.idx();
|
||||
// A message is sent to `ServerBuilder` future to notify it a new worker
|
||||
// should be made.
|
||||
self.srv.worker_faulted(idx);
|
||||
self.avail.set_available(idx, false);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::Availability;
|
||||
|
||||
fn single(aval: &mut Availability, idx: usize) {
|
||||
aval.set_available(idx, true);
|
||||
assert!(aval.available());
|
||||
|
||||
aval.set_available(idx, true);
|
||||
|
||||
aval.set_available(idx, false);
|
||||
assert!(!aval.available());
|
||||
|
||||
aval.set_available(idx, false);
|
||||
assert!(!aval.available());
|
||||
}
|
||||
|
||||
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
|
||||
idx.iter().for_each(|idx| aval.set_available(*idx, true));
|
||||
|
||||
assert!(aval.available());
|
||||
|
||||
while let Some(idx) = idx.pop() {
|
||||
assert!(aval.available());
|
||||
aval.set_available(idx, false);
|
||||
}
|
||||
|
||||
assert!(!aval.available());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn availability() {
|
||||
let mut aval = Availability::default();
|
||||
|
||||
single(&mut aval, 1);
|
||||
single(&mut aval, 128);
|
||||
single(&mut aval, 256);
|
||||
single(&mut aval, 511);
|
||||
|
||||
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
|
||||
|
||||
multi(&mut aval, idx);
|
||||
|
||||
multi(&mut aval, (0..511).collect())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn overflow() {
|
||||
let mut aval = Availability::default();
|
||||
single(&mut aval, 512);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pin_point() {
|
||||
let mut aval = Availability::default();
|
||||
|
||||
aval.set_available(438, true);
|
||||
|
||||
aval.set_available(479, true);
|
||||
|
||||
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
|
||||
}
|
||||
}
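// The tests above exercise a 512-slot worker-availability bitmask. The sketch below is an
// illustration only, assuming a `[u128; 4]` layout (consistent with the `aval.0[3]` and
// index-512 overflow assertions above); it is not taken from this changeset, and any name
// not asserted by the tests is an assumption.
#[derive(Debug, Default)]
struct AvailabilitySketch([u128; 4]);

impl AvailabilitySketch {
    /// Returns true if any worker is marked available.
    fn available(&self) -> bool {
        self.0.iter().any(|a| *a != 0)
    }

    /// Sets the availability bit for worker `idx`; panics if `idx` is 512 or more.
    fn set_available(&mut self, idx: usize, avail: bool) {
        let (word, bit) = match idx {
            0..=127 => (0, idx),
            128..=255 => (1, idx - 128),
            256..=383 => (2, idx - 256),
            384..=511 => (3, idx - 384),
            _ => panic!("worker index out of range: {}", idx),
        };

        if avail {
            self.0[word] |= 1 << bit;
        } else {
            self.0[word] &= !(1 << bit);
        }
    }
}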
|
||||
|
@@ -1,35 +1,36 @@
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use std::{io, mem};
|
||||
use std::{
|
||||
future::Future,
|
||||
io, mem,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_rt::time::{sleep_until, Instant};
|
||||
use actix_rt::{self as rt, System};
|
||||
use actix_rt::{self as rt, net::TcpStream, time::sleep, System};
|
||||
use log::{error, info};
|
||||
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
|
||||
use tokio::sync::oneshot;
|
||||
use tokio::sync::{
|
||||
mpsc::{unbounded_channel, UnboundedReceiver},
|
||||
oneshot,
|
||||
};
|
||||
|
||||
use crate::accept::AcceptLoop;
|
||||
use crate::config::{ConfiguredService, ServiceConfig};
|
||||
use crate::join_all;
|
||||
use crate::server::{Server, ServerCommand};
|
||||
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
|
||||
use crate::signals::{Signal, Signals};
|
||||
use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
|
||||
use crate::socket::{MioTcpListener, MioTcpSocket};
|
||||
use crate::waker_queue::{WakerInterest, WakerQueue};
|
||||
use crate::worker::{self, ServerWorker, ServerWorkerConfig, WorkerAvailability, WorkerHandle};
|
||||
use crate::{join_all, Token};
|
||||
use crate::worker::{ServerWorker, ServerWorkerConfig, WorkerHandleAccept, WorkerHandleServer};
|
||||
|
||||
/// Server builder
|
||||
pub struct ServerBuilder {
|
||||
threads: usize,
|
||||
token: Token,
|
||||
token: usize,
|
||||
backlog: u32,
|
||||
handles: Vec<(usize, WorkerHandle)>,
|
||||
handles: Vec<(usize, WorkerHandleServer)>,
|
||||
services: Vec<Box<dyn InternalServiceFactory>>,
|
||||
sockets: Vec<(Token, String, MioListener)>,
|
||||
sockets: Vec<(usize, String, MioListener)>,
|
||||
accept: AcceptLoop,
|
||||
exit: bool,
|
||||
no_signals: bool,
|
||||
@@ -53,7 +54,7 @@ impl ServerBuilder {
|
||||
|
||||
ServerBuilder {
|
||||
threads: num_cpus::get(),
|
||||
token: Token::default(),
|
||||
token: 0,
|
||||
handles: Vec::new(),
|
||||
services: Vec::new(),
|
||||
sockets: Vec::new(),
|
||||
@@ -117,18 +118,18 @@ impl ServerBuilder {
|
||||
/// reached for each worker.
|
||||
///
|
||||
/// By default max connections is set to a 25k per worker.
|
||||
pub fn maxconn(self, num: usize) -> Self {
|
||||
worker::max_concurrent_connections(num);
|
||||
pub fn maxconn(mut self, num: usize) -> Self {
|
||||
self.worker_config.max_concurrent_connections(num);
|
||||
self
|
||||
}
|
||||
|
||||
/// Stop actix system.
|
||||
/// Stop Actix system.
|
||||
pub fn system_exit(mut self) -> Self {
|
||||
self.exit = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Disable signal handling
|
||||
/// Disable signal handling.
|
||||
pub fn disable_signals(mut self) -> Self {
|
||||
self.no_signals = true;
|
||||
self
|
||||
@@ -136,9 +137,8 @@ impl ServerBuilder {
|
||||
|
||||
/// Timeout for graceful workers shutdown in seconds.
|
||||
///
|
||||
/// After receiving a stop signal, workers have this much time to finish
|
||||
/// serving requests. Workers still alive after the timeout are force
|
||||
/// dropped.
|
||||
/// After receiving a stop signal, workers have this much time to finish serving requests.
|
||||
/// Workers still alive after the timeout are force dropped.
|
||||
///
|
||||
/// By default shutdown timeout sets to 30 seconds.
|
||||
pub fn shutdown_timeout(mut self, sec: u64) -> Self {
|
||||
@@ -147,33 +147,6 @@ impl ServerBuilder {
|
||||
self
|
||||
}
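// A minimal usage sketch of the builder options documented above (worker count, per-worker
// connection limit, graceful shutdown timeout). Illustration only: `Server::build()`, `run()`
// and the exact factory signature are assumptions about the public API, not part of this diff.
async fn builder_usage_sketch() -> std::io::Result<()> {
    actix_server::Server::build()
        .workers(4)             // number of worker threads (defaults to the logical CPU count)
        .maxconn(25_600)        // max concurrent connections per worker (default 25k)
        .shutdown_timeout(60)   // seconds workers get to finish in-flight work on graceful stop
        .bind("echo", ("127.0.0.1", 8080), || {
            // factory that builds a new service for each accepted TcpStream
            actix_service::fn_service(|_stream: actix_rt::net::TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run()
        .await
}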
|
||||
|
||||
/// Execute external configuration as part of the server building
|
||||
/// process.
|
||||
///
|
||||
/// This function is useful for moving parts of configuration to a
|
||||
/// different module or even library.
|
||||
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
|
||||
where
|
||||
F: Fn(&mut ServiceConfig) -> io::Result<()>,
|
||||
{
|
||||
let mut cfg = ServiceConfig::new(self.threads, self.backlog);
|
||||
|
||||
f(&mut cfg)?;
|
||||
|
||||
if let Some(apply) = cfg.apply {
|
||||
let mut srv = ConfiguredService::new(apply);
|
||||
for (name, lst) in cfg.services {
|
||||
let token = self.token.next();
|
||||
srv.stream(token, name.clone(), lst.local_addr()?);
|
||||
self.sockets.push((token, name, MioListener::Tcp(lst)));
|
||||
}
|
||||
self.services.push(Box::new(srv));
|
||||
}
|
||||
self.threads = cfg.threads;
|
||||
|
||||
Ok(self)
|
||||
}
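// A sketch of the external-configuration flow described above, using the `ServiceConfig` /
// `ServiceRuntime` API (this changeset removes that API, so the sketch documents the
// pre-change usage). Illustration only; the service body and any name not present in this
// diff are assumptions.
fn configure_sketch(
    builder: actix_server::ServerBuilder,
) -> std::io::Result<actix_server::ServerBuilder> {
    builder.configure(|cfg| {
        // bind a named listener; the actual service is attached per worker in `apply`
        cfg.bind("db-proxy", ("127.0.0.1", 9000))?;

        // this closure runs in each worker thread during its runtime configuration
        cfg.apply(|rt| {
            rt.service(
                "db-proxy",
                actix_service::fn_service(|_stream: actix_rt::net::TcpStream| async {
                    Ok::<_, ()>(())
                }),
            );
        })
    })
}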
|
||||
|
||||
/// Add new service to the server.
|
||||
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
|
||||
where
|
||||
@@ -183,7 +156,7 @@ impl ServerBuilder {
|
||||
let sockets = bind_addr(addr, self.backlog)?;
|
||||
|
||||
for lst in sockets {
|
||||
let token = self.token.next();
|
||||
let token = self.next_token();
|
||||
self.services.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
@@ -232,7 +205,7 @@ impl ServerBuilder {
|
||||
{
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
lst.set_nonblocking(true)?;
|
||||
let token = self.token.next();
|
||||
let token = self.next_token();
|
||||
let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
|
||||
self.services.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
@@ -258,7 +231,7 @@ impl ServerBuilder {
|
||||
lst.set_nonblocking(true)?;
|
||||
let addr = lst.local_addr()?;
|
||||
|
||||
let token = self.token.next();
|
||||
let token = self.next_token();
|
||||
self.services.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
@@ -268,6 +241,7 @@ impl ServerBuilder {
|
||||
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
@@ -281,10 +255,11 @@ impl ServerBuilder {
|
||||
// start workers
|
||||
let handles = (0..self.threads)
|
||||
.map(|idx| {
|
||||
let handle = self.start_worker(idx, self.accept.waker_owned());
|
||||
self.handles.push((idx, handle.clone()));
|
||||
let (handle_accept, handle_server) =
|
||||
self.start_worker(idx, self.accept.waker_owned());
|
||||
self.handles.push((idx, handle_server));
|
||||
|
||||
handle
|
||||
handle_accept
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -312,11 +287,14 @@ impl ServerBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
fn start_worker(&self, idx: usize, waker: WakerQueue) -> WorkerHandle {
|
||||
let avail = WorkerAvailability::new(waker);
|
||||
fn start_worker(
|
||||
&self,
|
||||
idx: usize,
|
||||
waker_queue: WakerQueue,
|
||||
) -> (WorkerHandleAccept, WorkerHandleServer) {
|
||||
let services = self.services.iter().map(|v| v.clone_factory()).collect();
|
||||
|
||||
ServerWorker::start(idx, services, avail, self.worker_config)
|
||||
ServerWorker::start(idx, services, waker_queue, self.worker_config)
|
||||
}
|
||||
|
||||
fn handle_cmd(&mut self, item: ServerCommand) {
|
||||
@@ -334,30 +312,31 @@ impl ServerBuilder {
|
||||
// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
|
||||
match sig {
|
||||
Signal::Int => {
|
||||
info!("SIGINT received, exiting");
|
||||
info!("SIGINT received, starting forced shutdown");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
|
||||
Signal::Term => {
|
||||
info!("SIGTERM received, stopping");
|
||||
info!("SIGTERM received, starting graceful shutdown");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: true,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
|
||||
Signal::Quit => {
|
||||
info!("SIGQUIT received, exiting");
|
||||
info!("SIGQUIT received, starting forced shutdown");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
ServerCommand::Notify(tx) => {
|
||||
@@ -374,45 +353,31 @@ impl ServerBuilder {
|
||||
let notify = std::mem::take(&mut self.notify);
|
||||
|
||||
// stop workers
|
||||
if !self.handles.is_empty() && graceful {
|
||||
let iter = self
|
||||
.handles
|
||||
.iter()
|
||||
.map(move |worker| worker.1.stop(graceful))
|
||||
.collect();
|
||||
let stop = self
|
||||
.handles
|
||||
.iter()
|
||||
.map(move |worker| worker.1.stop(graceful))
|
||||
.collect();
|
||||
|
||||
let fut = join_all(iter);
|
||||
|
||||
rt::spawn(async move {
|
||||
let _ = fut.await;
|
||||
if let Some(tx) = completion {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
for tx in notify {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
if exit {
|
||||
rt::spawn(async {
|
||||
sleep_until(Instant::now() + Duration::from_millis(300)).await;
|
||||
System::current().stop();
|
||||
});
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// we need to stop system if server was spawned
|
||||
if self.exit {
|
||||
rt::spawn(async {
|
||||
sleep_until(Instant::now() + Duration::from_millis(300)).await;
|
||||
System::current().stop();
|
||||
});
|
||||
rt::spawn(async move {
|
||||
if graceful {
|
||||
// wait for all workers to shut down
|
||||
let _ = join_all(stop).await;
|
||||
}
|
||||
|
||||
if let Some(tx) = completion {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
|
||||
for tx in notify {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
}
|
||||
|
||||
if exit {
|
||||
sleep(Duration::from_millis(300)).await;
|
||||
System::current().stop();
|
||||
}
|
||||
});
|
||||
}
|
||||
ServerCommand::WorkerFaulted(idx) => {
|
||||
let mut found = false;
|
||||
@@ -438,13 +403,20 @@ impl ServerBuilder {
|
||||
break;
|
||||
}
|
||||
|
||||
let handle = self.start_worker(new_idx, self.accept.waker_owned());
|
||||
self.handles.push((new_idx, handle.clone()));
|
||||
self.accept.wake(WakerInterest::Worker(handle));
|
||||
let (handle_accept, handle_server) =
|
||||
self.start_worker(new_idx, self.accept.waker_owned());
|
||||
self.handles.push((new_idx, handle_server));
|
||||
self.accept.wake(WakerInterest::Worker(handle_accept));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn next_token(&mut self) -> usize {
|
||||
let token = self.token;
|
||||
self.token += 1;
|
||||
token
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for ServerBuilder {
|
||||
|
@@ -1,287 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::future::Future;
|
||||
use std::{fmt, io};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{
|
||||
fn_service, IntoServiceFactory as IntoBaseServiceFactory,
|
||||
ServiceFactory as BaseServiceFactory,
|
||||
};
|
||||
use actix_utils::counter::CounterGuard;
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use log::error;
|
||||
|
||||
use crate::builder::bind_addr;
|
||||
use crate::service::{BoxedServerService, InternalServiceFactory, StreamService};
|
||||
use crate::socket::{MioStream, MioTcpListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
|
||||
use crate::{ready, Token};
|
||||
|
||||
pub struct ServiceConfig {
|
||||
pub(crate) services: Vec<(String, MioTcpListener)>,
|
||||
pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
|
||||
pub(crate) threads: usize,
|
||||
pub(crate) backlog: u32,
|
||||
}
|
||||
|
||||
impl ServiceConfig {
|
||||
pub(super) fn new(threads: usize, backlog: u32) -> ServiceConfig {
|
||||
ServiceConfig {
|
||||
threads,
|
||||
backlog,
|
||||
services: Vec::new(),
|
||||
apply: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set number of workers to start.
|
||||
///
|
||||
/// By default, the server uses the number of available logical CPUs as the worker count.
|
||||
pub fn workers(&mut self, num: usize) {
|
||||
self.threads = num;
|
||||
}
|
||||
|
||||
/// Add new service to server
|
||||
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
|
||||
where
|
||||
U: ToSocketAddrs,
|
||||
{
|
||||
let sockets = bind_addr(addr, self.backlog)?;
|
||||
|
||||
for lst in sockets {
|
||||
self._listen(name.as_ref(), lst);
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add new service to server
|
||||
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: StdTcpListener) -> &mut Self {
|
||||
self._listen(name, MioTcpListener::from_std(lst))
|
||||
}
|
||||
|
||||
/// Register a service configuration function. This function gets called during worker
/// runtime configuration and is executed in the worker thread.
|
||||
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
|
||||
where
|
||||
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
|
||||
{
|
||||
self.apply = Some(Box::new(f));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn _listen<N: AsRef<str>>(&mut self, name: N, lst: MioTcpListener) -> &mut Self {
|
||||
if self.apply.is_none() {
|
||||
self.apply = Some(Box::new(not_configured));
|
||||
}
|
||||
self.services.push((name.as_ref().to_string(), lst));
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) struct ConfiguredService {
|
||||
rt: Box<dyn ServiceRuntimeConfiguration>,
|
||||
names: HashMap<Token, (String, StdSocketAddr)>,
|
||||
topics: HashMap<String, Token>,
|
||||
services: Vec<Token>,
|
||||
}
|
||||
|
||||
impl ConfiguredService {
|
||||
pub(super) fn new(rt: Box<dyn ServiceRuntimeConfiguration>) -> Self {
|
||||
ConfiguredService {
|
||||
rt,
|
||||
names: HashMap::new(),
|
||||
topics: HashMap::new(),
|
||||
services: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn stream(&mut self, token: Token, name: String, addr: StdSocketAddr) {
|
||||
self.names.insert(token, (name.clone(), addr));
|
||||
self.topics.insert(name, token);
|
||||
self.services.push(token);
|
||||
}
|
||||
}
|
||||
|
||||
impl InternalServiceFactory for ConfiguredService {
|
||||
fn name(&self, token: Token) -> &str {
|
||||
&self.names[&token].0
|
||||
}
|
||||
|
||||
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
|
||||
Box::new(Self {
|
||||
rt: self.rt.clone(),
|
||||
names: self.names.clone(),
|
||||
topics: self.topics.clone(),
|
||||
services: self.services.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
|
||||
// configure services
|
||||
let mut rt = ServiceRuntime::new(self.topics.clone());
|
||||
self.rt.configure(&mut rt);
|
||||
rt.validate();
|
||||
let mut names = self.names.clone();
|
||||
let tokens = self.services.clone();
|
||||
|
||||
// construct services
|
||||
Box::pin(async move {
|
||||
let mut services = rt.services;
|
||||
// TODO: Proper error handling here
|
||||
for f in rt.onstart.into_iter() {
|
||||
f.await;
|
||||
}
|
||||
let mut res = vec![];
|
||||
for token in tokens {
|
||||
if let Some(srv) = services.remove(&token) {
|
||||
let newserv = srv.new_service(());
|
||||
match newserv.await {
|
||||
Ok(serv) => {
|
||||
res.push((token, serv));
|
||||
}
|
||||
Err(_) => {
|
||||
error!("Can not construct service");
|
||||
return Err(());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let name = names.remove(&token).unwrap().0;
|
||||
res.push((
|
||||
token,
|
||||
Box::new(StreamService::new(fn_service(move |_: TcpStream| {
|
||||
error!("Service {:?} is not configured", name);
|
||||
ready::<Result<_, ()>>(Ok(()))
|
||||
}))),
|
||||
));
|
||||
};
|
||||
}
|
||||
Ok(res)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) trait ServiceRuntimeConfiguration: Send {
|
||||
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration>;
|
||||
|
||||
fn configure(&self, rt: &mut ServiceRuntime);
|
||||
}
|
||||
|
||||
impl<F> ServiceRuntimeConfiguration for F
|
||||
where
|
||||
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
|
||||
{
|
||||
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
|
||||
fn configure(&self, rt: &mut ServiceRuntime) {
|
||||
(self)(rt)
|
||||
}
|
||||
}
|
||||
|
||||
fn not_configured(_: &mut ServiceRuntime) {
|
||||
error!("Service is not configured");
|
||||
}
|
||||
|
||||
pub struct ServiceRuntime {
|
||||
names: HashMap<String, Token>,
|
||||
services: HashMap<Token, BoxedNewService>,
|
||||
onstart: Vec<LocalBoxFuture<'static, ()>>,
|
||||
}
|
||||
|
||||
impl ServiceRuntime {
|
||||
fn new(names: HashMap<String, Token>) -> Self {
|
||||
ServiceRuntime {
|
||||
names,
|
||||
services: HashMap::new(),
|
||||
onstart: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn validate(&self) {
|
||||
for (name, token) in &self.names {
|
||||
if !self.services.contains_key(&token) {
|
||||
error!("Service {:?} is not configured", name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Register service.
|
||||
///
|
||||
/// Name of the service must be registered during configuration stage with
|
||||
/// *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
|
||||
pub fn service<T, F>(&mut self, name: &str, service: F)
|
||||
where
|
||||
F: IntoBaseServiceFactory<T, TcpStream>,
|
||||
T: BaseServiceFactory<TcpStream, Config = ()> + 'static,
|
||||
T::Future: 'static,
|
||||
T::Service: 'static,
|
||||
T::InitError: fmt::Debug,
|
||||
{
|
||||
// let name = name.to_owned();
|
||||
if let Some(token) = self.names.get(name) {
|
||||
self.services.insert(
|
||||
*token,
|
||||
Box::new(ServiceFactory {
|
||||
inner: service.into_factory(),
|
||||
}),
|
||||
);
|
||||
} else {
|
||||
panic!("Unknown service: {:?}", name);
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute future before services initialization.
|
||||
pub fn on_start<F>(&mut self, fut: F)
|
||||
where
|
||||
F: Future<Output = ()> + 'static,
|
||||
{
|
||||
self.onstart.push(Box::pin(fut))
|
||||
}
|
||||
}
|
||||
|
||||
type BoxedNewService = Box<
|
||||
dyn BaseServiceFactory<
|
||||
(Option<CounterGuard>, MioStream),
|
||||
Response = (),
|
||||
Error = (),
|
||||
InitError = (),
|
||||
Config = (),
|
||||
Service = BoxedServerService,
|
||||
Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>,
|
||||
>,
|
||||
>;
|
||||
|
||||
struct ServiceFactory<T> {
|
||||
inner: T,
|
||||
}
|
||||
|
||||
impl<T> BaseServiceFactory<(Option<CounterGuard>, MioStream)> for ServiceFactory<T>
|
||||
where
|
||||
T: BaseServiceFactory<TcpStream, Config = ()>,
|
||||
T::Future: 'static,
|
||||
T::Service: 'static,
|
||||
T::Error: 'static,
|
||||
T::InitError: fmt::Debug + 'static,
|
||||
{
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type Config = ();
|
||||
type Service = BoxedServerService;
|
||||
type InitError = ();
|
||||
type Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
let fut = self.inner.new_service(());
|
||||
Box::pin(async move {
|
||||
match fut.await {
|
||||
Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
|
||||
Err(e) => {
|
||||
error!("Can not construct service: {:?}", e);
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@@ -6,7 +6,6 @@
|
||||
|
||||
mod accept;
|
||||
mod builder;
|
||||
mod config;
|
||||
mod server;
|
||||
mod service;
|
||||
mod signals;
|
||||
@@ -16,7 +15,6 @@ mod waker_queue;
|
||||
mod worker;
|
||||
|
||||
pub use self::builder::ServerBuilder;
|
||||
pub use self::config::{ServiceConfig, ServiceRuntime};
|
||||
pub use self::server::Server;
|
||||
pub use self::service::ServiceFactory;
|
||||
pub use self::test_server::TestServer;
|
||||
@@ -28,51 +26,11 @@ use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
/// Socket ID token
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
|
||||
pub(crate) struct Token(usize);
|
||||
|
||||
impl Default for Token {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Token {
|
||||
fn new() -> Self {
|
||||
Self(0)
|
||||
}
|
||||
|
||||
pub(crate) fn next(&mut self) -> Token {
|
||||
let token = Token(self.0);
|
||||
self.0 += 1;
|
||||
token
|
||||
}
|
||||
}
|
||||
|
||||
/// Start server building process
|
||||
pub fn new() -> ServerBuilder {
|
||||
ServerBuilder::default()
|
||||
}
|
||||
|
||||
// Temporary Ready type standing in for std::future::{ready, Ready}; can be removed when the MSRV surpasses 1.48.
|
||||
#[doc(hidden)]
|
||||
pub struct Ready<T>(Option<T>);
|
||||
|
||||
pub(crate) fn ready<T>(t: T) -> Ready<T> {
|
||||
Ready(Some(t))
|
||||
}
|
||||
|
||||
impl<T> Unpin for Ready<T> {}
|
||||
|
||||
impl<T> Future for Ready<T> {
|
||||
type Output = T;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
Poll::Ready(self.get_mut().0.take().unwrap())
|
||||
}
|
||||
}
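// A tiny usage sketch of the stand-in `ready`/`Ready` future above: it resolves immediately
// with the wrapped value, mirroring `std::future::ready` (stable since Rust 1.48).
// Test-style sketch only, not part of this changeset.
#[cfg(test)]
mod ready_sketch {
    use super::ready;

    #[actix_rt::test]
    async fn resolves_immediately() {
        // completes on the first poll with the stored value
        assert_eq!(ready(42).await, 42);
    }
}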
|
||||
|
||||
// A poor man's join future. The joined future is only used when starting/stopping the server;
// pin_project and pinned futures are overkill for this task.
|
||||
pub(crate) struct JoinAll<T> {
|
||||
@@ -132,6 +90,8 @@ impl<T> Future for JoinAll<T> {
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
use actix_utils::future::ready;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_join_all() {
|
||||
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
|
||||
|
@@ -15,8 +15,8 @@ pub(crate) enum ServerCommand {
|
||||
Pause(oneshot::Sender<()>),
|
||||
Resume(oneshot::Sender<()>),
|
||||
Signal(Signal),
|
||||
/// Whether to try and shut down gracefully
|
||||
Stop {
|
||||
/// True if shut down should be graceful.
|
||||
graceful: bool,
|
||||
completion: Option<oneshot::Sender<()>>,
|
||||
},
|
||||
@@ -24,6 +24,13 @@ pub(crate) enum ServerCommand {
|
||||
Notify(oneshot::Sender<()>),
|
||||
}
|
||||
|
||||
/// Server handle.
|
||||
///
|
||||
/// # Shutdown Signals
|
||||
/// On UNIX systems, `SIGTERM` will start a graceful shutdown while `SIGINT` or `SIGQUIT` will
/// start a forced shutdown. On Windows, a CTRL-C signal will start a forced shutdown.
|
||||
///
|
||||
/// A graceful shutdown will wait for all workers to stop first.
|
||||
#[derive(Debug)]
|
||||
pub struct Server(
|
||||
UnboundedSender<ServerCommand>,
|
||||
|
@@ -3,12 +3,12 @@ use std::net::SocketAddr;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_service::{Service, ServiceFactory as BaseServiceFactory};
|
||||
use actix_utils::counter::CounterGuard;
|
||||
use actix_utils::future::{ready, Ready};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use log::error;
|
||||
|
||||
use crate::socket::{FromStream, MioStream};
|
||||
use crate::{ready, Ready, Token};
|
||||
use crate::worker::WorkerCounterGuard;
|
||||
|
||||
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
|
||||
type Factory: BaseServiceFactory<Stream, Config = ()>;
|
||||
@@ -17,16 +17,16 @@ pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
|
||||
}
|
||||
|
||||
pub(crate) trait InternalServiceFactory: Send {
|
||||
fn name(&self, token: Token) -> &str;
|
||||
fn name(&self, token: usize) -> &str;
|
||||
|
||||
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
|
||||
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>;
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>;
|
||||
}
|
||||
|
||||
pub(crate) type BoxedServerService = Box<
|
||||
dyn Service<
|
||||
(Option<CounterGuard>, MioStream),
|
||||
(WorkerCounterGuard, MioStream),
|
||||
Response = (),
|
||||
Error = (),
|
||||
Future = Ready<Result<(), ()>>,
|
||||
@@ -47,7 +47,7 @@ impl<S, I> StreamService<S, I> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, I> Service<(Option<CounterGuard>, MioStream)> for StreamService<S, I>
|
||||
impl<S, I> Service<(WorkerCounterGuard, MioStream)> for StreamService<S, I>
|
||||
where
|
||||
S: Service<I>,
|
||||
S::Future: 'static,
|
||||
@@ -62,7 +62,7 @@ where
|
||||
self.service.poll_ready(ctx).map_err(|_| ())
|
||||
}
|
||||
|
||||
fn call(&self, (guard, req): (Option<CounterGuard>, MioStream)) -> Self::Future {
|
||||
fn call(&self, (guard, req): (WorkerCounterGuard, MioStream)) -> Self::Future {
|
||||
ready(match FromStream::from_mio(req) {
|
||||
Ok(stream) => {
|
||||
let f = self.service.call(stream);
|
||||
@@ -83,7 +83,7 @@ where
|
||||
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
|
||||
name: String,
|
||||
inner: F,
|
||||
token: Token,
|
||||
token: usize,
|
||||
addr: SocketAddr,
|
||||
_t: PhantomData<Io>,
|
||||
}
|
||||
@@ -95,7 +95,7 @@ where
|
||||
{
|
||||
pub(crate) fn create(
|
||||
name: String,
|
||||
token: Token,
|
||||
token: usize,
|
||||
inner: F,
|
||||
addr: SocketAddr,
|
||||
) -> Box<dyn InternalServiceFactory> {
|
||||
@@ -114,7 +114,7 @@ where
|
||||
F: ServiceFactory<Io>,
|
||||
Io: FromStream + Send + 'static,
|
||||
{
|
||||
fn name(&self, _: Token) -> &str {
|
||||
fn name(&self, _: usize) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
@@ -128,14 +128,14 @@ where
|
||||
})
|
||||
}
|
||||
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>> {
|
||||
let token = self.token;
|
||||
let fut = self.inner.create().new_service(());
|
||||
Box::pin(async move {
|
||||
match fut.await {
|
||||
Ok(inner) => {
|
||||
let service = Box::new(StreamService::new(inner)) as _;
|
||||
Ok(vec![(token, service)])
|
||||
Ok((token, service))
|
||||
}
|
||||
Err(_) => Err(()),
|
||||
}
|
||||
|
@@ -4,29 +4,33 @@ use std::task::{Context, Poll};
|
||||
|
||||
use crate::server::Server;
|
||||
|
||||
/// Different types of process signals
|
||||
/// Types of process signals.
|
||||
#[allow(dead_code)]
|
||||
#[derive(PartialEq, Clone, Copy, Debug)]
|
||||
pub(crate) enum Signal {
|
||||
/// SIGHUP
|
||||
Hup,
|
||||
/// SIGINT
|
||||
/// `SIGINT`
|
||||
Int,
|
||||
/// SIGTERM
|
||||
|
||||
/// `SIGTERM`
|
||||
Term,
|
||||
/// SIGQUIT
|
||||
|
||||
/// `SIGQUIT`
|
||||
Quit,
|
||||
}
|
||||
|
||||
/// Process signal listener.
|
||||
pub(crate) struct Signals {
|
||||
srv: Server,
|
||||
|
||||
#[cfg(not(unix))]
|
||||
signals: futures_core::future::LocalBoxFuture<'static, std::io::Result<()>>,
|
||||
|
||||
#[cfg(unix)]
|
||||
signals: Vec<(Signal, actix_rt::signal::unix::Signal)>,
|
||||
}
|
||||
|
||||
impl Signals {
|
||||
/// Spawns a signal listening future that is able to send commands to the `Server`.
|
||||
pub(crate) fn start(srv: Server) {
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
@@ -35,13 +39,13 @@ impl Signals {
|
||||
signals: Box::pin(actix_rt::signal::ctrl_c()),
|
||||
});
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use actix_rt::signal::unix;
|
||||
|
||||
let sig_map = [
|
||||
(unix::SignalKind::interrupt(), Signal::Int),
|
||||
(unix::SignalKind::hangup(), Signal::Hup),
|
||||
(unix::SignalKind::terminate(), Signal::Term),
|
||||
(unix::SignalKind::quit(), Signal::Quit),
|
||||
];
|
||||
@@ -79,6 +83,7 @@ impl Future for Signals {
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
for (sig, fut) in self.signals.iter_mut() {
|
||||
|
@@ -12,18 +12,7 @@ pub(crate) use {
|
||||
use std::{fmt, io};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use mio::event::Source;
|
||||
use mio::net::TcpStream as MioTcpStream;
|
||||
use mio::{Interest, Registry, Token};
|
||||
|
||||
#[cfg(windows)]
|
||||
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
|
||||
#[cfg(unix)]
|
||||
use {
|
||||
actix_rt::net::UnixStream,
|
||||
mio::net::{SocketAddr as MioSocketAddr, UnixStream as MioUnixStream},
|
||||
std::os::unix::io::{FromRawFd, IntoRawFd},
|
||||
};
|
||||
use mio::{event::Source, Interest, Registry, Token};
|
||||
|
||||
pub(crate) enum MioListener {
|
||||
Tcp(MioTcpListener),
|
||||
@@ -34,21 +23,23 @@ pub(crate) enum MioListener {
|
||||
impl MioListener {
|
||||
pub(crate) fn local_addr(&self) -> SocketAddr {
|
||||
match *self {
|
||||
MioListener::Tcp(ref lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
|
||||
MioListener::Tcp(ref lst) => lst
|
||||
.local_addr()
|
||||
.map(SocketAddr::Tcp)
|
||||
.unwrap_or(SocketAddr::Unknown),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
|
||||
MioListener::Uds(ref lst) => lst
|
||||
.local_addr()
|
||||
.map(SocketAddr::Uds)
|
||||
.unwrap_or(SocketAddr::Unknown),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn accept(&self) -> io::Result<Option<(MioStream, SocketAddr)>> {
|
||||
pub(crate) fn accept(&self) -> io::Result<MioStream> {
|
||||
match *self {
|
||||
MioListener::Tcp(ref lst) => lst
|
||||
.accept()
|
||||
.map(|(stream, addr)| Some((MioStream::Tcp(stream), SocketAddr::Tcp(addr)))),
|
||||
MioListener::Tcp(ref lst) => lst.accept().map(|(stream, _)| MioStream::Tcp(stream)),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref lst) => lst
|
||||
.accept()
|
||||
.map(|(stream, addr)| Some((MioStream::Uds(stream), SocketAddr::Uds(addr)))),
|
||||
MioListener::Uds(ref lst) => lst.accept().map(|(stream, _)| MioStream::Uds(stream)),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -125,25 +116,27 @@ impl fmt::Debug for MioListener {
|
||||
impl fmt::Display for MioListener {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
MioListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
|
||||
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
|
||||
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) enum SocketAddr {
|
||||
Unknown,
|
||||
Tcp(StdSocketAddr),
|
||||
#[cfg(unix)]
|
||||
Uds(MioSocketAddr),
|
||||
Uds(mio::net::SocketAddr),
|
||||
}
|
||||
|
||||
impl fmt::Display for SocketAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
|
||||
Self::Unknown => write!(f, "Unknown SocketAddr"),
|
||||
Self::Tcp(ref addr) => write!(f, "{}", addr),
|
||||
#[cfg(unix)]
|
||||
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
Self::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -151,18 +144,19 @@ impl fmt::Display for SocketAddr {
|
||||
impl fmt::Debug for SocketAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
|
||||
Self::Unknown => write!(f, "Unknown SocketAddr"),
|
||||
Self::Tcp(ref addr) => write!(f, "{:?}", addr),
|
||||
#[cfg(unix)]
|
||||
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
Self::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum MioStream {
|
||||
Tcp(MioTcpStream),
|
||||
Tcp(mio::net::TcpStream),
|
||||
#[cfg(unix)]
|
||||
Uds(MioUnixStream),
|
||||
Uds(mio::net::UnixStream),
|
||||
}
|
||||
|
||||
/// Helper trait for converting a mio stream to a Tokio stream.
|
||||
@@ -170,47 +164,60 @@ pub trait FromStream: Sized {
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self>;
|
||||
}
|
||||
|
||||
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
|
||||
#[cfg(unix)]
|
||||
impl FromStream for TcpStream {
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
MioStream::Tcp(mio) => {
|
||||
let raw = IntoRawFd::into_raw_fd(mio);
|
||||
// SAFETY: This is a in place conversion from mio stream to tokio stream.
|
||||
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
|
||||
}
|
||||
MioStream::Uds(_) => {
|
||||
panic!("Should not happen, bug in server impl");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
|
||||
#[cfg(windows)]
|
||||
impl FromStream for TcpStream {
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
MioStream::Tcp(mio) => {
|
||||
let raw = IntoRawSocket::into_raw_socket(mio);
|
||||
// SAFETY: This is a in place conversion from mio stream to tokio stream.
|
||||
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
|
||||
mod win_impl {
|
||||
use super::*;
|
||||
|
||||
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
|
||||
|
||||
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
|
||||
impl FromStream for TcpStream {
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
MioStream::Tcp(mio) => {
|
||||
let raw = IntoRawSocket::into_raw_socket(mio);
|
||||
// SAFETY: This is a in place conversion from mio stream to tokio stream.
|
||||
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
|
||||
#[cfg(unix)]
|
||||
impl FromStream for UnixStream {
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
|
||||
MioStream::Uds(mio) => {
|
||||
let raw = IntoRawFd::into_raw_fd(mio);
|
||||
// SAFETY: This is a in place conversion from mio stream to tokio stream.
|
||||
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
|
||||
mod unix_impl {
|
||||
use super::*;
|
||||
|
||||
use std::os::unix::io::{FromRawFd, IntoRawFd};
|
||||
|
||||
use actix_rt::net::UnixStream;
|
||||
|
||||
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
|
||||
impl FromStream for TcpStream {
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
MioStream::Tcp(mio) => {
|
||||
let raw = IntoRawFd::into_raw_fd(mio);
|
||||
// SAFETY: This is a in place conversion from mio stream to tokio stream.
|
||||
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
|
||||
}
|
||||
MioStream::Uds(_) => {
|
||||
panic!("Should not happen, bug in server impl");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
|
||||
impl FromStream for UnixStream {
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
|
||||
MioStream::Uds(mio) => {
|
||||
let raw = IntoRawFd::into_raw_fd(mio);
|
||||
// SAFETY: This is a in place conversion from mio stream to tokio stream.
|
||||
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -5,13 +5,12 @@ use actix_rt::{net::TcpStream, System};
|
||||
|
||||
use crate::{Server, ServerBuilder, ServiceFactory};
|
||||
|
||||
/// The `TestServer` type.
|
||||
/// A testing server.
|
||||
///
|
||||
/// `TestServer` is very simple test server that simplify process of writing
|
||||
/// integration tests for actix-net applications.
|
||||
/// `TestServer` is a very simple test server that simplifies the process of writing integration
/// tests for network applications.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use actix_service::fn_service;
|
||||
/// use actix_server::TestServer;
|
||||
@@ -39,7 +38,7 @@ pub struct TestServerRuntime {
|
||||
}
|
||||
|
||||
impl TestServer {
|
||||
/// Start new server with server builder
|
||||
/// Start new server with server builder.
|
||||
pub fn start<F>(mut factory: F) -> TestServerRuntime
|
||||
where
|
||||
F: FnMut(ServerBuilder) -> ServerBuilder + Send + 'static,
|
||||
@@ -64,7 +63,7 @@ impl TestServer {
|
||||
}
|
||||
}
|
||||
|
||||
/// Start new test server with application factory
|
||||
/// Start new test server with application factory.
|
||||
pub fn with<F: ServiceFactory<TcpStream>>(factory: F) -> TestServerRuntime {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
@@ -92,14 +91,14 @@ impl TestServer {
|
||||
let port = addr.port();
|
||||
|
||||
TestServerRuntime {
|
||||
system,
|
||||
addr,
|
||||
host,
|
||||
port,
|
||||
system,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get first available unused local address
|
||||
/// Get first available unused local address.
|
||||
pub fn unused_addr() -> net::SocketAddr {
|
||||
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
|
||||
let socket = mio::net::TcpSocket::new_v4().unwrap();
|
||||
@@ -111,27 +110,27 @@ impl TestServer {
|
||||
}
|
||||
|
||||
impl TestServerRuntime {
|
||||
/// Test server host
|
||||
/// Test server host.
|
||||
pub fn host(&self) -> &str {
|
||||
&self.host
|
||||
}
|
||||
|
||||
/// Test server port
|
||||
/// Test server port.
|
||||
pub fn port(&self) -> u16 {
|
||||
self.port
|
||||
}
|
||||
|
||||
/// Get test server address
|
||||
/// Get test server address.
|
||||
pub fn addr(&self) -> net::SocketAddr {
|
||||
self.addr
|
||||
}
|
||||
|
||||
/// Stop http server
|
||||
/// Stop server.
|
||||
fn stop(&mut self) {
|
||||
self.system.stop();
|
||||
}
|
||||
|
||||
/// Connect to server, return tokio TcpStream
|
||||
/// Connect to server, returning a Tokio `TcpStream`.
|
||||
pub fn connect(&self) -> std::io::Result<TcpStream> {
|
||||
TcpStream::from_std(net::TcpStream::connect(self.addr)?)
|
||||
}
|
||||
|
@@ -6,9 +6,9 @@ use std::{
|
||||
|
||||
use mio::{Registry, Token as MioToken, Waker};
|
||||
|
||||
use crate::worker::WorkerHandle;
|
||||
use crate::worker::WorkerHandleAccept;
|
||||
|
||||
/// waker token for `mio::Poll` instance
|
||||
/// Waker token for `mio::Poll` instance.
|
||||
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);
|
||||
|
||||
/// `mio::Waker` with a queue for waking up the `Accept`'s `Poll` and contains the `WakerInterest`
|
||||
@@ -30,7 +30,7 @@ impl Deref for WakerQueue {
|
||||
}
|
||||
|
||||
impl WakerQueue {
|
||||
/// construct a waker queue with given `Poll`'s `Registry` and capacity.
|
||||
/// Construct a waker queue with given `Poll`'s `Registry` and capacity.
|
||||
///
|
||||
/// A fixed `WAKER_TOKEN` is used to identify the wake interest and the `Poll` needs to match
|
||||
/// event's token for it to properly handle `WakerInterest`.
|
||||
@@ -41,7 +41,7 @@ impl WakerQueue {
|
||||
Ok(Self(Arc::new((waker, queue))))
|
||||
}
|
||||
|
||||
/// push a new interest to the queue and wake up the accept poll afterwards.
|
||||
/// Push a new interest to the queue and wake up the accept poll afterwards.
|
||||
pub(crate) fn wake(&self, interest: WakerInterest) {
|
||||
let (waker, queue) = self.deref();
|
||||
|
||||
@@ -55,24 +55,24 @@ impl WakerQueue {
|
||||
.unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e));
|
||||
}
|
||||
|
||||
/// get a MutexGuard of the waker queue.
|
||||
/// Get a MutexGuard of the waker queue.
|
||||
pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
|
||||
self.deref().1.lock().expect("Failed to lock WakerQueue")
|
||||
}
|
||||
|
||||
/// reset the waker queue so it does not grow infinitely.
|
||||
/// Reset the waker queue so it does not grow infinitely.
|
||||
pub(crate) fn reset(queue: &mut VecDeque<WakerInterest>) {
|
||||
std::mem::swap(&mut VecDeque::<WakerInterest>::with_capacity(16), queue);
|
||||
}
|
||||
}
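// A sketch of the intended producer/consumer flow around `WakerQueue`, based only on the
// methods above. How `Accept` actually reacts to each interest lives in accept.rs and is not
// reproduced here; treat this as an illustration, not the real event loop.
fn waker_queue_flow_sketch(waker_queue: &WakerQueue) {
    // producer side (e.g. `ServerBuilder` or a worker): enqueue an interest and wake the poll
    waker_queue.wake(WakerInterest::Pause);

    // consumer side (the accept loop, after seeing an event for WAKER_TOKEN):
    // drain all queued interests under the lock, then shrink the queue back to its base capacity
    let mut guard = waker_queue.guard();
    while let Some(interest) = guard.pop_front() {
        match interest {
            WakerInterest::Pause => { /* deregister all listeners */ }
            WakerInterest::Resume => { /* re-register all listeners */ }
            _ => { /* Stop / Timer / WorkerAvailable / Worker handled similarly */ }
        }
    }
    WakerQueue::reset(&mut guard);
}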
|
||||
|
||||
/// types of interests we would look into when `Accept`'s `Poll` is waked up by waker.
|
||||
/// Types of interest we check when `Accept`'s `Poll` is woken up by the waker.
|
||||
///
|
||||
/// *. These interests should not be confused with `mio::Interest` and mostly not I/O related
|
||||
/// These interests should not be confused with `mio::Interest`; they are mostly not I/O related.
|
||||
pub(crate) enum WakerInterest {
|
||||
/// `WorkerAvailable` is an interest from `Worker` notifying `Accept` there is a worker
|
||||
/// available and can accept new tasks.
|
||||
WorkerAvailable,
|
||||
WorkerAvailable(usize),
|
||||
/// `Pause`, `Resume`, `Stop` Interest are from `ServerBuilder` future. It listens to
|
||||
/// `ServerCommand` and notify `Accept` to do exactly these tasks.
|
||||
Pause,
|
||||
@@ -84,6 +84,6 @@ pub(crate) enum WakerInterest {
|
||||
Timer,
|
||||
/// `Worker` is an interest happen after a worker runs into faulted state(This is determined
|
||||
/// by if work can be sent to it successfully).`Accept` would be waked up and add the new
|
||||
/// `WorkerHandle`.
|
||||
Worker(WorkerHandle),
|
||||
/// `WorkerHandleAccept`.
|
||||
Worker(WorkerHandleAccept),
|
||||
}
|
||||
|
@@ -1,139 +1,212 @@
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use std::{
|
||||
future::Future,
|
||||
mem,
|
||||
pin::Pin,
|
||||
rc::Rc,
|
||||
sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
},
|
||||
task::{Context, Poll},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use actix_rt::time::{sleep_until, Instant, Sleep};
|
||||
use actix_rt::{spawn, Arbiter};
|
||||
use actix_utils::counter::Counter;
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use actix_rt::{
|
||||
spawn,
|
||||
time::{sleep, Instant, Sleep},
|
||||
Arbiter,
|
||||
};
|
||||
use futures_core::{future::LocalBoxFuture, ready};
|
||||
use log::{error, info, trace};
|
||||
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
|
||||
use tokio::sync::oneshot;
|
||||
use tokio::sync::{
|
||||
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
|
||||
oneshot,
|
||||
};
|
||||
|
||||
use crate::join_all;
|
||||
use crate::service::{BoxedServerService, InternalServiceFactory};
|
||||
use crate::socket::{MioStream, SocketAddr};
|
||||
use crate::socket::MioStream;
|
||||
use crate::waker_queue::{WakerInterest, WakerQueue};
|
||||
use crate::{join_all, Token};
|
||||
|
||||
pub(crate) struct WorkerCommand(Conn);
|
||||
|
||||
/// Stop worker message. Returns `true` on successful shutdown
|
||||
/// and `false` if some connections still alive.
|
||||
pub(crate) struct StopCommand {
|
||||
/// Stop worker message. Returns `true` on successful graceful shutdown
/// and `false` if some connections are still alive when the shutdown executes.
|
||||
pub(crate) struct Stop {
|
||||
graceful: bool,
|
||||
result: oneshot::Sender<bool>,
|
||||
tx: oneshot::Sender<bool>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct Conn {
|
||||
pub io: MioStream,
|
||||
pub token: Token,
|
||||
pub peer: Option<SocketAddr>,
|
||||
pub token: usize,
|
||||
}
|
||||
|
||||
static MAX_CONNS: AtomicUsize = AtomicUsize::new(25600);
|
||||
fn handle_pair(
|
||||
idx: usize,
|
||||
tx1: UnboundedSender<Conn>,
|
||||
tx2: UnboundedSender<Stop>,
|
||||
counter: Counter,
|
||||
) -> (WorkerHandleAccept, WorkerHandleServer) {
|
||||
let accept = WorkerHandleAccept {
|
||||
idx,
|
||||
tx: tx1,
|
||||
counter,
|
||||
};
|
||||
|
||||
/// Sets the maximum per-worker number of concurrent connections.
|
||||
let server = WorkerHandleServer { idx, tx: tx2 };
|
||||
|
||||
(accept, server)
|
||||
}
|
||||
|
||||
/// counter: Arc<AtomicUsize> field is owned by `Accept` thread and `ServerWorker` thread.
|
||||
///
|
||||
/// All socket listeners will stop accepting connections when this limit is
|
||||
/// reached for each worker.
|
||||
/// `Accept` would increment the counter and `ServerWorker` would decrement it.
|
||||
///
|
||||
/// By default max connections is set to a 25k per worker.
|
||||
pub fn max_concurrent_connections(num: usize) {
|
||||
MAX_CONNS.store(num, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
thread_local! {
|
||||
static MAX_CONNS_COUNTER: Counter =
|
||||
Counter::new(MAX_CONNS.load(Ordering::Relaxed));
|
||||
}
|
||||
|
||||
pub(crate) fn num_connections() -> usize {
|
||||
MAX_CONNS_COUNTER.with(|conns| conns.total())
|
||||
}
|
||||
|
||||
// a handle to worker that can send message to worker and share the availability of worker to other
// thread.
/// # Atomic Ordering
///
/// `Accept` always looks into its cached `Availability` field for the `ServerWorker` state.
/// It lazily increments the counter after successfully dispatching new work to a `ServerWorker`.
/// On reaching the counter limit, `Accept` updates its cached `Availability` and marks the worker
/// as unable to accept any work.
///
/// `ServerWorker` always decrements the counter when each piece of work received from `Accept` is done.
/// On reaching the counter limit, the worker uses `mio::Waker` and `WakerQueue` to wake up `Accept`
/// and notify it to update its cached `Availability` again, marking the worker as able to accept work.
///
/// Hence, a wake-up only happens after `Accept` increments the counter to the limit,
/// and a decrement from the limit always wakes up `Accept`.
#[derive(Clone)]
|
||||
pub(crate) struct WorkerHandle {
|
||||
pub idx: usize,
|
||||
tx1: UnboundedSender<WorkerCommand>,
|
||||
tx2: UnboundedSender<StopCommand>,
|
||||
avail: WorkerAvailability,
|
||||
pub(crate) struct Counter {
|
||||
counter: Arc<AtomicUsize>,
|
||||
limit: usize,
|
||||
}
|
||||
|
||||
impl WorkerHandle {
|
||||
pub fn new(
|
||||
idx: usize,
|
||||
tx1: UnboundedSender<WorkerCommand>,
|
||||
tx2: UnboundedSender<StopCommand>,
|
||||
avail: WorkerAvailability,
|
||||
) -> Self {
|
||||
WorkerHandle {
|
||||
impl Counter {
|
||||
pub(crate) fn new(limit: usize) -> Self {
|
||||
Self {
|
||||
counter: Arc::new(AtomicUsize::new(1)),
|
||||
limit,
|
||||
}
|
||||
}
|
||||
|
||||
/// Increment counter by 1; returns `false` when hitting the limit.
|
||||
#[inline(always)]
|
||||
pub(crate) fn inc(&self) -> bool {
|
||||
self.counter.fetch_add(1, Ordering::Relaxed) != self.limit
|
||||
}
|
||||
|
||||
/// Decrement counter by 1 and return true if crossing limit.
|
||||
#[inline(always)]
|
||||
pub(crate) fn dec(&self) -> bool {
|
||||
self.counter.fetch_sub(1, Ordering::Relaxed) == self.limit
|
||||
}
|
||||
|
||||
pub(crate) fn total(&self) -> usize {
|
||||
self.counter.load(Ordering::SeqCst) - 1
|
||||
}
|
||||
}
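// A small illustrative check of the inc/dec boundary behaviour above. The counter starts at 1,
// so with `limit = 2` one `inc` succeeds, the next one reports the limit as hit, and the
// matching `dec` reports the crossing back below the limit. Test-style sketch only, not part
// of this changeset.
#[cfg(test)]
mod counter_sketch {
    use super::Counter;

    #[test]
    fn inc_dec_boundaries() {
        let counter = Counter::new(2);

        assert!(counter.inc()); // previous value 1 != limit: still below the limit
        assert!(!counter.inc()); // previous value 2 == limit: the limit has been hit
        assert!(!counter.dec()); // previous value 3 != limit: still above the limit
        assert!(counter.dec()); // previous value 2 == limit: crossed back below the limit
    }
}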
|
||||
|
||||
pub(crate) struct WorkerCounter {
|
||||
idx: usize,
|
||||
inner: Rc<(WakerQueue, Counter)>,
|
||||
}
|
||||
|
||||
impl Clone for WorkerCounter {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
idx: self.idx,
|
||||
inner: self.inner.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WorkerCounter {
|
||||
pub(crate) fn new(idx: usize, waker_queue: WakerQueue, counter: Counter) -> Self {
|
||||
Self {
|
||||
idx,
|
||||
tx1,
|
||||
tx2,
|
||||
avail,
|
||||
inner: Rc::new((waker_queue, counter)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send(&self, msg: Conn) -> Result<(), Conn> {
|
||||
self.tx1.send(WorkerCommand(msg)).map_err(|msg| msg.0 .0)
|
||||
#[inline(always)]
|
||||
pub(crate) fn guard(&self) -> WorkerCounterGuard {
|
||||
WorkerCounterGuard(self.clone())
|
||||
}
|
||||
|
||||
pub fn available(&self) -> bool {
|
||||
self.avail.available()
|
||||
fn total(&self) -> usize {
|
||||
self.inner.1.total()
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct WorkerCounterGuard(WorkerCounter);
|
||||
|
||||
impl Drop for WorkerCounterGuard {
|
||||
fn drop(&mut self) {
|
||||
let (waker_queue, counter) = &*self.0.inner;
|
||||
if counter.dec() {
|
||||
waker_queue.wake(WakerInterest::WorkerAvailable(self.0.idx));
|
||||
}
|
||||
}
|
||||
}
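// A sketch of how the counter/guard pair above cooperates across the two sides, following the
// "Atomic Ordering" notes earlier. The task body and exact call sites are illustrative; only
// `guard()`, `Counter::dec()` and the `WorkerAvailable` wake-up are taken from this changeset.
fn worker_counter_flow_sketch(worker_counter: &WorkerCounter, conn: Conn) {
    // one guard per in-flight connection; it holds the slot that `Accept` accounted for
    // when it incremented its side of the shared counter
    let guard = worker_counter.guard();

    actix_rt::spawn(async move {
        // ... serve the connection here ...
        drop(conn);

        // when `guard` drops, `Counter::dec()` runs; if that drop crosses the limit downward,
        // the worker wakes `Accept` with `WakerInterest::WorkerAvailable(idx)` so it can mark
        // this worker as available again
        drop(guard);
    });
}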
|
||||
|
||||
/// Handle to worker that can send connection message to worker and share the
|
||||
/// availability of worker to other thread.
|
||||
///
|
||||
/// Held by [Accept](crate::accept::Accept).
|
||||
pub(crate) struct WorkerHandleAccept {
|
||||
idx: usize,
|
||||
tx: UnboundedSender<Conn>,
|
||||
counter: Counter,
|
||||
}
|
||||
|
||||
impl WorkerHandleAccept {
|
||||
#[inline(always)]
|
||||
pub(crate) fn idx(&self) -> usize {
|
||||
self.idx
|
||||
}
|
||||
|
||||
pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
|
||||
let (result, rx) = oneshot::channel();
|
||||
let _ = self.tx2.send(StopCommand { graceful, result });
|
||||
#[inline(always)]
|
||||
pub(crate) fn send(&self, msg: Conn) -> Result<(), Conn> {
|
||||
self.tx.send(msg).map_err(|msg| msg.0)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub(crate) fn inc_counter(&self) -> bool {
|
||||
self.counter.inc()
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle to a worker that can send a stop message to the worker.
|
||||
///
|
||||
/// Held by [ServerBuilder](crate::builder::ServerBuilder).
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct WorkerHandleServer {
|
||||
#[allow(dead_code)]
|
||||
idx: usize,
|
||||
tx: UnboundedSender<Stop>,
|
||||
}
|
||||
|
||||
impl WorkerHandleServer {
|
||||
pub(crate) fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.tx.send(Stop { graceful, tx });
|
||||
rx
|
||||
}
|
||||
}
|
||||
|
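The stop call is a one-shot handshake: the handle sends `Stop { graceful }` together with a sender, and returns the matching receiver so the caller can await the shutdown outcome (true means the worker finished gracefully). A minimal stand-in using std channels in place of the tokio unbounded plus oneshot pair; names are illustrative, not the crate's API:

```rust
use std::sync::mpsc;
use std::thread;

struct Stop {
    graceful: bool,
    tx: mpsc::Sender<bool>,
}

fn stop(cmd_tx: &mpsc::Sender<Stop>, graceful: bool) -> mpsc::Receiver<bool> {
    let (tx, rx) = mpsc::channel();
    let _ = cmd_tx.send(Stop { graceful, tx });
    rx // caller waits on this for the shutdown outcome
}

fn main() {
    let (cmd_tx, cmd_rx) = mpsc::channel::<Stop>();

    // "Worker": reports whether it shut down gracefully.
    let worker = thread::spawn(move || {
        let Stop { graceful, tx } = cmd_rx.recv().unwrap();
        let _ = tx.send(graceful); // pretend shutdown completed
    });

    let outcome = stop(&cmd_tx, true);
    assert!(outcome.recv().unwrap());
    worker.join().unwrap();
}
```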
#[derive(Clone)]
pub(crate) struct WorkerAvailability {
    waker: WakerQueue,
    available: Arc<AtomicBool>,
}

impl WorkerAvailability {
    pub fn new(waker: WakerQueue) -> Self {
        WorkerAvailability {
            waker,
            available: Arc::new(AtomicBool::new(false)),
        }
    }

    pub fn available(&self) -> bool {
        self.available.load(Ordering::Acquire)
    }

    pub fn set(&self, val: bool) {
        let old = self.available.swap(val, Ordering::Release);
        // notify the accept loop only when switching to available.
        if !old && val {
            self.waker.wake(WakerInterest::WorkerAvailable);
        }
    }
}

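`set` swaps the flag and only wakes the accept loop on a false to true transition, so repeated `set(true)` calls stay cheap. A tiny sketch of that edge-triggered behaviour; the return value exists only for the demonstration:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

struct Availability {
    available: AtomicBool,
}

impl Availability {
    // Returns whether the accept loop would have been woken.
    fn set(&self, val: bool) -> bool {
        let old = self.available.swap(val, Ordering::Release);
        !old && val
    }
}

fn main() {
    let avail = Availability { available: AtomicBool::new(false) };
    assert!(avail.set(true));   // false -> true: wake the accept loop
    assert!(!avail.set(true));  // already true: no wake
    assert!(!avail.set(false)); // going unavailable never wakes
    assert!(avail.set(true));   // back up again: wake
}
```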
/// Service worker.
///
/// Worker accepts Socket objects via an unbounded channel and starts stream processing.
pub(crate) struct ServerWorker {
    rx: UnboundedReceiver<WorkerCommand>,
    rx2: UnboundedReceiver<StopCommand>,
    services: Vec<WorkerService>,
    availability: WorkerAvailability,
    conns: Counter,
    factories: Vec<Box<dyn InternalServiceFactory>>,
    // UnboundedReceiver<Conn> should always be the first field.
    // It must be dropped as soon as ServerWorker is dropped.
    rx: UnboundedReceiver<Conn>,
    rx2: UnboundedReceiver<Stop>,
    counter: WorkerCounter,
    services: Box<[WorkerService]>,
    factories: Box<[Box<dyn InternalServiceFactory>]>,
    state: WorkerState,
    config: ServerWorkerConfig,
    shutdown_timeout: Duration,
}
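The comment about field order relies on Rust dropping struct fields in declaration order, so putting the `Conn` receiver first closes the channel before the services and factories are torn down. A quick, self-contained demonstration of that guarantee:

```rust
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

struct Worker {
    rx: Noisy,       // dropped first, like `rx: UnboundedReceiver<Conn>`
    services: Noisy, // dropped after the receiver
}

fn main() {
    let _w = Worker {
        rx: Noisy("rx"),
        services: Noisy("services"),
    };
    // Prints "dropping rx" then "dropping services": declaration order wins.
}
```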
|
||||
struct WorkerService {
|
||||
@@ -164,6 +237,7 @@ enum WorkerServiceStatus {
|
||||
pub(crate) struct ServerWorkerConfig {
|
||||
shutdown_timeout: Duration,
|
||||
max_blocking_threads: usize,
|
||||
max_concurrent_connections: usize,
|
||||
}
|
||||
|
||||
impl Default for ServerWorkerConfig {
|
||||
@@ -173,6 +247,7 @@ impl Default for ServerWorkerConfig {
|
||||
Self {
|
||||
shutdown_timeout: Duration::from_secs(30),
|
||||
max_blocking_threads,
|
||||
max_concurrent_connections: 25600,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -182,6 +257,10 @@ impl ServerWorkerConfig {
|
||||
self.max_blocking_threads = num;
|
||||
}
|
||||
|
||||
pub(crate) fn max_concurrent_connections(&mut self, num: usize) {
|
||||
self.max_concurrent_connections = num;
|
||||
}
|
||||
|
||||
pub(crate) fn shutdown_timeout(&mut self, dur: Duration) {
|
||||
self.shutdown_timeout = dur;
|
||||
}
|
||||
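For reference, these config fields surface on the public builder. A hedged sketch based on the tests later in this diff; the method names (`maxconn`, `workers`, `shutdown_timeout`, `bind`) are assumed from those tests and may differ between versions:

```rust
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .workers(2)           // number of worker arbiters
        .maxconn(256)         // per-worker concurrent connection cap (the Counter limit)
        .shutdown_timeout(30) // seconds allowed for graceful shutdown
        .bind("example", ("127.0.0.1", 8080), || {
            // Trivial service that accepts and immediately finishes each connection.
            fn_service(|_io: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run()
        .await
}
```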
@@ -191,97 +270,112 @@ impl ServerWorker {
|
||||
pub(crate) fn start(
|
||||
idx: usize,
|
||||
factories: Vec<Box<dyn InternalServiceFactory>>,
|
||||
availability: WorkerAvailability,
|
||||
waker_queue: WakerQueue,
|
||||
config: ServerWorkerConfig,
|
||||
) -> WorkerHandle {
|
||||
) -> (WorkerHandleAccept, WorkerHandleServer) {
|
||||
let (tx1, rx) = unbounded_channel();
|
||||
let (tx2, rx2) = unbounded_channel();
|
||||
let avail = availability.clone();
|
||||
|
||||
let counter = Counter::new(config.max_concurrent_connections);
|
||||
|
||||
let counter_clone = counter.clone();
|
||||
// every worker runs in its own arbiter.
|
||||
// use a custom tokio runtime builder to change the settings of runtime.
|
||||
Arbiter::with_tokio_rt(move || {
|
||||
#[cfg(all(target_os = "linux", feature = "io-uring"))]
|
||||
let arbiter = {
|
||||
// TODO: pass max blocking thread config when tokio-uring enables configuration
|
||||
// on building runtime.
|
||||
let _ = config.max_blocking_threads;
|
||||
Arbiter::new()
|
||||
};
|
||||
|
||||
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
|
||||
let arbiter = Arbiter::with_tokio_rt(move || {
|
||||
tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.max_blocking_threads(config.max_blocking_threads)
|
||||
.build()
|
||||
.unwrap()
|
||||
})
|
||||
.spawn(async move {
|
||||
availability.set(false);
|
||||
let mut wrk = MAX_CONNS_COUNTER.with(move |conns| ServerWorker {
|
||||
rx,
|
||||
rx2,
|
||||
availability,
|
||||
factories,
|
||||
config,
|
||||
services: Vec::new(),
|
||||
conns: conns.clone(),
|
||||
state: WorkerState::Unavailable,
|
||||
});
|
||||
});
|
||||
|
||||
let fut = wrk
|
||||
.factories
|
||||
arbiter.spawn(async move {
|
||||
let fut = factories
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, factory)| {
|
||||
let fut = factory.create();
|
||||
async move {
|
||||
fut.await.map(|r| {
|
||||
r.into_iter().map(|(t, s)| (idx, t, s)).collect::<Vec<_>>()
|
||||
})
|
||||
}
|
||||
async move { fut.await.map(|(t, s)| (idx, t, s)) }
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// a second spawn to make sure worker future runs as non boxed future.
|
||||
// As Arbiter::spawn would box the future before send it to arbiter.
|
||||
// a second spawn to run !Send future tasks.
|
||||
spawn(async move {
|
||||
let res: Result<Vec<_>, _> = join_all(fut).await.into_iter().collect();
|
||||
match res {
|
||||
Ok(services) => {
|
||||
for item in services {
|
||||
for (factory, token, service) in item {
|
||||
assert_eq!(token.0, wrk.services.len());
|
||||
wrk.services.push(WorkerService {
|
||||
factory,
|
||||
service,
|
||||
status: WorkerServiceStatus::Unavailable,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
let res = join_all(fut)
|
||||
.await
|
||||
.into_iter()
|
||||
.collect::<Result<Vec<_>, _>>();
|
||||
let services = match res {
|
||||
Ok(res) => res
|
||||
.into_iter()
|
||||
.fold(Vec::new(), |mut services, (factory, token, service)| {
|
||||
assert_eq!(token, services.len());
|
||||
services.push(WorkerService {
|
||||
factory,
|
||||
service,
|
||||
status: WorkerServiceStatus::Unavailable,
|
||||
});
|
||||
services
|
||||
})
|
||||
.into_boxed_slice(),
|
||||
Err(e) => {
|
||||
error!("Can not start worker: {:?}", e);
|
||||
Arbiter::current().stop();
|
||||
return;
|
||||
}
|
||||
}
|
||||
wrk.await
|
||||
};
|
||||
|
||||
// a third spawn to make sure ServerWorker runs as non boxed future.
|
||||
spawn(ServerWorker {
|
||||
rx,
|
||||
rx2,
|
||||
services,
|
||||
counter: WorkerCounter::new(idx, waker_queue, counter_clone),
|
||||
factories: factories.into_boxed_slice(),
|
||||
state: Default::default(),
|
||||
shutdown_timeout: config.shutdown_timeout,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
WorkerHandle::new(idx, tx1, tx2, avail)
|
||||
handle_pair(idx, tx1, tx2, counter)
|
||||
}
|
||||
|
||||
fn restart_service(&mut self, idx: usize, factory_id: usize) {
|
||||
let factory = &self.factories[factory_id];
|
||||
trace!("Service {:?} failed, restarting", factory.name(idx));
|
||||
self.services[idx].status = WorkerServiceStatus::Restarting;
|
||||
self.state = WorkerState::Restarting(Restart {
|
||||
factory_id,
|
||||
token: idx,
|
||||
fut: factory.create(),
|
||||
});
|
||||
}
|
||||
|
||||
fn shutdown(&mut self, force: bool) {
|
||||
if force {
|
||||
self.services.iter_mut().for_each(|srv| {
|
||||
if srv.status == WorkerServiceStatus::Available {
|
||||
srv.status = WorkerServiceStatus::Stopped;
|
||||
}
|
||||
self.services
|
||||
.iter_mut()
|
||||
.filter(|srv| srv.status == WorkerServiceStatus::Available)
|
||||
.for_each(|srv| {
|
||||
srv.status = if force {
|
||||
WorkerServiceStatus::Stopped
|
||||
} else {
|
||||
WorkerServiceStatus::Stopping
|
||||
};
|
||||
});
|
||||
} else {
|
||||
self.services.iter_mut().for_each(move |srv| {
|
||||
if srv.status == WorkerServiceStatus::Available {
|
||||
srv.status = WorkerServiceStatus::Stopping;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (Token, usize)> {
|
||||
let mut ready = self.conns.available(cx);
|
||||
let mut failed = None;
|
||||
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (usize, usize)> {
|
||||
let mut ready = true;
|
||||
for (idx, srv) in self.services.iter_mut().enumerate() {
|
||||
if srv.status == WorkerServiceStatus::Available
|
||||
|| srv.status == WorkerServiceStatus::Unavailable
|
||||
@@ -291,7 +385,7 @@ impl ServerWorker {
|
||||
if srv.status == WorkerServiceStatus::Unavailable {
|
||||
trace!(
|
||||
"Service {:?} is available",
|
||||
self.factories[srv.factory].name(Token(idx))
|
||||
self.factories[srv.factory].name(idx)
|
||||
);
|
||||
srv.status = WorkerServiceStatus::Available;
|
||||
}
|
||||
@@ -302,7 +396,7 @@ impl ServerWorker {
|
||||
if srv.status == WorkerServiceStatus::Available {
|
||||
trace!(
|
||||
"Service {:?} is unavailable",
|
||||
self.factories[srv.factory].name(Token(idx))
|
||||
self.factories[srv.factory].name(idx)
|
||||
);
|
||||
srv.status = WorkerServiceStatus::Unavailable;
|
||||
}
|
||||
@@ -310,173 +404,172 @@ impl ServerWorker {
|
||||
Poll::Ready(Err(_)) => {
|
||||
error!(
|
||||
"Service {:?} readiness check returned error, restarting",
|
||||
self.factories[srv.factory].name(Token(idx))
|
||||
self.factories[srv.factory].name(idx)
|
||||
);
|
||||
failed = Some((Token(idx), srv.factory));
|
||||
srv.status = WorkerServiceStatus::Failed;
|
||||
return Err((idx, srv.factory));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(idx) = failed {
|
||||
Err(idx)
|
||||
} else {
|
||||
Ok(ready)
|
||||
}
|
||||
|
||||
Ok(ready)
|
||||
}
|
||||
}
|
||||
|
||||
enum WorkerState {
|
||||
Available,
|
||||
Unavailable,
|
||||
Restarting(
|
||||
usize,
|
||||
Token,
|
||||
LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>,
|
||||
),
|
||||
Shutdown(
|
||||
Pin<Box<Sleep>>,
|
||||
Pin<Box<Sleep>>,
|
||||
Option<oneshot::Sender<bool>>,
|
||||
),
|
||||
Restarting(Restart),
|
||||
Shutdown(Shutdown),
|
||||
}
|
||||
|
||||
struct Restart {
|
||||
factory_id: usize,
|
||||
token: usize,
|
||||
fut: LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>,
|
||||
}
|
||||
|
||||
/// State necessary for server shutdown.
|
||||
struct Shutdown {
|
||||
// Interval for checking the shutdown progress.
|
||||
timer: Pin<Box<Sleep>>,
|
||||
|
||||
/// Start time of shutdown.
|
||||
start_from: Instant,
|
||||
|
||||
/// Notify of the shutdown outcome (force/grace) to stop caller.
|
||||
tx: oneshot::Sender<bool>,
|
||||
}
|
||||
|
||||
impl Default for WorkerState {
|
||||
fn default() -> Self {
|
||||
Self::Unavailable
|
||||
}
|
||||
}
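This `Default` impl exists so the shutdown branch can `mem::take` the state out of `&mut self` and consume the oneshot sender it holds. A self-contained sketch of the pattern, with illustrative names and a std channel standing in for the oneshot:

```rust
use std::mem;
use std::sync::mpsc;

enum State {
    Unavailable,
    Shutdown { tx: mpsc::Sender<bool> },
}

impl Default for State {
    fn default() -> Self {
        State::Unavailable
    }
}

fn finish(state: &mut State, graceful: bool) {
    // Replace the state with its default and take ownership of the old value,
    // so the sender can be consumed without moving out of a borrow.
    if let State::Shutdown { tx } = mem::take(state) {
        let _ = tx.send(graceful);
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let mut state = State::Shutdown { tx };
    finish(&mut state, true);
    assert!(rx.recv().unwrap());
    assert!(matches!(state, State::Unavailable));
}
```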
|
||||
|
||||
impl Drop for ServerWorker {
|
||||
fn drop(&mut self) {
|
||||
// Stop the Arbiter that ServerWorker runs on, on drop.
|
||||
Arbiter::current().stop();
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for ServerWorker {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.as_mut().get_mut();
|
||||
|
||||
// `StopWorker` message handler
|
||||
if let Poll::Ready(Some(StopCommand { graceful, result })) =
|
||||
Pin::new(&mut self.rx2).poll_recv(cx)
|
||||
if let Poll::Ready(Some(Stop { graceful, tx })) = Pin::new(&mut this.rx2).poll_recv(cx)
|
||||
{
|
||||
self.availability.set(false);
|
||||
let num = num_connections();
|
||||
let num = this.counter.total();
|
||||
if num == 0 {
|
||||
info!("Shutting down worker, 0 connections");
|
||||
let _ = result.send(true);
|
||||
let _ = tx.send(true);
|
||||
return Poll::Ready(());
|
||||
} else if graceful {
|
||||
self.shutdown(false);
|
||||
let num = num_connections();
|
||||
if num != 0 {
|
||||
info!("Graceful worker shutdown, {} connections", num);
|
||||
self.state = WorkerState::Shutdown(
|
||||
Box::pin(sleep_until(Instant::now() + Duration::from_secs(1))),
|
||||
Box::pin(sleep_until(Instant::now() + self.config.shutdown_timeout)),
|
||||
Some(result),
|
||||
);
|
||||
} else {
|
||||
let _ = result.send(true);
|
||||
return Poll::Ready(());
|
||||
}
|
||||
info!("Graceful worker shutdown, {} connections", num);
|
||||
this.shutdown(false);
|
||||
|
||||
this.state = WorkerState::Shutdown(Shutdown {
|
||||
timer: Box::pin(sleep(Duration::from_secs(1))),
|
||||
start_from: Instant::now(),
|
||||
tx,
|
||||
});
|
||||
} else {
|
||||
info!("Force shutdown worker, {} connections", num);
|
||||
self.shutdown(true);
|
||||
let _ = result.send(false);
|
||||
this.shutdown(true);
|
||||
|
||||
let _ = tx.send(false);
|
||||
return Poll::Ready(());
|
||||
}
|
||||
}
|
||||
|
||||
match self.state {
|
||||
WorkerState::Unavailable => match self.check_readiness(cx) {
|
||||
match this.state {
|
||||
WorkerState::Unavailable => match this.check_readiness(cx) {
|
||||
Ok(true) => {
|
||||
self.state = WorkerState::Available;
|
||||
self.availability.set(true);
|
||||
this.state = WorkerState::Available;
|
||||
self.poll(cx)
|
||||
}
|
||||
Ok(false) => Poll::Pending,
|
||||
Err((token, idx)) => {
|
||||
trace!(
|
||||
"Service {:?} failed, restarting",
|
||||
self.factories[idx].name(token)
|
||||
);
|
||||
self.services[token.0].status = WorkerServiceStatus::Restarting;
|
||||
self.state =
|
||||
WorkerState::Restarting(idx, token, self.factories[idx].create());
|
||||
this.restart_service(token, idx);
|
||||
self.poll(cx)
|
||||
}
|
||||
},
|
||||
WorkerState::Restarting(idx, token, ref mut fut) => {
|
||||
match fut.as_mut().poll(cx) {
|
||||
Poll::Ready(Ok(item)) => {
|
||||
// only interested in the first item?
|
||||
if let Some((token, service)) = item.into_iter().next() {
|
||||
trace!(
|
||||
"Service {:?} has been restarted",
|
||||
self.factories[idx].name(token)
|
||||
);
|
||||
self.services[token.0].created(service);
|
||||
self.state = WorkerState::Unavailable;
|
||||
return self.poll(cx);
|
||||
}
|
||||
}
|
||||
Poll::Ready(Err(_)) => {
|
||||
WorkerState::Restarting(ref mut restart) => {
|
||||
let factory_id = restart.factory_id;
|
||||
let token = restart.token;
|
||||
|
||||
let (token_new, service) = ready!(restart.fut.as_mut().poll(cx))
|
||||
.unwrap_or_else(|_| {
|
||||
panic!(
|
||||
"Can not restart {:?} service",
|
||||
self.factories[idx].name(token)
|
||||
);
|
||||
}
|
||||
Poll::Pending => return Poll::Pending,
|
||||
}
|
||||
this.factories[factory_id].name(token)
|
||||
)
|
||||
});
|
||||
|
||||
assert_eq!(token, token_new);
|
||||
|
||||
trace!(
|
||||
"Service {:?} has been restarted",
|
||||
this.factories[factory_id].name(token)
|
||||
);
|
||||
|
||||
this.services[token].created(service);
|
||||
this.state = WorkerState::Unavailable;
|
||||
|
||||
self.poll(cx)
|
||||
}
|
||||
WorkerState::Shutdown(ref mut t1, ref mut t2, ref mut tx) => {
|
||||
let num = num_connections();
|
||||
if num == 0 {
|
||||
let _ = tx.take().unwrap().send(true);
|
||||
Arbiter::current().stop();
|
||||
return Poll::Ready(());
|
||||
}
|
||||
WorkerState::Shutdown(ref mut shutdown) => {
|
||||
// wait for 1 second
|
||||
ready!(shutdown.timer.as_mut().poll(cx));
|
||||
|
||||
// check graceful timeout
|
||||
if Pin::new(t2).poll(cx).is_ready() {
|
||||
let _ = tx.take().unwrap().send(false);
|
||||
self.shutdown(true);
|
||||
Arbiter::current().stop();
|
||||
return Poll::Ready(());
|
||||
}
|
||||
if this.counter.total() == 0 {
|
||||
// graceful shutdown
|
||||
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
|
||||
let _ = shutdown.tx.send(true);
|
||||
}
|
||||
|
||||
// sleep for 1 second and then check again
|
||||
if t1.as_mut().poll(cx).is_ready() {
|
||||
*t1 = Box::pin(sleep_until(Instant::now() + Duration::from_secs(1)));
|
||||
let _ = t1.as_mut().poll(cx);
|
||||
}
|
||||
Poll::Ready(())
|
||||
} else if shutdown.start_from.elapsed() >= this.shutdown_timeout {
|
||||
// timeout forceful shutdown
|
||||
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
|
||||
let _ = shutdown.tx.send(false);
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
// reset timer and wait for 1 second
|
||||
let time = Instant::now() + Duration::from_secs(1);
|
||||
shutdown.timer.as_mut().reset(time);
|
||||
shutdown.timer.as_mut().poll(cx)
|
||||
}
|
||||
}
|
||||
// actively poll stream and handle worker command
|
||||
WorkerState::Available => loop {
|
||||
match self.check_readiness(cx) {
|
||||
Ok(true) => (),
|
||||
match this.check_readiness(cx) {
|
||||
Ok(true) => {}
|
||||
Ok(false) => {
|
||||
trace!("Worker is unavailable");
|
||||
self.availability.set(false);
|
||||
self.state = WorkerState::Unavailable;
|
||||
this.state = WorkerState::Unavailable;
|
||||
return self.poll(cx);
|
||||
}
|
||||
Err((token, idx)) => {
|
||||
trace!(
|
||||
"Service {:?} failed, restarting",
|
||||
self.factories[idx].name(token)
|
||||
);
|
||||
self.availability.set(false);
|
||||
self.services[token.0].status = WorkerServiceStatus::Restarting;
|
||||
self.state =
|
||||
WorkerState::Restarting(idx, token, self.factories[idx].create());
|
||||
this.restart_service(token, idx);
|
||||
return self.poll(cx);
|
||||
}
|
||||
}
|
||||
|
||||
match Pin::new(&mut self.rx).poll_recv(cx) {
|
||||
// handle incoming io stream
|
||||
Poll::Ready(Some(WorkerCommand(msg))) => {
|
||||
let guard = self.conns.get();
|
||||
let _ = self.services[msg.token.0]
|
||||
.service
|
||||
.call((Some(guard), msg.io));
|
||||
// handle incoming io stream
|
||||
match ready!(Pin::new(&mut this.rx).poll_recv(cx)) {
|
||||
Some(msg) => {
|
||||
let guard = this.counter.guard();
|
||||
let _ = this.services[msg.token].service.call((guard, msg.io));
|
||||
}
|
||||
Poll::Pending => return Poll::Pending,
|
||||
Poll::Ready(None) => return Poll::Ready(()),
|
||||
None => return Poll::Ready(()),
|
||||
};
|
||||
},
|
||||
}
|
||||
|
@@ -1,10 +1,12 @@
|
||||
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::{mpsc, Arc};
|
||||
use std::{net, thread, time};
|
||||
use std::{net, thread, time::Duration};
|
||||
|
||||
use actix_rt::{net::TcpStream, time::sleep};
|
||||
use actix_server::Server;
|
||||
use actix_service::fn_service;
|
||||
use futures_util::future::{lazy, ok};
|
||||
use actix_utils::future::ok;
|
||||
use futures_util::future::lazy;
|
||||
|
||||
fn unused_addr() -> net::SocketAddr {
|
||||
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
|
||||
@@ -30,12 +32,13 @@ fn test_bind() {
|
||||
.unwrap()
|
||||
.run()
|
||||
}));
|
||||
|
||||
let _ = tx.send((srv, actix_rt::System::current()));
|
||||
let _ = sys.run();
|
||||
});
|
||||
let (_, sys) = rx.recv().unwrap();
|
||||
|
||||
thread::sleep(time::Duration::from_millis(500));
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
sys.stop();
|
||||
let _ = h.join();
|
||||
@@ -62,7 +65,7 @@ fn test_listen() {
|
||||
});
|
||||
let sys = rx.recv().unwrap();
|
||||
|
||||
thread::sleep(time::Duration::from_millis(500));
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
sys.stop();
|
||||
let _ = h.join();
|
||||
@@ -71,11 +74,11 @@ fn test_listen() {
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn test_start() {
|
||||
use std::io::Read;
|
||||
|
||||
use actix_codec::{BytesCodec, Framed};
|
||||
use actix_rt::net::TcpStream;
|
||||
use bytes::Bytes;
|
||||
use futures_util::sink::SinkExt;
|
||||
use std::io::Read;
|
||||
|
||||
let addr = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
@@ -110,16 +113,16 @@ fn test_start() {
|
||||
|
||||
// pause
|
||||
let _ = srv.pause();
|
||||
thread::sleep(time::Duration::from_millis(200));
|
||||
thread::sleep(Duration::from_millis(200));
|
||||
let mut conn = net::TcpStream::connect(addr).unwrap();
|
||||
conn.set_read_timeout(Some(time::Duration::from_millis(100)))
|
||||
conn.set_read_timeout(Some(Duration::from_millis(100)))
|
||||
.unwrap();
|
||||
let res = conn.read_exact(&mut buf);
|
||||
assert!(res.is_err());
|
||||
|
||||
// resume
|
||||
let _ = srv.resume();
|
||||
thread::sleep(time::Duration::from_millis(100));
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
@@ -131,60 +134,320 @@ fn test_start() {
|
||||
|
||||
// stop
|
||||
let _ = srv.stop(false);
|
||||
thread::sleep(time::Duration::from_millis(100));
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
assert!(net::TcpStream::connect(addr).is_err());
|
||||
|
||||
thread::sleep(time::Duration::from_millis(100));
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
sys.stop();
|
||||
let _ = h.join();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_configure() {
|
||||
let addr1 = unused_addr();
|
||||
let addr2 = unused_addr();
|
||||
let addr3 = unused_addr();
|
||||
#[actix_rt::test]
|
||||
async fn test_max_concurrent_connections() {
|
||||
// Note:
|
||||
// A TCP listener accepts connections based on its backlog setting.
|
||||
//
|
||||
// The limit tested here, on the other hand, only caps how many concurrent TCP streams a
// worker thread will accept.
|
||||
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
let addr = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let num = Arc::new(AtomicUsize::new(0));
|
||||
let num2 = num.clone();
|
||||
|
||||
let counter = Arc::new(AtomicUsize::new(0));
|
||||
let counter_clone = counter.clone();
|
||||
|
||||
let max_conn = 3;
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
let num = num2.clone();
|
||||
let sys = actix_rt::System::new();
|
||||
let srv = sys.block_on(lazy(|_| {
|
||||
Server::build()
|
||||
actix_rt::System::new().block_on(async {
|
||||
let server = Server::build()
|
||||
// Set a relatively higher backlog.
|
||||
.backlog(12)
|
||||
// max connections for a worker is 3.
|
||||
.maxconn(max_conn)
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.configure(move |cfg| {
|
||||
.bind("test", addr, move || {
|
||||
let counter = counter.clone();
|
||||
fn_service(move |_io: TcpStream| {
|
||||
let counter = counter.clone();
|
||||
async move {
|
||||
counter.fetch_add(1, Ordering::SeqCst);
|
||||
sleep(Duration::from_secs(20)).await;
|
||||
counter.fetch_sub(1, Ordering::SeqCst);
|
||||
Ok::<(), ()>(())
|
||||
}
|
||||
})
|
||||
})?
|
||||
.run();
|
||||
|
||||
let _ = tx.send((server.clone(), actix_rt::System::current()));
|
||||
|
||||
server.await
|
||||
})
|
||||
});
|
||||
|
||||
let (srv, sys) = rx.recv().unwrap();
|
||||
|
||||
let mut conns = vec![];
|
||||
|
||||
for _ in 0..12 {
|
||||
let conn = tokio::net::TcpStream::connect(addr).await.unwrap();
|
||||
conns.push(conn);
|
||||
}
|
||||
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
|
||||
// counter would remain at 3 even with 12 successful connections,
// and 9 of them remain in the backlog.
|
||||
assert_eq!(max_conn, counter_clone.load(Ordering::SeqCst));
|
||||
|
||||
for mut conn in conns {
|
||||
conn.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
srv.stop(false).await;
|
||||
|
||||
sys.stop();
|
||||
let _ = h.join().unwrap();
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_service_restart() {
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_service::{fn_factory, Service};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
struct TestService(Arc<AtomicUsize>);
|
||||
|
||||
impl Service<TcpStream> for TestService {
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
let TestService(ref counter) = self;
|
||||
let c = counter.fetch_add(1, Ordering::SeqCst);
|
||||
// Force the service to restart on first readiness check.
|
||||
if c > 0 {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Poll::Ready(Err(()))
|
||||
}
|
||||
}
|
||||
|
||||
fn call(&self, _: TcpStream) -> Self::Future {
|
||||
Box::pin(async { Ok(()) })
|
||||
}
|
||||
}
|
||||
|
||||
let addr1 = unused_addr();
|
||||
let addr2 = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let num = Arc::new(AtomicUsize::new(0));
|
||||
let num2 = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
let num_clone = num.clone();
|
||||
let num2_clone = num2.clone();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
let num = num.clone();
|
||||
actix_rt::System::new().block_on(async {
|
||||
let server = Server::build()
|
||||
.backlog(1)
|
||||
.disable_signals()
|
||||
.bind("addr1", addr1, move || {
|
||||
let num = num.clone();
|
||||
let lst = net::TcpListener::bind(addr3).unwrap();
|
||||
cfg.bind("addr1", addr1)
|
||||
.unwrap()
|
||||
.bind("addr2", addr2)
|
||||
.unwrap()
|
||||
.listen("addr3", lst)
|
||||
.apply(move |rt| {
|
||||
let num = num.clone();
|
||||
rt.service("addr1", fn_service(|_| ok::<_, ()>(())));
|
||||
rt.service("addr3", fn_service(|_| ok::<_, ()>(())));
|
||||
rt.on_start(lazy(move |_| {
|
||||
let _ = num.fetch_add(1, Relaxed);
|
||||
}))
|
||||
})
|
||||
fn_factory(move || {
|
||||
let num = num.clone();
|
||||
async move { Ok::<_, ()>(TestService(num)) }
|
||||
})
|
||||
})
|
||||
.unwrap()
|
||||
.bind("addr2", addr2, move || {
|
||||
let num2 = num2.clone();
|
||||
fn_factory(move || {
|
||||
let num2 = num2.clone();
|
||||
async move { Ok::<_, ()>(TestService(num2)) }
|
||||
})
|
||||
})
|
||||
.unwrap()
|
||||
.workers(1)
|
||||
.run()
|
||||
}));
|
||||
let _ = tx.send((srv, actix_rt::System::current()));
|
||||
let _ = sys.run();
|
||||
});
|
||||
let (_, sys) = rx.recv().unwrap();
|
||||
thread::sleep(time::Duration::from_millis(500));
|
||||
.run();
|
||||
|
||||
let _ = tx.send((server.clone(), actix_rt::System::current()));
|
||||
server.await
|
||||
})
|
||||
});
|
||||
|
||||
let (server, sys) = rx.recv().unwrap();
|
||||
|
||||
for _ in 0..5 {
|
||||
TcpStream::connect(addr1)
|
||||
.await
|
||||
.unwrap()
|
||||
.shutdown()
|
||||
.await
|
||||
.unwrap();
|
||||
TcpStream::connect(addr2)
|
||||
.await
|
||||
.unwrap()
|
||||
.shutdown()
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
assert!(num_clone.load(Ordering::SeqCst) > 5);
|
||||
assert!(num2_clone.load(Ordering::SeqCst) > 5);
|
||||
|
||||
assert!(net::TcpStream::connect(addr1).is_ok());
|
||||
assert!(net::TcpStream::connect(addr2).is_ok());
|
||||
assert!(net::TcpStream::connect(addr3).is_ok());
|
||||
assert_eq!(num.load(Relaxed), 1);
|
||||
sys.stop();
|
||||
let _ = h.join();
|
||||
let _ = server.stop(false);
|
||||
let _ = h.join().unwrap();
|
||||
}
|
||||
|
||||
#[ignore]
|
||||
#[actix_rt::test]
|
||||
async fn worker_restart() {
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
|
||||
struct TestServiceFactory(Arc<AtomicUsize>);
|
||||
|
||||
impl ServiceFactory<TcpStream> for TestServiceFactory {
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type Config = ();
|
||||
type Service = TestService;
|
||||
type InitError = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: Self::Config) -> Self::Future {
|
||||
let counter = self.0.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
Box::pin(async move { Ok(TestService(counter)) })
|
||||
}
|
||||
}
|
||||
|
||||
struct TestService(usize);
|
||||
|
||||
impl Service<TcpStream> for TestService {
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
actix_service::always_ready!();
|
||||
|
||||
fn call(&self, stream: TcpStream) -> Self::Future {
|
||||
let counter = self.0;
|
||||
|
||||
let mut stream = stream.into_std().unwrap();
|
||||
use std::io::Write;
|
||||
let str = counter.to_string();
|
||||
let buf = str.as_bytes();
|
||||
|
||||
let mut written = 0;
|
||||
|
||||
while written < buf.len() {
|
||||
if let Ok(n) = stream.write(&buf[written..]) {
|
||||
written += n;
|
||||
}
|
||||
}
|
||||
stream.flush().unwrap();
|
||||
stream.shutdown(net::Shutdown::Write).unwrap();
|
||||
|
||||
// force worker 2 to restart service once.
|
||||
if counter == 2 {
|
||||
panic!("panic on purpose")
|
||||
} else {
|
||||
Box::pin(async { Ok(()) })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let addr = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
let counter = Arc::new(AtomicUsize::new(1));
|
||||
let h = thread::spawn(move || {
|
||||
let counter = counter.clone();
|
||||
actix_rt::System::new().block_on(async {
|
||||
let server = Server::build()
|
||||
.disable_signals()
|
||||
.bind("addr", addr, move || TestServiceFactory(counter.clone()))
|
||||
.unwrap()
|
||||
.workers(2)
|
||||
.run();
|
||||
|
||||
let _ = tx.send((server.clone(), actix_rt::System::current()));
|
||||
server.await
|
||||
})
|
||||
});
|
||||
|
||||
let (server, sys) = rx.recv().unwrap();
|
||||
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
let mut buf = [0; 8];
|
||||
|
||||
// worker 1 would not restart and returns its id consistently.
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("1", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// worker 2 dies after returning its response.
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("2", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// request to worker 1
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("1", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// TODO: Remove sleep if it can pass CI.
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
// worker 2 is restarting and work goes to worker 1.
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("1", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// TODO: Remove sleep if it can pass CI.
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
// worker 2 restarted but worker 1 was still the next to accept a connection.
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("1", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// TODO: Remove sleep if it can pass CI.
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
// worker 2 accepts connections again but its id is 3.
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("3", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
sys.stop();
|
||||
let _ = server.stop(false);
|
||||
let _ = h.join().unwrap();
|
||||
}
|
||||
|
@@ -3,6 +3,24 @@
## Unreleased - 2021-xx-xx


## 2.0.1 - 2021-10-11
* Documentation fix.


## 2.0.0 - 2021-04-16
* Removed pipeline and related structs/functions. [#335]

[#335]: https://github.com/actix/actix-net/pull/335


## 2.0.0-beta.5 - 2021-03-15
* Add default `Service` trait impl for `Rc<S: Service>` and `&S: Service`. [#288]
* Add `boxed::rc_service` function for constructing `boxed::RcService` type [#290]

[#288]: https://github.com/actix/actix-net/pull/288
[#290]: https://github.com/actix/actix-net/pull/290


## 2.0.0-beta.4 - 2021-02-04
* `Service::poll_ready` and `Service::call` receive `&self`. [#247]
* `apply_fn` and `apply_fn_factory` now receive `Fn(Req, &Service)` function type. [#247]
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "actix-service"
|
||||
version = "2.0.0-beta.4"
|
||||
version = "2.0.1"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"Rob Ede <robjtede@icloud.com>",
|
||||
@@ -8,11 +8,8 @@ authors = [
|
||||
]
|
||||
description = "Service trait and combinators for representing asynchronous request/response operations."
|
||||
keywords = ["network", "framework", "async", "futures", "service"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-service"
|
||||
readme = "README.md"
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
categories = ["network-programming", "asynchronous", "no-std"]
|
||||
repository = "https://github.com/actix/actix-net"
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
@@ -22,8 +19,10 @@ path = "src/lib.rs"
|
||||
|
||||
[dependencies]
|
||||
futures-core = { version = "0.3.7", default-features = false }
|
||||
paste = "1"
|
||||
pin-project-lite = "0.2"
|
||||
|
||||
[dev-dependencies]
|
||||
actix-rt = "2.0.0"
|
||||
actix-utils = "3.0.0"
|
||||
futures-util = { version = "0.3.7", default-features = false }
|
||||
|
@@ -3,11 +3,11 @@
|
||||
> Service trait and combinators for representing asynchronous request/response operations.
|
||||
|
||||
[](https://crates.io/crates/actix-service)
|
||||
[](https://docs.rs/actix-service/2.0.0-beta.4)
|
||||
[](https://docs.rs/actix-service/2.0.1)
|
||||
[](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
|
||||

|
||||
[](https://deps.rs/crate/actix-service/2.0.0-beta.4)
|
||||
[](https://crates.io/crates/actix-service)
|
||||
[](https://deps.rs/crate/actix-service/2.0.1)
|
||||

|
||||
[](https://discord.gg/NWpN5mmg3x)
|
||||
|
||||
See documentation for detailed explanations of these components: https://docs.rs/actix-service.
|
||||
|
33  actix-service/examples/clone.rs  (new file)
@@ -0,0 +1,33 @@
use std::{future::Future, sync::mpsc, time::Duration};

async fn oracle<F, Fut>(f: F) -> (u32, u32)
where
    F: FnOnce() -> Fut + Clone + Send + 'static,
    Fut: Future<Output = u32> + 'static,
{
    let f1 = actix_rt::spawn(f.clone()());
    let f2 = actix_rt::spawn(f());

    (f1.await.unwrap(), f2.await.unwrap())
}

#[actix_rt::main]
async fn main() {
    let (tx, rx) = mpsc::channel();

    let (r1, r2) = oracle({
        let tx = tx.clone();

        || async move {
            tx.send(()).unwrap();
            4 * 4
        }
    })
    .await;
    assert_eq!(r1, r2);

    tx.send(()).unwrap();

    rx.recv_timeout(Duration::from_millis(100)).unwrap();
    rx.recv_timeout(Duration::from_millis(100)).unwrap();
}
@@ -11,11 +11,11 @@ use pin_project_lite::pin_project;
|
||||
|
||||
use super::{Service, ServiceFactory};
|
||||
|
||||
/// Service for the `and_then` combinator, chaining a computation onto the end
|
||||
/// of another service which completes successfully.
|
||||
/// Service for the `and_then` combinator, chaining a computation onto the end of another service
|
||||
/// which completes successfully.
|
||||
///
|
||||
/// This is created by the `Pipeline::and_then` method.
|
||||
pub(crate) struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
|
||||
pub struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
|
||||
|
||||
impl<A, B, Req> AndThenService<A, B, Req> {
|
||||
/// Create new `AndThen` combinator
|
||||
@@ -64,7 +64,7 @@ where
|
||||
}
|
||||
|
||||
pin_project! {
|
||||
pub(crate) struct AndThenServiceResponse<A, B, Req>
|
||||
pub struct AndThenServiceResponse<A, B, Req>
|
||||
where
|
||||
A: Service<Req>,
|
||||
B: Service<A::Response, Error = A::Error>,
|
||||
@@ -117,7 +117,7 @@ where
|
||||
}
|
||||
|
||||
/// `.and_then()` service factory combinator
|
||||
pub(crate) struct AndThenServiceFactory<A, B, Req>
|
||||
pub struct AndThenServiceFactory<A, B, Req>
|
||||
where
|
||||
A: ServiceFactory<Req>,
|
||||
A::Config: Clone,
|
||||
@@ -200,7 +200,7 @@ where
|
||||
}
|
||||
|
||||
pin_project! {
|
||||
pub(crate) struct AndThenServiceFactoryResponse<A, B, Req>
|
||||
pub struct AndThenServiceFactoryResponse<A, B, Req>
|
||||
where
|
||||
A: ServiceFactory<Req>,
|
||||
B: ServiceFactory<A::Response>,
|
||||
@@ -272,7 +272,9 @@ mod tests {
|
||||
use futures_util::future::lazy;
|
||||
|
||||
use crate::{
|
||||
fn_factory, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory,
|
||||
fn_factory, ok,
|
||||
pipeline::{pipeline, pipeline_factory},
|
||||
ready, Ready, Service, ServiceFactory,
|
||||
};
|
||||
|
||||
struct Srv1(Rc<Cell<usize>>);
|
||||
|
@@ -214,7 +214,11 @@ mod tests {
|
||||
use futures_util::future::lazy;
|
||||
|
||||
use super::*;
|
||||
use crate::{ok, pipeline, pipeline_factory, Ready, Service, ServiceFactory};
|
||||
use crate::{
|
||||
ok,
|
||||
pipeline::{pipeline, pipeline_factory},
|
||||
Ready, Service, ServiceFactory,
|
||||
};
|
||||
|
||||
#[derive(Clone)]
|
||||
struct Srv;
|
||||
|
@@ -1,21 +1,69 @@
|
||||
use alloc::boxed::Box;
|
||||
use core::{
|
||||
future::Future,
|
||||
marker::PhantomData,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
//! Trait object forms of services and service factories.
|
||||
|
||||
use alloc::{boxed::Box, rc::Rc};
|
||||
use core::{future::Future, pin::Pin};
|
||||
|
||||
use paste::paste;
|
||||
|
||||
use crate::{Service, ServiceFactory};
|
||||
|
||||
/// A boxed future with no send bound or lifetime parameters.
|
||||
pub type BoxFuture<T> = Pin<Box<dyn Future<Output = T>>>;
|
||||
|
||||
pub type BoxService<Req, Res, Err> =
|
||||
Box<dyn Service<Req, Response = Res, Error = Err, Future = BoxFuture<Result<Res, Err>>>>;
|
||||
macro_rules! service_object {
|
||||
($name: ident, $type: tt, $fn_name: ident) => {
|
||||
paste! {
|
||||
#[doc = "Type alias for service trait object using `" $type "`."]
|
||||
pub type $name<Req, Res, Err> = $type<
|
||||
dyn Service<Req, Response = Res, Error = Err, Future = BoxFuture<Result<Res, Err>>>,
|
||||
>;
|
||||
|
||||
#[doc = "Wraps service as a trait object using [`" $name "`]."]
|
||||
pub fn $fn_name<S, Req>(service: S) -> $name<Req, S::Response, S::Error>
|
||||
where
|
||||
S: Service<Req> + 'static,
|
||||
Req: 'static,
|
||||
S::Future: 'static,
|
||||
{
|
||||
$type::new(ServiceWrapper::new(service))
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
service_object!(BoxService, Box, service);
|
||||
service_object!(RcService, Rc, rc_service);
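Assuming the `boxed::service` and `boxed::rc_service` constructors generated above (the latter added in 2.0.0-beta.5 per the changelog), usage looks roughly like this sketch; actix-rt is assumed for the executor:

```rust
use actix_service::{boxed, fn_service, Service};

#[actix_rt::main]
async fn main() {
    // Box-ed trait object: single owner.
    let svc = boxed::service(fn_service(|x: u32| async move { Ok::<_, ()>(x + 1) }));
    assert_eq!(svc.call(1).await, Ok(2));

    // Rc-ed trait object: cheaply cloneable.
    let svc = boxed::rc_service(fn_service(|x: u32| async move { Ok::<_, ()>(x + 1) }));
    let svc2 = svc.clone();
    assert_eq!(svc2.call(2).await, Ok(3));
}
```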
|
||||
|
||||
struct ServiceWrapper<S> {
|
||||
inner: S,
|
||||
}
|
||||
|
||||
impl<S> ServiceWrapper<S> {
|
||||
fn new(inner: S) -> Self {
|
||||
Self { inner }
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, Req, Res, Err> Service<Req> for ServiceWrapper<S>
|
||||
where
|
||||
S: Service<Req, Response = Res, Error = Err>,
|
||||
S::Future: 'static,
|
||||
{
|
||||
type Response = Res;
|
||||
type Error = Err;
|
||||
type Future = BoxFuture<Result<Res, Err>>;
|
||||
|
||||
crate::forward_ready!(inner);
|
||||
|
||||
fn call(&self, req: Req) -> Self::Future {
|
||||
Box::pin(self.inner.call(req))
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper for a service factory that will map it's services to boxed trait object services.
|
||||
pub struct BoxServiceFactory<Cfg, Req, Res, Err, InitErr>(Inner<Cfg, Req, Res, Err, InitErr>);
|
||||
|
||||
/// Create boxed service factory
|
||||
/// Wraps a service factory that returns service trait objects.
|
||||
pub fn factory<SF, Req>(
|
||||
factory: SF,
|
||||
) -> BoxServiceFactory<SF::Config, Req, SF::Response, SF::Error, SF::InitError>
|
||||
@@ -28,20 +76,7 @@ where
|
||||
SF::Error: 'static,
|
||||
SF::InitError: 'static,
|
||||
{
|
||||
BoxServiceFactory(Box::new(FactoryWrapper {
|
||||
factory,
|
||||
_t: PhantomData,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Create boxed service
|
||||
pub fn service<S, Req>(service: S) -> BoxService<Req, S::Response, S::Error>
|
||||
where
|
||||
S: Service<Req> + 'static,
|
||||
Req: 'static,
|
||||
S::Future: 'static,
|
||||
{
|
||||
Box::new(ServiceWrapper(service, PhantomData))
|
||||
BoxServiceFactory(Box::new(FactoryWrapper(factory)))
|
||||
}
|
||||
|
||||
type Inner<C, Req, Res, Err, InitErr> = Box<
|
||||
@@ -66,9 +101,9 @@ where
|
||||
{
|
||||
type Response = Res;
|
||||
type Error = Err;
|
||||
type InitError = InitErr;
|
||||
type Config = C;
|
||||
type Service = BoxService<Req, Res, Err>;
|
||||
type InitError = InitErr;
|
||||
|
||||
type Future = BoxFuture<Result<Self::Service, InitErr>>;
|
||||
|
||||
@@ -77,12 +112,9 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
struct FactoryWrapper<SF, Req, Cfg> {
|
||||
factory: SF,
|
||||
_t: PhantomData<(Req, Cfg)>,
|
||||
}
|
||||
struct FactoryWrapper<SF>(SF);
|
||||
|
||||
impl<SF, Req, Cfg, Res, Err, InitErr> ServiceFactory<Req> for FactoryWrapper<SF, Req, Cfg>
|
||||
impl<SF, Req, Cfg, Res, Err, InitErr> ServiceFactory<Req> for FactoryWrapper<SF>
|
||||
where
|
||||
Req: 'static,
|
||||
Res: 'static,
|
||||
@@ -95,47 +127,13 @@ where
|
||||
{
|
||||
type Response = Res;
|
||||
type Error = Err;
|
||||
type InitError = InitErr;
|
||||
type Config = Cfg;
|
||||
type Service = BoxService<Req, Res, Err>;
|
||||
type InitError = InitErr;
|
||||
type Future = BoxFuture<Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, cfg: Cfg) -> Self::Future {
|
||||
let fut = self.factory.new_service(cfg);
|
||||
Box::pin(async {
|
||||
let res = fut.await;
|
||||
res.map(ServiceWrapper::boxed)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
struct ServiceWrapper<S: Service<Req>, Req>(S, PhantomData<Req>);
|
||||
|
||||
impl<S, Req> ServiceWrapper<S, Req>
|
||||
where
|
||||
S: Service<Req> + 'static,
|
||||
Req: 'static,
|
||||
S::Future: 'static,
|
||||
{
|
||||
fn boxed(service: S) -> BoxService<Req, S::Response, S::Error> {
|
||||
Box::new(ServiceWrapper(service, PhantomData))
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, Req, Res, Err> Service<Req> for ServiceWrapper<S, Req>
|
||||
where
|
||||
S: Service<Req, Response = Res, Error = Err>,
|
||||
S::Future: 'static,
|
||||
{
|
||||
type Response = Res;
|
||||
type Error = Err;
|
||||
type Future = BoxFuture<Result<Res, Err>>;
|
||||
|
||||
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.0.poll_ready(ctx)
|
||||
}
|
||||
|
||||
fn call(&self, req: Req) -> Self::Future {
|
||||
Box::pin(self.0.call(req))
|
||||
let f = self.0.new_service(cfg);
|
||||
Box::pin(async { f.await.map(|s| Box::new(ServiceWrapper::new(s)) as _) })
|
||||
}
|
||||
}
|
||||
|
@@ -1,5 +1,12 @@
|
||||
use crate::{dev, Service, ServiceFactory};
|
||||
use crate::{
|
||||
and_then::{AndThenService, AndThenServiceFactory},
|
||||
map::Map,
|
||||
map_err::MapErr,
|
||||
transform_err::TransformMapInitErr,
|
||||
IntoService, IntoServiceFactory, Service, ServiceFactory, Transform,
|
||||
};
|
||||
|
||||
/// An extension trait for [`Service`]s that provides a variety of convenient adapters.
|
||||
pub trait ServiceExt<Req>: Service<Req> {
|
||||
/// Map this service's output to a different type, returning a new service
|
||||
/// of the resulting type.
|
||||
@@ -10,12 +17,12 @@ pub trait ServiceExt<Req>: Service<Req> {
|
||||
/// Note that this function consumes the receiving service and returns a
|
||||
/// wrapped version of it, similar to the existing `map` methods in the
|
||||
/// standard library.
|
||||
fn map<F, R>(self, f: F) -> dev::Map<Self, F, Req, R>
|
||||
fn map<F, R>(self, f: F) -> Map<Self, F, Req, R>
|
||||
where
|
||||
Self: Sized,
|
||||
F: FnMut(Self::Response) -> R,
|
||||
{
|
||||
dev::Map::new(self, f)
|
||||
Map::new(self, f)
|
||||
}
|
||||
|
||||
/// Map this service's error to a different error, returning a new service.
|
||||
@@ -26,17 +33,34 @@ pub trait ServiceExt<Req>: Service<Req> {
|
||||
///
|
||||
/// Note that this function consumes the receiving service and returns a
|
||||
/// wrapped version of it.
|
||||
fn map_err<F, E>(self, f: F) -> dev::MapErr<Self, Req, F, E>
|
||||
fn map_err<F, E>(self, f: F) -> MapErr<Self, Req, F, E>
|
||||
where
|
||||
Self: Sized,
|
||||
F: Fn(Self::Error) -> E,
|
||||
{
|
||||
dev::MapErr::new(self, f)
|
||||
MapErr::new(self, f)
|
||||
}
|
||||
|
||||
/// Call another service after call to this one has resolved successfully.
|
||||
///
|
||||
/// This function can be used to chain two services together and ensure that the second service
|
||||
/// isn't called until the call to the first service has finished. The result of the call to the first
|
||||
/// service is used as an input parameter for the second service's call.
|
||||
///
|
||||
/// Note that this function consumes the receiving service and returns a wrapped version of it.
|
||||
fn and_then<I, S1>(self, service: I) -> AndThenService<Self, S1, Req>
|
||||
where
|
||||
Self: Sized,
|
||||
I: IntoService<S1, Self::Response>,
|
||||
S1: Service<Self::Response, Error = Self::Error>,
|
||||
{
|
||||
AndThenService::new(self, service.into_service())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, Req> ServiceExt<Req> for S where S: Service<Req> {}
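A hedged sketch of these combinators in use, assuming `ServiceExt` is exported at the crate root as shown in the lib.rs changes below and actix-rt is available as the executor:

```rust
use actix_service::{fn_service, Service, ServiceExt};

#[actix_rt::main]
async fn main() {
    let parse = fn_service(|s: &'static str| async move {
        s.parse::<u32>().map_err(|_| "not a number")
    });
    let double = fn_service(|n: u32| async move { Ok::<_, &'static str>(n * 2) });

    // Map the response, then chain a second service on success.
    let svc = parse.map(|n| n + 1).and_then(double);

    assert_eq!(svc.call("20").await, Ok(42));
    assert_eq!(svc.call("x").await, Err("not a number"));
}
```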
|
||||
|
||||
/// An extension trait for [`ServiceFactory`]s that provides a variety of convenient adapters.
|
||||
pub trait ServiceFactoryExt<Req>: ServiceFactory<Req> {
|
||||
/// Map this service's output to a different type, returning a new service
|
||||
/// of the resulting type.
|
||||
@@ -65,6 +89,36 @@ pub trait ServiceFactoryExt<Req>: ServiceFactory<Req> {
|
||||
{
|
||||
crate::map_init_err::MapInitErr::new(self, f)
|
||||
}
|
||||
|
||||
/// Call another service after call to this one has resolved successfully.
|
||||
fn and_then<I, SF1>(self, factory: I) -> AndThenServiceFactory<Self, SF1, Req>
|
||||
where
|
||||
Self: Sized,
|
||||
Self::Config: Clone,
|
||||
I: IntoServiceFactory<SF1, Self::Response>,
|
||||
SF1: ServiceFactory<
|
||||
Self::Response,
|
||||
Config = Self::Config,
|
||||
Error = Self::Error,
|
||||
InitError = Self::InitError,
|
||||
>,
|
||||
{
|
||||
AndThenServiceFactory::new(self, factory.into_factory())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, Req> ServiceFactoryExt<Req> for S where S: ServiceFactory<Req> {}
|
||||
impl<SF, Req> ServiceFactoryExt<Req> for SF where SF: ServiceFactory<Req> {}
|
||||
|
||||
/// An extension trait for [`Transform`]s that provides a variety of convenient adapters.
|
||||
pub trait TransformExt<S, Req>: Transform<S, Req> {
|
||||
/// Return a new `Transform` whose init error is mapped to a different type.
|
||||
fn map_init_err<F, E>(self, f: F) -> TransformMapInitErr<Self, S, Req, F, E>
|
||||
where
|
||||
Self: Sized,
|
||||
F: Fn(Self::InitError) -> E + Clone,
|
||||
{
|
||||
TransformMapInitErr::new(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, Req> TransformExt<T, Req> for T where T: Transform<T, Req> {}
|
||||
|
@@ -1,4 +1,4 @@
|
||||
use core::{future::Future, marker::PhantomData, task::Poll};
|
||||
use core::{future::Future, marker::PhantomData};
|
||||
|
||||
use crate::{ok, IntoService, IntoServiceFactory, Ready, Service, ServiceFactory};
|
||||
|
||||
@@ -15,8 +15,7 @@ where
|
||||
|
||||
/// Create `ServiceFactory` for function that can produce services
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use std::io;
|
||||
/// use actix_service::{fn_factory, fn_service, Service, ServiceFactory};
|
||||
@@ -53,20 +52,19 @@ pub fn fn_factory<F, Cfg, Srv, Req, Fut, Err>(
|
||||
f: F,
|
||||
) -> FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
|
||||
where
|
||||
Srv: Service<Req>,
|
||||
F: Fn() -> Fut,
|
||||
Fut: Future<Output = Result<Srv, Err>>,
|
||||
Srv: Service<Req>,
|
||||
{
|
||||
FnServiceNoConfig::new(f)
|
||||
}
|
||||
|
||||
/// Create `ServiceFactory` for function that accepts config argument and can produce services
|
||||
///
|
||||
/// Any function that has following form `Fn(Config) -> Future<Output = Service>` could
|
||||
/// act as a `ServiceFactory`.
|
||||
///
|
||||
/// # Example
|
||||
/// Any function that has following form `Fn(Config) -> Future<Output = Service>` could act as
|
||||
/// a `ServiceFactory`.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use std::io;
|
||||
/// use actix_service::{fn_factory_with_config, fn_service, Service, ServiceFactory};
|
||||
|
@@ -1,7 +1,8 @@
|
||||
//! See [`Service`] docs for information on this crate's foundational trait.
|
||||
|
||||
#![no_std]
|
||||
#![deny(rust_2018_idioms, nonstandard_style)]
|
||||
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
|
||||
#![warn(missing_docs)]
|
||||
#![allow(clippy::type_complexity)]
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
@@ -21,6 +22,7 @@ mod apply_cfg;
|
||||
pub mod boxed;
|
||||
mod ext;
|
||||
mod fn_service;
|
||||
mod macros;
|
||||
mod map;
|
||||
mod map_config;
|
||||
mod map_err;
|
||||
@@ -33,11 +35,10 @@ mod transform_err;
|
||||
|
||||
pub use self::apply::{apply_fn, apply_fn_factory};
|
||||
pub use self::apply_cfg::{apply_cfg, apply_cfg_factory};
|
||||
pub use self::ext::{ServiceExt, ServiceFactoryExt};
|
||||
pub use self::ext::{ServiceExt, ServiceFactoryExt, TransformExt};
|
||||
pub use self::fn_service::{fn_factory, fn_factory_with_config, fn_service};
|
||||
pub use self::map_config::{map_config, unit_config};
|
||||
pub use self::pipeline::{pipeline, pipeline_factory, Pipeline, PipelineFactory};
|
||||
pub use self::transform::{apply, Transform};
|
||||
pub use self::transform::{apply, ApplyTransform, Transform};
|
||||
|
||||
#[allow(unused_imports)]
|
||||
use self::ready::{err, ok, ready, Ready};
|
||||
@@ -52,8 +53,14 @@ use self::ready::{err, ok, ready, Ready};
|
||||
/// async fn(Request) -> Result<Response, Err>
|
||||
/// ```
|
||||
///
|
||||
/// The `Service` trait just generalizes this form where each parameter is described as an
|
||||
/// associated type on the trait. Services can also have mutable state that influence computation.
|
||||
/// The `Service` trait just generalizes this form. Requests are defined as a generic type parameter
|
||||
/// and responses and other details are defined as associated types on the trait impl. Notice that
|
||||
/// this design means that services can receive many request types and converge them to a single
|
||||
/// response type.
|
||||
///
|
||||
/// Services can also have mutable state that influence computation by using a `Cell`, `RefCell`
|
||||
/// or `Mutex`. Services intentionally do not take `&mut self` to reduce overhead in the
|
||||
/// common cases.
|
||||
///
|
||||
/// `Service` provides a symmetric and uniform API; the same abstractions can be used to represent
|
||||
/// both clients and servers. Services describe only _transformation_ operations which encourage
|
||||
@@ -63,23 +70,25 @@ use self::ready::{err, ok, ready, Ready};
|
||||
/// ```ignore
|
||||
/// struct MyService;
|
||||
///
|
||||
/// impl Service for MyService {
|
||||
/// type Request = u8;
|
||||
/// impl Service<u8> for MyService {
|
||||
/// type Response = u64;
|
||||
/// type Error = MyError;
|
||||
/// type Future = Pin<Box<Future<Output=Result<Self::Response, Self::Error>>>>;
|
||||
/// type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>;
|
||||
///
|
||||
/// fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ... }
|
||||
///
|
||||
/// fn call(&self, req: Self::Request) -> Self::Future { ... }
|
||||
/// fn call(&self, req: u8) -> Self::Future { ... }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// Sometimes it is not necessary to implement the Service trait. For example, the above service
|
||||
/// could be rewritten as a simple function and passed to [fn_service](fn_service()).
|
||||
/// could be rewritten as a simple function and passed to [`fn_service`](fn_service()).
|
||||
///
|
||||
/// ```ignore
|
||||
/// async fn my_service(req: u8) -> Result<u64, MyError>;
|
||||
///
|
||||
/// let svc = fn_service(my_service)
|
||||
/// svc.call(123)
|
||||
/// ```
|
||||
pub trait Service<Req> {
|
||||
/// Responses given by the service.
|
||||
@@ -93,40 +102,40 @@ pub trait Service<Req> {
|
||||
|
||||
/// Returns `Ready` when the service is able to process requests.
|
||||
///
|
||||
/// If the service is at capacity, then `Pending` is returned and the task
|
||||
/// is notified when the service becomes ready again. This function is
|
||||
/// expected to be called while on a task.
|
||||
/// If the service is at capacity, then `Pending` is returned and the task is notified when the
|
||||
/// service becomes ready again. This function is expected to be called while on a task.
|
||||
///
|
||||
/// This is a **best effort** implementation. False positives are permitted.
|
||||
/// It is permitted for the service to return `Ready` from a `poll_ready`
|
||||
/// call and the next invocation of `call` results in an error.
|
||||
/// This is a best effort implementation. False positives are permitted. It is permitted for
|
||||
/// the service to return `Ready` from a `poll_ready` call and the next invocation of `call`
|
||||
/// results in an error.
|
||||
///
|
||||
/// # Notes
|
||||
/// 1. `.poll_ready()` might be called on different task from actual service call.
|
||||
/// 1. In case of chained services, `.poll_ready()` get called for all services at once.
|
||||
/// 1. `poll_ready` might be called on a different task to `call`.
|
||||
/// 1. In cases of chained services, `.poll_ready()` is called for all services at once.
|
||||
fn poll_ready(&self, ctx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
|
||||
|
||||
/// Process the request and return the response asynchronously.
|
||||
///
|
||||
/// This function is expected to be callable off task. As such,
|
||||
/// implementations should take care to not call `poll_ready`. If the
|
||||
/// service is at capacity and the request is unable to be handled, the
|
||||
/// returned `Future` should resolve to an error.
|
||||
/// This function is expected to be callable off-task. As such, implementations of `call` should
|
||||
/// take care to not call `poll_ready`. If the service is at capacity and the request is unable
|
||||
/// to be handled, the returned `Future` should resolve to an error.
|
||||
///
|
||||
/// Calling `call` without calling `poll_ready` is permitted. The
|
||||
/// implementation must be resilient to this fact.
|
||||
/// Invoking `call` without first invoking `poll_ready` is permitted. Implementations must be
|
||||
/// resilient to this fact.
|
||||
fn call(&self, req: Req) -> Self::Future;
|
||||
}
|
||||
|
||||
/// Factory for creating `Service`s.
|
||||
///
|
||||
/// Acts as a service factory. This is useful for cases where new `Service`s
|
||||
/// must be produced. One case is a TCP server listener. The listener
|
||||
/// accepts new TCP streams, obtains a new `Service` using the
|
||||
/// `ServiceFactory` trait, and uses the new `Service` to process inbound
|
||||
/// requests on that new TCP stream.
|
||||
/// This is useful for cases where new `Service`s must be produced. One case is a TCP
|
||||
/// server listener: a listener accepts new connections, constructs a new `Service` for each using
|
||||
/// the `ServiceFactory` trait, and uses the new `Service` to process inbound requests on that new
|
||||
/// connection.
|
||||
///
|
||||
/// `Config` is a service factory configuration type.
|
||||
///
|
||||
/// Simple factories may be able to use [`fn_factory`] or [`fn_factory_with_config`] to
|
||||
/// reduce boilerplate.
|
||||
pub trait ServiceFactory<Req> {
|
||||
/// Responses given by the created services.
|
||||
type Response;
|
||||
@@ -143,13 +152,14 @@ pub trait ServiceFactory<Req> {
|
||||
/// Errors potentially raised while building a service.
|
||||
type InitError;
|
||||
|
||||
/// The future of the `Service` instance.
|
||||
/// The future of the `Service` instance.
|
||||
type Future: Future<Output = Result<Self::Service, Self::InitError>>;
|
||||
|
||||
/// Create and return a new service asynchronously.
|
||||
fn new_service(&self, cfg: Self::Config) -> Self::Future;
|
||||
}
|
||||
|
||||
// TODO: remove implement on mut reference.
|
||||
impl<'a, S, Req> Service<Req> for &'a mut S
|
||||
where
|
||||
S: Service<Req> + 'a,
|
||||
@@ -167,6 +177,23 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, S, Req> Service<Req> for &'a S
|
||||
where
|
||||
S: Service<Req> + 'a,
|
||||
{
|
||||
type Response = S::Response;
|
||||
type Error = S::Error;
|
||||
type Future = S::Future;
|
||||
|
||||
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
(**self).poll_ready(ctx)
|
||||
}
|
||||
|
||||
fn call(&self, request: Req) -> S::Future {
|
||||
(**self).call(request)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, Req> Service<Req> for Box<S>
|
||||
where
|
||||
S: Service<Req> + ?Sized,
|
||||
@@ -184,24 +211,25 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, Req> Service<Req> for RefCell<S>
|
||||
impl<S, Req> Service<Req> for Rc<S>
|
||||
where
|
||||
S: Service<Req>,
|
||||
S: Service<Req> + ?Sized,
|
||||
{
|
||||
type Response = S::Response;
|
||||
type Error = S::Error;
|
||||
type Future = S::Future;
|
||||
|
||||
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.borrow().poll_ready(ctx)
|
||||
(**self).poll_ready(ctx)
|
||||
}
|
||||
|
||||
fn call(&self, request: Req) -> S::Future {
|
||||
self.borrow().call(request)
|
||||
(**self).call(request)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, Req> Service<Req> for Rc<RefCell<S>>
|
||||
/// This impl is deprecated since v2 because the `Service` trait now receives a shared reference.
|
||||
impl<S, Req> Service<Req> for RefCell<S>
|
||||
where
|
||||
S: Service<Req>,
|
||||
{
|
||||
@@ -294,44 +322,3 @@ where
|
||||
{
|
||||
tp.into_service()
|
||||
}
|
||||
|
||||
pub mod dev {
|
||||
pub use crate::apply::{Apply, ApplyFactory};
|
||||
pub use crate::fn_service::{
|
||||
FnService, FnServiceConfig, FnServiceFactory, FnServiceNoConfig,
|
||||
};
|
||||
pub use crate::map::{Map, MapServiceFactory};
|
||||
pub use crate::map_config::{MapConfig, UnitConfig};
|
||||
pub use crate::map_err::{MapErr, MapErrServiceFactory};
|
||||
pub use crate::map_init_err::MapInitErr;
|
||||
pub use crate::transform::ApplyTransform;
|
||||
pub use crate::transform_err::TransformMapInitErr;
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! always_ready {
|
||||
() => {
|
||||
#[inline]
|
||||
fn poll_ready(
|
||||
&self,
|
||||
_: &mut ::core::task::Context<'_>,
|
||||
) -> ::core::task::Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! forward_ready {
|
||||
($field:ident) => {
|
||||
#[inline]
|
||||
fn poll_ready(
|
||||
&self,
|
||||
cx: &mut ::core::task::Context<'_>,
|
||||
) -> ::core::task::Poll<Result<(), Self::Error>> {
|
||||
self.$field
|
||||
.poll_ready(cx)
|
||||
.map_err(::core::convert::Into::into)
|
||||
}
|
||||
};
|
||||
}
|
||||
|
185
actix-service/src/macros.rs
Normal file
@@ -0,0 +1,185 @@
|
||||
/// An implementation of [`poll_ready`] that always signals readiness.
|
||||
///
|
||||
/// This should only be used for basic leaf services that have no concept of un-readiness.
|
||||
/// For wrapper or other service types, use [`forward_ready!`] for simple cases or write a bespoke
|
||||
/// `poll_ready` implementation.
|
||||
///
|
||||
/// [`poll_ready`]: crate::Service::poll_ready
|
||||
///
|
||||
/// # Examples
|
||||
/// ```no_run
|
||||
/// use actix_service::Service;
|
||||
/// use futures_util::future::{ready, Ready};
|
||||
///
|
||||
/// struct IdentityService;
|
||||
///
|
||||
/// impl Service<u32> for IdentityService {
|
||||
/// type Response = u32;
|
||||
/// type Error = ();
|
||||
/// type Future = Ready<Result<Self::Response, Self::Error>>;
|
||||
///
|
||||
/// actix_service::always_ready!();
|
||||
///
|
||||
/// fn call(&self, req: u32) -> Self::Future {
|
||||
/// ready(Ok(req))
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[macro_export]
|
||||
macro_rules! always_ready {
|
||||
() => {
|
||||
#[inline]
|
||||
fn poll_ready(
|
||||
&self,
|
||||
_: &mut ::core::task::Context<'_>,
|
||||
) -> ::core::task::Poll<Result<(), Self::Error>> {
|
||||
::core::task::Poll::Ready(Ok(()))
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// An implementation of [`poll_ready`] that forwards readiness checks to a
|
||||
/// named struct field.
|
||||
///
|
||||
/// Tuple structs are not supported.
|
||||
///
|
||||
/// [`poll_ready`]: crate::Service::poll_ready
|
||||
///
|
||||
/// # Examples
|
||||
/// ```no_run
|
||||
/// use actix_service::Service;
|
||||
/// use futures_util::future::{ready, Ready};
|
||||
///
|
||||
/// struct WrapperService<S> {
|
||||
/// inner: S,
|
||||
/// }
|
||||
///
|
||||
/// impl<S> Service<()> for WrapperService<S>
|
||||
/// where
|
||||
/// S: Service<()>,
|
||||
/// {
|
||||
/// type Response = S::Response;
|
||||
/// type Error = S::Error;
|
||||
/// type Future = S::Future;
|
||||
///
|
||||
/// actix_service::forward_ready!(inner);
|
||||
///
|
||||
/// fn call(&self, req: ()) -> Self::Future {
|
||||
/// self.inner.call(req)
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[macro_export]
|
||||
macro_rules! forward_ready {
|
||||
($field:ident) => {
|
||||
#[inline]
|
||||
fn poll_ready(
|
||||
&self,
|
||||
cx: &mut ::core::task::Context<'_>,
|
||||
) -> ::core::task::Poll<Result<(), Self::Error>> {
|
||||
self.$field
|
||||
.poll_ready(cx)
|
||||
.map_err(::core::convert::Into::into)
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use core::{
|
||||
cell::Cell,
|
||||
convert::Infallible,
|
||||
task::{self, Context, Poll},
|
||||
};
|
||||
|
||||
use futures_util::{
|
||||
future::{ready, Ready},
|
||||
task::noop_waker,
|
||||
};
|
||||
|
||||
use crate::Service;
|
||||
|
||||
struct IdentityService;
|
||||
|
||||
impl Service<u32> for IdentityService {
|
||||
type Response = u32;
|
||||
type Error = Infallible;
|
||||
type Future = Ready<Result<Self::Response, Self::Error>>;
|
||||
|
||||
always_ready!();
|
||||
|
||||
fn call(&self, req: u32) -> Self::Future {
|
||||
ready(Ok(req))
|
||||
}
|
||||
}
|
||||
|
||||
struct CountdownService(Cell<u32>);
|
||||
|
||||
impl Service<()> for CountdownService {
|
||||
type Response = ();
|
||||
type Error = Infallible;
|
||||
type Future = Ready<Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
let count = self.0.get();
|
||||
|
||||
if count == 0 {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
self.0.set(count - 1);
|
||||
cx.waker().wake_by_ref();
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
fn call(&self, _: ()) -> Self::Future {
|
||||
ready(Ok(()))
|
||||
}
|
||||
}
|
||||
|
||||
struct WrapperService<S> {
|
||||
inner: S,
|
||||
}
|
||||
|
||||
impl<S> Service<()> for WrapperService<S>
|
||||
where
|
||||
S: Service<()>,
|
||||
{
|
||||
type Response = S::Response;
|
||||
type Error = S::Error;
|
||||
type Future = S::Future;
|
||||
|
||||
forward_ready!(inner);
|
||||
|
||||
fn call(&self, _: ()) -> Self::Future {
|
||||
self.inner.call(())
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_always_ready_macro() {
|
||||
let waker = noop_waker();
|
||||
let mut cx = task::Context::from_waker(&waker);
|
||||
|
||||
let svc = IdentityService;
|
||||
|
||||
assert!(svc.poll_ready(&mut cx).is_ready());
|
||||
assert!(svc.poll_ready(&mut cx).is_ready());
|
||||
assert!(svc.poll_ready(&mut cx).is_ready());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_forward_ready_macro() {
|
||||
let waker = noop_waker();
|
||||
let mut cx = task::Context::from_waker(&waker);
|
||||
|
||||
let svc = WrapperService {
|
||||
inner: CountdownService(Cell::new(3)),
|
||||
};
|
||||
|
||||
assert!(svc.poll_ready(&mut cx).is_pending());
|
||||
assert!(svc.poll_ready(&mut cx).is_pending());
|
||||
assert!(svc.poll_ready(&mut cx).is_pending());
|
||||
assert!(svc.poll_ready(&mut cx).is_ready());
|
||||
}
|
||||
}
|
@@ -180,7 +180,7 @@ where
|
||||
F: Fn(A::Error) -> E,
|
||||
{
|
||||
fn new(fut: A::Future, f: F) -> Self {
|
||||
MapErrServiceFuture { f, fut }
|
||||
MapErrServiceFuture { fut, f }
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,3 +1,6 @@
|
||||
// TODO: see if pipeline is necessary
|
||||
#![allow(dead_code)]
|
||||
|
||||
use core::{
|
||||
marker::PhantomData,
|
||||
task::{Context, Poll},
|
||||
@@ -11,7 +14,7 @@ use crate::then::{ThenService, ThenServiceFactory};
|
||||
use crate::{IntoService, IntoServiceFactory, Service, ServiceFactory};
|
||||
|
||||
/// Construct new pipeline with one service in pipeline chain.
|
||||
pub fn pipeline<I, S, Req>(service: I) -> Pipeline<S, Req>
|
||||
pub(crate) fn pipeline<I, S, Req>(service: I) -> Pipeline<S, Req>
|
||||
where
|
||||
I: IntoService<S, Req>,
|
||||
S: Service<Req>,
|
||||
@@ -23,7 +26,7 @@ where
|
||||
}
|
||||
|
||||
/// Construct new pipeline factory with one service factory.
|
||||
pub fn pipeline_factory<I, SF, Req>(factory: I) -> PipelineFactory<SF, Req>
|
||||
pub(crate) fn pipeline_factory<I, SF, Req>(factory: I) -> PipelineFactory<SF, Req>
|
||||
where
|
||||
I: IntoServiceFactory<SF, Req>,
|
||||
SF: ServiceFactory<Req>,
|
||||
@@ -35,7 +38,7 @@ where
|
||||
}
|
||||
|
||||
/// Pipeline service - pipeline allows composing multiple services into one service.
|
||||
pub struct Pipeline<S, Req> {
|
||||
pub(crate) struct Pipeline<S, Req> {
|
||||
service: S,
|
||||
_phantom: PhantomData<Req>,
|
||||
}
|
||||
@@ -157,7 +160,7 @@ impl<S: Service<Req>, Req> Service<Req> for Pipeline<S, Req> {
|
||||
}
|
||||
|
||||
/// Pipeline factory
|
||||
pub struct PipelineFactory<SF, Req> {
|
||||
pub(crate) struct PipelineFactory<SF, Req> {
|
||||
factory: SF,
|
||||
_phantom: PhantomData<Req>,
|
||||
}
|
||||
|
@@ -246,7 +246,11 @@ mod tests {
|
||||
|
||||
use futures_util::future::lazy;
|
||||
|
||||
use crate::{err, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory};
|
||||
use crate::{
|
||||
err, ok,
|
||||
pipeline::{pipeline, pipeline_factory},
|
||||
ready, Ready, Service, ServiceFactory,
|
||||
};
|
||||
|
||||
#[derive(Clone)]
|
||||
struct Srv1(Rc<Cell<usize>>);
|
||||
|
@@ -9,10 +9,9 @@ use core::{
|
||||
use futures_core::ready;
|
||||
use pin_project_lite::pin_project;
|
||||
|
||||
use crate::transform_err::TransformMapInitErr;
|
||||
use crate::{IntoServiceFactory, Service, ServiceFactory};
|
||||
|
||||
/// Apply transform to a service.
|
||||
/// Apply a [`Transform`] to a [`Service`].
|
||||
pub fn apply<T, S, I, Req>(t: T, factory: I) -> ApplyTransform<T, S, Req>
|
||||
where
|
||||
I: IntoServiceFactory<S, Req>,
|
||||
@@ -22,14 +21,12 @@ where
|
||||
ApplyTransform::new(t, factory.into_factory())
|
||||
}
|
||||
|
||||
/// The `Transform` trait defines the interface of a service factory that wraps inner service
|
||||
/// during construction.
|
||||
/// Defines the interface of a service factory that wraps inner service during construction.
|
||||
///
|
||||
/// Transform(middleware) wraps inner service and runs during
|
||||
/// inbound and/or outbound processing in the request/response lifecycle.
|
||||
/// It may modify request and/or response.
|
||||
/// Transformers wrap an inner service and run during inbound and/or outbound processing in the
|
||||
/// service lifecycle. They may modify the request and/or response.
|
||||
///
|
||||
/// For example, timeout transform:
|
||||
/// For example, a timeout service wrapper:
|
||||
///
|
||||
/// ```ignore
|
||||
/// pub struct Timeout<S> {
|
||||
@@ -37,47 +34,38 @@ where
|
||||
/// timeout: Duration,
|
||||
/// }
|
||||
///
|
||||
/// impl<S> Service for Timeout<S>
|
||||
/// where
|
||||
/// S: Service,
|
||||
/// {
|
||||
/// type Request = S::Request;
|
||||
/// impl<S: Service<Req>, Req> Service<Req> for Timeout<S> {
|
||||
/// type Response = S::Response;
|
||||
/// type Error = TimeoutError<S::Error>;
|
||||
/// type Future = TimeoutServiceResponse<S>;
|
||||
///
|
||||
/// actix_service::forward_ready!(service);
|
||||
///
|
||||
/// fn call(&self, req: S::Request) -> Self::Future {
|
||||
/// fn call(&self, req: Req) -> Self::Future {
|
||||
/// TimeoutServiceResponse {
|
||||
/// fut: self.service.call(req),
|
||||
/// sleep: Delay::new(clock::now() + self.timeout),
|
||||
/// sleep: Sleep::new(clock::now() + self.timeout),
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// Timeout service in above example is decoupled from underlying service implementation
|
||||
/// and could be applied to any service.
|
||||
/// This wrapper service is decoupled from the underlying service implementation and could be
|
||||
/// applied to any service.
|
||||
///
|
||||
/// The `Transform` trait defines the interface of a Service factory. `Transform`
|
||||
/// is often implemented for middleware, defining how to construct a
|
||||
/// middleware Service. A Service that is constructed by the factory takes
|
||||
/// the Service that follows it during execution as a parameter, assuming
|
||||
/// ownership of the next Service.
|
||||
/// The `Transform` trait defines the interface of a service wrapper. `Transform` is often
|
||||
/// implemented for middleware, defining how to construct a middleware Service. A Service that is
|
||||
/// constructed by the factory takes the Service that follows it during execution as a parameter,
|
||||
/// assuming ownership of the next Service.
|
||||
///
|
||||
/// Factory for `Timeout` middleware from the above example could look like this:
|
||||
/// A transform for the `Timeout` middleware could look like this:
|
||||
///
|
||||
/// ```ignore
|
||||
/// pub struct TimeoutTransform {
|
||||
/// timeout: Duration,
|
||||
/// }
|
||||
///
|
||||
/// impl<S> Transform<S> for TimeoutTransform
|
||||
/// where
|
||||
/// S: Service,
|
||||
/// {
|
||||
/// type Request = S::Request;
|
||||
/// impl<S: Service<Req>, Req> Transform<S, Req> for TimeoutTransform {
|
||||
/// type Response = S::Response;
|
||||
/// type Error = TimeoutError<S::Error>;
|
||||
/// type InitError = S::Error;
|
||||
@@ -85,15 +73,15 @@ where
|
||||
/// type Future = Ready<Result<Self::Transform, Self::InitError>>;
|
||||
///
|
||||
/// fn new_transform(&self, service: S) -> Self::Future {
|
||||
/// ok(TimeoutService {
|
||||
/// ready(Ok(Timeout {
|
||||
/// service,
|
||||
/// timeout: self.timeout,
|
||||
/// })
|
||||
/// }))
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
pub trait Transform<S, Req> {
|
||||
/// Responses given by the service.
|
||||
/// Responses produced by the service.
|
||||
type Response;
|
||||
|
||||
/// Errors produced by the service.
|
||||
@@ -110,16 +98,6 @@ pub trait Transform<S, Req> {
|
||||
|
||||
/// Creates and returns a new Transform component, asynchronously.
|
||||
fn new_transform(&self, service: S) -> Self::Future;
|
||||
|
||||
/// Map this transform's factory error to a different error,
|
||||
/// returning a new transform service factory.
|
||||
fn map_init_err<F, E>(self, f: F) -> TransformMapInitErr<Self, S, Req, F, E>
|
||||
where
|
||||
Self: Sized,
|
||||
F: Fn(Self::InitError) -> E + Clone,
|
||||
{
|
||||
TransformMapInitErr::new(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S, Req> Transform<S, Req> for Rc<T>
|
||||
@@ -152,7 +130,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// `Apply` transform to new service
|
||||
/// Apply a [`Transform`] to a [`Service`].
|
||||
pub struct ApplyTransform<T, S, Req>(Rc<(T, S)>, PhantomData<Req>);
|
||||
|
||||
impl<T, S, Req> ApplyTransform<T, S, Req>
|
||||
@@ -240,3 +218,53 @@ where
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use core::time::Duration;
|
||||
|
||||
use actix_utils::future::{ready, Ready};
|
||||
|
||||
use super::*;
|
||||
use crate::Service;
|
||||
|
||||
// pseudo-doctest for Transform trait
|
||||
pub struct TimeoutTransform {
|
||||
timeout: Duration,
|
||||
}
|
||||
|
||||
// pseudo-doctest for Transform trait
|
||||
impl<S: Service<Req>, Req> Transform<S, Req> for TimeoutTransform {
|
||||
type Response = S::Response;
|
||||
type Error = S::Error;
|
||||
type InitError = S::Error;
|
||||
type Transform = Timeout<S>;
|
||||
type Future = Ready<Result<Self::Transform, Self::InitError>>;
|
||||
|
||||
fn new_transform(&self, service: S) -> Self::Future {
|
||||
ready(Ok(Timeout {
|
||||
service,
|
||||
_timeout: self.timeout,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
// pseudo-doctest for Transform trait
|
||||
pub struct Timeout<S> {
|
||||
service: S,
|
||||
_timeout: Duration,
|
||||
}
|
||||
|
||||
// pseudo-doctest for Transform trait
|
||||
impl<S: Service<Req>, Req> Service<Req> for Timeout<S> {
|
||||
type Response = S::Response;
|
||||
type Error = S::Error;
|
||||
type Future = S::Future;
|
||||
|
||||
crate::forward_ready!(service);
|
||||
|
||||
fn call(&self, req: Req) -> Self::Future {
|
||||
self.service.call(req)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -9,10 +9,8 @@ use pin_project_lite::pin_project;
|
||||
|
||||
use super::Transform;
|
||||
|
||||
/// Transform for the `map_init_err` combinator, changing the type of a new
|
||||
/// transform's init error.
|
||||
///
|
||||
/// This is created by the `Transform::map_init_err` method.
|
||||
/// Transform for the [`TransformExt::map_init_err`] combinator, changing the type of a new
|
||||
/// [`Transform`]'s initialization error.
|
||||
pub struct TransformMapInitErr<T, S, Req, F, E> {
|
||||
transform: T,
|
||||
mapper: F,
|
||||
|
@@ -1,17 +1,54 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2021-xx-xx
|
||||
|
||||
|
||||
## 3.0.0-beta.6 - 2021-10-19
|
||||
* Update `tokio-rustls` to `0.23` which uses `rustls` `0.20`. [#396]
|
||||
* Removed a re-export of `Session` from `rustls` as it no longer exists. [#396]
|
||||
* Minimum supported Rust version (MSRV) is now 1.52.
|
||||
|
||||
[#396]: https://github.com/actix/actix-net/pull/396
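For reference, a minimal sketch of the rustls 0.20 `ServerConfig` construction this release migrates to; it mirrors the updated `tcp-rustls` example elsewhere in this diff, and `cert_chain`/`key` are placeholders loaded elsewhere (e.g. via `rustls-pemfile`):

```rust
use rustls::{server::ServerConfig, Certificate, PrivateKey};

// rustls 0.20 replaces `ServerConfig::new(NoClientAuth::new())` with a builder.
fn tls_config(cert_chain: Vec<Certificate>, key: PrivateKey) -> ServerConfig {
    ServerConfig::builder()
        .with_safe_defaults()
        .with_no_client_auth()
        .with_single_cert(cert_chain, key)
        .expect("invalid certificate/key pair")
}
```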
|
||||
|
||||
|
||||
## 3.0.0-beta.5 - 2021-03-29
|
||||
* Changed `connect::ssl::rustls::RustlsConnectorService` to return error when `DNSNameRef`
|
||||
generation failed instead of panic. [#296]
|
||||
* Remove `connect::ssl::openssl::OpensslConnectServiceFactory`. [#297]
|
||||
* Remove `connect::ssl::openssl::OpensslConnectService`. [#297]
|
||||
* Add `connect::ssl::native_tls` module for native tls support. [#295]
|
||||
* Rename `accept::{nativetls => native_tls}`. [#295]
|
||||
* Remove `connect::TcpConnectService` type. Service callers that expect a `TcpStream` should use
|
||||
`connect::ConnectService` instead and call `Connection<T, TcpStream>::into_parts`. [#299]
|
||||
|
||||
[#295]: https://github.com/actix/actix-net/pull/295
|
||||
[#296]: https://github.com/actix/actix-net/pull/296
|
||||
[#297]: https://github.com/actix/actix-net/pull/297
|
||||
[#299]: https://github.com/actix/actix-net/pull/299
|
||||
|
||||
|
||||
## 3.0.0-beta.4 - 2021-02-24
|
||||
* Rename `accept::openssl::{SslStream => TlsStream}`.
|
||||
* Add `connect::Connect::set_local_addr` to attach local `IpAddr`. [#282]
|
||||
* `connector::TcpConnector` service will try to bind to local_addr of `IpAddr` when given. [#282]
|
||||
|
||||
[#282]: https://github.com/actix/actix-net/pull/282
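A brief sketch of the local-address binding added in [#282]; it mirrors the `test_local_addr` test later in this diff, and the `connect_via_loopback` helper is illustrative only:

```rust
use actix_tls::connect::Connect;

// Bind the outgoing socket for this request to 127.0.0.1 (any port).
fn connect_via_loopback() -> Connect<String> {
    Connect::new("example.org:80".to_owned()).set_local_addr([127, 0, 0, 1])
}
```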
|
||||
|
||||
|
||||
## 3.0.0-beta.3 - 2021-02-06
|
||||
* Remove `trust-dns-proto` and `trust-dns-resolver`. [#248]
|
||||
* Use `std::net::ToSocketAddrs` as simple and basic default resolver. [#248]
|
||||
* Add `Resolve` trait for custom dns resolver. [#248]
|
||||
* Add `Resolve` trait for custom DNS resolvers. [#248]
|
||||
* Add `Resolver::new_custom` function to construct custom resolvers. [#248]
|
||||
* Export `webpki_roots::TLS_SERVER_ROOTS` in `actix_tls::connect` mod and remove
|
||||
the export from `actix_tls::accept` [#248]
|
||||
* Remove `ConnectTakeAddrsIter`. `Connect::take_addrs` now returns `ConnectAddrsIter<'static>`
|
||||
as owned iterator. [#248]
|
||||
* Rename `Address::{host => hostname}` to more accurately describe which URL segment is returned.
|
||||
* Update `actix-rt` to `2.0.0`. [#273]
|
||||
|
||||
[#248]: https://github.com/actix/actix-net/pull/248
|
||||
[#273]: https://github.com/actix/actix-net/pull/273
|
||||
|
||||
|
||||
## 3.0.0-beta.2 - 2021-xx-xx
|
||||
|
@@ -1,12 +1,10 @@
|
||||
[package]
|
||||
name = "actix-tls"
|
||||
version = "3.0.0-beta.2"
|
||||
version = "3.0.0-beta.6"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "TLS acceptor and connector services for Actix ecosystem"
|
||||
keywords = ["network", "tls", "ssl", "async", "transport"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-tls"
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
@@ -40,10 +38,10 @@ native-tls = ["tokio-native-tls"]
|
||||
uri = ["http"]
|
||||
|
||||
[dependencies]
|
||||
actix-codec = "0.4.0-beta.1"
|
||||
actix-rt = { version = "2.0.0", default-features = false }
|
||||
actix-service = "2.0.0-beta.4"
|
||||
actix-utils = "3.0.0-beta.1"
|
||||
actix-codec = "0.4.0"
|
||||
actix-rt = { version = "2.2.0", default-features = false }
|
||||
actix-service = "2.0.0"
|
||||
actix-utils = "3.0.0"
|
||||
|
||||
derive_more = "0.99.5"
|
||||
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
|
||||
@@ -52,25 +50,26 @@ log = "0.4"
|
||||
tokio-util = { version = "0.6.3", default-features = false }
|
||||
|
||||
# openssl
|
||||
tls-openssl = { package = "openssl", version = "0.10", optional = true }
|
||||
tls-openssl = { package = "openssl", version = "0.10.9", optional = true }
|
||||
tokio-openssl = { version = "0.6", optional = true }
|
||||
|
||||
# rustls
|
||||
tokio-rustls = { version = "0.22", optional = true }
|
||||
tokio-rustls = { version = "0.23", optional = true }
|
||||
webpki-roots = { version = "0.21", optional = true }
|
||||
|
||||
# native-tls
|
||||
tokio-native-tls = { version = "0.3", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
actix-rt = "2.0.0"
|
||||
actix-server = "2.0.0-beta.2"
|
||||
actix-rt = "2.2.0"
|
||||
actix-server = "2.0.0-beta.6"
|
||||
bytes = "1"
|
||||
env_logger = "0.8"
|
||||
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
|
||||
log = "0.4"
|
||||
rustls-pemfile = "0.2.1"
|
||||
trust-dns-resolver = "0.20.0"
|
||||
|
||||
[[example]]
|
||||
name = "basic"
|
||||
name = "tcp-rustls"
|
||||
required-features = ["accept", "rustls"]
|
||||
|
@@ -29,35 +29,35 @@ use std::{
|
||||
},
|
||||
};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_server::Server;
|
||||
use actix_service::pipeline_factory;
|
||||
use actix_tls::accept::rustls::Acceptor as RustlsAcceptor;
|
||||
use actix_service::ServiceFactoryExt as _;
|
||||
use actix_tls::accept::rustls::{Acceptor as RustlsAcceptor, TlsStream};
|
||||
use futures_util::future::ok;
|
||||
use log::info;
|
||||
use rustls::{
|
||||
internal::pemfile::certs, internal::pemfile::rsa_private_keys, NoClientAuth, ServerConfig,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ServiceState {
|
||||
num: Arc<AtomicUsize>,
|
||||
}
|
||||
use rustls::{server::ServerConfig, Certificate, PrivateKey};
|
||||
use rustls_pemfile::{certs, rsa_private_keys};
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env::set_var("RUST_LOG", "actix=trace,basic=trace");
|
||||
env::set_var("RUST_LOG", "info");
|
||||
env_logger::init();
|
||||
|
||||
let mut tls_config = ServerConfig::new(NoClientAuth::new());
|
||||
|
||||
// Load TLS key and cert files
|
||||
let cert_file = &mut BufReader::new(File::open("./examples/cert.pem").unwrap());
|
||||
let key_file = &mut BufReader::new(File::open("./examples/key.pem").unwrap());
|
||||
|
||||
let cert_chain = certs(cert_file).unwrap();
|
||||
let cert_chain = certs(cert_file)
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(Certificate)
|
||||
.collect();
|
||||
let mut keys = rsa_private_keys(key_file).unwrap();
|
||||
tls_config
|
||||
.set_single_cert(cert_chain, keys.remove(0))
|
||||
|
||||
let tls_config = ServerConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(cert_chain, PrivateKey(keys.remove(0)))
|
||||
.unwrap();
|
||||
|
||||
let tls_acceptor = RustlsAcceptor::new(tls_config);
|
||||
@@ -72,11 +72,12 @@ async fn main() -> io::Result<()> {
|
||||
let count = Arc::clone(&count);
|
||||
|
||||
// Set up TLS service factory
|
||||
pipeline_factory(tls_acceptor.clone())
|
||||
tls_acceptor
|
||||
.clone()
|
||||
.map_err(|err| println!("Rustls error: {:?}", err))
|
||||
.and_then(move |stream| {
|
||||
.and_then(move |stream: TlsStream<TcpStream>| {
|
||||
let num = count.fetch_add(1, Ordering::Relaxed);
|
||||
info!("[{}] Got TLS connection: {:?}", num, stream);
|
||||
info!("[{}] Got TLS connection: {:?}", num, &*stream);
|
||||
ok(())
|
||||
})
|
||||
})?
|
@@ -16,7 +16,7 @@ pub mod openssl;
|
||||
pub mod rustls;
|
||||
|
||||
#[cfg(feature = "native-tls")]
|
||||
pub mod nativetls;
|
||||
pub mod native_tls;
|
||||
|
||||
pub(crate) static MAX_CONN: AtomicUsize = AtomicUsize::new(256);
|
||||
|
||||
|
163
actix-tls/src/accept/native_tls.rs
Normal file
@@ -0,0 +1,163 @@
|
||||
use std::{
|
||||
io::{self, IoSlice},
|
||||
ops::{Deref, DerefMut},
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
|
||||
use actix_rt::net::{ActixStream, Ready};
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use actix_utils::counter::Counter;
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
|
||||
pub use tokio_native_tls::native_tls::Error;
|
||||
pub use tokio_native_tls::TlsAcceptor;
|
||||
|
||||
use super::MAX_CONN_COUNTER;
|
||||
|
||||
/// Wrapper type for `tokio_native_tls::TlsStream` in order to impl `ActixStream` trait.
|
||||
pub struct TlsStream<T>(tokio_native_tls::TlsStream<T>);
|
||||
|
||||
impl<T> From<tokio_native_tls::TlsStream<T>> for TlsStream<T> {
|
||||
fn from(stream: tokio_native_tls::TlsStream<T>) -> Self {
|
||||
Self(stream)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> Deref for TlsStream<T> {
|
||||
type Target = tokio_native_tls::TlsStream<T>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> DerefMut for TlsStream<T> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> AsyncRead for TlsStream<T> {
|
||||
fn poll_read(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> AsyncWrite for TlsStream<T> {
|
||||
fn poll_write(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_write(cx, buf)
|
||||
}
|
||||
|
||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_shutdown(cx)
|
||||
}
|
||||
|
||||
fn poll_write_vectored(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
bufs: &[IoSlice<'_>],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_write_vectored(cx, bufs)
|
||||
}
|
||||
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
(&**self).is_write_vectored()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> ActixStream for TlsStream<T> {
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
T::poll_read_ready((&**self).get_ref().get_ref().get_ref(), cx)
|
||||
}
|
||||
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
T::poll_write_ready((&**self).get_ref().get_ref().get_ref(), cx)
|
||||
}
|
||||
}
|
||||
|
||||
/// Accept TLS connections via `native-tls` package.
|
||||
///
|
||||
/// `native-tls` feature enables this `Acceptor` type.
|
||||
pub struct Acceptor {
|
||||
acceptor: TlsAcceptor,
|
||||
}
|
||||
|
||||
impl Acceptor {
|
||||
/// Create `native-tls` based `Acceptor` service factory.
|
||||
#[inline]
|
||||
pub fn new(acceptor: TlsAcceptor) -> Self {
|
||||
Acceptor { acceptor }
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Acceptor {
|
||||
#[inline]
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
acceptor: self.acceptor.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream + 'static> ServiceFactory<T> for Acceptor {
|
||||
type Response = TlsStream<T>;
|
||||
type Error = Error;
|
||||
type Config = ();
|
||||
|
||||
type Service = NativeTlsAcceptorService;
|
||||
type InitError = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
let res = MAX_CONN_COUNTER.with(|conns| {
|
||||
Ok(NativeTlsAcceptorService {
|
||||
acceptor: self.acceptor.clone(),
|
||||
conns: conns.clone(),
|
||||
})
|
||||
});
|
||||
Box::pin(async { res })
|
||||
}
|
||||
}
|
||||
|
||||
pub struct NativeTlsAcceptorService {
|
||||
acceptor: TlsAcceptor,
|
||||
conns: Counter,
|
||||
}
|
||||
|
||||
impl<T: ActixStream + 'static> Service<T> for NativeTlsAcceptorService {
|
||||
type Response = TlsStream<T>;
|
||||
type Error = Error;
|
||||
type Future = LocalBoxFuture<'static, Result<TlsStream<T>, Error>>;
|
||||
|
||||
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
if self.conns.available(cx) {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
fn call(&self, io: T) -> Self::Future {
|
||||
let guard = self.conns.get();
|
||||
let acceptor = self.acceptor.clone();
|
||||
Box::pin(async move {
|
||||
let io = acceptor.accept(io).await;
|
||||
drop(guard);
|
||||
io.map(Into::into)
|
||||
})
|
||||
}
|
||||
}
|
@@ -1,99 +0,0 @@
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use actix_utils::counter::Counter;
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
|
||||
pub use tokio_native_tls::native_tls::Error;
|
||||
pub use tokio_native_tls::{TlsAcceptor, TlsStream};
|
||||
|
||||
use super::MAX_CONN_COUNTER;
|
||||
|
||||
/// Accept TLS connections via `native-tls` package.
|
||||
///
|
||||
/// `native-tls` feature enables this `Acceptor` type.
|
||||
pub struct Acceptor {
|
||||
acceptor: TlsAcceptor,
|
||||
}
|
||||
|
||||
impl Acceptor {
|
||||
/// Create `native-tls` based `Acceptor` service factory.
|
||||
#[inline]
|
||||
pub fn new(acceptor: TlsAcceptor) -> Self {
|
||||
Acceptor { acceptor }
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Acceptor {
|
||||
#[inline]
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
acceptor: self.acceptor.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ServiceFactory<T> for Acceptor
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Response = TlsStream<T>;
|
||||
type Error = Error;
|
||||
type Config = ();
|
||||
|
||||
type Service = NativeTlsAcceptorService;
|
||||
type InitError = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
let res = MAX_CONN_COUNTER.with(|conns| {
|
||||
Ok(NativeTlsAcceptorService {
|
||||
acceptor: self.acceptor.clone(),
|
||||
conns: conns.clone(),
|
||||
})
|
||||
});
|
||||
Box::pin(async { res })
|
||||
}
|
||||
}
|
||||
|
||||
pub struct NativeTlsAcceptorService {
|
||||
acceptor: TlsAcceptor,
|
||||
conns: Counter,
|
||||
}
|
||||
|
||||
impl Clone for NativeTlsAcceptorService {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
acceptor: self.acceptor.clone(),
|
||||
conns: self.conns.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Service<T> for NativeTlsAcceptorService
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Response = TlsStream<T>;
|
||||
type Error = Error;
|
||||
type Future = LocalBoxFuture<'static, Result<TlsStream<T>, Error>>;
|
||||
|
||||
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
if self.conns.available(cx) {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
fn call(&self, io: T) -> Self::Future {
|
||||
let guard = self.conns.get();
|
||||
let this = self.clone();
|
||||
Box::pin(async move {
|
||||
let io = this.acceptor.accept(io).await;
|
||||
drop(guard);
|
||||
io
|
||||
})
|
||||
}
|
||||
}
|
@@ -1,10 +1,13 @@
|
||||
use std::{
|
||||
future::Future,
|
||||
io::{self, IoSlice},
|
||||
ops::{Deref, DerefMut},
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
|
||||
use actix_rt::net::{ActixStream, Ready};
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use actix_utils::counter::{Counter, CounterGuard};
|
||||
use futures_core::{future::LocalBoxFuture, ready};
|
||||
@@ -12,10 +15,82 @@ use futures_core::{future::LocalBoxFuture, ready};
|
||||
pub use openssl::ssl::{
|
||||
AlpnError, Error as SslError, HandshakeError, Ssl, SslAcceptor, SslAcceptorBuilder,
|
||||
};
|
||||
pub use tokio_openssl::SslStream;
|
||||
|
||||
use super::MAX_CONN_COUNTER;
|
||||
|
||||
/// Wrapper type for `tokio_openssl::SslStream` in order to impl `ActixStream` trait.
|
||||
pub struct TlsStream<T>(tokio_openssl::SslStream<T>);
|
||||
|
||||
impl<T> From<tokio_openssl::SslStream<T>> for TlsStream<T> {
|
||||
fn from(stream: tokio_openssl::SslStream<T>) -> Self {
|
||||
Self(stream)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for TlsStream<T> {
|
||||
type Target = tokio_openssl::SslStream<T>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for TlsStream<T> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> AsyncRead for TlsStream<T> {
|
||||
fn poll_read(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> AsyncWrite for TlsStream<T> {
|
||||
fn poll_write(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_write(cx, buf)
|
||||
}
|
||||
|
||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_shutdown(cx)
|
||||
}
|
||||
|
||||
fn poll_write_vectored(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
bufs: &[IoSlice<'_>],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_write_vectored(cx, bufs)
|
||||
}
|
||||
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
(&**self).is_write_vectored()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> ActixStream for TlsStream<T> {
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
T::poll_read_ready((&**self).get_ref(), cx)
|
||||
}
|
||||
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
T::poll_write_ready((&**self).get_ref(), cx)
|
||||
}
|
||||
}
|
||||
|
||||
/// Accept TLS connections via `openssl` package.
|
||||
///
|
||||
/// `openssl` feature enables this `Acceptor` type.
|
||||
@@ -40,11 +115,8 @@ impl Clone for Acceptor {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ServiceFactory<T> for Acceptor
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Response = SslStream<T>;
|
||||
impl<T: ActixStream> ServiceFactory<T> for Acceptor {
|
||||
type Response = TlsStream<T>;
|
||||
type Error = SslError;
|
||||
type Config = ();
|
||||
type Service = AcceptorService;
|
||||
@@ -67,11 +139,8 @@ pub struct AcceptorService {
|
||||
conns: Counter,
|
||||
}
|
||||
|
||||
impl<T> Service<T> for AcceptorService
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Response = SslStream<T>;
|
||||
impl<T: ActixStream> Service<T> for AcceptorService {
|
||||
type Response = TlsStream<T>;
|
||||
type Error = SslError;
|
||||
type Future = AcceptorServiceResponse<T>;
|
||||
|
||||
@@ -88,24 +157,25 @@ where
|
||||
let ssl = Ssl::new(ssl_ctx).expect("Provided SSL acceptor was invalid.");
|
||||
AcceptorServiceResponse {
|
||||
_guard: self.conns.get(),
|
||||
stream: Some(SslStream::new(ssl, io).unwrap()),
|
||||
stream: Some(tokio_openssl::SslStream::new(ssl, io).unwrap()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct AcceptorServiceResponse<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
{
|
||||
stream: Option<SslStream<T>>,
|
||||
pub struct AcceptorServiceResponse<T: ActixStream> {
|
||||
stream: Option<tokio_openssl::SslStream<T>>,
|
||||
_guard: CounterGuard,
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncWrite + Unpin> Future for AcceptorServiceResponse<T> {
|
||||
type Output = Result<SslStream<T>, SslError>;
|
||||
impl<T: ActixStream> Future for AcceptorServiceResponse<T> {
|
||||
type Output = Result<TlsStream<T>, SslError>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
ready!(Pin::new(self.stream.as_mut().unwrap()).poll_accept(cx))?;
|
||||
Poll::Ready(Ok(self.stream.take().expect("SSL connect has resolved.")))
|
||||
Poll::Ready(Ok(self
|
||||
.stream
|
||||
.take()
|
||||
.expect("SSL connect has resolved.")
|
||||
.into()))
|
||||
}
|
||||
}
|
||||
|
@@ -1,22 +1,96 @@
|
||||
use std::{
|
||||
future::Future,
|
||||
io,
|
||||
io::{self, IoSlice},
|
||||
ops::{Deref, DerefMut},
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
|
||||
use actix_rt::net::{ActixStream, Ready};
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use actix_utils::counter::{Counter, CounterGuard};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use tokio_rustls::{Accept, TlsAcceptor};
|
||||
|
||||
pub use tokio_rustls::rustls::{ServerConfig, Session};
|
||||
pub use tokio_rustls::server::TlsStream;
|
||||
pub use tokio_rustls::rustls::ServerConfig;
|
||||
|
||||
use super::MAX_CONN_COUNTER;
|
||||
|
||||
/// Wrapper type for `tokio_rustls::server::TlsStream` in order to impl `ActixStream` trait.
|
||||
pub struct TlsStream<T>(tokio_rustls::server::TlsStream<T>);
|
||||
|
||||
impl<T> From<tokio_rustls::server::TlsStream<T>> for TlsStream<T> {
|
||||
fn from(stream: tokio_rustls::server::TlsStream<T>) -> Self {
|
||||
Self(stream)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for TlsStream<T> {
|
||||
type Target = tokio_rustls::server::TlsStream<T>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for TlsStream<T> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> AsyncRead for TlsStream<T> {
|
||||
fn poll_read(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> AsyncWrite for TlsStream<T> {
|
||||
fn poll_write(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_write(cx, buf)
|
||||
}
|
||||
|
||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_shutdown(cx)
|
||||
}
|
||||
|
||||
fn poll_write_vectored(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
bufs: &[IoSlice<'_>],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
Pin::new(&mut **self.get_mut()).poll_write_vectored(cx, bufs)
|
||||
}
|
||||
|
||||
fn is_write_vectored(&self) -> bool {
|
||||
(&**self).is_write_vectored()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ActixStream> ActixStream for TlsStream<T> {
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
T::poll_read_ready((&**self).get_ref().0, cx)
|
||||
}
|
||||
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
T::poll_write_ready((&**self).get_ref().0, cx)
|
||||
}
|
||||
}
|
||||
|
||||
/// Accept TLS connections via `rustls` package.
|
||||
///
|
||||
/// `rustls` feature enables this `Acceptor` type.
|
||||
@@ -43,10 +117,7 @@ impl Clone for Acceptor {
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ServiceFactory<T> for Acceptor
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
impl<T: ActixStream> ServiceFactory<T> for Acceptor {
|
||||
type Response = TlsStream<T>;
|
||||
type Error = io::Error;
|
||||
type Config = ();
|
||||
@@ -72,10 +143,7 @@ pub struct AcceptorService {
|
||||
conns: Counter,
|
||||
}
|
||||
|
||||
impl<T> Service<T> for AcceptorService
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
impl<T: ActixStream> Service<T> for AcceptorService {
|
||||
type Response = TlsStream<T>;
|
||||
type Error = io::Error;
|
||||
type Future = AcceptorServiceFut<T>;
|
||||
@@ -96,22 +164,16 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
pub struct AcceptorServiceFut<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
pub struct AcceptorServiceFut<T: ActixStream> {
|
||||
fut: Accept<T>,
|
||||
_guard: CounterGuard,
|
||||
}
|
||||
|
||||
impl<T> Future for AcceptorServiceFut<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
impl<T: ActixStream> Future for AcceptorServiceFut<T> {
|
||||
type Output = Result<TlsStream<T>, io::Error>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
Pin::new(&mut this.fut).poll(cx)
|
||||
Pin::new(&mut this.fut).poll(cx).map_ok(TlsStream)
|
||||
}
|
||||
}
|
||||
|
@@ -3,7 +3,7 @@ use std::{
|
||||
fmt,
|
||||
iter::{self, FromIterator as _},
|
||||
mem,
|
||||
net::SocketAddr,
|
||||
net::{IpAddr, SocketAddr},
|
||||
};
|
||||
|
||||
/// Parse a host into parts (hostname and port).
|
||||
@@ -19,7 +19,7 @@ pub trait Address: Unpin + 'static {
|
||||
|
||||
impl Address for String {
|
||||
fn hostname(&self) -> &str {
|
||||
&self
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,6 +67,7 @@ pub struct Connect<T> {
|
||||
pub(crate) req: T,
|
||||
pub(crate) port: u16,
|
||||
pub(crate) addr: ConnectAddrs,
|
||||
pub(crate) local_addr: Option<IpAddr>,
|
||||
}
|
||||
|
||||
impl<T: Address> Connect<T> {
|
||||
@@ -78,6 +79,7 @@ impl<T: Address> Connect<T> {
|
||||
req,
|
||||
port: port.unwrap_or(0),
|
||||
addr: ConnectAddrs::None,
|
||||
local_addr: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -88,6 +90,7 @@ impl<T: Address> Connect<T> {
|
||||
req,
|
||||
port: 0,
|
||||
addr: ConnectAddrs::One(addr),
|
||||
local_addr: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,6 +122,12 @@ impl<T: Address> Connect<T> {
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the local IP address to bind to when connecting.
|
||||
pub fn set_local_addr(mut self, addr: impl Into<IpAddr>) -> Self {
|
||||
self.local_addr = Some(addr.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Get hostname.
|
||||
pub fn hostname(&self) -> &str {
|
||||
self.req.hostname()
|
||||
@@ -285,7 +294,7 @@ fn parse_host(host: &str) -> (&str, Option<u16>) {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -329,4 +338,13 @@ mod tests {
|
||||
let mut iter = ConnectAddrsIter::None;
|
||||
assert_eq!(iter.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_local_addr() {
|
||||
let conn = Connect::new("hello").set_local_addr([127, 0, 0, 1]);
|
||||
assert_eq!(
|
||||
conn.local_addr.unwrap(),
|
||||
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@@ -2,12 +2,12 @@ use std::{
|
||||
collections::VecDeque,
|
||||
future::Future,
|
||||
io,
|
||||
net::SocketAddr,
|
||||
net::{IpAddr, SocketAddr, SocketAddrV4, SocketAddrV6},
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_rt::net::{TcpSocket, TcpStream};
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use futures_core::{future::LocalBoxFuture, ready};
|
||||
use log::{error, trace};
|
||||
@@ -54,9 +54,14 @@ impl<T: Address> Service<Connect<T>> for TcpConnector {
|
||||
|
||||
fn call(&self, req: Connect<T>) -> Self::Future {
|
||||
let port = req.port();
|
||||
let Connect { req, addr, .. } = req;
|
||||
let Connect {
|
||||
req,
|
||||
addr,
|
||||
local_addr,
|
||||
..
|
||||
} = req;
|
||||
|
||||
TcpConnectorResponse::new(req, port, addr)
|
||||
TcpConnectorResponse::new(req, port, local_addr, addr)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,14 +70,20 @@ pub enum TcpConnectorResponse<T> {
|
||||
Response {
|
||||
req: Option<T>,
|
||||
port: u16,
|
||||
local_addr: Option<IpAddr>,
|
||||
addrs: Option<VecDeque<SocketAddr>>,
|
||||
stream: Option<ReusableBoxFuture<Result<TcpStream, io::Error>>>,
|
||||
stream: ReusableBoxFuture<Result<TcpStream, io::Error>>,
|
||||
},
|
||||
Error(Option<ConnectError>),
|
||||
}
|
||||
|
||||
impl<T: Address> TcpConnectorResponse<T> {
|
||||
pub(crate) fn new(req: T, port: u16, addr: ConnectAddrs) -> TcpConnectorResponse<T> {
|
||||
pub(crate) fn new(
|
||||
req: T,
|
||||
port: u16,
|
||||
local_addr: Option<IpAddr>,
|
||||
addr: ConnectAddrs,
|
||||
) -> TcpConnectorResponse<T> {
|
||||
if addr.is_none() {
|
||||
error!("TCP connector: unresolved connection address");
|
||||
return TcpConnectorResponse::Error(Some(ConnectError::Unresolved));
|
||||
@@ -90,18 +101,24 @@ impl<T: Address> TcpConnectorResponse<T> {
|
||||
ConnectAddrs::One(addr) => TcpConnectorResponse::Response {
|
||||
req: Some(req),
|
||||
port,
|
||||
local_addr,
|
||||
addrs: None,
|
||||
stream: Some(ReusableBoxFuture::new(TcpStream::connect(addr))),
|
||||
stream: ReusableBoxFuture::new(connect(addr, local_addr)),
|
||||
},
|
||||
|
||||
// When the resolver returns multiple socket addresses, they are popped from the
|
||||
// front of the queue and the first successful TCP connection is returned.
|
||||
ConnectAddrs::Multi(addrs) => TcpConnectorResponse::Response {
|
||||
req: Some(req),
|
||||
port,
|
||||
addrs: Some(addrs),
|
||||
stream: None,
|
||||
},
|
||||
ConnectAddrs::Multi(mut addrs) => {
|
||||
let addr = addrs.pop_front().unwrap();
|
||||
|
||||
TcpConnectorResponse::Response {
|
||||
req: Some(req),
|
||||
port,
|
||||
local_addr,
|
||||
addrs: Some(addrs),
|
||||
stream: ReusableBoxFuture::new(connect(addr, local_addr)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -116,43 +133,62 @@ impl<T: Address> Future for TcpConnectorResponse<T> {
|
||||
TcpConnectorResponse::Response {
|
||||
req,
|
||||
port,
|
||||
local_addr,
|
||||
addrs,
|
||||
stream,
|
||||
} => loop {
|
||||
if let Some(new) = stream.as_mut() {
|
||||
match ready!(new.poll(cx)) {
|
||||
Ok(sock) => {
|
||||
let req = req.take().unwrap();
|
||||
trace!(
|
||||
"TCP connector: successfully connected to {:?} - {:?}",
|
||||
req.hostname(),
|
||||
sock.peer_addr()
|
||||
);
|
||||
return Poll::Ready(Ok(Connection::new(sock, req)));
|
||||
}
|
||||
match ready!(stream.poll(cx)) {
|
||||
Ok(sock) => {
|
||||
let req = req.take().unwrap();
|
||||
trace!(
|
||||
"TCP connector: successfully connected to {:?} - {:?}",
|
||||
req.hostname(),
|
||||
sock.peer_addr()
|
||||
);
|
||||
return Poll::Ready(Ok(Connection::new(sock, req)));
|
||||
}
|
||||
|
||||
Err(err) => {
|
||||
trace!(
|
||||
"TCP connector: failed to connect to {:?} port: {}",
|
||||
req.as_ref().unwrap().hostname(),
|
||||
port,
|
||||
);
|
||||
Err(err) => {
|
||||
trace!(
|
||||
"TCP connector: failed to connect to {:?} port: {}",
|
||||
req.as_ref().unwrap().hostname(),
|
||||
port,
|
||||
);
|
||||
|
||||
if addrs.is_none() || addrs.as_ref().unwrap().is_empty() {
|
||||
return Poll::Ready(Err(ConnectError::Io(err)));
|
||||
}
|
||||
if let Some(addr) = addrs.as_mut().and_then(|addrs| addrs.pop_front()) {
|
||||
stream.set(connect(addr, *local_addr));
|
||||
} else {
|
||||
return Poll::Ready(Err(ConnectError::Io(err)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// try to connect
|
||||
let addr = addrs.as_mut().unwrap().pop_front().unwrap();
|
||||
|
||||
match stream {
|
||||
Some(rbf) => rbf.set(TcpStream::connect(addr)),
|
||||
None => *stream = Some(ReusableBoxFuture::new(TcpStream::connect(addr))),
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn connect(addr: SocketAddr, local_addr: Option<IpAddr>) -> io::Result<TcpStream> {
|
||||
// use local addr if connect asks for it.
|
||||
match local_addr {
|
||||
Some(ip_addr) => {
|
||||
let socket = match ip_addr {
|
||||
IpAddr::V4(ip_addr) => {
|
||||
let socket = TcpSocket::new_v4()?;
|
||||
let addr = SocketAddr::V4(SocketAddrV4::new(ip_addr, 0));
|
||||
socket.bind(addr)?;
|
||||
socket
|
||||
}
|
||||
IpAddr::V6(ip_addr) => {
|
||||
let socket = TcpSocket::new_v6()?;
|
||||
let addr = SocketAddr::V6(SocketAddrV6::new(ip_addr, 0, 0, 0));
|
||||
socket.bind(addr)?;
|
||||
socket
|
||||
}
|
||||
};
|
||||
|
||||
socket.connect(addr).await
|
||||
}
|
||||
|
||||
None => TcpStream::connect(addr).await,
|
||||
}
|
||||
}
|
||||
|
@@ -26,20 +26,20 @@ pub mod ssl;
|
||||
mod uri;
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{pipeline, pipeline_factory, Service, ServiceFactory};
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
|
||||
pub use self::connect::{Address, Connect, Connection};
|
||||
pub use self::connector::{TcpConnector, TcpConnectorFactory};
|
||||
pub use self::error::ConnectError;
|
||||
pub use self::resolve::{Resolve, Resolver, ResolverFactory};
|
||||
pub use self::service::{ConnectService, ConnectServiceFactory, TcpConnectService};
|
||||
pub use self::service::{ConnectService, ConnectServiceFactory};
|
||||
|
||||
/// Create TCP connector service.
|
||||
pub fn new_connector<T: Address + 'static>(
|
||||
resolver: Resolver,
|
||||
) -> impl Service<Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError> + Clone
|
||||
{
|
||||
pipeline(resolver).and_then(TcpConnector)
|
||||
ConnectServiceFactory::new(resolver).service()
|
||||
}
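A hedged usage sketch for `new_connector`; the `open` helper, host string, and error handling are illustrative, and readiness polling is skipped since calling without `poll_ready` is permitted by the `Service` contract:

```rust
use actix_rt::net::TcpStream;
use actix_service::Service as _;
use actix_tls::connect::{new_connector, Connect, ConnectError, Connection, Resolver};

async fn open() -> Result<TcpStream, ConnectError> {
    // Default resolver (std `ToSocketAddrs`) plus the TCP connector.
    let connector = new_connector::<String>(Resolver::Default);

    // "example.org:80" is parsed into hostname and port by `Connect::new`.
    let conn: Connection<String, TcpStream> =
        connector.call(Connect::new("example.org:80".to_owned())).await?;

    // Split the connection back into the raw stream and the original request.
    let (stream, _req) = conn.into_parts();
    Ok(stream)
}
```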
|
||||
|
||||
/// Create TCP connector service factory.
|
||||
@@ -52,7 +52,7 @@ pub fn new_connector_factory<T: Address + 'static>(
|
||||
Error = ConnectError,
|
||||
InitError = (),
|
||||
> + Clone {
|
||||
pipeline_factory(ResolverFactory::new(resolver)).and_then(TcpConnectorFactory)
|
||||
ConnectServiceFactory::new(resolver)
|
||||
}
|
||||
|
||||
/// Create connector service with default parameters.
|
||||
|
@@ -56,7 +56,7 @@ pub enum Resolver {
|
||||
/// An interface for custom async DNS resolvers.
|
||||
///
|
||||
/// # Usage
|
||||
/// ```rust
|
||||
/// ```
|
||||
/// use std::net::SocketAddr;
|
||||
///
|
||||
/// use actix_tls::connect::{Resolve, Resolver};
|
||||
@@ -164,7 +164,7 @@ impl<T: Address> Service<Connect<T>> for Resolver {
|
||||
}
|
||||
|
||||
Self::Custom(resolver) => {
|
||||
let resolver = Rc::clone(&resolver);
|
||||
let resolver = Rc::clone(resolver);
|
||||
ResolverFuture::LookupCustom(Box::pin(async move {
|
||||
let addrs = resolver
|
||||
.lookup(req.hostname(), req.port())
|
||||
|
@@ -34,14 +34,6 @@ impl ConnectServiceFactory {
|
||||
resolver: self.resolver.service(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct new tcp stream service
|
||||
pub fn tcp_service(&self) -> TcpConnectService {
|
||||
TcpConnectService {
|
||||
tcp: self.tcp.service(),
|
||||
resolver: self.resolver.service(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for ConnectServiceFactory {
|
||||
@@ -63,7 +55,7 @@ impl<T: Address> ServiceFactory<Connect<T>> for ConnectServiceFactory {
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
let service = self.service();
|
||||
Box::pin(async move { Ok(service) })
|
||||
Box::pin(async { Ok(service) })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -135,44 +127,3 @@ impl<T: Address> Future for ConnectServiceResponse<T> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct TcpConnectService {
|
||||
tcp: TcpConnector,
|
||||
resolver: Resolver,
|
||||
}
|
||||
|
||||
impl<T: Address> Service<Connect<T>> for TcpConnectService {
|
||||
type Response = TcpStream;
|
||||
type Error = ConnectError;
|
||||
type Future = TcpConnectServiceResponse<T>;
|
||||
|
||||
actix_service::always_ready!();
|
||||
|
||||
fn call(&self, req: Connect<T>) -> Self::Future {
|
||||
TcpConnectServiceResponse {
|
||||
fut: ConnectFuture::Resolve(self.resolver.call(req)),
|
||||
tcp: self.tcp,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TcpConnectServiceResponse<T: Address> {
|
||||
fut: ConnectFuture<T>,
|
||||
tcp: TcpConnector,
|
||||
}
|
||||
|
||||
impl<T: Address> Future for TcpConnectServiceResponse<T> {
|
||||
type Output = Result<TcpStream, ConnectError>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
loop {
|
||||
match ready!(self.fut.poll_connect(cx))? {
|
||||
ConnectOutput::Resolved(res) => {
|
||||
self.fut = ConnectFuture::Connect(self.tcp.call(res));
|
||||
}
|
||||
ConnectOutput::Connected(conn) => return Poll::Ready(Ok(conn.into_parts().0)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -5,3 +5,6 @@ pub mod openssl;
|
||||
|
||||
#[cfg(feature = "rustls")]
|
||||
pub mod rustls;
|
||||
|
||||
#[cfg(feature = "native-tls")]
|
||||
pub mod native_tls;
|
||||
|
88
actix-tls/src/connect/ssl/native_tls.rs
Normal file
@@ -0,0 +1,88 @@
|
||||
use std::io;
|
||||
|
||||
use actix_rt::net::ActixStream;
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use log::trace;
|
||||
use tokio_native_tls::{TlsConnector as TokioNativetlsConnector, TlsStream};
|
||||
|
||||
pub use tokio_native_tls::native_tls::TlsConnector;
|
||||
|
||||
use crate::connect::{Address, Connection};
|
||||
|
||||
/// Native-tls connector factory and service
|
||||
pub struct NativetlsConnector {
|
||||
connector: TokioNativetlsConnector,
|
||||
}
|
||||
|
||||
impl NativetlsConnector {
|
||||
pub fn new(connector: TlsConnector) -> Self {
|
||||
Self {
|
||||
connector: TokioNativetlsConnector::from(connector),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl NativetlsConnector {
|
||||
pub fn service(connector: TlsConnector) -> Self {
|
||||
Self::new(connector)
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for NativetlsConnector {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
connector: self.connector.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address, U> ServiceFactory<Connection<T, U>> for NativetlsConnector
|
||||
where
|
||||
U: ActixStream + 'static,
|
||||
{
|
||||
type Response = Connection<T, TlsStream<U>>;
|
||||
type Error = io::Error;
|
||||
type Config = ();
|
||||
type Service = Self;
|
||||
type InitError = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
let connector = self.clone();
|
||||
Box::pin(async { Ok(connector) })
|
||||
}
|
||||
}
|
||||
|
||||
// NativetlsConnector is both its own ServiceFactory and Service impl type,
|
||||
// as the factory and service share the same type and state.
|
||||
impl<T, U> Service<Connection<T, U>> for NativetlsConnector
where
    T: Address,
    U: ActixStream + 'static,
{
    type Response = Connection<T, TlsStream<U>>;
    type Error = io::Error;
    type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;

    actix_service::always_ready!();

    fn call(&self, stream: Connection<T, U>) -> Self::Future {
        let (io, stream) = stream.replace_io(());
        let connector = self.connector.clone();
        Box::pin(async move {
            trace!("SSL Handshake start for: {:?}", stream.host());
            connector
                .connect(stream.host(), io)
                .await
                .map(|res| {
                    trace!("SSL Handshake success: {:?}", stream.host());
                    stream.replace_io(res).1
                })
                .map_err(|e| {
                    trace!("SSL Handshake error: {:?}", e);
                    io::Error::new(io::ErrorKind::Other, format!("{}", e))
                })
        })
    }
}
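For orientation, the call() above ultimately defers to tokio-native-tls. A hedged sketch of that underlying handshake used directly, outside the service wrapper; the host name and port are placeholders, and errors are boxed rather than mapped to io::Error as the service does:

use tokio::net::TcpStream;
use tokio_native_tls::{native_tls, TlsConnector};

// Hedged sketch, not part of this diff.
async fn handshake_sketch() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint; the service gets host and address from the Connection instead.
    let tcp = TcpStream::connect(("example.com", 443)).await?;

    // Same wrapping as NativetlsConnector::new: native-tls connector -> tokio adapter.
    let connector = TlsConnector::from(native_tls::TlsConnector::new()?);

    // Equivalent of `connector.connect(stream.host(), io)` in the service above.
    let _tls_stream = connector.connect("example.com", tcp).await?;

    Ok(())
}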
@@ -1,13 +1,11 @@
 use std::{
-    fmt,
     future::Future,
     io,
     pin::Pin,
     task::{Context, Poll},
 };
 
-use actix_codec::{AsyncRead, AsyncWrite};
-use actix_rt::net::TcpStream;
+use actix_rt::net::ActixStream;
 use actix_service::{Service, ServiceFactory};
 use futures_core::{future::LocalBoxFuture, ready};
 use log::trace;
@@ -15,10 +13,7 @@ use log::trace;
 pub use openssl::ssl::{Error as SslError, HandshakeError, SslConnector, SslMethod};
 pub use tokio_openssl::SslStream;
 
-use crate::connect::resolve::Resolve;
-use crate::connect::{
-    Address, Connect, ConnectError, ConnectService, ConnectServiceFactory, Connection, Resolver,
-};
+use crate::connect::{Address, Connection};
 
 /// OpenSSL connector factory
 pub struct OpensslConnector {
@@ -45,8 +40,8 @@ impl Clone for OpensslConnector {
 
 impl<T, U> ServiceFactory<Connection<T, U>> for OpensslConnector
 where
-    T: Address + 'static,
-    U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
+    T: Address,
+    U: ActixStream + 'static,
 {
     type Response = Connection<T, SslStream<U>>;
     type Error = io::Error;
@@ -75,8 +70,8 @@ impl Clone for OpensslConnectorService {
 
 impl<T, U> Service<Connection<T, U>> for OpensslConnectorService
 where
-    T: Address + 'static,
-    U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
+    T: Address,
+    U: ActixStream,
 {
     type Response = Connection<T, SslStream<U>>;
    type Error = io::Error;
@@ -112,7 +107,8 @@ pub struct ConnectAsyncExt<T, U> {
 
 impl<T: Address, U> Future for ConnectAsyncExt<T, U>
 where
-    U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
+    T: Address,
+    U: ActixStream,
 {
     type Output = Result<Connection<T, SslStream<U>>, io::Error>;
 
@@ -132,115 +128,3 @@ where
         }
     }
 }
-
-pub struct OpensslConnectServiceFactory {
-    tcp: ConnectServiceFactory,
-    openssl: OpensslConnector,
-}
-
-impl OpensslConnectServiceFactory {
-    /// Construct new OpensslConnectService factory
-    pub fn new(connector: SslConnector) -> Self {
-        OpensslConnectServiceFactory {
-            tcp: ConnectServiceFactory::new(Resolver::Default),
-            openssl: OpensslConnector::new(connector),
-        }
-    }
-
-    /// Construct new connect service with custom DNS resolver
-    pub fn with_resolver(connector: SslConnector, resolver: impl Resolve + 'static) -> Self {
-        OpensslConnectServiceFactory {
-            tcp: ConnectServiceFactory::new(Resolver::new_custom(resolver)),
-            openssl: OpensslConnector::new(connector),
-        }
-    }
-
-    /// Construct OpenSSL connect service
-    pub fn service(&self) -> OpensslConnectService {
-        OpensslConnectService {
-            tcp: self.tcp.service(),
-            openssl: OpensslConnectorService {
-                connector: self.openssl.connector.clone(),
-            },
-        }
-    }
-}
-
-impl Clone for OpensslConnectServiceFactory {
-    fn clone(&self) -> Self {
-        OpensslConnectServiceFactory {
-            tcp: self.tcp.clone(),
-            openssl: self.openssl.clone(),
-        }
-    }
-}
-
-impl<T: Address + 'static> ServiceFactory<Connect<T>> for OpensslConnectServiceFactory {
-    type Response = SslStream<TcpStream>;
-    type Error = ConnectError;
-    type Config = ();
-    type Service = OpensslConnectService;
-    type InitError = ();
-    type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
-
-    fn new_service(&self, _: ()) -> Self::Future {
-        let service = self.service();
-        Box::pin(async { Ok(service) })
-    }
-}
-
-#[derive(Clone)]
-pub struct OpensslConnectService {
-    tcp: ConnectService,
-    openssl: OpensslConnectorService,
-}
-
-impl<T: Address + 'static> Service<Connect<T>> for OpensslConnectService {
-    type Response = SslStream<TcpStream>;
-    type Error = ConnectError;
-    type Future = OpensslConnectServiceResponse<T>;
-
-    actix_service::always_ready!();
-
-    fn call(&self, req: Connect<T>) -> Self::Future {
-        OpensslConnectServiceResponse {
-            fut1: Some(self.tcp.call(req)),
-            fut2: None,
-            openssl: self.openssl.clone(),
-        }
-    }
-}
-
-pub struct OpensslConnectServiceResponse<T: Address + 'static> {
-    fut1: Option<<ConnectService as Service<Connect<T>>>::Future>,
-    fut2: Option<<OpensslConnectorService as Service<Connection<T, TcpStream>>>::Future>,
-    openssl: OpensslConnectorService,
-}
-
-impl<T: Address> Future for OpensslConnectServiceResponse<T> {
-    type Output = Result<SslStream<TcpStream>, ConnectError>;
-
-    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
-        if let Some(ref mut fut) = self.fut1 {
-            match ready!(Pin::new(fut).poll(cx)) {
-                Ok(res) => {
-                    let _ = self.fut1.take();
-                    self.fut2 = Some(self.openssl.call(res));
-                }
-                Err(e) => return Poll::Ready(Err(e)),
-            }
-        }
-
-        if let Some(ref mut fut) = self.fut2 {
-            match ready!(Pin::new(fut).poll(cx)) {
-                Ok(connect) => Poll::Ready(Ok(connect.into_parts().0)),
-                Err(e) => Poll::Ready(Err(ConnectError::Io(io::Error::new(
-                    io::ErrorKind::Other,
-                    e,
-                )))),
-            }
-        } else {
-            Poll::Pending
-        }
-    }
-}
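With OpensslConnectServiceFactory and OpensslConnectService removed, a combined resolve/connect/handshake service is no longer provided by this module. A hedged sketch of the equivalent two steps done by hand with the openssl and tokio-openssl crates; this is not the removed API, and the host and port are placeholders:

use std::pin::Pin;

use openssl::ssl::{SslConnector, SslMethod};
use tokio::net::TcpStream;
use tokio_openssl::SslStream;

// Hedged sketch, not part of this diff.
async fn connect_then_handshake() -> Result<SslStream<TcpStream>, Box<dyn std::error::Error>> {
    // Step 1: plain TCP connect (the part the removed ConnectService wrapper used to drive).
    let tcp = TcpStream::connect(("example.com", 443)).await?;

    // Step 2: TLS handshake (the part OpensslConnectorService still handles).
    let connector = SslConnector::builder(SslMethod::tls())?.build();
    let ssl = connector.configure()?.into_ssl("example.com")?;

    let mut stream = SslStream::new(ssl, tcp)?;
    Pin::new(&mut stream).connect().await?;

    Ok(stream)
}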
@@ -1,20 +1,20 @@
 use std::{
-    fmt,
+    convert::TryFrom,
     future::Future,
     io,
     pin::Pin,
     sync::Arc,
     task::{Context, Poll},
 };
 
-pub use tokio_rustls::rustls::Session;
 pub use tokio_rustls::{client::TlsStream, rustls::ClientConfig};
 pub use webpki_roots::TLS_SERVER_ROOTS;
 
-use actix_codec::{AsyncRead, AsyncWrite};
+use actix_rt::net::ActixStream;
 use actix_service::{Service, ServiceFactory};
 use futures_core::{future::LocalBoxFuture, ready};
 use log::trace;
-use tokio_rustls::webpki::DNSNameRef;
+use tokio_rustls::rustls::client::ServerName;
 use tokio_rustls::{Connect, TlsConnector};
 
 use crate::connect::{Address, Connection};
@@ -44,12 +44,13 @@ impl Clone for RustlsConnector {
     }
 }
 
-impl<T: Address, U> ServiceFactory<Connection<T, U>> for RustlsConnector
+impl<T, U> ServiceFactory<Connection<T, U>> for RustlsConnector
 where
-    U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
+    T: Address,
+    U: ActixStream + 'static,
 {
     type Response = Connection<T, TlsStream<U>>;
-    type Error = std::io::Error;
+    type Error = io::Error;
     type Config = ();
     type Service = RustlsConnectorService;
     type InitError = ();
@@ -76,43 +77,55 @@ impl Clone for RustlsConnectorService {
 impl<T, U> Service<Connection<T, U>> for RustlsConnectorService
 where
     T: Address,
-    U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
+    U: ActixStream,
 {
     type Response = Connection<T, TlsStream<U>>;
-    type Error = std::io::Error;
-    type Future = ConnectAsyncExt<T, U>;
+    type Error = io::Error;
+    type Future = RustlsConnectorServiceFuture<T, U>;
 
     actix_service::always_ready!();
 
-    fn call(&self, stream: Connection<T, U>) -> Self::Future {
-        trace!("SSL Handshake start for: {:?}", stream.host());
-        let (io, stream) = stream.replace_io(());
-        let host = DNSNameRef::try_from_ascii_str(stream.host())
-            .expect("rustls currently only handles hostname-based connections. See https://github.com/briansmith/webpki/issues/54");
-        ConnectAsyncExt {
-            fut: TlsConnector::from(self.connector.clone()).connect(host, io),
-            stream: Some(stream),
+    fn call(&self, connection: Connection<T, U>) -> Self::Future {
+        trace!("SSL Handshake start for: {:?}", connection.host());
+        let (stream, connection) = connection.replace_io(());
+
+        match ServerName::try_from(connection.host()) {
+            Ok(host) => RustlsConnectorServiceFuture::Future {
+                connect: TlsConnector::from(self.connector.clone()).connect(host, stream),
+                connection: Some(connection),
+            },
+            Err(_) => RustlsConnectorServiceFuture::InvalidDns,
         }
     }
 }
 
-pub struct ConnectAsyncExt<T, U> {
-    fut: Connect<U>,
-    stream: Option<Connection<T, ()>>,
+pub enum RustlsConnectorServiceFuture<T, U> {
+    /// See issue https://github.com/briansmith/webpki/issues/54
+    InvalidDns,
+    Future {
+        connect: Connect<U>,
+        connection: Option<Connection<T, ()>>,
+    },
 }
 
-impl<T, U> Future for ConnectAsyncExt<T, U>
+impl<T, U> Future for RustlsConnectorServiceFuture<T, U>
 where
     T: Address,
-    U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
+    U: ActixStream,
 {
-    type Output = Result<Connection<T, TlsStream<U>>, std::io::Error>;
+    type Output = Result<Connection<T, TlsStream<U>>, io::Error>;
 
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
-        let this = self.get_mut();
-        let stream = ready!(Pin::new(&mut this.fut).poll(cx))?;
-        let s = this.stream.take().unwrap();
-        trace!("SSL Handshake success: {:?}", s.host());
-        Poll::Ready(Ok(s.replace_io(stream).1))
+        match self.get_mut() {
+            Self::InvalidDns => Poll::Ready(Err(
+                io::Error::new(io::ErrorKind::Other, "rustls currently only handles hostname-based connections. See https://github.com/briansmith/webpki/issues/54")
+            )),
+            Self::Future { connect, connection } => {
+                let stream = ready!(Pin::new(connect).poll(cx))?;
+                let connection = connection.take().unwrap();
+                trace!("SSL Handshake success: {:?}", connection.host());
+                Poll::Ready(Ok(connection.replace_io(stream).1))
+            }
+        }
     }
 }
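The behavioural change in call() is that an unusable host name no longer panics in the expect() above; ServerName::try_from routes it to the InvalidDns variant, which poll() then reports as an io::Error. A hedged sketch of that conversion in isolation (the helper name is made up; the import path is the one added by this diff):

use std::convert::TryFrom;
use std::io;

use tokio_rustls::rustls::client::ServerName;

// Hedged sketch, not part of this diff: map a host string to a rustls ServerName,
// turning rejection into an io::Error instead of a panic, mirroring the InvalidDns branch.
fn to_server_name(host: &str) -> Result<ServerName, io::Error> {
    ServerName::try_from(host).map_err(|_| {
        io::Error::new(
            io::ErrorKind::Other,
            "host cannot be used as a TLS server name",
        )
    })
}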
@@ -5,6 +5,7 @@
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]

#[cfg(feature = "openssl")]
#[allow(unused_extern_crates)]
extern crate tls_openssl as openssl;

#[cfg(feature = "accept")]
@@ -1,4 +1,9 @@
-use std::io;
+#![cfg(feature = "connect")]
+
+use std::{
+    io,
+    net::{IpAddr, Ipv4Addr},
+};
 
 use actix_codec::{BytesCodec, Framed};
 use actix_rt::net::TcpStream;
@@ -9,7 +14,7 @@ use futures_util::sink::SinkExt;
 
 use actix_tls::connect::{self as actix_connect, Connect};
 
-#[cfg(all(feature = "connect", feature = "openssl"))]
+#[cfg(feature = "openssl")]
 #[actix_rt::test]
 async fn test_string() {
     let srv = TestServer::with(|| {
@@ -125,3 +130,25 @@ async fn test_rustls_uri() {
     let con = conn.call(addr.into()).await.unwrap();
     assert_eq!(con.peer_addr().unwrap(), srv.addr());
 }
+
+#[actix_rt::test]
+async fn test_local_addr() {
+    let srv = TestServer::with(|| {
+        fn_service(|io: TcpStream| async {
+            let mut framed = Framed::new(io, BytesCodec);
+            framed.send(Bytes::from_static(b"test")).await?;
+            Ok::<_, io::Error>(())
+        })
+    });
+
+    let conn = actix_connect::default_connector();
+    let local = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 3));
+
+    let (con, _) = conn
+        .call(Connect::with_addr("10", srv.addr()).set_local_addr(local))
+        .await
+        .unwrap()
+        .into_parts();
+
+    assert_eq!(con.local_addr().unwrap().ip(), local)
+}
@@ -1,3 +1,5 @@
+#![cfg(feature = "connect")]
+
 use std::{
     io,
     net::{Ipv4Addr, SocketAddr},
Some files were not shown because too many files have changed in this diff.