mirror of https://github.com/fafhrd91/actix-net synced 2025-08-12 20:07:06 +02:00

Compare commits


2 Commits

Author       SHA1        Message                                                 Date
Nikolay Kim  ebf8d7fa34  Fix back-pressure handling for concurrent connections  2018-12-21 10:43:18 -08:00
Nikolay Kim  298727dcbd  back port bug fixes                                     2018-12-12 19:01:59 -08:00
177 changed files with 8176 additions and 16208 deletions

.appveyor.yml (new file, 41 lines)

@@ -0,0 +1,41 @@
environment:
global:
PROJECT_NAME: actix-net
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test


@@ -1,24 +0,0 @@
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
INSERT_PR_TYPE
## PR Checklist
Check your PR fulfills the following:
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt
## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->
<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->


@@ -1,103 +0,0 @@
name: CI
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [master]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
- { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
- { name: Windows (32-bit), os: windows-latest, triple: i686-pc-windows-msvc }
version:
- 1.46.0 # MSRV
- stable
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
steps:
- name: Setup Routing
if: matrix.target.os == 'macos-latest'
run: sudo ifconfig lo0 alias 127.0.0.3
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Install MSYS2
if: matrix.target.triple == 'x86_64-pc-windows-gnu'
uses: msys2/setup-msys2@v2
- name: Install MinGW Packages
if: matrix.target.triple == 'x86_64-pc-windows-gnu'
run: |
msys2 -c 'pacman -Sy --noconfirm pacman'
msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with:
command: generate-lockfile
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with:
command: hack
args: --clean-per-run check --workspace --no-default-features --tests
- name: check full
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --all-features --no-fail-fast -- --nocapture
- name: Generate coverage file
if: >
matrix.target.os == 'ubuntu-latest'
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml --verbose
- name: Upload to Codecov
if: >
matrix.target.os == 'ubuntu-latest'
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
uses: codecov/codecov-action@v1
with:
file: cobertura.xml
- name: Clear the cargo caches
run: |
cargo install cargo-cache --no-default-features --features ci-autoclean
cargo-cache


@@ -1,42 +0,0 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: rustfmt
override: true
- name: Rustfmt Check
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: clippy
override: true
- name: Clippy Check
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --tests --all-features


@@ -1,35 +0,0 @@
name: Upload documentation
on:
push:
branches: [master]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Build Docs
uses: actions-rs/cargo@v1
with:
command: doc
args: --workspace --all-features --no-deps
- name: Tweak HTML
run: echo '<meta http-equiv="refresh" content="0;url=actix_server/index.html">' > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: target/doc

.gitignore (2 lines changed)

@@ -12,5 +12,3 @@ guide/build/
# These are backup files generated by rustfmt
**/*.rs.bk
.idea

.travis.yml (new file, 54 lines)

@@ -0,0 +1,54 @@
language: rust
sudo: required
dist: trusty
cache:
cargo: true
apt: true
matrix:
include:
- rust: stable
- rust: beta
- rust: nightly
allow_failures:
- rust: nightly
env:
global:
# - RUSTFLAGS="-C link-dead-code"
- OPENSSL_VERSION=openssl-1.0.2
before_install:
- sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
- sudo apt-get update -qq
- sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
# Add clippy
before_script:
- export PATH=$PATH:~/.cargo/bin
script:
- |
if [[ "$TRAVIS_RUST_VERSION" != "nightly" ]]; then
cargo clean
cargo test --features="ssl,tls,rust-tls" -- --nocapture
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin
cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml
bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage"
fi
# Upload docs
after_success:
- |
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then
cargo doc --features "ssl,tls,rust-tls" --no-deps &&
echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
git clone https://github.com/davisp/ghp-import.git &&
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&
echo "Uploaded documentation"
fi

CHANGES.md (new file, 77 lines)

@@ -0,0 +1,77 @@
# Changes
## [0.2.6] - 2018-12-21
### Fixed
* Fix back-pressure handling for concurrent connections
## [0.2.5] - 2018-12-12
### Fixed
* Fix back-pressure for concurrent ssl handshakes
* Drop completed future for .then and .and_then combinators
## [0.2.4] - 2018-11-21
### Added
* Allow skipping the name resolution stage in Connector
## [0.2.3] - 2018-11-17
### Added
* Framed::is_write_buf_empty() checks if write buffer is flushed
## [0.2.2] - 2018-11-14
### Added
* Add low/high caps to Framed
### Changed
* Refactor Connector and Resolver services
### Fixed
* Fix wrong service to socket binding
## [0.2.0] - 2018-11-08
### Added
* Timeout service
* Added ServiceConfig and ServiceRuntime for server service configuration
### Changed
* Connector has been refactored
* timer and LowResTimer renamed to time and LowResTime
* Refactored `Server::configure()` method
## [0.1.1] - 2018-10-10
### Changed
- Set actix min version - 0.7.5
- Set trust-dns min version
## [0.1.0] - 2018-10-08
* Initial impl


@@ -34,13 +34,10 @@ This Code of Conduct applies both within project spaces and in public spaces whe
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
[@robjtede]: https://github.com/robjtede
[@JohnTitor]: https://github.com/JohnTitor
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]


@@ -1,25 +1,82 @@
[workspace]
members = [
"actix-codec",
"actix-macros",
"actix-router",
"actix-rt",
"actix-server",
"actix-service",
"actix-tls",
"actix-tracing",
"actix-utils",
"bytestring",
]
[package]
name = "actix-net"
version = "0.2.6"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix net - framework for the compisible network services for Rust (experimental)"
readme = "README.md"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-net/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-macros = { path = "actix-macros" }
actix-router = { path = "actix-router" }
actix-rt = { path = "actix-rt" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }
[package.metadata.docs.rs]
features = ["ssl", "tls", "rust-tls"]
[badges]
travis-ci = { repository = "actix/actix-net", branch = "master" }
# appveyor = { repository = "fafhrd91/actix-web-hdy9d" }
codecov = { repository = "actix/actix-net", branch = "master", service = "github" }
[lib]
name = "actix_net"
path = "src/lib.rs"
[features]
default = []
# tls
tls = ["native-tls"]
# openssl
ssl = ["openssl", "tokio-openssl"]
# rustls
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"]
cell = []
[dependencies]
actix = "0.7.6"
log = "0.4"
num_cpus = "1.0"
# io
mio = "^0.6.13"
net2 = "0.2"
bytes = "0.4"
futures = "0.1"
slab = "0.4"
tokio = "0.1"
tokio-codec = "0.1"
tokio-io = "0.1"
tokio-tcp = "0.1"
tokio-timer = "0.2"
tokio-reactor = "0.1"
tokio-current-thread = "0.1"
tower-service = "0.1"
trust-dns-resolver = "^0.10.2"
# native-tls
native-tls = { version="0.2", optional = true }
# openssl
openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.2", optional = true }
#rustls
rustls = { version = "^0.14", optional = true }
tokio-rustls = { version = "^0.8", optional = true }
webpki = { version = "0.18", optional = true }
webpki-roots = { version = "0.15", optional = true }
[dev-dependencies]
env_logger = "0.5"
[profile.release]
lto = true
opt-level = 3
codegen-units = 1


@@ -1,26 +1,67 @@
# Actix Net
# Actix net [![Build Status](https://travis-ci.org/actix/actix-net.svg?branch=master)](https://travis-ci.org/actix/actix-net) [![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net) [![crates.io](https://meritbadge.herokuapp.com/actix-net)](https://crates.io/crates/actix-net) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
> A collection of lower-level libraries for composable network services.
Actix net - framework for composable network services (experimental)
![Apache 2.0 or MIT licensed](https://img.shields.io/crates/l/actix-server)
[![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & community resources
## Build statuses
| Platform | Build Status |
| ---------------- | ------------ |
| Linux | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Linux)") |
| macOS | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28macOS%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(macOS)") |
| Windows | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Windows%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows)") |
| Windows (MinGW) | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Windows-mingw%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows-mingw)") |
* [API Documentation (Development)](https://actix.rs/actix-net/actix_net/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-net](https://crates.io/crates/actix-net)
* Minimum supported Rust version: 1.26 or later
## Example
See `actix-server/examples` and `actix-tls/examples` for some basic examples.
### MSRV
This repo's Minimum Supported Rust Version (MSRV) is 1.46.0.
```rust
fn main() {
let sys = actix::System::new("test");
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder.set_private_key_file("./examples/key.pem", SslFiletype::PEM).unwrap();
builder.set_certificate_chain_file("./examples/cert.pem").unwrap();
let acceptor = builder.build();
let num = Arc::new(AtomicUsize::new(0));
// bind socket address and start workers. By default the server uses the
// number of available logical CPUs as the thread count. Actix net starts a
// separate instance of the service pipeline in each worker.
Server::default()
.bind(
// configure service pipeline
"basic", "0.0.0.0:8443",
move || {
let num = num.clone();
let acceptor = acceptor.clone();
// service for converting incoming TcpStream to a SslStream<TcpStream>
(move |stream| {
SslAcceptorExt::accept_async(&acceptor, stream)
.map_err(|e| println!("Openssl error: {}", e))
})
// convert closure to a `NewService`
.into_new_service()
// The .and_then() combinator uses another service to convert the incoming `Request`
// into a `Response`, and then uses that response as input for the next service.
// In this case, on success, we use the `logger` service
.and_then(logger)
// Next service counts number of connections
.and_then(move |req| {
let num = num.fetch_add(1, Ordering::Relaxed);
println!("processed {:?} connections", num);
future::ok(())
})
}).unwrap()
.start();
sys.run();
}
```
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
@@ -30,5 +71,6 @@ at your option.
## Code of Conduct
Contribution to the actix-net repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.
Contribution to the actix-net crate is organized under the terms of the
Contributor Covenant; the maintainer of actix-net, @fafhrd91, promises to
intervene to uphold that code of conduct.


@@ -1,59 +0,0 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.4.0-beta.1 - 2020-12-28
* Replace `pin-project` with `pin-project-lite`. [#237]
* Upgrade `tokio` dependency to `1`. [#237]
* Upgrade `tokio-util` dependency to `0.6`. [#237]
* Upgrade `bytes` dependency to `1`. [#237]
[#237]: https://github.com/actix/actix-net/pull/237
## 0.3.0 - 2020-08-23
* No changes from beta 2.
## 0.3.0-beta.2 - 2020-08-19
* Remove unused type parameter from `Framed::replace_codec`.
## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec` `.encode()` performance
* Simplify `BytesCodec` `.decode()`
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` to check emptiness of read buffer.
## 0.2.0 - 2019-12-10
* Use specific futures dependencies
## 0.2.0-alpha.4
* Fix buffer remaining capacity calculation
## 0.2.0-alpha.3
* Use tokio 0.2
* Fix low/high watermark for write/read buffers
## 0.2.0-alpha.2
* Migrated to `std::future`
## 0.1.2 - 2019-03-27
* Added `Framed::map_io()` method.
## 0.1.1 - 2019-03-06
* Added `FramedParts::with_read_buffer()` method.
## 0.1.0 - 2018-12-09
* Move codec to separate crate


@@ -1,26 +0,0 @@
[package]
name = "actix-codec"
version = "0.4.0-beta.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_codec"
path = "src/lib.rs"
[dependencies]
bitflags = "1.2.1"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
log = "0.4"
pin-project-lite = "0.2"
tokio = "1"
tokio-util = { version = "0.6", features = ["codec", "io"] }


@@ -1 +0,0 @@
../LICENSE-APACHE


@@ -1 +0,0 @@
../LICENSE-MIT


@@ -1,420 +0,0 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io};
use bytes::{Buf, BytesMut};
use futures_core::{ready, Stream};
use futures_sink::Sink;
use crate::{AsyncRead, AsyncWrite, Decoder, Encoder};
/// Low-water mark
const LW: usize = 1024;
/// High-water mark
const HW: usize = 8 * 1024;
bitflags::bitflags! {
struct Flags: u8 {
const EOF = 0b0001;
const READABLE = 0b0010;
}
}
pin_project_lite::pin_project! {
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
/// the `Encoder` and `Decoder` traits to encode and decode frames.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Encoder`/`Decoder`
/// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
pub struct Framed<T, U> {
#[pin]
io: T,
codec: U,
flags: Flags,
read_buf: BytesMut,
write_buf: BytesMut,
}
}
impl<T, U> Framed<T, U>
where
T: AsyncRead + AsyncWrite,
U: Decoder,
{
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
pub fn new(io: T, codec: U) -> Framed<T, U> {
Framed {
io,
codec,
flags: Flags::empty(),
read_buf: BytesMut::with_capacity(HW),
write_buf: BytesMut::with_capacity(HW),
}
}
}
impl<T, U> Framed<T, U> {
/// Returns a reference to the underlying codec.
pub fn codec_ref(&self) -> &U {
&self.codec
}
/// Returns a mutable reference to the underlying codec.
pub fn codec_mut(&mut self) -> &mut U {
&mut self.codec
}
/// Returns a reference to the underlying I/O stream wrapped by
/// `Framed`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn io_ref(&self) -> &T {
&self.io
}
/// Returns a mutable reference to the underlying I/O stream.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn io_mut(&mut self) -> &mut T {
&mut self.io
}
/// Returns a `Pin` of a mutable reference to the underlying I/O stream.
pub fn io_pin(self: Pin<&mut Self>) -> Pin<&mut T> {
self.project().io
}
/// Check if read buffer is empty.
pub fn is_read_buf_empty(&self) -> bool {
self.read_buf.is_empty()
}
/// Check if write buffer is empty.
pub fn is_write_buf_empty(&self) -> bool {
self.write_buf.is_empty()
}
/// Check if write buffer is full.
pub fn is_write_buf_full(&self) -> bool {
self.write_buf.len() >= HW
}
/// Check if framed is able to write more data.
///
/// A `Framed` instance is considered ready if there is free space in the write buffer.
pub fn is_write_ready(&self) -> bool {
self.write_buf.len() < HW
}
/// Consume the `Framed`, returning a `Framed` with a different codec.
pub fn replace_codec<U2>(self, codec: U2) -> Framed<T, U2> {
Framed {
codec,
io: self.io,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
/// Consume the `Framed`, returning a `Framed` with a different I/O object.
pub fn into_map_io<F, T2>(self, f: F) -> Framed<T2, U>
where
F: Fn(T) -> T2,
{
Framed {
io: f(self.io),
codec: self.codec,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
/// Consume the `Framed`, returning a `Framed` with a different codec.
pub fn into_map_codec<F, U2>(self, f: F) -> Framed<T, U2>
where
F: Fn(U) -> U2,
{
Framed {
io: self.io,
codec: f(self.codec),
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
}
impl<T, U> Framed<T, U> {
/// Serialize an item and write it to the inner buffer.
pub fn write<I>(mut self: Pin<&mut Self>, item: I) -> Result<(), <U as Encoder<I>>::Error>
where
T: AsyncWrite,
U: Encoder<I>,
{
let this = self.as_mut().project();
let remaining = this.write_buf.capacity() - this.write_buf.len();
if remaining < LW {
this.write_buf.reserve(HW - remaining);
}
this.codec.encode(item, this.write_buf)?;
Ok(())
}
/// Try to read underlying I/O stream and decode item.
pub fn next_item(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<<U as Decoder>::Item, U::Error>>>
where
T: AsyncRead,
U: Decoder,
{
loop {
let mut this = self.as_mut().project();
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if this.flags.contains(Flags::READABLE) {
if this.flags.contains(Flags::EOF) {
match this.codec.decode_eof(&mut this.read_buf) {
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
Ok(None) => return Poll::Ready(None),
Err(e) => return Poll::Ready(Some(Err(e))),
}
}
log::trace!("attempting to decode a frame");
match this.codec.decode(&mut this.read_buf) {
Ok(Some(frame)) => {
log::trace!("frame decoded from buffer");
return Poll::Ready(Some(Ok(frame)));
}
Err(e) => return Poll::Ready(Some(Err(e))),
_ => (), // Need more data
}
this.flags.remove(Flags::READABLE);
}
debug_assert!(!this.flags.contains(Flags::EOF));
// Otherwise, try to read more data and try again. Make sure we've got room
let remaining = this.read_buf.capacity() - this.read_buf.len();
if remaining < LW {
this.read_buf.reserve(HW - remaining)
}
let cnt = match tokio_util::io::poll_read_buf(this.io, cx, this.read_buf) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e.into()))),
Poll::Ready(Ok(cnt)) => cnt,
};
if cnt == 0 {
this.flags.insert(Flags::EOF);
}
this.flags.insert(Flags::READABLE);
}
}
/// Flush write buffer to underlying I/O stream.
pub fn poll_flush<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
{
let mut this = self.as_mut().project();
log::trace!("flushing framed transport");
while !this.write_buf.is_empty() {
log::trace!("writing; remaining={}", this.write_buf.len());
let n = ready!(this.io.as_mut().poll_write(cx, this.write_buf))?;
if n == 0 {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)
.into()));
}
// remove written data
this.write_buf.advance(n);
}
// Try flushing the underlying IO
ready!(this.io.poll_flush(cx))?;
log::trace!("framed transport flushed");
Poll::Ready(Ok(()))
}
/// Flush write buffer and shutdown underlying I/O stream.
pub fn poll_close<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
{
let mut this = self.as_mut().project();
ready!(this.io.as_mut().poll_flush(cx))?;
ready!(this.io.as_mut().poll_shutdown(cx))?;
Poll::Ready(Ok(()))
}
}
impl<T, U> Stream for Framed<T, U>
where
T: AsyncRead,
U: Decoder,
{
type Item = Result<U::Item, U::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.next_item(cx)
}
}
impl<T, U, I> Sink<I> for Framed<T, U>
where
T: AsyncWrite,
U: Encoder<I>,
U::Error: From<io::Error>,
{
type Error = U::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.is_write_ready() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> {
self.write(item)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.poll_flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.poll_close(cx)
}
}
impl<T, U> fmt::Debug for Framed<T, U>
where
T: fmt::Debug,
U: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Framed")
.field("io", &self.io)
.field("codec", &self.codec)
.finish()
}
}
impl<T, U> Framed<T, U> {
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// These objects take a stream, a read buffer and a write buffer. These
/// fields can be obtained from an existing `Framed` with the `into_parts` method.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
io: parts.io,
codec: parts.codec,
flags: parts.flags,
write_buf: parts.write_buf,
read_buf: parts.read_buf,
}
}
/// Consumes the `Framed`, returning its underlying I/O stream, the buffer
/// with unprocessed data, and the codec.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_parts(self) -> FramedParts<T, U> {
FramedParts {
io: self.io,
codec: self.codec,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
}
/// `FramedParts` contains an export of the data of a Framed transport.
/// It can be used to construct a new `Framed` with a different codec.
/// It contains all current buffers and the inner transport.
#[derive(Debug)]
pub struct FramedParts<T, U> {
/// The inner transport used to read bytes from and write bytes to
pub io: T,
/// The codec
pub codec: U,
/// The buffer with read but unprocessed data.
pub read_buf: BytesMut,
/// A buffer of unprocessed data that has not been written yet.
pub write_buf: BytesMut,
flags: Flags,
}
impl<T, U> FramedParts<T, U> {
/// Create a new, default, `FramedParts`
pub fn new(io: T, codec: U) -> FramedParts<T, U> {
FramedParts {
io,
codec,
flags: Flags::empty(),
read_buf: BytesMut::new(),
write_buf: BytesMut::new(),
}
}
/// Create a new `FramedParts` with read buffer
pub fn with_read_buf(io: T, codec: U, read_buf: BytesMut) -> FramedParts<T, U> {
FramedParts {
io,
codec,
read_buf,
flags: Flags::empty(),
write_buf: BytesMut::new(),
}
}
}
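
The `Framed` transport above enforces write back-pressure through the `LW`/`HW` watermarks: `poll_ready` only resolves while the write buffer is below `HW`, so callers driving it through the `Sink` interface slow down when the peer cannot keep up. The snippet below is a minimal usage sketch, not part of this diff; it assumes `tokio` (with the `net` feature) and `futures-util` are available, uses the `Framed::new`/`BytesCodec` API shown above, and the echo loop itself is purely illustrative.

```rust
use actix_codec::{BytesCodec, Framed};
use futures_util::{SinkExt, StreamExt};
use tokio::net::TcpStream;

// Echo every received chunk back to the peer. `send` waits (via `poll_ready`)
// whenever the write buffer has reached the high-water mark, which is how the
// watermark-based back-pressure above surfaces to calling code.
async fn echo(stream: TcpStream) -> std::io::Result<()> {
    let mut framed = Framed::new(stream, BytesCodec);
    while let Some(chunk) = framed.next().await {
        framed.send(chunk?.freeze()).await?;
    }
    Ok(())
}
```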


@@ -1,23 +0,0 @@
//! Codec utilities for working with framed protocols.
//!
//! Contains adapters to go from streams of bytes, [`AsyncRead`] and
//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`].
//! Framed streams are also known as `transports`.
//!
//! [`Sink`]: futures_sink::Sink
//! [`Stream`]: futures_core::Stream
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod bcodec;
mod framed;
pub use self::bcodec::BytesCodec;
pub use self::framed::{Framed, FramedParts};
pub use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
pub use tokio_util::codec::{Decoder, Encoder};
pub use tokio_util::io::poll_read_buf;


@@ -1 +0,0 @@
/wip


@@ -1,27 +0,0 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.2.0 - 2021-02-02
* Update to latest `actix_rt::System::new` signature. [#261]
[#261]: https://github.com/actix/actix-net/pull/261
## 0.2.0-beta.1 - 2021-01-09
* Remove `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.3 - 2020-12-03
* Add `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.2 - 2020-05-18
* Forward actix_rt::test arguments to test function [#127]
[#127]: https://github.com/actix/actix-net/pull/127


@@ -1,23 +0,0 @@
[package]
name = "actix-macros"
version = "0.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Macros for Actix system and runtime"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-macros"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
proc-macro = true
[dependencies]
quote = "1.0.3"
syn = { version = "^1", features = ["full"] }
[dev-dependencies]
actix-rt = "2.0.0"
futures-util = { version = "0.3", default-features = false }
trybuild = "1"


@@ -1 +0,0 @@
../LICENSE-APACHE


@@ -1 +0,0 @@
../LICENSE-MIT


@@ -1,108 +0,0 @@
//! Macros for Actix system and runtime.
//!
//! The [`actix-rt`](https://docs.rs/actix-rt) crate must be available for macro output to compile.
//!
//! # Entry-point
//! See docs for the [`#[main]`](macro@main) macro.
//!
//! # Tests
//! See docs for the [`#[test]`](macro@test) macro.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use proc_macro::TokenStream;
use quote::quote;
/// Marks async entry-point function to be executed by Actix system.
///
/// # Examples
/// ```
/// #[actix_rt::main]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
#[allow(clippy::needless_doctest_main)]
#[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
if sig.asyncness.is_none() {
return syn::Error::new_spanned(
sig.fn_token,
"the async keyword is missing from the function declaration",
)
.to_compile_error()
.into();
}
sig.asyncness = None;
(quote! {
#(#attrs)*
#vis #sig {
actix_rt::System::new()
.block_on(async move { #body })
}
})
.into()
}
/// Marks async test function to be executed in an Actix system.
///
/// # Examples
/// ```
/// #[actix_rt::test]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
#[proc_macro_attribute]
pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
let mut has_test_attr = false;
for attr in attrs {
if attr.path.is_ident("test") {
has_test_attr = true;
}
}
if sig.asyncness.is_none() {
return syn::Error::new_spanned(
input.sig.fn_token,
"the async keyword is missing from the function declaration",
)
.to_compile_error()
.into();
}
sig.asyncness = None;
let missing_test_attr = if has_test_attr {
quote!()
} else {
quote!(#[test])
};
(quote! {
#missing_test_attr
#(#attrs)*
#vis #sig {
actix_rt::System::new()
.block_on(async { #body })
}
})
.into()
}
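
For reference, the `quote!` template above means that the `#[actix_rt::main]` doc example expands to roughly the following; this is an illustration derived from the macro body, not captured compiler output.

```rust
// Approximate expansion of the #[actix_rt::main] example above
// (derived from the quote! block; illustrative only).
fn main() {
    actix_rt::System::new().block_on(async move {
        println!("Hello world");
    })
}
```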


@@ -1,11 +0,0 @@
#[test]
fn compile_macros() {
let t = trybuild::TestCases::new();
t.pass("tests/trybuild/main-01-basic.rs");
t.compile_fail("tests/trybuild/main-02-only-async.rs");
t.pass("tests/trybuild/main-03-fn-params.rs");
t.pass("tests/trybuild/test-01-basic.rs");
t.pass("tests/trybuild/test-02-keep-attrs.rs");
t.compile_fail("tests/trybuild/test-03-only-async.rs");
}


@@ -1,4 +0,0 @@
#[actix_rt::main]
async fn main() {
println!("Hello world");
}


@@ -1,4 +0,0 @@
#[actix_rt::main]
fn main() {
futures_util::future::ready(()).await
}


@@ -1,14 +0,0 @@
error: the async keyword is missing from the function declaration
--> $DIR/main-02-only-async.rs:2:1
|
2 | fn main() {
| ^^
error[E0601]: `main` function not found in crate `$CRATE`
--> $DIR/main-02-only-async.rs:1:1
|
1 | / #[actix_rt::main]
2 | | fn main() {
3 | | futures_util::future::ready(()).await
4 | | }
| |_^ consider adding a `main` function to `$DIR/tests/trybuild/main-02-only-async.rs`


@@ -1,6 +0,0 @@
#[actix_rt::main]
async fn main2(_param: bool) {
futures_util::future::ready(()).await
}
fn main() {}


@@ -1,6 +0,0 @@
#[actix_rt::test]
async fn my_test() {
assert!(true);
}
fn main() {}


@@ -1,7 +0,0 @@
#[actix_rt::test]
#[should_panic]
async fn my_test() {
todo!()
}
fn main() {}


@@ -1,6 +0,0 @@
#[actix_rt::test]
fn my_test() {
futures_util::future::ready(()).await
}
fn main() {}


@@ -1,5 +0,0 @@
error: the async keyword is missing from the function declaration
--> $DIR/test-03-only-async.rs:2:1
|
2 | fn my_test() {
| ^^


@@ -1,67 +0,0 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.2.7 - 2021-02-06
* Add `Router::recognize_checked` [#247]
[#247]: https://github.com/actix/actix-net/pull/247
## 0.2.6 - 2021-01-09
* Use `bytestring` version range compatible with Bytes v1.0. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 0.2.5 - 2020-09-20
* Fix `from_hex()` method
## 0.2.4 - 2019-12-31
* Add `ResourceDef::resource_path_named()` path generation method
## 0.2.3 - 2019-12-25
* Add impl `IntoPattern` for `&String`
## 0.2.2 - 2019-12-25
* Use `IntoPattern` for `RouterBuilder::path()`
## 0.2.1 - 2019-12-25
* Add `IntoPattern` trait
* Add multi-pattern resources
## 0.2.0 - 2019-12-07
* Update http to 0.2
* Update regex to 1.3
* Use bytestring instead of string
## 0.1.5 - 2019-05-15
* Remove debug prints
## 0.1.4 - 2019-05-15
* Fix checked resource match
## 0.1.3 - 2019-04-22
* Added support for `remainder match` (i.e. "/path/{tail}*")
## 0.1.2 - 2019-04-07
* Export `Quoter` type
* Allow to reset `Path` instance
## 0.1.1 - 2019-04-03
* Get dynamic segment by name instead of iterator.
## 0.1.0 - 2019-03-09
* Initial release


@@ -1,29 +0,0 @@
[package]
name = "actix-router"
version = "0.2.7"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Resource path matching library"
keywords = ["actix", "router", "routing"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-router"
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_router"
path = "src/lib.rs"
[features]
default = ["http"]
[dependencies]
regex = "1.3.1"
serde = "1.0.104"
bytestring = ">=0.1.5, <2"
log = "0.4.8"
http = { version = "0.2.2", optional = true }
[dev-dependencies]
http = "0.2.2"
serde_derive = "1.0"


@@ -1 +0,0 @@
../LICENSE-APACHE


@@ -1 +0,0 @@
../LICENSE-MIT


@@ -1,717 +0,0 @@
use serde::de::{self, Deserializer, Error as DeError, Visitor};
use serde::forward_to_deserialize_any;
use crate::path::{Path, PathIter};
use crate::ResourcePath;
macro_rules! unsupported_type {
($trait_fn:ident, $name:expr) => {
fn $trait_fn<V>(self, _: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom(concat!(
"unsupported type: ",
$name
)))
}
};
}
macro_rules! parse_single_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() != 1 {
Err(de::value::Error::custom(
format!("wrong number of parameters: {} expected 1", self.path.len())
.as_str(),
))
} else {
let v = self.path[0].parse().map_err(|_| {
de::value::Error::custom(format!(
"can not parse {:?} to a {}",
&self.path[0], $tp
))
})?;
visitor.$visit_fn(v)
}
}
};
}
pub struct PathDeserializer<'de, T: ResourcePath> {
path: &'de Path<T>,
}
impl<'de, T: ResourcePath + 'de> PathDeserializer<'de, T> {
pub fn new(path: &'de Path<T>) -> Self {
PathDeserializer { path }
}
}
impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T> {
type Error = de::value::Error;
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_map(ParamsDeserializer {
params: self.path.iter(),
current: None,
})
}
fn deserialize_struct<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_map(visitor)
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_unit(visitor)
}
fn deserialize_newtype_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_tuple<V>(self, len: usize, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() < len {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected {}",
self.path.len(),
len
)
.as_str(),
))
} else {
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
}
fn deserialize_tuple_struct<V>(
self,
_: &'static str,
len: usize,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() < len {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected {}",
self.path.len(),
len
)
.as_str(),
))
} else {
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
}
fn deserialize_enum<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.is_empty() {
Err(de::value::Error::custom("expected at least one parameters"))
} else {
visitor.visit_enum(ValueEnum {
value: &self.path[0],
})
}
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() != 1 {
Err(de::value::Error::custom(
format!("wrong number of parameters: {} expected 1", self.path.len()).as_str(),
))
} else {
visitor.visit_str(&self.path[0])
}
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
unsupported_type!(deserialize_any, "'any'");
unsupported_type!(deserialize_bytes, "bytes");
unsupported_type!(deserialize_option, "Option<T>");
unsupported_type!(deserialize_identifier, "identifier");
unsupported_type!(deserialize_ignored_any, "ignored_any");
parse_single_value!(deserialize_bool, visit_bool, "bool");
parse_single_value!(deserialize_i8, visit_i8, "i8");
parse_single_value!(deserialize_i16, visit_i16, "i16");
parse_single_value!(deserialize_i32, visit_i32, "i32");
parse_single_value!(deserialize_i64, visit_i64, "i64");
parse_single_value!(deserialize_u8, visit_u8, "u8");
parse_single_value!(deserialize_u16, visit_u16, "u16");
parse_single_value!(deserialize_u32, visit_u32, "u32");
parse_single_value!(deserialize_u64, visit_u64, "u64");
parse_single_value!(deserialize_f32, visit_f32, "f32");
parse_single_value!(deserialize_f64, visit_f64, "f64");
parse_single_value!(deserialize_string, visit_string, "String");
parse_single_value!(deserialize_byte_buf, visit_string, "String");
parse_single_value!(deserialize_char, visit_char, "char");
}
struct ParamsDeserializer<'de, T: ResourcePath> {
params: PathIter<'de, T>,
current: Option<(&'de str, &'de str)>,
}
impl<'de, T: ResourcePath> de::MapAccess<'de> for ParamsDeserializer<'de, T> {
type Error = de::value::Error;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Self::Error>
where
K: de::DeserializeSeed<'de>,
{
self.current = self.params.next().map(|ref item| (item.0, item.1));
match self.current {
Some((key, _)) => Ok(Some(seed.deserialize(Key { key })?)),
None => Ok(None),
}
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Self::Error>
where
V: de::DeserializeSeed<'de>,
{
if let Some((_, value)) = self.current.take() {
seed.deserialize(Value { value })
} else {
Err(de::value::Error::custom("unexpected item"))
}
}
}
struct Key<'de> {
key: &'de str,
}
impl<'de> Deserializer<'de> for Key<'de> {
type Error = de::value::Error;
fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_str(self.key)
}
fn deserialize_any<V>(self, _visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("Unexpected"))
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string bytes
byte_buf option unit unit_struct newtype_struct seq tuple
tuple_struct map struct enum ignored_any
}
}
macro_rules! parse_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
let v = self.value.parse().map_err(|_| {
de::value::Error::custom(format!("can not parse {:?} to a {}", self.value, $tp))
})?;
visitor.$visit_fn(v)
}
};
}
struct Value<'de> {
value: &'de str,
}
impl<'de> Deserializer<'de> for Value<'de> {
type Error = de::value::Error;
parse_value!(deserialize_bool, visit_bool, "bool");
parse_value!(deserialize_i8, visit_i8, "i8");
parse_value!(deserialize_i16, visit_i16, "i16");
parse_value!(deserialize_i32, visit_i32, "i32");
parse_value!(deserialize_i64, visit_i64, "i64");
parse_value!(deserialize_u8, visit_u8, "u8");
parse_value!(deserialize_u16, visit_u16, "u16");
parse_value!(deserialize_u32, visit_u32, "u32");
parse_value!(deserialize_u64, visit_u64, "u64");
parse_value!(deserialize_f32, visit_f32, "f32");
parse_value!(deserialize_f64, visit_f64, "f64");
parse_value!(deserialize_string, visit_string, "String");
parse_value!(deserialize_byte_buf, visit_string, "String");
parse_value!(deserialize_char, visit_char, "char");
fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_borrowed_bytes(self.value.as_bytes())
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_borrowed_str(self.value)
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_some(self)
}
fn deserialize_enum<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_enum(ValueEnum { value: self.value })
}
fn deserialize_newtype_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_tuple<V>(self, _: usize, _: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: tuple"))
}
fn deserialize_struct<V>(
self,
_: &'static str,
_: &'static [&'static str],
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: struct"))
}
fn deserialize_tuple_struct<V>(
self,
_: &'static str,
_: usize,
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: tuple struct"))
}
unsupported_type!(deserialize_any, "any");
unsupported_type!(deserialize_seq, "seq");
unsupported_type!(deserialize_map, "map");
unsupported_type!(deserialize_identifier, "identifier");
}
struct ParamsSeq<'de, T: ResourcePath> {
params: PathIter<'de, T>,
}
impl<'de, T: ResourcePath> de::SeqAccess<'de> for ParamsSeq<'de, T> {
type Error = de::value::Error;
fn next_element_seed<U>(&mut self, seed: U) -> Result<Option<U::Value>, Self::Error>
where
U: de::DeserializeSeed<'de>,
{
match self.params.next() {
Some(item) => Ok(Some(seed.deserialize(Value { value: item.1 })?)),
None => Ok(None),
}
}
}
struct ValueEnum<'de> {
value: &'de str,
}
impl<'de> de::EnumAccess<'de> for ValueEnum<'de> {
type Error = de::value::Error;
type Variant = UnitVariant;
fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
where
V: de::DeserializeSeed<'de>,
{
Ok((seed.deserialize(Key { key: self.value })?, UnitVariant))
}
}
struct UnitVariant;
impl<'de> de::VariantAccess<'de> for UnitVariant {
type Error = de::value::Error;
fn unit_variant(self) -> Result<(), Self::Error> {
Ok(())
}
fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value, Self::Error>
where
T: de::DeserializeSeed<'de>,
{
Err(de::value::Error::custom("not supported"))
}
fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("not supported"))
}
fn struct_variant<V>(
self,
_: &'static [&'static str],
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("not supported"))
}
}
#[cfg(test)]
mod tests {
use serde::de;
use serde_derive::Deserialize;
use super::*;
use crate::path::Path;
use crate::router::Router;
#[derive(Deserialize)]
struct MyStruct {
key: String,
value: String,
}
#[derive(Deserialize)]
struct Id {
_id: String,
}
#[derive(Debug, Deserialize)]
struct Test1(String, u32);
#[derive(Debug, Deserialize)]
struct Test2 {
key: String,
value: u32,
}
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum TestEnum {
Val1,
Val2,
}
#[derive(Debug, Deserialize)]
struct Test3 {
val: TestEnum,
}
#[test]
fn test_request_extract() {
let mut router = Router::<()>::build();
router.path("/{key}/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/user1/");
assert!(router.recognize(&mut path).is_some());
let s: MyStruct = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.key, "name");
assert_eq!(s.value, "user1");
let s: (String, String) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, "user1");
let mut router = Router::<()>::build();
router.path("/{key}/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/32/");
assert!(router.recognize(&mut path).is_some());
let s: Test1 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, 32);
let s: Test2 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.key, "name");
assert_eq!(s.value, 32);
let s: (String, u8) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, 32);
let res: Vec<String> =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(res[0], "name".to_owned());
assert_eq!(res[1], "32".to_owned());
}
#[test]
fn test_extract_path_single() {
let mut router = Router::<()>::build();
router.path("/{value}/", ());
let router = router.finish();
let mut path = Path::new("/32/");
assert!(router.recognize(&mut path).is_some());
let i: i8 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, 32);
}
#[test]
fn test_extract_enum() {
let mut router = Router::<()>::build();
router.path("/{val}/", ());
let router = router.finish();
let mut path = Path::new("/val1/");
assert!(router.recognize(&mut path).is_some());
let i: TestEnum = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, TestEnum::Val1);
let mut router = Router::<()>::build();
router.path("/{val1}/{val2}/", ());
let router = router.finish();
let mut path = Path::new("/val1/val2/");
assert!(router.recognize(&mut path).is_some());
let i: (TestEnum, TestEnum) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, (TestEnum::Val1, TestEnum::Val2));
}
#[test]
fn test_extract_enum_value() {
let mut router = Router::<()>::build();
router.path("/{val}/", ());
let router = router.finish();
let mut path = Path::new("/val1/");
assert!(router.recognize(&mut path).is_some());
let i: Test3 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i.val, TestEnum::Val1);
let mut path = Path::new("/val3/");
assert!(router.recognize(&mut path).is_some());
let i: Result<Test3, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(i.is_err());
assert!(format!("{:?}", i).contains("unknown variant"));
}
#[test]
fn test_extract_errors() {
let mut router = Router::<()>::build();
router.path("/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/");
assert!(router.recognize(&mut path).is_some());
let s: Result<Test1, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("wrong number of parameters"));
let s: Result<Test2, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("can not parse"));
let s: Result<(String, String), de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("wrong number of parameters"));
let s: Result<u32, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("can not parse"));
}
// #[test]
// fn test_extract_path_decode() {
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
// macro_rules! test_single_value {
// ($value:expr, $expected:expr) => {{
// let req = TestRequest::with_uri($value).finish();
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// assert_eq!(
// *Path::<String>::from_request(&req, &PathConfig::default()).unwrap(),
// $expected
// );
// }};
// }
// test_single_value!("/%25/", "%");
// test_single_value!("/%40%C2%A3%24%25%5E%26%2B%3D/", "@£$%^&+=");
// test_single_value!("/%2B/", "+");
// test_single_value!("/%252B/", "%2B");
// test_single_value!("/%2F/", "/");
// test_single_value!("/%252F/", "%2F");
// test_single_value!(
// "/http%3A%2F%2Flocalhost%3A80%2Ffoo/",
// "http://localhost:80/foo"
// );
// test_single_value!("/%2Fvar%2Flog%2Fsyslog/", "/var/log/syslog");
// test_single_value!(
// "/http%3A%2F%2Flocalhost%3A80%2Ffile%2F%252Fvar%252Flog%252Fsyslog/",
// "http://localhost:80/file/%2Fvar%2Flog%2Fsyslog"
// );
// let req = TestRequest::with_uri("/%25/7/?id=test").finish();
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/")));
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// let s = Path::<Test2>::from_request(&req, &PathConfig::default()).unwrap();
// assert_eq!(s.key, "%");
// assert_eq!(s.value, 7);
// let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap();
// assert_eq!(s.0, "%");
// assert_eq!(s.1, "7");
// }
// #[test]
// fn test_extract_path_no_decode() {
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
// let req = TestRequest::with_uri("/%25/").finish();
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// assert_eq!(
// *Path::<String>::from_request(&req, &&PathConfig::default().disable_decoding())
// .unwrap(),
// "%25"
// );
// }
}
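
As a quick orientation, the deserializer above is normally reached through `Router` matching plus `Path::load` (defined in `path.rs` further down), rather than being constructed directly as the tests do. The sketch below is illustrative only and uses nothing beyond the 0.2-era API that appears in this diff.

```rust
use actix_router::{Path, Router};
use serde_derive::Deserialize;

#[derive(Deserialize)]
struct Params {
    key: String,
    value: u32,
}

fn main() {
    // Build a router with one dynamic resource, as in the tests above.
    let mut builder = Router::<()>::build();
    builder.path("/{key}/{value}/", ());
    let router = builder.finish();

    // Match a concrete path, then extract the captured segments into a struct.
    let mut path = Path::new("/name/32/");
    assert!(router.recognize(&mut path).is_some());

    // Path::load drives PathDeserializer internally.
    let params: Params = path.load().unwrap();
    assert_eq!(params.key, "name");
    assert_eq!(params.value, 32);
}
```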


@@ -1,152 +0,0 @@
//! Resource path matching library.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod de;
mod path;
mod resource;
mod router;
pub use self::de::PathDeserializer;
pub use self::path::Path;
pub use self::resource::ResourceDef;
pub use self::router::{ResourceInfo, Router, RouterBuilder};
pub trait Resource<T: ResourcePath> {
fn resource_path(&mut self) -> &mut Path<T>;
}
pub trait ResourcePath {
fn path(&self) -> &str;
}
impl ResourcePath for String {
fn path(&self) -> &str {
self.as_str()
}
}
impl<'a> ResourcePath for &'a str {
fn path(&self) -> &str {
self
}
}
impl ResourcePath for bytestring::ByteString {
fn path(&self) -> &str {
&*self
}
}
/// Helper trait for types that can be converted into a path pattern
pub trait IntoPattern {
fn is_single(&self) -> bool;
fn patterns(&self) -> Vec<String>;
}
impl IntoPattern for String {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![self.clone()]
}
}
impl<'a> IntoPattern for &'a String {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![self.as_str().to_string()]
}
}
impl<'a> IntoPattern for &'a str {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![(*self).to_string()]
}
}
impl<T: AsRef<str>> IntoPattern for Vec<T> {
fn is_single(&self) -> bool {
self.len() == 1
}
fn patterns(&self) -> Vec<String> {
self.iter().map(|v| v.as_ref().to_string()).collect()
}
}
macro_rules! array_patterns (($tp:ty, $num:tt) => {
impl IntoPattern for [$tp; $num] {
fn is_single(&self) -> bool {
$num == 1
}
fn patterns(&self) -> Vec<String> {
self.iter().map(|v| v.to_string()).collect()
}
}
});
array_patterns!(&str, 1);
array_patterns!(&str, 2);
array_patterns!(&str, 3);
array_patterns!(&str, 4);
array_patterns!(&str, 5);
array_patterns!(&str, 6);
array_patterns!(&str, 7);
array_patterns!(&str, 8);
array_patterns!(&str, 9);
array_patterns!(&str, 10);
array_patterns!(&str, 11);
array_patterns!(&str, 12);
array_patterns!(&str, 13);
array_patterns!(&str, 14);
array_patterns!(&str, 15);
array_patterns!(&str, 16);
array_patterns!(String, 1);
array_patterns!(String, 2);
array_patterns!(String, 3);
array_patterns!(String, 4);
array_patterns!(String, 5);
array_patterns!(String, 6);
array_patterns!(String, 7);
array_patterns!(String, 8);
array_patterns!(String, 9);
array_patterns!(String, 10);
array_patterns!(String, 11);
array_patterns!(String, 12);
array_patterns!(String, 13);
array_patterns!(String, 14);
array_patterns!(String, 15);
array_patterns!(String, 16);
#[cfg(feature = "http")]
mod url;
#[cfg(feature = "http")]
pub use self::url::{Quoter, Url};
#[cfg(feature = "http")]
mod http_support {
use super::ResourcePath;
use http::Uri;
impl ResourcePath for Uri {
fn path(&self) -> &str {
self.path()
}
}
}

View File

@@ -1,222 +0,0 @@
use std::ops::Index;
use serde::de;
use crate::de::PathDeserializer;
use crate::{Resource, ResourcePath};
#[derive(Debug, Clone, Copy)]
pub(crate) enum PathItem {
Static(&'static str),
Segment(u16, u16),
}
/// Resource path match information
///
/// If resource path contains variable patterns, `Path` stores them.
#[derive(Debug)]
pub struct Path<T> {
path: T,
pub(crate) skip: u16,
pub(crate) segments: Vec<(&'static str, PathItem)>,
}
impl<T: Default> Default for Path<T> {
fn default() -> Self {
Path {
path: T::default(),
skip: 0,
segments: Vec::new(),
}
}
}
impl<T: Clone> Clone for Path<T> {
fn clone(&self) -> Self {
Path {
path: self.path.clone(),
skip: self.skip,
segments: self.segments.clone(),
}
}
}
impl<T: ResourcePath> Path<T> {
pub fn new(path: T) -> Path<T> {
Path {
path,
skip: 0,
segments: Vec::new(),
}
}
/// Get reference to inner path instance
#[inline]
pub fn get_ref(&self) -> &T {
&self.path
}
/// Get mutable reference to inner path instance
#[inline]
pub fn get_mut(&mut self) -> &mut T {
&mut self.path
}
/// Returns the path with the first `skip` characters removed.
#[inline]
pub fn path(&self) -> &str {
let skip = self.skip as usize;
let path = self.path.path();
if skip <= path.len() {
&path[skip..]
} else {
""
}
}
/// Set new path
#[inline]
pub fn set(&mut self, path: T) {
self.skip = 0;
self.path = path;
self.segments.clear();
}
/// Reset state
#[inline]
pub fn reset(&mut self) {
self.skip = 0;
self.segments.clear();
}
/// Skip first `n` chars in path
#[inline]
pub fn skip(&mut self, n: u16) {
self.skip += n;
}
pub(crate) fn add(&mut self, name: &'static str, value: PathItem) {
match value {
PathItem::Static(s) => self.segments.push((name, PathItem::Static(s))),
PathItem::Segment(begin, end) => self
.segments
.push((name, PathItem::Segment(self.skip + begin, self.skip + end))),
}
}
#[doc(hidden)]
pub fn add_static(&mut self, name: &'static str, value: &'static str) {
self.segments.push((name, PathItem::Static(value)));
}
/// Returns `true` if there are no matched parameters.
#[inline]
pub fn is_empty(&self) -> bool {
self.segments.is_empty()
}
/// Returns the number of extracted parameters.
#[inline]
pub fn len(&self) -> usize {
self.segments.len()
}
/// Get matched parameter by name without type conversion
pub fn get(&self, key: &str) -> Option<&str> {
for item in self.segments.iter() {
if key == item.0 {
return match item.1 {
PathItem::Static(ref s) => Some(&s),
PathItem::Segment(s, e) => {
Some(&self.path.path()[(s as usize)..(e as usize)])
}
};
}
}
if key == "tail" {
Some(&self.path.path()[(self.skip as usize)..])
} else {
None
}
}
/// Get unprocessed part of the path
pub fn unprocessed(&self) -> &str {
&self.path.path()[(self.skip as usize)..]
}
/// Get matched parameter by name.
///
/// If the keyed parameter is not available, an empty string is used as the
/// default value.
pub fn query(&self, key: &str) -> &str {
if let Some(s) = self.get(key) {
s
} else {
""
}
}
/// Return iterator to items in parameter container
pub fn iter(&self) -> PathIter<'_, T> {
PathIter {
idx: 0,
params: self,
}
}
/// Try to deserialize matching parameters to a specified type `U`
pub fn load<'de, U: serde::Deserialize<'de>>(&'de self) -> Result<U, de::value::Error> {
de::Deserialize::deserialize(PathDeserializer::new(self))
}
}
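// Illustrative sketch (hypothetical helper): after a successful `ResourceDef::match_path`,
// captured segments can be read by name or deserialized into a typed value via `load`.
fn _path_load_example() {
    let def = crate::ResourceDef::new("/user/{id}/{name}");
    let mut path = Path::new("/user/42/bob");
    assert!(def.match_path(&mut path));
    assert_eq!(path.get("id"), Some("42"));
    let (id, name): (u32, String) = path.load().expect("segments deserialize");
    assert_eq!((id, name), (42, "bob".to_owned()));
}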
#[derive(Debug)]
pub struct PathIter<'a, T> {
idx: usize,
params: &'a Path<T>,
}
impl<'a, T: ResourcePath> Iterator for PathIter<'a, T> {
type Item = (&'a str, &'a str);
#[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> {
if self.idx < self.params.len() {
let idx = self.idx;
let res = match self.params.segments[idx].1 {
PathItem::Static(ref s) => &s,
PathItem::Segment(s, e) => &self.params.path.path()[(s as usize)..(e as usize)],
};
self.idx += 1;
return Some((&self.params.segments[idx].0, res));
}
None
}
}
impl<'a, T: ResourcePath> Index<&'a str> for Path<T> {
type Output = str;
fn index(&self, name: &'a str) -> &str {
self.get(name)
.expect("Value for parameter is not available")
}
}
impl<T: ResourcePath> Index<usize> for Path<T> {
type Output = str;
fn index(&self, idx: usize) -> &str {
match self.segments[idx].1 {
PathItem::Static(ref s) => &s,
PathItem::Segment(s, e) => &self.path.path()[(s as usize)..(e as usize)],
}
}
}
impl<T: ResourcePath> Resource<T> for Path<T> {
fn resource_path(&mut self) -> &mut Self {
self
}
}

View File

@@ -1,948 +0,0 @@
use std::cmp::min;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use regex::{escape, Regex, RegexSet};
use crate::path::{Path, PathItem};
use crate::{IntoPattern, Resource, ResourcePath};
const MAX_DYNAMIC_SEGMENTS: usize = 16;
/// ResourceDef describes an entry in the resource table.
///
/// A resource definition can contain at most 16 dynamic segments.
#[derive(Clone, Debug)]
pub struct ResourceDef {
id: u16,
tp: PatternType,
name: String,
pattern: String,
elements: Vec<PatternElement>,
}
#[derive(Debug, Clone, PartialEq)]
enum PatternElement {
Str(String),
Var(String),
}
#[derive(Clone, Debug)]
#[allow(clippy::large_enum_variant)]
enum PatternType {
Static(String),
Prefix(String),
Dynamic(Regex, Vec<&'static str>, usize),
DynamicSet(RegexSet, Vec<(Regex, Vec<&'static str>, usize)>),
}
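// Note: `Static` compares the whole path verbatim, `Prefix` matches up to a segment
// boundary, `Dynamic` holds one compiled regex plus its capture-group names and the
// length of its trailing static suffix, and `DynamicSet` bundles several such
// patterns behind a single `RegexSet` for multi-pattern `ResourceDef`s.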
impl ResourceDef {
/// Parse path pattern and create new `Pattern` instance.
///
/// Panics if path pattern is malformed.
pub fn new<T: IntoPattern>(path: T) -> Self {
if path.is_single() {
let patterns = path.patterns();
ResourceDef::with_prefix(&patterns[0], false)
} else {
let set = path.patterns();
let mut data = Vec::new();
let mut re_set = Vec::new();
for path in set {
let (pattern, _, _, len) = ResourceDef::parse(&path, false);
let re = match Regex::new(&pattern) {
Ok(re) => re,
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
};
// actix creates one router per thread
let names: Vec<_> = re
.capture_names()
.filter_map(|name| {
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
})
.collect();
data.push((re, names, len));
re_set.push(pattern);
}
ResourceDef {
id: 0,
tp: PatternType::DynamicSet(RegexSet::new(re_set).unwrap(), data),
elements: Vec::new(),
name: String::new(),
pattern: "".to_owned(),
}
}
}
/// Parse path pattern and create new `Pattern` instance.
///
/// Use `prefix` type instead of `static`.
///
/// Panics if path regex pattern is malformed.
pub fn prefix(path: &str) -> Self {
ResourceDef::with_prefix(path, true)
}
/// Parse path pattern and create new `Pattern` instance.
/// Inserts `/` at the beginning of the pattern.
///
/// Use `prefix` type instead of `static`.
///
/// Panics if path regex pattern is malformed.
pub fn root_prefix(path: &str) -> Self {
ResourceDef::with_prefix(&insert_slash(path), true)
}
/// Resource id
pub fn id(&self) -> u16 {
self.id
}
/// Set resource id
pub fn set_id(&mut self, id: u16) {
self.id = id;
}
/// Parse path pattern and create new `Pattern` instance with custom prefix
fn with_prefix(path: &str, for_prefix: bool) -> Self {
let path = path.to_owned();
let (pattern, elements, is_dynamic, len) = ResourceDef::parse(&path, for_prefix);
let tp = if is_dynamic {
let re = match Regex::new(&pattern) {
Ok(re) => re,
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
};
// actix creates one router per thread
let names = re
.capture_names()
.filter_map(|name| {
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
})
.collect();
PatternType::Dynamic(re, names, len)
} else if for_prefix {
PatternType::Prefix(pattern)
} else {
PatternType::Static(pattern)
};
ResourceDef {
tp,
elements,
id: 0,
name: String::new(),
pattern: path,
}
}
/// Resource pattern name
pub fn name(&self) -> &str {
&self.name
}
/// Mutable reference to a name of a resource definition.
pub fn name_mut(&mut self) -> &mut String {
&mut self.name
}
/// Path pattern of the resource
pub fn pattern(&self) -> &str {
&self.pattern
}
/// Check if path matches this pattern.
#[inline]
pub fn is_match(&self, path: &str) -> bool {
match self.tp {
PatternType::Static(ref s) => s == path,
PatternType::Prefix(ref s) => path.starts_with(s),
PatternType::Dynamic(ref re, _, _) => re.is_match(path),
PatternType::DynamicSet(ref re, _) => re.is_match(path),
}
}
/// Checks whether the given path is a prefix match against this resource, returning
/// the matched length if so.
pub fn is_prefix_match(&self, path: &str) -> Option<usize> {
let p_len = path.len();
let path = if path.is_empty() { "/" } else { path };
match self.tp {
PatternType::Static(ref s) => {
if s == path {
Some(p_len)
} else {
None
}
}
PatternType::Dynamic(ref re, _, len) => {
if let Some(captures) = re.captures(path) {
let mut pos = 0;
let mut passed = false;
for capture in captures.iter() {
if let Some(ref m) = capture {
if !passed {
passed = true;
continue;
}
pos = m.end();
}
}
Some(pos + len)
} else {
None
}
}
PatternType::Prefix(ref s) => {
let len = if path == s {
s.len()
} else if path.starts_with(s)
&& (s.ends_with('/') || path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return None;
};
Some(min(p_len, len))
}
PatternType::DynamicSet(ref re, ref params) => {
if let Some(idx) = re.matches(path).into_iter().next() {
let (ref pattern, _, len) = params[idx];
if let Some(captures) = pattern.captures(path) {
let mut pos = 0;
let mut passed = false;
for capture in captures.iter() {
if let Some(ref m) = capture {
if !passed {
passed = true;
continue;
}
pos = m.end();
}
}
Some(pos + len)
} else {
None
}
} else {
None
}
}
}
}
/// Is the given path and parameters a match against this pattern?
pub fn match_path<T: ResourcePath>(&self, path: &mut Path<T>) -> bool {
match self.tp {
PatternType::Static(ref s) => {
if s == path.path() {
path.skip(path.len() as u16);
true
} else {
false
}
}
PatternType::Prefix(ref s) => {
let r_path = path.path();
let len = if s == r_path {
s.len()
} else if r_path.starts_with(s)
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return false;
};
let r_path_len = r_path.len();
path.skip(min(r_path_len, len) as u16);
true
}
PatternType::Dynamic(ref re, ref names, len) => {
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = re.captures(path.path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
}
PatternType::DynamicSet(ref re, ref params) => {
if let Some(idx) = re.matches(path.path()).into_iter().next() {
let (ref pattern, ref names, len) = params[idx];
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = pattern.captures(path.path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] =
PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
} else {
false
}
}
}
}
/// Is the given path and parameters a match against this pattern?
pub fn match_path_checked<R, T, F, U>(
&self,
res: &mut R,
check: &F,
user_data: &Option<U>,
) -> bool
where
T: ResourcePath,
R: Resource<T>,
F: Fn(&R, &Option<U>) -> bool,
{
match self.tp {
PatternType::Static(ref s) => {
if s == res.resource_path().path() && check(res, user_data) {
let path = res.resource_path();
path.skip(path.len() as u16);
true
} else {
false
}
}
PatternType::Prefix(ref s) => {
let len = {
let r_path = res.resource_path().path();
if s == r_path {
s.len()
} else if r_path.starts_with(s)
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return false;
}
};
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
path.skip(min(path.path().len(), len) as u16);
true
}
PatternType::Dynamic(ref re, ref names, len) => {
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = re.captures(res.resource_path().path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
}
PatternType::DynamicSet(ref re, ref params) => {
let path = res.resource_path().path();
if let Some(idx) = re.matches(path).into_iter().next() {
let (ref pattern, ref names, len) = params[idx];
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = pattern.captures(path) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] =
PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
} else {
false
}
}
}
}
/// Build resource path from elements. Returns `true` on success.
pub fn resource_path<U, I>(&self, path: &mut String, elements: &mut U) -> bool
where
U: Iterator<Item = I>,
I: AsRef<str>,
{
match self.tp {
PatternType::Prefix(ref p) => path.push_str(p),
PatternType::Static(ref p) => path.push_str(p),
PatternType::Dynamic(..) => {
for el in &self.elements {
match *el {
PatternElement::Str(ref s) => path.push_str(s),
PatternElement::Var(_) => {
if let Some(val) = elements.next() {
path.push_str(val.as_ref())
} else {
return false;
}
}
}
}
}
PatternType::DynamicSet(..) => {
return false;
}
}
true
}
/// Build resource path from elements. Returns `true` on success.
pub fn resource_path_named<K, V, S>(
&self,
path: &mut String,
elements: &HashMap<K, V, S>,
) -> bool
where
K: std::borrow::Borrow<str> + Eq + Hash,
V: AsRef<str>,
S: std::hash::BuildHasher,
{
match self.tp {
PatternType::Prefix(ref p) => path.push_str(p),
PatternType::Static(ref p) => path.push_str(p),
PatternType::Dynamic(..) => {
for el in &self.elements {
match *el {
PatternElement::Str(ref s) => path.push_str(s),
PatternElement::Var(ref name) => {
if let Some(val) = elements.get(name) {
path.push_str(val.as_ref())
} else {
return false;
}
}
}
}
}
PatternType::DynamicSet(..) => {
return false;
}
}
true
}
fn parse_param(pattern: &str) -> (PatternElement, String, &str, bool) {
const DEFAULT_PATTERN: &str = "[^/]+";
const DEFAULT_PATTERN_TAIL: &str = ".*";
let mut params_nesting = 0usize;
let close_idx = pattern
.find(|c| match c {
'{' => {
params_nesting += 1;
false
}
'}' => {
params_nesting -= 1;
params_nesting == 0
}
_ => false,
})
.expect("malformed dynamic segment");
let (mut param, mut rem) = pattern.split_at(close_idx + 1);
param = &param[1..param.len() - 1]; // Remove outer brackets
let tail = rem == "*";
let (name, pattern) = match param.find(':') {
Some(idx) => {
if tail {
panic!("Custom regex is not supported for remainder match");
}
let (name, pattern) = param.split_at(idx);
(name, &pattern[1..])
}
None => (
param,
if tail {
rem = &rem[1..];
DEFAULT_PATTERN_TAIL
} else {
DEFAULT_PATTERN
},
),
};
(
PatternElement::Var(name.to_string()),
format!(r"(?P<{}>{})", &name, &pattern),
rem,
tail,
)
}
fn parse(
mut pattern: &str,
mut for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
if pattern.find('{').is_none() {
// TODO: MSRV: 1.45
#[allow(clippy::manual_strip)]
return if pattern.ends_with('*') {
let path = &pattern[..pattern.len() - 1];
let re = String::from("^") + path + "(.*)";
(re, vec![PatternElement::Str(String::from(path))], true, 0)
} else {
(
String::from(pattern),
vec![PatternElement::Str(String::from(pattern))],
false,
pattern.chars().count(),
)
};
}
let mut elements = Vec::new();
let mut re = String::from("^");
let mut dyn_elements = 0;
while let Some(idx) = pattern.find('{') {
let (prefix, rem) = pattern.split_at(idx);
elements.push(PatternElement::Str(String::from(prefix)));
re.push_str(&escape(prefix));
let (param_pattern, re_part, rem, tail) = Self::parse_param(rem);
if tail {
for_prefix = true;
}
elements.push(param_pattern);
re.push_str(&re_part);
pattern = rem;
dyn_elements += 1;
}
elements.push(PatternElement::Str(String::from(pattern)));
re.push_str(&escape(pattern));
if dyn_elements > MAX_DYNAMIC_SEGMENTS {
panic!(
"Only {} dynamic segments are allowed, provided: {}",
MAX_DYNAMIC_SEGMENTS, dyn_elements
);
}
if !for_prefix {
re.push('$');
}
(re, elements, true, pattern.chars().count())
}
}
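// For example, parse("/user/{id}", false) yields the regex "^/user/(?P<id>[^/]+)$"
// with elements [Str("/user/"), Var("id"), Str("")], while a trailing "{name}*"
// segment switches to the ".*" pattern and forces prefix-style matching (no "$" anchor).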
impl Eq for ResourceDef {}
impl PartialEq for ResourceDef {
fn eq(&self, other: &ResourceDef) -> bool {
self.pattern == other.pattern
}
}
impl Hash for ResourceDef {
fn hash<H: Hasher>(&self, state: &mut H) {
self.pattern.hash(state);
}
}
impl<'a> From<&'a str> for ResourceDef {
fn from(path: &'a str) -> ResourceDef {
ResourceDef::new(path)
}
}
impl From<String> for ResourceDef {
fn from(path: String) -> ResourceDef {
ResourceDef::new(path)
}
}
pub(crate) fn insert_slash(path: &str) -> String {
let mut path = path.to_owned();
if !path.is_empty() && !path.starts_with('/') {
path.insert(0, '/');
};
path
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_static() {
let re = ResourceDef::new("/");
assert!(re.is_match("/"));
assert!(!re.is_match("/a"));
let re = ResourceDef::new("/name");
assert!(re.is_match("/name"));
assert!(!re.is_match("/name1"));
assert!(!re.is_match("/name/"));
assert!(!re.is_match("/name~"));
assert_eq!(re.is_prefix_match("/name"), Some(5));
assert_eq!(re.is_prefix_match("/name1"), None);
assert_eq!(re.is_prefix_match("/name/"), None);
assert_eq!(re.is_prefix_match("/name~"), None);
let re = ResourceDef::new("/name/");
assert!(re.is_match("/name/"));
assert!(!re.is_match("/name"));
assert!(!re.is_match("/name/gs"));
let re = ResourceDef::new("/user/profile");
assert!(re.is_match("/user/profile"));
assert!(!re.is_match("/user/profile/profile"));
}
#[test]
fn test_parse_param() {
let re = ResourceDef::new("/user/{id}");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let mut path = Path::new("/user/profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/1245125");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "1245125");
let re = ResourceDef::new("/v{version}/resource/{id}");
assert!(re.is_match("/v1/resource/320120"));
assert!(!re.is_match("/v/resource/1"));
assert!(!re.is_match("/resource"));
let mut path = Path::new("/v151/resource/adage32");
assert!(re.match_path(&mut path));
assert_eq!(path.get("version").unwrap(), "151");
assert_eq!(path.get("id").unwrap(), "adage32");
let re = ResourceDef::new("/{id:[[:digit:]]{6}}");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let mut path = Path::new("/012345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "012345");
}
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_dynamic_set() {
let re = ResourceDef::new(vec![
"/user/{id}",
"/v{version}/resource/{id}",
"/{id:[[:digit:]]{6}}",
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let mut path = Path::new("/user/profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/1245125");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "1245125");
assert!(re.is_match("/v1/resource/320120"));
assert!(!re.is_match("/v/resource/1"));
assert!(!re.is_match("/resource"));
let mut path = Path::new("/v151/resource/adage32");
assert!(re.match_path(&mut path));
assert_eq!(path.get("version").unwrap(), "151");
assert_eq!(path.get("id").unwrap(), "adage32");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let mut path = Path::new("/012345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "012345");
let re = ResourceDef::new([
"/user/{id}",
"/v{version}/resource/{id}",
"/{id:[[:digit:]]{6}}",
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let re = ResourceDef::new([
"/user/{id}".to_string(),
"/v{version}/resource/{id}".to_string(),
"/{id:[[:digit:]]{6}}".to_string(),
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
}
#[test]
fn test_parse_tail() {
let re = ResourceDef::new("/user/-{id}*");
let mut path = Path::new("/user/-profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/-2345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let mut path = Path::new("/user/-2345/");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345/");
let mut path = Path::new("/user/-2345/sdg");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345/sdg");
}
#[test]
fn test_static_tail() {
let re = ResourceDef::new("/user*");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(re.is_match("/user/2345/"));
assert!(re.is_match("/user/2345/sdg"));
let re = ResourceDef::new("/user/*");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(re.is_match("/user/2345/"));
assert!(re.is_match("/user/2345/sdg"));
}
#[cfg(feature = "http")]
#[test]
fn test_parse_urlencoded_param() {
use std::convert::TryFrom;
let re = ResourceDef::new("/user/{id}/test");
let mut path = Path::new("/user/2345/test");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let mut path = Path::new("/user/qwe%25/test");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%25");
let uri = http::Uri::try_from("/user/qwe%25/test").unwrap();
let mut path = Path::new(uri);
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%25");
}
#[test]
fn test_resource_prefix() {
let re = ResourceDef::prefix("/name");
assert!(re.is_match("/name"));
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/test/test"));
assert!(re.is_match("/name1"));
assert!(re.is_match("/name~"));
assert_eq!(re.is_prefix_match("/name"), Some(5));
assert_eq!(re.is_prefix_match("/name/"), Some(5));
assert_eq!(re.is_prefix_match("/name/test/test"), Some(5));
assert_eq!(re.is_prefix_match("/name1"), None);
assert_eq!(re.is_prefix_match("/name~"), None);
let re = ResourceDef::prefix("/name/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
let re = ResourceDef::root_prefix("name/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
}
#[test]
fn test_resource_prefix_dynamic() {
let re = ResourceDef::prefix("/{name}/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
assert_eq!(re.is_prefix_match("/name/"), Some(6));
assert_eq!(re.is_prefix_match("/name/gs"), Some(6));
assert_eq!(re.is_prefix_match("/name"), None);
let mut path = Path::new("/test2/");
assert!(re.match_path(&mut path));
assert_eq!(&path["name"], "test2");
assert_eq!(&path[0], "test2");
let mut path = Path::new("/test2/subpath1/subpath2/index.html");
assert!(re.match_path(&mut path));
assert_eq!(&path["name"], "test2");
assert_eq!(&path[0], "test2");
}
#[test]
fn test_resource_path() {
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/test");
assert!(resource.resource_path(&mut s, &mut (&["user1"]).iter()));
assert_eq!(s, "/user/user1/test");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}/test");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/test");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}/");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/");
let mut s = String::new();
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
let mut s = String::new();
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/");
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
let mut s = String::new();
assert!(resource.resource_path(&mut s, &mut vec!["item", "item2"].into_iter()));
assert_eq!(s, "/user/item/item2/");
let mut map = HashMap::new();
map.insert("item1", "item");
let mut s = String::new();
assert!(!resource.resource_path_named(&mut s, &map));
let mut s = String::new();
map.insert("item2", "item2");
assert!(resource.resource_path_named(&mut s, &map));
assert_eq!(s, "/user/item/item2/");
}
}

View File

@@ -1,259 +0,0 @@
use crate::{IntoPattern, Resource, ResourceDef, ResourcePath};
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ResourceId(pub u16);
/// Information about current resource
#[derive(Clone, Debug)]
pub struct ResourceInfo {
resource: ResourceId,
}
/// Resource router.
pub struct Router<T, U = ()>(Vec<(ResourceDef, T, Option<U>)>);
impl<T, U> Router<T, U> {
pub fn build() -> RouterBuilder<T, U> {
RouterBuilder {
resources: Vec::new(),
}
}
pub fn recognize<R, P>(&self, resource: &mut R) -> Option<(&T, ResourceId)>
where
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter() {
if item.0.match_path(resource.resource_path()) {
return Some((&item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_mut<R, P>(&mut self, resource: &mut R) -> Option<(&mut T, ResourceId)>
where
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter_mut() {
if item.0.match_path(resource.resource_path()) {
return Some((&mut item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_checked<R, P, F>(
&self,
resource: &mut R,
check: F,
) -> Option<(&T, ResourceId)>
where
F: Fn(&R, &Option<U>) -> bool,
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter() {
if item.0.match_path_checked(resource, &check, &item.2) {
return Some((&item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_mut_checked<R, P, F>(
&mut self,
resource: &mut R,
check: F,
) -> Option<(&mut T, ResourceId)>
where
F: Fn(&R, &Option<U>) -> bool,
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter_mut() {
if item.0.match_path_checked(resource, &check, &item.2) {
return Some((&mut item.1, ResourceId(item.0.id())));
}
}
None
}
}
pub struct RouterBuilder<T, U = ()> {
resources: Vec<(ResourceDef, T, Option<U>)>,
}
impl<T, U> RouterBuilder<T, U> {
/// Register resource for specified path.
pub fn path<P: IntoPattern>(
&mut self,
path: P,
resource: T,
) -> &mut (ResourceDef, T, Option<U>) {
self.resources
.push((ResourceDef::new(path), resource, None));
self.resources.last_mut().unwrap()
}
/// Register resource for specified path prefix.
pub fn prefix(&mut self, prefix: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) {
self.resources
.push((ResourceDef::prefix(prefix), resource, None));
self.resources.last_mut().unwrap()
}
/// Register resource for ResourceDef
pub fn rdef(&mut self, rdef: ResourceDef, resource: T) -> &mut (ResourceDef, T, Option<U>) {
self.resources.push((rdef, resource, None));
self.resources.last_mut().unwrap()
}
/// Finish configuration and create router instance.
pub fn finish(self) -> Router<T, U> {
Router(self.resources)
}
}
#[cfg(test)]
mod tests {
use crate::path::Path;
use crate::router::{ResourceId, Router};
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_recognizer_1() {
let mut router = Router::<usize>::build();
router.path("/name", 10).0.set_id(0);
router.path("/name/{val}", 11).0.set_id(1);
router.path("/name/{val}/index.html", 12).0.set_id(2);
router.path("/file/{file}.{ext}", 13).0.set_id(3);
router.path("/v{val}/{val2}/index.html", 14).0.set_id(4);
router.path("/v/{tail:.*}", 15).0.set_id(5);
router.path("/test2/{test}.html", 16).0.set_id(6);
router.path("/{test}/index.html", 17).0.set_id(7);
let mut router = router.finish();
let mut path = Path::new("/unknown");
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/name");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
assert_eq!(info, ResourceId(0));
assert!(path.is_empty());
let mut path = Path::new("/name/value");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(info, ResourceId(1));
assert_eq!(path.get("val").unwrap(), "value");
assert_eq!(&path["val"], "value");
let mut path = Path::new("/name/value2/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 12);
assert_eq!(info, ResourceId(2));
assert_eq!(path.get("val").unwrap(), "value2");
let mut path = Path::new("/file/file.gz");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 13);
assert_eq!(info, ResourceId(3));
assert_eq!(path.get("file").unwrap(), "file");
assert_eq!(path.get("ext").unwrap(), "gz");
let mut path = Path::new("/vtest/ttt/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 14);
assert_eq!(info, ResourceId(4));
assert_eq!(path.get("val").unwrap(), "test");
assert_eq!(path.get("val2").unwrap(), "ttt");
let mut path = Path::new("/v/blah-blah/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 15);
assert_eq!(info, ResourceId(5));
assert_eq!(path.get("tail").unwrap(), "blah-blah/index.html");
let mut path = Path::new("/test2/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 16);
assert_eq!(info, ResourceId(6));
assert_eq!(path.get("test").unwrap(), "index");
let mut path = Path::new("/bbb/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 17);
assert_eq!(info, ResourceId(7));
assert_eq!(path.get("test").unwrap(), "bbb");
}
#[test]
fn test_recognizer_2() {
let mut router = Router::<usize>::build();
router.path("/index.json", 10);
router.path("/{source}.json", 11);
let mut router = router.finish();
let mut path = Path::new("/index.json");
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test.json");
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
}
#[test]
fn test_recognizer_with_prefix() {
let mut router = Router::<usize>::build();
router.path("/name", 10).0.set_id(0);
router.path("/name/{val}", 11).0.set_id(1);
let mut router = router.finish();
let mut path = Path::new("/name");
path.skip(5);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test/name");
path.skip(5);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test/name/value");
path.skip(5);
let (h, id) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(id, ResourceId(1));
assert_eq!(path.get("val").unwrap(), "value");
assert_eq!(&path["val"], "value");
// same patterns
let mut router = Router::<usize>::build();
router.path("/name", 10);
router.path("/name/{val}", 11);
let mut router = router.finish();
let mut path = Path::new("/name");
path.skip(6);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test2/name");
path.skip(6);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test2/name-test");
path.skip(6);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test2/name/ttt");
path.skip(6);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(&path["val"], "ttt");
}
}

View File

@@ -1,249 +0,0 @@
use crate::ResourcePath;
#[allow(dead_code)]
const GEN_DELIMS: &[u8] = b":/?#[]@";
#[allow(dead_code)]
const SUB_DELIMS_WITHOUT_QS: &[u8] = b"!$'()*,";
#[allow(dead_code)]
const SUB_DELIMS: &[u8] = b"!$'()*,+?=;";
#[allow(dead_code)]
const RESERVED: &[u8] = b":/?#[]@!$'()*,+?=;";
#[allow(dead_code)]
const UNRESERVED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
-._~";
const ALLOWED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
-._~
!$'()*,";
const QS: &[u8] = b"+&=;b";
#[inline]
fn bit_at(array: &[u8], ch: u8) -> bool {
array[(ch >> 3) as usize] & (1 << (ch & 7)) != 0
}
#[inline]
fn set_bit(array: &mut [u8], ch: u8) {
array[(ch >> 3) as usize] |= 1 << (ch & 7)
}
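// Note: the 16-byte tables below act as 128-bit ASCII bitmaps; `ch >> 3` selects the
// byte and `1 << (ch & 7)` the bit within it, so membership checks in `Quoter` cost a
// single shift-and-mask.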
thread_local! {
static DEFAULT_QUOTER: Quoter = Quoter::new(b"@:", b"/+");
}
#[derive(Default, Clone, Debug)]
pub struct Url {
uri: http::Uri,
path: Option<String>,
}
impl Url {
pub fn new(uri: http::Uri) -> Url {
let path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
Url { uri, path }
}
pub fn with_quoter(uri: http::Uri, quoter: &Quoter) -> Url {
Url {
path: quoter.requote(uri.path().as_bytes()),
uri,
}
}
pub fn uri(&self) -> &http::Uri {
&self.uri
}
pub fn path(&self) -> &str {
if let Some(ref s) = self.path {
s
} else {
self.uri.path()
}
}
#[inline]
pub fn update(&mut self, uri: &http::Uri) {
self.uri = uri.clone();
self.path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
}
#[inline]
pub fn update_with_quoter(&mut self, uri: &http::Uri, quoter: &Quoter) {
self.uri = uri.clone();
self.path = quoter.requote(uri.path().as_bytes());
}
}
impl ResourcePath for Url {
#[inline]
fn path(&self) -> &str {
self.path()
}
}
pub struct Quoter {
safe_table: [u8; 16],
protected_table: [u8; 16],
}
impl Quoter {
pub fn new(safe: &[u8], protected: &[u8]) -> Quoter {
let mut q = Quoter {
safe_table: [0; 16],
protected_table: [0; 16],
};
// prepare safe table
for i in 0..128 {
if ALLOWED.contains(&i) {
set_bit(&mut q.safe_table, i);
}
if QS.contains(&i) {
set_bit(&mut q.safe_table, i);
}
}
for ch in safe {
set_bit(&mut q.safe_table, *ch)
}
// prepare protected table
for ch in protected {
set_bit(&mut q.safe_table, *ch);
set_bit(&mut q.protected_table, *ch);
}
q
}
pub fn requote(&self, val: &[u8]) -> Option<String> {
let mut has_pct = 0;
let mut pct = [b'%', 0, 0];
let mut idx = 0;
let mut cloned: Option<Vec<u8>> = None;
let len = val.len();
while idx < len {
let ch = val[idx];
if has_pct != 0 {
pct[has_pct] = val[idx];
has_pct += 1;
if has_pct == 3 {
has_pct = 0;
let buf = cloned.as_mut().unwrap();
if let Some(ch) = restore_ch(pct[1], pct[2]) {
if ch < 128 {
if bit_at(&self.protected_table, ch) {
buf.extend_from_slice(&pct);
idx += 1;
continue;
}
if bit_at(&self.safe_table, ch) {
buf.push(ch);
idx += 1;
continue;
}
}
buf.push(ch);
} else {
buf.extend_from_slice(&pct[..]);
}
}
} else if ch == b'%' {
has_pct = 1;
if cloned.is_none() {
let mut c = Vec::with_capacity(len);
c.extend_from_slice(&val[..idx]);
cloned = Some(c);
}
} else if let Some(ref mut cloned) = cloned {
cloned.push(ch)
}
idx += 1;
}
if let Some(data) = cloned {
// Unsafe: we get data from http::Uri, which does utf-8 checks already
// this code only decodes valid pct encoded values
Some(unsafe { String::from_utf8_unchecked(data) })
} else {
None
}
}
}
#[inline]
fn from_hex(v: u8) -> Option<u8> {
if (b'0'..=b'9').contains(&v) {
Some(v - 0x30) // ord('0') == 0x30
} else if (b'A'..=b'F').contains(&v) {
Some(v - 0x41 + 10) // ord('A') == 0x41
} else if (b'a'..=b'f').contains(&v) {
Some(v - 0x61 + 10) // ord('a') == 0x61
} else {
None
}
}
#[inline]
fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| d1 << 4 | d2))
}
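// For example, the escape "%2F" decodes as restore_ch(b'2', b'F') == Some(0x2F), i.e.
// '/'; because '/' is in the default quoter's protected set (`Quoter::new(b"@:", b"/+")`),
// `requote` keeps it percent-encoded instead of turning it into a path separator.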
#[cfg(test)]
mod tests {
use http::Uri;
use std::convert::TryFrom;
use super::*;
use crate::{Path, ResourceDef};
#[test]
fn test_parse_url() {
let re = ResourceDef::new("/user/{id}/test");
let url = Uri::try_from("/user/2345/test").unwrap();
let mut path = Path::new(Url::new(url));
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let url = Uri::try_from("/user/qwe%25/test").unwrap();
let mut path = Path::new(Url::new(url));
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%");
let url = Uri::try_from("/user/qwe%25rty/test").unwrap();
let mut path = Path::new(Url::new(url));
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%rty");
}
#[test]
fn test_from_hex() {
let hex = b"0123456789abcdefABCDEF";
for i in 0..256 {
let c = i as u8;
if hex.contains(&c) {
assert!(from_hex(c).is_some())
} else {
assert!(from_hex(c).is_none())
}
}
let expected = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15,
];
for i in 0..hex.len() {
assert_eq!(from_hex(hex[i]).unwrap(), expected[i]);
}
}
}

View File

@@ -1,197 +0,0 @@
# Changes
## Unreleased - 2021-xx-xx
* Add `ActixStream` extension trait to include readiness methods. [#276]
* Re-export `tokio::net::TcpSocket` in `net` module [#282]
[#276]: https://github.com/actix/actix-net/pull/276
[#282]: https://github.com/actix/actix-net/pull/282
## 2.0.2 - 2021-02-06
* Add `Arbiter::handle` to get a handle of an owned Arbiter. [#274]
* Add `System::try_current` for situations where actix may or may not be running a System. [#275]
[#274]: https://github.com/actix/actix-net/pull/274
[#275]: https://github.com/actix/actix-net/pull/275
## 2.0.1 - 2021-02-06
* Expose `JoinError` from Tokio. [#271]
[#271]: https://github.com/actix/actix-net/pull/271
## 2.0.0 - 2021-02-02
* Remove all Arbiter-local storage methods. [#262]
* Re-export `tokio::pin`. [#262]
[#262]: https://github.com/actix/actix-net/pull/262
## 2.0.0-beta.3 - 2021-01-31
* Remove `run_in_tokio`, `attach_to_tokio` and `AsyncSystemRunner`. [#253]
* Return `JoinHandle` from `actix_rt::spawn`. [#253]
* Remove old `Arbiter::spawn`. Implementation is now inlined into `actix_rt::spawn`. [#253]
* Rename `Arbiter::{send => spawn}` and `Arbiter::{exec_fn => spawn_fn}`. [#253]
* Remove `Arbiter::exec`. [#253]
* Remove deprecated `Arbiter::local_join` and `Arbiter::is_running`. [#253]
* `Arbiter::spawn` now accepts !Unpin futures. [#256]
* `System::new` no longer takes arguments. [#257]
* Remove `System::with_current`. [#257]
* Remove `Builder`. [#257]
* Add `System::with_init` as replacement for `Builder::run`. [#257]
* Rename `System::{is_set => is_registered}`. [#257]
* Add `ArbiterHandle` for sending messages to non-current-thread arbiters. [#257].
* `System::arbiter` now returns an `&ArbiterHandle`. [#257]
* `Arbiter::current` now returns an `ArbiterHandle` instead. [#257]
* `Arbiter::join` now takes self by value. [#257]
[#253]: https://github.com/actix/actix-net/pull/253
[#254]: https://github.com/actix/actix-net/pull/254
[#256]: https://github.com/actix/actix-net/pull/256
[#257]: https://github.com/actix/actix-net/pull/257
## 2.0.0-beta.2 - 2021-01-09
* Add `task` mod with re-export of `tokio::task::{spawn_blocking, yield_now, JoinHandle}` [#245]
* Add default "macros" feature to allow faster compile times when using `default-features=false`.
[#245]: https://github.com/actix/actix-net/pull/245
## 2.0.0-beta.1 - 2020-12-28
### Added
* Add `System::attach_to_tokio` method. [#173]
### Changed
* Update `tokio` dependency to `1.0`. [#236]
* Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep`
to stay aligned with Tokio's naming. [#236]
* Remove `'static` lifetime requirement for `Runtime::block_on` and `SystemRunner::block_on`.
* These methods now accept `&self` when calling. [#236]
* Remove `'static` lifetime requirement for `System::run` and `Builder::run`. [#236]
* `Arbiter::spawn` now panics when `System` is not in scope. [#207]
### Fixed
* Fix work load issue by removing `PENDING` thread local. [#207]
[#207]: https://github.com/actix/actix-net/pull/207
[#236]: https://github.com/actix/actix-net/pull/236
## [1.1.1] - 2020-04-30
### Fixed
* Fix memory leak due to [#94] (see [#129] for more detail)
[#129]: https://github.com/actix/actix-net/issues/129
## [1.1.0] - 2020-04-08
**This version has been yanked.**
### Added
* Expose `System::is_set` to check if the current system has been started. [#99]
* Add `Arbiter::is_running` to check if event loop is running [#124]
* Add `Arbiter::local_join` associated function to be able to `await` spawned futures. [#94]
[#94]: https://github.com/actix/actix-net/pull/94
[#99]: https://github.com/actix/actix-net/pull/99
[#124]: https://github.com/actix/actix-net/pull/124
## [1.0.0] - 2019-12-11
* Update dependencies
## [1.0.0-alpha.3] - 2019-12-07
### Fixed
* Fix compilation on non-unix platforms
### Changed
* Migrate to tokio 0.2
## [1.0.0-alpha.2] - 2019-12-02
### Added
* Export `main` and `test` attribute macros
* Export `time` module (re-export of tokio-timer)
* Export `net` module (re-export of tokio-net)
## [1.0.0-alpha.1] - 2019-11-22
### Changed
* Migrate to std::future and tokio 0.2
## [0.2.6] - 2019-11-14
### Fixed
* Fix arbiter's thread panic message.
### Added
* Allow joining the arbiter's thread. #60
## [0.2.5] - 2019-09-02
### Added
* Add arbiter specific storage
## [0.2.4] - 2019-07-17
### Changed
* Avoid a copy of the Future when initializing the Box. #29
## [0.2.3] - 2019-06-22
### Added
* Allow starting a System using an existing CurrentThread Handle. #22
## [0.2.2] - 2019-03-28
### Changed
* Moved `blocking` module to `actix-threadpool` crate
## [0.2.1] - 2019-03-11
### Added
* Added `blocking` module
* Arbiter::exec_fn - execute fn on the arbiter's thread
* Arbiter::exec - execute fn on the arbiter's thread and wait result
## [0.2.0] - 2019-03-06
* `run` method returns `io::Result<()>`
* Removed `Handle`
## [0.1.0] - 2018-12-09
* Initial release

View File

@@ -1,33 +0,0 @@
[package]
name = "actix-rt"
version = "2.0.2"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
keywords = ["async", "futures", "io", "runtime"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-rt"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_rt"
path = "src/lib.rs"
[features]
default = ["macros"]
macros = ["actix-macros"]
[dependencies]
actix-macros = { version = "0.2.0", optional = true }
futures-core = { version = "0.3", default-features = false }
tokio = { version = "1.2", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
[dev-dependencies]
tokio = { version = "1.2", features = ["full"] }
hyper = { version = "0.14", default-features = false, features = ["server", "tcp", "http1"] }

View File

@@ -1 +0,0 @@
../LICENSE-APACHE

View File

@@ -1 +0,0 @@
../LICENSE-MIT

View File

@@ -1,5 +0,0 @@
# actix-rt
> Tokio-based single-threaded async runtime for the Actix ecosystem.
See crate documentation for more: https://docs.rs/actix-rt.
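A minimal, illustrative example (hypothetical; see the docs for the full API):

```rust
fn main() {
    // Build the single-threaded runtime and block on a root future.
    actix_rt::System::new().block_on(async {
        println!("running inside the actix-rt event loop");
    });
}
```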

View File

@@ -1,28 +0,0 @@
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};
use std::convert::Infallible;
use std::net::SocketAddr;
async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
Ok(Response::new(Body::from("Hello World")))
}
fn main() {
actix_rt::System::with_tokio_rt(|| {
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
})
.block_on(async {
let make_service =
make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) });
let server =
Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))).serve(make_service);
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
})
}
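// Note: `System::with_tokio_rt` lets actix-rt drive a caller-supplied Tokio runtime;
// a multi-threaded runtime is used here so hyper can serve connections from several
// worker threads while still running inside an actix System.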

View File

@@ -1,260 +0,0 @@
use std::{
cell::RefCell,
fmt,
future::Future,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
thread,
};
use futures_core::ready;
use tokio::{sync::mpsc, task::LocalSet};
use crate::{
runtime::{default_tokio_runtime, Runtime},
system::{System, SystemCommand},
};
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static HANDLE: RefCell<Option<ArbiterHandle>> = RefCell::new(None);
);
pub(crate) enum ArbiterCommand {
Stop,
Execute(Pin<Box<dyn Future<Output = ()> + Send>>),
}
impl fmt::Debug for ArbiterCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ArbiterCommand::Stop => write!(f, "ArbiterCommand::Stop"),
ArbiterCommand::Execute(_) => write!(f, "ArbiterCommand::Execute"),
}
}
}
/// A handle for sending spawn and stop messages to an [Arbiter].
#[derive(Debug, Clone)]
pub struct ArbiterHandle {
tx: mpsc::UnboundedSender<ArbiterCommand>,
}
impl ArbiterHandle {
pub(crate) fn new(tx: mpsc::UnboundedSender<ArbiterCommand>) -> Self {
Self { tx }
}
/// Send a future to the [Arbiter]'s thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the [Arbiter] has died.
pub fn spawn<Fut>(&self, future: Fut) -> bool
where
Fut: Future<Output = ()> + Send + 'static,
{
self.tx
.send(ArbiterCommand::Execute(Box::pin(future)))
.is_ok()
}
/// Send a function to the [Arbiter]'s thread and execute it.
///
/// Any result from the function is discarded. If you require a result, include a response
/// channel in the function.
///
/// Returns true if function was sent successfully and false if the [Arbiter] has died.
pub fn spawn_fn<F>(&self, f: F) -> bool
where
F: FnOnce() + Send + 'static,
{
self.spawn(async { f() })
}
/// Instruct the [Arbiter] to stop processing its event loop.
///
/// Returns true if stop message was sent successfully and false if the [Arbiter] has
/// been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
}
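// Illustrative sketch (hypothetical helper): as the docs above suggest, a result can be
// obtained from a spawned future by embedding a response channel in it.
async fn _arbiter_handle_roundtrip(handle: &ArbiterHandle) -> u32 {
    let (tx, rx) = tokio::sync::oneshot::channel::<u32>();
    handle.spawn(async move {
        // any work may run on the arbiter's thread; send the result back
        let _ = tx.send(6 * 7);
    });
    rx.await.expect("arbiter stopped before responding")
}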
/// An Arbiter represents a thread that provides an asynchronous execution environment for futures
/// and functions.
///
/// When an arbiter is created, it spawns a new [OS thread](thread), and hosts an event loop.
#[derive(Debug)]
pub struct Arbiter {
tx: mpsc::UnboundedSender<ArbiterCommand>,
thread_handle: thread::JoinHandle<()>,
}
impl Arbiter {
/// Spawn a new Arbiter thread and start its event loop.
///
/// # Panics
/// Panics if a [System] is not registered on the current thread.
#[allow(clippy::new_without_default)]
pub fn new() -> Arbiter {
Self::with_tokio_rt(|| {
default_tokio_runtime().expect("Cannot create new Arbiter's Runtime.")
})
}
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
where
F: Fn() -> tokio::runtime::Runtime + Send + 'static,
{
let sys = System::current();
let system_id = sys.id();
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
let (tx, rx) = mpsc::unbounded_channel();
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
let thread_handle = thread::Builder::new()
.name(name.clone())
.spawn({
let tx = tx.clone();
move || {
let rt = Runtime::from(runtime_factory());
let hnd = ArbiterHandle::new(tx);
System::set_current(sys);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
// register arbiter
let _ = System::current()
.tx()
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
ready_tx.send(()).unwrap();
// run arbiter event processing loop
rt.block_on(ArbiterRunner { rx });
// deregister arbiter
let _ = System::current()
.tx()
.send(SystemCommand::DeregisterArbiter(arb_id));
}
})
.unwrap_or_else(|err| {
panic!("Cannot spawn Arbiter's thread: {:?}. {:?}", &name, err)
});
ready_rx.recv().unwrap();
Arbiter { tx, thread_handle }
}
/// Sets up an Arbiter runner in a new System using the provided runtime local task set.
pub(crate) fn in_new_system(local: &LocalSet) -> ArbiterHandle {
let (tx, rx) = mpsc::unbounded_channel();
let hnd = ArbiterHandle::new(tx);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
local.spawn_local(ArbiterRunner { rx });
hnd
}
/// Return a handle to this Arbiter's message sender.
pub fn handle(&self) -> ArbiterHandle {
ArbiterHandle::new(self.tx.clone())
}
/// Return a handle to the current thread's Arbiter's message sender.
///
/// # Panics
/// Panics if no Arbiter is running on the current thread.
pub fn current() -> ArbiterHandle {
HANDLE.with(|cell| match *cell.borrow() {
Some(ref hnd) => hnd.clone(),
None => panic!("Arbiter is not running."),
})
}
/// Stop the Arbiter from continuing its event loop.
///
/// Returns true if stop message was sent successfully and false if the Arbiter has been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
/// Send a future to the Arbiter's thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the Arbiter has died.
pub fn spawn<Fut>(&self, future: Fut) -> bool
where
Fut: Future<Output = ()> + Send + 'static,
{
self.tx
.send(ArbiterCommand::Execute(Box::pin(future)))
.is_ok()
}
/// Send a function to the Arbiter's thread and execute it.
///
/// Any result from the function is discarded. If you require a result, include a response
/// channel in the function.
///
/// Returns true if function was sent successfully and false if the Arbiter has died.
pub fn spawn_fn<F>(&self, f: F) -> bool
where
F: FnOnce() + Send + 'static,
{
self.spawn(async { f() })
}
/// Wait for Arbiter's event loop to complete.
///
/// Joins the underlying OS thread handle. See [`JoinHandle::join`](thread::JoinHandle::join).
pub fn join(self) -> thread::Result<()> {
self.thread_handle.join()
}
}
/// A persistent future that processes [Arbiter] commands.
struct ArbiterRunner {
rx: mpsc::UnboundedReceiver<ArbiterCommand>,
}
impl Future for ArbiterRunner {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match ready!(Pin::new(&mut self.rx).poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process arbiter command
Some(item) => match item {
ArbiterCommand::Stop => {
return Poll::Ready(());
}
ArbiterCommand::Execute(task_fut) => {
tokio::task::spawn_local(task_fut);
}
},
}
}
}
}

View File

@@ -1,144 +0,0 @@
//! Tokio-based single-threaded async runtime for the Actix ecosystem.
//!
//! Most of the Actix ecosystem has chosen to use !Send futures. For this
//! reason, a single-threaded runtime is appropriate since it is guaranteed that futures will not
//! be moved between threads. This can result in small performance improvements over cases where
//! atomics would otherwise be needed.
//!
//! To achieve similar performance to multi-threaded, work-stealing runtimes, applications
//! using `actix-rt` will create multiple, mostly disconnected, single-threaded runtimes.
//! This approach has good performance characteristics for workloads where the majority of tasks
//! have similar runtime expense.
//!
//! The disadvantage is that idle threads will not steal work from very busy, stuck or otherwise
//! backlogged threads. Tasks that are disproportionately expensive should be offloaded to the
//! blocking task thread-pool using [`task::spawn_blocking`].
//!
//! # Examples
//! ```
//! use std::sync::mpsc;
//! use actix_rt::{Arbiter, System};
//!
//! let _ = System::new();
//!
//! let (tx, rx) = mpsc::channel::<u32>();
//!
//! let arbiter = Arbiter::new();
//! arbiter.spawn_fn(move || tx.send(42).unwrap());
//!
//! let num = rx.recv().unwrap();
//! assert_eq!(num, 42);
//!
//! arbiter.stop();
//! arbiter.join().unwrap();
//! ```
#![deny(rust_2018_idioms, nonstandard_style)]
#![allow(clippy::type_complexity)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use std::future::Future;
use tokio::task::JoinHandle;
// Cannot define a main macro when compiled into test harness.
// Workaround for https://github.com/rust-lang/rust/issues/62127.
#[cfg(all(feature = "macros", not(test)))]
pub use actix_macros::{main, test};
mod arbiter;
mod runtime;
mod system;
pub use self::arbiter::{Arbiter, ArbiterHandle};
pub use self::runtime::Runtime;
pub use self::system::{System, SystemRunner};
pub use tokio::pin;
pub mod signal {
//! Asynchronous signal handling (Tokio re-exports).
#[cfg(unix)]
pub mod unix {
//! Unix specific signals (Tokio re-exports).
pub use tokio::signal::unix::*;
}
pub use tokio::signal::ctrl_c;
}
pub mod net {
//! TCP/UDP/Unix bindings (mostly Tokio re-exports).
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite};
pub use tokio::net::UdpSocket;
pub use tokio::net::{TcpListener, TcpSocket, TcpStream};
#[cfg(unix)]
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
/// Extension trait over async read+write types that can also signal readiness.
pub trait ActixStream: AsyncRead + AsyncWrite + Unpin + 'static {
/// Poll stream and check read readiness of Self.
///
/// See [tokio::net::TcpStream::poll_read_ready] for details on intended use.
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>>;
/// Poll stream and check write readiness of Self.
///
/// See [tokio::net::TcpStream::poll_write_ready] for details on intended use.
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>>;
}
impl ActixStream for TcpStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
TcpStream::poll_read_ready(self, cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
TcpStream::poll_write_ready(self, cx)
}
}
#[cfg(unix)]
impl ActixStream for UnixStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
UnixStream::poll_read_ready(self, cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
UnixStream::poll_write_ready(self, cx)
}
}
}
pub mod time {
//! Utilities for tracking time (Tokio re-exports).
pub use tokio::time::Instant;
pub use tokio::time::{interval, interval_at, Interval};
pub use tokio::time::{sleep, sleep_until, Sleep};
pub use tokio::time::{timeout, Timeout};
}
pub mod task {
//! Task management (Tokio re-exports).
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
}
/// Spawns a future on the current thread.
///
/// # Panics
/// Panics if Actix system is not running.
#[inline]
pub fn spawn<Fut>(f: Fut) -> JoinHandle<()>
where
Fut: Future<Output = ()> + 'static,
{
tokio::task::spawn_local(f)
}
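// Illustrative sketch (hypothetical helper): because `spawn` returns Tokio's `JoinHandle`,
// completion of the (possibly !Send) task can be awaited from the same thread.
async fn _await_spawned_task() {
    let handle = spawn(async {
        // runs on the current thread's task set
    });
    handle.await.expect("spawned task panicked");
}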

View File

@@ -1,96 +0,0 @@
use std::{future::Future, io};
use tokio::task::{JoinHandle, LocalSet};
/// A Tokio-based runtime proxy.
///
/// All spawned futures will be executed on the current thread. Therefore, there is no `Send` bound
/// on submitted futures.
#[derive(Debug)]
pub struct Runtime {
local: LocalSet,
rt: tokio::runtime::Runtime,
}
pub(crate) fn default_tokio_runtime() -> io::Result<tokio::runtime::Runtime> {
tokio::runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()
}
impl Runtime {
/// Returns a new runtime initialized with default configuration values.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> io::Result<Self> {
let rt = default_tokio_runtime()?;
Ok(Runtime {
rt,
local: LocalSet::new(),
})
}
/// Reference to local task set.
pub(crate) fn local_set(&self) -> &LocalSet {
&self.local
}
/// Offload a future onto the single-threaded runtime.
///
/// The returned join handle can be used to await the future's result.
///
/// See [crate root][crate] documentation for more details.
///
/// # Examples
/// ```
/// let rt = actix_rt::Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// let handle = rt.spawn(async {
/// println!("running on the runtime");
/// 42
/// });
///
/// assert_eq!(rt.block_on(handle).unwrap(), 42);
/// ```
///
/// # Panics
/// This function panics if the spawn fails. Failure occurs if the executor is currently at
/// capacity and is unable to spawn a new future.
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: Future + 'static,
{
self.local.spawn_local(future)
}
/// Runs the provided future, blocking the current thread until the future completes.
///
/// This function can be used to synchronously block the current thread until the provided
/// `future` has resolved either successfully or with an error. The result of the future is
/// then returned from this function call.
///
/// Note that this function will also execute any spawned futures on the current thread, but
/// will not block until these other spawned futures have completed. Once the function returns,
/// any uncompleted futures remain pending in the `Runtime` instance. These futures will not run
/// until `block_on` or `run` is called again.
///
/// The caller is responsible for ensuring that other spawned futures complete execution by
/// calling `block_on` or `run`.
pub fn block_on<F>(&self, f: F) -> F::Output
where
F: Future,
{
self.local.block_on(&self.rt, f)
}
}
impl From<tokio::runtime::Runtime> for Runtime {
fn from(rt: tokio::runtime::Runtime) -> Self {
Self {
local: LocalSet::new(),
rt,
}
}
}

View File

@@ -1,255 +0,0 @@
use std::{
cell::RefCell,
collections::HashMap,
future::Future,
io,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
};
use futures_core::ready;
use tokio::sync::{mpsc, oneshot};
use crate::{arbiter::ArbiterHandle, runtime::default_tokio_runtime, Arbiter, Runtime};
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static CURRENT: RefCell<Option<System>> = RefCell::new(None);
);
/// A manager for a per-thread distributed async runtime.
#[derive(Clone, Debug)]
pub struct System {
id: usize,
sys_tx: mpsc::UnboundedSender<SystemCommand>,
/// Handle to the first [Arbiter] that is created with the System.
arbiter_handle: ArbiterHandle,
}
impl System {
/// Create a new system.
///
/// # Panics
/// Panics if underlying Tokio runtime can not be created.
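///
/// # Examples
/// A minimal sketch (illustrative; not from the original docs):
/// ```
/// let runner = actix_rt::System::new();
///
/// runner.block_on(async {
///     // async work driven by the System's single-threaded runtime
///     actix_rt::time::sleep(std::time::Duration::from_millis(1)).await;
/// });
/// ```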
#[allow(clippy::new_ret_no_self)]
pub fn new() -> SystemRunner {
Self::with_tokio_rt(|| {
default_tokio_runtime()
.expect("Default Actix (Tokio) runtime could not be created.")
})
}
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> SystemRunner
where
F: Fn() -> tokio::runtime::Runtime,
{
let (stop_tx, stop_rx) = oneshot::channel();
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
let rt = Runtime::from(runtime_factory());
let sys_arbiter = Arbiter::in_new_system(rt.local_set());
let system = System::construct(sys_tx, sys_arbiter.clone());
system
.tx()
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
.unwrap();
// init background system arbiter
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
rt.spawn(sys_ctrl);
SystemRunner {
rt,
stop_rx,
system,
}
}
/// Constructs new system and registers it on the current thread.
pub(crate) fn construct(
sys_tx: mpsc::UnboundedSender<SystemCommand>,
arbiter_handle: ArbiterHandle,
) -> Self {
let sys = System {
sys_tx,
arbiter_handle,
id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst),
};
System::set_current(sys.clone());
sys
}
/// Get current running system.
///
/// # Panics
/// Panics if no system is registered on the current thread.
pub fn current() -> System {
CURRENT.with(|cell| match *cell.borrow() {
Some(ref sys) => sys.clone(),
None => panic!("System is not running"),
})
}
/// Try to get current running system.
///
/// Returns `None` if no System has been started.
///
/// Contrary to `current`, this never panics.
pub fn try_current() -> Option<System> {
CURRENT.with(|cell| cell.borrow().clone())
}
/// Get a handle to the System's initial [Arbiter].
pub fn arbiter(&self) -> &ArbiterHandle {
&self.arbiter_handle
}
/// Check if there is a System registered on the current thread.
pub fn is_registered() -> bool {
CURRENT.with(|sys| sys.borrow().is_some())
}
/// Register given system on current thread.
#[doc(hidden)]
pub fn set_current(sys: System) {
CURRENT.with(|cell| {
*cell.borrow_mut() = Some(sys);
})
}
/// Numeric system identifier.
///
/// Useful when using multiple Systems.
pub fn id(&self) -> usize {
self.id
}
/// Stop the system (with code 0).
pub fn stop(&self) {
self.stop_with_code(0)
}
/// Stop the system with a given exit code.
pub fn stop_with_code(&self, code: i32) {
let _ = self.sys_tx.send(SystemCommand::Exit(code));
}
pub(crate) fn tx(&self) -> &mpsc::UnboundedSender<SystemCommand> {
&self.sys_tx
}
}
/// Runner that keeps a [System]'s event loop alive until a stop message is received.
#[must_use = "A SystemRunner does nothing unless `run` is called."]
#[derive(Debug)]
pub struct SystemRunner {
rt: Runtime,
stop_rx: oneshot::Receiver<i32>,
system: System,
}
impl SystemRunner {
/// Starts the event loop and returns once the [System] is [stopped](System::stop).
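///
/// A sketch (illustrative): something must eventually call [System::stop], or this blocks forever.
/// ```
/// let runner = actix_rt::System::new();
///
/// runner.block_on(async {
///     actix_rt::spawn(async {
///         // ... do some work, then ask the system to shut down
///         actix_rt::System::current().stop();
///     });
/// });
///
/// runner.run().unwrap();
/// ```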
pub fn run(self) -> io::Result<()> {
let SystemRunner { rt, stop_rx, .. } = self;
// run loop
match rt.block_on(stop_rx) {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
}
}
/// Runs the provided future, blocking the current thread until the future completes.
#[inline]
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
self.rt.block_on(fut)
}
}
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, ArbiterHandle),
DeregisterArbiter(usize),
}
/// There is one `SystemController` per [System]. It runs in the background, keeping track of
/// [Arbiter]s and is able to distribute a system-wide stop command.
#[derive(Debug)]
pub(crate) struct SystemController {
stop_tx: Option<oneshot::Sender<i32>>,
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
arbiters: HashMap<usize, ArbiterHandle>,
}
impl SystemController {
pub(crate) fn new(
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
stop_tx: oneshot::Sender<i32>,
) -> Self {
SystemController {
cmd_rx,
stop_tx: Some(stop_tx),
arbiters: HashMap::with_capacity(4),
}
}
}
impl Future for SystemController {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match ready!(Pin::new(&mut self.cmd_rx).poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process system command
Some(cmd) => match cmd {
SystemCommand::Exit(code) => {
// stop all arbiters
for arb in self.arbiters.values() {
arb.stop();
}
// stop event loop
// will only fire once
if let Some(stop_tx) = self.stop_tx.take() {
let _ = stop_tx.send(code);
}
}
SystemCommand::RegisterArbiter(id, arb) => {
self.arbiters.insert(id, arb);
}
SystemCommand::DeregisterArbiter(id) => {
self.arbiters.remove(&id);
}
},
}
}
}
}

View File

@@ -1,300 +0,0 @@
use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::channel,
Arc,
},
thread,
time::{Duration, Instant},
};
use actix_rt::{Arbiter, System};
use tokio::sync::oneshot;
#[test]
fn await_for_timer() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
tokio::time::sleep(time).await;
});
assert!(
instant.elapsed() >= time,
"Block on should poll awaited future to completion"
);
}
#[test]
fn join_another_arbiter() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on another arbiter should complete only when it calls stop"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || {
actix_rt::spawn(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
});
});
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on an arbiter that has used actix_rt::spawn should wait for said future"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.stop();
arbiter.join().unwrap();
});
assert!(
instant.elapsed() < time,
"Premature stop of arbiter should conclude regardless of it's current state"
);
}
#[test]
fn non_static_block_on() {
let string = String::from("test_str");
let string = string.as_str();
let sys = System::new();
sys.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
let rt = actix_rt::Runtime::new().unwrap();
rt.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
}
#[test]
fn wait_for_spawns() {
let rt = actix_rt::Runtime::new().unwrap();
let handle = rt.spawn(async {
println!("running on the runtime");
// assertion panic is caught at task boundary
assert_eq!(1, 2);
});
assert!(rt.block_on(handle).is_err());
}
#[test]
fn arbiter_spawn_fn_runs() {
let _ = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || tx.send(42).unwrap());
let num = rx.recv().unwrap();
assert_eq!(num, 42);
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
fn arbiter_handle_spawn_fn_runs() {
let sys = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
let handle = arbiter.handle();
drop(arbiter);
handle.spawn_fn(move || {
tx.send(42).unwrap();
System::current().stop()
});
let num = rx.recv_timeout(Duration::from_secs(2)).unwrap();
assert_eq!(num, 42);
handle.stop();
sys.run().unwrap();
}
#[test]
fn arbiter_drop_no_panic_fn() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn_fn(|| panic!("test"));
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
fn arbiter_drop_no_panic_fut() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn(async { panic!("test") });
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
#[should_panic]
fn no_system_current_panic() {
System::current();
}
#[test]
#[should_panic]
fn no_system_arbiter_new_panic() {
Arbiter::new();
}
#[test]
fn system_arbiter_spawn() {
let runner = System::new();
let (tx, rx) = oneshot::channel();
let sys = System::current();
thread::spawn(|| {
// this thread will have no arbiter in its thread local so the call will panic
Arbiter::current();
})
.join()
.unwrap_err();
let thread = thread::spawn(|| {
// this thread will have no arbiter in its thread local so use the system handle instead
System::set_current(sys);
let sys = System::current();
let arb = sys.arbiter();
arb.spawn(async move {
tx.send(42u32).unwrap();
System::current().stop();
});
});
assert_eq!(runner.block_on(rx).unwrap(), 42);
thread.join().unwrap();
}
#[test]
fn system_stop_stops_arbiters() {
let sys = System::new();
let arb = Arbiter::new();
// arbiter should be alive to receive spawn msg
assert!(Arbiter::current().spawn_fn(|| {}));
assert!(arb.spawn_fn(|| {}));
System::current().stop();
sys.run().unwrap();
// account for slightly slow thread de-spawns (only observed on windows)
thread::sleep(Duration::from_millis(100));
// arbiter should be dead and return false
assert!(!Arbiter::current().spawn_fn(|| {}));
assert!(!arb.spawn_fn(|| {}));
arb.join().unwrap();
}
#[test]
fn new_system_with_tokio() {
let (tx, rx) = channel();
let res = System::with_tokio_rt(move || {
tokio::runtime::Builder::new_multi_thread()
.enable_io()
.enable_time()
.thread_keep_alive(Duration::from_millis(1000))
.worker_threads(2)
.max_blocking_threads(2)
.on_thread_start(|| {})
.on_thread_stop(|| {})
.build()
.unwrap()
})
.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
tokio::task::spawn(async move {
tx.send(42).unwrap();
})
.await
.unwrap();
123usize
});
assert_eq!(res, 123);
assert_eq!(rx.recv().unwrap(), 42);
}
#[test]
fn new_arbiter_with_tokio() {
let _ = System::new();
let arb = Arbiter::with_tokio_rt(|| {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
});
let counter = Arc::new(AtomicBool::new(true));
let counter1 = counter.clone();
let did_spawn = arb.spawn(async move {
actix_rt::time::sleep(Duration::from_millis(1)).await;
counter1.store(false, Ordering::SeqCst);
Arbiter::current().stop();
});
assert!(did_spawn);
arb.join().unwrap();
assert_eq!(false, counter.load(Ordering::SeqCst));
}
#[test]
fn try_current_no_system() {
assert!(System::try_current().is_none())
}
#[test]
fn try_current_with_system() {
System::new().block_on(async { assert!(System::try_current().is_some()) });
}

View File

@@ -1,162 +0,0 @@
# Changes
## Unreleased - 2021-xx-xx
## 2.0.0-beta.3 - 2021-02-06
* Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`. [#246]
* Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop. [#264]
* Add `ServerBuilder::worker_max_blocking_threads` to customize blocking thread pool size. [#265]
* Update `actix-rt` to `2.0.0`. [#273]
[#246]: https://github.com/actix/actix-net/pull/246
[#264]: https://github.com/actix/actix-net/pull/264
[#265]: https://github.com/actix/actix-net/pull/265
[#273]: https://github.com/actix/actix-net/pull/273
## 2.0.0-beta.2 - 2021-01-03
* Merge `actix-testing` to `actix-server` as `test_server` mod. [#242]
[#242]: https://github.com/actix/actix-net/pull/242
## 2.0.0-beta.1 - 2020-12-28
* Added explicit info log message on accept queue pause. [#215]
* Prevent double registration of sockets when back-pressure is resolved. [#223]
* Update `mio` dependency to `0.7.3`. [#239]
* Remove `socket2` dependency. [#239]
* `ServerBuilder::backlog` now accepts `u32` instead of `i32`. [#239]
* Remove `AcceptNotify` type and pass `WakerQueue` to `Worker` to wake up `Accept`'s `Poll`. [#239]
* Convert `mio::net::TcpStream` to `actix_rt::net::TcpStream` (`UnixStream` for UDS) using
`FromRawFd` and `IntoRawFd` (`FromRawSocket` and `IntoRawSocket` on Windows). [#239]
* Remove `AsyncRead` and `AsyncWrite` trait bound for `socket::FromStream` trait. [#239]
[#215]: https://github.com/actix/actix-net/pull/215
[#223]: https://github.com/actix/actix-net/pull/223
[#239]: https://github.com/actix/actix-net/pull/239
## 1.0.4 - 2020-09-12
* Update actix-codec to 0.3.0.
* Workers must be greater than 0. [#167]
[#167]: https://github.com/actix/actix-net/pull/167
## 1.0.3 - 2020-05-19
* Replace deprecated `net2` crate with `socket2` [#140]
[#140]: https://github.com/actix/actix-net/pull/140
## 1.0.2 - 2020-02-26
* Avoid error by calling `reregister()` on Windows [#103]
[#103]: https://github.com/actix/actix-net/pull/103
## 1.0.1 - 2019-12-29
* Rename `.start()` method to `.run()`
## 1.0.0 - 2019-12-11
* Use actix-net releases
## 1.0.0-alpha.4 - 2019-12-08
* Use actix-service 1.0.0-alpha.4
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to tokio 0.2
* Fix compilation on non-unix platforms
* Better handling of server configuration
## 1.0.0-alpha.2 - 2019-12-02
* Simplify server service (remove actix-server-config)
* Allow waiting on `Server` until server stops
## 0.8.0-alpha.1 - 2019-11-22
* Migrate to `std::future`
## 0.7.0 - 2019-10-04
* Update `rustls` to 0.16
* Minimum required Rust version upped to 1.37.0
## 0.6.1 - 2019-09-25
* Add UDS listening support to `ServerBuilder`
## 0.6.0 - 2019-07-18
* Support Unix domain sockets #3
## 0.5.1 - 2019-05-18
* ServerBuilder::shutdown_timeout() accepts u64
## 0.5.0 - 2019-05-12
* Add `Debug` impl for `SslError`
* Derive debug for `Server` and `ServerCommand`
* Upgrade to actix-service 0.4
## 0.4.3 - 2019-04-16
* Re-export `IoStream` trait
* Depend on `ssl` and `rust-tls` features from actix-server-config
## 0.4.2 - 2019-03-30
* Fix SIGINT force shutdown
## 0.4.1 - 2019-03-14
* `SystemRuntime::on_start()` - allow running a future before server service initialization
## 0.4.0 - 2019-03-12
* Use `ServerConfig` for service factory
* Wrap tcp socket to `Io` type
* Upgrade actix-service
## 0.3.1 - 2019-03-04
* Add `ServerBuilder::maxconnrate`, which sets the maximum per-worker number of concurrent connections
* Add helper ssl error `SslError`
* Rename `StreamServiceFactory` to `ServiceFactory`
* Deprecate `StreamServiceFactory`
## 0.3.0 - 2019-03-02
* Use new `NewService` trait
## 0.2.1 - 2019-02-09
* Drop service response
## 0.2.0 - 2019-02-01
* Migrate to actix-service 0.2
* Updated rustls dependency
## 0.1.3 - 2018-12-21
* Fix max concurrent connections handling
## 0.1.2 - 2018-12-12
* Rename ServiceConfig::rt() to ServiceConfig::apply()
* Fix back-pressure for concurrent ssl handshakes
## 0.1.1 - 2018-12-11
* Fix signal handling on windows
## 0.1.0 - 2018-12-09
* Move server to separate crate

View File

@@ -1,42 +0,0 @@
[package]
name = "actix-server"
version = "2.0.0-beta.3"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
]
description = "General purpose TCP server built for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-server"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_server"
path = "src/lib.rs"
[features]
default = []
[dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = { version = "2.0.0", default-features = false }
actix-service = "2.0.0-beta.4"
actix-utils = "3.0.0-beta.2"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
log = "0.4"
mio = { version = "0.7.6", features = ["os-poll", "net"] }
num_cpus = "1.13"
slab = "0.4"
tokio = { version = "1.2", features = ["sync"] }
[dev-dependencies]
actix-rt = "2.0.0"
bytes = "1"
env_logger = "0.8"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
tokio = { version = "1", features = ["io-util"] }

View File

@@ -1 +0,0 @@
../LICENSE-APACHE

View File

@@ -1 +0,0 @@
../LICENSE-MIT

View File

@@ -1,88 +0,0 @@
//! Simple composite-service TCP echo server.
//!
//! Using the following command:
//!
//! ```sh
//! nc 127.0.0.1 8080
//! ```
//!
//! Start typing. When you press enter the typed line will be echoed back. The server will log
//! the length of each line it echoes and the total size of data sent when the connection is closed.
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::{env, io};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::pipeline_factory;
use bytes::BytesMut;
use futures_util::future::ok;
use log::{error, info};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "actix=trace,basic=trace");
env_logger::init();
let count = Arc::new(AtomicUsize::new(0));
let addr = ("127.0.0.1", 8080);
info!("starting server on port: {}", &addr.0);
// Bind socket address and start worker(s). By default, the server uses the number of available
// logical CPU cores as the worker count. For this reason, the closure passed to bind needs
// to return a service *factory*, so that it can be created once per worker.
Server::build()
.bind("echo", addr, move || {
let count = Arc::clone(&count);
let num2 = Arc::clone(&count);
pipeline_factory(move |mut stream: TcpStream| {
let count = Arc::clone(&count);
async move {
let num = count.fetch_add(1, Ordering::SeqCst);
let num = num + 1;
let mut size = 0;
let mut buf = BytesMut::new();
loop {
match stream.read_buf(&mut buf).await {
// end of stream; bail from loop
Ok(0) => break,
// more bytes to process
Ok(bytes_read) => {
info!("[{}] read {} bytes", num, bytes_read);
stream.write_all(&buf[size..]).await.unwrap();
size += bytes_read;
}
// stream error; bail from loop with error
Err(err) => {
error!("Stream Error: {:?}", err);
return Err(());
}
}
}
// send data down service pipeline
Ok((buf.freeze(), size))
}
})
.map_err(|err| error!("Service Error: {:?}", err))
.and_then(move |(_, size)| {
let num = num2.load(Ordering::SeqCst);
info!("[{}] total bytes read: {}", num, size);
ok(size)
})
})?
.workers(1)
.run()
.await
}

View File

@@ -1,428 +0,0 @@
use std::time::Duration;
use std::{io, thread};
use actix_rt::{
time::{sleep_until, Instant},
System,
};
use log::{error, info};
use mio::{Interest, Poll, Token as MioToken};
use slab::Slab;
use crate::server::Server;
use crate::socket::{MioListener, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
use crate::worker::{Conn, WorkerHandle};
use crate::Token;
struct ServerSocketInfo {
// addr for socket. mainly used for logging.
addr: SocketAddr,
// beware: this is the crate-level token used to identify the socket and should not be confused with
// mio::Token
token: Token,
lst: MioListener,
// timeout is used to mark the deadline when this socket's listener should be registered again
// after an error.
timeout: Option<Instant>,
}
/// Accept loop lives alongside `ServerBuilder`.
///
/// It is tasked with constructing the `Poll` instance and `WakerQueue`, which are distributed to
/// `Accept` and `Worker`.
///
/// It also listens for `ServerCommand` and pushes interests to the `WakerQueue`.
pub(crate) struct AcceptLoop {
srv: Option<Server>,
poll: Option<Poll>,
waker: WakerQueue,
}
impl AcceptLoop {
pub fn new(srv: Server) -> Self {
let poll = Poll::new().unwrap_or_else(|e| panic!("Can not create `mio::Poll`: {}", e));
let waker = WakerQueue::new(poll.registry())
.unwrap_or_else(|e| panic!("Can not create `mio::Waker`: {}", e));
Self {
srv: Some(srv),
poll: Some(poll),
waker,
}
}
pub(crate) fn waker_owned(&self) -> WakerQueue {
self.waker.clone()
}
pub fn wake(&self, i: WakerInterest) {
self.waker.wake(i);
}
pub(crate) fn start(
&mut self,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
) {
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
let poll = self.poll.take().unwrap();
let waker = self.waker.clone();
Accept::start(poll, waker, socks, srv, handles);
}
}
/// poll instance of the server.
struct Accept {
poll: Poll,
waker: WakerQueue,
handles: Vec<WorkerHandle>,
srv: Server,
next: usize,
backpressure: bool,
}
/// This function defines errors that are per-connection; if we get one of these errors from the
/// `accept()` system call, the next connection might still be ready to be accepted.
///
/// All other errors incur a timeout before the next `accept()` is performed. The timeout is
/// useful for handling resource exhaustion errors like ENFILE and EMFILE; otherwise we could
/// enter a tight loop.
fn connection_error(e: &io::Error) -> bool {
e.kind() == io::ErrorKind::ConnectionRefused
|| e.kind() == io::ErrorKind::ConnectionAborted
|| e.kind() == io::ErrorKind::ConnectionReset
}
impl Accept {
pub(crate) fn start(
poll: Poll,
waker: WakerQueue,
socks: Vec<(Token, MioListener)>,
srv: Server,
handles: Vec<WorkerHandle>,
) {
// Accept runs in its own thread and would want to spawn additional futures onto the
// current actix system.
let sys = System::current();
thread::Builder::new()
.name("actix-server accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let (mut accept, sockets) =
Accept::new_with_sockets(poll, waker, socks, handles, srv);
accept.poll_with(sockets);
})
.unwrap();
}
fn new_with_sockets(
poll: Poll,
waker: WakerQueue,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
srv: Server,
) -> (Accept, Slab<ServerSocketInfo>) {
let mut sockets = Slab::new();
for (hnd_token, mut lst) in socks.into_iter() {
let addr = lst.local_addr();
let entry = sockets.vacant_entry();
let token = entry.key();
// Start listening for incoming connections
poll.registry()
.register(&mut lst, MioToken(token), Interest::READABLE)
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
entry.insert(ServerSocketInfo {
addr,
token: hnd_token,
lst,
timeout: None,
});
}
let accept = Accept {
poll,
waker,
handles,
srv,
next: 0,
backpressure: false,
};
(accept, sockets)
}
fn poll_with(&mut self, mut sockets: Slab<ServerSocketInfo>) {
let mut events = mio::Events::with_capacity(128);
loop {
if let Err(e) = self.poll.poll(&mut events, None) {
match e.kind() {
std::io::ErrorKind::Interrupted => {
continue;
}
_ => {
panic!("Poll error: {}", e);
}
}
}
for event in events.iter() {
let token = event.token();
match token {
// This is a loop because, in the previous version, command interests were handled by
// a loop that drained the command channel. It is not yet known whether actively
// draining the waker queue is necessary or good practice.
WAKER_TOKEN => 'waker: loop {
// take guard with every iteration so no new interest can be added
// until the current task is done.
let mut guard = self.waker.guard();
match guard.pop_front() {
// a worker notifies that it has become available; we may want to recover
// from backpressure.
Some(WakerInterest::WorkerAvailable) => {
drop(guard);
self.maybe_backpressure(&mut sockets, false);
}
// a new worker thread has been made and its handle should be added
// to Accept
Some(WakerInterest::Worker(handle)) => {
drop(guard);
// maybe we want to recover from backpressure.
self.maybe_backpressure(&mut sockets, false);
self.handles.push(handle);
}
// got a timer interest; it's time to try registering socket(s)
// again.
Some(WakerInterest::Timer) => {
drop(guard);
self.process_timer(&mut sockets)
}
Some(WakerInterest::Pause) => {
drop(guard);
sockets.iter_mut().for_each(|(_, info)| {
match self.deregister(info) {
Ok(_) => info!(
"Paused accepting connections on {}",
info.addr
),
Err(e) => {
error!("Can not deregister server socket {}", e)
}
}
});
}
Some(WakerInterest::Resume) => {
drop(guard);
sockets.iter_mut().for_each(|(token, info)| {
self.register_logged(token, info);
});
}
Some(WakerInterest::Stop) => {
return self.deregister_all(&mut sockets);
}
// waker queue is drained.
None => {
// Reset the WakerQueue before break so it does not grow
// infinitely.
WakerQueue::reset(&mut guard);
break 'waker;
}
}
},
_ => {
let token = usize::from(token);
self.accept(&mut sockets, token);
}
}
}
}
}
fn process_timer(&self, sockets: &mut Slab<ServerSocketInfo>) {
let now = Instant::now();
sockets.iter_mut().for_each(|(token, info)| {
// only a ServerSocketInfo with an associated timeout value was deregistered and needs re-registration.
if let Some(inst) = info.timeout.take() {
if now > inst {
self.register_logged(token, info);
} else {
info.timeout = Some(inst);
}
}
});
}
#[cfg(not(target_os = "windows"))]
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
self.poll
.registry()
.register(&mut info.lst, MioToken(token), Interest::READABLE)
}
#[cfg(target_os = "windows")]
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
// On Windows, calling register without deregister causes an error.
// See https://github.com/actix/actix-web/issues/905
// Calling reregister seems to fix the issue.
self.poll
.registry()
.register(&mut info.lst, mio::Token(token), Interest::READABLE)
.or_else(|_| {
self.poll.registry().reregister(
&mut info.lst,
mio::Token(token),
Interest::READABLE,
)
})
}
fn register_logged(&self, token: usize, info: &mut ServerSocketInfo) {
match self.register(token, info) {
Ok(_) => info!("Resume accepting connections on {}", info.addr),
Err(e) => error!("Can not register server socket {}", e),
}
}
fn deregister(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
self.poll.registry().deregister(&mut info.lst)
}
fn deregister_all(&self, sockets: &mut Slab<ServerSocketInfo>) {
sockets.iter_mut().for_each(|(_, info)| {
info!("Accepting connections on {} has been paused", info.addr);
let _ = self.deregister(info);
});
}
fn maybe_backpressure(&mut self, sockets: &mut Slab<ServerSocketInfo>, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in sockets.iter_mut() {
if info.timeout.is_some() {
// socket will attempt to re-register itself when its timeout completes
continue;
}
self.register_logged(token, info);
}
}
} else if on {
self.backpressure = true;
self.deregister_all(sockets);
}
}
fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut msg: Conn) {
if self.backpressure {
while !self.handles.is_empty() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.set_next();
break;
}
Err(tmp) => {
// worker lost contact and could be gone. a message is sent to the
// `ServerBuilder` future to notify it that a new worker should be made.
// after that, remove the faulted worker.
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
return;
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
} else {
let mut idx = 0;
while idx < self.handles.len() {
idx += 1;
if self.handles[self.next].available() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.set_next();
return;
}
// worker lost contact and could be gone. a message is sent to the
// `ServerBuilder` future to notify it that a new worker should be made.
// after that, remove the faulted worker and enter backpressure if necessary.
Err(tmp) => {
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
self.maybe_backpressure(sockets, true);
return;
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.set_next();
}
// enable backpressure
self.maybe_backpressure(sockets, true);
self.accept_one(sockets, msg);
}
}
// set next worker handle that would accept work.
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
}
fn accept(&mut self, sockets: &mut Slab<ServerSocketInfo>, token: usize) {
loop {
let msg = if let Some(info) = sockets.get_mut(token) {
match info.lst.accept() {
Ok(Some((io, addr))) => Conn {
io,
token: info.token,
peer: Some(addr),
},
Ok(None) => return,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
// deregister listener temporarily
error!("Error accepting connection: {}", e);
if let Err(err) = self.deregister(info) {
error!("Can not deregister server socket {}", err);
}
// sleep after error. write the timeout to the socket info, as the poll loop later
// needs it to mark which socket's listener should be registered again, and when.
info.timeout = Some(Instant::now() + Duration::from_millis(500));
// after the sleep a Timer interest is sent to Accept Poll
let waker = self.waker.clone();
System::current().arbiter().spawn(async move {
sleep_until(Instant::now() + Duration::from_millis(510)).await;
waker.wake(WakerInterest::Timer);
});
return;
}
}
} else {
return;
};
self.accept_one(sockets, msg);
}
}
}

View File

@@ -1,503 +0,0 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::{io, mem};
use actix_rt::net::TcpStream;
use actix_rt::time::{sleep_until, Instant};
use actix_rt::{self as rt, System};
use log::{error, info};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::sync::oneshot;
use crate::accept::AcceptLoop;
use crate::config::{ConfiguredService, ServiceConfig};
use crate::server::{Server, ServerCommand};
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::signals::{Signal, Signals};
use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::socket::{MioTcpListener, MioTcpSocket};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::worker::{self, ServerWorker, ServerWorkerConfig, WorkerAvailability, WorkerHandle};
use crate::{join_all, Token};
/// Server builder
pub struct ServerBuilder {
threads: usize,
token: Token,
backlog: u32,
handles: Vec<(usize, WorkerHandle)>,
services: Vec<Box<dyn InternalServiceFactory>>,
sockets: Vec<(Token, String, MioListener)>,
accept: AcceptLoop,
exit: bool,
no_signals: bool,
cmd: UnboundedReceiver<ServerCommand>,
server: Server,
notify: Vec<oneshot::Sender<()>>,
worker_config: ServerWorkerConfig,
}
impl Default for ServerBuilder {
fn default() -> Self {
Self::new()
}
}
impl ServerBuilder {
/// Create new Server builder instance
pub fn new() -> ServerBuilder {
let (tx, rx) = unbounded_channel();
let server = Server::new(tx);
ServerBuilder {
threads: num_cpus::get(),
token: Token::default(),
handles: Vec::new(),
services: Vec::new(),
sockets: Vec::new(),
accept: AcceptLoop::new(server.clone()),
backlog: 2048,
exit: false,
no_signals: false,
cmd: rx,
notify: Vec::new(),
server,
worker_config: ServerWorkerConfig::default(),
}
}
/// Set number of workers to start.
///
/// By default, the server uses the number of available logical CPUs as the worker
/// count. Workers must be greater than 0.
pub fn workers(mut self, num: usize) -> Self {
assert_ne!(num, 0, "workers must be greater than 0");
self.threads = num;
self
}
/// Set max number of threads for each worker's blocking task thread pool.
///
/// One thread pool is set up **per worker**; not shared across workers.
///
/// # Examples:
/// ```
/// # use actix_server::ServerBuilder;
/// let builder = ServerBuilder::new()
/// .workers(4) // server has 4 worker threads.
/// .worker_max_blocking_threads(4); // every worker has 4 max blocking threads.
/// ```
///
/// See [tokio::runtime::Builder::max_blocking_threads] for behavior reference.
pub fn worker_max_blocking_threads(mut self, num: usize) -> Self {
self.worker_config.max_blocking_threads(num);
self
}
/// Set the maximum number of pending connections.
///
/// This refers to the number of clients that can be waiting to be served.
/// Exceeding this number results in the client getting an error when
/// attempting to connect. It should only affect servers under significant
/// load.
///
/// Generally set in the 64-2048 range. Default value is 2048.
///
/// This method should be called before the `bind()` method.
pub fn backlog(mut self, num: u32) -> Self {
self.backlog = num;
self
}
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
///
/// By default, max connections is set to 25k per worker.
pub fn maxconn(self, num: usize) -> Self {
worker::max_concurrent_connections(num);
self
}
/// Stop actix system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
/// Disable signal handling
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
}
/// Timeout for graceful workers shutdown in seconds.
///
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
///
/// By default, the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u64) -> Self {
self.worker_config
.shutdown_timeout(Duration::from_secs(sec));
self
}
/// Execute external configuration as part of the server building
/// process.
///
/// This function is useful for moving parts of the configuration to a
/// different module or even a library.
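///
/// # Examples
/// A sketch (illustrative; the service name, address, and `fn_service` handler are assumptions):
/// ```no_run
/// use actix_rt::net::TcpStream;
/// use actix_server::{Server, ServiceConfig};
/// use actix_service::fn_service;
///
/// fn config(cfg: &mut ServiceConfig) -> std::io::Result<()> {
///     cfg.bind("echo", ("127.0.0.1", 8080))?;
///     cfg.apply(|rt| {
///         rt.service("echo", fn_service(|_: TcpStream| async { Ok::<_, ()>(()) }));
///     })
/// }
///
/// let builder = Server::build().configure(config).unwrap();
/// ```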
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
where
F: Fn(&mut ServiceConfig) -> io::Result<()>,
{
let mut cfg = ServiceConfig::new(self.threads, self.backlog);
f(&mut cfg)?;
if let Some(apply) = cfg.apply {
let mut srv = ConfiguredService::new(apply);
for (name, lst) in cfg.services {
let token = self.token.next();
srv.stream(token, name.clone(), lst.local_addr()?);
self.sockets.push((token, name, MioListener::Tcp(lst)));
}
self.services.push(Box::new(srv));
}
self.threads = cfg.threads;
Ok(self)
}
/// Add new service to the server.
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: ServiceFactory<TcpStream>,
U: ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory.clone(),
lst.local_addr()?,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
}
Ok(self)
}
/// Add new unix domain service to the server.
#[cfg(unix)]
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: ServiceFactory<actix_rt::net::UnixStream>,
N: AsRef<str>,
U: AsRef<std::path::Path>,
{
// The path must not exist when we try to bind.
// Try to remove it to avoid bind error.
if let Err(e) = std::fs::remove_file(addr.as_ref()) {
// NotFound is expected and not an issue. Anything else is.
if e.kind() != std::io::ErrorKind::NotFound {
return Err(e);
}
}
let lst = crate::socket::StdUnixListener::bind(addr)?;
self.listen_uds(name, lst, factory)
}
/// Add new unix domain service to the server.
/// Useful when running as a systemd service and
/// a socket FD can be acquired using the systemd crate.
#[cfg(unix)]
pub fn listen_uds<F, N: AsRef<str>>(
mut self,
name: N,
lst: crate::socket::StdUnixListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory<actix_rt::net::UnixStream>,
{
use std::net::{IpAddr, Ipv4Addr};
lst.set_nonblocking(true)?;
let token = self.token.next();
let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
addr,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
/// Add new service to the server.
pub fn listen<F, N: AsRef<str>>(
mut self,
name: N,
lst: StdTcpListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory<TcpStream>,
{
lst.set_nonblocking(true)?;
let addr = lst.local_addr()?;
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
addr,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
/// Starts processing incoming connections and returns the server controller.
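///
/// # Examples
/// A minimal sketch (illustrative; the address and `fn_service` handler are assumptions):
/// ```no_run
/// use actix_rt::net::TcpStream;
/// use actix_server::Server;
/// use actix_service::fn_service;
///
/// #[actix_rt::main]
/// async fn main() -> std::io::Result<()> {
///     Server::build()
///         .bind("echo", ("127.0.0.1", 8080), || {
///             // accept the connection and immediately resolve; a real service would read/write here
///             fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
///         })?
///         .workers(2)
///         .run()
///         .await
/// }
/// ```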
pub fn run(mut self) -> Server {
if self.sockets.is_empty() {
panic!("Server should have at least one bound socket");
} else {
info!("Starting {} workers", self.threads);
// start workers
let handles = (0..self.threads)
.map(|idx| {
let handle = self.start_worker(idx, self.accept.waker_owned());
self.handles.push((idx, handle.clone()));
handle
})
.collect();
// start accept thread
for sock in &self.sockets {
info!("Starting \"{}\" service on {}", sock.1, sock.2);
}
self.accept.start(
mem::take(&mut self.sockets)
.into_iter()
.map(|t| (t.0, t.2))
.collect(),
handles,
);
// handle signals
if !self.no_signals {
Signals::start(self.server.clone());
}
// spawn the server builder future
let server = self.server.clone();
rt::spawn(self);
server
}
}
fn start_worker(&self, idx: usize, waker: WakerQueue) -> WorkerHandle {
let avail = WorkerAvailability::new(waker);
let services = self.services.iter().map(|v| v.clone_factory()).collect();
ServerWorker::start(idx, services, avail, self.worker_config)
}
fn handle_cmd(&mut self, item: ServerCommand) {
match item {
ServerCommand::Pause(tx) => {
self.accept.wake(WakerInterest::Pause);
let _ = tx.send(());
}
ServerCommand::Resume(tx) => {
self.accept.wake(WakerInterest::Resume);
let _ = tx.send(());
}
ServerCommand::Signal(sig) => {
// Signals support
// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
match sig {
Signal::Int => {
info!("SIGINT received, exiting");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: false,
completion: None,
})
}
Signal::Term => {
info!("SIGTERM received, stopping");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: true,
completion: None,
})
}
Signal::Quit => {
info!("SIGQUIT received, exiting");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: false,
completion: None,
})
}
_ => (),
}
}
ServerCommand::Notify(tx) => {
self.notify.push(tx);
}
ServerCommand::Stop {
graceful,
completion,
} => {
let exit = self.exit;
// stop accept thread
self.accept.wake(WakerInterest::Stop);
let notify = std::mem::take(&mut self.notify);
// stop workers
if !self.handles.is_empty() && graceful {
let iter = self
.handles
.iter()
.map(move |worker| worker.1.stop(graceful))
.collect();
let fut = join_all(iter);
rt::spawn(async move {
let _ = fut.await;
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
if exit {
rt::spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
});
}
});
} else {
// we need to stop system if server was spawned
if self.exit {
rt::spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
});
}
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
}
}
ServerCommand::WorkerFaulted(idx) => {
let mut found = false;
for i in 0..self.handles.len() {
if self.handles[i].0 == idx {
self.handles.swap_remove(i);
found = true;
break;
}
}
if found {
error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.handles.len();
'found: loop {
for i in 0..self.handles.len() {
if self.handles[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
}
break;
}
let handle = self.start_worker(new_idx, self.accept.waker_owned());
self.handles.push((new_idx, handle.clone()));
self.accept.wake(WakerInterest::Worker(handle));
}
}
}
}
}
impl Future for ServerBuilder {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match Pin::new(&mut self.cmd).poll_recv(cx) {
Poll::Ready(Some(it)) => self.as_mut().get_mut().handle_cmd(it),
_ => return Poll::Pending,
}
}
}
}
pub(super) fn bind_addr<S: ToSocketAddrs>(
addr: S,
backlog: u32,
) -> io::Result<Vec<MioTcpListener>> {
let mut err = None;
let mut succ = false;
let mut sockets = Vec::new();
for addr in addr.to_socket_addrs()? {
match create_tcp_listener(addr, backlog) {
Ok(lst) => {
succ = true;
sockets.push(lst);
}
Err(e) => err = Some(e),
}
}
if !succ {
if let Some(e) = err.take() {
Err(e)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
}
} else {
Ok(sockets)
}
}
fn create_tcp_listener(addr: StdSocketAddr, backlog: u32) -> io::Result<MioTcpListener> {
let socket = match addr {
StdSocketAddr::V4(_) => MioTcpSocket::new_v4()?,
StdSocketAddr::V6(_) => MioTcpSocket::new_v6()?,
};
socket.set_reuseaddr(true)?;
socket.bind(addr)?;
socket.listen(backlog)
}

View File

@@ -1,287 +0,0 @@
use std::collections::HashMap;
use std::future::Future;
use std::{fmt, io};
use actix_rt::net::TcpStream;
use actix_service::{
fn_service, IntoServiceFactory as IntoBaseServiceFactory,
ServiceFactory as BaseServiceFactory,
};
use actix_utils::counter::CounterGuard;
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::builder::bind_addr;
use crate::service::{BoxedServerService, InternalServiceFactory, StreamService};
use crate::socket::{MioStream, MioTcpListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::{ready, Token};
pub struct ServiceConfig {
pub(crate) services: Vec<(String, MioTcpListener)>,
pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
pub(crate) threads: usize,
pub(crate) backlog: u32,
}
impl ServiceConfig {
pub(super) fn new(threads: usize, backlog: u32) -> ServiceConfig {
ServiceConfig {
threads,
backlog,
services: Vec::new(),
apply: None,
}
}
/// Set number of workers to start.
///
/// By default, the server uses the number of available logical CPUs as the worker
/// count.
pub fn workers(&mut self, num: usize) {
self.threads = num;
}
/// Add new service to server
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
where
U: ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
self._listen(name.as_ref(), lst);
}
Ok(self)
}
/// Add new service to server
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: StdTcpListener) -> &mut Self {
self._listen(name, MioTcpListener::from_std(lst))
}
/// Register a service configuration function. This function gets called
/// during worker runtime configuration and is executed in the worker thread.
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
self.apply = Some(Box::new(f));
Ok(())
}
fn _listen<N: AsRef<str>>(&mut self, name: N, lst: MioTcpListener) -> &mut Self {
if self.apply.is_none() {
self.apply = Some(Box::new(not_configured));
}
self.services.push((name.as_ref().to_string(), lst));
self
}
}
pub(super) struct ConfiguredService {
rt: Box<dyn ServiceRuntimeConfiguration>,
names: HashMap<Token, (String, StdSocketAddr)>,
topics: HashMap<String, Token>,
services: Vec<Token>,
}
impl ConfiguredService {
pub(super) fn new(rt: Box<dyn ServiceRuntimeConfiguration>) -> Self {
ConfiguredService {
rt,
names: HashMap::new(),
topics: HashMap::new(),
services: Vec::new(),
}
}
pub(super) fn stream(&mut self, token: Token, name: String, addr: StdSocketAddr) {
self.names.insert(token, (name.clone(), addr));
self.topics.insert(name, token);
self.services.push(token);
}
}
impl InternalServiceFactory for ConfiguredService {
fn name(&self, token: Token) -> &str {
&self.names[&token].0
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
rt: self.rt.clone(),
names: self.names.clone(),
topics: self.topics.clone(),
services: self.services.clone(),
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
// configure services
let mut rt = ServiceRuntime::new(self.topics.clone());
self.rt.configure(&mut rt);
rt.validate();
let mut names = self.names.clone();
let tokens = self.services.clone();
// construct services
Box::pin(async move {
let mut services = rt.services;
// TODO: Proper error handling here
for f in rt.onstart.into_iter() {
f.await;
}
let mut res = vec![];
for token in tokens {
if let Some(srv) = services.remove(&token) {
let newserv = srv.new_service(());
match newserv.await {
Ok(serv) => {
res.push((token, serv));
}
Err(_) => {
error!("Can not construct service");
return Err(());
}
}
} else {
let name = names.remove(&token).unwrap().0;
res.push((
token,
Box::new(StreamService::new(fn_service(move |_: TcpStream| {
error!("Service {:?} is not configured", name);
ready::<Result<_, ()>>(Ok(()))
}))),
));
};
}
Ok(res)
})
}
}
pub(super) trait ServiceRuntimeConfiguration: Send {
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration>;
fn configure(&self, rt: &mut ServiceRuntime);
}
impl<F> ServiceRuntimeConfiguration for F
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration> {
Box::new(self.clone())
}
fn configure(&self, rt: &mut ServiceRuntime) {
(self)(rt)
}
}
fn not_configured(_: &mut ServiceRuntime) {
error!("Service is not configured");
}
pub struct ServiceRuntime {
names: HashMap<String, Token>,
services: HashMap<Token, BoxedNewService>,
onstart: Vec<LocalBoxFuture<'static, ()>>,
}
impl ServiceRuntime {
fn new(names: HashMap<String, Token>) -> Self {
ServiceRuntime {
names,
services: HashMap::new(),
onstart: Vec::new(),
}
}
fn validate(&self) {
for (name, token) in &self.names {
if !self.services.contains_key(&token) {
error!("Service {:?} is not configured", name);
}
}
}
/// Register service.
///
/// The name of the service must be registered during the configuration stage with the
/// *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
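///
/// A sketch (illustrative; `"echo"` must match a name registered via `bind`/`listen`, and
/// `runtime` is the `&mut ServiceRuntime` passed to the configuration closure):
/// ```ignore
/// use actix_rt::net::TcpStream;
/// use actix_service::fn_service;
///
/// runtime.service("echo", fn_service(|_: TcpStream| async { Ok::<_, ()>(()) }));
/// ```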
pub fn service<T, F>(&mut self, name: &str, service: F)
where
F: IntoBaseServiceFactory<T, TcpStream>,
T: BaseServiceFactory<TcpStream, Config = ()> + 'static,
T::Future: 'static,
T::Service: 'static,
T::InitError: fmt::Debug,
{
// let name = name.to_owned();
if let Some(token) = self.names.get(name) {
self.services.insert(
*token,
Box::new(ServiceFactory {
inner: service.into_factory(),
}),
);
} else {
panic!("Unknown service: {:?}", name);
}
}
/// Execute a future before service initialization.
pub fn on_start<F>(&mut self, fut: F)
where
F: Future<Output = ()> + 'static,
{
self.onstart.push(Box::pin(fut))
}
}
type BoxedNewService = Box<
dyn BaseServiceFactory<
(Option<CounterGuard>, MioStream),
Response = (),
Error = (),
InitError = (),
Config = (),
Service = BoxedServerService,
Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>,
>,
>;
struct ServiceFactory<T> {
inner: T,
}
impl<T> BaseServiceFactory<(Option<CounterGuard>, MioStream)> for ServiceFactory<T>
where
T: BaseServiceFactory<TcpStream, Config = ()>,
T::Future: 'static,
T::Service: 'static,
T::Error: 'static,
T::InitError: fmt::Debug + 'static,
{
type Response = ();
type Error = ();
type Config = ();
type Service = BoxedServerService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>;
fn new_service(&self, _: ()) -> Self::Future {
let fut = self.inner.new_service(());
Box::pin(async move {
match fut.await {
Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
Err(e) => {
error!("Can not construct service: {:?}", e);
Err(())
}
}
})
}
}

View File

@@ -1,143 +0,0 @@
//! General purpose TCP server.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod accept;
mod builder;
mod config;
mod server;
mod service;
mod signals;
mod socket;
mod test_server;
mod waker_queue;
mod worker;
pub use self::builder::ServerBuilder;
pub use self::config::{ServiceConfig, ServiceRuntime};
pub use self::server::Server;
pub use self::service::ServiceFactory;
pub use self::test_server::TestServer;
#[doc(hidden)]
pub use self::socket::FromStream;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Socket ID token
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) struct Token(usize);
impl Default for Token {
fn default() -> Self {
Self::new()
}
}
impl Token {
fn new() -> Self {
Self(0)
}
pub(crate) fn next(&mut self) -> Token {
let token = Token(self.0);
self.0 += 1;
token
}
}
/// Start server building process
pub fn new() -> ServerBuilder {
ServerBuilder::default()
}
// temporary Ready type for std::future::{ready, Ready}; can be removed when MSRV surpasses 1.48
#[doc(hidden)]
pub struct Ready<T>(Option<T>);
pub(crate) fn ready<T>(t: T) -> Ready<T> {
Ready(Some(t))
}
impl<T> Unpin for Ready<T> {}
impl<T> Future for Ready<T> {
type Output = T;
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(self.get_mut().0.take().unwrap())
}
}
// a poor man's join future. the joined future is only used when starting/stopping the server.
// pin_project and pinned futures are overkill for this task.
pub(crate) struct JoinAll<T> {
fut: Vec<JoinFuture<T>>,
}
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + 'static>) -> JoinAll<T> {
let fut = fut
.into_iter()
.map(|f| JoinFuture::Future(Box::pin(f)))
.collect();
JoinAll { fut }
}
enum JoinFuture<T> {
Future(Pin<Box<dyn Future<Output = T>>>),
Result(Option<T>),
}
impl<T> Unpin for JoinAll<T> {}
impl<T> Future for JoinAll<T> {
type Output = Vec<T>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut ready = true;
let this = self.get_mut();
for fut in this.fut.iter_mut() {
if let JoinFuture::Future(f) = fut {
match f.as_mut().poll(cx) {
Poll::Ready(t) => {
*fut = JoinFuture::Result(Some(t));
}
Poll::Pending => ready = false,
}
}
}
if ready {
let mut res = Vec::new();
for fut in this.fut.iter_mut() {
if let JoinFuture::Result(f) = fut {
res.push(f.take().unwrap());
}
}
Poll::Ready(res)
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[actix_rt::test]
async fn test_join_all() {
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
let mut res = join_all(futs).await.into_iter();
assert_eq!(Ok(1), res.next().unwrap());
assert_eq!(Err(3), res.next().unwrap());
assert_eq!(Ok(9), res.next().unwrap());
}
}

View File

@@ -1,112 +0,0 @@
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot;
use crate::builder::ServerBuilder;
use crate::signals::Signal;
#[derive(Debug)]
pub(crate) enum ServerCommand {
WorkerFaulted(usize),
Pause(oneshot::Sender<()>),
Resume(oneshot::Sender<()>),
Signal(Signal),
/// Whether to try and shut down gracefully
Stop {
graceful: bool,
completion: Option<oneshot::Sender<()>>,
},
/// Notify of server stop
Notify(oneshot::Sender<()>),
}
#[derive(Debug)]
pub struct Server(
UnboundedSender<ServerCommand>,
Option<oneshot::Receiver<()>>,
);
impl Server {
pub(crate) fn new(tx: UnboundedSender<ServerCommand>) -> Self {
Server(tx, None)
}
/// Start server building process
pub fn build() -> ServerBuilder {
ServerBuilder::default()
}
pub(crate) fn signal(&self, sig: Signal) {
let _ = self.0.send(ServerCommand::Signal(sig));
}
pub(crate) fn worker_faulted(&self, idx: usize) {
let _ = self.0.send(ServerCommand::WorkerFaulted(idx));
}
/// Pause accepting incoming connections
///
/// If the socket contains pending connections, they might be dropped.
/// All open connections remain active.
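///
/// A sketch (illustrative; `server` is the controller returned by `ServerBuilder::run`):
/// ```ignore
/// server.pause().await;
/// // accepting is paused; established connections keep working
/// server.resume().await;
/// ```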
pub fn pause(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.send(ServerCommand::Pause(tx));
async {
let _ = rx.await;
}
}
/// Resume accepting incoming connections
pub fn resume(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.send(ServerCommand::Resume(tx));
async {
let _ = rx.await;
}
}
/// Stop incoming connection processing, stop all workers and exit.
///
/// If the server was started with the `spawn()` method, the spawned thread gets terminated.
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.send(ServerCommand::Stop {
graceful,
completion: Some(tx),
});
async {
let _ = rx.await;
}
}
}
impl Clone for Server {
fn clone(&self) -> Self {
Self(self.0.clone(), None)
}
}
impl Future for Server {
type Output = io::Result<()>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
if this.1.is_none() {
let (tx, rx) = oneshot::channel();
if this.0.send(ServerCommand::Notify(tx)).is_err() {
return Poll::Ready(Ok(()));
}
this.1 = Some(rx);
}
match Pin::new(this.1.as_mut().unwrap()).poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(_) => Poll::Ready(Ok(())),
}
}
}

View File

@@ -1,157 +0,0 @@
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory as BaseServiceFactory};
use actix_utils::counter::CounterGuard;
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::socket::{FromStream, MioStream};
use crate::{ready, Ready, Token};
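/// Factory that produces a fresh service factory for each server worker.
///
/// A blanket implementation covers closures returning an `actix_service::ServiceFactory` with
/// `Config = ()`, so a sketch like the following satisfies the bound (illustrative; not from the
/// original source):
/// ```ignore
/// use actix_rt::net::TcpStream;
/// use actix_service::fn_service;
///
/// let factory = || fn_service(|_: TcpStream| async { Ok::<_, ()>(()) });
/// ```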
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
type Factory: BaseServiceFactory<Stream, Config = ()>;
fn create(&self) -> Self::Factory;
}
pub(crate) trait InternalServiceFactory: Send {
fn name(&self, token: Token) -> &str;
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>;
}
pub(crate) type BoxedServerService = Box<
dyn Service<
(Option<CounterGuard>, MioStream),
Response = (),
Error = (),
Future = Ready<Result<(), ()>>,
>,
>;
pub(crate) struct StreamService<S, I> {
service: S,
_phantom: PhantomData<I>,
}
impl<S, I> StreamService<S, I> {
pub(crate) fn new(service: S) -> Self {
StreamService {
service,
_phantom: PhantomData,
}
}
}
impl<S, I> Service<(Option<CounterGuard>, MioStream)> for StreamService<S, I>
where
S: Service<I>,
S::Future: 'static,
S::Error: 'static,
I: FromStream,
{
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(|_| ())
}
fn call(&self, (guard, req): (Option<CounterGuard>, MioStream)) -> Self::Future {
ready(match FromStream::from_mio(req) {
Ok(stream) => {
let f = self.service.call(stream);
actix_rt::spawn(async move {
let _ = f.await;
drop(guard);
});
Ok(())
}
Err(e) => {
error!("Can not convert to an async tcp stream: {}", e);
Err(())
}
})
}
}
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
name: String,
inner: F,
token: Token,
addr: SocketAddr,
_t: PhantomData<Io>,
}
impl<F, Io> StreamNewService<F, Io>
where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
pub(crate) fn create(
name: String,
token: Token,
inner: F,
addr: SocketAddr,
) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
name,
token,
inner,
addr,
_t: PhantomData,
})
}
}
impl<F, Io> InternalServiceFactory for StreamNewService<F, Io>
where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
fn name(&self, _: Token) -> &str {
&self.name
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
name: self.name.clone(),
inner: self.inner.clone(),
token: self.token,
addr: self.addr,
_t: PhantomData,
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
let token = self.token;
let fut = self.inner.create().new_service(());
Box::pin(async move {
match fut.await {
Ok(inner) => {
let service = Box::new(StreamService::new(inner)) as _;
Ok(vec![(token, service)])
}
Err(_) => Err(()),
}
})
}
}
impl<F, T, I> ServiceFactory<I> for F
where
F: Fn() -> T + Send + Clone + 'static,
T: BaseServiceFactory<I, Config = ()>,
I: FromStream,
{
type Factory = T;
fn create(&self) -> T {
(self)()
}
}
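An illustrative sketch (not from the original file) of what the blanket impl above enables: a plain closure that returns an `actix_service` factory can be passed wherever this crate's `ServiceFactory` is expected, for example to `ServerBuilder::bind`. The name and address below are assumptions:

```rust
use actix_rt::net::TcpStream;
use actix_server::{Server, ServerBuilder};
use actix_service::fn_service;

fn bind_example() -> std::io::Result<ServerBuilder> {
    Server::build().bind("example", ("127.0.0.1", 0), || {
        // `|| fn_service(..)` satisfies `ServiceFactory<TcpStream>` via the blanket impl.
        fn_service(|stream: TcpStream| async move {
            println!("new connection: {:?}", stream.peer_addr());
            Ok::<_, ()>(())
        })
    })
}
```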

View File

@@ -1,94 +0,0 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use crate::server::Server;
/// Different types of process signals
#[allow(dead_code)]
#[derive(PartialEq, Clone, Copy, Debug)]
pub(crate) enum Signal {
/// SIGHUP
Hup,
/// SIGINT
Int,
/// SIGTERM
Term,
/// SIGQUIT
Quit,
}
pub(crate) struct Signals {
srv: Server,
#[cfg(not(unix))]
signals: futures_core::future::LocalBoxFuture<'static, std::io::Result<()>>,
#[cfg(unix)]
signals: Vec<(Signal, actix_rt::signal::unix::Signal)>,
}
impl Signals {
pub(crate) fn start(srv: Server) {
#[cfg(not(unix))]
{
actix_rt::spawn(Signals {
srv,
signals: Box::pin(actix_rt::signal::ctrl_c()),
});
}
#[cfg(unix)]
{
use actix_rt::signal::unix;
let sig_map = [
(unix::SignalKind::interrupt(), Signal::Int),
(unix::SignalKind::hangup(), Signal::Hup),
(unix::SignalKind::terminate(), Signal::Term),
(unix::SignalKind::quit(), Signal::Quit),
];
let signals = sig_map
.iter()
.filter_map(|(kind, sig)| {
unix::signal(*kind)
.map(|tokio_sig| (*sig, tokio_sig))
.map_err(|e| {
log::error!(
"Can not initialize stream handler for {:?} err: {}",
sig,
e
)
})
.ok()
})
.collect::<Vec<_>>();
actix_rt::spawn(Signals { srv, signals });
}
}
}
impl Future for Signals {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
#[cfg(not(unix))]
match self.signals.as_mut().poll(cx) {
Poll::Ready(_) => {
self.srv.signal(Signal::Int);
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
#[cfg(unix)]
{
for (sig, fut) in self.signals.iter_mut() {
if Pin::new(fut).poll_recv(cx).is_ready() {
let sig = *sig;
self.srv.signal(sig);
return Poll::Ready(());
}
}
Poll::Pending
}
}
}
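A small sketch (not part of the original file) of the same ctrl-c future this helper polls on non-Unix targets; it can also be awaited directly in application code:

```rust
#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // Resolves once the process receives Ctrl-C / SIGINT.
    actix_rt::signal::ctrl_c().await?;
    println!("received interrupt signal, shutting down");
    Ok(())
}
```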

View File

@@ -1,254 +0,0 @@
pub(crate) use std::net::{
SocketAddr as StdSocketAddr, TcpListener as StdTcpListener, ToSocketAddrs,
};
pub(crate) use mio::net::{TcpListener as MioTcpListener, TcpSocket as MioTcpSocket};
#[cfg(unix)]
pub(crate) use {
mio::net::UnixListener as MioUnixListener,
std::os::unix::net::UnixListener as StdUnixListener,
};
use std::{fmt, io};
use actix_rt::net::TcpStream;
use mio::event::Source;
use mio::net::TcpStream as MioTcpStream;
use mio::{Interest, Registry, Token};
#[cfg(windows)]
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
#[cfg(unix)]
use {
actix_rt::net::UnixStream,
mio::net::{SocketAddr as MioSocketAddr, UnixStream as MioUnixStream},
std::os::unix::io::{FromRawFd, IntoRawFd},
};
pub(crate) enum MioListener {
Tcp(MioTcpListener),
#[cfg(unix)]
Uds(MioUnixListener),
}
impl MioListener {
pub(crate) fn local_addr(&self) -> SocketAddr {
match *self {
MioListener::Tcp(ref lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
#[cfg(unix)]
MioListener::Uds(ref lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
}
}
pub(crate) fn accept(&self) -> io::Result<Option<(MioStream, SocketAddr)>> {
match *self {
MioListener::Tcp(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Tcp(stream), SocketAddr::Tcp(addr)))),
#[cfg(unix)]
MioListener::Uds(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Uds(stream), SocketAddr::Uds(addr)))),
}
}
}
impl Source for MioListener {
fn register(
&mut self,
registry: &Registry,
token: Token,
interests: Interest,
) -> io::Result<()> {
match *self {
MioListener::Tcp(ref mut lst) => lst.register(registry, token, interests),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => lst.register(registry, token, interests),
}
}
fn reregister(
&mut self,
registry: &Registry,
token: Token,
interests: Interest,
) -> io::Result<()> {
match *self {
MioListener::Tcp(ref mut lst) => lst.reregister(registry, token, interests),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => lst.reregister(registry, token, interests),
}
}
fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
match *self {
MioListener::Tcp(ref mut lst) => lst.deregister(registry),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => {
let res = lst.deregister(registry);
// cleanup file path
if let Ok(addr) = lst.local_addr() {
if let Some(path) = addr.as_pathname() {
let _ = std::fs::remove_file(path);
}
}
res
}
}
}
}
impl From<StdTcpListener> for MioListener {
fn from(lst: StdTcpListener) -> Self {
MioListener::Tcp(MioTcpListener::from_std(lst))
}
}
#[cfg(unix)]
impl From<StdUnixListener> for MioListener {
fn from(lst: StdUnixListener) -> Self {
MioListener::Uds(MioUnixListener::from_std(lst))
}
}
impl fmt::Debug for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
#[cfg(all(unix))]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
}
}
}
impl fmt::Display for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
#[cfg(unix)]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
}
}
}
pub(crate) enum SocketAddr {
Tcp(StdSocketAddr),
#[cfg(unix)]
Uds(MioSocketAddr),
}
impl fmt::Display for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
#[cfg(unix)]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
impl fmt::Debug for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
#[cfg(unix)]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
#[derive(Debug)]
pub enum MioStream {
Tcp(MioTcpStream),
#[cfg(unix)]
Uds(MioUnixStream),
}
/// Helper trait for converting a Mio stream into a Tokio stream.
pub trait FromStream: Sized {
fn from_mio(sock: MioStream) -> io::Result<Self>;
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(unix)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a Mio stream to a Tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(windows)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is an in-place conversion from a Mio stream to a Tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(unix)]
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a Mio stream to a Tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn socket_addr() {
let addr = SocketAddr::Tcp("127.0.0.1:8080".parse().unwrap());
assert!(format!("{:?}", addr).contains("127.0.0.1:8080"));
assert_eq!(format!("{}", addr), "127.0.0.1:8080");
let addr: StdSocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = MioTcpSocket::new_v4().unwrap();
socket.set_reuseaddr(true).unwrap();
socket.bind(addr).unwrap();
let tcp = socket.listen(128).unwrap();
let lst = MioListener::Tcp(tcp);
assert!(format!("{:?}", lst).contains("TcpListener"));
assert!(format!("{}", lst).contains("127.0.0.1"));
}
#[test]
#[cfg(unix)]
fn uds() {
let _ = std::fs::remove_file("/tmp/sock.xxxxx");
if let Ok(socket) = MioUnixListener::bind("/tmp/sock.xxxxx") {
let addr = socket.local_addr().expect("Couldn't get local address");
let a = SocketAddr::Uds(addr);
assert!(format!("{:?}", a).contains("/tmp/sock.xxxxx"));
assert!(format!("{}", a).contains("/tmp/sock.xxxxx"));
let lst = MioListener::Uds(socket);
assert!(format!("{:?}", lst).contains("/tmp/sock.xxxxx"));
assert!(format!("{}", lst).contains("/tmp/sock.xxxxx"));
}
}
}

View File

@@ -1,144 +0,0 @@
use std::sync::mpsc;
use std::{net, thread};
use actix_rt::{net::TcpStream, System};
use crate::{Server, ServerBuilder, ServiceFactory};
/// The `TestServer` type.
///
/// `TestServer` is a very simple test server that simplifies the process of writing
/// integration tests for actix-net applications.
///
/// # Examples
///
/// ```
/// use actix_service::fn_service;
/// use actix_server::TestServer;
///
/// #[actix_rt::main]
/// async fn main() {
/// let srv = TestServer::with(|| fn_service(
/// |sock| async move {
/// println!("New connection: {:?}", sock);
/// Ok::<_, ()>(())
/// }
/// ));
///
/// println!("SOCKET: {:?}", srv.connect());
/// }
/// ```
pub struct TestServer;
/// Test server runtime
pub struct TestServerRuntime {
addr: net::SocketAddr,
host: String,
port: u16,
system: System,
}
impl TestServer {
/// Start a new server using a server builder
pub fn start<F>(mut factory: F) -> TestServerRuntime
where
F: FnMut(ServerBuilder) -> ServerBuilder + Send + 'static,
{
let (tx, rx) = mpsc::channel();
// run server in separate thread
thread::spawn(move || {
let sys = System::new();
factory(Server::build()).workers(1).disable_signals().run();
tx.send(System::current()).unwrap();
sys.run()
});
let system = rx.recv().unwrap();
TestServerRuntime {
system,
addr: "127.0.0.1:0".parse().unwrap(),
host: "127.0.0.1".to_string(),
port: 0,
}
}
/// Start a new test server with an application factory
pub fn with<F: ServiceFactory<TcpStream>>(factory: F) -> TestServerRuntime {
let (tx, rx) = mpsc::channel();
// run server in separate thread
thread::spawn(move || {
let sys = System::new();
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
sys.block_on(async {
Server::build()
.listen("test", tcp, factory)
.unwrap()
.workers(1)
.disable_signals()
.run();
tx.send((System::current(), local_addr)).unwrap();
});
sys.run()
});
let (system, addr) = rx.recv().unwrap();
let host = format!("{}", addr.ip());
let port = addr.port();
TestServerRuntime {
system,
addr,
host,
port,
}
}
/// Get first available unused local address
pub fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let tcp = socket.listen(1024).unwrap();
tcp.local_addr().unwrap()
}
}
impl TestServerRuntime {
/// Test server host
pub fn host(&self) -> &str {
&self.host
}
/// Test server port
pub fn port(&self) -> u16 {
self.port
}
/// Get test server address
pub fn addr(&self) -> net::SocketAddr {
self.addr
}
/// Stop the test server
fn stop(&mut self) {
self.system.stop();
}
/// Connect to the server, returning a Tokio `TcpStream`
pub fn connect(&self) -> std::io::Result<TcpStream> {
TcpStream::from_std(net::TcpStream::connect(self.addr)?)
}
}
impl Drop for TestServerRuntime {
fn drop(&mut self) {
self.stop()
}
}
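In addition to `TestServer::with` shown in the doc comment above, `TestServer::start` accepts a closure over a `ServerBuilder`. A brief sketch (not from the original file; the name, address, and no-op service are assumptions):

```rust
use actix_rt::net::TcpStream;
use actix_server::TestServer;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() {
    let srv = TestServer::start(|builder| {
        builder
            .bind("example", ("127.0.0.1", 0), || {
                fn_service(|_sock: TcpStream| async { Ok::<_, ()>(()) })
            })
            .unwrap()
    });

    // Note: unlike `TestServer::with`, `start` does not report the bound address,
    // so `addr()` / `port()` are only meaningful for `with`-started servers.
    drop(srv);
}
```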

View File

@@ -1,89 +0,0 @@
use std::{
collections::VecDeque,
ops::Deref,
sync::{Arc, Mutex, MutexGuard},
};
use mio::{Registry, Token as MioToken, Waker};
use crate::worker::WorkerHandle;
/// Waker token for `mio::Poll` instance.
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);
/// A `mio::Waker` paired with a queue of `WakerInterest` values, used to wake up the `Accept`'s
/// `Poll` and tell it which interests to handle.
pub(crate) struct WakerQueue(Arc<(Waker, Mutex<VecDeque<WakerInterest>>)>);
impl Clone for WakerQueue {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl Deref for WakerQueue {
type Target = (Waker, Mutex<VecDeque<WakerInterest>>);
fn deref(&self) -> &Self::Target {
self.0.deref()
}
}
impl WakerQueue {
/// Construct a waker queue from the given `Poll`'s `Registry`.
///
/// A fixed `WAKER_TOKEN` is used to identify wake-ups; the `Poll` must match the event's token
/// against it to properly handle the queued `WakerInterest`.
pub(crate) fn new(registry: &Registry) -> std::io::Result<Self> {
let waker = Waker::new(registry, WAKER_TOKEN)?;
let queue = Mutex::new(VecDeque::with_capacity(16));
Ok(Self(Arc::new((waker, queue))))
}
/// Push a new interest to the queue and wake up the accept poll afterwards.
pub(crate) fn wake(&self, interest: WakerInterest) {
let (waker, queue) = self.deref();
queue
.lock()
.expect("Failed to lock WakerQueue")
.push_back(interest);
waker
.wake()
.unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e));
}
/// Get a MutexGuard of the waker queue.
pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
self.deref().1.lock().expect("Failed to lock WakerQueue")
}
/// Reset the waker queue so it does not grow infinitely.
pub(crate) fn reset(queue: &mut VecDeque<WakerInterest>) {
std::mem::swap(&mut VecDeque::<WakerInterest>::with_capacity(16), queue);
}
}
/// Types of interests to examine when the `Accept`'s `Poll` is woken up by the waker.
///
/// These interests should not be confused with `mio::Interest`; they are mostly not I/O related.
pub(crate) enum WakerInterest {
/// `WorkerAvailable` is an interest from a `Worker`, notifying `Accept` that a worker is
/// available and can accept new tasks.
WorkerAvailable,
/// `Pause`, `Resume`, and `Stop` interests come from the `ServerBuilder` future. It listens for
/// `ServerCommand`s and notifies `Accept` to perform exactly these tasks.
Pause,
Resume,
Stop,
/// `Timer` is an interest sent from a delayed future. When an error happens while accepting a
/// connection, `Accept` temporarily deregisters the socket listeners, then wakes up the poll and
/// registers them again once the delayed future resolves.
Timer,
/// `Worker` is an interest that occurs after a worker runs into a faulted state (determined by
/// whether work can be sent to it successfully). `Accept` is woken up to add the new
/// `WorkerHandle`.
Worker(WorkerHandle),
}
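A conceptual sketch (not from the original sources) of how the accept loop is expected to drain this queue when its `Poll` sees an event on `WAKER_TOKEN`; the handler bodies are placeholders:

```rust
fn drain_waker_queue(waker_queue: &WakerQueue) {
    let mut guard = waker_queue.guard();

    while let Some(interest) = guard.pop_front() {
        match interest {
            WakerInterest::WorkerAvailable => { /* retry deferred connections */ }
            WakerInterest::Pause => { /* deregister all listeners */ }
            WakerInterest::Resume => { /* re-register all listeners */ }
            WakerInterest::Stop => { /* exit the accept loop */ }
            WakerInterest::Timer => { /* re-register listeners after the error backoff */ }
            WakerInterest::Worker(_handle) => { /* track the restarted worker */ }
        }
    }

    // Shrink the queue back to its initial capacity so it does not grow unboundedly.
    WakerQueue::reset(&mut guard);
}
```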

View File

@@ -1,484 +0,0 @@
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use actix_rt::time::{sleep_until, Instant, Sleep};
use actix_rt::{spawn, Arbiter};
use actix_utils::counter::Counter;
use futures_core::future::LocalBoxFuture;
use log::{error, info, trace};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot;
use crate::service::{BoxedServerService, InternalServiceFactory};
use crate::socket::{MioStream, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::{join_all, Token};
pub(crate) struct WorkerCommand(Conn);
/// Stop worker message. Returns `true` on successful shutdown and `false` if some connections
/// are still alive.
pub(crate) struct StopCommand {
graceful: bool,
result: oneshot::Sender<bool>,
}
#[derive(Debug)]
pub(crate) struct Conn {
pub io: MioStream,
pub token: Token,
pub peer: Option<SocketAddr>,
}
static MAX_CONNS: AtomicUsize = AtomicUsize::new(25600);
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
///
/// By default, the maximum is set to 25,600 concurrent connections per worker.
pub fn max_concurrent_connections(num: usize) {
MAX_CONNS.store(num, Ordering::Relaxed);
}
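A one-line usage sketch (not part of the original file); the limit is process-wide and is normally adjusted once, before the server is started:

```rust
fn configure_limits() {
    // Halve the default per-worker limit of 25,600 concurrent connections.
    max_concurrent_connections(12_800);
}
```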
thread_local! {
static MAX_CONNS_COUNTER: Counter =
Counter::new(MAX_CONNS.load(Ordering::Relaxed));
}
pub(crate) fn num_connections() -> usize {
MAX_CONNS_COUNTER.with(|conns| conns.total())
}
// A handle to a worker that can send messages to the worker and share the worker's availability
// with other threads.
#[derive(Clone)]
pub(crate) struct WorkerHandle {
pub idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
}
impl WorkerHandle {
pub fn new(
idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
) -> Self {
WorkerHandle {
idx,
tx1,
tx2,
avail,
}
}
pub fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx1.send(WorkerCommand(msg)).map_err(|msg| msg.0 .0)
}
pub fn available(&self) -> bool {
self.avail.available()
}
pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (result, rx) = oneshot::channel();
let _ = self.tx2.send(StopCommand { graceful, result });
rx
}
}
#[derive(Clone)]
pub(crate) struct WorkerAvailability {
waker: WakerQueue,
available: Arc<AtomicBool>,
}
impl WorkerAvailability {
pub fn new(waker: WakerQueue) -> Self {
WorkerAvailability {
waker,
available: Arc::new(AtomicBool::new(false)),
}
}
pub fn available(&self) -> bool {
self.available.load(Ordering::Acquire)
}
pub fn set(&self, val: bool) {
let old = self.available.swap(val, Ordering::Release);
// notify the accept loop when switching to available.
if !old && val {
self.waker.wake(WakerInterest::WorkerAvailable);
}
}
}
/// Service worker.
///
/// The worker accepts socket objects via an unbounded channel and starts stream processing.
pub(crate) struct ServerWorker {
rx: UnboundedReceiver<WorkerCommand>,
rx2: UnboundedReceiver<StopCommand>,
services: Vec<WorkerService>,
availability: WorkerAvailability,
conns: Counter,
factories: Vec<Box<dyn InternalServiceFactory>>,
state: WorkerState,
config: ServerWorkerConfig,
}
struct WorkerService {
factory: usize,
status: WorkerServiceStatus,
service: BoxedServerService,
}
impl WorkerService {
fn created(&mut self, service: BoxedServerService) {
self.service = service;
self.status = WorkerServiceStatus::Unavailable;
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum WorkerServiceStatus {
Available,
Unavailable,
Failed,
Restarting,
Stopping,
Stopped,
}
/// Config for worker behavior passed down from server builder.
#[derive(Copy, Clone)]
pub(crate) struct ServerWorkerConfig {
shutdown_timeout: Duration,
max_blocking_threads: usize,
}
impl Default for ServerWorkerConfig {
fn default() -> Self {
// 512 is the default max blocking thread count of the Tokio runtime.
let max_blocking_threads = std::cmp::max(512 / num_cpus::get(), 1);
Self {
shutdown_timeout: Duration::from_secs(30),
max_blocking_threads,
}
}
}
impl ServerWorkerConfig {
pub(crate) fn max_blocking_threads(&mut self, num: usize) {
self.max_blocking_threads = num;
}
pub(crate) fn shutdown_timeout(&mut self, dur: Duration) {
self.shutdown_timeout = dur;
}
}
impl ServerWorker {
pub(crate) fn start(
idx: usize,
factories: Vec<Box<dyn InternalServiceFactory>>,
availability: WorkerAvailability,
config: ServerWorkerConfig,
) -> WorkerHandle {
let (tx1, rx) = unbounded_channel();
let (tx2, rx2) = unbounded_channel();
let avail = availability.clone();
// Every worker runs in its own arbiter.
// Use a custom Tokio runtime builder to change the runtime's settings.
Arbiter::with_tokio_rt(move || {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.max_blocking_threads(config.max_blocking_threads)
.build()
.unwrap()
})
.spawn(async move {
availability.set(false);
let mut wrk = MAX_CONNS_COUNTER.with(move |conns| ServerWorker {
rx,
rx2,
availability,
factories,
config,
services: Vec::new(),
conns: conns.clone(),
state: WorkerState::Unavailable,
});
let fut = wrk
.factories
.iter()
.enumerate()
.map(|(idx, factory)| {
let fut = factory.create();
async move {
fut.await.map(|r| {
r.into_iter().map(|(t, s)| (idx, t, s)).collect::<Vec<_>>()
})
}
})
.collect::<Vec<_>>();
// A second spawn to make sure the worker future runs as a non-boxed future, since
// Arbiter::spawn would box the future before sending it to the arbiter.
spawn(async move {
let res: Result<Vec<_>, _> = join_all(fut).await.into_iter().collect();
match res {
Ok(services) => {
for item in services {
for (factory, token, service) in item {
assert_eq!(token.0, wrk.services.len());
wrk.services.push(WorkerService {
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
}
}
}
Err(e) => {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
}
}
wrk.await
});
});
WorkerHandle::new(idx, tx1, tx2, avail)
}
fn shutdown(&mut self, force: bool) {
if force {
self.services.iter_mut().for_each(|srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopped;
}
});
} else {
self.services.iter_mut().for_each(move |srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopping;
}
});
}
}
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (Token, usize)> {
let mut ready = self.conns.available(cx);
let mut failed = None;
for (idx, srv) in self.services.iter_mut().enumerate() {
if srv.status == WorkerServiceStatus::Available
|| srv.status == WorkerServiceStatus::Unavailable
{
match srv.service.poll_ready(cx) {
Poll::Ready(Ok(_)) => {
if srv.status == WorkerServiceStatus::Unavailable {
trace!(
"Service {:?} is available",
self.factories[srv.factory].name(Token(idx))
);
srv.status = WorkerServiceStatus::Available;
}
}
Poll::Pending => {
ready = false;
if srv.status == WorkerServiceStatus::Available {
trace!(
"Service {:?} is unavailable",
self.factories[srv.factory].name(Token(idx))
);
srv.status = WorkerServiceStatus::Unavailable;
}
}
Poll::Ready(Err(_)) => {
error!(
"Service {:?} readiness check returned error, restarting",
self.factories[srv.factory].name(Token(idx))
);
failed = Some((Token(idx), srv.factory));
srv.status = WorkerServiceStatus::Failed;
}
}
}
}
if let Some(idx) = failed {
Err(idx)
} else {
Ok(ready)
}
}
}
enum WorkerState {
Available,
Unavailable,
Restarting(
usize,
Token,
LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>,
),
Shutdown(
Pin<Box<Sleep>>,
Pin<Box<Sleep>>,
Option<oneshot::Sender<bool>>,
),
}
impl Future for ServerWorker {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// `StopCommand` message handler
if let Poll::Ready(Some(StopCommand { graceful, result })) =
Pin::new(&mut self.rx2).poll_recv(cx)
{
self.availability.set(false);
let num = num_connections();
if num == 0 {
info!("Shutting down worker, 0 connections");
let _ = result.send(true);
return Poll::Ready(());
} else if graceful {
self.shutdown(false);
let num = num_connections();
if num != 0 {
info!("Graceful worker shutdown, {} connections", num);
self.state = WorkerState::Shutdown(
Box::pin(sleep_until(Instant::now() + Duration::from_secs(1))),
Box::pin(sleep_until(Instant::now() + self.config.shutdown_timeout)),
Some(result),
);
} else {
let _ = result.send(true);
return Poll::Ready(());
}
} else {
info!("Force shutdown worker, {} connections", num);
self.shutdown(true);
let _ = result.send(false);
return Poll::Ready(());
}
}
match self.state {
WorkerState::Unavailable => match self.check_readiness(cx) {
Ok(true) => {
self.state = WorkerState::Available;
self.availability.set(true);
self.poll(cx)
}
Ok(false) => Poll::Pending,
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
self.poll(cx)
}
},
WorkerState::Restarting(idx, token, ref mut fut) => {
match fut.as_mut().poll(cx) {
Poll::Ready(Ok(item)) => {
// only interested in the first item?
if let Some((token, service)) = item.into_iter().next() {
trace!(
"Service {:?} has been restarted",
self.factories[idx].name(token)
);
self.services[token.0].created(service);
self.state = WorkerState::Unavailable;
return self.poll(cx);
}
}
Poll::Ready(Err(_)) => {
panic!(
"Can not restart {:?} service",
self.factories[idx].name(token)
);
}
Poll::Pending => return Poll::Pending,
}
self.poll(cx)
}
WorkerState::Shutdown(ref mut t1, ref mut t2, ref mut tx) => {
let num = num_connections();
if num == 0 {
let _ = tx.take().unwrap().send(true);
Arbiter::current().stop();
return Poll::Ready(());
}
// check graceful timeout
if Pin::new(t2).poll(cx).is_ready() {
let _ = tx.take().unwrap().send(false);
self.shutdown(true);
Arbiter::current().stop();
return Poll::Ready(());
}
// sleep for 1 second and then check again
if t1.as_mut().poll(cx).is_ready() {
*t1 = Box::pin(sleep_until(Instant::now() + Duration::from_secs(1)));
let _ = t1.as_mut().poll(cx);
}
Poll::Pending
}
// actively poll stream and handle worker command
WorkerState::Available => loop {
match self.check_readiness(cx) {
Ok(true) => (),
Ok(false) => {
trace!("Worker is unavailable");
self.availability.set(false);
self.state = WorkerState::Unavailable;
return self.poll(cx);
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.availability.set(false);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
return self.poll(cx);
}
}
match Pin::new(&mut self.rx).poll_recv(cx) {
// handle incoming io stream
Poll::Ready(Some(WorkerCommand(msg))) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.service
.call((Some(guard), msg.io));
}
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(()),
};
},
}
}
}

View File

@@ -1,190 +0,0 @@
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::sync::{mpsc, Arc};
use std::{net, thread, time};
use actix_server::Server;
use actix_service::fn_service;
use futures_util::future::{lazy, ok};
fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let tcp = socket.listen(32).unwrap();
tcp.local_addr().unwrap()
}
#[test]
fn test_bind() {
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new();
let srv = sys.block_on(lazy(|_| {
Server::build()
.workers(1)
.disable_signals()
.bind("test", addr, move || fn_service(|_| ok::<_, ()>(())))
.unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
}
#[test]
fn test_listen() {
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new();
let lst = net::TcpListener::bind(addr).unwrap();
sys.block_on(async {
Server::build()
.disable_signals()
.workers(1)
.listen("test", lst, move || fn_service(|_| ok::<_, ()>(())))
.unwrap()
.run();
let _ = tx.send(actix_rt::System::current());
});
let _ = sys.run();
});
let sys = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
}
#[test]
#[cfg(unix)]
fn test_start() {
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use bytes::Bytes;
use futures_util::sink::SinkExt;
use std::io::Read;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new();
let srv = sys.block_on(lazy(|_| {
Server::build()
.backlog(100)
.disable_signals()
.bind("test", addr, move || {
fn_service(|io: TcpStream| async move {
let mut f = Framed::new(io, BytesCodec);
f.send(Bytes::from_static(b"test")).await.unwrap();
Ok::<_, ()>(())
})
})
.unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (srv, sys) = rx.recv().unwrap();
let mut buf = [1u8; 4];
let mut conn = net::TcpStream::connect(addr).unwrap();
let _ = conn.read_exact(&mut buf);
assert_eq!(buf, b"test"[..]);
// pause
let _ = srv.pause();
thread::sleep(time::Duration::from_millis(200));
let mut conn = net::TcpStream::connect(addr).unwrap();
conn.set_read_timeout(Some(time::Duration::from_millis(100)))
.unwrap();
let res = conn.read_exact(&mut buf);
assert!(res.is_err());
// resume
let _ = srv.resume();
thread::sleep(time::Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok());
let mut buf = [0u8; 4];
let mut conn = net::TcpStream::connect(addr).unwrap();
let _ = conn.read_exact(&mut buf);
assert_eq!(buf, b"test"[..]);
// stop
let _ = srv.stop(false);
thread::sleep(time::Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_err());
thread::sleep(time::Duration::from_millis(100));
sys.stop();
let _ = h.join();
}
#[test]
fn test_configure() {
let addr1 = unused_addr();
let addr2 = unused_addr();
let addr3 = unused_addr();
let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = num.clone();
let h = thread::spawn(move || {
let num = num2.clone();
let sys = actix_rt::System::new();
let srv = sys.block_on(lazy(|_| {
Server::build()
.disable_signals()
.configure(move |cfg| {
let num = num.clone();
let lst = net::TcpListener::bind(addr3).unwrap();
cfg.bind("addr1", addr1)
.unwrap()
.bind("addr2", addr2)
.unwrap()
.listen("addr3", lst)
.apply(move |rt| {
let num = num.clone();
rt.service("addr1", fn_service(|_| ok::<_, ()>(())));
rt.service("addr3", fn_service(|_| ok::<_, ()>(())));
rt.on_start(lazy(move |_| {
let _ = num.fetch_add(1, Relaxed);
}))
})
})
.unwrap()
.workers(1)
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr1).is_ok());
assert!(net::TcpStream::connect(addr2).is_ok());
assert!(net::TcpStream::connect(addr3).is_ok());
assert_eq!(num.load(Relaxed), 1);
sys.stop();
let _ = h.join();
}

View File

@@ -1,319 +0,0 @@
# Changes
## Unreleased - 2021-xx-xx
## 2.0.0-beta.4 - 2021-02-04
* `Service::poll_ready` and `Service::call` receive `&self`. [#247]
* `apply_fn` and `apply_fn_factory` now receive `Fn(Req, &Service)` function type. [#247]
* `apply_cfg` and `apply_cfg_factory` now receive `Fn(Req, &Service)` function type. [#247]
* `fn_service` and friends now receive `Fn(Req)` function type. [#247]
[#247]: https://github.com/actix/actix-net/pull/247
## 2.0.0-beta.3 - 2021-01-09
* The `forward_ready!` macro converts errors. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 2.0.0-beta.2 - 2021-01-03
* Remove redundant type parameter from `map_config`.
## 2.0.0-beta.1 - 2020-12-28
* `Service`, other traits, and many type signatures now take the request type as a type
  parameter instead of an associated type; see the sketch at the end of this entry. [#232]
* Add `always_ready!` and `forward_ready!` macros. [#233]
* Crate is now `no_std`. [#233]
* Migrate pin projections to `pin-project-lite`. [#233]
* Remove `AndThenApplyFn` and Pipeline `and_then_apply_fn`. Use the
`.and_then(apply_fn(...))` construction. [#233]
* Move non-vital methods to `ServiceExt` and `ServiceFactoryExt` extension traits. [#235]
[#232]: https://github.com/actix/actix-net/pull/232
[#233]: https://github.com/actix/actix-net/pull/233
[#235]: https://github.com/actix/actix-net/pull/235
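An illustrative sketch of the request-type-parameter change described above (not an exact excerpt from the crate):

Before:

```rust
impl Service for MyService {
    type Request = MyRequest;
    type Response = MyResponse;
    // ...
}
```

After:

```rust
impl Service<MyRequest> for MyService {
    type Response = MyResponse;
    // ...
}
```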
## 1.0.6 - 2020-08-09
### Fixed
* Removed unsound custom Cell implementation that allowed obtaining several mutable references to
the same data, which is undefined behavior in Rust and could lead to violations of memory safety. External code could obtain several mutable references to the same data through
service combinators. Attempts to acquire several mutable references to the same data will instead
result in a panic.
## [1.0.5] - 2020-01-16
### Fixed
* Fixed unsoundness in .and_then()/.then() service combinators
## [1.0.4] - 2020-01-15
### Fixed
* Revert 1.0.3 change
## [1.0.3] - 2020-01-15
### Fixed
* Fixed unsoundness in `AndThenService` impl
## [1.0.2] - 2020-01-08
### Added
* Add `into_service` helper function
## [1.0.1] - 2019-12-22
### Changed
* `map_config()` and `unit_config()` accept `IntoServiceFactory` type
## [1.0.0] - 2019-12-11
### Added
* Add Clone impl for Apply service
## [1.0.0-alpha.4] - 2019-12-08
### Changed
* Renamed `service_fn` to `fn_service`
* Renamed `factory_fn` to `fn_factory`
* Renamed `factory_fn_cfg` to `fn_factory_with_config`
## [1.0.0-alpha.3] - 2019-12-06
### Changed
* Add missing Clone impls
* Restore `Transform::map_init_err()` combinator
* Restore `Service/Factory::apply_fn()` in form of `Pipeline/Factory::and_then_apply_fn()`
* Optimize service combinators and futures memory layout
## [1.0.0-alpha.2] - 2019-12-02
### Changed
* Use owned config value for service factory
* Renamed BoxedNewService/BoxedService to BoxServiceFactory/BoxService
## [1.0.0-alpha.1] - 2019-11-25
### Changed
* Migrated to `std::future`
* `NewService` renamed to `ServiceFactory`
* Added `pipeline` and `pipeline_factory` function
## [0.4.2] - 2019-08-27
### Fixed
* Check service readiness for `new_apply_cfg` combinator
## [0.4.1] - 2019-06-06
### Added
* Add `new_apply_cfg` function
## [0.4.0] - 2019-05-12
### Changed
* Use associated type for `NewService` config
* Change `apply_cfg` function
* Renamed helper functions
### Added
* Add `NewService::map_config` and `NewService::unit_config` combinators
## [0.3.6] - 2019-04-07
### Changed
* Poll boxed service call result immediately
## [0.3.5] - 2019-03-29
### Added
* Add `impl<S: Service> Service for Rc<RefCell<S>>`
## [0.3.4] - 2019-03-12
### Added
* Add `Transform::from_err()` combinator
* Add `apply_fn` helper
* Add `apply_fn_factory` helper
* Add `apply_transform` helper
* Add `apply_cfg` helper
## [0.3.3] - 2019-03-09
### Added
* Add `ApplyTransform` new service for transform and new service.
* Add `NewService::apply_cfg()` combinator, which allows using a
  nested `NewService` with a different config parameter.
### Changed
* Revert IntoFuture change
## [0.3.2] - 2019-03-04
### Changed
* Change `NewService::Future` and `Transform::Future` to the `IntoFuture` trait.
* Export `AndThenTransform` type
## [0.3.1] - 2019-03-04
### Changed
* Simplify Transform trait
## [0.3.0] - 2019-03-02
## Added
* Added boxed NewService and Service.
## Changed
* Added `Config` parameter to `NewService` trait.
* Added `Config` parameter to `NewTransform` trait.
## [0.2.2] - 2019-02-19
### Added
* Added `NewService` impl for `Rc<S> where S: NewService`
* Added `NewService` impl for `Arc<S> where S: NewService`
## [0.2.1] - 2019-02-03
### Changed
* Generalize `.apply` combinator with Transform trait
## [0.2.0] - 2019-02-01
### Changed
* Use associated type instead of generic for Service definition.
* Before:
```rust
impl Service<Request> for Client {
type Response = Response;
// ...
}
```
* After:
```rust
impl Service for Client {
type Request = Request;
type Response = Response;
// ...
}
```
## [0.1.6] - 2019-01-24
### Changed
* Use `FnMut` instead of `Fn` for .apply() and .map() combinators and `FnService` type
* Change `.apply()` error semantic, new service's error is `From<Self::Error>`
## [0.1.5] - 2019-01-13
### Changed
* Make `Out::Error` convertible from `T::Error` for apply combinator
## [0.1.4] - 2019-01-11
### Changed
* Use `FnMut` instead of `Fn` for `FnService`
## [0.1.3] - 2018-12-12
### Changed
* Split service combinators to separate trait
## [0.1.2] - 2018-12-12
### Fixed
* Release future early for `.and_then()` and `.then()` combinators
## [0.1.1] - 2018-12-09
### Added
* Added Service impl for Box<S: Service>
## [0.1.0] - 2018-12-09
* Initial import

View File

@@ -1,29 +0,0 @@
[package]
name = "actix-service"
version = "2.0.0-beta.4"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
"fakeshadow <24548779@qq.com>",
]
description = "Service trait and combinators for representing asynchronous request/response operations."
keywords = ["network", "framework", "async", "futures", "service"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-service"
readme = "README.md"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_service"
path = "src/lib.rs"
[dependencies]
futures-core = { version = "0.3.7", default-features = false }
pin-project-lite = "0.2"
[dev-dependencies]
actix-rt = "2.0.0"
futures-util = { version = "0.3.7", default-features = false }

View File

@@ -1 +0,0 @@
../LICENSE-APACHE

View File

@@ -1 +0,0 @@
../LICENSE-MIT

View File

@@ -1,13 +0,0 @@
# actix-service
> Service trait and combinators for representing asynchronous request/response operations.
[![crates.io](https://img.shields.io/crates/v/actix-service?label=latest)](https://crates.io/crates/actix-service)
[![Documentation](https://docs.rs/actix-service/badge.svg?version=2.0.0-beta.4)](https://docs.rs/actix-service/2.0.0-beta.4)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![License](https://img.shields.io/crates/l/actix-service.svg)
[![Dependency Status](https://deps.rs/crate/actix-service/2.0.0-beta.4/status.svg)](https://deps.rs/crate/actix-service/2.0.0-beta.4)
[![Download](https://img.shields.io/crates/d/actix-service.svg)](https://crates.io/crates/actix-service)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
See documentation for detailed explanations of these components: https://docs.rs/actix-service.
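A minimal, illustrative sketch of implementing the `Service` trait (using only the standard library for the future type; see the documentation above for the authoritative API):

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

use actix_service::Service;

struct Echo;

impl Service<String> for Echo {
    type Response = String;
    type Error = ();
    type Future = Pin<Box<dyn Future<Output = Result<String, ()>>>>;

    fn poll_ready(&self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Always ready to accept a request.
        Poll::Ready(Ok(()))
    }

    fn call(&self, req: String) -> Self::Future {
        // Resolve immediately with the request echoed back.
        Box::pin(async move { Ok(req) })
    }
}
```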

View File

@@ -1,344 +0,0 @@
use alloc::rc::Rc;
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `and_then` combinator, chaining a computation onto the end
/// of another service which completes successfully.
///
/// This is created by the `Pipeline::and_then` method.
pub(crate) struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
impl<A, B, Req> AndThenService<A, B, Req> {
/// Create new `AndThen` combinator
pub(crate) fn new(a: A, b: B) -> Self
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
Self(Rc::new((a, b)), PhantomData)
}
}
impl<A, B, Req> Clone for AndThenService<A, B, Req> {
fn clone(&self) -> Self {
AndThenService(self.0.clone(), PhantomData)
}
}
impl<A, B, Req> Service<Req> for AndThenService<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
type Response = B::Response;
type Error = A::Error;
type Future = AndThenServiceResponse<A, B, Req>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let (a, b) = &*self.0;
let not_ready = !a.poll_ready(cx)?.is_ready();
if !b.poll_ready(cx)?.is_ready() || not_ready {
Poll::Pending
} else {
Poll::Ready(Ok(()))
}
}
fn call(&self, req: Req) -> Self::Future {
AndThenServiceResponse {
state: State::A {
fut: self.0 .0.call(req),
b: Some(self.0.clone()),
},
}
}
}
pin_project! {
pub(crate) struct AndThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
#[pin]
state: State<A, B, Req>,
}
}
pin_project! {
#[project = StateProj]
enum State<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
A {
#[pin]
fut: A::Future,
b: Option<Rc<(A, B)>>,
},
B {
#[pin]
fut: B::Future,
},
}
}
impl<A, B, Req> Future for AndThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
type Output = Result<B::Response, A::Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
StateProj::A { fut, b } => {
let res = ready!(fut.poll(cx))?;
let b = b.take().unwrap();
let fut = b.1.call(res);
this.state.set(State::B { fut });
self.poll(cx)
}
StateProj::B { fut } => fut.poll(cx),
}
}
}
/// `.and_then()` service factory combinator
pub(crate) struct AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
inner: Rc<(A, B)>,
_phantom: PhantomData<Req>,
}
impl<A, B, Req> AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
/// Create new `AndThenFactory` combinator
pub(crate) fn new(a: A, b: B) -> Self {
Self {
inner: Rc::new((a, b)),
_phantom: PhantomData,
}
}
}
impl<A, B, Req> ServiceFactory<Req> for AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
type Response = B::Response;
type Error = A::Error;
type Config = A::Config;
type Service = AndThenService<A::Service, B::Service, Req>;
type InitError = A::InitError;
type Future = AndThenServiceFactoryResponse<A, B, Req>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
let inner = &*self.inner;
AndThenServiceFactoryResponse::new(
inner.0.new_service(cfg.clone()),
inner.1.new_service(cfg),
)
}
}
impl<A, B, Req> Clone for AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
_phantom: PhantomData,
}
}
}
pin_project! {
pub(crate) struct AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
{
#[pin]
fut_a: A::Future,
#[pin]
fut_b: B::Future,
a: Option<A::Service>,
b: Option<B::Service>,
}
}
impl<A, B, Req> AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
{
fn new(fut_a: A::Future, fut_b: B::Future) -> Self {
AndThenServiceFactoryResponse {
fut_a,
fut_b,
a: None,
b: None,
}
}
}
impl<A, B, Req> Future for AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response, Error = A::Error, InitError = A::InitError>,
{
type Output = Result<AndThenService<A::Service, B::Service, Req>, A::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if this.a.is_none() {
if let Poll::Ready(service) = this.fut_a.poll(cx)? {
*this.a = Some(service);
}
}
if this.b.is_none() {
if let Poll::Ready(service) = this.fut_b.poll(cx)? {
*this.b = Some(service);
}
}
if this.a.is_some() && this.b.is_some() {
Poll::Ready(Ok(AndThenService::new(
this.a.take().unwrap(),
this.b.take().unwrap(),
)))
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use alloc::rc::Rc;
use core::{
cell::Cell,
task::{Context, Poll},
};
use futures_util::future::lazy;
use crate::{
fn_factory, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory,
};
struct Srv1(Rc<Cell<usize>>);
impl Service<&'static str> for Srv1 {
type Response = &'static str;
type Error = ();
type Future = Ready<Result<Self::Response, ()>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Poll::Ready(Ok(()))
}
fn call(&self, req: &'static str) -> Self::Future {
ok(req)
}
}
#[derive(Clone)]
struct Srv2(Rc<Cell<usize>>);
impl Service<&'static str> for Srv2 {
type Response = (&'static str, &'static str);
type Error = ();
type Future = Ready<Result<Self::Response, ()>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Poll::Ready(Ok(()))
}
fn call(&self, req: &'static str) -> Self::Future {
ok((req, "srv2"))
}
}
#[actix_rt::test]
async fn test_poll_ready() {
let cnt = Rc::new(Cell::new(0));
let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt.clone()));
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Ok(())));
assert_eq!(cnt.get(), 2);
}
#[actix_rt::test]
async fn test_call() {
let cnt = Rc::new(Cell::new(0));
let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt));
let res = srv.call("srv1").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv1", "srv2"));
}
#[actix_rt::test]
async fn test_new_service() {
let cnt = Rc::new(Cell::new(0));
let cnt2 = cnt.clone();
let new_srv =
pipeline_factory(fn_factory(move || ready(Ok::<_, ()>(Srv1(cnt2.clone())))))
.and_then(move || ready(Ok(Srv2(cnt.clone()))));
let srv = new_srv.new_service(()).await.unwrap();
let res = srv.call("srv1").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv1", "srv2"));
}
}

View File

@@ -1,272 +0,0 @@
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{IntoService, IntoServiceFactory, Service, ServiceFactory};
/// Apply transform function to a service.
///
/// The In and Out type params refer to the request and response types for the wrapped service.
pub fn apply_fn<I, S, F, Fut, Req, In, Res, Err>(
service: I,
wrap_fn: F,
) -> Apply<S, F, Req, In, Res, Err>
where
I: IntoService<S, In>,
S: Service<In, Error = Err>,
F: Fn(Req, &S) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
Apply::new(service.into_service(), wrap_fn)
}
/// Service factory that produces `apply_fn` service.
///
/// The In and Out type params refer to the request and response types for the wrapped service.
pub fn apply_fn_factory<I, SF, F, Fut, Req, In, Res, Err>(
service: I,
f: F,
) -> ApplyFactory<SF, F, Req, In, Res, Err>
where
I: IntoServiceFactory<SF, In>,
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
ApplyFactory::new(service.into_factory(), f)
}
/// `Apply` service combinator.
///
/// The In and Out type params refer to the request and response types for the wrapped service.
pub struct Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err>,
{
service: S,
wrap_fn: F,
_phantom: PhantomData<(Req, In, Res, Err)>,
}
impl<S, F, Fut, Req, In, Res, Err> Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err>,
F: Fn(Req, &S) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
/// Create new `Apply` combinator
fn new(service: S, wrap_fn: F) -> Self {
Self {
service,
wrap_fn,
_phantom: PhantomData,
}
}
}
impl<S, F, Fut, Req, In, Res, Err> Clone for Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err> + Clone,
F: Fn(Req, &S) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
Apply {
service: self.service.clone(),
wrap_fn: self.wrap_fn.clone(),
_phantom: PhantomData,
}
}
}
impl<S, F, Fut, Req, In, Res, Err> Service<Req> for Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err>,
F: Fn(Req, &S) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
type Error = Err;
type Future = Fut;
crate::forward_ready!(service);
fn call(&self, req: Req) -> Self::Future {
(self.wrap_fn)(req, &self.service)
}
}
/// `ApplyFactory` service factory combinator.
pub struct ApplyFactory<SF, F, Req, In, Res, Err> {
factory: SF,
wrap_fn: F,
_phantom: PhantomData<(Req, In, Res, Err)>,
}
impl<SF, F, Fut, Req, In, Res, Err> ApplyFactory<SF, F, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
/// Create a new `ApplyFactory` service factory instance
fn new(factory: SF, wrap_fn: F) -> Self {
Self {
factory,
wrap_fn,
_phantom: PhantomData,
}
}
}
impl<SF, F, Fut, Req, In, Res, Err> Clone for ApplyFactory<SF, F, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err> + Clone,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
Self {
factory: self.factory.clone(),
wrap_fn: self.wrap_fn.clone(),
_phantom: PhantomData,
}
}
}
impl<SF, F, Fut, Req, In, Res, Err> ServiceFactory<Req>
for ApplyFactory<SF, F, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
type Error = Err;
type Config = SF::Config;
type Service = Apply<SF::Service, F, Req, In, Res, Err>;
type InitError = SF::InitError;
type Future = ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>;
fn new_service(&self, cfg: SF::Config) -> Self::Future {
let svc = self.factory.new_service(cfg);
ApplyServiceFactoryResponse::new(svc, self.wrap_fn.clone())
}
}
pin_project! {
pub struct ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
#[pin]
fut: SF::Future,
wrap_fn: Option<F>,
_phantom: PhantomData<(Req, Res)>,
}
}
impl<SF, F, Fut, Req, In, Res, Err> ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
fn new(fut: SF::Future, wrap_fn: F) -> Self {
Self {
fut,
wrap_fn: Some(wrap_fn),
_phantom: PhantomData,
}
}
}
impl<SF, F, Fut, Req, In, Res, Err> Future
for ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
type Output = Result<Apply<SF::Service, F, Req, In, Res, Err>, SF::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let svc = ready!(this.fut.poll(cx))?;
Poll::Ready(Ok(Apply::new(svc, this.wrap_fn.take().unwrap())))
}
}
#[cfg(test)]
mod tests {
use core::task::Poll;
use futures_util::future::lazy;
use super::*;
use crate::{ok, pipeline, pipeline_factory, Ready, Service, ServiceFactory};
#[derive(Clone)]
struct Srv;
impl Service<()> for Srv {
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
crate::always_ready!();
fn call(&self, _: ()) -> Self::Future {
ok(())
}
}
#[actix_rt::test]
async fn test_call() {
let srv = pipeline(apply_fn(Srv, |req: &'static str, srv| {
let fut = srv.call(());
async move {
fut.await.unwrap();
Ok((req, ()))
}
}));
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
let res = srv.call("srv").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv", ()));
}
#[actix_rt::test]
async fn test_new_service() {
let new_srv = pipeline_factory(apply_fn_factory(
|| ok::<_, ()>(Srv),
|req: &'static str, srv| {
let fut = srv.call(());
async move {
fut.await.unwrap();
Ok((req, ()))
}
},
));
let srv = new_srv.new_service(()).await.unwrap();
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
let res = srv.call("srv").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv", ()));
}
}

View File

@@ -1,233 +0,0 @@
use alloc::rc::Rc;
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use crate::{Service, ServiceFactory};
/// Convert `Fn(Config, &Service1) -> Future<Service2>` fn to a service factory.
pub fn apply_cfg<S1, Req, F, Cfg, Fut, S2, Err>(
srv: S1,
f: F,
) -> impl ServiceFactory<
Req,
Config = Cfg,
Response = S2::Response,
Error = S2::Error,
Service = S2,
InitError = Err,
Future = Fut,
> + Clone
where
S1: Service<Req>,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
ApplyConfigService {
srv: Rc::new((srv, f)),
_phantom: PhantomData,
}
}
/// Convert `Fn(Config, &Service1) -> Future<Service2>` fn to a service factory.
///
/// `Service1` is constructed by the given `SF` factory.
pub fn apply_cfg_factory<SF, Req, F, Cfg, Fut, S>(
factory: SF,
f: F,
) -> impl ServiceFactory<
Req,
Config = Cfg,
Response = S::Response,
Error = S::Error,
Service = S,
InitError = SF::InitError,
> + Clone
where
SF: ServiceFactory<Req, Config = ()>,
F: Fn(Cfg, &SF::Service) -> Fut,
SF::InitError: From<SF::Error>,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
ApplyConfigServiceFactory {
srv: Rc::new((factory, f)),
_phantom: PhantomData,
}
}
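A brief, illustrative sketch of `apply_cfg` (not from the original file); the prefix/echo services and names are assumptions:

```rust
use actix_service::{apply_cfg, fn_service, ServiceFactory};

fn prefixed_echo_factory() -> impl ServiceFactory<
    String,
    Config = String,
    Response = String,
    Error = (),
    InitError = (),
> {
    // Shared "inner" service that simply echoes its input.
    let inner = fn_service(|req: String| async move { Ok::<_, ()>(req) });

    // For each config value (the prefix), build a new service around the inner one.
    apply_cfg(inner, |prefix: String, _inner| async move {
        Ok::<_, ()>(fn_service(move |req: String| {
            let prefix = prefix.clone();
            async move { Ok::<_, ()>(format!("{}{}", prefix, req)) }
        }))
    })
}
```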
/// Convert `Fn(Config, &Service) -> Future<Service>` fn to a `ServiceFactory`.
struct ApplyConfigService<S1, Req, F, Cfg, Fut, S2, Err>
where
S1: Service<Req>,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
srv: Rc<(S1, F)>,
_phantom: PhantomData<(Cfg, Req, Fut, S2)>,
}
impl<S1, Req, F, Cfg, Fut, S2, Err> Clone for ApplyConfigService<S1, Req, F, Cfg, Fut, S2, Err>
where
S1: Service<Req>,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
fn clone(&self) -> Self {
ApplyConfigService {
srv: self.srv.clone(),
_phantom: PhantomData,
}
}
}
impl<S1, Req, F, Cfg, Fut, S2, Err> ServiceFactory<Req>
for ApplyConfigService<S1, Req, F, Cfg, Fut, S2, Err>
where
S1: Service<Req>,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
type Response = S2::Response;
type Error = S2::Error;
type Config = Cfg;
type Service = S2;
type InitError = Err;
type Future = Fut;
fn new_service(&self, cfg: Cfg) -> Self::Future {
let (t, f) = &*self.srv;
f(cfg, t)
}
}
/// Convert `Fn(Config, &SF::Service) -> Future<Service>` fn to a `ServiceFactory`.
struct ApplyConfigServiceFactory<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
srv: Rc<(SF, F)>,
_phantom: PhantomData<(Cfg, Req, Fut, S)>,
}
impl<SF, Req, F, Cfg, Fut, S> Clone for ApplyConfigServiceFactory<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
fn clone(&self) -> Self {
Self {
srv: self.srv.clone(),
_phantom: PhantomData,
}
}
}
impl<SF, Req, F, Cfg, Fut, S> ServiceFactory<Req>
for ApplyConfigServiceFactory<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Config = Cfg;
type Service = S;
type InitError = SF::InitError;
type Future = ApplyConfigServiceFactoryResponse<SF, Req, F, Cfg, Fut, S>;
fn new_service(&self, cfg: Cfg) -> Self::Future {
ApplyConfigServiceFactoryResponse {
cfg: Some(cfg),
store: self.srv.clone(),
state: State::A {
fut: self.srv.0.new_service(()),
},
}
}
}
pin_project! {
struct ApplyConfigServiceFactoryResponse<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
cfg: Option<Cfg>,
store: Rc<(SF, F)>,
#[pin]
state: State<SF, Fut, S, Req>,
}
}
pin_project! {
#[project = StateProj]
enum State<SF, Fut, S, Req>
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
A { #[pin] fut: SF::Future },
B { svc: SF::Service },
C { #[pin] fut: Fut },
}
}
impl<SF, Req, F, Cfg, Fut, S> Future
for ApplyConfigServiceFactoryResponse<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
type Output = Result<S, SF::InitError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
StateProj::A { fut } => {
let svc = ready!(fut.poll(cx))?;
this.state.set(State::B { svc });
self.poll(cx)
}
StateProj::B { svc } => {
ready!(svc.poll_ready(cx))?;
{
let (_, f) = &**this.store;
let fut = f(this.cfg.take().unwrap(), svc);
this.state.set(State::C { fut });
}
self.poll(cx)
}
StateProj::C { fut } => fut.poll(cx),
}
}
}

View File

@@ -1,141 +0,0 @@
use alloc::boxed::Box;
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use crate::{Service, ServiceFactory};
pub type BoxFuture<T> = Pin<Box<dyn Future<Output = T>>>;
pub type BoxService<Req, Res, Err> =
Box<dyn Service<Req, Response = Res, Error = Err, Future = BoxFuture<Result<Res, Err>>>>;
pub struct BoxServiceFactory<Cfg, Req, Res, Err, InitErr>(Inner<Cfg, Req, Res, Err, InitErr>);
/// Create boxed service factory
pub fn factory<SF, Req>(
factory: SF,
) -> BoxServiceFactory<SF::Config, Req, SF::Response, SF::Error, SF::InitError>
where
SF: ServiceFactory<Req> + 'static,
Req: 'static,
SF::Response: 'static,
SF::Service: 'static,
SF::Future: 'static,
SF::Error: 'static,
SF::InitError: 'static,
{
BoxServiceFactory(Box::new(FactoryWrapper {
factory,
_t: PhantomData,
}))
}
/// Create boxed service
pub fn service<S, Req>(service: S) -> BoxService<Req, S::Response, S::Error>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
Box::new(ServiceWrapper(service, PhantomData))
}
type Inner<C, Req, Res, Err, InitErr> = Box<
dyn ServiceFactory<
Req,
Config = C,
Response = Res,
Error = Err,
InitError = InitErr,
Service = BoxService<Req, Res, Err>,
Future = BoxFuture<Result<BoxService<Req, Res, Err>, InitErr>>,
>,
>;
impl<C, Req, Res, Err, InitErr> ServiceFactory<Req>
for BoxServiceFactory<C, Req, Res, Err, InitErr>
where
Req: 'static,
Res: 'static,
Err: 'static,
InitErr: 'static,
{
type Response = Res;
type Error = Err;
type InitError = InitErr;
type Config = C;
type Service = BoxService<Req, Res, Err>;
type Future = BoxFuture<Result<Self::Service, InitErr>>;
fn new_service(&self, cfg: C) -> Self::Future {
self.0.new_service(cfg)
}
}
struct FactoryWrapper<SF, Req, Cfg> {
factory: SF,
_t: PhantomData<(Req, Cfg)>,
}
impl<SF, Req, Cfg, Res, Err, InitErr> ServiceFactory<Req> for FactoryWrapper<SF, Req, Cfg>
where
Req: 'static,
Res: 'static,
Err: 'static,
InitErr: 'static,
SF: ServiceFactory<Req, Config = Cfg, Response = Res, Error = Err, InitError = InitErr>,
SF::Future: 'static,
SF::Service: 'static,
<SF::Service as Service<Req>>::Future: 'static,
{
type Response = Res;
type Error = Err;
type InitError = InitErr;
type Config = Cfg;
type Service = BoxService<Req, Res, Err>;
type Future = BoxFuture<Result<Self::Service, Self::InitError>>;
fn new_service(&self, cfg: Cfg) -> Self::Future {
let fut = self.factory.new_service(cfg);
Box::pin(async {
let res = fut.await;
res.map(ServiceWrapper::boxed)
})
}
}
struct ServiceWrapper<S: Service<Req>, Req>(S, PhantomData<Req>);
impl<S, Req> ServiceWrapper<S, Req>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
fn boxed(service: S) -> BoxService<Req, S::Response, S::Error> {
Box::new(ServiceWrapper(service, PhantomData))
}
}
impl<S, Req, Res, Err> Service<Req> for ServiceWrapper<S, Req>
where
S: Service<Req, Response = Res, Error = Err>,
S::Future: 'static,
{
type Response = Res;
type Error = Err;
type Future = BoxFuture<Result<Res, Err>>;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(ctx)
}
fn call(&self, req: Req) -> Self::Future {
Box::pin(self.0.call(req))
}
}
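
// Illustrative sketch: boxing erases the concrete service and factory types
// behind trait objects, at the cost of a heap allocation per call future.
// The numeric services below are arbitrary stand-ins.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{fn_factory, fn_service, ok, Service, ServiceFactory};

    #[actix_rt::test]
    async fn test_boxed_service_and_factory() {
        // Box a concrete function-backed service.
        let boxed_srv = service(fn_service(|x: u32| ok::<_, ()>(x + 1)));
        assert_eq!(boxed_srv.call(1).await, Ok(2));

        // Box a factory; the services it builds are boxed as well.
        let boxed_factory =
            factory(fn_factory(|| ok::<_, ()>(fn_service(|x: u32| ok::<_, ()>(x * 2)))));
        let srv = boxed_factory.new_service(()).await.unwrap();
        assert_eq!(srv.call(2).await, Ok(4));
    }
}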


@@ -1,70 +0,0 @@
use crate::{dev, Service, ServiceFactory};
pub trait ServiceExt<Req>: Service<Req> {
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
///
/// This function is similar to the `Option::map` or `Iterator::map` where
/// it will change the type of the underlying service.
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it, similar to the existing `map` methods in the
/// standard library.
fn map<F, R>(self, f: F) -> dev::Map<Self, F, Req, R>
where
Self: Sized,
F: FnMut(Self::Response) -> R,
{
dev::Map::new(self, f)
}
/// Map this service's error to a different error, returning a new service.
///
/// This function is similar to the `Result::map_err` where it will change
/// the error type of the underlying service. For example, this can be useful to
/// ensure that services have the same error type.
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it.
fn map_err<F, E>(self, f: F) -> dev::MapErr<Self, Req, F, E>
where
Self: Sized,
F: Fn(Self::Error) -> E,
{
dev::MapErr::new(self, f)
}
}
impl<S, Req> ServiceExt<Req> for S where S: Service<Req> {}
pub trait ServiceFactoryExt<Req>: ServiceFactory<Req> {
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
fn map<F, R>(self, f: F) -> crate::map::MapServiceFactory<Self, F, Req, R>
where
Self: Sized,
F: FnMut(Self::Response) -> R + Clone,
{
crate::map::MapServiceFactory::new(self, f)
}
/// Map this service's error to a different error, returning a new service.
fn map_err<F, E>(self, f: F) -> crate::map_err::MapErrServiceFactory<Self, Req, F, E>
where
Self: Sized,
F: Fn(Self::Error) -> E + Clone,
{
crate::map_err::MapErrServiceFactory::new(self, f)
}
/// Map this factory's init error to a different error, returning a new service.
fn map_init_err<F, E>(self, f: F) -> crate::map_init_err::MapInitErr<Self, F, Req, E>
where
Self: Sized,
F: Fn(Self::InitError) -> E + Clone,
{
crate::map_init_err::MapInitErr::new(self, f)
}
}
impl<S, Req> ServiceFactoryExt<Req> for S where S: ServiceFactory<Req> {}
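
// Illustrative sketch: the extension-trait combinators compose on any hand-rolled
// service; `Srv` here is an arbitrary stand-in.
#[cfg(test)]
mod tests {
    use core::task::Poll;

    use futures_util::future::lazy;

    use super::*;
    use crate::{ok, Ready};

    struct Srv;

    impl Service<()> for Srv {
        type Response = &'static str;
        type Error = ();
        type Future = Ready<Result<Self::Response, Self::Error>>;

        crate::always_ready!();

        fn call(&self, _: ()) -> Self::Future {
            ok("srv")
        }
    }

    #[actix_rt::test]
    async fn test_map_and_map_err_compose() {
        let srv = Srv.map(|res| res.len()).map_err(|_| "error");
        assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
        assert_eq!(srv.call(()).await, Ok(3));
    }
}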


@@ -1,396 +0,0 @@
use core::{future::Future, marker::PhantomData, task::Poll};
use crate::{ok, IntoService, IntoServiceFactory, Ready, Service, ServiceFactory};
/// Create `ServiceFactory` for function that can act as a `Service`
pub fn fn_service<F, Fut, Req, Res, Err, Cfg>(
f: F,
) -> FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
FnServiceFactory::new(f)
}
/// Create `ServiceFactory` for function that can produce services
///
/// # Example
///
/// ```
/// use std::io;
/// use actix_service::{fn_factory, fn_service, Service, ServiceFactory};
/// use futures_util::future::ok;
///
/// /// Service that divides two usize values.
/// async fn div((x, y): (usize, usize)) -> Result<usize, io::Error> {
/// if y == 0 {
/// Err(io::Error::new(io::ErrorKind::Other, "divide by zero"))
/// } else {
/// Ok(x / y)
/// }
/// }
///
/// #[actix_rt::main]
/// async fn main() -> io::Result<()> {
/// // Create service factory that produces `div` services
/// let factory = fn_factory(|| {
/// ok::<_, io::Error>(fn_service(div))
/// });
///
/// // construct new service
/// let srv = factory.new_service(()).await?;
///
/// // now we can use `div` service
/// let result = srv.call((10, 20)).await?;
///
/// println!("10 / 20 = {}", result);
///
/// Ok(())
/// }
/// ```
pub fn fn_factory<F, Cfg, Srv, Req, Fut, Err>(
f: F,
) -> FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
Srv: Service<Req>,
F: Fn() -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
{
FnServiceNoConfig::new(f)
}
/// Create `ServiceFactory` for function that accepts config argument and can produce services
///
/// Any function of the form `Fn(Config) -> Future<Output = Result<Service, Error>>` can
/// act as a `ServiceFactory`.
///
/// # Example
///
/// ```
/// use std::io;
/// use actix_service::{fn_factory_with_config, fn_service, Service, ServiceFactory};
/// use futures_util::future::ok;
///
/// #[actix_rt::main]
/// async fn main() -> io::Result<()> {
///     // Create a service factory. The factory uses its config argument
///     // for the services it generates.
/// let factory = fn_factory_with_config(|y: usize| {
/// ok::<_, io::Error>(fn_service(move |x: usize| ok::<_, io::Error>(x * y)))
/// });
///
/// // construct new service with config argument
/// let srv = factory.new_service(10).await?;
///
/// let result = srv.call(10).await?;
/// assert_eq!(result, 100);
///
/// println!("10 * 10 = {}", result);
/// Ok(())
/// }
/// ```
pub fn fn_factory_with_config<F, Fut, Cfg, Srv, Req, Err>(
f: F,
) -> FnServiceConfig<F, Fut, Cfg, Srv, Req, Err>
where
F: Fn(Cfg) -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
FnServiceConfig::new(f)
}
pub struct FnService<F, Fut, Req, Res, Err>
where
F: FnMut(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
f: F,
_t: PhantomData<Req>,
}
impl<F, Fut, Req, Res, Err> FnService<F, Fut, Req, Res, Err>
where
F: FnMut(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
pub(crate) fn new(f: F) -> Self {
Self { f, _t: PhantomData }
}
}
impl<F, Fut, Req, Res, Err> Clone for FnService<F, Fut, Req, Res, Err>
where
F: FnMut(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
Self::new(self.f.clone())
}
}
impl<F, Fut, Req, Res, Err> Service<Req> for FnService<F, Fut, Req, Res, Err>
where
F: Fn(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
type Error = Err;
type Future = Fut;
crate::always_ready!();
fn call(&self, req: Req) -> Self::Future {
(self.f)(req)
}
}
impl<F, Fut, Req, Res, Err> IntoService<FnService<F, Fut, Req, Res, Err>, Req> for F
where
F: Fn(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
fn into_service(self) -> FnService<F, Fut, Req, Res, Err> {
FnService::new(self)
}
}
pub struct FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: Fn(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
f: F,
_t: PhantomData<(Req, Cfg)>,
}
impl<F, Fut, Req, Res, Err, Cfg> FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn new(f: F) -> Self {
FnServiceFactory { f, _t: PhantomData }
}
}
impl<F, Fut, Req, Res, Err, Cfg> Clone for FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
Self::new(self.f.clone())
}
}
impl<F, Fut, Req, Res, Err> Service<Req> for FnServiceFactory<F, Fut, Req, Res, Err, ()>
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
type Error = Err;
type Future = Fut;
crate::always_ready!();
fn call(&self, req: Req) -> Self::Future {
(self.f)(req)
}
}
impl<F, Fut, Req, Res, Err, Cfg> ServiceFactory<Req>
for FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
type Error = Err;
type Config = Cfg;
type Service = FnService<F, Fut, Req, Res, Err>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: Cfg) -> Self::Future {
ok(FnService::new(self.f.clone()))
}
}
impl<F, Fut, Req, Res, Err, Cfg>
IntoServiceFactory<FnServiceFactory<F, Fut, Req, Res, Err, Cfg>, Req> for F
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn into_factory(self) -> FnServiceFactory<F, Fut, Req, Res, Err, Cfg> {
FnServiceFactory::new(self)
}
}
/// Converts a `Fn(Cfg) -> Future<Service>` function into a `ServiceFactory`.
pub struct FnServiceConfig<F, Fut, Cfg, Srv, Req, Err>
where
F: Fn(Cfg) -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
f: F,
_t: PhantomData<(Fut, Cfg, Req, Srv, Err)>,
}
impl<F, Fut, Cfg, Srv, Req, Err> FnServiceConfig<F, Fut, Cfg, Srv, Req, Err>
where
F: Fn(Cfg) -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
fn new(f: F) -> Self {
FnServiceConfig { f, _t: PhantomData }
}
}
impl<F, Fut, Cfg, Srv, Req, Err> Clone for FnServiceConfig<F, Fut, Cfg, Srv, Req, Err>
where
F: Fn(Cfg) -> Fut + Clone,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
fn clone(&self) -> Self {
FnServiceConfig {
f: self.f.clone(),
_t: PhantomData,
}
}
}
impl<F, Fut, Cfg, Srv, Req, Err> ServiceFactory<Req>
for FnServiceConfig<F, Fut, Cfg, Srv, Req, Err>
where
F: Fn(Cfg) -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
type Response = Srv::Response;
type Error = Srv::Error;
type Config = Cfg;
type Service = Srv;
type InitError = Err;
type Future = Fut;
fn new_service(&self, cfg: Cfg) -> Self::Future {
(self.f)(cfg)
}
}
/// Converter for `Fn() -> Future<Service>` fn
pub struct FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
F: Fn() -> Fut,
Srv: Service<Req>,
Fut: Future<Output = Result<Srv, Err>>,
{
f: F,
_t: PhantomData<(Cfg, Req)>,
}
impl<F, Cfg, Srv, Req, Fut, Err> FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
F: Fn() -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
fn new(f: F) -> Self {
Self { f, _t: PhantomData }
}
}
impl<F, Cfg, Srv, Req, Fut, Err> ServiceFactory<Req>
for FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
F: Fn() -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
type Response = Srv::Response;
type Error = Srv::Error;
type Config = Cfg;
type Service = Srv;
type InitError = Err;
type Future = Fut;
fn new_service(&self, _: Cfg) -> Self::Future {
(self.f)()
}
}
impl<F, Cfg, Srv, Req, Fut, Err> Clone for FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
F: Fn() -> Fut + Clone,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
fn clone(&self) -> Self {
Self::new(self.f.clone())
}
}
impl<F, Cfg, Srv, Req, Fut, Err>
IntoServiceFactory<FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>, Req> for F
where
F: Fn() -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
fn into_factory(self) -> FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err> {
FnServiceNoConfig::new(self)
}
}
#[cfg(test)]
mod tests {
use core::task::Poll;
use futures_util::future::lazy;
use super::*;
use crate::{ok, Service, ServiceFactory};
#[actix_rt::test]
async fn test_fn_service() {
let new_srv = fn_service(|()| ok::<_, ()>("srv"));
let srv = new_srv.new_service(()).await.unwrap();
let res = srv.call(()).await;
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
assert!(res.is_ok());
assert_eq!(res.unwrap(), "srv");
}
#[actix_rt::test]
async fn test_fn_service_service() {
let srv = fn_service(|()| ok::<_, ()>("srv"));
let res = srv.call(()).await;
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
assert!(res.is_ok());
assert_eq!(res.unwrap(), "srv");
}
#[actix_rt::test]
async fn test_fn_service_with_config() {
let new_srv = fn_factory_with_config(|cfg: usize| {
ok::<_, ()>(fn_service(move |()| ok::<_, ()>(("srv", cfg))))
});
let srv = new_srv.new_service(1).await.unwrap();
let res = srv.call(()).await;
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv", 1));
}
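
    // Illustrative sketch: `fn_factory` wraps a closure that produces services
    // and ignores the config argument entirely.
    #[actix_rt::test]
    async fn test_fn_factory() {
        let new_srv = fn_factory(|| ok::<_, ()>(fn_service(|()| ok::<_, ()>("srv"))));
        let srv = new_srv.new_service(()).await.unwrap();
        let res = srv.call(()).await;
        assert_eq!(res, Ok("srv"));
    }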
}


@@ -1,337 +0,0 @@
//! See [`Service`] docs for information on this crate's foundational trait.
#![no_std]
#![deny(rust_2018_idioms, nonstandard_style)]
#![allow(clippy::type_complexity)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
extern crate alloc;
use alloc::{boxed::Box, rc::Rc, sync::Arc};
use core::{
cell::RefCell,
future::Future,
task::{self, Context, Poll},
};
mod and_then;
mod apply;
mod apply_cfg;
pub mod boxed;
mod ext;
mod fn_service;
mod map;
mod map_config;
mod map_err;
mod map_init_err;
mod pipeline;
mod ready;
mod then;
mod transform;
mod transform_err;
pub use self::apply::{apply_fn, apply_fn_factory};
pub use self::apply_cfg::{apply_cfg, apply_cfg_factory};
pub use self::ext::{ServiceExt, ServiceFactoryExt};
pub use self::fn_service::{fn_factory, fn_factory_with_config, fn_service};
pub use self::map_config::{map_config, unit_config};
pub use self::pipeline::{pipeline, pipeline_factory, Pipeline, PipelineFactory};
pub use self::transform::{apply, Transform};
#[allow(unused_imports)]
use self::ready::{err, ok, ready, Ready};
/// An asynchronous operation from `Request` to a `Response`.
///
/// The `Service` trait models a request/response interaction, receiving requests and returning
/// replies. You can think about a service as a function with one argument that returns some result
/// asynchronously. Conceptually, the operation looks like this:
///
/// ```ignore
/// async fn(Request) -> Result<Response, Err>
/// ```
///
/// The `Service` trait just generalizes this form: each parameter is described as an
/// associated type on the trait. Services can also have mutable state that influences computation.
///
/// `Service` provides a symmetric and uniform API; the same abstractions can be used to represent
/// both clients and servers. Services describe only _transformation_ operations, which encourages
/// simple API surfaces. This leads to simpler design of each service, improves testability, and
/// makes composition easier.
///
/// ```ignore
/// struct MyService;
///
/// impl Service<u8> for MyService {
///     type Response = u64;
/// type Error = MyError;
/// type Future = Pin<Box<Future<Output=Result<Self::Response, Self::Error>>>>;
///
/// fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ... }
///
///     fn call(&self, req: u8) -> Self::Future { ... }
/// }
/// ```
///
/// Sometimes it is not necessary to implement the Service trait. For example, the above service
/// could be rewritten as a simple function and passed to [fn_service](fn_service()).
///
/// ```ignore
/// async fn my_service(req: u8) -> Result<u64, MyError>;
/// ```
pub trait Service<Req> {
/// Responses given by the service.
type Response;
/// Errors produced by the service when polling readiness or executing call.
type Error;
/// The future response value.
type Future: Future<Output = Result<Self::Response, Self::Error>>;
/// Returns `Ready` when the service is able to process requests.
///
/// If the service is at capacity, then `Pending` is returned and the task
/// is notified when the service becomes ready again. This function is
/// expected to be called while on a task.
///
/// This is a **best effort** implementation. False positives are permitted.
/// It is permitted for the service to return `Ready` from a `poll_ready`
/// call and the next invocation of `call` results in an error.
///
/// # Notes
/// 1. `poll_ready` might be called on a different task to `call`.
/// 1. In cases of chained services, `.poll_ready()` is called for all services at once.
fn poll_ready(&self, ctx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
/// Process the request and return the response asynchronously.
///
/// This function is expected to be callable off task. As such,
/// implementations should take care to not call `poll_ready`. If the
/// service is at capacity and the request is unable to be handled, the
/// returned `Future` should resolve to an error.
///
/// Calling `call` without calling `poll_ready` is permitted. The
/// implementation must be resilient to this fact.
fn call(&self, req: Req) -> Self::Future;
}
/// Factory for creating `Service`s.
///
/// Acts as a service factory. This is useful for cases where new `Service`s
/// must be produced. One case is a TCP server listener. The listener
/// accepts new TCP streams, obtains a new `Service` using the
/// `ServiceFactory` trait, and uses the new `Service` to process inbound
/// requests on that new TCP stream.
///
/// `Config` is a service factory configuration type.
pub trait ServiceFactory<Req> {
/// Responses given by the created services.
type Response;
/// Errors produced by the created services.
type Error;
/// Service factory configuration.
type Config;
/// The kind of `Service` created by this factory.
type Service: Service<Req, Response = Self::Response, Error = Self::Error>;
/// Errors potentially raised while building a service.
type InitError;
/// The future of the `Service` instance.
type Future: Future<Output = Result<Self::Service, Self::InitError>>;
/// Create and return a new service asynchronously.
fn new_service(&self, cfg: Self::Config) -> Self::Future;
}
impl<'a, S, Req> Service<Req> for &'a mut S
where
S: Service<Req> + 'a,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
impl<S, Req> Service<Req> for Box<S>
where
S: Service<Req> + ?Sized,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
impl<S, Req> Service<Req> for RefCell<S>
where
S: Service<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.borrow().poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
self.borrow().call(request)
}
}
impl<S, Req> Service<Req> for Rc<RefCell<S>>
where
S: Service<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.borrow().poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
self.borrow().call(request)
}
}
impl<S, Req> ServiceFactory<Req> for Rc<S>
where
S: ServiceFactory<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Config = S::Config;
type Service = S::Service;
type InitError = S::InitError;
type Future = S::Future;
fn new_service(&self, cfg: S::Config) -> S::Future {
self.as_ref().new_service(cfg)
}
}
impl<S, Req> ServiceFactory<Req> for Arc<S>
where
S: ServiceFactory<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Config = S::Config;
type Service = S::Service;
type InitError = S::InitError;
type Future = S::Future;
fn new_service(&self, cfg: S::Config) -> S::Future {
self.as_ref().new_service(cfg)
}
}
/// Trait for types that can be converted to a `Service`
pub trait IntoService<S, Req>
where
S: Service<Req>,
{
/// Convert to a `Service`
fn into_service(self) -> S;
}
/// Trait for types that can be converted to a `ServiceFactory`
pub trait IntoServiceFactory<SF, Req>
where
SF: ServiceFactory<Req>,
{
/// Convert `Self` to a `ServiceFactory`
fn into_factory(self) -> SF;
}
impl<S, Req> IntoService<S, Req> for S
where
S: Service<Req>,
{
fn into_service(self) -> S {
self
}
}
impl<SF, Req> IntoServiceFactory<SF, Req> for SF
where
SF: ServiceFactory<Req>,
{
fn into_factory(self) -> SF {
self
}
}
/// Convert object of type `U` to a service `S`
pub fn into_service<I, S, Req>(tp: I) -> S
where
I: IntoService<S, Req>,
S: Service<Req>,
{
tp.into_service()
}
pub mod dev {
pub use crate::apply::{Apply, ApplyFactory};
pub use crate::fn_service::{
FnService, FnServiceConfig, FnServiceFactory, FnServiceNoConfig,
};
pub use crate::map::{Map, MapServiceFactory};
pub use crate::map_config::{MapConfig, UnitConfig};
pub use crate::map_err::{MapErr, MapErrServiceFactory};
pub use crate::map_init_err::MapInitErr;
pub use crate::transform::ApplyTransform;
pub use crate::transform_err::TransformMapInitErr;
}
#[macro_export]
macro_rules! always_ready {
() => {
#[inline]
fn poll_ready(
&self,
_: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
::core::task::Poll::Ready(Ok(()))
}
};
}
#[macro_export]
macro_rules! forward_ready {
($field:ident) => {
#[inline]
fn poll_ready(
&self,
cx: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
self.$field
.poll_ready(cx)
.map_err(::core::convert::Into::into)
}
};
}
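
// Illustrative sketch: how the `always_ready!` and `forward_ready!` macros are
// meant to be used inside `Service` implementations. The `Inner`/`Outer` types
// here are arbitrary stand-ins.
#[cfg(test)]
mod macro_tests {
    use core::task::Poll;

    use futures_util::future::lazy;

    use crate::{ok, Ready, Service};

    struct Inner;

    impl Service<u8> for Inner {
        type Response = u8;
        type Error = ();
        type Future = Ready<Result<u8, ()>>;

        // Always reports readiness.
        always_ready!();

        fn call(&self, req: u8) -> Self::Future {
            ok(req)
        }
    }

    struct Outer {
        inner: Inner,
    }

    impl Service<u8> for Outer {
        type Response = u8;
        type Error = ();
        type Future = Ready<Result<u8, ()>>;

        // Delegates readiness checks to the wrapped service.
        forward_ready!(inner);

        fn call(&self, req: u8) -> Self::Future {
            self.inner.call(req)
        }
    }

    #[actix_rt::test]
    async fn test_ready_macros() {
        let svc = Outer { inner: Inner };
        assert_eq!(lazy(|cx| svc.poll_ready(cx)).await, Poll::Ready(Ok(())));
        assert_eq!(svc.call(3).await, Ok(3));
    }
}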


@@ -1,246 +0,0 @@
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `map` combinator, changing the type of a service's response.
///
/// This is created by the `ServiceExt::map` method.
pub struct Map<A, F, Req, Res> {
service: A,
f: F,
_t: PhantomData<(Req, Res)>,
}
impl<A, F, Req, Res> Map<A, F, Req, Res> {
/// Create new `Map` combinator
pub(crate) fn new(service: A, f: F) -> Self
where
A: Service<Req>,
F: FnMut(A::Response) -> Res,
{
Self {
service,
f,
_t: PhantomData,
}
}
}
impl<A, F, Req, Res> Clone for Map<A, F, Req, Res>
where
A: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Map {
service: self.service.clone(),
f: self.f.clone(),
_t: PhantomData,
}
}
}
impl<A, F, Req, Res> Service<Req> for Map<A, F, Req, Res>
where
A: Service<Req>,
F: FnMut(A::Response) -> Res + Clone,
{
type Response = Res;
type Error = A::Error;
type Future = MapFuture<A, F, Req, Res>;
crate::forward_ready!(service);
fn call(&self, req: Req) -> Self::Future {
MapFuture::new(self.service.call(req), self.f.clone())
}
}
pin_project! {
pub struct MapFuture<A, F, Req, Res>
where
A: Service<Req>,
F: FnMut(A::Response) -> Res,
{
f: F,
#[pin]
fut: A::Future,
}
}
impl<A, F, Req, Res> MapFuture<A, F, Req, Res>
where
A: Service<Req>,
F: FnMut(A::Response) -> Res,
{
fn new(fut: A::Future, f: F) -> Self {
MapFuture { f, fut }
}
}
impl<A, F, Req, Res> Future for MapFuture<A, F, Req, Res>
where
A: Service<Req>,
F: FnMut(A::Response) -> Res,
{
type Output = Result<Res, A::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
match this.fut.poll(cx) {
Poll::Ready(Ok(resp)) => Poll::Ready(Ok((this.f)(resp))),
Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
Poll::Pending => Poll::Pending,
}
}
}
/// Service factory for the `map` combinator.
pub struct MapServiceFactory<A, F, Req, Res> {
a: A,
f: F,
r: PhantomData<(Res, Req)>,
}
impl<A, F, Req, Res> MapServiceFactory<A, F, Req, Res> {
/// Create new `Map` new service instance
pub(crate) fn new(a: A, f: F) -> Self
where
A: ServiceFactory<Req>,
F: FnMut(A::Response) -> Res,
{
Self {
a,
f,
r: PhantomData,
}
}
}
impl<A, F, Req, Res> Clone for MapServiceFactory<A, F, Req, Res>
where
A: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Self {
a: self.a.clone(),
f: self.f.clone(),
r: PhantomData,
}
}
}
impl<A, F, Req, Res> ServiceFactory<Req> for MapServiceFactory<A, F, Req, Res>
where
A: ServiceFactory<Req>,
F: FnMut(A::Response) -> Res + Clone,
{
type Response = Res;
type Error = A::Error;
type Config = A::Config;
type Service = Map<A::Service, F, Req, Res>;
type InitError = A::InitError;
type Future = MapServiceFuture<A, F, Req, Res>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
MapServiceFuture::new(self.a.new_service(cfg), self.f.clone())
}
}
pin_project! {
pub struct MapServiceFuture<A, F, Req, Res>
where
A: ServiceFactory<Req>,
F: FnMut(A::Response) -> Res,
{
#[pin]
fut: A::Future,
f: Option<F>,
}
}
impl<A, F, Req, Res> MapServiceFuture<A, F, Req, Res>
where
A: ServiceFactory<Req>,
F: FnMut(A::Response) -> Res,
{
fn new(fut: A::Future, f: F) -> Self {
MapServiceFuture { f: Some(f), fut }
}
}
impl<A, F, Req, Res> Future for MapServiceFuture<A, F, Req, Res>
where
A: ServiceFactory<Req>,
F: FnMut(A::Response) -> Res,
{
type Output = Result<Map<A::Service, F, Req, Res>, A::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if let Poll::Ready(svc) = this.fut.poll(cx)? {
Poll::Ready(Ok(Map::new(svc, this.f.take().unwrap())))
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use futures_util::future::lazy;
use super::*;
use crate::{
ok, IntoServiceFactory, Ready, Service, ServiceExt, ServiceFactory, ServiceFactoryExt,
};
struct Srv;
impl Service<()> for Srv {
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
crate::always_ready!();
fn call(&self, _: ()) -> Self::Future {
ok(())
}
}
#[actix_rt::test]
async fn test_poll_ready() {
let srv = Srv.map(|_| "ok");
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Ok(())));
}
#[actix_rt::test]
async fn test_call() {
let srv = Srv.map(|_| "ok");
let res = srv.call(()).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), "ok");
}
#[actix_rt::test]
async fn test_new_service() {
let new_srv = (|| ok::<_, ()>(Srv)).into_factory().map(|_| "ok");
let srv = new_srv.new_service(&()).await.unwrap();
let res = srv.call(()).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("ok"));
}
}


@@ -1,128 +0,0 @@
use core::marker::PhantomData;
use super::{IntoServiceFactory, ServiceFactory};
/// Adapt an external config argument to the config type expected by the provided service factory.
///
/// Note that this function consumes the receiving service factory and returns
/// a wrapped version of it.
pub fn map_config<I, SF, Req, F, Cfg>(factory: I, f: F) -> MapConfig<SF, Req, F, Cfg>
where
I: IntoServiceFactory<SF, Req>,
SF: ServiceFactory<Req>,
F: Fn(Cfg) -> SF::Config,
{
MapConfig::new(factory.into_factory(), f)
}
/// Replace the factory's config with unit (`()`), accepting and ignoring any config value.
pub fn unit_config<I, SF, Cfg, Req>(factory: I) -> UnitConfig<SF, Cfg, Req>
where
I: IntoServiceFactory<SF, Req>,
SF: ServiceFactory<Req, Config = ()>,
{
UnitConfig::new(factory.into_factory())
}
/// `map_config()` adapter service factory
pub struct MapConfig<SF, Req, F, Cfg> {
factory: SF,
cfg_mapper: F,
e: PhantomData<(Cfg, Req)>,
}
impl<SF, Req, F, Cfg> MapConfig<SF, Req, F, Cfg> {
/// Create new `MapConfig` combinator
pub(crate) fn new(factory: SF, cfg_mapper: F) -> Self
where
SF: ServiceFactory<Req>,
F: Fn(Cfg) -> SF::Config,
{
Self {
factory,
cfg_mapper,
e: PhantomData,
}
}
}
impl<SF, Req, F, Cfg> Clone for MapConfig<SF, Req, F, Cfg>
where
SF: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Self {
factory: self.factory.clone(),
cfg_mapper: self.cfg_mapper.clone(),
e: PhantomData,
}
}
}
impl<SF, Req, F, Cfg> ServiceFactory<Req> for MapConfig<SF, Req, F, Cfg>
where
SF: ServiceFactory<Req>,
F: Fn(Cfg) -> SF::Config,
{
type Response = SF::Response;
type Error = SF::Error;
type Config = Cfg;
type Service = SF::Service;
type InitError = SF::InitError;
type Future = SF::Future;
fn new_service(&self, cfg: Self::Config) -> Self::Future {
let mapped_cfg = (self.cfg_mapper)(cfg);
self.factory.new_service(mapped_cfg)
}
}
/// `unit_config()` config combinator
pub struct UnitConfig<SF, Cfg, Req> {
factory: SF,
_phantom: PhantomData<(Cfg, Req)>,
}
impl<SF, Cfg, Req> UnitConfig<SF, Cfg, Req>
where
SF: ServiceFactory<Req, Config = ()>,
{
/// Create new `UnitConfig` combinator
pub(crate) fn new(factory: SF) -> Self {
Self {
factory,
_phantom: PhantomData,
}
}
}
impl<SF, Cfg, Req> Clone for UnitConfig<SF, Cfg, Req>
where
SF: Clone,
{
fn clone(&self) -> Self {
Self {
factory: self.factory.clone(),
_phantom: PhantomData,
}
}
}
impl<SF, Cfg, Req> ServiceFactory<Req> for UnitConfig<SF, Cfg, Req>
where
SF: ServiceFactory<Req, Config = ()>,
{
type Response = SF::Response;
type Error = SF::Error;
type Config = Cfg;
type Service = SF::Service;
type InitError = SF::InitError;
type Future = SF::Future;
fn new_service(&self, _: Cfg) -> Self::Future {
self.factory.new_service(())
}
}
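
// Illustrative sketch: `map_config` derives the inner factory's config from an
// outer config type, while `unit_config` accepts and discards any config value.
// The config values used here are arbitrary.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{fn_factory_with_config, fn_service, ok, Service, ServiceFactory};

    #[actix_rt::test]
    async fn test_map_config() {
        let inner = fn_factory_with_config(|n: usize| {
            ok::<_, ()>(fn_service(move |()| ok::<_, ()>(n)))
        });

        // The outer config is a string slice; the mapper derives the inner config.
        let factory = map_config(inner, |s: &str| s.len());

        let srv = factory.new_service("four").await.unwrap();
        assert_eq!(srv.call(()).await, Ok(4));
    }

    #[actix_rt::test]
    async fn test_unit_config() {
        let factory = unit_config(fn_factory_with_config(|(): ()| {
            ok::<_, ()>(fn_service(|x: u8| ok::<_, ()>(x)))
        }));

        // Any config value is accepted and ignored.
        let srv = factory.new_service(42u32).await.unwrap();
        assert_eq!(srv.call(7).await, Ok(7));
    }
}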


@@ -1,253 +0,0 @@
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `map_err` combinator, changing the type of a service's
/// error.
///
/// This is created by the `ServiceExt::map_err` method.
pub struct MapErr<S, Req, F, E> {
service: S,
f: F,
_t: PhantomData<(E, Req)>,
}
impl<S, Req, F, E> MapErr<S, Req, F, E> {
/// Create new `MapErr` combinator
pub(crate) fn new(service: S, f: F) -> Self
where
S: Service<Req>,
F: Fn(S::Error) -> E,
{
Self {
service,
f,
_t: PhantomData,
}
}
}
impl<S, Req, F, E> Clone for MapErr<S, Req, F, E>
where
S: Clone,
F: Clone,
{
fn clone(&self) -> Self {
MapErr {
service: self.service.clone(),
f: self.f.clone(),
_t: PhantomData,
}
}
}
impl<A, Req, F, E> Service<Req> for MapErr<A, Req, F, E>
where
A: Service<Req>,
F: Fn(A::Error) -> E + Clone,
{
type Response = A::Response;
type Error = E;
type Future = MapErrFuture<A, Req, F, E>;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(&self.f)
}
fn call(&self, req: Req) -> Self::Future {
MapErrFuture::new(self.service.call(req), self.f.clone())
}
}
pin_project! {
pub struct MapErrFuture<A, Req, F, E>
where
A: Service<Req>,
F: Fn(A::Error) -> E,
{
f: F,
#[pin]
fut: A::Future,
}
}
impl<A, Req, F, E> MapErrFuture<A, Req, F, E>
where
A: Service<Req>,
F: Fn(A::Error) -> E,
{
fn new(fut: A::Future, f: F) -> Self {
MapErrFuture { f, fut }
}
}
impl<A, Req, F, E> Future for MapErrFuture<A, Req, F, E>
where
A: Service<Req>,
F: Fn(A::Error) -> E,
{
type Output = Result<A::Response, E>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
this.fut.poll(cx).map_err(this.f)
}
}
/// Factory for the `map_err` combinator, changing the type of a new
/// service's error.
///
/// This is created by the `ServiceFactoryExt::map_err` method.
pub struct MapErrServiceFactory<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
{
a: A,
f: F,
e: PhantomData<(E, Req)>,
}
impl<A, Req, F, E> MapErrServiceFactory<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
{
/// Create new `MapErr` new service instance
pub(crate) fn new(a: A, f: F) -> Self {
Self {
a,
f,
e: PhantomData,
}
}
}
impl<A, Req, F, E> Clone for MapErrServiceFactory<A, Req, F, E>
where
A: ServiceFactory<Req> + Clone,
F: Fn(A::Error) -> E + Clone,
{
fn clone(&self) -> Self {
Self {
a: self.a.clone(),
f: self.f.clone(),
e: PhantomData,
}
}
}
impl<A, Req, F, E> ServiceFactory<Req> for MapErrServiceFactory<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
{
type Response = A::Response;
type Error = E;
type Config = A::Config;
type Service = MapErr<A::Service, Req, F, E>;
type InitError = A::InitError;
type Future = MapErrServiceFuture<A, Req, F, E>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
MapErrServiceFuture::new(self.a.new_service(cfg), self.f.clone())
}
}
pin_project! {
pub struct MapErrServiceFuture<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E,
{
#[pin]
fut: A::Future,
f: F,
}
}
impl<A, Req, F, E> MapErrServiceFuture<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E,
{
fn new(fut: A::Future, f: F) -> Self {
MapErrServiceFuture { f, fut }
}
}
impl<A, Req, F, E> Future for MapErrServiceFuture<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
{
type Output = Result<MapErr<A::Service, Req, F, E>, A::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if let Poll::Ready(svc) = this.fut.poll(cx)? {
Poll::Ready(Ok(MapErr::new(svc, this.f.clone())))
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use futures_util::future::lazy;
use super::*;
use crate::{
err, ok, IntoServiceFactory, Ready, Service, ServiceExt, ServiceFactory,
ServiceFactoryExt,
};
struct Srv;
impl Service<()> for Srv {
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Err(()))
}
fn call(&self, _: ()) -> Self::Future {
err(())
}
}
#[actix_rt::test]
async fn test_poll_ready() {
let srv = Srv.map_err(|_| "error");
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Err("error")));
}
#[actix_rt::test]
async fn test_call() {
let srv = Srv.map_err(|_| "error");
let res = srv.call(()).await;
assert!(res.is_err());
assert_eq!(res.err().unwrap(), "error");
}
#[actix_rt::test]
async fn test_new_service() {
let new_srv = (|| ok::<_, ()>(Srv)).into_factory().map_err(|_| "error");
let srv = new_srv.new_service(&()).await.unwrap();
let res = srv.call(()).await;
assert!(res.is_err());
assert_eq!(res.err().unwrap(), "error");
}
}


@@ -1,99 +0,0 @@
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
use super::ServiceFactory;
/// `MapInitErr` service combinator
pub struct MapInitErr<A, F, Req, Err> {
a: A,
f: F,
e: PhantomData<(Req, Err)>,
}
impl<A, F, Req, Err> MapInitErr<A, F, Req, Err>
where
A: ServiceFactory<Req>,
F: Fn(A::InitError) -> Err,
{
/// Create new `MapInitErr` combinator
pub(crate) fn new(a: A, f: F) -> Self {
Self {
a,
f,
e: PhantomData,
}
}
}
impl<A, F, Req, E> Clone for MapInitErr<A, F, Req, E>
where
A: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Self {
a: self.a.clone(),
f: self.f.clone(),
e: PhantomData,
}
}
}
impl<A, F, Req, E> ServiceFactory<Req> for MapInitErr<A, F, Req, E>
where
A: ServiceFactory<Req>,
F: Fn(A::InitError) -> E + Clone,
{
type Response = A::Response;
type Error = A::Error;
type Config = A::Config;
type Service = A::Service;
type InitError = E;
type Future = MapInitErrFuture<A, F, Req, E>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
MapInitErrFuture::new(self.a.new_service(cfg), self.f.clone())
}
}
pin_project! {
pub struct MapInitErrFuture<A, F, Req, E>
where
A: ServiceFactory<Req>,
F: Fn(A::InitError) -> E,
{
f: F,
#[pin]
fut: A::Future,
}
}
impl<A, F, Req, E> MapInitErrFuture<A, F, Req, E>
where
A: ServiceFactory<Req>,
F: Fn(A::InitError) -> E,
{
fn new(fut: A::Future, f: F) -> Self {
MapInitErrFuture { f, fut }
}
}
impl<A, F, Req, E> Future for MapInitErrFuture<A, F, Req, E>
where
A: ServiceFactory<Req>,
F: Fn(A::InitError) -> E,
{
type Output = Result<A::Service, E>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
this.fut.poll(cx).map_err(this.f)
}
}
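
// Illustrative sketch: `map_init_err` converts a factory's construction error
// into another type; the error message used here is arbitrary.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{err, fn_factory_with_config, fn_service, ok, ServiceFactory, ServiceFactoryExt};

    #[actix_rt::test]
    async fn test_map_init_err() {
        // A factory that fails to build its service when told to.
        let factory = fn_factory_with_config(|fail: bool| {
            if fail {
                err(())
            } else {
                ok(fn_service(|x: u8| ok::<_, ()>(x)))
            }
        })
        .map_init_err(|_| "init failed");

        assert!(factory.new_service(false).await.is_ok());
        assert_eq!(factory.new_service(true).await.err(), Some("init failed"));
    }
}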


@@ -1,306 +0,0 @@
use core::{
marker::PhantomData,
task::{Context, Poll},
};
use crate::and_then::{AndThenService, AndThenServiceFactory};
use crate::map::{Map, MapServiceFactory};
use crate::map_err::{MapErr, MapErrServiceFactory};
use crate::map_init_err::MapInitErr;
use crate::then::{ThenService, ThenServiceFactory};
use crate::{IntoService, IntoServiceFactory, Service, ServiceFactory};
/// Construct new pipeline with one service in pipeline chain.
pub fn pipeline<I, S, Req>(service: I) -> Pipeline<S, Req>
where
I: IntoService<S, Req>,
S: Service<Req>,
{
Pipeline {
service: service.into_service(),
_phantom: PhantomData,
}
}
/// Construct new pipeline factory with one service factory.
pub fn pipeline_factory<I, SF, Req>(factory: I) -> PipelineFactory<SF, Req>
where
I: IntoServiceFactory<SF, Req>,
SF: ServiceFactory<Req>,
{
PipelineFactory {
factory: factory.into_factory(),
_phantom: PhantomData,
}
}
/// Pipeline service - allows composing multiple services into a single service.
pub struct Pipeline<S, Req> {
service: S,
_phantom: PhantomData<Req>,
}
impl<S, Req> Pipeline<S, Req>
where
S: Service<Req>,
{
/// Call another service after the call to this one has resolved successfully.
///
/// This function can be used to chain two services together and ensure that
/// the second service isn't called until the call to the first service has
/// finished. The result of the call to the first service is used as the
/// input parameter for the second service's call.
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it.
pub fn and_then<I, S1>(
self,
service: I,
) -> Pipeline<impl Service<Req, Response = S1::Response, Error = S::Error> + Clone, Req>
where
Self: Sized,
I: IntoService<S1, S::Response>,
S1: Service<S::Response, Error = S::Error>,
{
Pipeline {
service: AndThenService::new(self.service, service.into_service()),
_phantom: PhantomData,
}
}
/// Chain on a computation to run once a call to the service has finished,
/// passing the result of the call to the next service `U`.
///
/// Note that this function consumes the receiving pipeline and returns a
/// wrapped version of it.
pub fn then<F, S1>(
self,
service: F,
) -> Pipeline<impl Service<Req, Response = S1::Response, Error = S::Error> + Clone, Req>
where
Self: Sized,
F: IntoService<S1, Result<S::Response, S::Error>>,
S1: Service<Result<S::Response, S::Error>, Error = S::Error>,
{
Pipeline {
service: ThenService::new(self.service, service.into_service()),
_phantom: PhantomData,
}
}
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
///
/// This function is similar to the `Option::map` or `Iterator::map` where
/// it will change the type of the underlying service.
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it, similar to the existing `map` methods in the
/// standard library.
pub fn map<F, R>(self, f: F) -> Pipeline<Map<S, F, Req, R>, Req>
where
Self: Sized,
F: FnMut(S::Response) -> R,
{
Pipeline {
service: Map::new(self.service, f),
_phantom: PhantomData,
}
}
/// Map this service's error to a different error, returning a new service.
///
/// This function is similar to the `Result::map_err` where it will change
/// the error type of the underlying service. This is useful for example to
/// ensure that services have the same error type.
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it.
pub fn map_err<F, E>(self, f: F) -> Pipeline<MapErr<S, Req, F, E>, Req>
where
Self: Sized,
F: Fn(S::Error) -> E,
{
Pipeline {
service: MapErr::new(self.service, f),
_phantom: PhantomData,
}
}
}
impl<T, Req> Clone for Pipeline<T, Req>
where
T: Clone,
{
fn clone(&self) -> Self {
Pipeline {
service: self.service.clone(),
_phantom: PhantomData,
}
}
}
impl<S: Service<Req>, Req> Service<Req> for Pipeline<S, Req> {
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
#[inline]
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
self.service.poll_ready(ctx)
}
#[inline]
fn call(&self, req: Req) -> Self::Future {
self.service.call(req)
}
}
/// Pipeline factory
pub struct PipelineFactory<SF, Req> {
factory: SF,
_phantom: PhantomData<Req>,
}
impl<SF, Req> PipelineFactory<SF, Req>
where
SF: ServiceFactory<Req>,
{
/// Call another service after the call to this one has resolved successfully.
pub fn and_then<I, SF1>(
self,
factory: I,
) -> PipelineFactory<
impl ServiceFactory<
Req,
Response = SF1::Response,
Error = SF::Error,
Config = SF::Config,
InitError = SF::InitError,
Service = impl Service<Req, Response = SF1::Response, Error = SF::Error> + Clone,
> + Clone,
Req,
>
where
Self: Sized,
SF::Config: Clone,
I: IntoServiceFactory<SF1, SF::Response>,
SF1: ServiceFactory<
SF::Response,
Config = SF::Config,
Error = SF::Error,
InitError = SF::InitError,
>,
{
PipelineFactory {
factory: AndThenServiceFactory::new(self.factory, factory.into_factory()),
_phantom: PhantomData,
}
}
/// Create a `ServiceFactory` that chains on a computation to run once a call to the
/// service has finished, passing the result of the call to the next
/// service `U`.
///
/// Note that this function consumes the receiving pipeline and returns a
/// wrapped version of it.
pub fn then<I, SF1>(
self,
factory: I,
) -> PipelineFactory<
impl ServiceFactory<
Req,
Response = SF1::Response,
Error = SF::Error,
Config = SF::Config,
InitError = SF::InitError,
Service = impl Service<Req, Response = SF1::Response, Error = SF::Error> + Clone,
> + Clone,
Req,
>
where
Self: Sized,
SF::Config: Clone,
I: IntoServiceFactory<SF1, Result<SF::Response, SF::Error>>,
SF1: ServiceFactory<
Result<SF::Response, SF::Error>,
Config = SF::Config,
Error = SF::Error,
InitError = SF::InitError,
>,
{
PipelineFactory {
factory: ThenServiceFactory::new(self.factory, factory.into_factory()),
_phantom: PhantomData,
}
}
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
pub fn map<F, R>(self, f: F) -> PipelineFactory<MapServiceFactory<SF, F, Req, R>, Req>
where
Self: Sized,
F: FnMut(SF::Response) -> R + Clone,
{
PipelineFactory {
factory: MapServiceFactory::new(self.factory, f),
_phantom: PhantomData,
}
}
/// Map this service's error to a different error, returning a new service.
pub fn map_err<F, E>(
self,
f: F,
) -> PipelineFactory<MapErrServiceFactory<SF, Req, F, E>, Req>
where
Self: Sized,
F: Fn(SF::Error) -> E + Clone,
{
PipelineFactory {
factory: MapErrServiceFactory::new(self.factory, f),
_phantom: PhantomData,
}
}
/// Map this factory's init error to a different error, returning a new service.
pub fn map_init_err<F, E>(self, f: F) -> PipelineFactory<MapInitErr<SF, F, Req, E>, Req>
where
Self: Sized,
F: Fn(SF::InitError) -> E + Clone,
{
PipelineFactory {
factory: MapInitErr::new(self.factory, f),
_phantom: PhantomData,
}
}
}
impl<T, Req> Clone for PipelineFactory<T, Req>
where
T: Clone,
{
fn clone(&self) -> Self {
PipelineFactory {
factory: self.factory.clone(),
_phantom: PhantomData,
}
}
}
impl<SF, Req> ServiceFactory<Req> for PipelineFactory<SF, Req>
where
SF: ServiceFactory<Req>,
{
type Config = SF::Config;
type Response = SF::Response;
type Error = SF::Error;
type Service = SF::Service;
type InitError = SF::InitError;
type Future = SF::Future;
#[inline]
fn new_service(&self, cfg: SF::Config) -> Self::Future {
self.factory.new_service(cfg)
}
}
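
// Illustrative sketch: composing two function services with `pipeline` and
// mapping a factory's output with `pipeline_factory`. The closures are arbitrary.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{fn_service, ok};

    #[actix_rt::test]
    async fn test_pipeline_and_then() {
        let srv = pipeline(|x: usize| ok::<_, ()>(x + 1))
            .and_then(|x: usize| ok::<_, ()>(x * 2));
        assert_eq!(srv.call(1).await, Ok(4));
    }

    #[actix_rt::test]
    async fn test_pipeline_factory_map() {
        let factory = pipeline_factory(|| ok::<_, ()>(fn_service(|x: usize| ok::<_, ()>(x))))
            .map(|x| x + 1);
        let srv = factory.new_service(()).await.unwrap();
        assert_eq!(srv.call(1).await, Ok(2));
    }
}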


@@ -1,54 +0,0 @@
//! When MSRV is 1.48, replace with `core::future::Ready` and `core::future::ready()`.
use core::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
/// Future for the [`ready`](ready()) function.
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Ready<T> {
val: Option<T>,
}
impl<T> Ready<T> {
/// Unwraps the value from this immediately ready future.
#[inline]
pub fn into_inner(mut self) -> T {
self.val.take().unwrap()
}
}
impl<T> Unpin for Ready<T> {}
impl<T> Future for Ready<T> {
type Output = T;
#[inline]
fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<T> {
let val = self.val.take().expect("Ready can not be polled twice.");
Poll::Ready(val)
}
}
/// Creates a future that is immediately ready with a value.
#[allow(dead_code)]
pub(crate) fn ready<T>(val: T) -> Ready<T> {
Ready { val: Some(val) }
}
/// Create a future that is immediately ready with a success value.
#[allow(dead_code)]
pub(crate) fn ok<T, E>(val: T) -> Ready<Result<T, E>> {
Ready { val: Some(Ok(val)) }
}
/// Create a future that is immediately ready with an error value.
#[allow(dead_code)]
pub(crate) fn err<T, E>(err: E) -> Ready<Result<T, E>> {
Ready {
val: Some(Err(err)),
}
}
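
// Illustrative sketch: basic behaviour of the local `Ready` helpers.
#[cfg(test)]
mod tests {
    use super::*;

    #[actix_rt::test]
    async fn test_ready_helpers() {
        assert_eq!(ready(1).await, 1);
        assert_eq!(ok::<_, ()>("ok").await, Ok("ok"));
        assert_eq!(err::<(), _>("err").await, Err("err"));
        assert_eq!(ready(5).into_inner(), 5);
    }
}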


@@ -1,330 +0,0 @@
use alloc::rc::Rc;
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `then` combinator, chaining a computation onto the end of
/// another service.
///
/// This is created by the `Pipeline::then` method.
pub(crate) struct ThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
impl<A, B, Req> ThenService<A, B, Req> {
/// Create new `.then()` combinator
pub(crate) fn new(a: A, b: B) -> ThenService<A, B, Req>
where
A: Service<Req>,
B: Service<Result<A::Response, A::Error>, Error = A::Error>,
{
Self(Rc::new((a, b)), PhantomData)
}
}
impl<A, B, Req> Clone for ThenService<A, B, Req> {
fn clone(&self) -> Self {
ThenService(self.0.clone(), PhantomData)
}
}
impl<A, B, Req> Service<Req> for ThenService<A, B, Req>
where
A: Service<Req>,
B: Service<Result<A::Response, A::Error>, Error = A::Error>,
{
type Response = B::Response;
type Error = B::Error;
type Future = ThenServiceResponse<A, B, Req>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let (a, b) = &*self.0;
let not_ready = !a.poll_ready(cx)?.is_ready();
if !b.poll_ready(cx)?.is_ready() || not_ready {
Poll::Pending
} else {
Poll::Ready(Ok(()))
}
}
fn call(&self, req: Req) -> Self::Future {
ThenServiceResponse {
state: State::A {
fut: self.0 .0.call(req),
b: Some(self.0.clone()),
},
}
}
}
pin_project! {
pub(crate) struct ThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<Result<A::Response, A::Error>>,
{
#[pin]
state: State<A, B, Req>,
}
}
pin_project! {
#[project = StateProj]
enum State<A, B, Req>
where
A: Service<Req>,
B: Service<Result<A::Response, A::Error>>,
{
A { #[pin] fut: A::Future, b: Option<Rc<(A, B)>> },
B { #[pin] fut: B::Future },
}
}
impl<A, B, Req> Future for ThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<Result<A::Response, A::Error>>,
{
type Output = Result<B::Response, B::Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
StateProj::A { fut, b } => {
let res = ready!(fut.poll(cx));
let b = b.take().unwrap();
let fut = b.1.call(res);
this.state.set(State::B { fut });
self.poll(cx)
}
StateProj::B { fut } => fut.poll(cx),
}
}
}
/// `.then()` service factory combinator
pub(crate) struct ThenServiceFactory<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
impl<A, B, Req> ThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
Result<A::Response, A::Error>,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
/// Create new `.then()` factory combinator
pub(crate) fn new(a: A, b: B) -> Self {
Self(Rc::new((a, b)), PhantomData)
}
}
impl<A, B, Req> ServiceFactory<Req> for ThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
Result<A::Response, A::Error>,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
type Response = B::Response;
type Error = A::Error;
type Config = A::Config;
type Service = ThenService<A::Service, B::Service, Req>;
type InitError = A::InitError;
type Future = ThenServiceFactoryResponse<A, B, Req>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
let srv = &*self.0;
ThenServiceFactoryResponse::new(srv.0.new_service(cfg.clone()), srv.1.new_service(cfg))
}
}
impl<A, B, Req> Clone for ThenServiceFactory<A, B, Req> {
fn clone(&self) -> Self {
Self(self.0.clone(), PhantomData)
}
}
pin_project! {
pub(crate) struct ThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<
Result<A::Response, A::Error>,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
#[pin]
fut_b: B::Future,
#[pin]
fut_a: A::Future,
a: Option<A::Service>,
b: Option<B::Service>,
}
}
impl<A, B, Req> ThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<
Result<A::Response, A::Error>,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
fn new(fut_a: A::Future, fut_b: B::Future) -> Self {
Self {
fut_a,
fut_b,
a: None,
b: None,
}
}
}
impl<A, B, Req> Future for ThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<
Result<A::Response, A::Error>,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
type Output = Result<ThenService<A::Service, B::Service, Req>, A::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if this.a.is_none() {
if let Poll::Ready(service) = this.fut_a.poll(cx)? {
*this.a = Some(service);
}
}
if this.b.is_none() {
if let Poll::Ready(service) = this.fut_b.poll(cx)? {
*this.b = Some(service);
}
}
if this.a.is_some() && this.b.is_some() {
Poll::Ready(Ok(ThenService::new(
this.a.take().unwrap(),
this.b.take().unwrap(),
)))
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use alloc::rc::Rc;
use core::{
cell::Cell,
task::{Context, Poll},
};
use futures_util::future::lazy;
use crate::{err, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory};
#[derive(Clone)]
struct Srv1(Rc<Cell<usize>>);
impl Service<Result<&'static str, &'static str>> for Srv1 {
type Response = &'static str;
type Error = ();
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Poll::Ready(Ok(()))
}
fn call(&self, req: Result<&'static str, &'static str>) -> Self::Future {
match req {
Ok(msg) => ok(msg),
Err(_) => err(()),
}
}
}
struct Srv2(Rc<Cell<usize>>);
impl Service<Result<&'static str, ()>> for Srv2 {
type Response = (&'static str, &'static str);
type Error = ();
type Future = Ready<Result<Self::Response, ()>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Poll::Ready(Err(()))
}
fn call(&self, req: Result<&'static str, ()>) -> Self::Future {
match req {
Ok(msg) => ok((msg, "ok")),
Err(()) => ok(("srv2", "err")),
}
}
}
#[actix_rt::test]
async fn test_poll_ready() {
let cnt = Rc::new(Cell::new(0));
let srv = pipeline(Srv1(cnt.clone())).then(Srv2(cnt.clone()));
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Err(())));
assert_eq!(cnt.get(), 2);
}
#[actix_rt::test]
async fn test_call() {
let cnt = Rc::new(Cell::new(0));
let srv = pipeline(Srv1(cnt.clone())).then(Srv2(cnt));
let res = srv.call(Ok("srv1")).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv1", "ok"));
let res = srv.call(Err("srv")).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv2", "err"));
}
#[actix_rt::test]
async fn test_factory() {
let cnt = Rc::new(Cell::new(0));
let cnt2 = cnt.clone();
let blank = move || ready(Ok::<_, ()>(Srv1(cnt2.clone())));
let factory = pipeline_factory(blank).then(move || ready(Ok(Srv2(cnt.clone()))));
let srv = factory.new_service(&()).await.unwrap();
let res = srv.call(Ok("srv1")).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv1", "ok"));
let res = srv.call(Err("srv")).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv2", "err"));
}
}


@@ -1,242 +0,0 @@
use alloc::{rc::Rc, sync::Arc};
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use crate::transform_err::TransformMapInitErr;
use crate::{IntoServiceFactory, Service, ServiceFactory};
/// Apply transform to a service.
pub fn apply<T, S, I, Req>(t: T, factory: I) -> ApplyTransform<T, S, Req>
where
I: IntoServiceFactory<S, Req>,
S: ServiceFactory<Req>,
T: Transform<S::Service, Req, InitError = S::InitError>,
{
ApplyTransform::new(t, factory.into_factory())
}
/// The `Transform` trait defines the interface of a service factory that wraps an inner service
/// during construction.
///
/// A transform (middleware) wraps an inner service and runs during
/// inbound and/or outbound processing in the request/response lifecycle.
/// It may modify the request and/or the response.
///
/// For example, timeout transform:
///
/// ```ignore
/// pub struct Timeout<S> {
/// service: S,
/// timeout: Duration,
/// }
///
/// impl<S, Req> Service<Req> for Timeout<S>
/// where
///     S: Service<Req>,
/// {
/// type Response = S::Response;
/// type Error = TimeoutError<S::Error>;
/// type Future = TimeoutServiceResponse<S>;
///
/// actix_service::forward_ready!(service);
///
///     fn call(&self, req: Req) -> Self::Future {
/// TimeoutServiceResponse {
/// fut: self.service.call(req),
/// sleep: Delay::new(clock::now() + self.timeout),
/// }
/// }
/// }
/// ```
///
/// The timeout service in the above example is decoupled from the underlying service
/// implementation and can be applied to any service.
///
/// The `Transform` trait defines the interface of a Service factory. `Transform`
/// is often implemented for middleware, defining how to construct a
/// middleware Service. A Service that is constructed by the factory takes
/// the Service that follows it during execution as a parameter, assuming
/// ownership of the next Service.
///
/// Factory for `Timeout` middleware from the above example could look like this:
///
/// ```ignore
/// pub struct TimeoutTransform {
/// timeout: Duration,
/// }
///
/// impl<S, Req> Transform<S, Req> for TimeoutTransform
/// where
///     S: Service<Req>,
/// {
/// type Response = S::Response;
/// type Error = TimeoutError<S::Error>;
/// type InitError = S::Error;
/// type Transform = Timeout<S>;
/// type Future = Ready<Result<Self::Transform, Self::InitError>>;
///
/// fn new_transform(&self, service: S) -> Self::Future {
///         ok(Timeout {
/// service,
/// timeout: self.timeout,
/// })
/// }
/// }
/// ```
pub trait Transform<S, Req> {
/// Responses given by the service.
type Response;
/// Errors produced by the service.
type Error;
/// The `TransformService` value created by this factory
type Transform: Service<Req, Response = Self::Response, Error = Self::Error>;
/// Errors produced while building a transform service.
type InitError;
/// The future response value.
type Future: Future<Output = Result<Self::Transform, Self::InitError>>;
/// Creates and returns a new Transform component, asynchronously
fn new_transform(&self, service: S) -> Self::Future;
/// Map this transform's factory error to a different error,
/// returning a new transform service factory.
fn map_init_err<F, E>(self, f: F) -> TransformMapInitErr<Self, S, Req, F, E>
where
Self: Sized,
F: Fn(Self::InitError) -> E + Clone,
{
TransformMapInitErr::new(self, f)
}
}
impl<T, S, Req> Transform<S, Req> for Rc<T>
where
T: Transform<S, Req>,
{
type Response = T::Response;
type Error = T::Error;
type Transform = T::Transform;
type InitError = T::InitError;
type Future = T::Future;
fn new_transform(&self, service: S) -> T::Future {
self.as_ref().new_transform(service)
}
}
impl<T, S, Req> Transform<S, Req> for Arc<T>
where
T: Transform<S, Req>,
{
type Response = T::Response;
type Error = T::Error;
type Transform = T::Transform;
type InitError = T::InitError;
type Future = T::Future;
fn new_transform(&self, service: S) -> T::Future {
self.as_ref().new_transform(service)
}
}
/// `Apply` transform to new service
pub struct ApplyTransform<T, S, Req>(Rc<(T, S)>, PhantomData<Req>);
impl<T, S, Req> ApplyTransform<T, S, Req>
where
S: ServiceFactory<Req>,
T: Transform<S::Service, Req, InitError = S::InitError>,
{
/// Create new `ApplyTransform` new service instance
fn new(t: T, service: S) -> Self {
Self(Rc::new((t, service)), PhantomData)
}
}
impl<T, S, Req> Clone for ApplyTransform<T, S, Req> {
fn clone(&self) -> Self {
ApplyTransform(self.0.clone(), PhantomData)
}
}
impl<T, S, Req> ServiceFactory<Req> for ApplyTransform<T, S, Req>
where
S: ServiceFactory<Req>,
T: Transform<S::Service, Req, InitError = S::InitError>,
{
type Response = T::Response;
type Error = T::Error;
type Config = S::Config;
type Service = T::Transform;
type InitError = T::InitError;
type Future = ApplyTransformFuture<T, S, Req>;
fn new_service(&self, cfg: S::Config) -> Self::Future {
ApplyTransformFuture {
store: self.0.clone(),
state: ApplyTransformFutureState::A {
fut: self.0.as_ref().1.new_service(cfg),
},
}
}
}
pin_project! {
pub struct ApplyTransformFuture<T, S, Req>
where
S: ServiceFactory<Req>,
T: Transform<S::Service, Req, InitError = S::InitError>,
{
store: Rc<(T, S)>,
#[pin]
state: ApplyTransformFutureState<T, S, Req>,
}
}
pin_project! {
#[project = ApplyTransformFutureStateProj]
pub enum ApplyTransformFutureState<T, S, Req>
where
S: ServiceFactory<Req>,
T: Transform<S::Service, Req, InitError = S::InitError>,
{
A { #[pin] fut: S::Future },
B { #[pin] fut: T::Future },
}
}
impl<T, S, Req> Future for ApplyTransformFuture<T, S, Req>
where
S: ServiceFactory<Req>,
T: Transform<S::Service, Req, InitError = S::InitError>,
{
type Output = Result<T::Transform, T::InitError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ApplyTransformFutureStateProj::A { fut } => {
let srv = ready!(fut.poll(cx))?;
let fut = this.store.0.new_transform(srv);
this.state.set(ApplyTransformFutureState::B { fut });
self.poll(cx)
}
ApplyTransformFutureStateProj::B { fut } => fut.poll(cx),
}
}
}

View File

@@ -1,97 +0,0 @@
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
use super::Transform;
/// Transform for the `map_init_err` combinator, changing the type of a new
/// transform's init error.
///
/// This is created by the `Transform::map_init_err` method.
pub struct TransformMapInitErr<T, S, Req, F, E> {
transform: T,
mapper: F,
_phantom: PhantomData<(S, Req, E)>,
}
impl<T, S, F, E, Req> TransformMapInitErr<T, S, Req, F, E> {
pub(crate) fn new(t: T, f: F) -> Self
where
T: Transform<S, Req>,
F: Fn(T::InitError) -> E,
{
Self {
transform: t,
mapper: f,
_phantom: PhantomData,
}
}
}
impl<T, S, Req, F, E> Clone for TransformMapInitErr<T, S, Req, F, E>
where
T: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Self {
transform: self.transform.clone(),
mapper: self.mapper.clone(),
_phantom: PhantomData,
}
}
}
impl<T, S, F, E, Req> Transform<S, Req> for TransformMapInitErr<T, S, Req, F, E>
where
T: Transform<S, Req>,
F: Fn(T::InitError) -> E + Clone,
{
type Response = T::Response;
type Error = T::Error;
type Transform = T::Transform;
type InitError = E;
type Future = TransformMapInitErrFuture<T, S, F, E, Req>;
fn new_transform(&self, service: S) -> Self::Future {
TransformMapInitErrFuture {
fut: self.transform.new_transform(service),
f: self.mapper.clone(),
}
}
}
pin_project! {
pub struct TransformMapInitErrFuture<T, S, F, E, Req>
where
T: Transform<S, Req>,
F: Fn(T::InitError) -> E,
{
#[pin]
fut: T::Future,
f: F,
}
}
impl<T, S, F, E, Req> Future for TransformMapInitErrFuture<T, S, F, E, Req>
where
T: Transform<S, Req>,
F: Fn(T::InitError) -> E + Clone,
{
type Output = Result<T::Transform, E>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if let Poll::Ready(res) = this.fut.poll(cx) {
Poll::Ready(res.map_err(this.f))
} else {
Poll::Pending
}
}
}

View File

@@ -1,72 +0,0 @@
# Changes
## Unreleased - 2021-xx-xx
* Rename `accept::openssl::{SslStream => TlsStream}`.
* Add `connect::Connect::set_local_addr` to attach a local `IpAddr`. [#282]
* `connector::TcpConnector` service now tries to bind to the local `IpAddr` when one is set via `set_local_addr`. [#282]
[#282]: https://github.com/actix/actix-net/pull/282
## 3.0.0-beta.3 - 2021-02-06
* Remove `trust-dns-proto` and `trust-dns-resolver`. [#248]
* Use `std::net::ToSocketAddrs` as simple and basic default resolver. [#248]
* Add `Resolve` trait for custom DNS resolvers. [#248]
* Add `Resolver::new_custom` function to construct custom resolvers. [#248]
* Export `webpki_roots::TLS_SERVER_ROOTS` in `actix_tls::connect` mod and remove
the export from `actix_tls::accept`. [#248]
* Remove `ConnectTakeAddrsIter`. `Connect::take_addrs` now returns `ConnectAddrsIter<'static>`
as owned iterator. [#248]
* Rename `Address::{host => hostname}` to more accurately describe which URL segment is returned.
* Update `actix-rt` to `2.0.0`. [#273]
[#248]: https://github.com/actix/actix-net/pull/248
[#273]: https://github.com/actix/actix-net/pull/273
## 3.0.0-beta.2 - 2021-xx-xx
* Depend on stable trust-dns packages. [#204]
[#204]: https://github.com/actix/actix-net/pull/204
## 3.0.0-beta.1 - 2020-12-29
* Move acceptors under `accept` module. [#238]
* Merge `actix-connect` crate under `connect` module. [#238]
* Add feature flags to enable acceptors and/or connectors individually. [#238]
[#238]: https://github.com/actix/actix-net/pull/238
## 2.0.0 - 2020-09-03
* `nativetls::NativeTlsAcceptor` is renamed to `nativetls::Acceptor`.
* Where possible, "SSL" terminology is replaced with "TLS".
* `SslError` is renamed to `TlsError`.
* `TlsError::Ssl` enum variant is renamed to `TlsError::Tls`.
* `max_concurrent_ssl_connect` is renamed to `max_concurrent_tls_connect`.
## 2.0.0-alpha.2 - 2020-08-17
* Update `rustls` dependency to 0.18
* Update `tokio-rustls` dependency to 0.14
* Update `webpki-roots` dependency to 0.20
## [2.0.0-alpha.1] - 2020-03-03
* Update `rustls` dependency to 0.17
* Update `tokio-rustls` dependency to 0.13
* Update `webpki-roots` dependency to 0.19
## [1.0.0] - 2019-12-11
* 1.0.0 release
## [1.0.0-alpha.3] - 2019-12-07
* Migrate to tokio 0.2
* Enable rustls acceptor service
* Enable native-tls acceptor service
## [1.0.0-alpha.1] - 2019-12-02
* Split openssl acceptor from actix-server package

View File

@@ -1,82 +0,0 @@
[package]
name = "actix-tls"
version = "3.0.0-beta.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "TLS acceptor and connector services for Actix ecosystem"
keywords = ["network", "tls", "ssl", "async", "transport"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-tls"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = ["openssl", "rustls", "native-tls", "accept", "connect", "uri"]
[lib]
name = "actix_tls"
path = "src/lib.rs"
[features]
default = ["accept", "connect", "uri"]
# enable acceptor services
accept = []
# enable connector services
connect = []
# use openssl impls
openssl = ["tls-openssl", "tokio-openssl"]
# use rustls impls
rustls = ["tokio-rustls", "webpki-roots"]
# use native-tls impls
native-tls = ["tokio-native-tls"]
# support http::Uri as connect address
uri = ["http"]
[dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = { version = "2.0.0", default-features = false }
actix-service = "2.0.0-beta.4"
actix-utils = "3.0.0-beta.2"
derive_more = "0.99.5"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
http = { version = "0.2.3", optional = true }
log = "0.4"
tokio-util = { version = "0.6.3", default-features = false }
# openssl
tls-openssl = { package = "openssl", version = "0.10.9", optional = true }
tokio-openssl = { version = "0.6", optional = true }
# rustls
tokio-rustls = { version = "0.22", optional = true }
webpki-roots = { version = "0.21", optional = true }
# native-tls
tokio-native-tls = { version = "0.3", optional = true }
[target.'cfg(windows)'.dependencies.tls-openssl]
version = "0.10.9"
package = "openssl"
features = ["vendored"]
optional = true
[dev-dependencies]
actix-rt = "2.0.0"
actix-server = "2.0.0-beta.3"
bytes = "1"
env_logger = "0.8"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
log = "0.4"
trust-dns-resolver = "0.20.0"
[[example]]
name = "basic"
required-features = ["accept", "rustls"]

View File

@@ -1 +0,0 @@
../LICENSE-APACHE

View File

@@ -1 +0,0 @@
../LICENSE-MIT

View File

@@ -1,87 +0,0 @@
//! TLS Acceptor Server
//!
//! Using either HTTPie (`http`) or cURL:
//!
//! These commands will produce errors in the server log:
//! ```sh
//! curl 127.0.0.1:8443
//! http 127.0.0.1:8443
//! ```
//!
//! These commands will show "empty reply" on the client but will debug print the TLS stream info
//! in the server log, indicating a successful TLS handshake:
//! ```sh
//! curl -k https://127.0.0.1:8443
//! http --verify=false https://127.0.0.1:8443
//! ```
// this use only exists because of how we have organised the crate
// it is not necessary for your actual code
use tokio_rustls::rustls;
use std::{
env,
fs::File,
io::{self, BufReader},
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::pipeline_factory;
use actix_tls::accept::rustls::{Acceptor as RustlsAcceptor, TlsStream};
use futures_util::future::ok;
use log::info;
use rustls::{
internal::pemfile::certs, internal::pemfile::rsa_private_keys, NoClientAuth, ServerConfig,
};
#[derive(Debug)]
struct ServiceState {
num: Arc<AtomicUsize>,
}
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "actix=trace,basic=trace");
env_logger::init();
let mut tls_config = ServerConfig::new(NoClientAuth::new());
// Load TLS key and cert files
let cert_file = &mut BufReader::new(File::open("./examples/cert.pem").unwrap());
let key_file = &mut BufReader::new(File::open("./examples/key.pem").unwrap());
let cert_chain = certs(cert_file).unwrap();
let mut keys = rsa_private_keys(key_file).unwrap();
tls_config
.set_single_cert(cert_chain, keys.remove(0))
.unwrap();
let tls_acceptor = RustlsAcceptor::new(tls_config);
let count = Arc::new(AtomicUsize::new(0));
let addr = ("127.0.0.1", 8443);
info!("starting server on port: {}", &addr.0);
Server::build()
.bind("tls-example", addr, move || {
let count = Arc::clone(&count);
// Set up TLS service factory
pipeline_factory(tls_acceptor.clone())
.map_err(|err| println!("Rustls error: {:?}", err))
.and_then(move |stream: TlsStream<TcpStream>| {
let num = count.fetch_add(1, Ordering::Relaxed);
info!("[{}] Got TLS connection: {:?}", num, &*stream);
ok(())
})
})?
.workers(1)
.run()
.await
}

View File

@@ -1,25 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIENjCCAp6gAwIBAgIRANp+D9pBErdacw6KjrwJ+4swDQYJKoZIhvcNAQELBQAw
bTEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMSEwHwYDVQQLDBhyb2JA
c29tYnJhLng1Mi5kZXYgKFJvYikxKDAmBgNVBAMMH21rY2VydCByb2JAc29tYnJh
Lng1Mi5kZXYgKFJvYikwHhcNMTkwNjAxMDAwMDAwWhcNMzAwOTEzMDIzNDI0WjBM
MScwJQYDVQQKEx5ta2NlcnQgZGV2ZWxvcG1lbnQgY2VydGlmaWNhdGUxITAfBgNV
BAsMGHJvYkBzb21icmEueDUyLmRldiAoUm9iKTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBALYAn8dsQUDTp8SptAtkiAySvQYLpAOct3/OjBn+dSYfbQcp
Ph9w/Zo83Msl7Fb1DBvADHFtyBpESATZ2chS5fwCAwUFTlKrzMk3qauEoJ3cCQa8
ccqhTMLeT38jRlhXrMHWBfz0ipqy+yTLWeM32LX8s0jPbbsZ3gVJ/Ls4qm0CTaqb
zRdcQ7GTVKYet5DR7ZvwvAaLtWk/iiHKwnOveuF27HNlxj0Rwd/lhJ/t9x8xJwyR
MTdm852KQadI8xOSbWNK4j9419yzKjUEMKgn78wT/7DQfeKKCAreHa4MaEw4+koD
2Bqb+V4fI6T84VvXkNG3CjSpmIiYGlIE1LVgBL8CAwEAAaNyMHAwDgYDVR0PAQH/
BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHwYDVR0j
BBgwFoAUto/ox0MqZShmQpViV/gjfJKrMDkwGgYDVR0RBBMwEYIJbG9jYWxob3N0
hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBgQBUCMzqTY0sg+61gh8gKS5TCL6qs0R1
xys/EFFaD5JYUsfM/HyhHd0jq+x5Pd3mB2Jvhoq9xhjMwP11H8Uw5lLBHA8USdF9
EiLW1GvT3/gLfMqb0lPk0RMRBeX8c0QbDtqdiUCE7S6zJbZ5gjFeRuFNjdcGA1Ss
8CPPts2mns5cwah6H7T/BFzj5aR9Qe14vo1Rpr5gD5CpHvk1t16q7YsczQfVMvt3
Ydk6p0rwA8Z5okQK7y3qKPZI+//ygWL6ZBjVjl1/Al8vybG2UYjYgfMBwaVvMiDJ
j/vCdVmlvGb+MZlZID/p2veaNeEKgi1A1EOj3sNuQYXXFfSD9mdamX7JIfGi/U7v
ivvUjJUbzGrUngldt5iCKqcCQum7nlzu9sT1Tm2t/n4tz/btrI+Wimg8riSzM+Nk
dfuvv4NbWe6Th5460HH8mMvfPZSB8dCoxwm98tuqcMXLkR1RJX5Z8LYAaPTsUs/h
HxQCY4EaY7feZ/qFal9FGwvpzVr3/XjgSCU=
-----END CERTIFICATE-----

View File

@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAtgCfx2xBQNOnxKm0C2SIDJK9BgukA5y3f86MGf51Jh9tByk+
H3D9mjzcyyXsVvUMG8AMcW3IGkRIBNnZyFLl/AIDBQVOUqvMyTepq4SgndwJBrxx
yqFMwt5PfyNGWFeswdYF/PSKmrL7JMtZ4zfYtfyzSM9tuxneBUn8uziqbQJNqpvN
F1xDsZNUph63kNHtm/C8Bou1aT+KIcrCc6964Xbsc2XGPRHB3+WEn+33HzEnDJEx
N2bznYpBp0jzE5JtY0riP3jX3LMqNQQwqCfvzBP/sNB94ooICt4drgxoTDj6SgPY
Gpv5Xh8jpPzhW9eQ0bcKNKmYiJgaUgTUtWAEvwIDAQABAoIBADC0Zg21+Jhii6jj
SR0rYAUNV6xAfTnCPJDlMzTZlXwIOOMLtGYxlIwr8WIj2eVDWmQqtqm8GSp+T0+N
BOzI0mboGurDCryw4PKQBMWzjk/wTDITR9hT5fjYCSoaxH5rp/2PSrbwsg7ICtFD
4eAeV84Lu+amK9VADNwZepqXhXP6EDOY5yovkwzOQNDM/qVzHSe9EoFP74M/oWnY
ohIuWdZzwAZuTA5SUjPygiVzs/vhsrSE9crMIzr5VgKBi+C+ALkrL7Lc4GlRPI4r
6VsbIxZHa7who+FhjZ0cVfdXHH47QDdf10X5bEXsaFBvGGCLtkQ3XEpov6GOlaH+
aY7fzPECgYEA4LGloaMC9J27uyPxHkQwEehexmJdIu0vNUefv5yiO9PbvrjvYnh7
JxRVgv1fy2bRMOvg19TujCYRZdkrLDqSDsfFfEiThvlFBRZfKKIHmWdyfvIe9Jp9
rqdxhWAco7FoM+W6c8c4iR4xs8/GA60CVcAiTLqgPWWzn12fesiULi0CgYEAz1xD
OulJyfpHVGQ6ZM1wR0SZ9H9GS3BenpL2ue5uBfe3hM+JIAAM61Y48wJuCWT5EvfL
FgnH3oCo7SYGcgGkERS8H7k67DJCLlqDo/3FC7lX/irz+ya/FoZmKBagvjEUWhpe
Bb2dRIbqsG0lsCzU9MVrgtvodD0MBTyt0RM5fhsCgYEAhgYQiLhGBAituLN4mBgO
IDBdj7GOYk3dkcc2J0HTlyIIeduvlinNM4Myel6NrDKY5rhbtgGhhGEUkY6W7NvG
0SAh0L8tmB3JKH6upfr3023b4pKjGj2oZ+wij27DxnQEdqg5reOP+mHTPbDaKMki
kml3TBMpj1XBbXaXsNJBaMUCgYEAnnNzEC4563QrU2pvUJ3HgT4Dotgqv/Sy6NuG
W1e9jSPYgU0RDHndZWtygwdFTDpzNbJR5po8t2J7MxQOcsmcNE0y387sHpbdCYyy
8Po2uxm7CoaJ/02BUVYL8/Aujob0dVGWrS5SYY3zAjO1S+VGKXA+EjW2cDRB3jKa
45ucICcCgYBdMxB5Oj6GpdewWWaBss9dwHtDaD4oVGYIBbIc2qdyCYixWdW9NccV
fRJs0ulGrpg9OtyWbwZASu2jz55+s3hi4rnrcaXKiIh9Rs25v1irF6Dmduvo7CaN
Mf7zBg7LUttmqN6D3npIAxmBULl8KRfjnt6U2tJolF5X0qQ1uqnnTA==
-----END RSA PRIVATE KEY-----

View File

@@ -1,42 +0,0 @@
//! TLS acceptor services for Actix ecosystem.
//!
//! ## Crate Features
//! * `openssl` - TLS acceptor using the `openssl` crate.
//! * `rustls` - TLS acceptor using the `rustls` crate.
//! * `native-tls` - TLS acceptor using the `native-tls` crate.
use std::sync::atomic::{AtomicUsize, Ordering};
use actix_utils::counter::Counter;
#[cfg(feature = "openssl")]
pub mod openssl;
#[cfg(feature = "rustls")]
pub mod rustls;
#[cfg(feature = "native-tls")]
pub mod nativetls;
pub(crate) static MAX_CONN: AtomicUsize = AtomicUsize::new(256);
thread_local! {
static MAX_CONN_COUNTER: Counter = Counter::new(MAX_CONN.load(Ordering::Relaxed));
}
/// Sets the maximum per-worker concurrent TLS connection limit.
///
/// All listeners will stop accepting connections when this limit is reached.
/// It can be used to regulate the global TLS CPU usage.
///
/// By default, the connection limit is 256.
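///
/// A minimal usage sketch (the value shown is illustrative, not a
/// recommendation); call this before the server starts accepting connections:
///
/// ```ignore
/// // allow up to 512 concurrent TLS handshakes/connections per worker
/// actix_tls::accept::max_concurrent_tls_connect(512);
/// ```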
pub fn max_concurrent_tls_connect(num: usize) {
MAX_CONN.store(num, Ordering::Relaxed);
}
/// TLS error combined with service error.
#[derive(Debug)]
pub enum TlsError<E1, E2> {
Tls(E1),
Service(E2),
}

View File

@@ -1,172 +0,0 @@
use std::{
io::{self, IoSlice},
ops::{Deref, DerefMut},
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
use actix_rt::net::ActixStream;
use actix_service::{Service, ServiceFactory};
use actix_utils::counter::Counter;
use futures_core::future::LocalBoxFuture;
pub use tokio_native_tls::native_tls::Error;
pub use tokio_native_tls::TlsAcceptor;
use super::MAX_CONN_COUNTER;
/// Wrapper type for `tokio_native_tls::TlsStream` in order to impl `ActixStream` trait.
pub struct TlsStream<T>(tokio_native_tls::TlsStream<T>);
impl<T> From<tokio_native_tls::TlsStream<T>> for TlsStream<T> {
fn from(stream: tokio_native_tls::TlsStream<T>) -> Self {
Self(stream)
}
}
impl<T: ActixStream> Deref for TlsStream<T> {
type Target = tokio_native_tls::TlsStream<T>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T: ActixStream> DerefMut for TlsStream<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<T: ActixStream> AsyncRead for TlsStream<T> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_read(cx, buf)
}
}
impl<T: ActixStream> AsyncWrite for TlsStream<T> {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut **self.get_mut()).poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut **self.get_mut()).poll_shutdown(cx)
}
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[IoSlice<'_>],
) -> Poll<io::Result<usize>> {
Pin::new(&mut **self.get_mut()).poll_write_vectored(cx, bufs)
}
fn is_write_vectored(&self) -> bool {
(&**self).is_write_vectored()
}
}
impl<T: ActixStream> ActixStream for TlsStream<T> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
T::poll_read_ready((&**self).get_ref().get_ref().get_ref(), cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
T::poll_write_ready((&**self).get_ref().get_ref().get_ref(), cx)
}
}
/// Accept TLS connections via the `native-tls` package.
///
/// The `native-tls` feature enables this `Acceptor` type.
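///
/// A hedged construction sketch (certificate loading and error handling are
/// elided; `identity` is assumed to be a `native_tls::Identity`):
///
/// ```ignore
/// // build a blocking native-tls acceptor, then wrap it in the tokio adapter
/// let native_acceptor = tokio_native_tls::native_tls::TlsAcceptor::new(identity)?;
/// let acceptor = Acceptor::new(tokio_native_tls::TlsAcceptor::from(native_acceptor));
/// ```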
pub struct Acceptor {
acceptor: TlsAcceptor,
}
impl Acceptor {
/// Create `native-tls` based `Acceptor` service factory.
#[inline]
pub fn new(acceptor: TlsAcceptor) -> Self {
Acceptor { acceptor }
}
}
impl Clone for Acceptor {
#[inline]
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
}
}
}
impl<T: ActixStream> ServiceFactory<T> for Acceptor {
type Response = TlsStream<T>;
type Error = Error;
type Config = ();
type Service = NativeTlsAcceptorService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let res = MAX_CONN_COUNTER.with(|conns| {
Ok(NativeTlsAcceptorService {
acceptor: self.acceptor.clone(),
conns: conns.clone(),
})
});
Box::pin(async { res })
}
}
pub struct NativeTlsAcceptorService {
acceptor: TlsAcceptor,
conns: Counter,
}
impl Clone for NativeTlsAcceptorService {
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
conns: self.conns.clone(),
}
}
}
impl<T: ActixStream> Service<T> for NativeTlsAcceptorService {
type Response = TlsStream<T>;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<TlsStream<T>, Error>>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.conns.available(cx) {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn call(&self, io: T) -> Self::Future {
let guard = self.conns.get();
let this = self.clone();
Box::pin(async move {
let io = this.acceptor.accept(io).await;
drop(guard);
io.map(Into::into)
})
}
}

Some files were not shown because too many files have changed in this diff