mirror of https://github.com/fafhrd91/actix-net synced 2025-04-21 02:37:01 +02:00

Compare commits


No commits in common. "master" and "tracing-0.1.0" have entirely different histories.

243 changed files with 14563 additions and 17923 deletions

.appveyor.yml Normal file

@ -0,0 +1,41 @@
environment:
global:
PROJECT_NAME: actix-net
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test


@ -1,15 +0,0 @@
[alias]
lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo"
lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"
# just check the library (without dev deps)
ci-check-min = "hack --workspace check --no-default-features"
ci-check-lib = "hack --workspace --feature-powerset --depth=2 --exclude-features=io-uring check"
ci-check-lib-linux = "hack --workspace --feature-powerset --depth=2 check"
# check everything
ci-check = "hack --workspace --feature-powerset --depth=2 --exclude-features=io-uring check --tests --examples"
ci-check-linux = "hack --workspace --feature-powerset --depth=2 check --tests --examples"
# tests avoiding io-uring feature
ci-test = "hack --feature-powerset --depth=2 --exclude-features=io-uring test --lib --tests --no-fail-fast -- --nocapture"

.envrc

@ -1 +0,0 @@
use flake


@ -1,25 +0,0 @@
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
INSERT_PR_TYPE
## PR Checklist
Check your PR fulfills the following:
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt
## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->
<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->


@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: /
schedule:
interval: weekly
- package-ecosystem: cargo
directory: /
schedule:
interval: weekly


@ -1,129 +0,0 @@
name: CI (post-merge)
on:
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_and_test_nightly:
strategy:
fail-fast: false
matrix:
# prettier-ignore
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
- { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
version:
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env: {}
steps:
- name: Setup Routing
if: matrix.target.os == 'macos-latest'
run: sudo ifconfig lo0 alias 127.0.0.3
- uses: actions/checkout@v4
- name: Free Disk Space
if: matrix.target.os == 'ubuntu-latest'
run: ./scripts/free-disk-space.sh
- name: Setup mold linker
if: matrix.target.os == 'ubuntu-latest'
uses: rui314/setup-mold@v1
- name: Install nasm
if: matrix.target.os == 'windows-latest'
uses: ilammy/setup-nasm@v1.5.2
- name: Install OpenSSL
if: matrix.target.os == 'windows-latest'
shell: bash
run: |
set -e
choco install openssl --version=1.1.1.2100 -y --no-progress
echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV
- name: Install Rust (${{ matrix.version }})
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: ${{ matrix.version }}
- name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
uses: taiki-e/install-action@v2.49.45
with:
tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean
- name: check lib
if: >
matrix.target.os != 'ubuntu-latest'
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
run: cargo ci-check-lib
- name: check lib
if: matrix.target.os == 'ubuntu-latest'
run: cargo ci-check-lib-linux
- name: check lib
if: matrix.target.triple == 'x86_64-pc-windows-gnu'
run: cargo ci-check-min
- name: check full
# TODO: compile OpenSSL and run tests on MinGW
if: >
matrix.target.os != 'ubuntu-latest'
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
run: cargo ci-check
- name: check all
if: matrix.target.os == 'ubuntu-latest'
run: cargo ci-check-linux
- name: tests
run: just test
# TODO: re-instate some io-uring tests PRs
# - name: tests
# if: matrix.target.os == 'ubuntu-latest'
# run: >-
# sudo bash -c "
# ulimit -Sl 512
# && ulimit -Hl 512
# && PATH=$PATH:/usr/share/rust/.cargo/bin
# && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-rustls-020
# && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-rustls-021
# && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-linux
# "
- name: CI cache clean
run: cargo-ci-cache-clean
minimal-versions:
name: minimal versions
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
- name: Install cargo-hack & cargo-minimal-versions
uses: taiki-e/install-action@v2.49.45
with:
tool: cargo-hack,cargo-minimal-versions
- name: Check With Minimal Versions
run: cargo minimal-versions check


@ -1,133 +0,0 @@
name: CI
on:
pull_request: {}
merge_group: { types: [checks_requested] }
push: { branches: [master] }
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
read_msrv:
name: Read MSRV
uses: actions-rust-lang/msrv/.github/workflows/msrv.yml@v0.1.0
build_and_test:
needs:
- read_msrv
strategy:
fail-fast: false
matrix:
# prettier-ignore
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
- { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
version:
- { name: msrv, version: "${{ needs.read_msrv.outputs.msrv }}" }
- { name: stable, version: stable }
name: ${{ matrix.target.name }} / ${{ matrix.version.name }}
runs-on: ${{ matrix.target.os }}
env: {}
steps:
- name: Setup Routing
if: matrix.target.os == 'macos-latest'
run: sudo ifconfig lo0 alias 127.0.0.3
- uses: actions/checkout@v4
- name: Free Disk Space
if: matrix.target.os == 'ubuntu-latest'
run: ./scripts/free-disk-space.sh
- name: Setup mold linker
if: matrix.target.os == 'ubuntu-latest'
uses: rui314/setup-mold@v1
- name: Install nasm
if: matrix.target.os == 'windows-latest'
uses: ilammy/setup-nasm@v1.5.2
- name: Install OpenSSL
if: matrix.target.os == 'windows-latest'
shell: bash
run: |
set -e
choco install openssl --version=1.1.1.2100 -y --no-progress
echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV
- name: Install Rust (${{ matrix.version.name }})
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: ${{ matrix.version.version }}
- name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
uses: taiki-e/install-action@v2.49.45
with:
tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean
- name: Generate Cargo.lock
run: cargo generate-lockfile
- name: workaround MSRV issues
if: matrix.version.name == 'msrv'
run: just downgrade-for-msrv
- name: check lib
if: >
matrix.target.os != 'ubuntu-latest'
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
run: cargo ci-check-lib
- name: check lib
if: matrix.target.os == 'ubuntu-latest'
run: cargo ci-check-lib-linux
- name: check lib
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
run: cargo ci-check-min
- name: check full
# TODO: compile OpenSSL and run tests on MinGW
if: >
matrix.target.os != 'ubuntu-latest'
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
run: cargo ci-check
- name: check all
if: matrix.target.os == 'ubuntu-latest'
run: cargo ci-check-linux
- name: tests
run: just test
- name: CI cache clean
run: cargo-ci-cache-clean
docs:
name: Documentation
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (nightly)
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
- name: Install just
uses: taiki-e/install-action@v2.49.45
with:
tool: just
- name: doc tests
run: just test-docs


@ -1,39 +0,0 @@
name: Coverage
on:
push:
branches: [master]
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
coverage:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
components: llvm-tools-preview
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@v2.49.45
with:
tool: cargo-llvm-cov
- name: Generate code coverage
run: cargo llvm-cov --workspace --all-features --codecov --output-path codecov.json
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v5.4.0
with:
files: codecov.json
fail_ci_if_error: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}


@ -1,69 +0,0 @@
name: Lint
on:
pull_request: {}
merge_group: { types: [checks_requested] }
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: nightly
components: rustfmt
- name: Rustfmt Check
run: cargo fmt --all -- --check
clippy:
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with: { components: clippy }
- uses: giraffate/clippy-action@v1.0.1
with:
reporter: "github-pr-check"
github_token: ${{ secrets.GITHUB_TOKEN }}
clippy_flags: --workspace --all-features --tests --examples --bins -- -Dclippy::todo -Aunknown_lints
check-external-types:
if: false # rustdoc mismatch currently
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust (${{ vars.RUST_VERSION_EXTERNAL_TYPES }})
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
with:
toolchain: ${{ vars.RUST_VERSION_EXTERNAL_TYPES }}
- name: Install just
uses: taiki-e/install-action@v2.49.45
with:
tool: just
- name: Install cargo-check-external-types
uses: taiki-e/cache-cargo-install-action@v2.1.1
with:
tool: cargo-check-external-types
- name: check external types
run: just check-external-types-all +${{ vars.RUST_VERSION_EXTERNAL_TYPES }}

.github/workflows/main.yml vendored Normal file

@ -0,0 +1,78 @@
name: CI
on: [push, pull_request]
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
toolchain:
- x86_64-pc-windows-msvc
- x86_64-pc-windows-gnu
- i686-pc-windows-msvc
- x86_64-apple-darwin
version:
- stable
- nightly
include:
- toolchain: x86_64-pc-windows-msvc
os: windows-latest
arch: x64
- toolchain: x86_64-pc-windows-gnu
os: windows-latest
- toolchain: i686-pc-windows-msvc
os: windows-latest
arch: x86
- toolchain: x86_64-apple-darwin
os: macOS-latest
name: ${{ matrix.version }} - ${{ matrix.toolchain }}
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@master
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.toolchain }}
default: true
- name: Install OpenSSL
if: matrix.toolchain == 'x86_64-pc-windows-msvc' || matrix.toolchain == 'i686-pc-windows-msvc'
run: |
vcpkg integrate install
vcpkg install openssl:${{ matrix.arch }}-windows
- name: check nightly
if: matrix.version == 'nightly'
uses: actions-rs/cargo@v1
with:
command: check
args: --all --benches --bins --examples --tests
- name: check stable
if: matrix.version == 'stable'
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
if: matrix.toolchain != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features -- --nocapture
- name: tests on x86_64-pc-windows-gnu
if: matrix.toolchain == 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: test
args: --all -- --nocapture

.gitignore vendored

@ -1,3 +1,4 @@
Cargo.lock
target/
guide/build/
/gh-pages
@ -12,8 +13,4 @@ guide/build/
# These are backup files generated by rustfmt
**/*.rs.bk
# IDEs
.idea
# direnv
/.direnv


@ -1,3 +0,0 @@
group_imports = "StdExternalCrate"
imports_granularity = "Crate"
use_field_init_shorthand = true


@ -1,29 +0,0 @@
exclude = ["target/*"]
include = ["**/*.toml"]
[formatting]
column_width = 110
[[rule]]
include = ["**/Cargo.toml"]
keys = [
"dependencies",
"*-dependencies",
"workspace.dependencies",
"workspace.*-dependencies",
"target.*.dependencies",
"target.*.*-dependencies",
]
formatting.reorder_keys = true
[[rule]]
include = ["**/Cargo.toml"]
keys = [
"dependencies.*",
"*-dependencies.*",
"workspace.dependencies.*",
"workspace.*-dependencies.*",
"target.*.dependencies",
"target.*.*-dependencies",
]
formatting.reorder_keys = false

.travis.yml Normal file

@ -0,0 +1,49 @@
language: rust
sudo: required
dist: trusty
cache:
cargo: true
apt: true
matrix:
include:
- rust: stable
- rust: beta
- rust: nightly-2019-11-07
allow_failures:
- rust: nightly-2019-11-07
env:
global:
- RUSTFLAGS="-C link-dead-code"
- OPENSSL_VERSION=openssl-1.0.2
before_install:
- sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
- sudo apt-get update -qq
- sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
before_cache: |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-11-07" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install --version 0.6.11 cargo-tarpaulin
fi
# Add clippy
before_script:
- export PATH=$PATH:~/.cargo/bin
script:
- |
if [[ "$TRAVIS_RUST_VERSION" != "nightly-2019-11-07" ]]; then
cargo clean
cargo test --all --all-features -- --nocapture
fi
after_success:
- |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-11-07" ]]; then
taskset -c 0 cargo tarpaulin --all --all-features --out Xml
echo "Uploaded code coverage"
bash <(curl -s https://codecov.io/bash)
fi


@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo
Examples of behavior that contributes to creating a positive environment include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
@ -34,13 +34,10 @@ This Code of Conduct applies both within project spaces and in public spaces whe
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
[@robjtede]: https://github.com/robjtede
[@johntitor]: https://github.com/JohnTitor
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

Cargo.lock generated

File diff suppressed because it is too large.


@ -1,44 +1,33 @@
[workspace]
resolver = "2"
members = [
"actix-codec",
"actix-macros",
"actix-connect",
"actix-ioframe",
"actix-rt",
"actix-server",
"actix-macros",
"actix-service",
"actix-server",
"actix-testing",
"actix-threadpool",
"actix-tls",
"actix-tracing",
"actix-utils",
"bytestring",
"local-channel",
"local-waker",
"router",
"string",
]
[workspace.package]
license = "MIT OR Apache-2.0"
edition = "2021"
rust-version = "1.71.1"
[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-macros = { path = "actix-macros" }
actix-connect = { path = "actix-connect" }
actix-ioframe = { path = "actix-ioframe" }
actix-rt = { path = "actix-rt" }
actix-macros = { path = "actix-macros" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
actix-testing = { path = "actix-testing" }
actix-threadpool = { path = "actix-threadpool" }
actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }
local-channel = { path = "local-channel" }
local-waker = { path = "local-waker" }
[profile.release]
lto = true
opt-level = 3
codegen-units = 1
[workspace.lints.rust]
rust_2018_idioms = "deny"
nonstandard-style = "deny"
future_incompatible = "deny"
missing_docs = { level = "warn", priority = -1 }
actix-router = { path = "router" }
bytestring = { path = "string" }


@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Actix Team
Copyright 2017-NOW Nikolay Kim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
Copyright (c) 2017-NOW Actix Team
Copyright (c) 2017 Nikolay Kim
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated


@ -1,30 +1,69 @@
# Actix Net
# Actix net [![Build Status](https://travis-ci.org/actix/actix-net.svg?branch=master)](https://travis-ci.org/actix/actix-net) [![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
> A collection of lower-level libraries for composable network services.
Actix net - framework for composable network services
[![CI](https://github.com/actix/actix-net/actions/workflows/ci.yml/badge.svg?event=push&style=flat-square)](https://github.com/actix/actix-net/actions/workflows/ci.yml)
[![codecov](https://codecov.io/gh/actix/actix-net/graph/badge.svg?token=8rKIZKtLLm)](https://codecov.io/gh/actix/actix-net)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
[![Dependency Status](https://deps.rs/repo/github/actix/actix-net/status.svg)](https://deps.rs/repo/github/actix/actix-net)
## Documentation & community resources
## Examples
* [Chat on gitter](https://gitter.im/actix/actix)
* Minimum supported Rust version: 1.39 or later
See example folders for [`actix-server`](./actix-server/examples) and [`actix-tls`](./actix-tls/examples).
## Example
## MSRV
```rust
fn main() -> io::Result<()> {
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder.set_private_key_file("./examples/key.pem", SslFiletype::PEM).unwrap();
builder.set_certificate_chain_file("./examples/cert.pem").unwrap();
let acceptor = builder.build();
Crates in this repo currently have a Minimum Supported Rust Version (MSRV) of 1.65. As a policy, we permit MSRV increases in non-breaking releases.
let num = Arc::new(AtomicUsize::new(0));
// Bind socket address and start workers. By default, the server uses the number
// of available logical CPUs as the thread count. actix-net starts a separate
// instance of the service pipeline in each worker.
Server::build()
.bind(
// configure service pipeline
"basic", "0.0.0.0:8443",
move || {
let num = num.clone();
let acceptor = acceptor.clone();
// construct transformation pipeline
pipeline(
// service for converting incoming TcpStream to a SslStream<TcpStream>
fn_service(move |stream: actix_rt::net::TcpStream| async move {
SslAcceptorExt::accept_async(&acceptor, stream.into_parts().0).await
.map_err(|e| println!("Openssl error: {}", e))
}))
// The .and_then() combinator chains the result of the previous service call
// to the argument of the next service call. In this case, on success we chain
// the SSL stream to the `logger` service.
.and_then(fn_service(logger))
// Next service counts number of connections
.and_then(move |_| {
let num = num.fetch_add(1, Ordering::Relaxed);
println!("got ssl connection {:?}", num);
future::ok(())
})
},
)?
.run()
}
```
## License
The crates in this repo are licensed under either of:
This project is licensed under either of
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-net repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.
Contribution to the actix-net crate is organized under the terms of the
Contributor Covenant; the maintainer of actix-net, @fafhrd91, promises to
intervene to uphold that code of conduct.


@ -1,85 +1,33 @@
# Changes
## Unreleased
* Use `.advance()` instead of `.split_to()`
- Minimum supported Rust version (MSRV) is now 1.71.
## [0.2.0] - 2019-12-10
## 0.5.2
* Use specific futures dependencies
- Minimum supported Rust version (MSRV) is now 1.65.
## [0.2.0-alpha.4]
## 0.5.1
* Fix buffer remaining capacity calculation
- Logs emitted now use the `tracing` crate with `log` compatibility.
- Minimum supported Rust version (MSRV) is now 1.49.
## [0.2.0-alpha.3]
## 0.5.0
* Use tokio 0.2
- Updated `tokio-util` dependency to `0.7.0`.
* Fix low/high watermark for write/read buffers
## 0.4.2
## [0.2.0-alpha.2]
- No significant changes since `0.4.1`.
* Migrated to `std::future`
## 0.4.1
## [0.1.2] - 2019-03-27
- Added `LinesCodec`.
- `Framed::poll_ready` flushes when the buffer is full.
* Added `Framed::map_io()` method.
## 0.4.0
## [0.1.1] - 2019-03-06
- No significant changes since v0.4.0-beta.1.
* Added `FramedParts::with_read_buffer()` method.
## 0.4.0-beta.1
## [0.1.0] - 2018-12-09
- Replace `pin-project` with `pin-project-lite`.
- Upgrade `tokio` dependency to `1`.
- Upgrade `tokio-util` dependency to `0.6`.
- Upgrade `bytes` dependency to `1`.
## 0.3.0
- No changes from beta 2.
## 0.3.0-beta.2
- Remove unused type parameter from `Framed::replace_codec`.
## 0.3.0-beta.1
- Use `.advance()` instead of `.split_to()`.
- Upgrade `tokio-util` to `0.3`.
- Improve `BytesCodec::encode()` performance.
- Simplify `BytesCodec::decode()`.
- Rename methods on `Framed` to better describe their use.
- Add method on `Framed` to get a pinned reference to the underlying I/O.
- Add method on `Framed` check emptiness of read buffer.
## 0.2.0
- Use specific futures dependencies.
## 0.2.0-alpha.4
- Fix buffer remaining capacity calculation.
## 0.2.0-alpha.3
- Use tokio 0.2.
- Fix low/high watermark for write/read buffers.
## 0.2.0-alpha.2
- Migrated to `std::future`.
## 0.1.2
- Added `Framed::map_io()` method.
## 0.1.1
- Added `FramedParts::with_read_buffer()` method.
## 0.1.0
- Move codec to separate crate.
* Move codec to separate crate


@ -1,36 +1,26 @@
[package]
name = "actix-codec"
version = "0.5.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>", "Rob Ede <robjtede@icloud.com>"]
description = "Codec utilities for working with framed protocols"
version = "0.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Utilities for encoding and decoding frames"
keywords = ["network", "framework", "async", "futures"]
repository = "https://github.com/actix/actix-net"
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition.workspace = true
rust-version.workspace = true
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[package.metadata.cargo_check_external_types]
allowed_external_types = ["bytes::*", "futures_core::*", "futures_sink::*", "tokio::*", "tokio_util::*"]
[lib]
name = "actix_codec"
path = "src/lib.rs"
[dependencies]
bitflags = "2"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
memchr = "2.3"
pin-project-lite = "0.2"
tokio = "1.44.2"
tokio-util = { version = "0.7", features = ["codec", "io"] }
tracing = { version = "0.1.30", default-features = false, features = ["log"] }
[dev-dependencies]
criterion = { version = "0.5", features = ["html_reports"] }
tokio-test = "0.4.2"
[[bench]]
name = "lines"
harness = false
[lints]
workspace = true
bitflags = "1.2.1"
bytes = "0.5.2"
futures-core = "0.3.1"
futures-sink = "0.3.1"
tokio = { version = "0.2.4", default-features=false }
tokio-util = { version = "0.2.0", default-features=false, features=["codec"] }
log = "0.4"


@ -1,59 +0,0 @@
#![allow(missing_docs)]
use bytes::BytesMut;
use criterion::{criterion_group, criterion_main, Criterion};
const INPUT: &[u8] = include_bytes!("./lorem.txt");
fn bench_lines_codec(c: &mut Criterion) {
let mut decode_group = c.benchmark_group("lines decode");
decode_group.bench_function("actix", |b| {
b.iter(|| {
use actix_codec::Decoder as _;
let mut codec = actix_codec::LinesCodec::default();
let mut buf = BytesMut::from(INPUT);
while let Ok(Some(_bytes)) = codec.decode_eof(&mut buf) {}
});
});
decode_group.bench_function("tokio", |b| {
b.iter(|| {
use tokio_util::codec::Decoder as _;
let mut codec = tokio_util::codec::LinesCodec::new();
let mut buf = BytesMut::from(INPUT);
while let Ok(Some(_bytes)) = codec.decode_eof(&mut buf) {}
});
});
decode_group.finish();
let mut encode_group = c.benchmark_group("lines encode");
encode_group.bench_function("actix", |b| {
b.iter(|| {
use actix_codec::Encoder as _;
let mut codec = actix_codec::LinesCodec::default();
let mut buf = BytesMut::new();
codec.encode("123", &mut buf).unwrap();
});
});
encode_group.bench_function("tokio", |b| {
b.iter(|| {
use tokio_util::codec::Encoder as _;
let mut codec = tokio_util::codec::LinesCodec::new();
let mut buf = BytesMut::new();
codec.encode("123", &mut buf).unwrap();
});
});
encode_group.finish();
}
criterion_group!(benches, bench_lines_codec);
criterion_main!(benches);


@ -1,5 +0,0 @@
Lorem ipsum dolor sit amet, consectetur adipiscing elit. In tortor quam, pulvinar sit amet vestibulum eget, tincidunt non urna. Sed eu sem in felis malesuada venenatis. Suspendisse volutpat aliquet nisi, in condimentum nibh convallis id. Quisque gravida felis scelerisque ipsum aliquam consequat. Praesent libero odio, malesuada vitae odio quis, aliquam aliquet enim. In fringilla ut turpis nec pharetra. Duis eu posuere metus. Sed a aliquet massa. Mauris non tempus mi, quis mattis libero. Vivamus ornare ex at semper cursus. Vestibulum sed facilisis erat, aliquet mollis est. In interdum, magna iaculis ultricies elementum, mi ante vestibulum mauris, nec viverra turpis lorem quis ante. Proin in auctor erat. Vivamus dictum congue massa, fermentum bibendum leo pretium quis. Integer dapibus sodales ligula, sit amet imperdiet felis suscipit eu. Phasellus non ornare enim.
Nam feugiat neque sit amet hendrerit rhoncus. Nunc suscipit molestie vehicula. Aenean vulputate porttitor augue, sit amet molestie dolor volutpat vitae. Nulla vitae condimentum eros. Aliquam tristique purus at metus lacinia egestas. Cras euismod lorem eu orci lobortis, sed tincidunt nisl laoreet. Ut suscipit fermentum mi, et euismod tortor. Pellentesque vitae tempor quam, sed dignissim mi. Suspendisse luctus lacus vitae ligula blandit vehicula. Quisque interdum iaculis tincidunt. Nunc elementum mi vitae tempor placerat. Suspendisse potenti. Donec blandit laoreet ipsum, quis rhoncus velit vulputate sed.
Aliquam suscipit lectus eros, at maximus dolor efficitur quis. Integer blandit tortor orci, nec mattis nunc eleifend ac. Mauris pharetra vel quam quis lacinia. Duis lobortis condimentum nunc ut facilisis. Praesent arcu nisi, porta sit amet viverra sit amet, pellentesque ut nisi. Nunc gravida tortor eu ligula tempus, in interdum magna pretium. Fusce eu ornare sapien. Nullam pellentesque cursus eros. Nam orci massa, faucibus eget leo eget, elementum vulputate erat. Fusce vehicula augue et dui hendrerit vulputate. Mauris neque lacus, porttitor ut condimentum id, efficitur ac neque. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Donec accumsan, lectus fermentum elementum tristique, ipsum tortor mollis ante, non lacinia nibh ex quis sapien.
Donec pharetra, elit eget rutrum luctus, urna ligula facilisis lorem, sit amet rhoncus ante est eu mi. Vestibulum vestibulum ultricies interdum. Nulla tincidunt ante non hendrerit venenatis. Curabitur vestibulum turpis erat, id efficitur quam venenatis eu. Fusce nulla sem, dapibus vel quam feugiat, ornare fermentum ligula. Praesent tempus tincidunt mauris, non pellentesque felis varius in. Aenean eu arcu ligula. Morbi dapibus maximus nulla a pharetra. Fusce leo metus, luctus ut cursus non, sollicitudin non lectus. Integer pellentesque eleifend erat, vel gravida purus tempus a. Mauris id vestibulum quam. Nunc vitae ullamcorper metus, pharetra placerat enim. Fusce in ultrices nisl. Curabitur justo mauris, dignissim in aliquam sit amet, sollicitudin ut risus. Cras tempor rutrum justo, non tincidunt est maximus at.
Aliquam ac velit tincidunt, ullamcorper velit sit amet, pulvinar nisi. Nullam rhoncus rhoncus egestas. Cras ac luctus nisi. Mauris sit amet risus at magna volutpat ultrices quis ac dui. Aliquam condimentum tellus purus, vel sagittis odio vulputate at. Sed ut finibus tellus. Aliquam tincidunt vehicula diam.


@ -1,19 +1,21 @@
use bytes::{BufMut, Bytes, BytesMut};
use std::io;
use bytes::{Buf, Bytes, BytesMut};
use super::{Decoder, Encoder};
/// Bytes codec. Reads/writes chunks of bytes from a stream.
/// Bytes codec.
///
/// Reads/Writes chunks of bytes from a stream.
#[derive(Debug, Copy, Clone)]
pub struct BytesCodec;
impl Encoder<Bytes> for BytesCodec {
impl Encoder for BytesCodec {
type Item = Bytes;
type Error = io::Error;
#[inline]
fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> Result<(), Self::Error> {
dst.extend_from_slice(item.chunk());
dst.reserve(item.len());
dst.put(item);
Ok(())
}
}
@ -26,7 +28,8 @@ impl Decoder for BytesCodec {
if src.is_empty() {
Ok(None)
} else {
Ok(Some(src.split()))
let len = src.len();
Ok(Some(src.split_to(len)))
}
}
}
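For reference, a minimal sketch of the `BytesCodec` decode contract shown above, assuming `Decoder::Item` is `BytesMut` (true on both sides of this compare) and `actix-codec` plus `bytes` as dependencies:

```rust
use actix_codec::{BytesCodec, Decoder as _};
use bytes::BytesMut;

fn main() -> std::io::Result<()> {
    let mut codec = BytesCodec;
    let mut buf = BytesMut::from(&b"hello"[..]);

    // A non-empty buffer is drained and returned as a single chunk...
    assert_eq!(codec.decode(&mut buf)?, Some(BytesMut::from(&b"hello"[..])));
    // ...and an empty buffer yields `None`.
    assert_eq!(codec.decode(&mut buf)?, None);
    Ok(())
}
```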


@ -1,56 +1,53 @@
use std::{
fmt, io,
pin::Pin,
task::{Context, Poll},
};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io};
use bitflags::bitflags;
use bytes::{Buf, BytesMut};
use futures_core::{ready, Stream};
use futures_sink::Sink;
use pin_project_lite::pin_project;
use crate::{AsyncRead, AsyncWrite, Decoder, Encoder};
/// Low-water mark
const LW: usize = 1024;
/// High-water mark
const HW: usize = 8 * 1024;
bitflags! {
#[derive(Debug, Clone, Copy)]
bitflags::bitflags! {
struct Flags: u8 {
const EOF = 0b0001;
const READABLE = 0b0010;
}
}
pin_project! {
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using the `Encoder` and
/// `Decoder` traits to encode and decode frames.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually wants to batch these
/// into meaningful chunks, called "frames". This method layers framing on top of an I/O object,
/// by using the `Encoder`/`Decoder` traits to handle encoding and decoding of message frames.
/// Note that the incoming and outgoing frame types may be distinct.
pub struct Framed<T, U> {
#[pin]
io: T,
codec: U,
flags: Flags,
read_buf: BytesMut,
write_buf: BytesMut,
}
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
/// the `Encoder` and `Decoder` traits to encode and decode frames.
pub struct Framed<T, U> {
io: T,
codec: U,
flags: Flags,
read_buf: BytesMut,
write_buf: BytesMut,
}
impl<T, U> Unpin for Framed<T, U> {}
impl<T, U> Framed<T, U>
where
T: AsyncRead + AsyncWrite,
U: Decoder,
U: Decoder + Encoder,
{
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
/// a single object is often useful for layering things like gzip or TLS, which require both
/// read and write access to the underlying object.
/// Provides a `Stream` and `Sink` interface for reading and writing to this
/// `Io` object, using `Decode` and `Encode` to read and write the raw data.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Codec`
/// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
///
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
pub fn new(io: T, codec: U) -> Framed<T, U> {
Framed {
io,
@ -63,42 +60,63 @@ where
}
impl<T, U> Framed<T, U> {
/// Provides a `Stream` and `Sink` interface for reading and writing to this
/// `Io` object, using `Decode` and `Encode` to read and write the raw data.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Codec`
/// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
///
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// This object takes a stream, a read buffer, and a write buffer. These
/// fields can be obtained from an existing `Framed` with the
/// `into_parts` method.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
io: parts.io,
codec: parts.codec,
flags: parts.flags,
write_buf: parts.write_buf,
read_buf: parts.read_buf,
}
}
/// Returns a reference to the underlying codec.
pub fn codec_ref(&self) -> &U {
pub fn get_codec(&self) -> &U {
&self.codec
}
/// Returns a mutable reference to the underlying codec.
pub fn codec_mut(&mut self) -> &mut U {
pub fn get_codec_mut(&mut self) -> &mut U {
&mut self.codec
}
/// Returns a reference to the underlying I/O stream wrapped by `Frame`.
/// Returns a reference to the underlying I/O stream wrapped by
/// `Frame`.
///
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
/// it may corrupt the stream of frames otherwise being worked with.
pub fn io_ref(&self) -> &T {
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_ref(&self) -> &T {
&self.io
}
/// Returns a mutable reference to the underlying I/O stream.
/// Returns a mutable reference to the underlying I/O stream wrapped by
/// `Frame`.
///
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
/// it may corrupt the stream of frames otherwise being worked with.
pub fn io_mut(&mut self) -> &mut T {
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_mut(&mut self) -> &mut T {
&mut self.io
}
/// Returns a `Pin` of a mutable reference to the underlying I/O stream.
pub fn io_pin(self: Pin<&mut Self>) -> Pin<&mut T> {
self.project().io
}
/// Check if read buffer is empty.
pub fn is_read_buf_empty(&self) -> bool {
self.read_buf.is_empty()
}
/// Check if write buffer is empty.
pub fn is_write_buf_empty(&self) -> bool {
self.write_buf.is_empty()
@ -109,15 +127,8 @@ impl<T, U> Framed<T, U> {
self.write_buf.len() >= HW
}
/// Check if framed is able to write more data.
///
/// `Framed` object considers ready if there is free space in write buffer.
pub fn is_write_ready(&self) -> bool {
self.write_buf.len() < HW
}
/// Consume the `Frame`, returning `Frame` with different codec.
pub fn replace_codec<U2>(self, codec: U2) -> Framed<T, U2> {
pub fn into_framed<U2>(self, codec: U2) -> Framed<T, U2> {
Framed {
codec,
io: self.io,
@ -128,7 +139,7 @@ impl<T, U> Framed<T, U> {
}
/// Consume the `Frame`, returning `Frame` with different io.
pub fn into_map_io<F, T2>(self, f: F) -> Framed<T2, U>
pub fn map_io<F, T2>(self, f: F) -> Framed<T2, U>
where
F: Fn(T) -> T2,
{
@ -142,7 +153,7 @@ impl<T, U> Framed<T, U> {
}
/// Consume the `Frame`, returning `Frame` with different codec.
pub fn into_map_codec<F, U2>(self, f: F) -> Framed<T, U2>
pub fn map_codec<F, U2>(self, f: F) -> Framed<T, U2>
where
F: Fn(U) -> U2,
{
@ -154,98 +165,119 @@ impl<T, U> Framed<T, U> {
write_buf: self.write_buf,
}
}
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer
/// with unprocessed data, and the codec.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_parts(self) -> FramedParts<T, U> {
FramedParts {
io: self.io,
codec: self.codec,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
}
impl<T, U> Framed<T, U> {
/// Serialize item and write to the inner buffer
pub fn write<I>(mut self: Pin<&mut Self>, item: I) -> Result<(), <U as Encoder<I>>::Error>
/// Serialize item and Write to the inner buffer
pub fn write(&mut self, item: <U as Encoder>::Item) -> Result<(), <U as Encoder>::Error>
where
T: AsyncWrite,
U: Encoder<I>,
U: Encoder,
{
let this = self.as_mut().project();
let remaining = this.write_buf.capacity() - this.write_buf.len();
let remaining = self.write_buf.capacity() - self.write_buf.len();
if remaining < LW {
this.write_buf.reserve(HW - remaining);
self.write_buf.reserve(HW - remaining);
}
this.codec.encode(item, this.write_buf)?;
self.codec.encode(item, &mut self.write_buf)?;
Ok(())
}
/// Check if framed is able to write more data.
///
/// `Framed` object considers ready if there is free space in write buffer.
pub fn is_write_ready(&self) -> bool {
self.write_buf.len() < HW
}
/// Try to read underlying I/O stream and decode item.
pub fn next_item(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<<U as Decoder>::Item, U::Error>>>
pub fn next_item(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<U::Item, U::Error>>>
where
T: AsyncRead,
U: Decoder,
{
loop {
let this = self.as_mut().project();
// Repeatedly call `decode` or `decode_eof` as long as it is "readable". Readable is
// defined as not having returned `None`. If the upstream has returned EOF, and the
// decoder is no longer readable, it can be assumed that the decoder will never become
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if this.flags.contains(Flags::READABLE) {
if this.flags.contains(Flags::EOF) {
match this.codec.decode_eof(this.read_buf) {
if self.flags.contains(Flags::READABLE) {
if self.flags.contains(Flags::EOF) {
match self.codec.decode_eof(&mut self.read_buf) {
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
Ok(None) => return Poll::Ready(None),
Err(err) => return Poll::Ready(Some(Err(err))),
Err(e) => return Poll::Ready(Some(Err(e))),
}
}
tracing::trace!("attempting to decode a frame");
log::trace!("attempting to decode a frame");
match this.codec.decode(this.read_buf) {
match self.codec.decode(&mut self.read_buf) {
Ok(Some(frame)) => {
tracing::trace!("frame decoded from buffer");
log::trace!("frame decoded from buffer");
return Poll::Ready(Some(Ok(frame)));
}
Err(err) => return Poll::Ready(Some(Err(err))),
Err(e) => return Poll::Ready(Some(Err(e))),
_ => (), // Need more data
}
this.flags.remove(Flags::READABLE);
self.flags.remove(Flags::READABLE);
}
debug_assert!(!this.flags.contains(Flags::EOF));
debug_assert!(!self.flags.contains(Flags::EOF));
// Otherwise, try to read more data and try again. Make sure we've got room.
let remaining = this.read_buf.capacity() - this.read_buf.len();
// Otherwise, try to read more data and try again. Make sure we've got room
let remaining = self.read_buf.capacity() - self.read_buf.len();
if remaining < LW {
this.read_buf.reserve(HW - remaining)
self.read_buf.reserve(HW - remaining)
}
let cnt = match tokio_util::io::poll_read_buf(this.io, cx, this.read_buf) {
let cnt = match unsafe {
Pin::new_unchecked(&mut self.io).poll_read_buf(cx, &mut self.read_buf)
} {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(err)) => return Poll::Ready(Some(Err(err.into()))),
Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e.into()))),
Poll::Ready(Ok(cnt)) => cnt,
};
if cnt == 0 {
this.flags.insert(Flags::EOF);
self.flags.insert(Flags::EOF);
}
this.flags.insert(Flags::READABLE);
self.flags.insert(Flags::READABLE);
}
}
/// Flush write buffer to underlying I/O stream.
pub fn flush<I>(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), U::Error>>
pub fn flush(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
U: Encoder,
{
let mut this = self.as_mut().project();
tracing::trace!("flushing framed transport");
log::trace!("flushing framed transport");
while !this.write_buf.is_empty() {
tracing::trace!("writing; remaining={}", this.write_buf.len());
while !self.write_buf.is_empty() {
log::trace!("writing; remaining={}", self.write_buf.len());
let n = ready!(this.io.as_mut().poll_write(cx, this.write_buf))?;
let n = ready!(unsafe {
Pin::new_unchecked(&mut self.io).poll_write(cx, &self.write_buf)
})?;
if n == 0 {
return Poll::Ready(Err(io::Error::new(
@ -256,25 +288,26 @@ impl<T, U> Framed<T, U> {
}
// remove written data
this.write_buf.advance(n);
self.write_buf.advance(n);
}
// Try flushing the underlying IO
ready!(this.io.poll_flush(cx))?;
ready!(unsafe { Pin::new_unchecked(&mut self.io).poll_flush(cx) })?;
tracing::trace!("framed transport flushed");
log::trace!("framed transport flushed");
Poll::Ready(Ok(()))
}
/// Flush write buffer and shutdown underlying I/O stream.
pub fn close<I>(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), U::Error>>
pub fn close(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
U: Encoder,
{
let mut this = self.as_mut().project();
ready!(this.io.as_mut().poll_flush(cx))?;
ready!(this.io.as_mut().poll_shutdown(cx))?;
unsafe {
ready!(Pin::new_unchecked(&mut self.io).poll_flush(cx))?;
ready!(Pin::new_unchecked(&mut self.io).poll_shutdown(cx))?;
}
Poll::Ready(Ok(()))
}
}
@ -286,36 +319,45 @@ where
{
type Item = Result<U::Item, U::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.next_item(cx)
}
}
impl<T, U, I> Sink<I> for Framed<T, U>
impl<T, U> Sink<U::Item> for Framed<T, U>
where
T: AsyncWrite,
U: Encoder<I>,
U: Encoder,
U::Error: From<io::Error>,
{
type Error = U::Error;
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.is_write_ready() {
Poll::Ready(Ok(()))
} else {
self.flush(cx)
Poll::Pending
}
}
fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> {
fn start_send(
mut self: Pin<&mut Self>,
item: <U as Encoder>::Item,
) -> Result<(), Self::Error> {
self.write(item)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
self.flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_close(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
self.close(cx)
}
}
@ -333,49 +375,15 @@ where
}
}
impl<T, U> Framed<T, U> {
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
/// a single object is often useful for layering things like gzip or TLS, which require both
/// read and write access to the underlying object.
///
/// These objects take a stream, a read buffer and a write buffer. These fields can be obtained
/// from an existing `Framed` with the `into_parts` method.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
io: parts.io,
codec: parts.codec,
flags: parts.flags,
write_buf: parts.write_buf,
read_buf: parts.read_buf,
}
}
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer with unprocessed data,
/// and the codec.
///
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
/// it may corrupt the stream of frames otherwise being worked with.
pub fn into_parts(self) -> FramedParts<T, U> {
FramedParts {
io: self.io,
codec: self.codec,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
}
/// `FramedParts` contains an export of the data of a Framed transport.
///
/// It can be used to construct a new `Framed` with a different codec. It contains all current
/// buffers and the inner transport.
/// It can be used to construct a new `Framed` with a different codec.
/// It contains all current buffers and the inner transport.
#[derive(Debug)]
pub struct FramedParts<T, U> {
/// The inner transport used to read bytes to and write bytes to.
/// The inner transport used to read bytes to and write bytes to
pub io: T,
/// The codec object.
/// The codec
pub codec: U,
/// The buffer with read but unprocessed data.
@ -388,7 +396,7 @@ pub struct FramedParts<T, U> {
}
impl<T, U> FramedParts<T, U> {
/// Creates a new default `FramedParts`.
/// Create a new, default, `FramedParts`
pub fn new(io: T, codec: U) -> FramedParts<T, U> {
FramedParts {
io,
@ -399,7 +407,7 @@ impl<T, U> FramedParts<T, U> {
}
}
/// Creates a new `FramedParts` with read buffer.
/// Create a new `FramedParts` with read buffer
pub fn with_read_buf(io: T, codec: U, read_buf: BytesMut) -> FramedParts<T, U> {
FramedParts {
io,


@ -1,26 +1,18 @@
//! Codec utilities for working with framed protocols.
//! Utilities for encoding and decoding frames.
//!
//! Contains adapters to go from streams of bytes, [`AsyncRead`] and [`AsyncWrite`], to framed
//! streams implementing [`Sink`] and [`Stream`]. Framed streams are also known as `transports`.
//! Contains adapters to go from streams of bytes, [`AsyncRead`] and
//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`].
//! Framed streams are also known as [transports].
//!
//! [`Sink`]: futures_sink::Sink
//! [`Stream`]: futures_core::Stream
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
pub use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
pub use tokio_util::{
codec::{Decoder, Encoder},
io::poll_read_buf,
};
//! [`AsyncRead`]: #
//! [`AsyncWrite`]: #
#![deny(rust_2018_idioms, warnings)]
mod bcodec;
mod framed;
mod lines;
pub use self::{
bcodec::BytesCodec,
framed::{Framed, FramedParts},
lines::LinesCodec,
};
pub use self::bcodec::BytesCodec;
pub use self::framed::{Framed, FramedParts};
pub use tokio::io::{AsyncRead, AsyncWrite};
pub use tokio_util::codec::{Decoder, Encoder};
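For reference, a minimal usage sketch of the framed transport described in the crate docs above, written against the master-side API (`Framed` plus `BytesCodec` with `Encoder<Bytes>`); it assumes `tokio` (with its full feature set), `bytes`, and `futures-util` as additional dev-dependencies:

```rust
use actix_codec::{BytesCodec, Framed};
use bytes::Bytes;
use futures_util::{SinkExt as _, StreamExt as _};
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // An in-memory duplex pipe stands in for a real TcpStream.
    let (mut peer, io) = tokio::io::duplex(64);
    let mut framed = Framed::new(io, BytesCodec);

    // Sink side: encode a frame and flush it to the peer.
    framed.send(Bytes::from_static(b"ping")).await?;
    let mut out = [0u8; 4];
    peer.read_exact(&mut out).await?;
    assert_eq!(&out, b"ping");

    // Stream side: bytes written by the peer come back as decoded frames.
    peer.write_all(b"pong").await?;
    drop(peer); // EOF terminates the stream
    while let Some(frame) = framed.next().await {
        println!("frame: {:?}", frame?);
    }
    Ok(())
}
```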


@ -1,158 +0,0 @@
use std::io;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use memchr::memchr;
use super::{Decoder, Encoder};
/// Lines codec. Reads/writes line delimited strings.
///
/// Will split input up by LF or CRLF delimiters. Carriage return characters at the end of lines are
/// not preserved.
#[derive(Debug, Copy, Clone, Default)]
#[non_exhaustive]
pub struct LinesCodec;
impl<T: AsRef<str>> Encoder<T> for LinesCodec {
type Error = io::Error;
#[inline]
fn encode(&mut self, item: T, dst: &mut BytesMut) -> Result<(), Self::Error> {
let item = item.as_ref();
dst.reserve(item.len() + 1);
dst.put_slice(item.as_bytes());
dst.put_u8(b'\n');
Ok(())
}
}
impl Decoder for LinesCodec {
type Item = String;
type Error = io::Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if src.is_empty() {
return Ok(None);
}
let len = match memchr(b'\n', src) {
Some(n) => n,
None => {
return Ok(None);
}
};
// split up to new line char
let mut buf = src.split_to(len);
debug_assert_eq!(len, buf.len());
// remove new line char from source
src.advance(1);
match buf.last() {
// remove carriage returns at the end of buf
Some(b'\r') => buf.truncate(len - 1),
// line is empty
None => return Ok(Some(String::new())),
_ => {}
}
try_into_utf8(buf.freeze())
}
fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
match self.decode(src)? {
Some(frame) => Ok(Some(frame)),
None if src.is_empty() => Ok(None),
None => {
let buf = match src.last() {
// if last line ends in a CR then take everything up to it
Some(b'\r') => src.split_to(src.len() - 1),
// take all bytes from source
_ => src.split(),
};
if buf.is_empty() {
return Ok(None);
}
try_into_utf8(buf.freeze())
}
}
}
}
// Attempts to convert bytes into a `String`.
fn try_into_utf8(buf: Bytes) -> io::Result<Option<String>> {
String::from_utf8(buf.to_vec())
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
.map(Some)
}
#[cfg(test)]
mod tests {
use bytes::BufMut as _;
use super::*;
#[test]
fn lines_decoder() {
let mut codec = LinesCodec::default();
let mut buf = BytesMut::from("\nline 1\nline 2\r\nline 3\n\r\n\r");
assert_eq!("", codec.decode(&mut buf).unwrap().unwrap());
assert_eq!("line 1", codec.decode(&mut buf).unwrap().unwrap());
assert_eq!("line 2", codec.decode(&mut buf).unwrap().unwrap());
assert_eq!("line 3", codec.decode(&mut buf).unwrap().unwrap());
assert_eq!("", codec.decode(&mut buf).unwrap().unwrap());
assert!(codec.decode(&mut buf).unwrap().is_none());
assert!(codec.decode_eof(&mut buf).unwrap().is_none());
buf.put_slice(b"k");
assert!(codec.decode(&mut buf).unwrap().is_none());
assert_eq!("\rk", codec.decode_eof(&mut buf).unwrap().unwrap());
assert!(codec.decode(&mut buf).unwrap().is_none());
assert!(codec.decode_eof(&mut buf).unwrap().is_none());
}
#[test]
fn lines_encoder() {
let mut codec = LinesCodec::default();
let mut buf = BytesMut::new();
codec.encode("", &mut buf).unwrap();
assert_eq!(&buf[..], b"\n");
codec.encode("test", &mut buf).unwrap();
assert_eq!(&buf[..], b"\ntest\n");
codec.encode("a\nb", &mut buf).unwrap();
assert_eq!(&buf[..], b"\ntest\na\nb\n");
}
#[test]
fn lines_encoder_no_overflow() {
let mut codec = LinesCodec::default();
let mut buf = BytesMut::new();
codec.encode("1234567", &mut buf).unwrap();
assert_eq!(&buf[..], b"1234567\n");
let mut buf = BytesMut::new();
codec.encode("12345678", &mut buf).unwrap();
assert_eq!(&buf[..], b"12345678\n");
let mut buf = BytesMut::new();
codec.encode("123456789111213", &mut buf).unwrap();
assert_eq!(&buf[..], b"123456789111213\n");
let mut buf = BytesMut::new();
codec.encode("1234567891112131", &mut buf).unwrap();
assert_eq!(&buf[..], b"1234567891112131\n");
}
}


@ -1,224 +0,0 @@
#![allow(missing_docs)]
use std::{
collections::VecDeque,
io::{self, Write},
pin::Pin,
task::{
Context,
Poll::{self, Pending, Ready},
},
};
use actix_codec::*;
use bytes::{Buf as _, BufMut as _, BytesMut};
use futures_sink::Sink;
use tokio_test::{assert_ready, task};
macro_rules! bilateral {
($($x:expr,)*) => {{
let mut v = VecDeque::new();
v.extend(vec![$($x),*]);
Bilateral { calls: v }
}};
}
macro_rules! assert_ready {
($e:expr) => {{
use core::task::Poll::*;
match $e {
Ready(v) => v,
Pending => panic!("pending"),
}
}};
($e:expr, $($msg:tt),+) => {{
use core::task::Poll::*;
match $e {
Ready(v) => v,
Pending => {
let msg = format_args!($($msg),+);
panic!("pending; {}", msg)
}
}
}};
}
#[derive(Debug)]
pub struct Bilateral {
pub calls: VecDeque<io::Result<Vec<u8>>>,
}
impl Write for Bilateral {
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
match self.calls.pop_front() {
Some(Ok(data)) => {
assert!(src.len() >= data.len());
assert_eq!(&data[..], &src[..data.len()]);
Ok(data.len())
}
Some(Err(err)) => Err(err),
None => panic!("unexpected write; {:?}", src),
}
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl AsyncWrite for Bilateral {
fn poll_write(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
match Pin::get_mut(self).write(buf) {
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Pending,
other => Ready(other),
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
match Pin::get_mut(self).flush() {
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Pending,
other => Ready(other),
}
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
unimplemented!()
}
}
impl AsyncRead for Bilateral {
fn poll_read(
mut self: Pin<&mut Self>,
_: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<Result<(), std::io::Error>> {
use io::ErrorKind::WouldBlock;
match self.calls.pop_front() {
Some(Ok(data)) => {
debug_assert!(buf.remaining() >= data.len());
buf.put_slice(&data);
Ready(Ok(()))
}
Some(Err(ref err)) if err.kind() == WouldBlock => Pending,
Some(Err(err)) => Ready(Err(err)),
None => Ready(Ok(())),
}
}
}
pub struct U32;
impl Encoder<u32> for U32 {
type Error = io::Error;
fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> {
// Reserve space
dst.reserve(4);
dst.put_u32(item);
Ok(())
}
}
impl Decoder for U32 {
type Item = u32;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u32>> {
if buf.len() < 4 {
return Ok(None);
}
let n = buf.split_to(4).get_u32();
Ok(Some(n))
}
}
#[test]
fn test_write_hits_highwater_mark() {
// see here for what this test is based on:
// https://github.com/tokio-rs/tokio/blob/75c07770bfbfea4e5fd914af819c741ed9c3fc36/tokio-util/tests/framed_write.rs#L69
const ITER: usize = 2 * 1024;
let mut bi = bilateral! {
Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")),
Ok(b"".to_vec()),
};
for i in 0..=ITER {
let mut b = BytesMut::with_capacity(4);
b.put_u32(i as u32);
// Append to the end
match bi.calls.back_mut().unwrap() {
Ok(ref mut data) => {
// Write in 2kb chunks
if data.len() < ITER {
data.extend_from_slice(&b[..]);
continue;
} // else fall through and create a new buffer
}
_ => unreachable!(),
}
// Push a new chunk
bi.calls.push_back(Ok(b[..].to_vec()));
}
assert_eq!(bi.calls.len(), 6);
let mut framed = Framed::new(bi, U32);
// Send 8KB. This fills up FramedWrite2 buffer
let mut task = task::spawn(());
task.enter(|cx, _| {
// Send 8KB. This fills up Framed buffer
for i in 0..ITER {
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
}
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// write the buffer
assert!(framed.start_send(i as u32).is_ok());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// Now we poll_ready which forces a flush. The bilateral pops the front message
// and decides to block.
assert!(framed.poll_ready(cx).is_pending());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// We poll again, forcing another flush, which this time succeeds
// The whole 8KB buffer is flushed
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// Send more data. This matches the final message expected by the bilateral
assert!(framed.start_send(ITER as u32).is_ok());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// Flush the rest of the buffer
assert!(assert_ready!(framed.poll_flush(cx)).is_ok());
}
// Ensure the mock is empty
assert_eq!(0, Pin::new(&framed).get_ref().io_ref().calls.len());
});
}

actix-connect/CHANGES.md Normal file

@ -0,0 +1,116 @@
# Changes
## [1.0.2] - 2020-01-15
* Fix actix-service 1.0.3 compatibility
## [1.0.1] - 2019-12-15
* Fix trust-dns-resolver compilation
## [1.0.0] - 2019-12-11
* Release
## [1.0.0-alpha.3] - 2019-12-07
### Changed
* Migrate to tokio 0.2
## [1.0.0-alpha.2] - 2019-12-02
### Changed
* Migrated to `std::future`
## [0.3.0] - 2019-10-03
### Changed
* Update `rustls` to 0.16
* Minimum required Rust version upped to 1.37.0
## [0.2.5] - 2019-09-05
* Add `TcpConnectService`
## [0.2.4] - 2019-09-02
* Use arbiter's storage for default async resolver
## [0.2.3] - 2019-08-05
* Add `ConnectService` and `OpensslConnectService`
## [0.2.2] - 2019-07-24
* Add `rustls` support
## [0.2.1] - 2019-07-17
### Added
* Expose Connect addrs #30
### Changed
* Update `derive_more` to 0.15
## [0.2.0] - 2019-05-12
### Changed
* Upgrade to actix-service 0.4
## [0.1.5] - 2019-04-19
### Added
* `Connect::set_addr()`
### Changed
* Use trust-dns-resolver 0.11.0
## [0.1.4] - 2019-04-12
### Changed
* Do not start default resolver immediately for default connector.
## [0.1.3] - 2019-04-11
### Changed
* Start trust-dns default resolver on first use
## [0.1.2] - 2019-04-04
### Added
* Log error if dns system config could not be loaded.
### Changed
* Rename connect Connector to TcpConnector #10
## [0.1.1] - 2019-03-15
### Fixed
* Fix error handling for single address
## [0.1.0] - 2019-03-14
* Refactor resolver and connector services
* Rename crate

actix-connect/Cargo.toml Normal file

@ -0,0 +1,57 @@
[package]
name = "actix-connect"
version = "1.0.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix connect - tcp connector service"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-connect/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = ["openssl", "rustls", "uri"]
[lib]
name = "actix_connect"
path = "src/lib.rs"
[features]
default = ["uri"]
# openssl
openssl = ["open-ssl", "tokio-openssl"]
# rustls
rustls = ["rust-tls", "tokio-rustls", "webpki"]
# support http::Uri as connect address
uri = ["http"]
[dependencies]
actix-service = "1.0.3"
actix-codec = "0.2.0"
actix-utils = "1.0.6"
actix-rt = "1.0.0"
derive_more = "0.99.2"
either = "1.5.3"
futures = "0.3.1"
http = { version = "0.2.0", optional = true }
log = "0.4"
trust-dns-proto = "=0.18.0-alpha.2"
trust-dns-resolver = "=0.18.0-alpha.2"
# openssl
open-ssl = { version="0.10", package = "openssl", optional = true }
tokio-openssl = { version = "0.4.0", optional = true }
# rustls
rust-tls = { version = "0.16.0", package = "rustls", optional = true }
tokio-rustls = { version = "0.12.0", optional = true }
webpki = { version = "0.21", optional = true }
[dev-dependencies]
bytes = "0.5.3"
actix-testing = { version="1.0.0" }


@ -0,0 +1,281 @@
use std::collections::{vec_deque, VecDeque};
use std::fmt;
use std::iter::{FromIterator, FusedIterator};
use std::net::SocketAddr;
use either::Either;
/// An address for a connect request
pub trait Address: Unpin {
/// Host name of the request
fn host(&self) -> &str;
/// Port of the request
fn port(&self) -> Option<u16>;
}
impl Address for String {
fn host(&self) -> &str {
&self
}
fn port(&self) -> Option<u16> {
None
}
}
impl Address for &'static str {
fn host(&self) -> &str {
self
}
fn port(&self) -> Option<u16> {
None
}
}
/// Connect request
#[derive(Eq, PartialEq, Debug, Hash)]
pub struct Connect<T> {
pub(crate) req: T,
pub(crate) port: u16,
pub(crate) addr: Option<Either<SocketAddr, VecDeque<SocketAddr>>>,
}
impl<T: Address> Connect<T> {
/// Create a `Connect` instance by splitting the string on ':' and converting the second part to a u16 port
pub fn new(req: T) -> Connect<T> {
let (_, port) = parse(req.host());
Connect {
req,
port: port.unwrap_or(0),
addr: None,
}
}
/// Create a new `Connect` instance from a host and a socket address. The connector skips the name resolution stage for such connect messages.
pub fn with(req: T, addr: SocketAddr) -> Connect<T> {
Connect {
req,
port: 0,
addr: Some(Either::Left(addr)),
}
}
/// Use this port if the address does not provide one.
///
/// By default it is set to 0.
pub fn set_port(mut self, port: u16) -> Self {
self.port = port;
self
}
/// Use address.
pub fn set_addr(mut self, addr: Option<SocketAddr>) -> Self {
if let Some(addr) = addr {
self.addr = Some(Either::Left(addr));
}
self
}
/// Use addresses.
pub fn set_addrs<I>(mut self, addrs: I) -> Self
where
I: IntoIterator<Item = SocketAddr>,
{
let mut addrs = VecDeque::from_iter(addrs);
self.addr = if addrs.len() < 2 {
addrs.pop_front().map(Either::Left)
} else {
Some(Either::Right(addrs))
};
self
}
/// Host name
pub fn host(&self) -> &str {
self.req.host()
}
/// Port of the request
pub fn port(&self) -> u16 {
self.req.port().unwrap_or(self.port)
}
/// Preresolved addresses of the request.
pub fn addrs(&self) -> ConnectAddrsIter<'_> {
let inner = match self.addr {
None => Either::Left(None),
Some(Either::Left(addr)) => Either::Left(Some(addr)),
Some(Either::Right(ref addrs)) => Either::Right(addrs.iter()),
};
ConnectAddrsIter { inner }
}
/// Takes preresolved addresses of the request.
pub fn take_addrs(&mut self) -> ConnectTakeAddrsIter {
let inner = match self.addr.take() {
None => Either::Left(None),
Some(Either::Left(addr)) => Either::Left(Some(addr)),
Some(Either::Right(addrs)) => Either::Right(addrs.into_iter()),
};
ConnectTakeAddrsIter { inner }
}
}
impl<T: Address> From<T> for Connect<T> {
fn from(addr: T) -> Self {
Connect::new(addr)
}
}
impl<T: Address> fmt::Display for Connect<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}:{}", self.host(), self.port())
}
}
/// Iterator over addresses in a [`Connect`](struct.Connect.html) request.
#[derive(Clone)]
pub struct ConnectAddrsIter<'a> {
inner: Either<Option<SocketAddr>, vec_deque::Iter<'a, SocketAddr>>,
}
impl Iterator for ConnectAddrsIter<'_> {
type Item = SocketAddr;
fn next(&mut self) -> Option<Self::Item> {
match self.inner {
Either::Left(ref mut opt) => opt.take(),
Either::Right(ref mut iter) => iter.next().copied(),
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self.inner {
Either::Left(Some(_)) => (1, Some(1)),
Either::Left(None) => (0, Some(0)),
Either::Right(ref iter) => iter.size_hint(),
}
}
}
impl fmt::Debug for ConnectAddrsIter<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.clone()).finish()
}
}
impl ExactSizeIterator for ConnectAddrsIter<'_> {}
impl FusedIterator for ConnectAddrsIter<'_> {}
/// Owned iterator over addresses in a [`Connect`](struct.Connect.html) request.
#[derive(Debug)]
pub struct ConnectTakeAddrsIter {
inner: Either<Option<SocketAddr>, vec_deque::IntoIter<SocketAddr>>,
}
impl Iterator for ConnectTakeAddrsIter {
type Item = SocketAddr;
fn next(&mut self) -> Option<Self::Item> {
match self.inner {
Either::Left(ref mut opt) => opt.take(),
Either::Right(ref mut iter) => iter.next(),
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self.inner {
Either::Left(Some(_)) => (1, Some(1)),
Either::Left(None) => (0, Some(0)),
Either::Right(ref iter) => iter.size_hint(),
}
}
}
impl ExactSizeIterator for ConnectTakeAddrsIter {}
impl FusedIterator for ConnectTakeAddrsIter {}
fn parse(host: &str) -> (&str, Option<u16>) {
let mut parts_iter = host.splitn(2, ':');
if let Some(host) = parts_iter.next() {
let port_str = parts_iter.next().unwrap_or("");
if let Ok(port) = port_str.parse::<u16>() {
(host, Some(port))
} else {
(host, None)
}
} else {
(host, None)
}
}
pub struct Connection<T, U> {
io: U,
req: T,
}
impl<T, U> Connection<T, U> {
pub fn new(io: U, req: T) -> Self {
Self { io, req }
}
}
impl<T, U> Connection<T, U> {
/// Reconstruct from parts.
pub fn from_parts(io: U, req: T) -> Self {
Self { io, req }
}
/// Deconstruct into parts.
pub fn into_parts(self) -> (U, T) {
(self.io, self.req)
}
/// Replace the enclosed I/O object, returning the old object and a new `Connection`
pub fn replace<Y>(self, io: Y) -> (U, Connection<T, Y>) {
(self.io, Connection { io, req: self.req })
}
/// Returns a shared reference to the underlying stream.
pub fn get_ref(&self) -> &U {
&self.io
}
/// Returns a mutable reference to the underlying stream.
pub fn get_mut(&mut self) -> &mut U {
&mut self.io
}
}
impl<T: Address, U> Connection<T, U> {
/// Get request
pub fn host(&self) -> &str {
&self.req.host()
}
}
impl<T, U> std::ops::Deref for Connection<T, U> {
type Target = U;
fn deref(&self) -> &U {
&self.io
}
}
impl<T, U> std::ops::DerefMut for Connection<T, U> {
fn deref_mut(&mut self) -> &mut U {
&mut self.io
}
}
impl<T, U: fmt::Debug> fmt::Debug for Connection<T, U> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Stream {{{:?}}}", self.io)
}
}
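
As a usage sketch (illustrative only, not part of the file above; the host names and socket address are placeholders), the request type composes like this:

fn connect_usage_sketch() {
    use std::net::SocketAddr;

    // The port is parsed out of the trailing ":8443" by `Connect::new`.
    let req = Connect::new("example.com:8443");
    assert_eq!(req.port(), 8443);

    // `set_port` only applies when the request itself carries no port.
    let req = Connect::new("example.com").set_port(80);
    assert_eq!(req.port(), 80);

    // Pre-resolved addresses let the connector skip DNS resolution.
    let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
    let req = Connect::new("example.com").set_addrs(vec![addr]);
    assert_eq!(req.addrs().len(), 1);
}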


@ -0,0 +1,171 @@
use std::collections::VecDeque;
use std::future::Future;
use std::io;
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_rt::net::TcpStream;
use actix_service::{Service, ServiceFactory};
use futures::future::{err, ok, BoxFuture, Either, FutureExt, Ready};
use super::connect::{Address, Connect, Connection};
use super::error::ConnectError;
/// Tcp connector service factory
#[derive(Debug)]
pub struct TcpConnectorFactory<T>(PhantomData<T>);
impl<T> TcpConnectorFactory<T> {
pub fn new() -> Self {
TcpConnectorFactory(PhantomData)
}
/// Create tcp connector service
pub fn service(&self) -> TcpConnector<T> {
TcpConnector(PhantomData)
}
}
impl<T> Default for TcpConnectorFactory<T> {
fn default() -> Self {
TcpConnectorFactory(PhantomData)
}
}
impl<T> Clone for TcpConnectorFactory<T> {
fn clone(&self) -> Self {
TcpConnectorFactory(PhantomData)
}
}
impl<T: Address> ServiceFactory for TcpConnectorFactory<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = TcpConnector<T>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(self.service())
}
}
/// Tcp connector service
#[derive(Default, Debug)]
pub struct TcpConnector<T>(PhantomData<T>);
impl<T> TcpConnector<T> {
pub fn new() -> Self {
TcpConnector(PhantomData)
}
}
impl<T> Clone for TcpConnector<T> {
fn clone(&self) -> Self {
TcpConnector(PhantomData)
}
}
impl<T: Address> Service for TcpConnector<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Future = Either<TcpConnectorResponse<T>, Ready<Result<Self::Response, Self::Error>>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
let port = req.port();
let Connect { req, addr, .. } = req;
if let Some(addr) = addr {
Either::Left(TcpConnectorResponse::new(req, port, addr))
} else {
error!("TCP connector: got unresolved address");
Either::Right(err(ConnectError::Unresolverd))
}
}
}
#[doc(hidden)]
/// Tcp stream connector response future
pub struct TcpConnectorResponse<T> {
req: Option<T>,
port: u16,
addrs: Option<VecDeque<SocketAddr>>,
stream: Option<BoxFuture<'static, Result<TcpStream, io::Error>>>,
}
impl<T: Address> TcpConnectorResponse<T> {
pub fn new(
req: T,
port: u16,
addr: either::Either<SocketAddr, VecDeque<SocketAddr>>,
) -> TcpConnectorResponse<T> {
trace!(
"TCP connector - connecting to {:?} port:{}",
req.host(),
port
);
match addr {
either::Either::Left(addr) => TcpConnectorResponse {
req: Some(req),
port,
addrs: None,
stream: Some(TcpStream::connect(addr).boxed()),
},
either::Either::Right(addrs) => TcpConnectorResponse {
req: Some(req),
port,
addrs: Some(addrs),
stream: None,
},
}
}
}
impl<T: Address> Future for TcpConnectorResponse<T> {
type Output = Result<Connection<T, TcpStream>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
// connect
loop {
if let Some(new) = this.stream.as_mut() {
match new.as_mut().poll(cx) {
Poll::Ready(Ok(sock)) => {
let req = this.req.take().unwrap();
trace!(
"TCP connector - successfully connected to connecting to {:?} - {:?}",
req.host(), sock.peer_addr()
);
return Poll::Ready(Ok(Connection::new(sock, req)));
}
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(err)) => {
trace!(
"TCP connector - failed to connect to connecting to {:?} port: {}",
this.req.as_ref().unwrap().host(),
this.port,
);
if this.addrs.is_none() || this.addrs.as_ref().unwrap().is_empty() {
return Poll::Ready(Err(err.into()));
}
}
}
}
// try to connect
let addr = this.addrs.as_mut().unwrap().pop_front().unwrap();
this.stream = Some(TcpStream::connect(addr).boxed());
}
}
}
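
A hedged sketch (not part of the file) of driving the connector service directly with a pre-resolved address, so no resolver is involved; the address and host are placeholders:

async fn tcp_connect_sketch() -> Result<(), ConnectError> {
    use actix_service::Service;

    let mut connector = TcpConnector::new();
    let addr: std::net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
    // `Connect::with` attaches the address up front, so the connector never needs DNS.
    let conn = connector.call(Connect::with("localhost", addr)).await?;
    assert_eq!(conn.host(), "localhost");
    Ok(())
}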


@ -0,0 +1,26 @@
use std::io;
use derive_more::{Display, From};
use trust_dns_resolver::error::ResolveError;
#[derive(Debug, From, Display)]
pub enum ConnectError {
/// Failed to resolve the hostname
#[display(fmt = "Failed resolving hostname: {}", _0)]
Resolver(ResolveError),
/// No dns records
#[display(fmt = "No dns records found for the input")]
NoRecords,
/// Invalid input
InvalidInput,
/// Unresolved host name
#[display(fmt = "Connector received `Connect` method with unresolved host")]
Unresolverd,
/// Connection io error
#[display(fmt = "{}", _0)]
Io(io::Error),
}

actix-connect/src/lib.rs Normal file

@ -0,0 +1,111 @@
//! Actix connect - tcp connector service
//!
//! ## Package feature
//!
//! * `openssl` - enables ssl support via `openssl` crate
//! * `rustls` - enables ssl support via `rustls` crate
#![deny(rust_2018_idioms, warnings)]
#![allow(clippy::type_complexity)]
#![recursion_limit = "128"]
#[macro_use]
extern crate log;
mod connect;
mod connector;
mod error;
mod resolve;
mod service;
pub mod ssl;
#[cfg(feature = "uri")]
mod uri;
use actix_rt::{net::TcpStream, Arbiter};
use actix_service::{pipeline, pipeline_factory, Service, ServiceFactory};
use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
use trust_dns_resolver::system_conf::read_system_conf;
use trust_dns_resolver::AsyncResolver;
pub mod resolver {
pub use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
pub use trust_dns_resolver::system_conf::read_system_conf;
pub use trust_dns_resolver::{error::ResolveError, AsyncResolver};
}
pub use self::connect::{Address, Connect, Connection};
pub use self::connector::{TcpConnector, TcpConnectorFactory};
pub use self::error::ConnectError;
pub use self::resolve::{Resolver, ResolverFactory};
pub use self::service::{ConnectService, ConnectServiceFactory, TcpConnectService};
pub fn start_resolver(cfg: ResolverConfig, opts: ResolverOpts) -> AsyncResolver {
let (resolver, bg) = AsyncResolver::new(cfg, opts);
actix_rt::spawn(bg);
resolver
}
struct DefaultResolver(AsyncResolver);
pub(crate) fn get_default_resolver() -> AsyncResolver {
if Arbiter::contains_item::<DefaultResolver>() {
Arbiter::get_item(|item: &DefaultResolver| item.0.clone())
} else {
let (cfg, opts) = match read_system_conf() {
Ok((cfg, opts)) => (cfg, opts),
Err(e) => {
log::error!("TRust-DNS can not load system config: {}", e);
(ResolverConfig::default(), ResolverOpts::default())
}
};
let (resolver, bg) = AsyncResolver::new(cfg, opts);
actix_rt::spawn(bg);
Arbiter::set_item(DefaultResolver(resolver.clone()));
resolver
}
}
pub fn start_default_resolver() -> AsyncResolver {
get_default_resolver()
}
/// Create tcp connector service
pub fn new_connector<T: Address + 'static>(
resolver: AsyncResolver,
) -> impl Service<Request = Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError>
+ Clone {
pipeline(Resolver::new(resolver)).and_then(TcpConnector::new())
}
/// Create TCP connector service factory
pub fn new_connector_factory<T: Address + 'static>(
resolver: AsyncResolver,
) -> impl ServiceFactory<
Config = (),
Request = Connect<T>,
Response = Connection<T, TcpStream>,
Error = ConnectError,
InitError = (),
> + Clone {
pipeline_factory(ResolverFactory::new(resolver)).and_then(TcpConnectorFactory::new())
}
/// Create connector service with default parameters
pub fn default_connector<T: Address + 'static>(
) -> impl Service<Request = Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError>
+ Clone {
pipeline(Resolver::default()).and_then(TcpConnector::new())
}
/// Create connector service factory with default parameters
pub fn default_connector_factory<T: Address + 'static>() -> impl ServiceFactory<
Config = (),
Request = Connect<T>,
Response = Connection<T, TcpStream>,
Error = ConnectError,
InitError = (),
> + Clone {
pipeline_factory(ResolverFactory::default()).and_then(TcpConnectorFactory::new())
}
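
For illustration only (this block is not part of lib.rs): the default connector resolves and connects in one call. It has to run inside an actix-rt arbiter, since the default resolver is stored in arbiter storage; the host is a placeholder.

async fn default_connector_sketch() -> Result<(), ConnectError> {
    use actix_service::Service;

    let mut connector = default_connector();
    let conn = connector.call(Connect::new("example.com").set_port(80)).await?;
    println!("connected to {:?}", conn.peer_addr());
    Ok(())
}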


@ -0,0 +1,188 @@
use std::future::Future;
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory};
use futures::future::{ok, Either, Ready};
use trust_dns_resolver::lookup_ip::LookupIpFuture;
use trust_dns_resolver::{AsyncResolver, Background};
use crate::connect::{Address, Connect};
use crate::error::ConnectError;
use crate::get_default_resolver;
/// DNS Resolver Service factory
pub struct ResolverFactory<T> {
resolver: Option<AsyncResolver>,
_t: PhantomData<T>,
}
impl<T> ResolverFactory<T> {
/// Create new resolver instance with custom configuration and options.
pub fn new(resolver: AsyncResolver) -> Self {
ResolverFactory {
resolver: Some(resolver),
_t: PhantomData,
}
}
pub fn service(&self) -> Resolver<T> {
Resolver {
resolver: self.resolver.clone(),
_t: PhantomData,
}
}
}
impl<T> Default for ResolverFactory<T> {
fn default() -> Self {
ResolverFactory {
resolver: None,
_t: PhantomData,
}
}
}
impl<T> Clone for ResolverFactory<T> {
fn clone(&self) -> Self {
ResolverFactory {
resolver: self.resolver.clone(),
_t: PhantomData,
}
}
}
impl<T: Address> ServiceFactory for ResolverFactory<T> {
type Request = Connect<T>;
type Response = Connect<T>;
type Error = ConnectError;
type Config = ();
type Service = Resolver<T>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(self.service())
}
}
/// DNS Resolver Service
pub struct Resolver<T> {
resolver: Option<AsyncResolver>,
_t: PhantomData<T>,
}
impl<T> Resolver<T> {
/// Create new resolver instance with custom configuration and options.
pub fn new(resolver: AsyncResolver) -> Self {
Resolver {
resolver: Some(resolver),
_t: PhantomData,
}
}
}
impl<T> Default for Resolver<T> {
fn default() -> Self {
Resolver {
resolver: None,
_t: PhantomData,
}
}
}
impl<T> Clone for Resolver<T> {
fn clone(&self) -> Self {
Resolver {
resolver: self.resolver.clone(),
_t: PhantomData,
}
}
}
impl<T: Address> Service for Resolver<T> {
type Request = Connect<T>;
type Response = Connect<T>;
type Error = ConnectError;
type Future = Either<ResolverFuture<T>, Ready<Result<Connect<T>, Self::Error>>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, mut req: Connect<T>) -> Self::Future {
if req.addr.is_some() {
Either::Right(ok(req))
} else if let Ok(ip) = req.host().parse() {
req.addr = Some(either::Either::Left(SocketAddr::new(ip, req.port())));
Either::Right(ok(req))
} else {
trace!("DNS resolver: resolving host {:?}", req.host());
if self.resolver.is_none() {
self.resolver = Some(get_default_resolver());
}
Either::Left(ResolverFuture::new(req, self.resolver.as_ref().unwrap()))
}
}
}
#[doc(hidden)]
/// Resolver future
pub struct ResolverFuture<T: Address> {
req: Option<Connect<T>>,
lookup: Background<LookupIpFuture>,
}
impl<T: Address> ResolverFuture<T> {
pub fn new(req: Connect<T>, resolver: &AsyncResolver) -> Self {
let lookup = if let Some(host) = req.host().splitn(2, ':').next() {
resolver.lookup_ip(host)
} else {
resolver.lookup_ip(req.host())
};
ResolverFuture {
lookup,
req: Some(req),
}
}
}
impl<T: Address> Future for ResolverFuture<T> {
type Output = Result<Connect<T>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
match Pin::new(&mut this.lookup).poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(Ok(ips)) => {
let req = this.req.take().unwrap();
let port = req.port();
let req = req.set_addrs(ips.iter().map(|ip| SocketAddr::new(ip, port)));
trace!(
"DNS resolver: host {:?} resolved to {:?}",
req.host(),
req.addrs()
);
if req.addr.is_none() {
Poll::Ready(Err(ConnectError::NoRecords))
} else {
Poll::Ready(Ok(req))
}
}
Poll::Ready(Err(e)) => {
trace!(
"DNS resolver: failed to resolve host {:?} err: {}",
this.req.as_ref().unwrap().host(),
e
);
Poll::Ready(Err(e.into()))
}
}
}
}
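
A small stand-alone sketch (not part of the file) of the resolver service. With `Resolver::default()` the arbiter's shared resolver is created lazily on first use, so this also assumes an actix-rt context; the host is a placeholder.

async fn resolve_sketch() -> Result<(), ConnectError> {
    use actix_service::Service;

    let mut resolver = Resolver::default();
    let req = resolver.call(Connect::new("example.com").set_port(443)).await?;
    for addr in req.addrs() {
        println!("resolved {} -> {}", req.host(), addr);
    }
    Ok(())
}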


@ -0,0 +1,231 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_rt::net::TcpStream;
use actix_service::{Service, ServiceFactory};
use either::Either;
use futures::future::{ok, Ready};
use trust_dns_resolver::AsyncResolver;
use crate::connect::{Address, Connect, Connection};
use crate::connector::{TcpConnector, TcpConnectorFactory};
use crate::error::ConnectError;
use crate::resolve::{Resolver, ResolverFactory};
pub struct ConnectServiceFactory<T> {
tcp: TcpConnectorFactory<T>,
resolver: ResolverFactory<T>,
}
impl<T> ConnectServiceFactory<T> {
/// Construct new ConnectService factory
pub fn new() -> Self {
ConnectServiceFactory {
tcp: TcpConnectorFactory::default(),
resolver: ResolverFactory::default(),
}
}
/// Construct new connect service with custom dns resolver
pub fn with_resolver(resolver: AsyncResolver) -> Self {
ConnectServiceFactory {
tcp: TcpConnectorFactory::default(),
resolver: ResolverFactory::new(resolver),
}
}
/// Construct new service
pub fn service(&self) -> ConnectService<T> {
ConnectService {
tcp: self.tcp.service(),
resolver: self.resolver.service(),
}
}
/// Construct new tcp stream service
pub fn tcp_service(&self) -> TcpConnectService<T> {
TcpConnectService {
tcp: self.tcp.service(),
resolver: self.resolver.service(),
}
}
}
impl<T> Default for ConnectServiceFactory<T> {
fn default() -> Self {
ConnectServiceFactory {
tcp: TcpConnectorFactory::default(),
resolver: ResolverFactory::default(),
}
}
}
impl<T> Clone for ConnectServiceFactory<T> {
fn clone(&self) -> Self {
ConnectServiceFactory {
tcp: self.tcp.clone(),
resolver: self.resolver.clone(),
}
}
}
impl<T: Address> ServiceFactory for ConnectServiceFactory<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = ConnectService<T>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(self.service())
}
}
#[derive(Clone)]
pub struct ConnectService<T> {
tcp: TcpConnector<T>,
resolver: Resolver<T>,
}
impl<T: Address> Service for ConnectService<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Future = ConnectServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
ConnectServiceResponse {
state: ConnectState::Resolve(self.resolver.call(req)),
tcp: self.tcp.clone(),
}
}
}
enum ConnectState<T: Address> {
Resolve(<Resolver<T> as Service>::Future),
Connect(<TcpConnector<T> as Service>::Future),
}
impl<T: Address> ConnectState<T> {
fn poll(
&mut self,
cx: &mut Context<'_>,
) -> Either<Poll<Result<Connection<T, TcpStream>, ConnectError>>, Connect<T>> {
match self {
ConnectState::Resolve(ref mut fut) => match Pin::new(fut).poll(cx) {
Poll::Pending => Either::Left(Poll::Pending),
Poll::Ready(Ok(res)) => Either::Right(res),
Poll::Ready(Err(err)) => Either::Left(Poll::Ready(Err(err))),
},
ConnectState::Connect(ref mut fut) => Either::Left(Pin::new(fut).poll(cx)),
}
}
}
pub struct ConnectServiceResponse<T: Address> {
state: ConnectState<T>,
tcp: TcpConnector<T>,
}
impl<T: Address> Future for ConnectServiceResponse<T> {
type Output = Result<Connection<T, TcpStream>, ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let res = match self.state.poll(cx) {
Either::Right(res) => {
self.state = ConnectState::Connect(self.tcp.call(res));
self.state.poll(cx)
}
Either::Left(res) => return res,
};
match res {
Either::Left(res) => res,
Either::Right(_) => panic!(),
}
}
}
#[derive(Clone)]
pub struct TcpConnectService<T> {
tcp: TcpConnector<T>,
resolver: Resolver<T>,
}
impl<T: Address + 'static> Service for TcpConnectService<T> {
type Request = Connect<T>;
type Response = TcpStream;
type Error = ConnectError;
type Future = TcpConnectServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
TcpConnectServiceResponse {
state: TcpConnectState::Resolve(self.resolver.call(req)),
tcp: self.tcp.clone(),
}
}
}
enum TcpConnectState<T: Address> {
Resolve(<Resolver<T> as Service>::Future),
Connect(<TcpConnector<T> as Service>::Future),
}
impl<T: Address> TcpConnectState<T> {
fn poll(
&mut self,
cx: &mut Context<'_>,
) -> Either<Poll<Result<TcpStream, ConnectError>>, Connect<T>> {
match self {
TcpConnectState::Resolve(ref mut fut) => match Pin::new(fut).poll(cx) {
Poll::Pending => (),
Poll::Ready(Ok(res)) => return Either::Right(res),
Poll::Ready(Err(err)) => return Either::Left(Poll::Ready(Err(err))),
},
TcpConnectState::Connect(ref mut fut) => {
if let Poll::Ready(res) = Pin::new(fut).poll(cx) {
return match res {
Ok(conn) => Either::Left(Poll::Ready(Ok(conn.into_parts().0))),
Err(err) => Either::Left(Poll::Ready(Err(err))),
};
}
}
}
Either::Left(Poll::Pending)
}
}
pub struct TcpConnectServiceResponse<T: Address> {
state: TcpConnectState<T>,
tcp: TcpConnector<T>,
}
impl<T: Address> Future for TcpConnectServiceResponse<T> {
type Output = Result<TcpStream, ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let res = match self.state.poll(cx) {
Either::Right(res) => {
self.state = TcpConnectState::Connect(self.tcp.call(res));
self.state.poll(cx)
}
Either::Left(res) => return res,
};
match res {
Either::Left(res) => res,
Either::Right(_) => panic!(),
}
}
}
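
Illustrative wiring (not part of the file) of the combined resolve-then-connect service built above; swap `default()` for `with_resolver(..)` to supply a custom resolver. The host is a placeholder.

async fn connect_service_sketch() -> Result<(), ConnectError> {
    use actix_service::Service;

    let factory = ConnectServiceFactory::<&'static str>::default();
    let mut svc = factory.service();
    let conn = svc.call(Connect::new("example.com").set_port(80)).await?;
    println!("peer addr: {:?}", conn.peer_addr());
    Ok(())
}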


@ -0,0 +1,7 @@
//! SSL Services
#[cfg(feature = "openssl")]
pub mod openssl;
#[cfg(feature = "rustls")]
pub mod rustls;


@ -0,0 +1,267 @@
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io};
pub use open_ssl::ssl::{Error as SslError, SslConnector, SslMethod};
pub use tokio_openssl::{HandshakeError, SslStream};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::net::TcpStream;
use actix_service::{Service, ServiceFactory};
use futures::future::{err, ok, Either, FutureExt, LocalBoxFuture, Ready};
use trust_dns_resolver::AsyncResolver;
use crate::{
Address, Connect, ConnectError, ConnectService, ConnectServiceFactory, Connection,
};
/// Openssl connector factory
pub struct OpensslConnector<T, U> {
connector: SslConnector,
_t: PhantomData<(T, U)>,
}
impl<T, U> OpensslConnector<T, U> {
pub fn new(connector: SslConnector) -> Self {
OpensslConnector {
connector,
_t: PhantomData,
}
}
}
impl<T, U> OpensslConnector<T, U>
where
T: Address + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
pub fn service(connector: SslConnector) -> OpensslConnectorService<T, U> {
OpensslConnectorService {
connector,
_t: PhantomData,
}
}
}
impl<T, U> Clone for OpensslConnector<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T, U> ServiceFactory for OpensslConnector<T, U>
where
T: Address + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
type Request = Connection<T, U>;
type Response = Connection<T, SslStream<U>>;
type Error = io::Error;
type Config = ();
type Service = OpensslConnectorService<T, U>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(OpensslConnectorService {
connector: self.connector.clone(),
_t: PhantomData,
})
}
}
pub struct OpensslConnectorService<T, U> {
connector: SslConnector,
_t: PhantomData<(T, U)>,
}
impl<T, U> Clone for OpensslConnectorService<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T, U> Service for OpensslConnectorService<T, U>
where
T: Address + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
type Request = Connection<T, U>;
type Response = Connection<T, SslStream<U>>;
type Error = io::Error;
type Future = Either<ConnectAsyncExt<T, U>, Ready<Result<Self::Response, Self::Error>>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, stream: Connection<T, U>) -> Self::Future {
trace!("SSL Handshake start for: {:?}", stream.host());
let (io, stream) = stream.replace(());
let host = stream.host().to_string();
match self.connector.configure() {
Err(e) => Either::Right(err(io::Error::new(io::ErrorKind::Other, e))),
Ok(config) => Either::Left(ConnectAsyncExt {
fut: async move { tokio_openssl::connect(config, &host, io).await }
.boxed_local(),
stream: Some(stream),
_t: PhantomData,
}),
}
}
}
pub struct ConnectAsyncExt<T, U> {
fut: LocalBoxFuture<'static, Result<SslStream<U>, HandshakeError<U>>>,
stream: Option<Connection<T, ()>>,
_t: PhantomData<U>,
}
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
type Output = Result<Connection<T, SslStream<U>>, io::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
match Pin::new(&mut this.fut).poll(cx) {
Poll::Ready(Ok(stream)) => {
let s = this.stream.take().unwrap();
trace!("SSL Handshake success: {:?}", s.host());
Poll::Ready(Ok(s.replace(stream).1))
}
Poll::Ready(Err(e)) => {
trace!("SSL Handshake error: {:?}", e);
Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, format!("{}", e))))
}
Poll::Pending => Poll::Pending,
}
}
}
pub struct OpensslConnectServiceFactory<T> {
tcp: ConnectServiceFactory<T>,
openssl: OpensslConnector<T, TcpStream>,
}
impl<T> OpensslConnectServiceFactory<T> {
/// Construct new OpensslConnectService factory
pub fn new(connector: SslConnector) -> Self {
OpensslConnectServiceFactory {
tcp: ConnectServiceFactory::default(),
openssl: OpensslConnector::new(connector),
}
}
/// Construct new connect service with custom dns resolver
pub fn with_resolver(connector: SslConnector, resolver: AsyncResolver) -> Self {
OpensslConnectServiceFactory {
tcp: ConnectServiceFactory::with_resolver(resolver),
openssl: OpensslConnector::new(connector),
}
}
/// Construct openssl connect service
pub fn service(&self) -> OpensslConnectService<T> {
OpensslConnectService {
tcp: self.tcp.service(),
openssl: OpensslConnectorService {
connector: self.openssl.connector.clone(),
_t: PhantomData,
},
}
}
}
impl<T> Clone for OpensslConnectServiceFactory<T> {
fn clone(&self) -> Self {
OpensslConnectServiceFactory {
tcp: self.tcp.clone(),
openssl: self.openssl.clone(),
}
}
}
impl<T: Address + 'static> ServiceFactory for OpensslConnectServiceFactory<T> {
type Request = Connect<T>;
type Response = SslStream<TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = OpensslConnectService<T>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(self.service())
}
}
#[derive(Clone)]
pub struct OpensslConnectService<T> {
tcp: ConnectService<T>,
openssl: OpensslConnectorService<T, TcpStream>,
}
impl<T: Address + 'static> Service for OpensslConnectService<T> {
type Request = Connect<T>;
type Response = SslStream<TcpStream>;
type Error = ConnectError;
type Future = OpensslConnectServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
OpensslConnectServiceResponse {
fut1: Some(self.tcp.call(req)),
fut2: None,
openssl: self.openssl.clone(),
}
}
}
pub struct OpensslConnectServiceResponse<T: Address + 'static> {
fut1: Option<<ConnectService<T> as Service>::Future>,
fut2: Option<<OpensslConnectorService<T, TcpStream> as Service>::Future>,
openssl: OpensslConnectorService<T, TcpStream>,
}
impl<T: Address> Future for OpensslConnectServiceResponse<T> {
type Output = Result<SslStream<TcpStream>, ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if let Some(ref mut fut) = self.fut1 {
match futures::ready!(Pin::new(fut).poll(cx)) {
Ok(res) => {
let _ = self.fut1.take();
self.fut2 = Some(self.openssl.call(res));
}
Err(e) => return Poll::Ready(Err(e)),
}
}
if let Some(ref mut fut) = self.fut2 {
match futures::ready!(Pin::new(fut).poll(cx)) {
Ok(connect) => Poll::Ready(Ok(connect.into_parts().0)),
Err(e) => Poll::Ready(Err(ConnectError::Io(io::Error::new(
io::ErrorKind::Other,
e,
)))),
}
} else {
Poll::Pending
}
}
}
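
A hedged sketch (not part of the file) of using the OpenSSL connect factory above; `SslConnector::builder(SslMethod::tls())` is the usual openssl 0.10 setup, and the host is a placeholder.

async fn openssl_connect_sketch() {
    use actix_service::{Service, ServiceFactory};

    let connector = SslConnector::builder(SslMethod::tls()).unwrap().build();
    let factory = OpensslConnectServiceFactory::<&'static str>::new(connector);
    let mut svc = factory.new_service(()).await.unwrap();
    // On success this yields an `SslStream<TcpStream>` after TCP connect + TLS handshake.
    let res = svc.call(Connect::new("example.com").set_port(443)).await;
    println!("handshake ok: {}", res.is_ok());
}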


@ -0,0 +1,136 @@
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
pub use rust_tls::Session;
pub use tokio_rustls::{client::TlsStream, rustls::ClientConfig};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_service::{Service, ServiceFactory};
use futures::future::{ok, Ready};
use tokio_rustls::{Connect, TlsConnector};
use webpki::DNSNameRef;
use crate::{Address, Connection};
/// Rustls connector factory
pub struct RustlsConnector<T, U> {
connector: Arc<ClientConfig>,
_t: PhantomData<(T, U)>,
}
impl<T, U> RustlsConnector<T, U> {
pub fn new(connector: Arc<ClientConfig>) -> Self {
RustlsConnector {
connector,
_t: PhantomData,
}
}
}
impl<T, U> RustlsConnector<T, U>
where
T: Address,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
pub fn service(connector: Arc<ClientConfig>) -> RustlsConnectorService<T, U> {
RustlsConnectorService {
connector,
_t: PhantomData,
}
}
}
impl<T, U> Clone for RustlsConnector<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T: Address, U> ServiceFactory for RustlsConnector<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
type Request = Connection<T, U>;
type Response = Connection<T, TlsStream<U>>;
type Error = std::io::Error;
type Config = ();
type Service = RustlsConnectorService<T, U>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(RustlsConnectorService {
connector: self.connector.clone(),
_t: PhantomData,
})
}
}
pub struct RustlsConnectorService<T, U> {
connector: Arc<ClientConfig>,
_t: PhantomData<(T, U)>,
}
impl<T, U> Clone for RustlsConnectorService<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T: Address, U> Service for RustlsConnectorService<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
type Request = Connection<T, U>;
type Response = Connection<T, TlsStream<U>>;
type Error = std::io::Error;
type Future = ConnectAsyncExt<T, U>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, stream: Connection<T, U>) -> Self::Future {
trace!("SSL Handshake start for: {:?}", stream.host());
let (io, stream) = stream.replace(());
let host = DNSNameRef::try_from_ascii_str(stream.host())
.expect("rustls currently only handles hostname-based connections. See https://github.com/briansmith/webpki/issues/54");
ConnectAsyncExt {
fut: TlsConnector::from(self.connector.clone()).connect(host, io),
stream: Some(stream),
}
}
}
pub struct ConnectAsyncExt<T, U> {
fut: Connect<U>,
stream: Option<Connection<T, ()>>,
}
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
type Output = Result<Connection<T, TlsStream<U>>, std::io::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
Poll::Ready(
futures::ready!(Pin::new(&mut this.fut).poll(cx)).map(|stream| {
let s = this.stream.take().unwrap();
trace!("SSL Handshake success: {:?}", s.host());
s.replace(stream).1
}),
)
}
}

actix-connect/src/uri.rs Normal file

@ -0,0 +1,37 @@
use http::Uri;
use crate::Address;
impl Address for Uri {
fn host(&self) -> &str {
self.host().unwrap_or("")
}
fn port(&self) -> Option<u16> {
if let Some(port) = self.port_u16() {
Some(port)
} else {
port(self.scheme_str())
}
}
}
// TODO: load data from file
fn port(scheme: Option<&str>) -> Option<u16> {
if let Some(scheme) = scheme {
match scheme {
"http" => Some(80),
"https" => Some(443),
"ws" => Some(80),
"wss" => Some(443),
"amqp" => Some(5672),
"amqps" => Some(5671),
"sb" => Some(5671),
"mqtt" => Some(1883),
"mqtts" => Some(8883),
_ => None,
}
} else {
None
}
}
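
A small illustration (not part of the file) of the scheme-based port fallback, using fully-qualified calls so they don't clash with `Uri`'s inherent `host`/`port` methods; the URI is a placeholder and the `uri` feature is assumed to be enabled.

fn uri_address_sketch() {
    use std::convert::TryFrom;

    let uri = http::Uri::try_from("https://example.com").unwrap();
    // No explicit port in the URI, so the "https" default of 443 applies.
    assert_eq!(Address::port(&uri), Some(443));
    assert_eq!(Address::host(&uri), "example.com");
}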


@ -0,0 +1,137 @@
use std::io;
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use actix_service::{fn_service, Service, ServiceFactory};
use actix_testing::TestServer;
use bytes::Bytes;
use futures::SinkExt;
use actix_connect::resolver::{ResolverConfig, ResolverOpts};
use actix_connect::Connect;
#[cfg(feature = "openssl")]
#[actix_rt::test]
async fn test_string() {
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let mut conn = actix_connect::default_connector();
let addr = format!("localhost:{}", srv.port());
let con = conn.call(addr.into()).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[cfg(feature = "rustls")]
#[actix_rt::test]
async fn test_rustls_string() {
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let mut conn = actix_connect::default_connector();
let addr = format!("localhost:{}", srv.port());
let con = conn.call(addr.into()).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[actix_rt::test]
async fn test_static_str() {
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let resolver = actix_connect::start_default_resolver();
let mut conn = actix_connect::new_connector(resolver.clone());
let con = conn.call(Connect::with("10", srv.addr())).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
let connect = Connect::new(srv.host().to_owned());
let mut conn = actix_connect::new_connector(resolver);
let con = conn.call(connect).await;
assert!(con.is_err());
}
#[actix_rt::test]
async fn test_new_service() {
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let resolver =
actix_connect::start_resolver(ResolverConfig::default(), ResolverOpts::default());
let factory = actix_connect::new_connector_factory(resolver);
let mut conn = factory.new_service(()).await.unwrap();
let con = conn.call(Connect::with("10", srv.addr())).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[cfg(feature = "openssl")]
#[actix_rt::test]
async fn test_uri() {
use std::convert::TryFrom;
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let mut conn = actix_connect::default_connector();
let addr = http::Uri::try_from(format!("https://localhost:{}", srv.port())).unwrap();
let con = conn.call(addr.into()).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[cfg(feature = "rustls")]
#[actix_rt::test]
async fn test_rustls_uri() {
use std::convert::TryFrom;
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let mut conn = actix_connect::default_connector();
let addr = http::Uri::try_from(format!("https://localhost:{}", srv.port())).unwrap();
let con = conn.call(addr.into()).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}

actix-ioframe/CHANGES.md Normal file

@ -0,0 +1,33 @@
# Changes
## [0.5.0] - 2019-12-29
* Simplify state management
* Allow to set custom output stream
* Removed disconnect callback
## [0.4.1] - 2019-12-11
* Disconnect callback accepts owned state
## [0.4.0] - 2019-12-11
* Remove `E` param
## [0.3.0-alpha.3] - 2019-12-07
* Migrate to tokio 0.2
## [0.3.0-alpha.2] - 2019-12-02
* Migrate to `std::future`
## [0.1.1] - 2019-10-14
* Re-register task on every dispatcher poll.
## [0.1.0] - 2019-09-25
* Initial release

actix-ioframe/Cargo.toml Normal file

@ -0,0 +1,31 @@
[package]
name = "actix-ioframe"
version = "0.5.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix framed service"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-ioframe/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
edition = "2018"
[lib]
name = "actix_ioframe"
path = "src/lib.rs"
[dependencies]
actix-service = "1.0.1"
actix-codec = "0.2.0"
actix-utils = "1.0.4"
actix-rt = "1.0.0"
bytes = "0.5.3"
either = "1.5.3"
futures = "0.3.1"
pin-project = "0.4.6"
log = "0.4"
[dev-dependencies]
actix-connect = "1.0.0"
actix-testing = "1.0.0"


@ -0,0 +1,122 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use actix_utils::mpsc::Receiver;
use futures::Stream;
pub struct Connect<Io, Codec>
where
Codec: Encoder + Decoder,
{
io: Io,
_t: PhantomData<Codec>,
}
impl<Io, Codec> Connect<Io, Codec>
where
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
{
pub(crate) fn new(io: Io) -> Self {
Self {
io,
_t: PhantomData,
}
}
pub fn codec(
self,
codec: Codec,
) -> ConnectResult<Io, (), Codec, Receiver<<Codec as Encoder>::Item>> {
ConnectResult {
state: (),
out: None,
framed: Framed::new(self.io, codec),
}
}
}
#[pin_project::pin_project]
pub struct ConnectResult<Io, St, Codec: Encoder + Decoder, Out> {
pub(crate) state: St,
pub(crate) out: Option<Out>,
pub(crate) framed: Framed<Io, Codec>,
}
impl<Io, St, Codec: Encoder + Decoder, Out: Unpin> ConnectResult<Io, St, Codec, Out> {
#[inline]
pub fn get_ref(&self) -> &Io {
self.framed.get_ref()
}
#[inline]
pub fn get_mut(&mut self) -> &mut Io {
self.framed.get_mut()
}
pub fn out<U>(self, out: U) -> ConnectResult<Io, St, Codec, U>
where
U: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
ConnectResult {
state: self.state,
framed: self.framed,
out: Some(out),
}
}
#[inline]
pub fn state<S>(self, state: S) -> ConnectResult<Io, S, Codec, Out> {
ConnectResult {
state,
framed: self.framed,
out: self.out,
}
}
}
impl<Io, St, Codec, Out> Stream for ConnectResult<Io, St, Codec, Out>
where
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
{
type Item = Result<<Codec as Decoder>::Item, <Codec as Decoder>::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.project().framed.next_item(cx)
}
}
impl<Io, St, Codec, Out> futures::Sink<<Codec as Encoder>::Item>
for ConnectResult<Io, St, Codec, Out>
where
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
{
type Error = <Codec as Encoder>::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.framed.is_write_ready() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn start_send(
self: Pin<&mut Self>,
item: <Codec as Encoder>::Item,
) -> Result<(), Self::Error> {
self.project().framed.write(item)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.get_mut().framed.flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.get_mut().framed.close(cx)
}
}


@ -0,0 +1,242 @@
//! Framed dispatcher service and related utilities
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use actix_service::Service;
use actix_utils::mpsc;
use futures::Stream;
use log::debug;
use crate::error::ServiceError;
type Request<U> = <U as Decoder>::Item;
type Response<U> = <U as Encoder>::Item;
/// Dispatcher is a future that reads frames from the `Framed` object
/// and passes them to the service.
pub(crate) struct Dispatcher<S, T, U, Out>
where
S: Service<Request = Request<U>, Response = Option<Response<U>>>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Encoder + Decoder,
<U as Encoder>::Item: 'static,
<U as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <U as Encoder>::Item> + Unpin,
{
service: S,
sink: Option<Out>,
state: FramedState<S, U>,
framed: Framed<T, U>,
rx: mpsc::Receiver<Result<<U as Encoder>::Item, S::Error>>,
}
impl<S, T, U, Out> Dispatcher<S, T, U, Out>
where
S: Service<Request = Request<U>, Response = Option<Response<U>>>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder,
<U as Encoder>::Item: 'static,
<U as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <U as Encoder>::Item> + Unpin,
{
pub(crate) fn new(framed: Framed<T, U>, service: S, sink: Option<Out>) -> Self {
Dispatcher {
sink,
service,
framed,
rx: mpsc::channel().1,
state: FramedState::Processing,
}
}
}
enum FramedState<S: Service, U: Encoder + Decoder> {
Processing,
Error(ServiceError<S::Error, U>),
FramedError(ServiceError<S::Error, U>),
FlushAndStop,
Stopping,
}
impl<S: Service, U: Encoder + Decoder> FramedState<S, U> {
fn take_error(&mut self) -> ServiceError<S::Error, U> {
match std::mem::replace(self, FramedState::Processing) {
FramedState::Error(err) => err,
_ => panic!(),
}
}
fn take_framed_error(&mut self) -> ServiceError<S::Error, U> {
match std::mem::replace(self, FramedState::Processing) {
FramedState::FramedError(err) => err,
_ => panic!(),
}
}
}
impl<S, T, U, Out> Dispatcher<S, T, U, Out>
where
S: Service<Request = Request<U>, Response = Option<Response<U>>>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder,
<U as Encoder>::Item: 'static,
<U as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <U as Encoder>::Item> + Unpin,
{
fn poll_read(&mut self, cx: &mut Context<'_>) -> bool {
loop {
match self.service.poll_ready(cx) {
Poll::Ready(Ok(_)) => {
let item = match self.framed.next_item(cx) {
Poll::Ready(Some(Ok(el))) => el,
Poll::Ready(Some(Err(err))) => {
self.state = FramedState::FramedError(ServiceError::Decoder(err));
return true;
}
Poll::Pending => return false,
Poll::Ready(None) => {
log::trace!("Client disconnected");
self.state = FramedState::Stopping;
return true;
}
};
let tx = self.rx.sender();
let fut = self.service.call(item);
actix_rt::spawn(async move {
let item = fut.await;
let item = match item {
Ok(Some(item)) => Ok(item),
Ok(None) => return,
Err(err) => Err(err),
};
let _ = tx.send(item);
});
}
Poll::Pending => return false,
Poll::Ready(Err(err)) => {
self.state = FramedState::Error(ServiceError::Service(err));
return true;
}
}
}
}
/// write to framed object
fn poll_write(&mut self, cx: &mut Context<'_>) -> bool {
loop {
while !self.framed.is_write_buf_full() {
match Pin::new(&mut self.rx).poll_next(cx) {
Poll::Ready(Some(Ok(msg))) => {
if let Err(err) = self.framed.write(msg) {
self.state = FramedState::FramedError(ServiceError::Encoder(err));
return true;
}
continue;
}
Poll::Ready(Some(Err(err))) => {
self.state = FramedState::Error(ServiceError::Service(err));
return true;
}
Poll::Ready(None) | Poll::Pending => (),
}
if self.sink.is_some() {
match Pin::new(self.sink.as_mut().unwrap()).poll_next(cx) {
Poll::Ready(Some(msg)) => {
if let Err(err) = self.framed.write(msg) {
self.state =
FramedState::FramedError(ServiceError::Encoder(err));
return true;
}
continue;
}
Poll::Ready(None) => {
let _ = self.sink.take();
self.state = FramedState::FlushAndStop;
return true;
}
Poll::Pending => (),
}
}
break;
}
if !self.framed.is_write_buf_empty() {
match self.framed.flush(cx) {
Poll::Pending => break,
Poll::Ready(Ok(_)) => (),
Poll::Ready(Err(err)) => {
debug!("Error sending data: {:?}", err);
self.state = FramedState::FramedError(ServiceError::Encoder(err));
return true;
}
}
} else {
break;
}
}
false
}
pub(crate) fn poll(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Result<(), ServiceError<S::Error, U>>> {
match self.state {
FramedState::Processing => loop {
let read = self.poll_read(cx);
let write = self.poll_write(cx);
if read || write {
continue;
} else {
return Poll::Pending;
}
},
FramedState::Error(_) => {
// flush write buffer
if !self.framed.is_write_buf_empty() {
if let Poll::Pending = self.framed.flush(cx) {
return Poll::Pending;
}
}
Poll::Ready(Err(self.state.take_error()))
}
FramedState::FlushAndStop => {
// drain service responses
match Pin::new(&mut self.rx).poll_next(cx) {
Poll::Ready(Some(Ok(msg))) => {
if let Err(_) = self.framed.write(msg) {
return Poll::Ready(Ok(()));
}
}
Poll::Ready(Some(Err(_))) => return Poll::Ready(Ok(())),
Poll::Ready(None) | Poll::Pending => (),
}
// flush io
if !self.framed.is_write_buf_empty() {
match self.framed.flush(cx) {
Poll::Ready(Err(err)) => {
debug!("Error sending data: {:?}", err);
}
Poll::Pending => {
return Poll::Pending;
}
Poll::Ready(_) => (),
}
};
Poll::Ready(Ok(()))
}
FramedState::FramedError(_) => Poll::Ready(Err(self.state.take_framed_error())),
FramedState::Stopping => Poll::Ready(Ok(())),
}
}
}


@ -0,0 +1,49 @@
use std::fmt;
use actix_codec::{Decoder, Encoder};
/// Framed service errors
pub enum ServiceError<E, U: Encoder + Decoder> {
/// Inner service error
Service(E),
/// Encoder parse error
Encoder(<U as Encoder>::Error),
/// Decoder parse error
Decoder(<U as Decoder>::Error),
}
impl<E, U: Encoder + Decoder> From<E> for ServiceError<E, U> {
fn from(err: E) -> Self {
ServiceError::Service(err)
}
}
impl<E, U: Encoder + Decoder> fmt::Debug for ServiceError<E, U>
where
E: fmt::Debug,
<U as Encoder>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
ServiceError::Service(ref e) => write!(fmt, "ServiceError::Service({:?})", e),
ServiceError::Encoder(ref e) => write!(fmt, "ServiceError::Encoder({:?})", e),
ServiceError::Decoder(ref e) => write!(fmt, "ServiceError::Decoder({:?})", e),
}
}
}
impl<E, U: Encoder + Decoder> fmt::Display for ServiceError<E, U>
where
E: fmt::Display,
<U as Encoder>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
ServiceError::Service(ref e) => write!(fmt, "{}", e),
ServiceError::Encoder(ref e) => write!(fmt, "{:?}", e),
ServiceError::Decoder(ref e) => write!(fmt, "{:?}", e),
}
}
}

actix-ioframe/src/lib.rs Normal file

@ -0,0 +1,11 @@
// #![deny(rust_2018_idioms, warnings)]
#![allow(clippy::type_complexity, clippy::too_many_arguments)]
mod connect;
mod dispatcher;
mod error;
mod service;
pub use self::connect::{Connect, ConnectResult};
pub use self::error::ServiceError;
pub use self::service::{Builder, FactoryBuilder};


@ -0,0 +1,416 @@
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use actix_service::{IntoService, IntoServiceFactory, Service, ServiceFactory};
use either::Either;
use futures::{ready, Stream};
use pin_project::project;
use crate::connect::{Connect, ConnectResult};
use crate::dispatcher::Dispatcher;
use crate::error::ServiceError;
type RequestItem<U> = <U as Decoder>::Item;
type ResponseItem<U> = Option<<U as Encoder>::Item>;
/// Service builder - structure that follows the builder pattern
/// for building instances for framed services.
pub struct Builder<St, C, Io, Codec, Out> {
connect: C,
_t: PhantomData<(St, Io, Codec, Out)>,
}
impl<St, C, Io, Codec, Out> Builder<St, C, Io, Codec, Out>
where
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec, Out>>,
Io: AsyncRead + AsyncWrite,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
/// Construct a framed handler service with the specified connect service
pub fn new<F>(connect: F) -> Builder<St, C, Io, Codec, Out>
where
F: IntoService<C>,
Io: AsyncRead + AsyncWrite,
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec, Out>>,
Codec: Decoder + Encoder,
Out: Stream<Item = <Codec as Encoder>::Item>,
{
Builder {
connect: connect.into_service(),
_t: PhantomData,
}
}
/// Provide stream items handler service and construct service factory.
pub fn build<F, T>(self, service: F) -> FramedServiceImpl<St, C, T, Io, Codec, Out>
where
F: IntoServiceFactory<T>,
T: ServiceFactory<
Config = St,
Request = RequestItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
{
FramedServiceImpl {
connect: self.connect,
handler: Rc::new(service.into_factory()),
_t: PhantomData,
}
}
}
/// Service factory builder - structure that follows the builder pattern
/// for building framed service factories.
pub struct FactoryBuilder<St, C, Io, Codec, Out> {
connect: C,
_t: PhantomData<(St, Io, Codec, Out)>,
}
impl<St, C, Io, Codec, Out> FactoryBuilder<St, C, Io, Codec, Out>
where
Io: AsyncRead + AsyncWrite,
C: ServiceFactory<
Config = (),
Request = Connect<Io, Codec>,
Response = ConnectResult<Io, St, Codec, Out>,
>,
Codec: Decoder + Encoder,
<Codec as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
/// Construct a framed handler service factory with the specified connect service
pub fn new<F>(connect: F) -> FactoryBuilder<St, C, Io, Codec, Out>
where
F: IntoServiceFactory<C>,
Io: AsyncRead + AsyncWrite,
C: ServiceFactory<
Config = (),
Request = Connect<Io, Codec>,
Response = ConnectResult<Io, St, Codec, Out>,
>,
Codec: Decoder + Encoder,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
FactoryBuilder {
connect: connect.into_factory(),
_t: PhantomData,
}
}
pub fn build<F, T, Cfg>(self, service: F) -> FramedService<St, C, T, Io, Codec, Out, Cfg>
where
F: IntoServiceFactory<T>,
T: ServiceFactory<
Config = St,
Request = RequestItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
{
FramedService {
connect: self.connect,
handler: Rc::new(service.into_factory()),
_t: PhantomData,
}
}
}
pub struct FramedService<St, C, T, Io, Codec, Out, Cfg> {
connect: C,
handler: Rc<T>,
_t: PhantomData<(St, Io, Codec, Out, Cfg)>,
}
impl<St, C, T, Io, Codec, Out, Cfg> ServiceFactory
for FramedService<St, C, T, Io, Codec, Out, Cfg>
where
Io: AsyncRead + AsyncWrite,
C: ServiceFactory<
Config = (),
Request = Connect<Io, Codec>,
Response = ConnectResult<Io, St, Codec, Out>,
>,
T: ServiceFactory<
Config = St,
Request = RequestItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
type Config = Cfg;
type Request = Io;
type Response = ();
type Error = ServiceError<C::Error, Codec>;
type InitError = C::InitError;
type Service = FramedServiceImpl<St, C::Service, T, Io, Codec, Out>;
type Future = FramedServiceResponse<St, C, T, Io, Codec, Out>;
fn new_service(&self, _: Cfg) -> Self::Future {
// create connect service and then create service impl
FramedServiceResponse {
fut: self.connect.new_service(()),
handler: self.handler.clone(),
}
}
}
#[pin_project::pin_project]
pub struct FramedServiceResponse<St, C, T, Io, Codec, Out>
where
Io: AsyncRead + AsyncWrite,
C: ServiceFactory<
Config = (),
Request = Connect<Io, Codec>,
Response = ConnectResult<Io, St, Codec, Out>,
>,
T: ServiceFactory<
Config = St,
Request = RequestItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
#[pin]
fut: C::Future,
handler: Rc<T>,
}
impl<St, C, T, Io, Codec, Out> Future for FramedServiceResponse<St, C, T, Io, Codec, Out>
where
Io: AsyncRead + AsyncWrite,
C: ServiceFactory<
Config = (),
Request = Connect<Io, Codec>,
Response = ConnectResult<Io, St, Codec, Out>,
>,
T: ServiceFactory<
Config = St,
Request = RequestItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
type Output = Result<FramedServiceImpl<St, C::Service, T, Io, Codec, Out>, C::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let connect = ready!(this.fut.poll(cx))?;
Poll::Ready(Ok(FramedServiceImpl {
connect,
handler: this.handler.clone(),
_t: PhantomData,
}))
}
}
pub struct FramedServiceImpl<St, C, T, Io, Codec, Out> {
connect: C,
handler: Rc<T>,
_t: PhantomData<(St, Io, Codec, Out)>,
}
impl<St, C, T, Io, Codec, Out> Service for FramedServiceImpl<St, C, T, Io, Codec, Out>
where
Io: AsyncRead + AsyncWrite,
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec, Out>>,
T: ServiceFactory<
Config = St,
Request = RequestItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
type Request = Io;
type Response = ();
type Error = ServiceError<C::Error, Codec>;
type Future = FramedServiceImplResponse<St, Io, Codec, Out, C, T>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.connect.poll_ready(cx).map_err(|e| e.into())
}
fn call(&mut self, req: Io) -> Self::Future {
FramedServiceImplResponse {
inner: FramedServiceImplResponseInner::Connect(
self.connect.call(Connect::new(req)),
self.handler.clone(),
),
}
}
}
#[pin_project::pin_project]
pub struct FramedServiceImplResponse<St, Io, Codec, Out, C, T>
where
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec, Out>>,
T: ServiceFactory<
Config = St,
Request = RequestItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
#[pin]
inner: FramedServiceImplResponseInner<St, Io, Codec, Out, C, T>,
}
impl<St, Io, Codec, Out, C, T> Future for FramedServiceImplResponse<St, Io, Codec, Out, C, T>
where
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec, Out>>,
T: ServiceFactory<
Config = St,
Request = RequestItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
type Output = Result<(), ServiceError<C::Error, Codec>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
loop {
match this.inner.poll(cx) {
Either::Left(new) => {
this = self.as_mut().project();
this.inner.set(new)
}
Either::Right(poll) => return poll,
};
}
}
}
#[pin_project::pin_project]
enum FramedServiceImplResponseInner<St, Io, Codec, Out, C, T>
where
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec, Out>>,
T: ServiceFactory<
Config = St,
Request = RequestItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
Connect(#[pin] C::Future, Rc<T>),
Handler(#[pin] T::Future, Option<Framed<Io, Codec>>, Option<Out>),
Dispatcher(Dispatcher<T::Service, Io, Codec, Out>),
}
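// State machine driven by the `poll` impl below: `Connect` awaits the connect
// service, `Handler` awaits construction of the per-connection item handler,
// and `Dispatcher` runs the framed dispatch loop until the connection ends.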
impl<St, Io, Codec, Out, C, T> FramedServiceImplResponseInner<St, Io, Codec, Out, C, T>
where
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec, Out>>,
T: ServiceFactory<
Config = St,
Request = RequestItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
Out: Stream<Item = <Codec as Encoder>::Item> + Unpin,
{
#[project]
fn poll(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Either<
FramedServiceImplResponseInner<St, Io, Codec, Out, C, T>,
Poll<Result<(), ServiceError<C::Error, Codec>>>,
> {
#[project]
match self.project() {
FramedServiceImplResponseInner::Connect(fut, handler) => match fut.poll(cx) {
Poll::Ready(Ok(res)) => Either::Left(FramedServiceImplResponseInner::Handler(
handler.new_service(res.state),
Some(res.framed),
res.out,
)),
Poll::Pending => Either::Right(Poll::Pending),
Poll::Ready(Err(e)) => Either::Right(Poll::Ready(Err(e.into()))),
},
FramedServiceImplResponseInner::Handler(fut, framed, out) => {
match fut.poll(cx) {
Poll::Ready(Ok(handler)) => {
Either::Left(FramedServiceImplResponseInner::Dispatcher(
Dispatcher::new(framed.take().unwrap(), handler, out.take()),
))
}
Poll::Pending => Either::Right(Poll::Pending),
Poll::Ready(Err(e)) => Either::Right(Poll::Ready(Err(e.into()))),
}
}
FramedServiceImplResponseInner::Dispatcher(ref mut fut) => {
Either::Right(fut.poll(cx))
}
}
}
}


@ -0,0 +1,55 @@
use std::cell::Cell;
use std::rc::Rc;
use actix_codec::BytesCodec;
use actix_service::{fn_factory_with_config, fn_service, IntoService, Service};
use actix_testing::TestServer;
use actix_utils::mpsc;
use bytes::{Bytes, BytesMut};
use futures::future::ok;
use actix_ioframe::{Builder, Connect, FactoryBuilder};
#[derive(Clone)]
struct State(Option<mpsc::Sender<Bytes>>);
#[actix_rt::test]
async fn test_basic() {
let client_item = Rc::new(Cell::new(false));
let srv = TestServer::with(move || {
FactoryBuilder::new(fn_service(|conn: Connect<_, _>| {
ok(conn.codec(BytesCodec).state(State(None)))
}))
// echo
.build(fn_service(|t: BytesMut| ok(Some(t.freeze()))))
});
let item = client_item.clone();
let mut client = Builder::new(fn_service(move |conn: Connect<_, _>| {
async move {
let (tx, rx) = mpsc::channel();
let _ = tx.send(Bytes::from_static(b"Hello"));
Ok(conn.codec(BytesCodec).out(rx).state(State(Some(tx))))
}
}))
.build(fn_factory_with_config(move |mut cfg: State| {
let item = item.clone();
ok((move |t: BytesMut| {
assert_eq!(t.freeze(), Bytes::from_static(b"Hello"));
item.set(true);
// drop Sender, which will close connection
cfg.0.take();
ok::<_, ()>(None)
})
.into_service())
}));
let conn = actix_connect::default_connector()
.call(actix_connect::Connect::with(String::new(), srv.addr()))
.await
.unwrap();
client.call(conn.into_parts().0).await.unwrap();
assert!(client_item.get());
}


@ -1 +0,0 @@
/wip


@ -1,53 +0,0 @@
# Changes
## Unreleased
- Minimum supported Rust version (MSRV) is now 1.71.
## 0.2.4
- Update `syn` dependency to `2`.
- Minimum supported Rust version (MSRV) is now 1.65.
## 0.2.3
- Fix test macro in presence of other imports named "test". [#399]
[#399]: https://github.com/actix/actix-net/pull/399
## 0.2.2
- Improve error recovery potential when macro input is invalid. [#391]
- Allow custom `System`s on test macro. [#391]
[#391]: https://github.com/actix/actix-net/pull/391
## 0.2.1
- Add optional argument `system` to `main` macro which can be used to specify the path to `actix_rt::System` (useful for re-exports). [#363]
[#363]: https://github.com/actix/actix-net/pull/363
## 0.2.0
- Update to latest `actix_rt::System::new` signature. [#261]
[#261]: https://github.com/actix/actix-net/pull/261
## 0.2.0-beta.1
- Remove `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.3
- Add `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.2
- Forward actix_rt::test arguments to test function [#127]
[#127]: https://github.com/actix/actix-net/pull/127


@ -1,39 +1,21 @@
[package]
name = "actix-macros"
version = "0.2.4"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Ibraheem Ahmed <ibrah1440@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Macros for Actix system and runtime"
version = "0.1.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix runtime macros"
repository = "https://github.com/actix/actix-net"
documentation = "https://docs.rs/actix-macros/"
categories = ["network-programming", "asynchronous"]
license.workspace = true
edition.workspace = true
rust-version.workspace = true
[package.metadata.cargo-machete]
ignored = [
"proc_macro2", # specified for minimal versions compat
]
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
proc-macro = true
[dependencies]
quote = "1"
syn = { version = "2", features = ["full"] }
# minimal versions compat
[target.'cfg(any())'.dependencies]
proc-macro2 = "1.0.60"
quote = "^1"
syn = { version = "^1", features = ["full"] }
[dev-dependencies]
actix-rt = "2"
futures-util = { version = "0.3.17", default-features = false }
rustversion-msrv = "0.100"
trybuild = "1"
[lints]
workspace = true
actix-rt = { version = "1.0.0" }


@ -1,97 +1,33 @@
//! Macros for Actix system and runtime.
//!
//! The [`actix-rt`](https://docs.rs/actix-rt) crate must be available for macro output to compile.
//!
//! # Entry-point
//! See docs for the [`#[main]`](macro@main) macro.
//!
//! # Tests
//! See docs for the [`#[test]`](macro@test) macro.
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
//! Macros for use with Tokio
extern crate proc_macro;
use proc_macro::TokenStream;
use quote::quote;
use syn::parse::Parser as _;
type AttributeArgs = syn::punctuated::Punctuated<syn::Meta, syn::Token![,]>;
/// Marks async entry-point function to be executed by Actix system.
/// Marks async function to be executed by actix system.
///
/// # Examples
/// ```
/// ## Usage
///
/// ```rust
/// #[actix_rt::main]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
#[proc_macro_attribute]
pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
Ok(input) => input,
// on parse err, make IDEs happy; see fn docs
Err(err) => return input_and_compile_error(item, err),
};
let parser = AttributeArgs::parse_terminated;
let args = match parser.parse(args.clone()) {
Ok(args) => args,
Err(err) => return input_and_compile_error(args, err),
};
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
let name = &sig.ident;
if sig.asyncness.is_none() {
return syn::Error::new_spanned(
sig.fn_token,
"the async keyword is missing from the function declaration",
)
.to_compile_error()
.into();
}
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
for arg in &args {
match arg {
syn::Meta::NameValue(syn::MetaNameValue {
path,
value:
syn::Expr::Lit(syn::ExprLit {
lit: syn::Lit::Str(lit),
..
}),
..
}) => match path
.get_ident()
.map(|i| i.to_string().to_lowercase())
.as_deref()
{
Some("system") => match lit.parse() {
Ok(path) => system = path,
Err(_) => {
return syn::Error::new_spanned(lit, "Expected path")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
}
return syn::Error::new_spanned(sig.fn_token, "only async fn is supported")
.to_compile_error()
.into();
}
sig.asyncness = None;
@ -99,121 +35,66 @@ pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
(quote! {
#(#attrs)*
#vis #sig {
<#system>::new().block_on(async move { #body })
actix_rt::System::new(stringify!(#name))
.block_on(async move { #body })
}
})
.into()
}
/// Marks async test function to be executed in an Actix system.
/// Marks async test function to be executed by actix runtime.
///
/// # Examples
/// ```
/// ## Usage
///
/// ```no_run
/// #[actix_rt::test]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
#[proc_macro_attribute]
pub fn test(args: TokenStream, item: TokenStream) -> TokenStream {
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
Ok(input) => input,
// on parse err, make IDEs happy; see fn docs
Err(err) => return input_and_compile_error(item, err),
};
pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
let input = syn::parse_macro_input!(item as syn::ItemFn);
let parser = AttributeArgs::parse_terminated;
let args = match parser.parse(args.clone()) {
Ok(args) => args,
Err(err) => return input_and_compile_error(args, err),
};
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let ret = &input.sig.output;
let name = &input.sig.ident;
let body = &input.block;
let attrs = &input.attrs;
let mut has_test_attr = false;
for attr in attrs {
if attr.path().is_ident("test") {
if attr.path.is_ident("test") {
has_test_attr = true;
}
}
if sig.asyncness.is_none() {
if input.sig.asyncness.is_none() {
return syn::Error::new_spanned(
input.sig.fn_token,
"the async keyword is missing from the function declaration",
format!("only async fn is supported, {}", input.sig.ident),
)
.to_compile_error()
.into();
}
sig.asyncness = None;
let missing_test_attr = if has_test_attr {
quote! {}
} else {
quote! { #[::core::prelude::v1::test] }
};
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
for arg in &args {
match arg {
syn::Meta::NameValue(syn::MetaNameValue {
path,
value:
syn::Expr::Lit(syn::ExprLit {
lit: syn::Lit::Str(lit),
..
}),
..
}) => match path
.get_ident()
.map(|i| i.to_string().to_lowercase())
.as_deref()
{
Some("system") => match lit.parse() {
Ok(path) => system = path,
Err(_) => {
return syn::Error::new_spanned(lit, "Expected path")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
let result = if has_test_attr {
quote! {
#(#attrs)*
fn #name() #ret {
actix_rt::System::new("test")
.block_on(async { #body })
}
}
}
(quote! {
#missing_test_attr
#(#attrs)*
#vis #sig {
<#system>::new().block_on(async { #body })
} else {
quote! {
#[test]
#(#attrs)*
fn #name() #ret {
actix_rt::System::new("test")
.block_on(async { #body })
}
}
})
.into()
}
};
/// Converts the error to a token stream and appends it to the original input.
///
/// Returning the original input in addition to the error is good for IDEs which can gracefully
/// recover and show more precise errors within the macro body.
///
/// See <https://github.com/rust-analyzer/rust-analyzer/issues/10468> for more info.
fn input_and_compile_error(mut item: TokenStream, err: syn::Error) -> TokenStream {
let compile_err = TokenStream::from(err.to_compile_error());
item.extend(compile_err);
item
result.into()
}


@ -1,21 +0,0 @@
#![allow(missing_docs)]
#[rustversion_msrv::msrv]
#[test]
fn compile_macros() {
let t = trybuild::TestCases::new();
t.pass("tests/trybuild/main-01-basic.rs");
t.compile_fail("tests/trybuild/main-02-only-async.rs");
t.pass("tests/trybuild/main-03-fn-params.rs");
t.pass("tests/trybuild/main-04-system-path.rs");
t.compile_fail("tests/trybuild/main-05-system-expect-path.rs");
t.compile_fail("tests/trybuild/main-06-unknown-attr.rs");
t.pass("tests/trybuild/test-01-basic.rs");
t.pass("tests/trybuild/test-02-keep-attrs.rs");
t.compile_fail("tests/trybuild/test-03-only-async.rs");
t.pass("tests/trybuild/test-04-system-path.rs");
t.compile_fail("tests/trybuild/test-05-system-expect-path.rs");
t.compile_fail("tests/trybuild/test-06-unknown-attr.rs");
}


@ -1,4 +0,0 @@
#[actix_rt::main]
async fn main() {
println!("Hello world");
}


@ -1,4 +0,0 @@
#[actix_rt::main]
fn main() {
futures_util::future::ready(()).await
}


@ -1,11 +0,0 @@
error: the async keyword is missing from the function declaration
--> tests/trybuild/main-02-only-async.rs:2:1
|
2 | fn main() {
| ^^
error[E0601]: `main` function not found in crate `$CRATE`
--> tests/trybuild/main-02-only-async.rs:4:2
|
4 | }
| ^ consider adding a `main` function to `$DIR/tests/trybuild/main-02-only-async.rs`


@ -1,6 +0,0 @@
#[actix_rt::main]
async fn main2(_param: bool) {
futures_util::future::ready(()).await
}
fn main() {}


@ -1,8 +0,0 @@
mod system {
pub use actix_rt::System as MySystem;
}
#[actix_rt::main(system = "system::MySystem")]
async fn main() {
futures_util::future::ready(()).await
}


@ -1,4 +0,0 @@
#[actix_rt::main(system = "!@#*&")]
async fn main2() {}
fn main() {}


@ -1,5 +0,0 @@
error: Expected path
--> $DIR/main-05-system-expect-path.rs:1:27
|
1 | #[actix_rt::main(system = "!@#*&")]
| ^^^^^^^


@ -1,7 +0,0 @@
#[actix_rt::main(foo = "bar")]
async fn async_main() {}
#[actix_rt::main(bar::baz)]
async fn async_main2() {}
fn main() {}


@ -1,11 +0,0 @@
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:1:18
|
1 | #[actix_rt::main(foo = "bar")]
| ^^^^^^^^^^^
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:4:18
|
4 | #[actix_rt::main(bar::baz)]
| ^^^^^^^^


@ -1,6 +0,0 @@
#[actix_rt::test]
async fn my_test() {
assert!(true);
}
fn main() {}


@ -1,7 +0,0 @@
#[actix_rt::test]
#[should_panic]
async fn my_test() {
todo!()
}
fn main() {}


@ -1,6 +0,0 @@
#[actix_rt::test]
fn my_test() {
futures_util::future::ready(()).await
}
fn main() {}


@ -1,5 +0,0 @@
error: the async keyword is missing from the function declaration
--> $DIR/test-03-only-async.rs:2:1
|
2 | fn my_test() {
| ^^


@ -1,10 +0,0 @@
mod system {
pub use actix_rt::System as MySystem;
}
#[actix_rt::test(system = "system::MySystem")]
async fn my_test() {
futures_util::future::ready(()).await
}
fn main() {}


@ -1,4 +0,0 @@
#[actix_rt::test(system = "!@#*&")]
async fn my_test() {}
fn main() {}


@ -1,5 +0,0 @@
error: Expected path
--> $DIR/test-05-system-expect-path.rs:1:27
|
1 | #[actix_rt::test(system = "!@#*&")]
| ^^^^^^^


@ -1,7 +0,0 @@
#[actix_rt::test(foo = "bar")]
async fn my_test_1() {}
#[actix_rt::test(bar::baz)]
async fn my_test_2() {}
fn main() {}


@ -1,11 +0,0 @@
error: Unknown attribute specified
--> $DIR/test-06-unknown-attr.rs:1:18
|
1 | #[actix_rt::test(foo = "bar")]
| ^^^^^^^^^^^
error: Unknown attribute specified
--> $DIR/test-06-unknown-attr.rs:4:18
|
4 | #[actix_rt::test(bar::baz)]
| ^^^^^^^^


@ -1,174 +1,95 @@
# Changes
## Unreleased
## [1.0.0] - 2019-12-11
- Minimum supported Rust version (MSRV) is now 1.71.
* Update dependencies
## 2.10.0
## [1.0.0-alpha.3] - 2019-12-07
- Relax `F`'s bound (`Fn => FnOnce`) on `{Arbiter, System}::with_tokio_rt()` functions.
- Update `tokio-uring` dependency to `0.5`.
- Minimum supported Rust version (MSRV) is now 1.70.
### Fixed
## 2.9.0
* Fix compilation on non-unix platforms
- Add `actix_rt::System::runtime()` method to retrieve the underlying `actix_rt::Runtime` runtime.
- Add `actix_rt::Runtime::tokio_runtime()` method to retrieve the underlying Tokio runtime.
- Minimum supported Rust version (MSRV) is now 1.65.
### Changed
## 2.8.0
* Migrate to tokio 0.2
- Add `#[track_caller]` attribute to `spawn` functions and methods.
- Update `tokio-uring` dependency to `0.4`.
- Minimum supported Rust version (MSRV) is now 1.59.
## 2.7.0
## [1.0.0-alpha.2] - 2019-12-02
- Update `tokio-uring` dependency to `0.3`.
- Minimum supported Rust version (MSRV) is now 1.49.
Added
## 2.6.0
* Export `main` and `test` attribute macros
- Update `tokio-uring` dependency to `0.2`.
* Export `time` module (re-export of tokio-timer)
## 2.5.1
* Export `net` module (re-export of tokio-net)
- Expose `System::with_tokio_rt` and `Arbiter::with_tokio_rt`.
## 2.5.0
## [1.0.0-alpha.1] - 2019-11-22
- Add `System::run_with_code` to allow retrieving the exit code on stop.
### Changed
## 2.4.0
* Migrate to std::future and tokio 0.2
- Add `Arbiter::try_current` for situations where thread may or may not have Arbiter context.
- Start io-uring with `System::new` when feature is enabled.
## 2.3.0
## [0.2.6] - 2019-11-14
- The `spawn` method can now resolve with non-unit outputs.
- Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux.
### Fixed
## 2.2.0
* Fix arbiter's thread panic message.
- **BREAKING** `ActixStream::{poll_read_ready, poll_write_ready}` methods now return `Ready` object in ok variant.
- Breakage is acceptable since `ActixStream` was not intended to be public.
### Added
## 2.1.0
* Allow to join arbiter's thread. #60
- Add `ActixStream` extension trait to include readiness methods.
- Re-export `tokio::net::TcpSocket` in `net` module
## 2.0.2
## [0.2.5] - 2019-09-02
- Add `Arbiter::handle` to get a handle of an owned Arbiter.
- Add `System::try_current` for situations where actix may or may not be running a System.
### Added
## 2.0.1
* Add arbiter specific storage
- Expose `JoinError` from Tokio.
## 2.0.0
## [0.2.4] - 2019-07-17
- Remove all Arbiter-local storage methods.
- Re-export `tokio::pin`.
### Changed
## 2.0.0-beta.3
* Avoid a copy of the Future when initializing the Box. #29
- Remove `run_in_tokio`, `attach_to_tokio` and `AsyncSystemRunner`.
- Return `JoinHandle` from `actix_rt::spawn`.
- Remove old `Arbiter::spawn`. Implementation is now inlined into `actix_rt::spawn`.
- Rename `Arbiter::{send => spawn}` and `Arbiter::{exec_fn => spawn_fn}`.
- Remove `Arbiter::exec`.
- Remove deprecated `Arbiter::local_join` and `Arbiter::is_running`.
- `Arbiter::spawn` now accepts !Unpin futures.
- `System::new` no longer takes arguments.
- Remove `System::with_current`.
- Remove `Builder`.
- Add `System::with_init` as replacement for `Builder::run`.
- Rename `System::{is_set => is_registered}`.
- Add `ArbiterHandle` for sending messages to non-current-thread arbiters.
- `System::arbiter` now returns an `&ArbiterHandle`.
- `Arbiter::current` now returns an `ArbiterHandle` instead.
- `Arbiter::join` now takes self by value.
## 2.0.0-beta.2
## [0.2.3] - 2019-06-22
- Add `task` mod with re-export of `tokio::task::{spawn_blocking, yield_now, JoinHandle}`
- Add default "macros" feature to allow faster compile times when using `default-features=false`.
### Added
## 2.0.0-beta.1
* Allow to start System using existing CurrentThread Handle #22
- Add `System::attach_to_tokio` method.
- Update `tokio` dependency to `1.0`.
- Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep` to stay aligned with Tokio's naming.
- Remove `'static` lifetime requirement for `Runtime::block_on` and `SystemRunner::block_on`.
- These methods now accept `&self` when calling.
- Remove `'static` lifetime requirement for `System::run` and `Builder::run`.
- `Arbiter::spawn` now panics when `System` is not in scope.
- Fix work load issue by removing `PENDING` thread local.
## 1.1.1
## [0.2.2] - 2019-03-28
- Fix memory leak due to
### Changed
## 1.1.0 _(YANKED)_
* Moved `blocking` module to `actix-threadpool` crate
- Expose `System::is_set` to check if current system has been started
- Add `Arbiter::is_running` to check if event loop is running
- Add `Arbiter::local_join` associated function to get be able to `await` for spawned futures
## 1.0.0
## [0.2.1] - 2019-03-11
- Update dependencies
### Added
## 1.0.0-alpha.3
* Added `blocking` module
- Migrate to tokio 0.2
- Fix compilation on non-unix platforms
* Arbiter::exec_fn - execute fn on the arbiter's thread
## 1.0.0-alpha.2
* Arbiter::exec - execute fn on the arbiter's thread and wait result
- Export `main` and `test` attribute macros
- Export `time` module (re-export of tokio-timer)
- Export `net` module (re-export of tokio-net)
## 1.0.0-alpha.1
## [0.2.0] - 2019-03-06
- Migrate to std::future and tokio 0.2
* `run` method returns `io::Result<()>`
## 0.2.6
* Removed `Handle`
- Allow to join arbiter's thread. #60
- Fix arbiter's thread panic message.
## 0.2.5
## [0.1.0] - 2018-12-09
- Add arbiter specific storage
## 0.2.4
- Avoid a copy of the Future when initializing the Box. #29
## 0.2.3
- Allow to start System using existing CurrentThread Handle #22
## 0.2.2
- Moved `blocking` module to `actix-threadpool` crate
## 0.2.1
- Added `blocking` module
- Added `Arbiter::exec_fn` - execute fn on the arbiter's thread
- Added `Arbiter::exec` - execute fn on the arbiter's thread and wait result
## 0.2.0
- `run` method returns `io::Result<()>`
- Removed `Handle`
## 0.1.0
- Initial release
* Initial release


@ -1,36 +1,23 @@
[package]
name = "actix-rt"
version = "2.10.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>", "Rob Ede <robjtede@icloud.com>"]
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
keywords = ["async", "futures", "io", "runtime"]
version = "1.0.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix runtime"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-rt/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition.workspace = true
rust-version.workspace = true
license = "MIT/Apache-2.0"
edition = "2018"
[package.metadata.cargo_check_external_types]
allowed_external_types = ["actix_macros::*", "tokio::*"]
[features]
default = ["macros"]
macros = ["actix-macros"]
io-uring = ["tokio-uring"]
[lib]
name = "actix_rt"
path = "src/lib.rs"
[dependencies]
actix-macros = { version = "0.2.3", optional = true }
futures-core = { version = "0.3", default-features = false }
tokio = { version = "1.44.2", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
# runtime for `io-uring` feature
[target.'cfg(target_os = "linux")'.dependencies]
tokio-uring = { version = "0.5", optional = true }
[dev-dependencies]
tokio = { version = "1.44.2", features = ["full"] }
[lints]
workspace = true
actix-macros = "0.1.0"
actix-threadpool = "0.3"
futures = "0.3.1"
copyless = "0.1.4"
tokio = { version = "0.2.6", default-features=false, features = ["rt-core", "rt-util", "io-driver", "tcp", "uds", "udp", "time", "signal", "stream"] }


@ -1,14 +0,0 @@
# actix-rt
> Tokio-based single-threaded async runtime for the Actix ecosystem.
[![crates.io](https://img.shields.io/crates/v/actix-rt?label=latest)](https://crates.io/crates/actix-rt)
[![Documentation](https://docs.rs/actix-rt/badge.svg?version=2.10.0)](https://docs.rs/actix-rt/2.10.0)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-rt.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-rt/2.10.0/status.svg)](https://deps.rs/crate/actix-rt/2.10.0)
![Download](https://img.shields.io/crates/d/actix-rt.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/WghFtEH6Hb)
See crate documentation for more: https://docs.rs/actix-rt.
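A minimal usage sketch to go with the crate link above (not taken from the original README), assuming actix-rt 2.x with the default `macros` feature enabled; the printed text is illustrative only:

```rust
#[actix_rt::main]
async fn main() {
    println!("Hello from the actix-rt single-threaded runtime");
}
```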


@ -1,60 +0,0 @@
//! An example of how to build a multi-thread tokio runtime for the Actix System.
//! Then spawn async tasks that can make use of the tokio runtime's work stealing.
use actix_rt::System;
fn main() {
System::with_tokio_rt(|| {
// build system with a multi-thread tokio runtime.
tokio::runtime::Builder::new_multi_thread()
.worker_threads(2)
.enable_all()
.build()
.unwrap()
})
.block_on(async_main());
}
// async main function that acts like #[actix_web::main] or #[tokio::main]
async fn async_main() {
let (tx, rx) = tokio::sync::oneshot::channel();
// get a handle to system arbiter and spawn async task on it
System::current().arbiter().spawn(async {
// use tokio::spawn to get inside the context of multi thread tokio runtime
let h1 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
});
// work stealing occurs for this task spawn
let h2 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
});
h1.await.unwrap();
h2.await.unwrap();
let _ = tx.send(());
});
rx.await.unwrap();
let (tx, rx) = tokio::sync::oneshot::channel();
let now = std::time::Instant::now();
// without additional tokio::spawn, all spawned tasks run on single thread
System::current().arbiter().spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
let _ = tx.send(());
});
// previous spawn task has blocked the system arbiter thread
// so this task will wait for 2 seconds until it can be run
System::current().arbiter().spawn(async move {
println!("thread id is {:?}", std::thread::current().id());
assert!(now.elapsed() > std::time::Duration::from_secs(2));
});
rx.await.unwrap();
}


@ -1,27 +1,33 @@
use std::{
cell::RefCell,
fmt,
future::Future,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
thread,
};
use std::any::{Any, TypeId};
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::task::{Context, Poll};
use std::{fmt, thread};
use futures_core::ready;
use tokio::sync::mpsc;
use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender};
use futures::channel::oneshot::{channel, Canceled, Sender};
use futures::{future, Future, FutureExt, Stream};
use crate::system::{System, SystemCommand};
use crate::runtime::Runtime;
use crate::system::System;
use copyless::BoxHelper;
thread_local!(
static ADDR: RefCell<Option<Arbiter>> = RefCell::new(None);
static RUNNING: Cell<bool> = Cell::new(false);
static Q: RefCell<Vec<Pin<Box<dyn Future<Output = ()>>>>> = RefCell::new(Vec::new());
static STORAGE: RefCell<HashMap<TypeId, Box<dyn Any>>> = RefCell::new(HashMap::new());
);
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static HANDLE: RefCell<Option<ArbiterHandle>> = const { RefCell::new(None) };
);
pub(crate) enum ArbiterCommand {
Stop,
Execute(Pin<Box<dyn Future<Output = ()> + Send>>),
Execute(Box<dyn Future<Output = ()> + Unpin + Send>),
ExecuteFn(Box<dyn FnExec>),
}
impl fmt::Debug for ArbiterCommand {
@ -29,289 +35,377 @@ impl fmt::Debug for ArbiterCommand {
match self {
ArbiterCommand::Stop => write!(f, "ArbiterCommand::Stop"),
ArbiterCommand::Execute(_) => write!(f, "ArbiterCommand::Execute"),
ArbiterCommand::ExecuteFn(_) => write!(f, "ArbiterCommand::ExecuteFn"),
}
}
}
/// A handle for sending spawn and stop messages to an [Arbiter].
#[derive(Debug, Clone)]
pub struct ArbiterHandle {
tx: mpsc::UnboundedSender<ArbiterCommand>,
}
impl ArbiterHandle {
pub(crate) fn new(tx: mpsc::UnboundedSender<ArbiterCommand>) -> Self {
Self { tx }
}
/// Send a future to the [Arbiter]'s thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the [Arbiter] has died.
pub fn spawn<Fut>(&self, future: Fut) -> bool
where
Fut: Future<Output = ()> + Send + 'static,
{
self.tx
.send(ArbiterCommand::Execute(Box::pin(future)))
.is_ok()
}
/// Send a function to the [Arbiter]'s thread and execute it.
///
/// Any result from the function is discarded. If you require a result, include a response
/// channel in the function.
///
/// Returns true if function was sent successfully and false if the [Arbiter] has died.
pub fn spawn_fn<F>(&self, f: F) -> bool
where
F: FnOnce() + Send + 'static,
{
self.spawn(async { f() })
}
/// Instruct [Arbiter] to stop processing its event loop.
///
/// Returns true if stop message was sent successfully and false if the [Arbiter] has
/// been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
}
/// An Arbiter represents a thread that provides an asynchronous execution environment for futures
/// and functions.
///
/// When an arbiter is created, it spawns a new [OS thread](thread), and hosts an event loop.
#[derive(Debug)]
/// Arbiters provide an asynchronous execution environment for actors, functions
/// and futures. When an Arbiter is created, it spawns a new OS thread, and
/// hosts an event loop. Some Arbiter functions execute on the current thread.
pub struct Arbiter {
tx: mpsc::UnboundedSender<ArbiterCommand>,
thread_handle: thread::JoinHandle<()>,
sender: UnboundedSender<ArbiterCommand>,
thread_handle: Option<thread::JoinHandle<()>>,
}
impl Clone for Arbiter {
fn clone(&self) -> Self {
Self::with_sender(self.sender.clone())
}
}
impl Default for Arbiter {
fn default() -> Self {
Self::new()
}
}
impl Arbiter {
/// Spawn a new Arbiter thread and start its event loop.
///
/// # Panics
/// Panics if a [System] is not registered on the current thread.
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
#[allow(clippy::new_without_default)]
pub fn new() -> Arbiter {
Self::with_tokio_rt(|| {
crate::runtime::default_tokio_runtime().expect("Cannot create new Arbiter's Runtime.")
pub(crate) fn new_system() -> Self {
let (tx, rx) = unbounded();
let arb = Arbiter::with_sender(tx);
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
RUNNING.with(|cell| cell.set(false));
STORAGE.with(|cell| cell.borrow_mut().clear());
Arbiter::spawn(ArbiterController { stop: None, rx });
arb
}
/// Returns the current thread's arbiter's address. If no Arbiter is present, then this
/// function will panic!
pub fn current() -> Arbiter {
ADDR.with(|cell| match *cell.borrow() {
Some(ref addr) => addr.clone(),
None => panic!("Arbiter is not running"),
})
}
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
where
F: FnOnce() -> tokio::runtime::Runtime + Send + 'static,
{
let sys = System::current();
let system_id = sys.id();
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
let (tx, rx) = mpsc::unbounded_channel();
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
let thread_handle = thread::Builder::new()
.name(name.clone())
.spawn({
let tx = tx.clone();
move || {
let rt = crate::runtime::Runtime::from(runtime_factory());
let hnd = ArbiterHandle::new(tx);
System::set_current(sys);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
// register arbiter
let _ = System::current()
.tx()
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
ready_tx.send(()).unwrap();
// run arbiter event processing loop
rt.block_on(ArbiterRunner { rx });
// deregister arbiter
let _ = System::current()
.tx()
.send(SystemCommand::DeregisterArbiter(arb_id));
}
})
.unwrap_or_else(|err| panic!("Cannot spawn Arbiter's thread: {name:?}: {err:?}"));
ready_rx.recv().unwrap();
Arbiter { tx, thread_handle }
/// Stop arbiter from continuing its event loop.
pub fn stop(&self) {
let _ = self.sender.unbounded_send(ArbiterCommand::Stop);
}
/// Spawn a new Arbiter thread and start its event loop with `tokio-uring` runtime.
///
/// # Panics
/// Panics if a [System] is not registered on the current thread.
#[cfg(all(target_os = "linux", feature = "io-uring"))]
#[allow(clippy::new_without_default)]
/// Spawn a new thread and run the event loop in the spawned thread.
/// Returns the address of the newly created arbiter.
pub fn new() -> Arbiter {
let id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt:worker:{}", id);
let sys = System::current();
let system_id = sys.id();
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
let (arb_tx, arb_rx) = unbounded();
let arb_tx2 = arb_tx.clone();
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
let (tx, rx) = mpsc::unbounded_channel();
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
let thread_handle = thread::Builder::new()
let handle = thread::Builder::new()
.name(name.clone())
.spawn({
let tx = tx.clone();
move || {
let hnd = ArbiterHandle::new(tx);
.spawn(move || {
let mut rt = Runtime::new().expect("Can not create Runtime");
let arb = Arbiter::with_sender(arb_tx);
System::set_current(sys);
let (stop, stop_rx) = channel();
RUNNING.with(|cell| cell.set(true));
STORAGE.with(|cell| cell.borrow_mut().clear());
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
System::set_current(sys);
// register arbiter
let _ = System::current()
.tx()
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
// start arbiter controller
rt.spawn(ArbiterController {
stop: Some(stop),
rx: arb_rx,
});
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
ready_tx.send(()).unwrap();
// register arbiter
let _ = System::current()
.sys()
.unbounded_send(SystemCommand::RegisterArbiter(id, arb));
// run arbiter event processing loop
tokio_uring::start(ArbiterRunner { rx });
// run loop
let _ = match rt.block_on(stop_rx) {
Ok(code) => code,
Err(_) => 1,
};
// deregister arbiter
let _ = System::current()
.tx()
.send(SystemCommand::DeregisterArbiter(arb_id));
}
// unregister arbiter
let _ = System::current()
.sys()
.unbounded_send(SystemCommand::UnregisterArbiter(id));
})
.unwrap_or_else(|err| panic!("Cannot spawn Arbiter's thread: {name:?}: {err:?}"));
.unwrap_or_else(|err| {
panic!("Cannot spawn an arbiter's thread {:?}: {:?}", &name, err)
});
ready_rx.recv().unwrap();
Arbiter { tx, thread_handle }
Arbiter {
sender: arb_tx2,
thread_handle: Some(handle),
}
}
/// Sets up an Arbiter runner in a new System using the environment's local set.
pub(crate) fn in_new_system() -> ArbiterHandle {
let (tx, rx) = mpsc::unbounded_channel();
let hnd = ArbiterHandle::new(tx);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
crate::spawn(ArbiterRunner { rx });
hnd
pub(crate) fn run_system(rt: Option<&Runtime>) {
RUNNING.with(|cell| cell.set(true));
Q.with(|cell| {
let mut v = cell.borrow_mut();
for fut in v.drain(..) {
if let Some(rt) = rt {
rt.spawn(fut);
} else {
tokio::task::spawn_local(fut);
}
}
});
}
/// Return a handle to this Arbiter's message sender.
pub fn handle(&self) -> ArbiterHandle {
ArbiterHandle::new(self.tx.clone())
pub(crate) fn stop_system() {
RUNNING.with(|cell| cell.set(false));
}
/// Return a handle to the current thread's Arbiter's message sender.
///
/// # Panics
/// Panics if no Arbiter is running on the current thread.
pub fn current() -> ArbiterHandle {
HANDLE.with(|cell| match *cell.borrow() {
Some(ref hnd) => hnd.clone(),
None => panic!("Arbiter is not running."),
})
}
/// Try to get current running arbiter handle.
///
/// Returns `None` if no Arbiter has been started.
///
/// Unlike [`current`](Self::current), this never panics.
pub fn try_current() -> Option<ArbiterHandle> {
HANDLE.with(|cell| cell.borrow().clone())
}
/// Stop Arbiter from continuing its event loop.
///
/// Returns true if stop message was sent successfully and false if the Arbiter has been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
/// Send a future to the Arbiter's thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the Arbiter has died.
#[track_caller]
pub fn spawn<Fut>(&self, future: Fut) -> bool
/// Spawn a future on the current thread. This does not create a new Arbiter
/// or Arbiter address; it is simply a helper for spawning futures on the current
/// thread.
pub fn spawn<F>(future: F)
where
Fut: Future<Output = ()> + Send + 'static,
F: Future<Output = ()> + 'static,
{
self.tx
.send(ArbiterCommand::Execute(Box::pin(future)))
.is_ok()
RUNNING.with(move |cell| {
if cell.get() {
// Spawn the future on running executor
tokio::task::spawn_local(future);
} else {
// Box the future and push it to the queue, this results in double boxing
// because the executor boxes the future again, but works for now
Q.with(move |cell| {
cell.borrow_mut()
.push(unsafe { Pin::new_unchecked(Box::alloc().init(future)) })
});
}
});
}
/// Send a function to the Arbiter's thread and execute it.
///
/// Any result from the function is discarded. If you require a result, include a response
/// channel in the function.
///
/// Returns true if function was sent successfully and false if the Arbiter has died.
#[track_caller]
pub fn spawn_fn<F>(&self, f: F) -> bool
/// Executes a future on the current thread. This does not create a new Arbiter
/// or Arbiter address; it is simply a helper for executing futures on the current
/// thread.
pub fn spawn_fn<F, R>(f: F)
where
F: FnOnce() -> R + 'static,
R: Future<Output = ()> + 'static,
{
Arbiter::spawn(future::lazy(|_| f()).flatten())
}
/// Send a future to the Arbiter's thread, and spawn it.
pub fn send<F>(&self, future: F)
where
F: Future<Output = ()> + Send + Unpin + 'static,
{
let _ = self
.sender
.unbounded_send(ArbiterCommand::Execute(Box::new(future)));
}
/// Send a function to the Arbiter's thread, and execute it. Any result from the function
/// is discarded.
pub fn exec_fn<F>(&self, f: F)
where
F: FnOnce() + Send + 'static,
{
self.spawn(async { f() })
let _ = self
.sender
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
f();
})));
}
/// Wait for Arbiter's event loop to complete.
/// Send a function to the Arbiter's thread. This function will be executed asynchronously.
/// A future is created, and when resolved will contain the result of the function sent
/// to the Arbiter's thread.
pub fn exec<F, R>(&self, f: F) -> impl Future<Output = Result<R, Canceled>>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
let (tx, rx) = channel();
let _ = self
.sender
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
if !tx.is_canceled() {
let _ = tx.send(f());
}
})));
rx
}
/// Set item to arbiter storage
pub fn set_item<T: 'static>(item: T) {
STORAGE.with(move |cell| cell.borrow_mut().insert(TypeId::of::<T>(), Box::new(item)));
}
/// Check if arbiter storage contains item
pub fn contains_item<T: 'static>() -> bool {
STORAGE.with(move |cell| cell.borrow().get(&TypeId::of::<T>()).is_some())
}
/// Get a reference to a type previously inserted on this arbiter's storage.
///
/// Joins the underlying OS thread handle. See [`JoinHandle::join`](thread::JoinHandle::join).
pub fn join(self) -> thread::Result<()> {
self.thread_handle.join()
/// Panics if the item is not inserted
pub fn get_item<T: 'static, F, R>(mut f: F) -> R
where
F: FnMut(&T) -> R,
{
STORAGE.with(move |cell| {
let st = cell.borrow();
let item = st
.get(&TypeId::of::<T>())
.and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref())
.unwrap();
f(item)
})
}
/// Get a mutable reference to a type previously inserted on this arbiter's storage.
///
/// Panics if the item is not inserted
pub fn get_mut_item<T: 'static, F, R>(mut f: F) -> R
where
F: FnMut(&mut T) -> R,
{
STORAGE.with(move |cell| {
let mut st = cell.borrow_mut();
let item = st
.get_mut(&TypeId::of::<T>())
.and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut())
.unwrap();
f(item)
})
}
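// A hedged usage sketch for the storage API above; `MyConfig` is a
// hypothetical user-defined type and the calls run on the arbiter's thread:
//
//     Arbiter::set_item(MyConfig { retries: 3 });
//     let retries = Arbiter::get_item(|cfg: &MyConfig| cfg.retries);
//     Arbiter::get_mut_item(|cfg: &mut MyConfig| cfg.retries += 1);
//
// `get_item` / `get_mut_item` panic if no `MyConfig` has been inserted;
// `Arbiter::contains_item::<MyConfig>()` can be used to check first.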
fn with_sender(sender: UnboundedSender<ArbiterCommand>) -> Self {
Self {
sender,
thread_handle: None,
}
}
/// Wait for the event loop to stop by joining the underlying thread (if have Some).
pub fn join(&mut self) -> thread::Result<()> {
if let Some(thread_handle) = self.thread_handle.take() {
thread_handle.join()
} else {
Ok(())
}
}
}
/// A persistent future that processes [Arbiter] commands.
struct ArbiterRunner {
rx: mpsc::UnboundedReceiver<ArbiterCommand>,
struct ArbiterController {
stop: Option<Sender<i32>>,
rx: UnboundedReceiver<ArbiterCommand>,
}
impl Future for ArbiterRunner {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match ready!(self.rx.poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process arbiter command
Some(item) => match item {
ArbiterCommand::Stop => {
return Poll::Ready(());
}
ArbiterCommand::Execute(task_fut) => {
tokio::task::spawn_local(task_fut);
}
},
impl Drop for ArbiterController {
fn drop(&mut self) {
if thread::panicking() {
if System::current().stop_on_panic() {
eprintln!("Panic in Arbiter thread, shutting down system.");
System::current().stop_with_code(1)
} else {
eprintln!("Panic in Arbiter thread.");
}
}
}
}
impl Future for ArbiterController {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match Pin::new(&mut self.rx).poll_next(cx) {
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(item)) => match item {
ArbiterCommand::Stop => {
if let Some(stop) = self.stop.take() {
let _ = stop.send(0);
};
return Poll::Ready(());
}
ArbiterCommand::Execute(fut) => {
tokio::task::spawn_local(fut);
}
ArbiterCommand::ExecuteFn(f) => {
f.call_box();
}
},
Poll::Pending => return Poll::Pending,
}
}
}
}
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, Arbiter),
UnregisterArbiter(usize),
}
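// `SystemArbiter` (below) keeps the registry of spawned arbiters. On
// `SystemCommand::Exit` it stops every registered arbiter and then resolves
// the system's stop channel with the given exit code.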
#[derive(Debug)]
pub(crate) struct SystemArbiter {
stop: Option<Sender<i32>>,
commands: UnboundedReceiver<SystemCommand>,
arbiters: HashMap<usize, Arbiter>,
}
impl SystemArbiter {
pub(crate) fn new(stop: Sender<i32>, commands: UnboundedReceiver<SystemCommand>) -> Self {
SystemArbiter {
commands,
stop: Some(stop),
arbiters: HashMap::new(),
}
}
}
impl Future for SystemArbiter {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match Pin::new(&mut self.commands).poll_next(cx) {
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(cmd)) => match cmd {
SystemCommand::Exit(code) => {
// stop arbiters
for arb in self.arbiters.values() {
arb.stop();
}
// stop event loop
if let Some(stop) = self.stop.take() {
let _ = stop.send(code);
}
}
SystemCommand::RegisterArbiter(name, hnd) => {
self.arbiters.insert(name, hnd);
}
SystemCommand::UnregisterArbiter(name) => {
self.arbiters.remove(&name);
}
},
Poll::Pending => return Poll::Pending,
}
}
}
}
pub trait FnExec: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnExec for F
where
F: FnOnce() + Send + 'static,
{
#[allow(clippy::boxed_local)]
fn call_box(self: Box<Self>) {
(*self)()
}
}
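The spawn docs in this file suggest including a response channel in the spawned future when a result is needed. Below is a minimal sketch of that pattern against the newer `Arbiter`/`ArbiterHandle` API shown earlier in this file; the `std::sync::mpsc` channel and the `6 * 7` computation are purely illustrative:

```rust
use std::sync::mpsc;

use actix_rt::{Arbiter, System};

fn main() {
    // A System must be registered on this thread before spawning an Arbiter.
    let _ = System::new();

    let (tx, rx) = mpsc::channel::<u32>();

    let arbiter = Arbiter::new();
    // The spawned future owns the sender; the result travels back over it.
    arbiter.spawn(async move {
        let answer = 6 * 7; // stand-in for real async work
        let _ = tx.send(answer);
    });

    assert_eq!(rx.recv().unwrap(), 42);

    arbiter.stop();
    arbiter.join().unwrap();
}
```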

191
actix-rt/src/builder.rs Normal file

@ -0,0 +1,191 @@
use std::borrow::Cow;
use std::io;
use futures::channel::mpsc::unbounded;
use futures::channel::oneshot::{channel, Receiver};
use futures::future::{lazy, Future, FutureExt};
use tokio::task::LocalSet;
use crate::arbiter::{Arbiter, SystemArbiter};
use crate::runtime::Runtime;
use crate::system::System;
/// Builder struct for an actix runtime.
///
/// Either use `Builder::build` to create a system and start actors.
/// Alternatively, use `Builder::run` to start the tokio runtime and
/// run a function in its context.
pub struct Builder {
/// Name of the System. Defaults to "actix" if unset.
name: Cow<'static, str>,
/// Whether the Arbiter will stop the whole System on uncaught panic. Defaults to false.
stop_on_panic: bool,
}
impl Builder {
pub(crate) fn new() -> Self {
Builder {
name: Cow::Borrowed("actix"),
stop_on_panic: false,
}
}
/// Sets the name of the System.
pub fn name<T: Into<String>>(mut self, name: T) -> Self {
self.name = Cow::Owned(name.into());
self
}
/// Sets the option 'stop_on_panic' which controls whether the System is stopped when an
/// uncaught panic is thrown from a worker thread.
///
/// Defaults to false.
pub fn stop_on_panic(mut self, stop_on_panic: bool) -> Self {
self.stop_on_panic = stop_on_panic;
self
}
/// Create new System.
///
/// This method panics if it cannot create the tokio runtime
pub fn build(self) -> SystemRunner {
self.create_runtime(|| {})
}
/// Create new System that can run asynchronously.
///
/// This method panics if it cannot start the system arbiter
pub(crate) fn build_async(self, local: &LocalSet) -> AsyncSystemRunner {
self.create_async_runtime(local)
}
/// This function will start the tokio runtime and will finish once
/// `System::stop()` is called.
/// Function `f` gets called within the tokio runtime context.
pub fn run<F>(self, f: F) -> io::Result<()>
where
F: FnOnce() + 'static,
{
self.create_runtime(f).run()
}
fn create_async_runtime(self, local: &LocalSet) -> AsyncSystemRunner {
let (stop_tx, stop) = channel();
let (sys_sender, sys_receiver) = unbounded();
let system = System::construct(sys_sender, Arbiter::new_system(), self.stop_on_panic);
// system arbiter
let arb = SystemArbiter::new(stop_tx, sys_receiver);
// start the system arbiter
let _ = local.spawn_local(arb);
AsyncSystemRunner { stop, system }
}
fn create_runtime<F>(self, f: F) -> SystemRunner
where
F: FnOnce() + 'static,
{
let (stop_tx, stop) = channel();
let (sys_sender, sys_receiver) = unbounded();
let system = System::construct(sys_sender, Arbiter::new_system(), self.stop_on_panic);
// system arbiter
let arb = SystemArbiter::new(stop_tx, sys_receiver);
let mut rt = Runtime::new().unwrap();
rt.spawn(arb);
// init system arbiter and run configuration method
rt.block_on(lazy(move |_| f()));
SystemRunner { rt, stop, system }
}
}
#[derive(Debug)]
pub(crate) struct AsyncSystemRunner {
stop: Receiver<i32>,
system: System,
}
impl AsyncSystemRunner {
/// This function will start the event loop and return a future that
/// resolves once the `System::stop()` function is called.
pub(crate) fn run_nonblocking(self) -> impl Future<Output = Result<(), io::Error>> + Send {
let AsyncSystemRunner { stop, .. } = self;
// run loop
lazy(|_| {
Arbiter::run_system(None);
async {
let res = match stop.await {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
};
Arbiter::stop_system();
return res;
}
})
.flatten()
}
}
/// Helper object that runs System's event loop
#[must_use = "SystemRunner must be run"]
#[derive(Debug)]
pub struct SystemRunner {
rt: Runtime,
stop: Receiver<i32>,
system: System,
}
impl SystemRunner {
/// This function will start the event loop and will finish once the
/// `System::stop()` function is called.
pub fn run(self) -> io::Result<()> {
let SystemRunner { mut rt, stop, .. } = self;
// run loop
Arbiter::run_system(Some(&rt));
let result = match rt.block_on(stop) {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
};
Arbiter::stop_system();
result
}
/// Execute a future and wait for result.
pub fn block_on<F, O>(&mut self, fut: F) -> O
where
F: Future<Output = O> + 'static,
{
Arbiter::run_system(Some(&self.rt));
let res = self.rt.block_on(fut);
Arbiter::stop_system();
res
}
}
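A minimal sketch of the `Builder`/`SystemRunner` flow documented above, assuming the accompanying `System::builder()` constructor from the same crate version; the system name and printed text are illustrative only:

```rust
use actix_rt::System;

fn main() -> std::io::Result<()> {
    // Build a named System, run `f` inside its runtime, then block until
    // `System::stop()` is called (here, immediately from within `f`).
    System::builder()
        .name("example")
        .stop_on_panic(true)
        .run(|| {
            println!("running inside the actix system");
            System::current().stop();
        })
}
```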


@ -1,207 +1,66 @@
//! Tokio-based single-threaded async runtime for the Actix ecosystem.
//!
//! In most parts of the Actix ecosystem, it has been chosen to use !Send futures. For this
//! reason, a single-threaded runtime is appropriate since it is guaranteed that futures will not
//! be moved between threads. This can result in small performance improvements over cases where
//! atomics would otherwise be needed.
//!
//! To achieve similar performance to multi-threaded, work-stealing runtimes, applications
//! using `actix-rt` will create multiple, mostly disconnected, single-threaded runtimes.
//! This approach has good performance characteristics for workloads where the majority of tasks
//! have similar runtime expense.
//!
//! The disadvantage is that idle threads will not steal work from very busy, stuck or otherwise
//! backlogged threads. Tasks that are disproportionately expensive should be offloaded to the
//! blocking task thread-pool using [`task::spawn_blocking`].
//!
//! # Examples
//! ```no_run
//! use std::sync::mpsc;
//! use actix_rt::{Arbiter, System};
//!
//! let _ = System::new();
//!
//! let (tx, rx) = mpsc::channel::<u32>();
//!
//! let arbiter = Arbiter::new();
//! arbiter.spawn_fn(move || tx.send(42).unwrap());
//!
//! let num = rx.recv().unwrap();
//! assert_eq!(num, 42);
//!
//! arbiter.stop();
//! arbiter.join().unwrap();
//! ```
//!
//! # `io-uring` Support
//!
//! There is experimental support for using io-uring with this crate by enabling the
//! `io-uring` feature. For now, it is semver exempt.
//!
//! Note that there are currently some unimplemented parts of using `actix-rt` with `io-uring`.
//! In particular, when running a `System`, only `System::block_on` is supported.
//! A runtime implementation that runs everything on the current thread.
#![deny(rust_2018_idioms, warnings)]
#![allow(clippy::type_complexity)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
#[cfg(all(not(target_os = "linux"), feature = "io-uring"))]
compile_error!("io_uring is a linux only feature.");
use std::future::Future;
// Cannot define a main macro when compiled into test harness.
// Workaround for https://github.com/rust-lang/rust/issues/62127.
#[cfg(all(feature = "macros", not(test)))]
pub use actix_macros::main;
#[cfg(feature = "macros")]
pub use actix_macros::test;
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub use actix_macros::{main, test};
mod arbiter;
mod builder;
mod runtime;
mod system;
pub use tokio::pin;
use tokio::task::JoinHandle;
pub use self::arbiter::Arbiter;
pub use self::builder::{Builder, SystemRunner};
pub use self::runtime::Runtime;
pub use self::system::System;
pub use self::{
arbiter::{Arbiter, ArbiterHandle},
runtime::Runtime,
system::{System, SystemRunner},
};
#[doc(hidden)]
pub use actix_threadpool as blocking;
/// Spawns a future on the current arbiter.
///
/// # Panics
///
/// This function panics if actix system is not running.
pub fn spawn<F>(f: F)
where
F: futures::Future<Output = ()> + 'static,
{
if !System::is_set() {
panic!("System is not running");
}
Arbiter::spawn(f);
}
/// Asynchronous signal handling
pub mod signal {
//! Asynchronous signal handling (Tokio re-exports).
#[cfg(unix)]
pub mod unix {
//! Unix specific signals (Tokio re-exports).
pub use tokio::signal::unix::*;
}
pub use tokio::signal::ctrl_c;
}
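// A minimal sketch (assuming a running `System`) of waiting for Ctrl-C before shutting down:
//
// ```ignore
// actix_rt::System::new().block_on(async {
//     actix_rt::signal::ctrl_c().await.expect("failed to listen for ctrl-c");
//     actix_rt::System::current().stop();
// });
// ```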
/// TCP/UDP/Unix bindings
pub mod net {
//! TCP/UDP/Unix bindings (mostly Tokio re-exports).
pub use tokio::net::UdpSocket;
pub use tokio::net::{TcpListener, TcpStream};
use std::{
future::Future,
io,
task::{Context, Poll},
};
use tokio::io::{AsyncRead, AsyncWrite, Interest};
#[cfg(unix)]
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
pub use tokio::{
io::Ready,
net::{TcpListener, TcpSocket, TcpStream, UdpSocket},
};
/// Extension trait over async read+write types that can also signal readiness.
#[doc(hidden)]
pub trait ActixStream: AsyncRead + AsyncWrite + Unpin {
/// Poll stream and check read readiness of Self.
///
/// See [tokio::net::TcpStream::poll_read_ready] for detail on intended use.
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
/// Poll stream and check write readiness of Self.
///
/// See [tokio::net::TcpStream::poll_write_ready] for detail on intended use.
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
}
impl ActixStream for TcpStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
mod unix {
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
}
#[cfg(unix)]
impl ActixStream for UnixStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
}
impl<Io: ActixStream + ?Sized> ActixStream for Box<Io> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_read_ready(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_write_ready(cx)
}
}
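// A hypothetical helper (the `wait_readable` name is illustrative, not part of this crate),
// showing how the readiness methods are intended to be polled generically:
//
// ```ignore
// async fn wait_readable<Io: ActixStream>(io: &Io) -> std::io::Result<Ready> {
//     std::future::poll_fn(|cx| io.poll_read_ready(cx)).await
// }
// ```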
pub use self::unix::*;
}
/// Utilities for tracking time.
pub mod time {
//! Utilities for tracking time (Tokio re-exports).
pub use tokio::time::{
interval, interval_at, sleep, sleep_until, timeout, Instant, Interval, Sleep, Timeout,
};
}
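// A minimal sketch combining the re-exported timer utilities (assuming an async context):
//
// ```ignore
// use std::time::Duration;
// use actix_rt::time::{sleep, timeout};
//
// // resolves to Ok(()) because the inner sleep finishes well within the timeout
// let res = timeout(Duration::from_secs(1), sleep(Duration::from_millis(10))).await;
// assert!(res.is_ok());
// ```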
pub mod task {
//! Task management (Tokio re-exports).
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
}
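// A minimal sketch (assuming an async context) of offloading disproportionately expensive work to
// the blocking thread pool, as recommended in the crate docs:
//
// ```ignore
// let sum = actix_rt::task::spawn_blocking(|| {
//     // CPU-heavy or blocking work runs off the single-threaded runtime
//     (0u64..1_000_000).sum::<u64>()
// })
// .await
// .expect("blocking task panicked");
// ```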
/// Spawns a future on the current thread as a new task.
///
/// If not immediately awaited, the task can be cancelled using [`JoinHandle::abort`].
///
/// The provided future is spawned as a new task; therefore, panics are caught.
///
/// # Panics
/// Panics if Actix system is not running.
///
/// # Examples
/// ```
/// # use std::time::Duration;
/// # actix_rt::Runtime::new().unwrap().block_on(async {
/// // task resolves successfully
/// assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
///
/// // task panics
/// assert!(actix_rt::spawn(async {
/// panic!("panic is caught at task boundary");
/// })
/// .await
/// .unwrap_err()
/// .is_panic());
///
/// // task is cancelled before completion
/// let handle = actix_rt::spawn(actix_rt::time::sleep(Duration::from_secs(100)));
/// handle.abort();
/// assert!(handle.await.unwrap_err().is_cancelled());
/// # });
/// ```
#[track_caller]
#[inline]
pub fn spawn<Fut>(f: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + 'static,
Fut::Output: 'static,
{
tokio::task::spawn_local(f)
pub use tokio::time::Instant;
pub use tokio::time::{delay_for, delay_until, Delay};
pub use tokio::time::{interval, interval_at, Interval};
pub use tokio::time::{timeout, Timeout};
}


@@ -1,29 +1,28 @@
use std::{future::Future, io};
use std::future::Future;
use std::io;
use tokio::{runtime, task::LocalSet};
use tokio::task::{JoinHandle, LocalSet};
/// A Tokio-based runtime proxy.
/// Single-threaded runtime provides a way to start reactor
/// and runtime on the current thread.
///
/// All spawned futures will be executed on the current thread. Therefore, there is no `Send` bound
/// on submitted futures.
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
#[derive(Debug)]
pub struct Runtime {
local: LocalSet,
rt: tokio::runtime::Runtime,
}
pub(crate) fn default_tokio_runtime() -> io::Result<tokio::runtime::Runtime> {
tokio::runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()
rt: runtime::Runtime,
}
impl Runtime {
/// Returns a new runtime initialized with default configuration values.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> io::Result<Self> {
let rt = default_tokio_runtime()?;
/// Returns a new runtime initialized with default configuration values.
pub fn new() -> io::Result<Runtime> {
let rt = runtime::Builder::new()
.enable_io()
.enable_time()
.basic_scheduler()
.build()?;
Ok(Runtime {
rt,
@@ -31,119 +30,63 @@ impl Runtime {
})
}
/// Offload a future onto the single-threaded runtime.
/// Spawn a future onto the single-threaded runtime.
///
/// The returned join handle can be used to await the future's result.
/// See [module level][mod] documentation for more details.
///
/// See [crate root][crate] documentation for more details.
/// [mod]: index.html
///
/// # Examples
/// ```
/// let rt = actix_rt::Runtime::new().unwrap();
///
/// ```rust,ignore
/// # use futures::{future, Future, Stream};
/// use actix_rt::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// let handle = rt.spawn(async {
/// rt.spawn(future::lazy(|_| {
/// println!("running on the runtime");
/// 42
/// });
///
/// assert_eq!(rt.block_on(handle).unwrap(), 42);
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
/// This function panics if the spawn fails. Failure occurs if the executor is currently at
/// capacity and is unable to spawn a new future.
#[track_caller]
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&self, future: F) -> &Self
where
F: Future<Output = ()> + 'static,
{
self.local.spawn_local(future);
self
}
/// Runs the provided future, blocking the current thread until the future
/// completes.
///
/// This function can be used to synchronously block the current thread
/// until the provided `future` has resolved either successfully or with an
/// error. The result of the future is then returned from this function
/// call.
///
/// Note that this function will **also** execute any spawned futures on the
/// current thread, but will **not** block until these other spawned futures
/// have completed. Once the function returns, any uncompleted futures
/// remain pending in the `Runtime` instance. These futures will not run
/// until `block_on` or `run` is called again.
///
/// The caller is responsible for ensuring that other spawned futures
/// complete execution by calling `block_on` or `run`.
pub fn block_on<F>(&mut self, f: F) -> F::Output
where
F: Future + 'static,
{
self.local.spawn_local(future)
}
/// Retrieves a reference to the underlying Tokio runtime associated with this instance.
///
/// The Tokio runtime is responsible for executing asynchronous tasks and managing
/// the event loop for an asynchronous Rust program. This method allows accessing
/// the runtime to interact with its features directly.
///
/// In a typical use case, you might need to share the same runtime between different
/// modules of your project. For example, a module might require a `tokio::runtime::Handle`
/// to spawn tasks on the same runtime, or the runtime itself to configure more complex
/// behaviours.
///
/// # Example
///
/// ```
/// use actix_rt::Runtime;
///
/// mod module_a {
/// pub fn do_something(handle: tokio::runtime::Handle) {
/// handle.spawn(async {
/// // Some asynchronous task here
/// });
/// }
/// }
///
/// mod module_b {
/// pub fn do_something_else(rt: &tokio::runtime::Runtime) {
/// rt.spawn(async {
/// // Another asynchronous task here
/// });
/// }
/// }
///
/// let actix_runtime = actix_rt::Runtime::new().unwrap();
/// let tokio_runtime = actix_runtime.tokio_runtime();
///
/// let handle = tokio_runtime.handle().clone();
///
/// module_a::do_something(handle);
/// module_b::do_something_else(tokio_runtime);
/// ```
///
/// # Returns
///
/// An immutable reference to the `tokio::runtime::Runtime` instance associated with this
/// `Runtime` instance.
///
/// # Note
///
/// While this method provides an immutable reference to the Tokio runtime, which is safe to share across threads,
/// be aware that spawning blocking tasks on the Tokio runtime could potentially impact the execution
/// of the Actix runtime. This is because Tokio is responsible for driving the Actix system,
/// and blocking tasks could delay or deadlock other tasks in the run loop.
pub fn tokio_runtime(&self) -> &tokio::runtime::Runtime {
&self.rt
}
/// Runs the provided future, blocking the current thread until the future completes.
///
/// This function can be used to synchronously block the current thread until the provided
/// `future` has resolved either successfully or with an error. The result of the future is
/// then returned from this function call.
///
/// Note that this function will also execute any spawned futures on the current thread, but
/// will not block until these other spawned futures have completed. Once the function returns,
/// any uncompleted futures remain pending in the `Runtime` instance. These futures will not run
/// until `block_on` or `run` is called again.
///
/// The caller is responsible for ensuring that other spawned futures complete execution by
/// calling `block_on` or `run`.
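///
/// # Examples
/// A minimal sketch:
/// ```
/// let rt = actix_rt::Runtime::new().unwrap();
/// assert_eq!(rt.block_on(async { 21 * 2 }), 42);
/// ```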
#[track_caller]
pub fn block_on<F>(&self, f: F) -> F::Output
where
F: Future,
{
self.local.block_on(&self.rt, f)
}
}
impl From<tokio::runtime::Runtime> for Runtime {
fn from(rt: tokio::runtime::Runtime) -> Self {
Self {
local: LocalSet::new(),
rt,
}
let res = self.local.block_on(&mut self.rt, f);
res
}
}


@@ -1,119 +1,77 @@
use std::{
cell::RefCell,
collections::HashMap,
future::Future,
io,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
};
use std::cell::RefCell;
use std::future::Future;
use std::io;
use std::sync::atomic::{AtomicUsize, Ordering};
use futures_core::ready;
use tokio::sync::{mpsc, oneshot};
use futures::channel::mpsc::UnboundedSender;
use tokio::task::LocalSet;
use crate::{arbiter::ArbiterHandle, Arbiter};
use crate::arbiter::{Arbiter, SystemCommand};
use crate::builder::{Builder, SystemRunner};
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static CURRENT: RefCell<Option<System>> = const { RefCell::new(None) };
);
/// A manager for a per-thread distributed async runtime.
/// System is a runtime manager.
#[derive(Clone, Debug)]
pub struct System {
id: usize,
sys_tx: mpsc::UnboundedSender<SystemCommand>,
/// Handle to the first [Arbiter] that is created with the System.
arbiter_handle: ArbiterHandle,
sys: UnboundedSender<SystemCommand>,
arbiter: Arbiter,
stop_on_panic: bool,
}
#[cfg(not(feature = "io-uring"))]
impl System {
/// Create a new system.
///
/// # Panics
/// Panics if underlying Tokio runtime can not be created.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> SystemRunner {
Self::with_tokio_rt(|| {
crate::runtime::default_tokio_runtime()
.expect("Default Actix (Tokio) runtime could not be created.")
})
}
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
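///
/// # Examples
/// A minimal sketch (mirroring this crate's tests) using a current-thread Tokio runtime:
/// ```no_run
/// let runner = actix_rt::System::with_tokio_rt(|| {
///     tokio::runtime::Builder::new_current_thread()
///         .enable_all()
///         .build()
///         .unwrap()
/// });
///
/// runner.block_on(async { /* system code goes here */ });
/// ```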
pub fn with_tokio_rt<F>(runtime_factory: F) -> SystemRunner
where
F: FnOnce() -> tokio::runtime::Runtime,
{
let (stop_tx, stop_rx) = oneshot::channel();
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
let rt = crate::runtime::Runtime::from(runtime_factory());
let sys_arbiter = rt.block_on(async { Arbiter::in_new_system() });
let system = System::construct(sys_tx, sys_arbiter.clone());
system
.tx()
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
.unwrap();
// init background system arbiter
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
rt.spawn(sys_ctrl);
SystemRunner { rt, stop_rx }
}
}
#[cfg(feature = "io-uring")]
impl System {
/// Create a new system.
///
/// # Panics
/// Panics if underlying Tokio runtime can not be created.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> SystemRunner {
SystemRunner
}
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(_: F) -> SystemRunner
where
F: FnOnce() -> tokio::runtime::Runtime,
{
unimplemented!("System::with_tokio_rt is not implemented for io-uring feature yet")
}
}
thread_local!(
static CURRENT: RefCell<Option<System>> = RefCell::new(None);
);
impl System {
/// Constructs new system and registers it on the current thread.
/// Constructs new system and sets it as current
pub(crate) fn construct(
sys_tx: mpsc::UnboundedSender<SystemCommand>,
arbiter_handle: ArbiterHandle,
sys: UnboundedSender<SystemCommand>,
arbiter: Arbiter,
stop_on_panic: bool,
) -> Self {
let sys = System {
sys_tx,
arbiter_handle,
sys,
arbiter,
stop_on_panic,
id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst),
};
System::set_current(sys.clone());
sys
}
/// Get current running system.
/// Build a new system with a customized tokio runtime.
///
/// # Panics
/// Panics if no system is registered on the current thread.
/// This allows customizing the runtime. See the struct-level docs on
/// `Builder` for more information.
pub fn builder() -> Builder {
Builder::new()
}
#[allow(clippy::new_ret_no_self)]
/// Create new system.
///
/// This method panics if it cannot create the Tokio runtime
pub fn new<T: Into<String>>(name: T) -> SystemRunner {
Self::builder().name(name).build()
}
#[allow(clippy::new_ret_no_self)]
/// Create new system using provided tokio Handle.
///
/// This method panics if it cannot spawn the system arbiter
pub fn run_in_tokio<T: Into<String>>(
name: T,
local: &LocalSet,
) -> impl Future<Output = io::Result<()>> {
Self::builder()
.name(name)
.build_async(local)
.run_nonblocking()
}
/// Get current running system.
pub fn current() -> System {
CURRENT.with(|cell| match *cell.borrow() {
Some(ref sys) => sys.clone(),
@@ -121,237 +79,67 @@ impl System {
})
}
/// Try to get current running system.
///
/// Returns `None` if no System has been started.
///
/// Unlike [`current`](Self::current), this never panics.
pub fn try_current() -> Option<System> {
CURRENT.with(|cell| cell.borrow().clone())
/// Set current running system.
pub(crate) fn is_set() -> bool {
CURRENT.with(|cell| cell.borrow().is_some())
}
/// Get a handle to the System's initial [Arbiter].
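///
/// A minimal sketch (assuming a running `System`) that spawns a task onto the system arbiter:
/// ```no_run
/// # actix_rt::System::new().block_on(async {
/// actix_rt::System::current().arbiter().spawn(async {
///     println!("running on the system arbiter");
/// });
/// # });
/// ```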
pub fn arbiter(&self) -> &ArbiterHandle {
&self.arbiter_handle
}
/// Check if there is a System registered on the current thread.
pub fn is_registered() -> bool {
CURRENT.with(|sys| sys.borrow().is_some())
}
/// Register given system on current thread.
/// Set current running system.
#[doc(hidden)]
pub fn set_current(sys: System) {
CURRENT.with(|cell| {
*cell.borrow_mut() = Some(sys);
CURRENT.with(|s| {
*s.borrow_mut() = Some(sys);
})
}
/// Numeric system identifier.
///
/// Useful when using multiple Systems.
/// Execute function with system reference.
pub fn with_current<F, R>(f: F) -> R
where
F: FnOnce(&System) -> R,
{
CURRENT.with(|cell| match *cell.borrow() {
Some(ref sys) => f(sys),
None => panic!("System is not running"),
})
}
/// System id
pub fn id(&self) -> usize {
self.id
}
/// Stop the system (with code 0).
/// Stop the system
pub fn stop(&self) {
self.stop_with_code(0)
}
/// Stop the system with a given exit code.
/// Stop the system with a particular exit code.
pub fn stop_with_code(&self, code: i32) {
let _ = self.sys_tx.send(SystemCommand::Exit(code));
let _ = self.sys.unbounded_send(SystemCommand::Exit(code));
}
pub(crate) fn tx(&self) -> &mpsc::UnboundedSender<SystemCommand> {
&self.sys_tx
}
}
/// Runner that keeps a [System]'s event loop alive until stop message is received.
#[cfg(not(feature = "io-uring"))]
#[must_use = "A SystemRunner does nothing unless `run` is called."]
#[derive(Debug)]
pub struct SystemRunner {
rt: crate::runtime::Runtime,
stop_rx: oneshot::Receiver<i32>,
}
#[cfg(not(feature = "io-uring"))]
impl SystemRunner {
/// Starts event loop and will return once [System] is [stopped](System::stop).
pub fn run(self) -> io::Result<()> {
let exit_code = self.run_with_code()?;
match exit_code {
0 => Ok(()),
nonzero => Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", nonzero),
)),
}
}
/// Runs the event loop until [stopped](System::stop_with_code), returning the exit code.
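///
/// A minimal sketch mirroring this crate's tests:
/// ```no_run
/// let runner = actix_rt::System::new();
/// actix_rt::System::current().stop_with_code(42);
/// let code = runner.run_with_code().unwrap();
/// assert_eq!(code, 42);
/// ```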
pub fn run_with_code(self) -> io::Result<i32> {
let SystemRunner { rt, stop_rx, .. } = self;
// run loop
rt.block_on(stop_rx)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
}
/// Retrieves a reference to the underlying [Actix runtime](crate::Runtime) associated with this
/// `SystemRunner` instance.
///
/// The Actix runtime is responsible for managing the event loop for an Actix system and
/// executing asynchronous tasks. This method provides access to the runtime, allowing direct
/// interaction with its features.
///
/// In a typical use case, you might need to share the same runtime between different
/// parts of your project. For example, some components might require a [`Runtime`] to spawn
/// tasks on the same runtime.
///
/// Read more in the documentation for [`Runtime`].
///
/// # Examples
///
/// ```
/// let system_runner = actix_rt::System::new();
/// let actix_runtime = system_runner.runtime();
///
/// // Use the runtime to spawn an async task or perform other operations
/// ```
///
/// # Note
///
/// While this method provides an immutable reference to the Actix runtime, which is safe to
/// share across threads, be aware that spawning blocking tasks on the Actix runtime could
/// potentially impact system performance. This is because the Actix runtime is responsible for
/// driving the system, and blocking tasks could delay other tasks in the run loop.
///
/// [`Runtime`]: crate::Runtime
pub fn runtime(&self) -> &crate::runtime::Runtime {
&self.rt
}
/// Runs the provided future, blocking the current thread until the future completes.
#[track_caller]
#[inline]
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
self.rt.block_on(fut)
}
}
/// Runner that keeps a [System]'s event loop alive until stop message is received.
#[cfg(feature = "io-uring")]
#[must_use = "A SystemRunner does nothing unless `run` is called."]
#[derive(Debug)]
pub struct SystemRunner;
#[cfg(feature = "io-uring")]
impl SystemRunner {
/// Starts event loop and will return once [System] is [stopped](System::stop).
pub fn run(self) -> io::Result<()> {
unimplemented!("SystemRunner::run is not implemented for io-uring feature yet");
}
/// Runs the event loop until [stopped](System::stop_with_code), returning the exit code.
pub fn run_with_code(self) -> io::Result<i32> {
unimplemented!("SystemRunner::run_with_code is not implemented for io-uring feature yet");
}
/// Runs the provided future, blocking the current thread until the future completes.
#[inline]
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
tokio_uring::start(async move {
let (stop_tx, stop_rx) = oneshot::channel();
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
let sys_arbiter = Arbiter::in_new_system();
let system = System::construct(sys_tx, sys_arbiter.clone());
system
.tx()
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
.unwrap();
// init background system arbiter
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
tokio_uring::spawn(sys_ctrl);
let res = fut.await;
drop(stop_rx);
res
})
}
}
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, ArbiterHandle),
DeregisterArbiter(usize),
}
/// There is one `SystemController` per [System]. It runs in the background, keeping track of
/// [Arbiter]s and is able to distribute a system-wide stop command.
#[derive(Debug)]
pub(crate) struct SystemController {
stop_tx: Option<oneshot::Sender<i32>>,
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
arbiters: HashMap<usize, ArbiterHandle>,
}
impl SystemController {
pub(crate) fn new(
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
stop_tx: oneshot::Sender<i32>,
) -> Self {
SystemController {
cmd_rx,
stop_tx: Some(stop_tx),
arbiters: HashMap::with_capacity(4),
}
}
}
impl Future for SystemController {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match ready!(self.cmd_rx.poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process system command
Some(cmd) => match cmd {
SystemCommand::Exit(code) => {
// stop all arbiters
for arb in self.arbiters.values() {
arb.stop();
}
// stop event loop
// will only fire once
if let Some(stop_tx) = self.stop_tx.take() {
let _ = stop_tx.send(code);
}
}
SystemCommand::RegisterArbiter(id, arb) => {
self.arbiters.insert(id, arb);
}
SystemCommand::DeregisterArbiter(id) => {
self.arbiters.remove(&id);
}
},
}
}
pub(crate) fn sys(&self) -> &UnboundedSender<SystemCommand> {
&self.sys
}
/// Return status of 'stop_on_panic' option which controls whether the System is stopped when an
/// uncaught panic is thrown from a worker thread.
pub fn stop_on_panic(&self) -> bool {
self.stop_on_panic
}
/// System arbiter
pub fn arbiter(&self) -> &Arbiter {
&self.arbiter
}
/// This function will start the tokio runtime and will finish once the
/// `System::stop()` message gets called.
/// Function `f` gets called within the tokio runtime context.
pub fn run<F>(f: F) -> io::Result<()>
where
F: FnOnce() + 'static,
{
Self::builder().run(f)
}
}


@@ -1,17 +0,0 @@
//! Checks that test macro does not cause problems in the presence of imports named "test" that
//! could be either a module with test items or the "test with runtime" macro itself.
//!
//! Before actix/actix-net#399 was implemented, this macro was running twice. The first run output
//! `#[test]` and it got run again since it was in scope.
//!
//! Prevented by using the fully-qualified test marker (`#[::core::prelude::v1::test]`).
#![cfg(feature = "macros")]
use actix_rt::time as test;
#[actix_rt::test]
async fn test_naming_conflict() {
use test as time;
time::sleep(std::time::Duration::from_millis(2)).await;
}


@@ -1,378 +0,0 @@
#![allow(missing_docs)]
use std::{
future::Future,
time::{Duration, Instant},
};
use actix_rt::{task::JoinError, Arbiter, System};
#[cfg(not(feature = "io-uring"))]
use {
std::{sync::mpsc::channel, thread},
tokio::sync::oneshot,
};
#[test]
fn await_for_timer() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
tokio::time::sleep(time).await;
});
assert!(
instant.elapsed() >= time,
"Block on should poll awaited future to completion"
);
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn run_with_code() {
let sys = System::new();
System::current().stop_with_code(42);
let exit_code = sys.run_with_code().expect("system stop should not error");
assert_eq!(exit_code, 42);
}
#[test]
fn join_another_arbiter() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on another arbiter should complete only when it calls stop"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || {
actix_rt::spawn(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
});
});
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on an arbiter that has used actix_rt::spawn should wait for said future"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.stop();
arbiter.join().unwrap();
});
assert!(
instant.elapsed() < time,
"Premature stop of arbiter should conclude regardless of it's current state"
);
}
#[test]
fn non_static_block_on() {
let string = String::from("test_str");
let string = string.as_str();
let sys = System::new();
sys.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
let rt = actix_rt::Runtime::new().unwrap();
rt.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
}
#[test]
fn wait_for_spawns() {
let rt = actix_rt::Runtime::new().unwrap();
let handle = rt.spawn(async {
println!("running on the runtime");
// panic is caught at task boundary
panic!("intentional test panic");
});
assert!(rt.block_on(handle).is_err());
}
// Temporarily disabled tests for the io-uring feature.
// They should be re-enabled when possible.
#[cfg(not(feature = "io-uring"))]
#[test]
fn arbiter_spawn_fn_runs() {
let _ = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || tx.send(42).unwrap());
let num = rx.recv().unwrap();
assert_eq!(num, 42);
arbiter.stop();
arbiter.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn arbiter_handle_spawn_fn_runs() {
let sys = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
let handle = arbiter.handle();
drop(arbiter);
handle.spawn_fn(move || {
tx.send(42).unwrap();
System::current().stop()
});
let num = rx.recv_timeout(Duration::from_secs(2)).unwrap();
assert_eq!(num, 42);
handle.stop();
sys.run().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn arbiter_drop_no_panic_fn() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn_fn(|| panic!("test"));
arbiter.stop();
arbiter.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn arbiter_drop_no_panic_fut() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn(async { panic!("test") });
arbiter.stop();
arbiter.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn system_arbiter_spawn() {
let runner = System::new();
let (tx, rx) = oneshot::channel();
let sys = System::current();
thread::spawn(|| {
// this thread will have no arbiter in its thread-local, so the call will panic
Arbiter::current();
})
.join()
.unwrap_err();
let thread = thread::spawn(|| {
// this thread will have no arbiter in its thread-local, so use the system handle instead
System::set_current(sys);
let sys = System::current();
let arb = sys.arbiter();
arb.spawn(async move {
tx.send(42u32).unwrap();
System::current().stop();
});
});
assert_eq!(runner.block_on(rx).unwrap(), 42);
thread.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn system_stop_stops_arbiters() {
let sys = System::new();
let arb = Arbiter::new();
// arbiter should be alive to receive spawn msg
assert!(Arbiter::current().spawn_fn(|| {}));
assert!(arb.spawn_fn(|| {}));
System::current().stop();
sys.run().unwrap();
// account for slightly slow thread de-spawns
thread::sleep(Duration::from_millis(500));
// arbiter should be dead and return false
assert!(!Arbiter::current().spawn_fn(|| {}));
assert!(!arb.spawn_fn(|| {}));
arb.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn new_system_with_tokio() {
let (tx, rx) = channel();
let res = System::with_tokio_rt(move || {
tokio::runtime::Builder::new_multi_thread()
.enable_io()
.enable_time()
.thread_keep_alive(Duration::from_millis(1000))
.worker_threads(2)
.max_blocking_threads(2)
.on_thread_start(|| {})
.on_thread_stop(|| {})
.build()
.unwrap()
})
.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
tokio::task::spawn(async move {
tx.send(42).unwrap();
})
.await
.unwrap();
123usize
});
assert_eq!(res, 123);
assert_eq!(rx.recv().unwrap(), 42);
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn new_arbiter_with_tokio() {
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
let _ = System::new();
let arb = Arbiter::with_tokio_rt(|| {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
});
let counter = Arc::new(AtomicBool::new(true));
let counter1 = counter.clone();
let did_spawn = arb.spawn(async move {
actix_rt::time::sleep(Duration::from_millis(1)).await;
counter1.store(false, Ordering::SeqCst);
Arbiter::current().stop();
});
assert!(did_spawn);
arb.join().unwrap();
assert!(!counter.load(Ordering::SeqCst));
}
#[test]
#[should_panic]
fn no_system_current_panic() {
System::current();
}
#[test]
#[should_panic]
fn no_system_arbiter_new_panic() {
Arbiter::new();
}
#[test]
fn try_current_no_system() {
assert!(System::try_current().is_none())
}
#[test]
fn try_current_with_system() {
System::new().block_on(async { assert!(System::try_current().is_some()) });
}
#[allow(clippy::unit_cmp)]
#[test]
fn spawn_local() {
System::new().block_on(async {
// demonstrate that spawn -> R is strictly more capable than spawn -> ()
assert_eq!(actix_rt::spawn(async {}).await.unwrap(), ());
assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
assert!(actix_rt::spawn(async { panic!("") }).await.is_err());
actix_rt::spawn(async { tokio::time::sleep(Duration::from_millis(50)).await })
.await
.unwrap();
fn g<F: Future<Output = Result<(), JoinError>>>(_f: F) {}
g(actix_rt::spawn(async {}));
// g(actix_rt::spawn(async { 1 })); // compile err
fn h<F: Future<Output = Result<R, JoinError>>, R>(_f: F) {}
h(actix_rt::spawn(async {}));
h(actix_rt::spawn(async { 1 }));
})
}
#[cfg(all(target_os = "linux", feature = "io-uring"))]
#[test]
fn tokio_uring_arbiter() {
System::new().block_on(async {
let (tx, rx) = std::sync::mpsc::channel();
Arbiter::new().spawn(async move {
let handle = actix_rt::spawn(async move {
let f = tokio_uring::fs::File::create("test.txt").await.unwrap();
let buf = b"Hello World!";
let (res, _) = f.write_all_at(&buf[..], 0).await;
assert!(res.is_ok());
f.sync_all().await.unwrap();
f.close().await.unwrap();
std::fs::remove_file("test.txt").unwrap();
});
handle.await.unwrap();
tx.send(true).unwrap();
});
assert!(rx.recv().unwrap());
})
}


@@ -1,232 +1,194 @@
# Changes
## Unreleased
## [1.0.1] - 2019-12-29
## 2.5.1
### Changed
- Fix panic in test server.
- Minimum supported Rust version (MSRV) is now 1.71.
* Rename `.start()` method to `.run()`
## 2.5.0
## [1.0.0] - 2019-12-11
- Update `mio` dependency to `1`.
### Changed
## 2.4.0
* Use actix-net releases
- Update `tokio-uring` dependency to `0.5`.
- Minimum supported Rust version (MSRV) is now 1.70.
## 2.3.0
## [1.0.0-alpha.4] - 2019-12-08
- Add support for MultiPath TCP (MPTCP) with `MpTcp` enum and `ServerBuilder::mptcp()` method.
- Minimum supported Rust version (MSRV) is now 1.65.
### Changed
## 2.2.0
* Use actix-service 1.0.0-alpha.4
- Minimum supported Rust version (MSRV) is now 1.59.
- Update `tokio-uring` dependency to `0.4`.
## [1.0.0-alpha.3] - 2019-12-07
## 2.1.1
### Changed
- No significant changes since `2.1.0`.
* Migrate to tokio 0.2
## 2.1.0
### Fixed
- Update `tokio-uring` dependency to `0.3`.
- Logs emitted now use the `tracing` crate with `log` compatibility.
- Wait for accept thread to stop before sending completion signal.
* Fix compilation on non-unix platforms
## 2.0.0
* Better handling server configuration
- No significant changes since `2.0.0-rc.4`.
## 2.0.0-rc.4
## [1.0.0-alpha.2] - 2019-12-02
- Update `tokio-uring` dependency to `0.2`.
### Changed
## 2.0.0-rc.3
* Simplify server service (remove actix-server-config)
- No significant changes since `2.0.0-rc.2`.
* Allow to wait on `Server` until server stops
## 2.0.0-rc.2
- Simplify `TestServer`.
## [0.8.0-alpha.1] - 2019-11-22
## 2.0.0-rc.1
### Changed
- Hide implementation details of `Server`.
- `Server` now runs only after awaiting it.
* Migrate to `std::future`
## 2.0.0-beta.9
- Restore `Arbiter` support lost in `beta.8`.
## [0.7.0] - 2019-10-04
## 2.0.0-beta.8
### Changed
- Fix non-unix signal handler.
* Update `rustls` to 0.16
* Minimum required Rust version upped to 1.37.0
## 2.0.0-beta.7
- Server can be started in regular Tokio runtime.
- Expose new `Server` type whose `Future` impl resolves when server stops.
- Rename `Server` to `ServerHandle`.
- Add `Server::handle` to obtain handle to server.
- Rename `ServerBuilder::{maxconn => max_concurrent_connections}`.
- Deprecate crate-level `new` shortcut for server builder.
- Minimum supported Rust version (MSRV) is now 1.52.
## [0.6.1] - 2019-09-25
## 2.0.0-beta.6
### Added
- Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux.
- Server no longer listens to `SIGHUP` signal. Previously, the received signal was not used but did block subsequent exit signals from working.
- Remove `config` module. `ServiceConfig`, `ServiceRuntime` public types are removed due to this change.
- Remove `ServerBuilder::configure`.
* Add UDS listening support to `ServerBuilder`
## 2.0.0-beta.5
- Server shutdown notifies all workers to exit regardless of whether shutdown is graceful. This causes all workers to shut down immediately in the force-shutdown case.
## [0.6.0] - 2019-07-18
## 2.0.0-beta.4
### Added
- Prevent panic when `shutdown_timeout` is very large. [f9262db]
* Support Unix domain sockets #3
## 2.0.0-beta.3
- Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`.
- Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop.
- Add `ServerBuilder::worker_max_blocking_threads` to customize blocking thread pool size.
- Update `actix-rt` to `2.0.0`.
## [0.5.1] - 2019-05-18
## 2.0.0-beta.2
### Changed
- Merge `actix-testing` to `actix-server` as `test_server` mod.
* ServerBuilder::shutdown_timeout() accepts u64
## 2.0.0-beta.1
- Added explicit info log message on accept queue pause.
- Prevent double registration of sockets when back-pressure is resolved.
- Update `mio` dependency to `0.7.3`.
- Remove `socket2` dependency.
- `ServerBuilder::backlog` now accepts `u32` instead of `i32`.
- Remove `AcceptNotify` type and pass `WakerQueue` to `Worker` to wake up `Accept`'s `Poll`.
- Convert `mio::net::TcpStream` to `actix_rt::net::TcpStream`(`UnixStream` for uds) using `FromRawFd` and `IntoRawFd`(`FromRawSocket` and `IntoRawSocket` on windows).
- Remove `AsyncRead` and `AsyncWrite` trait bound for `socket::FromStream` trait.
## [0.5.0] - 2019-05-12
## 1.0.4
### Added
- Update actix-codec to 0.3.0.
- Workers must be greater than 0.
* Add `Debug` impl for `SslError`
## 1.0.3
* Derive debug for `Server` and `ServerCommand`
- Replace deprecated `net2` crate with `socket2`.
### Changed
## 1.0.2
* Upgrade to actix-service 0.4
- Avoid error by calling `reregister()` on Windows.
## 1.0.1
## [0.4.3] - 2019-04-16
- Rename `.start()` method to `.run()`
### Added
## 1.0.0
* Re-export `IoStream` trait
- Use actix-net releases
### Changed
## 1.0.0-alpha.4
* Depend on `ssl` and `rust-tls` features from actix-server-config
- Use actix-service 1.0.0-alpha.4
## 1.0.0-alpha.3
## [0.4.2] - 2019-03-30
- Migrate to tokio 0.2
- Fix compilation on non-unix platforms
- Better handling server configuration
### Fixed
## 1.0.0-alpha.2
* Fix SIGINT force shutdown
- Simplify server service (remove actix-server-config)
- Allow to wait on `Server` until server stops
## 0.8.0-alpha.1
## [0.4.1] - 2019-03-14
- Migrate to `std::future`
### Added
## 0.7.0
* `SystemRuntime::on_start()` - allow to run future before server service initialization
- Update `rustls` to 0.16
- Minimum required Rust version upped to 1.37.0
## 0.6.1
## [0.4.0] - 2019-03-12
- Add UDS listening support to `ServerBuilder`
### Changed
## 0.6.0
* Use `ServerConfig` for service factory
- Support Unix domain sockets #3
* Wrap tcp socket to `Io` type
## 0.5.1
* Upgrade actix-service
- ServerBuilder::shutdown_timeout() accepts u64
## 0.5.0
## [0.3.1] - 2019-03-04
- Add `Debug` impl for `SslError`
- Derive debug for `Server` and `ServerCommand`
- Upgrade to actix-service 0.4
### Added
## 0.4.3
* Add `ServerBuilder::maxconnrate`, which sets the maximum per-worker number of concurrent connections
- Re-export `IoStream` trait
- Depend on `ssl` and `rust-tls` features from actix-server-config
* Add helper ssl error `SslError`
## 0.4.2
- Fix SIGINT force shutdown
### Changed
## 0.4.1
* Rename `StreamServiceFactory` to `ServiceFactory`
- `SystemRuntime::on_start()` - allow to run future before server service initialization
* Deprecate `StreamServiceFactory`
## 0.4.0
- Use `ServerConfig` for service factory
- Wrap tcp socket to `Io` type
- Upgrade actix-service
## [0.3.0] - 2019-03-02
## 0.3.1
### Changed
- Add `ServerBuilder::maxconnrate`, which sets the maximum per-worker number of concurrent connections
- Add helper ssl error `SslError`
- Rename `StreamServiceFactory` to `ServiceFactory`
- Deprecate `StreamServiceFactory`
* Use new `NewService` trait
## 0.3.0
- Use new `NewService` trait
## [0.2.1] - 2019-02-09
## 0.2.1
### Changed
- Drop service response
* Drop service response
## 0.2.0
- Migrate to actix-service 0.2
- Updated rustls dependency
## [0.2.0] - 2019-02-01
## 0.1.3
### Changed
- Fix max concurrent connections handling
* Migrate to actix-service 0.2
## 0.1.2
* Updated rustls dependency
- rename ServiceConfig::rt() to ServiceConfig::apply()
- Fix back-pressure for concurrent ssl handshakes
## 0.1.1
## [0.1.3] - 2018-12-21
- Fix signal handling on windows
### Fixed
## 0.1.0
* Fix max concurrent connections handling
- Move server to separate crate
## [0.1.2] - 2018-12-12
### Changed
* rename ServiceConfig::rt() to ServiceConfig::apply()
### Fixed
* Fix back-pressure for concurrent ssl handshakes
## [0.1.1] - 2018-12-11
* Fix signal handling on windows
## [0.1.0] - 2018-12-09
* Move server to separate crate


@@ -1,50 +1,42 @@
[package]
name = "actix-server"
version = "2.5.1"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
"Ali MJ Al-Nasrawy <alimjalnasrawy@gmail.com>",
]
description = "General purpose TCP server built for the Actix ecosystem"
keywords = ["network", "tcp", "server", "framework", "async"]
categories = ["network-programming", "asynchronous"]
version = "1.0.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix server - General purpose tcp server"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net/tree/master/actix-server"
license = "MIT OR Apache-2.0"
edition.workspace = true
rust-version.workspace = true
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-server/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018"
workspace = ".."
[package.metadata.cargo_check_external_types]
allowed_external_types = ["tokio::*"]
[lib]
name = "actix_server"
path = "src/lib.rs"
[features]
default = []
io-uring = ["tokio-uring", "actix-rt/io-uring"]
[dependencies]
actix-rt = { version = "2.10", default-features = false }
actix-service = "2"
actix-utils = "3"
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] }
futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] }
mio = { version = "1", features = ["os-poll", "net"] }
socket2 = "0.5"
tokio = { version = "1.44.2", features = ["sync"] }
tracing = { version = "0.1.30", default-features = false, features = ["log"] }
actix-service = "1.0.1"
actix-rt = "1.0.0"
actix-codec = "0.2.0"
actix-utils = "1.0.4"
# runtime for `io-uring` feature
[target.'cfg(target_os = "linux")'.dependencies]
tokio-uring = { version = "0.5", optional = true }
log = "0.4"
num_cpus = "1.11"
mio = "0.6.19"
net2 = "0.2"
futures = "0.3.1"
slab = "0.4"
# unix domain sockets
mio-uds = { version = "0.6.7" }
[dev-dependencies]
actix-codec = "0.5"
actix-rt = "2.8"
bytes = "1"
futures-util = { version = "0.3.17", default-features = false, features = ["sink", "async-await-macro"] }
pretty_env_logger = "0.5"
tokio = { version = "1.44.2", features = ["io-util", "rt-multi-thread", "macros", "fs"] }
[lints]
workspace = true
bytes = "0.5"
env_logger = "0.7"
actix-testing = "1.0.0"


@@ -1,21 +0,0 @@
# actix-server
> General purpose TCP server built for the Actix ecosystem.
<!-- prettier-ignore-start -->
[![crates.io](https://img.shields.io/crates/v/actix-server?label=latest)](https://crates.io/crates/actix-server)
[![Documentation](https://docs.rs/actix-server/badge.svg?version=2.5.1)](https://docs.rs/actix-server/2.5.1)
[![Version](https://img.shields.io/badge/rustc-1.52+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-server.svg)
<br />
[![Dependency Status](https://deps.rs/crate/actix-server/2.5.1/status.svg)](https://deps.rs/crate/actix-server/2.5.1)
![Download](https://img.shields.io/crates/d/actix-server.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
<!-- prettier-ignore-end -->
## Resources
- [Library Documentation](https://docs.rs/actix-server)
- [Examples](/actix-server/examples)


@@ -1,98 +0,0 @@
//! Simple file-reader TCP server with framed stream.
//!
//! Using the following command:
//!
//! ```sh
//! nc 127.0.0.1 8080
//! ```
//!
//! Follow the prompt and enter a file path, relative or absolute.
#![allow(missing_docs)]
use std::io;
use actix_codec::{Framed, LinesCodec};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::{fn_service, ServiceFactoryExt as _};
use futures_util::{SinkExt as _, StreamExt as _};
use tokio::{fs::File, io::AsyncReadExt as _};
async fn run() -> io::Result<()> {
pretty_env_logger::formatted_timed_builder()
.parse_env(pretty_env_logger::env_logger::Env::default().default_filter_or("info"));
let addr = ("127.0.0.1", 8080);
tracing::info!("starting server on port: {}", &addr.0);
// Bind socket address and start worker(s). By default, the server uses the number of physical
// CPU cores as the worker count. For this reason, the closure passed to bind needs to return
// a service *factory*, so that a service can be created once per worker.
Server::build()
.bind("file-reader", addr, move || {
fn_service(move |stream: TcpStream| async move {
// set up codec to use with I/O resource
let mut framed = Framed::new(stream, LinesCodec::default());
loop {
// prompt for file name
framed.send("Type file name to return:").await?;
// wait for next line
match framed.next().await {
Some(Ok(line)) => {
match File::open(&line).await {
Ok(mut file) => {
tracing::info!("reading file: {}", &line);
// read file into String buffer
let mut buf = String::new();
file.read_to_string(&mut buf).await?;
// send String into framed object
framed.send(buf).await?;
// break out of the loop
break;
}
Err(err) => {
tracing::error!("{}", err);
framed
.send("File not found or not readable. Try again.")
.await?;
continue;
}
};
}
// not being able to read a line from the stream is unrecoverable
Some(Err(err)) => return Err(err),
// This EOF won't be hit.
None => continue,
}
}
// close connection after file has been copied to TCP stream
Ok(())
})
.map_err(|err| tracing::error!("service error: {:?}", err))
})?
.workers(2)
.run()
.await
}
#[tokio::main]
async fn main() -> io::Result<()> {
run().await?;
Ok(())
}
// alternatively:
// #[actix_rt::main]
// async fn main() -> io::Result<()> {
// run().await?;
// Ok(())
// }


@@ -1,101 +0,0 @@
//! Simple composite-service TCP echo server.
//!
//! Using the following command:
//!
//! ```sh
//! nc 127.0.0.1 8080
//! ```
//!
//! Start typing. When you press enter the typed line will be echoed back. The server will log
//! the length of each line it echoes and the total size of data sent when the connection is closed.
use std::{
io,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::{fn_service, ServiceFactoryExt as _};
use bytes::BytesMut;
use futures_util::future::ok;
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
async fn run() -> io::Result<()> {
pretty_env_logger::formatted_timed_builder()
.parse_env(pretty_env_logger::env_logger::Env::default().default_filter_or("info"));
let count = Arc::new(AtomicUsize::new(0));
let addr = ("127.0.0.1", 8080);
tracing::info!("starting server on port: {}", &addr.0);
// Bind socket address and start worker(s). By default, the server uses the number of physical
// CPU cores as the worker count. For this reason, the closure passed to bind needs to return
// a service *factory*, so that a service can be created once per worker.
Server::build()
.bind("echo", addr, move || {
let count = Arc::clone(&count);
let num2 = Arc::clone(&count);
fn_service(move |mut stream: TcpStream| {
let count = Arc::clone(&count);
async move {
let num = count.fetch_add(1, Ordering::SeqCst);
let num = num + 1;
let mut size = 0;
let mut buf = BytesMut::new();
loop {
match stream.read_buf(&mut buf).await {
// end of stream; bail from loop
Ok(0) => break,
// more bytes to process
Ok(bytes_read) => {
tracing::info!("[{}] read {} bytes", num, bytes_read);
stream.write_all(&buf[size..]).await.unwrap();
size += bytes_read;
}
// stream error; bail from loop with error
Err(err) => {
tracing::error!("stream error: {:?}", err);
return Err(());
}
}
}
// send data down service pipeline
Ok((buf.freeze(), size))
}
})
.map_err(|err| tracing::error!("service error: {:?}", err))
.and_then(move |(_, size)| {
let num = num2.load(Ordering::SeqCst);
tracing::info!("[{}] total bytes read: {}", num, size);
ok(size)
})
})?
.workers(2)
.run()
.await
}
#[tokio::main]
async fn main() -> io::Result<()> {
run().await?;
Ok(())
}
// alternatively:
// #[actix_rt::main]
// async fn main() -> io::Result<()> {
// run().await?;
// Ok(())
// }


@@ -1,462 +1,456 @@
use std::{io, thread, time::Duration};
use std::sync::mpsc as sync_mpsc;
use std::time::Duration;
use std::{io, thread};
use actix_rt::time::Instant;
use mio::{Interest, Poll, Token as MioToken};
use tracing::{debug, error, info};
use actix_rt::time::{delay_until, Instant};
use actix_rt::System;
use log::{error, info};
use slab::Slab;
use crate::{
availability::Availability,
socket::MioListener,
waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN},
worker::{Conn, ServerWorker, WorkerHandleAccept, WorkerHandleServer},
ServerBuilder, ServerHandle,
};
use crate::server::Server;
use crate::socket::{SocketAddr, SocketListener, StdListener};
use crate::worker::{Conn, WorkerClient};
use crate::Token;
const TIMEOUT_DURATION_ON_ERROR: Duration = Duration::from_millis(510);
pub(crate) enum Command {
Pause,
Resume,
Stop,
Worker(WorkerClient),
}
struct ServerSocketInfo {
token: usize,
lst: MioListener,
/// Timeout is used to mark the deadline when this socket's listener should be registered again
/// after an error.
timeout: Option<actix_rt::time::Instant>,
addr: SocketAddr,
token: Token,
sock: SocketListener,
timeout: Option<Instant>,
}
/// Poll instance of the server.
pub(crate) struct Accept {
poll: Poll,
waker_queue: WakerQueue,
handles: Vec<WorkerHandleAccept>,
srv: ServerHandle,
next: usize,
avail: Availability,
/// Use the smallest duration from the sockets' timeouts.
timeout: Option<Duration>,
paused: bool,
#[derive(Clone)]
pub(crate) struct AcceptNotify(mio::SetReadiness);
impl AcceptNotify {
pub(crate) fn new(ready: mio::SetReadiness) -> Self {
AcceptNotify(ready)
}
pub(crate) fn notify(&self) {
let _ = self.0.set_readiness(mio::Ready::readable());
}
}
impl Accept {
impl Default for AcceptNotify {
fn default() -> Self {
AcceptNotify::new(mio::Registration::new2().1)
}
}
pub(crate) struct AcceptLoop {
cmd_reg: Option<mio::Registration>,
cmd_ready: mio::SetReadiness,
notify_reg: Option<mio::Registration>,
notify_ready: mio::SetReadiness,
tx: sync_mpsc::Sender<Command>,
rx: Option<sync_mpsc::Receiver<Command>>,
srv: Option<Server>,
}
impl AcceptLoop {
pub fn new(srv: Server) -> AcceptLoop {
let (tx, rx) = sync_mpsc::channel();
let (cmd_reg, cmd_ready) = mio::Registration::new2();
let (notify_reg, notify_ready) = mio::Registration::new2();
AcceptLoop {
tx,
cmd_ready,
cmd_reg: Some(cmd_reg),
notify_ready,
notify_reg: Some(notify_reg),
rx: Some(rx),
srv: Some(srv),
}
}
pub fn send(&self, msg: Command) {
let _ = self.tx.send(msg);
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
}
pub fn get_notify(&self) -> AcceptNotify {
AcceptNotify::new(self.notify_ready.clone())
}
pub(crate) fn start(
sockets: Vec<(usize, MioListener)>,
builder: &ServerBuilder,
) -> io::Result<(WakerQueue, Vec<WorkerHandleServer>, thread::JoinHandle<()>)> {
let handle_server = ServerHandle::new(builder.cmd_tx.clone());
&mut self,
socks: Vec<(Token, StdListener)>,
workers: Vec<WorkerClient>,
) {
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
// construct poll instance and its waker
let poll = Poll::new()?;
let waker_queue = WakerQueue::new(poll.registry())?;
// start workers and collect handles
let (handles_accept, handles_server) = (0..builder.threads)
.map(|idx| {
// clone service factories
let factories = builder
.factories
.iter()
.map(|f| f.clone_factory())
.collect::<Vec<_>>();
// start worker using service factories
ServerWorker::start(idx, factories, waker_queue.clone(), builder.worker_config)
})
.collect::<io::Result<Vec<_>>>()?
.into_iter()
.unzip();
let (mut accept, mut sockets) = Accept::new_with_sockets(
poll,
waker_queue.clone(),
sockets,
handles_accept,
handle_server,
)?;
let accept_handle = thread::Builder::new()
.name("actix-server acceptor".to_owned())
.spawn(move || accept.poll_with(&mut sockets))
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
Ok((waker_queue, handles_server, accept_handle))
}
fn new_with_sockets(
poll: Poll,
waker_queue: WakerQueue,
sockets: Vec<(usize, MioListener)>,
accept_handles: Vec<WorkerHandleAccept>,
server_handle: ServerHandle,
) -> io::Result<(Accept, Box<[ServerSocketInfo]>)> {
let sockets = sockets
.into_iter()
.map(|(token, mut lst)| {
// Start listening for incoming connections
poll.registry()
.register(&mut lst, MioToken(token), Interest::READABLE)?;
Ok(ServerSocketInfo {
token,
lst,
timeout: None,
})
})
.collect::<io::Result<_>>()?;
let mut avail = Availability::default();
// Assume all handles are avail at construct time.
avail.set_available_all(&accept_handles);
let accept = Accept {
poll,
waker_queue,
handles: accept_handles,
srv: server_handle,
next: 0,
avail,
timeout: None,
paused: false,
};
Ok((accept, sockets))
}
/// blocking wait for readiness events triggered by mio
fn poll_with(&mut self, sockets: &mut [ServerSocketInfo]) {
let mut events = mio::Events::with_capacity(256);
loop {
if let Err(err) = self.poll.poll(&mut events, self.timeout) {
match err.kind() {
io::ErrorKind::Interrupted => {}
_ => panic!("Poll error: {}", err),
}
}
for event in events.iter() {
let token = event.token();
match token {
WAKER_TOKEN => {
let exit = self.handle_waker(sockets);
if exit {
info!("accept thread stopped");
return;
}
}
_ => {
let token = usize::from(token);
self.accept(sockets, token);
}
}
}
// check for timeout and re-register sockets
self.process_timeout(sockets);
}
}
fn handle_waker(&mut self, sockets: &mut [ServerSocketInfo]) -> bool {
// This is a loop because the command handling in the previous version was
// a loop that would try to drain the command channel. It's not yet known
// whether it's necessary/good practice to actively drain the waker queue.
loop {
// Take guard with every iteration so no new interests can be added until the current
// task is done. Take care not to take the guard again inside this loop.
let mut guard = self.waker_queue.guard();
#[allow(clippy::significant_drop_in_scrutinee)]
match guard.pop_front() {
// Worker notified it became available.
Some(WakerInterest::WorkerAvailable(idx)) => {
drop(guard);
self.avail.set_available(idx, true);
if !self.paused {
self.accept_all(sockets);
}
}
// A new worker thread has been created so store its handle.
Some(WakerInterest::Worker(handle)) => {
drop(guard);
self.avail.set_available(handle.idx(), true);
self.handles.push(handle);
if !self.paused {
self.accept_all(sockets);
}
}
Some(WakerInterest::Pause) => {
drop(guard);
if !self.paused {
self.paused = true;
self.deregister_all(sockets);
}
}
Some(WakerInterest::Resume) => {
drop(guard);
if self.paused {
self.paused = false;
sockets.iter_mut().for_each(|info| {
self.register_logged(info);
});
self.accept_all(sockets);
}
}
Some(WakerInterest::Stop) => {
if !self.paused {
self.deregister_all(sockets);
}
return true;
}
// waker queue is drained
None => {
// Reset the WakerQueue before break so it does not grow infinitely
WakerQueue::reset(&mut guard);
return false;
}
}
}
}
fn process_timeout(&mut self, sockets: &mut [ServerSocketInfo]) {
// always remove old timeouts
if self.timeout.take().is_some() {
let now = Instant::now();
sockets
.iter_mut()
// Only sockets that had an associated timeout were deregistered.
.filter(|info| info.timeout.is_some())
.for_each(|info| {
let inst = info.timeout.take().unwrap();
if now < inst {
// still timed out; try to set new timeout
info.timeout = Some(inst);
self.set_timeout(inst - now);
} else if !self.paused {
// timeout expired; register socket again
self.register_logged(info);
}
// Drop the timeout if the server is paused and the socket timeout has expired.
// When the server recovers from pause it will register all sockets without
// a timeout value, so this socket's registration will be delayed till then.
});
}
}
/// Update accept timeout with `duration` if it is shorter than current timeout.
fn set_timeout(&mut self, duration: Duration) {
match self.timeout {
Some(ref mut timeout) => {
if *timeout > duration {
*timeout = duration;
}
}
None => self.timeout = Some(duration),
}
}
#[cfg(not(target_os = "windows"))]
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
let token = MioToken(info.token);
self.poll
.registry()
.register(&mut info.lst, token, Interest::READABLE)
}
#[cfg(target_os = "windows")]
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
// On windows, calling register without deregister causes an error.
// See https://github.com/actix/actix-web/issues/905
// Calling reregister seems to fix the issue.
let token = MioToken(info.token);
self.poll
.registry()
.register(&mut info.lst, token, Interest::READABLE)
.or_else(|_| {
self.poll
.registry()
.reregister(&mut info.lst, token, Interest::READABLE)
})
}
fn register_logged(&self, info: &mut ServerSocketInfo) {
match self.register(info) {
Ok(_) => debug!("resume accepting connections on {}", info.lst.local_addr()),
Err(err) => error!("can not register server socket {}", err),
}
}
fn deregister_logged(&self, info: &mut ServerSocketInfo) {
match self.poll.registry().deregister(&mut info.lst) {
Ok(_) => debug!("paused accepting connections on {}", info.lst.local_addr()),
Err(err) => {
error!("can not deregister server socket {}", err)
}
}
}
fn deregister_all(&self, sockets: &mut [ServerSocketInfo]) {
// This is a best-effort implementation with the following limitation:
//
// Every ServerSocketInfo with an associated timeout will be skipped and its timeout is
// removed in the process.
//
// Therefore WakerInterest::Pause followed by WakerInterest::Resume in a very short gap
// (less than 500ms) would cause all timing-out ServerSocketInfos to be re-registered before
// the expected timing.
sockets
.iter_mut()
// Take all timeouts.
// This is to prevent the Accept::process_timeout method from re-registering a socket afterwards.
.map(|info| (info.timeout.take(), info))
// Socket info with a timeout is already deregistered so skip them.
.filter(|(timeout, _)| timeout.is_none())
.for_each(|(_, info)| self.deregister_logged(info));
}
// Send connection to worker and handle error.
fn send_connection(&mut self, conn: Conn) -> Result<(), Conn> {
let next = self.next();
match next.send(conn) {
Ok(_) => {
// Increment counter of WorkerHandle.
// Set worker to unavailable when the counter hits max (inc_counter returns false).
if !next.inc_counter() {
let idx = next.idx();
self.avail.set_available(idx, false);
}
self.set_next();
Ok(())
}
Err(conn) => {
// Worker thread errored and could be gone.
// Remove worker handle and notify `ServerBuilder`.
self.remove_next();
if self.handles.is_empty() {
error!("no workers");
// All workers are gone and Conn is nowhere to be sent.
// Treat this situation as Ok and drop Conn.
return Ok(());
} else if self.handles.len() <= self.next {
self.next = 0;
}
Err(conn)
}
}
}
fn accept_one(&mut self, mut conn: Conn) {
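// Try workers that are marked available first; if every worker is unavailable,
// fall back to sending to successive workers until the connection is either
// delivered or dropped (when all workers are gone).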
loop {
let next = self.next();
let idx = next.idx();
if self.avail.get_available(idx) {
match self.send_connection(conn) {
Ok(_) => return,
Err(c) => conn = c,
}
} else {
self.avail.set_available(idx, false);
self.set_next();
if !self.avail.available() {
while let Err(c) = self.send_connection(conn) {
conn = c;
}
return;
}
}
}
}
fn accept(&mut self, sockets: &mut [ServerSocketInfo], token: usize) {
while self.avail.available() {
let info = &mut sockets[token];
match info.lst.accept() {
Ok(io) => {
let conn = Conn { io, token };
self.accept_one(conn);
}
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return,
Err(ref err) if connection_error(err) => continue,
Err(err) => {
error!("error accepting connection: {}", err);
// deregister listener temporarily
self.deregister_logged(info);
// sleep after error; write the timeout to the socket info since the poll loop
// will later need it to tell which socket's listener should be re-registered
// and when
info.timeout = Some(Instant::now() + Duration::from_millis(500));
self.set_timeout(TIMEOUT_DURATION_ON_ERROR);
return;
}
};
}
}
fn accept_all(&mut self, sockets: &mut [ServerSocketInfo]) {
sockets
.iter_mut()
.map(|info| info.token)
.collect::<Vec<_>>()
.into_iter()
.for_each(|idx| self.accept(sockets, idx))
}
#[inline(always)]
fn next(&self) -> &WorkerHandleAccept {
&self.handles[self.next]
}
/// Set the next worker handle that will accept a connection.
#[inline(always)]
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
}
/// Remove the next worker handle that failed to accept a connection.
fn remove_next(&mut self) {
let handle = self.handles.swap_remove(self.next);
let idx = handle.idx();
// A message is sent to the `ServerBuilder` future to notify it that a new worker
// should be made.
self.srv.worker_faulted(idx);
self.avail.set_available(idx, false);
Accept::start(
self.rx.take().expect("Can not re-use AcceptInfo"),
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
socks,
srv,
workers,
);
}
}
struct Accept {
poll: mio::Poll,
rx: sync_mpsc::Receiver<Command>,
sockets: Slab<ServerSocketInfo>,
workers: Vec<WorkerClient>,
srv: Server,
timer: (mio::Registration, mio::SetReadiness),
next: usize,
backpressure: bool,
}
const DELTA: usize = 100;
const CMD: mio::Token = mio::Token(0);
const TIMER: mio::Token = mio::Token(1);
const NOTIFY: mio::Token = mio::Token(2);
/// This function defines errors that are per-connection; if we get this error from the `accept()`
/// system call it means the next connection might be ready to be accepted.
///
/// All other errors will incur a timeout before the next `accept()` call is attempted. The
/// timeout is useful to handle resource exhaustion errors like `ENFILE` and `EMFILE`. Otherwise,
/// it could enter into a temporary spin loop.
fn connection_error(e: &io::Error) -> bool {
e.kind() == io::ErrorKind::ConnectionRefused
|| e.kind() == io::ErrorKind::ConnectionAborted
|| e.kind() == io::ErrorKind::ConnectionReset
}
impl Accept {
#![allow(clippy::too_many_arguments)]
pub(crate) fn start(
rx: sync_mpsc::Receiver<Command>,
cmd_reg: mio::Registration,
notify_reg: mio::Registration,
socks: Vec<(Token, StdListener)>,
srv: Server,
workers: Vec<WorkerClient>,
) {
let sys = System::current();
// start accept thread
let _ = thread::Builder::new()
.name("actix-server accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let mut accept = Accept::new(rx, socks, workers, srv);
// Start listening for incoming commands
if let Err(err) = accept.poll.register(
&cmd_reg,
CMD,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
// Start listening for notify updates
if let Err(err) = accept.poll.register(
&notify_reg,
NOTIFY,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
accept.poll();
});
}
fn new(
rx: sync_mpsc::Receiver<Command>,
socks: Vec<(Token, StdListener)>,
workers: Vec<WorkerClient>,
srv: Server,
) -> Accept {
// Create a poll instance
let poll = match mio::Poll::new() {
Ok(poll) => poll,
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Start accept
let mut sockets = Slab::new();
for (hnd_token, lst) in socks.into_iter() {
let addr = lst.local_addr();
let server = lst.into_listener();
let entry = sockets.vacant_entry();
let token = entry.key();
// Start listening for incoming connections
if let Err(err) = poll.register(
&server,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register io: {}", err);
}
entry.insert(ServerSocketInfo {
addr,
token: hnd_token,
sock: server,
timeout: None,
});
}
// Timer
let (tm, tmr) = mio::Registration::new2();
if let Err(err) =
poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
{
panic!("Can not register Registration: {}", err);
}
Accept {
poll,
rx,
sockets,
workers,
srv,
next: 0,
timer: (tm, tmr),
backpressure: false,
}
}
fn poll(&mut self) {
// Create storage for events
let mut events = mio::Events::with_capacity(128);
loop {
if let Err(err) = self.poll.poll(&mut events, None) {
panic!("Poll error: {}", err);
}
for event in events.iter() {
let token = event.token();
match token {
CMD => {
if !self.process_cmd() {
return;
}
}
TIMER => self.process_timer(),
NOTIFY => self.backpressure(false),
_ => {
let token = usize::from(token);
if token < DELTA {
continue;
}
self.accept(token - DELTA);
}
}
}
}
}
fn process_timer(&mut self) {
let now = Instant::now();
for (token, info) in self.sockets.iter_mut() {
if let Some(inst) = info.timeout.take() {
if now > inst {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not register server socket {}", err);
} else {
info!("Resume accepting connections on {}", info.addr);
}
} else {
info.timeout = Some(inst);
}
}
}
}
fn process_cmd(&mut self) -> bool {
loop {
match self.rx.try_recv() {
Ok(cmd) => match cmd {
Command::Pause => {
for (_, info) in self.sockets.iter_mut() {
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
} else {
info!("Paused accepting connections on {}", info.addr);
}
}
}
Command::Resume => {
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!(
"Accepting connections on {} has been resumed",
info.addr
);
}
}
}
Command::Stop => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
Command::Worker(worker) => {
self.backpressure(false);
self.workers.push(worker);
}
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => break,
sync_mpsc::TryRecvError::Disconnected => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
},
}
}
true
}
fn backpressure(&mut self, on: bool) {
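// Entering backpressure deregisters every listener so no new connections are
// accepted; leaving backpressure re-registers them all with the poll instance.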
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed", info.addr);
}
}
}
} else if on {
self.backpressure = true;
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
}
}
fn accept_one(&mut self, mut msg: Conn) {
if self.backpressure {
while !self.workers.is_empty() {
match self.workers[self.next].send(msg) {
Ok(_) => (),
Err(tmp) => {
self.srv.worker_faulted(self.workers[self.next].idx);
msg = tmp;
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
self.next = (self.next + 1) % self.workers.len();
break;
}
} else {
let mut idx = 0;
while idx < self.workers.len() {
idx += 1;
if self.workers[self.next].available() {
match self.workers[self.next].send(msg) {
Ok(_) => {
self.next = (self.next + 1) % self.workers.len();
return;
}
Err(tmp) => {
self.srv.worker_faulted(self.workers[self.next].idx);
msg = tmp;
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
self.backpressure(true);
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.next = (self.next + 1) % self.workers.len();
}
// enable backpressure
self.backpressure(true);
self.accept_one(msg);
}
}
fn accept(&mut self, token: usize) {
loop {
let msg = if let Some(info) = self.sockets.get_mut(token) {
match info.sock.accept() {
Ok(Some((io, addr))) => Conn {
io,
token: info.token,
peer: Some(addr),
},
Ok(None) => return,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
}
// sleep after error
info.timeout = Some(Instant::now() + Duration::from_millis(500));
let r = self.timer.1.clone();
System::current().arbiter().send(Box::pin(async move {
delay_until(Instant::now() + Duration::from_millis(510)).await;
let _ = r.set_readiness(mio::Ready::readable());
}));
return;
}
}
} else {
return;
};
self.accept_one(msg);
}
}
}

View File

@ -1,121 +0,0 @@
use crate::worker::WorkerHandleAccept;
/// Array of `u128` words where every bit marks the availability of one worker handle.
#[derive(Debug, Default)]
pub(crate) struct Availability([u128; 4]);
impl Availability {
/// Check if any worker handle is available
#[inline(always)]
pub(crate) fn available(&self) -> bool {
self.0.iter().any(|a| *a != 0)
}
/// Check if worker handle is available by index
#[inline(always)]
pub(crate) fn get_available(&self, idx: usize) -> bool {
let (offset, idx) = Self::offset(idx);
self.0[offset] & (1 << idx as u128) != 0
}
/// Set worker handle available state by index.
pub(crate) fn set_available(&mut self, idx: usize, avail: bool) {
let (offset, idx) = Self::offset(idx);
let off = 1 << idx as u128;
if avail {
self.0[offset] |= off;
} else {
self.0[offset] &= !off
}
}
/// Set all worker handles to the available state.
/// This results in a re-check of all workers' availability.
pub(crate) fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
handles.iter().for_each(|handle| {
self.set_available(handle.idx(), true);
})
}
/// Get offset and adjusted index of given worker handle index.
pub(crate) fn offset(idx: usize) -> (usize, usize) {
if idx < 128 {
(0, idx)
} else if idx < 128 * 2 {
(1, idx - 128)
} else if idx < 128 * 3 {
(2, idx - 128 * 2)
} else if idx < 128 * 4 {
(3, idx - 128 * 3)
} else {
panic!("Max WorkerHandle count is 512")
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn single(aval: &mut Availability, idx: usize) {
aval.set_available(idx, true);
assert!(aval.available());
aval.set_available(idx, true);
aval.set_available(idx, false);
assert!(!aval.available());
aval.set_available(idx, false);
assert!(!aval.available());
}
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
idx.iter().for_each(|idx| aval.set_available(*idx, true));
assert!(aval.available());
while let Some(idx) = idx.pop() {
assert!(aval.available());
aval.set_available(idx, false);
}
assert!(!aval.available());
}
#[test]
fn availability() {
let mut aval = Availability::default();
single(&mut aval, 1);
single(&mut aval, 128);
single(&mut aval, 256);
single(&mut aval, 511);
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
multi(&mut aval, idx);
multi(&mut aval, (0..511).collect())
}
#[test]
#[should_panic]
fn overflow() {
let mut aval = Availability::default();
single(&mut aval, 512);
}
#[test]
fn pin_point() {
let mut aval = Availability::default();
aval.set_available(438, true);
aval.set_available(479, true);
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
}
}
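For orientation, the index-to-bit mapping used by `Availability::offset` can be reproduced with a tiny standalone sketch; the free function and `main` below are illustrative only and not part of the crate.
// Maps a worker handle index (0..512) to (word offset, bit position), using the
// same scheme as Availability::offset: each u128 word covers 128 consecutive indices.
fn offset(idx: usize) -> (usize, usize) {
    assert!(idx < 512, "max worker handle count is 512");
    (idx / 128, idx % 128)
}
fn main() {
    // Index 438 lands in the 4th word (offset 3) at bit 438 - 384 = 54,
    // matching the `pin_point` test above.
    assert_eq!(offset(438), (3, 54));
    assert_eq!(offset(479), (3, 95));
}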

View File

@ -1,47 +1,44 @@
use std::{io, num::NonZeroUsize, time::Duration};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::{io, mem, net};
use actix_rt::net::TcpStream;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use actix_rt::time::{delay_until, Instant};
use actix_rt::{spawn, System};
use futures::channel::mpsc::{unbounded, UnboundedReceiver};
use futures::channel::oneshot;
use futures::future::ready;
use futures::stream::FuturesUnordered;
use futures::{ready, Future, FutureExt, Stream, StreamExt};
use log::{error, info};
use net2::TcpBuilder;
use num_cpus;
use crate::{
server::ServerCommand,
service::{InternalServiceFactory, ServerServiceFactory, StreamNewService},
socket::{create_mio_tcp_listener, MioListener, MioTcpListener, StdTcpListener, ToSocketAddrs},
worker::ServerWorkerConfig,
Server,
};
use crate::accept::{AcceptLoop, AcceptNotify, Command};
use crate::config::{ConfiguredService, ServiceConfig};
use crate::server::{Server, ServerCommand};
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::signals::{Signal, Signals};
use crate::socket::StdListener;
use crate::worker::{self, Worker, WorkerAvailability, WorkerClient};
use crate::Token;
/// Multipath TCP (MPTCP) preference.
///
/// Currently only useful on Linux.
///
#[cfg_attr(target_os = "linux", doc = "Also see [`ServerBuilder::mptcp()`].")]
#[derive(Debug, Clone)]
pub enum MpTcp {
/// MPTCP will not be used when binding sockets.
Disabled,
/// MPTCP will be attempted when binding sockets. If errors occur, regular TCP will be
/// attempted, too.
TcpFallback,
/// MPTCP will be used when binding sockets (with no fallback).
NoFallback,
}
/// [Server] builder.
/// Server builder
pub struct ServerBuilder {
pub(crate) threads: usize,
pub(crate) token: usize,
pub(crate) backlog: u32,
pub(crate) factories: Vec<Box<dyn InternalServiceFactory>>,
pub(crate) sockets: Vec<(usize, String, MioListener)>,
pub(crate) mptcp: MpTcp,
pub(crate) exit: bool,
pub(crate) listen_os_signals: bool,
pub(crate) cmd_tx: UnboundedSender<ServerCommand>,
pub(crate) cmd_rx: UnboundedReceiver<ServerCommand>,
pub(crate) worker_config: ServerWorkerConfig,
threads: usize,
token: Token,
backlog: i32,
workers: Vec<(usize, WorkerClient)>,
services: Vec<Box<dyn InternalServiceFactory>>,
sockets: Vec<(Token, String, StdListener)>,
accept: AcceptLoop,
exit: bool,
shutdown_timeout: Duration,
no_signals: bool,
cmd: UnboundedReceiver<ServerCommand>,
server: Server,
notify: Vec<oneshot::Sender<()>>,
}
impl Default for ServerBuilder {
@ -53,326 +50,447 @@ impl Default for ServerBuilder {
impl ServerBuilder {
/// Create new Server builder instance
pub fn new() -> ServerBuilder {
let (cmd_tx, cmd_rx) = unbounded_channel();
let (tx, rx) = unbounded();
let server = Server::new(tx);
ServerBuilder {
threads: std::thread::available_parallelism().map_or(2, NonZeroUsize::get),
token: 0,
factories: Vec::new(),
threads: num_cpus::get(),
token: Token(0),
workers: Vec::new(),
services: Vec::new(),
sockets: Vec::new(),
accept: AcceptLoop::new(server.clone()),
backlog: 2048,
mptcp: MpTcp::Disabled,
exit: false,
listen_os_signals: true,
cmd_tx,
cmd_rx,
worker_config: ServerWorkerConfig::default(),
shutdown_timeout: Duration::from_secs(30),
no_signals: false,
cmd: rx,
notify: Vec::new(),
server,
}
}
/// Sets number of workers to start.
/// Set number of workers to start.
///
/// See [`bind()`](Self::bind()) for more details on how worker count affects the number of
/// server factory instantiations.
///
/// The default worker count is determined by [`std::thread::available_parallelism()`]. See
/// its documentation to determine what behavior you should expect when the server is run.
///
/// `num` must be greater than 0.
///
/// # Panics
///
/// Panics if `num` is 0.
/// By default the server uses the number of available logical CPUs as the worker
/// count.
pub fn workers(mut self, num: usize) -> Self {
assert_ne!(num, 0, "workers must be greater than 0");
self.threads = num;
self
}
/// Set max number of threads for each worker's blocking task thread pool.
///
/// One thread pool is set up **per worker**; not shared across workers.
///
/// # Examples
/// ```
/// # use actix_server::ServerBuilder;
/// let builder = ServerBuilder::new()
/// .workers(4) // server has 4 worker threads.
/// .worker_max_blocking_threads(4); // every worker has 4 max blocking threads.
/// ```
///
/// See [tokio::runtime::Builder::max_blocking_threads] for behavior reference.
pub fn worker_max_blocking_threads(mut self, num: usize) -> Self {
self.worker_config.max_blocking_threads(num);
self
}
/// Set the maximum number of pending connections.
///
/// This refers to the number of clients that can be waiting to be served. Exceeding this number
/// results in the client getting an error when attempting to connect. It should only affect
/// servers under significant load.
/// This refers to the number of clients that can be waiting to be served.
/// Exceeding this number results in the client getting an error when
/// attempting to connect. It should only affect servers under significant
/// load.
///
/// Generally set in the 64-2048 range. Default value is 2048.
///
/// This method should be called before `bind()` method call.
pub fn backlog(mut self, num: u32) -> Self {
pub fn backlog(mut self, num: i32) -> Self {
self.backlog = num;
self
}
/// Sets MultiPath TCP (MPTCP) preference on bound sockets.
///
/// Multipath TCP (MPTCP) builds on top of TCP to improve connection redundancy and performance
/// by sharing a network data stream across multiple underlying TCP sessions. See [mptcp.dev]
/// for more info about MPTCP itself.
///
/// MPTCP is available on Linux kernel version 5.6 and higher. In addition, you'll also need to
/// ensure the kernel option is enabled using `sysctl net.mptcp.enabled=1`.
///
/// This method will have no effect if called after a `bind()`.
///
/// [mptcp.dev]: https://www.mptcp.dev
#[cfg(target_os = "linux")]
pub fn mptcp(mut self, mptcp_enabled: MpTcp) -> Self {
self.mptcp = mptcp_enabled;
self
}
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is reached for
/// each worker.
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
///
/// By default max connections is set to a 25k per worker.
pub fn max_concurrent_connections(mut self, num: usize) -> Self {
self.worker_config.max_concurrent_connections(num);
pub fn maxconn(self, num: usize) -> Self {
worker::max_concurrent_connections(num);
self
}
#[doc(hidden)]
#[deprecated(since = "2.0.0", note = "Renamed to `max_concurrent_connections`.")]
pub fn maxconn(self, num: usize) -> Self {
self.max_concurrent_connections(num)
}
/// Sets flag to stop Actix `System` after server shutdown.
///
/// This has no effect when server is running in a Tokio-only runtime.
/// Stop actix system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
/// Disables OS signal handling.
/// Disable signal handling
pub fn disable_signals(mut self) -> Self {
self.listen_os_signals = false;
self.no_signals = true;
self
}
/// Timeout for graceful workers shutdown in seconds.
///
/// After receiving a stop signal, workers have this much time to finish serving requests.
/// Workers still alive after the timeout are force dropped.
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
///
/// By default the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u64) -> Self {
self.worker_config
.shutdown_timeout(Duration::from_secs(sec));
self.shutdown_timeout = Duration::from_secs(sec);
self
}
/// Adds new service to the server.
/// Execute external configuration as part of the server building
/// process.
///
/// Note that, if a DNS lookup is required, resolving hostnames is a blocking operation.
///
/// # Worker Count
///
/// The `factory` will be instantiated multiple times in most scenarios. The number of
/// instantiations is number of [`workers`](Self::workers()) × number of sockets resolved by
/// `addrs`.
///
/// For example, if you've manually set [`workers`](Self::workers()) to 2, and use `127.0.0.1`
/// as the bind `addrs`, then `factory` will be instantiated twice. However, using `localhost`
/// as the bind `addrs` can often resolve to both `127.0.0.1` (IPv4) _and_ `::1` (IPv6), causing
/// the `factory` to be instantiated 4 times (2 workers × 2 bind addresses).
///
/// Using a bind address of `0.0.0.0`, which signals to use all interfaces, may also multiply
/// the number of instantiations in a similar way.
///
/// # Errors
///
/// Returns an `io::Error` if:
/// - `addrs` cannot be resolved into one or more socket addresses;
/// - all the resolved socket addresses are already bound.
pub fn bind<F, U, N>(mut self, name: N, addrs: U, factory: F) -> io::Result<Self>
/// This function is useful for moving parts of configuration to a
/// different module or even library.
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
where
F: ServerServiceFactory<TcpStream>,
U: ToSocketAddrs,
N: AsRef<str>,
F: Fn(&mut ServiceConfig) -> io::Result<()>,
{
let sockets = bind_addr(addrs, self.backlog, &self.mptcp)?;
let mut cfg = ServiceConfig::new(self.threads, self.backlog);
tracing::trace!("binding server to: {sockets:?}");
f(&mut cfg)?;
if let Some(apply) = cfg.apply {
let mut srv = ConfiguredService::new(apply);
for (name, lst) in cfg.services {
let token = self.token.next();
srv.stream(token, name.clone(), lst.local_addr()?);
self.sockets.push((token, name, StdListener::Tcp(lst)));
}
self.services.push(Box::new(srv));
}
self.threads = cfg.threads;
Ok(self)
}
/// Add new service to the server.
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: ServiceFactory<TcpStream>,
U: net::ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
let token = self.next_token();
self.factories.push(StreamNewService::create(
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory.clone(),
lst.local_addr()?,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
.push((token, name.as_ref().to_string(), StdListener::Tcp(lst)));
}
Ok(self)
}
/// Adds service to the server using a socket listener already bound.
///
/// # Worker Count
///
/// The `factory` will be instantiated multiple times in most scenarios. The number of
/// instantiations is: number of [`workers`](Self::workers()).
pub fn listen<F, N: AsRef<str>>(
mut self,
name: N,
lst: StdTcpListener,
factory: F,
) -> io::Result<Self>
where
F: ServerServiceFactory<TcpStream>,
{
lst.set_nonblocking(true)?;
let addr = lst.local_addr()?;
let token = self.next_token();
self.factories.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
addr,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
/// Starts processing incoming connections and returns the server controller.
pub fn run(self) -> Server {
if self.sockets.is_empty() {
panic!("Server should have at least one bound socket");
} else {
tracing::info!("starting {} workers", self.threads);
Server::new(self)
}
}
fn next_token(&mut self) -> usize {
let token = self.token;
self.token += 1;
token
}
}
#[cfg(unix)]
impl ServerBuilder {
/// Adds new service to the server using a UDS (unix domain socket) address.
///
/// # Worker Count
///
/// The `factory` will be instantiated multiple times in most scenarios. The number of
/// instantiations is: number of [`workers`](Self::workers()).
#[cfg(all(unix))]
/// Add new unix domain service to the server.
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: ServerServiceFactory<actix_rt::net::UnixStream>,
F: ServiceFactory<actix_rt::net::UnixStream>,
N: AsRef<str>,
U: AsRef<std::path::Path>,
{
use std::os::unix::net::UnixListener;
// The path must not exist when we try to bind.
// Try to remove it to avoid bind error.
if let Err(err) = std::fs::remove_file(addr.as_ref()) {
if let Err(e) = std::fs::remove_file(addr.as_ref()) {
// NotFound is expected and not an issue. Anything else is.
if err.kind() != std::io::ErrorKind::NotFound {
return Err(err);
if e.kind() != std::io::ErrorKind::NotFound {
return Err(e);
}
}
let lst = crate::socket::StdUnixListener::bind(addr)?;
let lst = UnixListener::bind(addr)?;
self.listen_uds(name, lst, factory)
}
/// Adds new service to the server using a UDS (unix domain socket) listener already bound.
///
/// Useful when running as a systemd service and a socket FD is acquired externally.
///
/// # Worker Count
///
/// The `factory` will be instantiated multiple times in most scenarios. The number of
/// instantiations is: number of [`workers`](Self::workers()).
#[cfg(all(unix))]
/// Add new unix domain service to the server.
/// Useful when running as a systemd service and
/// a socket FD can be acquired using the systemd crate.
pub fn listen_uds<F, N: AsRef<str>>(
mut self,
name: N,
lst: crate::socket::StdUnixListener,
lst: std::os::unix::net::UnixListener,
factory: F,
) -> io::Result<Self>
where
F: ServerServiceFactory<actix_rt::net::UnixStream>,
F: ServiceFactory<actix_rt::net::UnixStream>,
{
use std::net::{IpAddr, Ipv4Addr};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
let token = self.token.next();
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory.clone(),
addr,
));
self.sockets
.push((token, name.as_ref().to_string(), StdListener::Uds(lst)));
Ok(self)
}
lst.set_nonblocking(true)?;
let token = self.next_token();
let addr = crate::socket::StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
self.factories.push(StreamNewService::create(
/// Add new service to the server.
pub fn listen<F, N: AsRef<str>>(
mut self,
name: N,
lst: net::TcpListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory<TcpStream>,
{
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
addr,
lst.local_addr()?,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
.push((token, name.as_ref().to_string(), StdListener::Tcp(lst)));
Ok(self)
}
}
pub(super) fn bind_addr<S: ToSocketAddrs>(
addr: S,
backlog: u32,
mptcp: &MpTcp,
) -> io::Result<Vec<MioTcpListener>> {
let mut opt_err = None;
let mut success = false;
let mut sockets = Vec::new();
#[doc(hidden)]
pub fn start(self) -> Server {
self.run()
}
for addr in addr.to_socket_addrs()? {
match create_mio_tcp_listener(addr, backlog, mptcp) {
Ok(lst) => {
success = true;
sockets.push(lst);
/// Starts processing incoming connections and returns the server controller.
pub fn run(mut self) -> Server {
if self.sockets.is_empty() {
panic!("Server should have at least one bound socket");
} else {
info!("Starting {} workers", self.threads);
// start workers
let mut workers = Vec::new();
for idx in 0..self.threads {
let worker = self.start_worker(idx, self.accept.get_notify());
workers.push(worker.clone());
self.workers.push((idx, worker));
}
Err(err) => opt_err = Some(err),
// start accept thread
for sock in &self.sockets {
info!("Starting \"{}\" service on {}", sock.1, sock.2);
}
self.accept.start(
mem::replace(&mut self.sockets, Vec::new())
.into_iter()
.map(|t| (t.0, t.2))
.collect(),
workers,
);
// handle signals
if !self.no_signals {
Signals::start(self.server.clone()).unwrap();
}
// start http server actor
let server = self.server.clone();
spawn(self);
server
}
}
if success {
Ok(sockets)
} else if let Some(err) = opt_err.take() {
Err(err)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
fn start_worker(&self, idx: usize, notify: AcceptNotify) -> WorkerClient {
let avail = WorkerAvailability::new(notify);
let services: Vec<Box<dyn InternalServiceFactory>> =
self.services.iter().map(|v| v.clone_factory()).collect();
Worker::start(idx, services, avail, self.shutdown_timeout)
}
fn handle_cmd(&mut self, item: ServerCommand) {
match item {
ServerCommand::Pause(tx) => {
self.accept.send(Command::Pause);
let _ = tx.send(());
}
ServerCommand::Resume(tx) => {
self.accept.send(Command::Resume);
let _ = tx.send(());
}
ServerCommand::Signal(sig) => {
// Signals support
// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
match sig {
Signal::Int => {
info!("SIGINT received, exiting");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: false,
completion: None,
})
}
Signal::Term => {
info!("SIGTERM received, stopping");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: true,
completion: None,
})
}
Signal::Quit => {
info!("SIGQUIT received, exiting");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: false,
completion: None,
})
}
_ => (),
}
}
ServerCommand::Notify(tx) => {
self.notify.push(tx);
}
ServerCommand::Stop {
graceful,
completion,
} => {
let exit = self.exit;
// stop accept thread
self.accept.send(Command::Stop);
let notify = std::mem::replace(&mut self.notify, Vec::new());
// stop workers
if !self.workers.is_empty() && graceful {
spawn(
self.workers
.iter()
.map(move |worker| worker.1.stop(graceful))
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>()
.then(move |_| {
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
if exit {
spawn(
async {
delay_until(
Instant::now() + Duration::from_millis(300),
)
.await;
System::current().stop();
}
.boxed(),
);
}
ready(())
}),
)
} else {
// we need to stop system if server was spawned
if self.exit {
spawn(
delay_until(Instant::now() + Duration::from_millis(300)).then(
|_| {
System::current().stop();
ready(())
},
),
);
}
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
}
}
ServerCommand::WorkerFaulted(idx) => {
let mut found = false;
for i in 0..self.workers.len() {
if self.workers[i].0 == idx {
self.workers.swap_remove(i);
found = true;
break;
}
}
if found {
error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.workers.len();
'found: loop {
for i in 0..self.workers.len() {
if self.workers[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
}
break;
}
let worker = self.start_worker(new_idx, self.accept.get_notify());
self.workers.push((new_idx, worker.clone()));
self.accept.send(Command::Worker(worker));
}
}
}
}
}
impl Future for ServerBuilder {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match ready!(Pin::new(&mut self.cmd).poll_next(cx)) {
Some(it) => self.as_mut().get_mut().handle_cmd(it),
None => {
return Poll::Pending;
}
}
}
}
}
pub(super) fn bind_addr<S: net::ToSocketAddrs>(
addr: S,
backlog: i32,
) -> io::Result<Vec<net::TcpListener>> {
let mut err = None;
let mut succ = false;
let mut sockets = Vec::new();
for addr in addr.to_socket_addrs()? {
match create_tcp_listener(addr, backlog) {
Ok(lst) => {
succ = true;
sockets.push(lst);
}
Err(e) => err = Some(e),
}
}
if !succ {
if let Some(e) = err.take() {
Err(e)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
}
} else {
Ok(sockets)
}
}
fn create_tcp_listener(addr: net::SocketAddr, backlog: i32) -> io::Result<net::TcpListener> {
let builder = match addr {
net::SocketAddr::V4(_) => TcpBuilder::new_v4()?,
net::SocketAddr::V6(_) => TcpBuilder::new_v6()?,
};
builder.reuse_address(true)?;
builder.bind(addr)?;
Ok(builder.listen(backlog)?)
}
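As a rough usage sketch of the builder API on the master side of this diff: a trivial service that accepts and immediately drops each TCP connection. It assumes the `Server` returned by `run()` can be awaited to completion as `io::Result<()>`, as on recent releases; exact trait bounds may differ between versions.
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        // With 2 workers and a single resolved address, the closure below is
        // instantiated twice (workers x resolved socket addresses).
        .workers(2)
        .bind("drop-conn", ("127.0.0.1", 8080), || {
            fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run()
        .await
}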

285
actix-server/src/config.rs Normal file
View File

@ -0,0 +1,285 @@
use std::collections::HashMap;
use std::{fmt, io, net};
use actix_rt::net::TcpStream;
use actix_service as actix;
use actix_utils::counter::CounterGuard;
use futures::future::{ok, Future, FutureExt, LocalBoxFuture};
use log::error;
use super::builder::bind_addr;
use super::service::{
BoxedServerService, InternalServiceFactory, ServerMessage, StreamService,
};
use super::Token;
pub struct ServiceConfig {
pub(crate) services: Vec<(String, net::TcpListener)>,
pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
pub(crate) threads: usize,
pub(crate) backlog: i32,
}
impl ServiceConfig {
pub(super) fn new(threads: usize, backlog: i32) -> ServiceConfig {
ServiceConfig {
threads,
backlog,
services: Vec::new(),
apply: None,
}
}
/// Set number of workers to start.
///
/// By default the server uses the number of available logical CPUs as the worker
/// count.
pub fn workers(&mut self, num: usize) {
self.threads = num;
}
/// Add new service to server
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
where
U: net::ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
self.listen(name.as_ref(), lst);
}
Ok(self)
}
/// Add new service to server
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: net::TcpListener) -> &mut Self {
if self.apply.is_none() {
self.apply = Some(Box::new(not_configured));
}
self.services.push((name.as_ref().to_string(), lst));
self
}
/// Register a service configuration function. This function gets called
/// during worker runtime configuration and is executed in the worker thread.
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
self.apply = Some(Box::new(f));
Ok(())
}
}
pub(super) struct ConfiguredService {
rt: Box<dyn ServiceRuntimeConfiguration>,
names: HashMap<Token, (String, net::SocketAddr)>,
topics: HashMap<String, Token>,
services: Vec<Token>,
}
impl ConfiguredService {
pub(super) fn new(rt: Box<dyn ServiceRuntimeConfiguration>) -> Self {
ConfiguredService {
rt,
names: HashMap::new(),
topics: HashMap::new(),
services: Vec::new(),
}
}
pub(super) fn stream(&mut self, token: Token, name: String, addr: net::SocketAddr) {
self.names.insert(token, (name.clone(), addr));
self.topics.insert(name.clone(), token);
self.services.push(token);
}
}
impl InternalServiceFactory for ConfiguredService {
fn name(&self, token: Token) -> &str {
&self.names[&token].0
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
rt: self.rt.clone(),
names: self.names.clone(),
topics: self.topics.clone(),
services: self.services.clone(),
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
// configure services
let mut rt = ServiceRuntime::new(self.topics.clone());
self.rt.configure(&mut rt);
rt.validate();
let mut names = self.names.clone();
let tokens = self.services.clone();
// construct services
async move {
let mut services = rt.services;
// TODO: Proper error handling here
for f in rt.onstart.into_iter() {
f.await;
}
let mut res = vec![];
for token in tokens {
if let Some(srv) = services.remove(&token) {
let newserv = srv.new_service(());
match newserv.await {
Ok(serv) => {
res.push((token, serv));
}
Err(_) => {
error!("Can not construct service");
return Err(());
}
}
} else {
let name = names.remove(&token).unwrap().0;
res.push((
token,
Box::new(StreamService::new(actix::fn_service(
move |_: TcpStream| {
error!("Service {:?} is not configured", name);
ok::<_, ()>(())
},
))),
));
};
}
return Ok(res);
}
.boxed_local()
}
}
pub(super) trait ServiceRuntimeConfiguration: Send {
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration>;
fn configure(&self, rt: &mut ServiceRuntime);
}
impl<F> ServiceRuntimeConfiguration for F
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration> {
Box::new(self.clone())
}
fn configure(&self, rt: &mut ServiceRuntime) {
(self)(rt)
}
}
fn not_configured(_: &mut ServiceRuntime) {
error!("Service is not configured");
}
pub struct ServiceRuntime {
names: HashMap<String, Token>,
services: HashMap<Token, BoxedNewService>,
onstart: Vec<LocalBoxFuture<'static, ()>>,
}
impl ServiceRuntime {
fn new(names: HashMap<String, Token>) -> Self {
ServiceRuntime {
names,
services: HashMap::new(),
onstart: Vec::new(),
}
}
fn validate(&self) {
for (name, token) in &self.names {
if !self.services.contains_key(&token) {
error!("Service {:?} is not configured", name);
}
}
}
/// Register service.
///
/// The name of the service must be registered during the configuration stage with the
/// *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
pub fn service<T, F>(&mut self, name: &str, service: F)
where
F: actix::IntoServiceFactory<T>,
T: actix::ServiceFactory<Config = (), Request = TcpStream> + 'static,
T::Future: 'static,
T::Service: 'static,
T::InitError: fmt::Debug,
{
// let name = name.to_owned();
if let Some(token) = self.names.get(name) {
self.services.insert(
token.clone(),
Box::new(ServiceFactory {
inner: service.into_factory(),
}),
);
} else {
panic!("Unknown service: {:?}", name);
}
}
/// Execute future before services initialization.
pub fn on_start<F>(&mut self, fut: F)
where
F: Future<Output = ()> + 'static,
{
self.onstart.push(fut.boxed_local())
}
}
type BoxedNewService = Box<
dyn actix::ServiceFactory<
Request = (Option<CounterGuard>, ServerMessage),
Response = (),
Error = (),
InitError = (),
Config = (),
Service = BoxedServerService,
Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>,
>,
>;
struct ServiceFactory<T> {
inner: T,
}
impl<T> actix::ServiceFactory for ServiceFactory<T>
where
T: actix::ServiceFactory<Config = (), Request = TcpStream>,
T::Future: 'static,
T::Service: 'static,
T::Error: 'static,
T::InitError: fmt::Debug + 'static,
{
type Request = (Option<CounterGuard>, ServerMessage);
type Response = ();
type Error = ();
type InitError = ();
type Config = ();
type Service = BoxedServerService;
type Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>;
fn new_service(&self, _: ()) -> Self::Future {
let fut = self.inner.new_service(());
async move {
return match fut.await {
Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
Err(e) => {
error!("Can not construct service: {:?}", e);
Err(())
}
};
}
.boxed_local()
}
}
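A rough sketch of how this 1.x-era configuration API was meant to be driven, using the `ServerBuilder`/`ServiceConfig` re-exports shown in lib.rs below; `configure_echo` is a hypothetical helper, not part of the crate.
use actix_rt::net::TcpStream;
use actix_server::{ServerBuilder, ServiceConfig};
use actix_service::fn_service;
use futures::future::ok;

// Hypothetical helper: wires a named listener (registered with `bind`) to a
// service (attached per worker inside `apply`).
fn configure_echo(builder: ServerBuilder) -> std::io::Result<ServerBuilder> {
    builder.configure(|cfg: &mut ServiceConfig| {
        // Bind the socket up front; its service is attached on each worker below.
        cfg.bind("echo", "127.0.0.1:8080")?;
        cfg.apply(move |rt| {
            // Runs on every worker thread during startup. The name must match one
            // registered via bind()/listen(), otherwise validate() logs an error.
            rt.service("echo", fn_service(|_: TcpStream| ok::<_, ()>(())));
        })
    })
}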

View File

@ -1,56 +0,0 @@
use std::future::Future;
use tokio::sync::{mpsc::UnboundedSender, oneshot};
use crate::server::ServerCommand;
/// Server handle.
#[derive(Debug, Clone)]
pub struct ServerHandle {
cmd_tx: UnboundedSender<ServerCommand>,
}
impl ServerHandle {
pub(crate) fn new(cmd_tx: UnboundedSender<ServerCommand>) -> Self {
ServerHandle { cmd_tx }
}
pub(crate) fn worker_faulted(&self, idx: usize) {
let _ = self.cmd_tx.send(ServerCommand::WorkerFaulted(idx));
}
/// Pause accepting incoming connections.
///
/// May drop the socket's pending connections. All open connections remain active.
pub fn pause(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.cmd_tx.send(ServerCommand::Pause(tx));
async {
let _ = rx.await;
}
}
/// Resume accepting incoming connections.
pub fn resume(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.cmd_tx.send(ServerCommand::Resume(tx));
async {
let _ = rx.await;
}
}
/// Stop incoming connection processing, stop all workers and exit.
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.cmd_tx.send(ServerCommand::Stop {
graceful,
completion: Some(tx),
force_system_stop: false,
});
async {
let _ = rx.await;
}
}
}
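A hedged sketch of driving these controls from a spawned task. It assumes the running `Server` exposes a `handle()` accessor returning the `ServerHandle` above (true on recent releases, but not shown in this diff) and reuses the trivial drop-connection service from the builder sketch.
use std::time::Duration;

use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    let srv = Server::build()
        .bind("noop", ("127.0.0.1", 8080), || {
            fn_service(|_: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run();

    // Assumed accessor: obtain the ServerHandle defined above from the running server.
    let handle = srv.handle();

    actix_rt::spawn(async move {
        actix_rt::time::sleep(Duration::from_secs(1)).await;
        handle.pause().await;    // stop accepting new connections
        handle.resume().await;   // accept again
        handle.stop(true).await; // graceful shutdown; resolves when workers are done
    });

    srv.await
}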

View File

@ -1,78 +0,0 @@
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use futures_core::future::BoxFuture;
// A poor man's join future. The joined future is only used when starting/stopping the server.
// pin_project and pinned futures are overkill for this task.
pub(crate) struct JoinAll<T> {
fut: Vec<JoinFuture<T>>,
}
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + Send + 'static>) -> JoinAll<T> {
let fut = fut
.into_iter()
.map(|f| JoinFuture::Future(Box::pin(f)))
.collect();
JoinAll { fut }
}
enum JoinFuture<T> {
Future(BoxFuture<'static, T>),
Result(Option<T>),
}
impl<T> Unpin for JoinAll<T> {}
impl<T> Future for JoinAll<T> {
type Output = Vec<T>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut ready = true;
let this = self.get_mut();
for fut in this.fut.iter_mut() {
if let JoinFuture::Future(f) = fut {
match f.as_mut().poll(cx) {
Poll::Ready(t) => {
*fut = JoinFuture::Result(Some(t));
}
Poll::Pending => ready = false,
}
}
}
if ready {
let mut res = Vec::new();
for fut in this.fut.iter_mut() {
if let JoinFuture::Result(f) = fut {
res.push(f.take().unwrap());
}
}
Poll::Ready(res)
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod test {
use actix_utils::future::ready;
use super::*;
#[actix_rt::test]
async fn test_join_all() {
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
let mut res = join_all(futs).await.into_iter();
assert_eq!(Ok(1), res.next().unwrap());
assert_eq!(Err(3), res.next().unwrap());
assert_eq!(Ok(9), res.next().unwrap());
}
}

View File

@ -1,34 +1,37 @@
//! General purpose TCP server.
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
//! General purpose tcp server
#![deny(rust_2018_idioms, warnings)]
#![allow(clippy::type_complexity)]
mod accept;
mod availability;
mod builder;
mod handle;
mod join_all;
mod config;
mod server;
mod service;
mod signals;
mod socket;
mod test_server;
mod waker_queue;
mod worker;
pub use self::builder::ServerBuilder;
pub use self::config::{ServiceConfig, ServiceRuntime};
pub use self::server::Server;
pub use self::service::ServiceFactory;
#[doc(hidden)]
pub use self::socket::FromStream;
pub use self::{
builder::{MpTcp, ServerBuilder},
handle::ServerHandle,
server::Server,
service::ServerServiceFactory,
test_server::TestServer,
};
/// Socket id token
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) struct Token(usize);
impl Token {
pub(crate) fn next(&mut self) -> Token {
let token = Token(self.0);
self.0 += 1;
token
}
}
/// Start server building process
#[doc(hidden)]
#[deprecated(since = "2.0.0", note = "Use `Server::build()`.")]
pub fn new() -> ServerBuilder {
ServerBuilder::default()
}

Some files were not shown because too many files have changed in this diff