Mirror of https://github.com/fafhrd91/actix-net (synced 2025-03-14 16:46:26 +01:00)

Compare commits: macros-v0. ... master (553 commits)
Author | SHA1 | Date
---|---|---
(553 commits listed by SHA only, from 8204690568 through 93889776c4; the author and date columns are empty.)
.cargo/config.toml (new file, 15 lines)
@@ -0,0 +1,15 @@
[alias]
lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo"
lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"

# just check the library (without dev deps)
ci-check-min = "hack --workspace check --no-default-features"
ci-check-lib = "hack --workspace --feature-powerset --depth=2 --exclude-features=io-uring check"
ci-check-lib-linux = "hack --workspace --feature-powerset --depth=2 check"

# check everything
ci-check = "hack --workspace --feature-powerset --depth=2 --exclude-features=io-uring check --tests --examples"
ci-check-linux = "hack --workspace --feature-powerset --depth=2 check --tests --examples"

# tests avoiding io-uring feature
ci-test = "hack --feature-powerset --depth=2 --exclude-features=io-uring test --lib --tests --no-fail-fast -- --nocapture"
.github/PULL_REQUEST_TEMPLATE.md (7 lines changed)
@@ -1,10 +1,12 @@
## PR Type

<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->

INSERT_PR_TYPE


## PR Checklist

Check your PR fulfills the following:

<!-- For draft PRs check the boxes as you complete them. -->
@@ -14,11 +16,10 @@ Check your PR fulfills the following:
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt


## Overview

<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->


<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->
.github/dependabot.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
version: 2
updates:
  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: weekly
  - package-ecosystem: cargo
    directory: /
    schedule:
      interval: weekly
.github/workflows/bench.yml (deleted, 29 lines)
@@ -1,29 +0,0 @@
|
||||
name: Benchmark (Linux)
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- '1.0'
|
||||
|
||||
jobs:
|
||||
check_benchmark:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: nightly
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Check benchmark
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: bench
|
||||
args: --package=actix-service
|
.github/workflows/ci-post-merge.yml (new file, 129 lines)
@@ -0,0 +1,129 @@
|
||||
name: CI (post-merge)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build_and_test_nightly:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# prettier-ignore
|
||||
target:
|
||||
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
|
||||
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
|
||||
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
|
||||
- { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
|
||||
version:
|
||||
- nightly
|
||||
|
||||
name: ${{ matrix.target.name }} / ${{ matrix.version }}
|
||||
runs-on: ${{ matrix.target.os }}
|
||||
|
||||
env: {}
|
||||
|
||||
steps:
|
||||
- name: Setup Routing
|
||||
if: matrix.target.os == 'macos-latest'
|
||||
run: sudo ifconfig lo0 alias 127.0.0.3
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Free Disk Space
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
run: ./scripts/free-disk-space.sh
|
||||
|
||||
- name: Setup mold linker
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
uses: rui314/setup-mold@v1
|
||||
|
||||
- name: Install nasm
|
||||
if: matrix.target.os == 'windows-latest'
|
||||
uses: ilammy/setup-nasm@v1.5.2
|
||||
|
||||
- name: Install OpenSSL
|
||||
if: matrix.target.os == 'windows-latest'
|
||||
shell: bash
|
||||
run: |
|
||||
set -e
|
||||
choco install openssl --version=1.1.1.2100 -y --no-progress
|
||||
echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
|
||||
echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV
|
||||
|
||||
- name: Install Rust (${{ matrix.version }})
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
|
||||
with:
|
||||
toolchain: ${{ matrix.version }}
|
||||
|
||||
- name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
|
||||
uses: taiki-e/install-action@v2.49.19
|
||||
with:
|
||||
tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean
|
||||
|
||||
- name: check lib
|
||||
if: >
|
||||
matrix.target.os != 'ubuntu-latest'
|
||||
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
|
||||
run: cargo ci-check-lib
|
||||
- name: check lib
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
run: cargo ci-check-lib-linux
|
||||
- name: check lib
|
||||
if: matrix.target.triple == 'x86_64-pc-windows-gnu'
|
||||
run: cargo ci-check-min
|
||||
|
||||
- name: check full
|
||||
# TODO: compile OpenSSL and run tests on MinGW
|
||||
if: >
|
||||
matrix.target.os != 'ubuntu-latest'
|
||||
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
|
||||
run: cargo ci-check
|
||||
- name: check all
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
run: cargo ci-check-linux
|
||||
|
||||
- name: tests
|
||||
run: just test
|
||||
|
||||
# TODO: re-instate some io-uring tests PRs
|
||||
# - name: tests
|
||||
# if: matrix.target.os == 'ubuntu-latest'
|
||||
# run: >-
|
||||
# sudo bash -c "
|
||||
# ulimit -Sl 512
|
||||
# && ulimit -Hl 512
|
||||
# && PATH=$PATH:/usr/share/rust/.cargo/bin
|
||||
# && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-rustls-020
|
||||
# && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-rustls-021
|
||||
# && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-linux
|
||||
# "
|
||||
|
||||
- name: CI cache clean
|
||||
run: cargo-ci-cache-clean
|
||||
|
||||
minimal-versions:
|
||||
name: minimal versions
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust (nightly)
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
|
||||
with:
|
||||
toolchain: nightly
|
||||
|
||||
- name: Install cargo-hack & cargo-minimal-versions
|
||||
uses: taiki-e/install-action@v2.49.19
|
||||
with:
|
||||
tool: cargo-hack,cargo-minimal-versions
|
||||
|
||||
- name: Check With Minimal Versions
|
||||
run: cargo minimal-versions check
|
.github/workflows/ci.yml (new file, 133 lines)
@@ -0,0 +1,133 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
pull_request: {}
|
||||
merge_group: { types: [checks_requested] }
|
||||
push: { branches: [master] }
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
read_msrv:
|
||||
name: Read MSRV
|
||||
uses: actions-rust-lang/msrv/.github/workflows/msrv.yml@v0.1.0
|
||||
|
||||
build_and_test:
|
||||
needs:
|
||||
- read_msrv
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# prettier-ignore
|
||||
target:
|
||||
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
|
||||
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
|
||||
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
|
||||
- { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
|
||||
version:
|
||||
- { name: msrv, version: "${{ needs.read_msrv.outputs.msrv }}" }
|
||||
- { name: stable, version: stable }
|
||||
|
||||
name: ${{ matrix.target.name }} / ${{ matrix.version.name }}
|
||||
runs-on: ${{ matrix.target.os }}
|
||||
|
||||
env: {}
|
||||
|
||||
steps:
|
||||
- name: Setup Routing
|
||||
if: matrix.target.os == 'macos-latest'
|
||||
run: sudo ifconfig lo0 alias 127.0.0.3
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Free Disk Space
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
run: ./scripts/free-disk-space.sh
|
||||
|
||||
- name: Setup mold linker
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
uses: rui314/setup-mold@v1
|
||||
|
||||
- name: Install nasm
|
||||
if: matrix.target.os == 'windows-latest'
|
||||
uses: ilammy/setup-nasm@v1.5.2
|
||||
|
||||
- name: Install OpenSSL
|
||||
if: matrix.target.os == 'windows-latest'
|
||||
shell: bash
|
||||
run: |
|
||||
set -e
|
||||
choco install openssl --version=1.1.1.2100 -y --no-progress
|
||||
echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
|
||||
echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV
|
||||
|
||||
- name: Install Rust (${{ matrix.version.name }})
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
|
||||
with:
|
||||
toolchain: ${{ matrix.version.version }}
|
||||
|
||||
- name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
|
||||
uses: taiki-e/install-action@v2.49.19
|
||||
with:
|
||||
tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
run: cargo generate-lockfile
|
||||
|
||||
- name: workaround MSRV issues
|
||||
if: matrix.version.name == 'msrv'
|
||||
run: just downgrade-for-msrv
|
||||
|
||||
- name: check lib
|
||||
if: >
|
||||
matrix.target.os != 'ubuntu-latest'
|
||||
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
|
||||
run: cargo ci-check-lib
|
||||
- name: check lib
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
run: cargo ci-check-lib-linux
|
||||
- name: check lib
|
||||
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
|
||||
run: cargo ci-check-min
|
||||
|
||||
- name: check full
|
||||
# TODO: compile OpenSSL and run tests on MinGW
|
||||
if: >
|
||||
matrix.target.os != 'ubuntu-latest'
|
||||
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
|
||||
run: cargo ci-check
|
||||
- name: check all
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
run: cargo ci-check-linux
|
||||
|
||||
- name: tests
|
||||
run: just test
|
||||
|
||||
- name: CI cache clean
|
||||
run: cargo-ci-cache-clean
|
||||
|
||||
docs:
|
||||
name: Documentation
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust (nightly)
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
|
||||
with:
|
||||
toolchain: nightly
|
||||
|
||||
- name: Install just
|
||||
uses: taiki-e/install-action@v2.49.19
|
||||
with:
|
||||
tool: just
|
||||
|
||||
- name: doc tests
|
||||
run: just test-docs
|
.github/workflows/clippy-fmt.yml (deleted, 34 lines)
@@ -1,34 +0,0 @@
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
|
||||
name: Clippy and rustfmt Check
|
||||
jobs:
|
||||
clippy_check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
components: rustfmt
|
||||
profile: minimal
|
||||
override: true
|
||||
- name: Check with rustfmt
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: fmt
|
||||
args: --all -- --check
|
||||
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: nightly
|
||||
components: clippy
|
||||
profile: minimal
|
||||
override: true
|
||||
- name: Check with Clippy
|
||||
uses: actions-rs/clippy-check@v1
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
args: --workspace --tests
|
.github/workflows/coverage.yml (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
name: Coverage
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
coverage:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
|
||||
with:
|
||||
components: llvm-tools-preview
|
||||
|
||||
- name: Install cargo-llvm-cov
|
||||
uses: taiki-e/install-action@v2.49.19
|
||||
with:
|
||||
tool: cargo-llvm-cov
|
||||
|
||||
- name: Generate code coverage
|
||||
run: cargo llvm-cov --workspace --all-features --codecov --output-path codecov.json
|
||||
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v5.4.0
|
||||
with:
|
||||
files: codecov.json
|
||||
fail_ci_if_error: true
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
.github/workflows/lint.yml (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
name: Lint
|
||||
|
||||
on:
|
||||
pull_request: {}
|
||||
merge_group: { types: [checks_requested] }
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
fmt:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
|
||||
with:
|
||||
toolchain: nightly
|
||||
components: rustfmt
|
||||
|
||||
- name: Rustfmt Check
|
||||
run: cargo fmt --all -- --check
|
||||
|
||||
clippy:
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
|
||||
with: { components: clippy }
|
||||
|
||||
- uses: giraffate/clippy-action@v1.0.1
|
||||
with:
|
||||
reporter: "github-pr-check"
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
clippy_flags: --workspace --all-features --tests --examples --bins -- -Dclippy::todo -Aunknown_lints
|
||||
|
||||
check-external-types:
|
||||
if: false # rustdoc mismatch currently
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Rust (${{ vars.RUST_VERSION_EXTERNAL_TYPES }})
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
|
||||
with:
|
||||
toolchain: ${{ vars.RUST_VERSION_EXTERNAL_TYPES }}
|
||||
|
||||
- name: Install just
|
||||
uses: taiki-e/install-action@v2.49.19
|
||||
with:
|
||||
tool: just
|
||||
|
||||
- name: Install cargo-check-external-types
|
||||
uses: taiki-e/cache-cargo-install-action@v2.1.1
|
||||
with:
|
||||
tool: cargo-check-external-types
|
||||
|
||||
- name: check external types
|
||||
run: just check-external-types-all +${{ vars.RUST_VERSION_EXTERNAL_TYPES }}
|
.github/workflows/linux.yml (deleted, 82 lines)
@@ -1,82 +0,0 @@
|
||||
name: CI (Linux)
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- '1.0'
|
||||
|
||||
jobs:
|
||||
build_and_test:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
version:
|
||||
- 1.42.0
|
||||
- stable
|
||||
- nightly
|
||||
|
||||
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install ${{ matrix.version }}
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: generate-lockfile
|
||||
- name: Cache cargo dirs
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path:
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
~/.cargo/bin
|
||||
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-trimmed-${{ hashFiles('**/Cargo.lock') }}
|
||||
- name: Cache cargo build
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: target
|
||||
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-build-trimmed-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- name: check build
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --workspace --bins --examples --tests
|
||||
|
||||
- name: tests
|
||||
uses: actions-rs/cargo@v1
|
||||
timeout-minutes: 40
|
||||
with:
|
||||
command: test
|
||||
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture
|
||||
|
||||
- name: Generate coverage file
|
||||
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
|
||||
run: |
|
||||
cargo install cargo-tarpaulin
|
||||
cargo tarpaulin --out Xml --workspace
|
||||
|
||||
- name: Upload to Codecov
|
||||
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
|
||||
uses: codecov/codecov-action@v1
|
||||
with:
|
||||
file: cobertura.xml
|
||||
|
||||
- name: Clear the cargo caches
|
||||
run: |
|
||||
rustup update stable
|
||||
rustup override set stable
|
||||
cargo install cargo-cache --no-default-features --features ci-autoclean
|
||||
cargo-cache
|
.github/workflows/macos.yml (deleted, 43 lines)
@@ -1,43 +0,0 @@
|
||||
name: CI (macOS)
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- '1.0'
|
||||
|
||||
jobs:
|
||||
build_and_test:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
version:
|
||||
- stable
|
||||
- nightly
|
||||
|
||||
name: ${{ matrix.version }} - x86_64-apple-darwin
|
||||
runs-on: macos-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install ${{ matrix.version }}
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: check build
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --workspace --bins --examples --tests
|
||||
|
||||
- name: tests
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: test
|
||||
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture
|
.github/workflows/windows-mingw.yml (deleted, 45 lines)
@@ -1,45 +0,0 @@
|
||||
name: CI (Windows-mingw)
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- '1.0'
|
||||
|
||||
jobs:
|
||||
build_and_test:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
version:
|
||||
- stable
|
||||
- nightly
|
||||
|
||||
name: ${{ matrix.version }} - x86_64-pc-windows-gnu
|
||||
runs-on: windows-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install ${{ matrix.version }}
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ matrix.version }}-x86_64-pc-windows-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Install MSYS2
|
||||
uses: msys2/setup-msys2@v2
|
||||
|
||||
- name: Install packages
|
||||
run: |
|
||||
msys2 -c 'pacman -Sy --noconfirm pacman'
|
||||
msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
|
||||
|
||||
- name: check build
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --workspace --bins --examples --tests
|
.github/workflows/windows.yml (deleted, 69 lines)
@@ -1,69 +0,0 @@
|
||||
name: CI (Windows)
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- '1.0'
|
||||
|
||||
env:
|
||||
VCPKGRS_DYNAMIC: 1
|
||||
|
||||
jobs:
|
||||
build_and_test:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
version:
|
||||
- stable
|
||||
- nightly
|
||||
target:
|
||||
- x86_64-pc-windows-msvc
|
||||
- i686-pc-windows-msvc
|
||||
|
||||
name: ${{ matrix.version }} - ${{ matrix.target }}
|
||||
runs-on: windows-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install ${{ matrix.version }}
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: ${{ matrix.version }}-${{ matrix.target }}
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Install OpenSSL (x64)
|
||||
if: matrix.target == 'x86_64-pc-windows-msvc'
|
||||
run: |
|
||||
vcpkg integrate install
|
||||
vcpkg install openssl:x64-windows
|
||||
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
|
||||
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
|
||||
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
|
||||
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
|
||||
|
||||
- name: Install OpenSSL (x86)
|
||||
if: matrix.target == 'i686-pc-windows-msvc'
|
||||
run: |
|
||||
vcpkg integrate install
|
||||
vcpkg install openssl:x86-windows
|
||||
Get-ChildItem C:\vcpkg\installed\x86-windows\bin
|
||||
Get-ChildItem C:\vcpkg\installed\x86-windows\lib
|
||||
Copy-Item C:\vcpkg\installed\x86-windows\bin\libcrypto-1_1.dll C:\vcpkg\installed\x86-windows\bin\libcrypto.dll
|
||||
Copy-Item C:\vcpkg\installed\x86-windows\bin\libssl-1_1.dll C:\vcpkg\installed\x86-windows\bin\libssl.dll
|
||||
|
||||
- name: check build
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: check
|
||||
args: --workspace --bins --examples --tests
|
||||
|
||||
- name: tests
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: test
|
||||
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture
|
.gitignore (5 lines changed)
@@ -1,4 +1,3 @@
Cargo.lock
target/
guide/build/
/gh-pages
@@ -13,4 +12,8 @@ guide/build/
# These are backup files generated by rustfmt
**/*.rs.bk

# IDEs
.idea

# direnv
/.direnv
.rustfmt.toml (new file, 3 lines)
@@ -0,0 +1,3 @@
group_imports = "StdExternalCrate"
imports_granularity = "Crate"
use_field_init_shorthand = true
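For illustration (not part of the diff), `group_imports = "StdExternalCrate"` and `imports_granularity = "Crate"` make rustfmt merge `use` statements per crate and order them std first, then external crates, then `crate`/`self` imports — the same shape the `framed.rs` imports take later in this compare view:

```rust
// Imports after formatting with the two options above: one merged `use`
// per crate, separated into std / external / crate groups.
use std::{
    io,
    task::{Context, Poll},
};

use bytes::BytesMut;
use futures_core::Stream;

use crate::{Decoder, Encoder};
```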
.taplo.toml (new file, 29 lines)
@@ -0,0 +1,29 @@
exclude = ["target/*"]
include = ["**/*.toml"]

[formatting]
column_width = 110

[[rule]]
include = ["**/Cargo.toml"]
keys = [
  "dependencies",
  "*-dependencies",
  "workspace.dependencies",
  "workspace.*-dependencies",
  "target.*.dependencies",
  "target.*.*-dependencies",
]
formatting.reorder_keys = true

[[rule]]
include = ["**/Cargo.toml"]
keys = [
  "dependencies.*",
  "*-dependencies.*",
  "workspace.dependencies.*",
  "workspace.*-dependencies.*",
  "target.*.dependencies",
  "target.*.*-dependencies",
]
formatting.reorder_keys = false
@@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
- The use of sexualized language or imagery and unwelcome sexual attention or advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

@@ -39,7 +39,7 @@ Instances of abusive, harassing, or otherwise unacceptable behavior may be repor
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

[@robjtede]: https://github.com/robjtede
[@JohnTitor]: https://github.com/JohnTitor
[@johntitor]: https://github.com/JohnTitor

## Attribution
Cargo.lock (generated, new file, 2914 lines)
File diff suppressed because it is too large.
Cargo.toml (39 lines changed)
@@ -1,31 +1,44 @@
[workspace]
resolver = "2"
members = [
  "actix-codec",
  "actix-connect",
  "actix-rt",
  "actix-macros",
  "actix-service",
  "actix-rt",
  "actix-server",
  "actix-testing",
  "actix-threadpool",
  "actix-service",
  "actix-tls",
  "actix-tracing",
  "actix-utils",
  "router",
  "string",
  "bytestring",
  "local-channel",
  "local-waker",
]

[workspace.package]
license = "MIT OR Apache-2.0"
edition = "2021"
rust-version = "1.71.1"

[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-connect = { path = "actix-connect" }
actix-rt = { path = "actix-rt" }
actix-macros = { path = "actix-macros" }
actix-rt = { path = "actix-rt" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
actix-testing = { path = "actix-testing" }
actix-threadpool = { path = "actix-threadpool" }
actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
actix-router = { path = "router" }
bytestring = { path = "string" }
bytestring = { path = "bytestring" }
local-channel = { path = "local-channel" }
local-waker = { path = "local-waker" }

[profile.release]
lto = true
opt-level = 3
codegen-units = 1

[workspace.lints.rust]
rust_2018_idioms = "deny"
nonstandard-style = "deny"
future_incompatible = "deny"
missing_docs = { level = "warn", priority = -1 }
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2017-NOW Nikolay Kim
Copyright 2017-NOW Actix Team

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
Copyright (c) 2017 Nikolay Kim
Copyright (c) 2017-NOW Actix Team

Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
README.md (78 lines changed)
@@ -1,78 +1,30 @@
# Actix net [](https://codecov.io/gh/actix/actix-net) [](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Actix Net

Actix net - framework for composable network services
> A collection of lower-level libraries for composable network services.

## Build statuses
[](https://github.com/actix/actix-net/actions/workflows/ci.yml)
[](https://codecov.io/gh/actix/actix-net)
[](https://discord.gg/NWpN5mmg3x)
[](https://deps.rs/repo/github/actix/actix-net)

| Platform | Build Status |
| ---------------- | ------------ |
| Linux | [](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Linux)") |
| macOS | [](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(macOS)") |
| Windows | [](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows)") |
| Windows (MinGW) | [](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows-mingw)") |
## Examples

## Documentation & community resources
See example folders for [`actix-server`](./actix-server/examples) and [`actix-tls`](./actix-tls/examples).

* [Chat on Gitter](https://gitter.im/actix/actix)
* Minimum supported Rust version: 1.42 or later
## MSRV

## Example

```rust
fn main() -> io::Result<()> {
    // load ssl keys
    let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    builder.set_private_key_file("./examples/key.pem", SslFiletype::PEM).unwrap();
    builder.set_certificate_chain_file("./examples/cert.pem").unwrap();
    let acceptor = builder.build();

    let num = Arc::new(AtomicUsize::new(0));

    // bind socket address and start workers. By default server uses number of
    // available logical cpu as threads count. actix net start separate
    // instances of service pipeline in each worker.
    Server::build()
        .bind(
            // configure service pipeline
            "basic", "0.0.0.0:8443",
            move || {
                let num = num.clone();
                let acceptor = acceptor.clone();

                // construct transformation pipeline
                pipeline(
                    // service for converting incoming TcpStream to a SslStream<TcpStream>
                    fn_service(move |stream: actix_rt::net::TcpStream| async move {
                        SslAcceptorExt::accept_async(&acceptor, stream.into_parts().0).await
                            .map_err(|e| println!("Openssl error: {}", e))
                    }))
                    // .and_then() combinator chains result of previos service call to argument
                    /// for next service calll. in this case, on success we chain
                    /// ssl stream to the `logger` service.
                    .and_then(fn_service(logger))
                    // Next service counts number of connections
                    .and_then(move |_| {
                        let num = num.fetch_add(1, Ordering::Relaxed);
                        println!("got ssl connection {:?}", num);
                        future::ok(())
                    })
            },
        )?
        .run()
}
```
Crates in this repo currently have a Minimum Supported Rust Version (MSRV) of 1.65. As a policy, we permit MSRV increases in non-breaking releases.

## License

This project is licensed under either of
The crates in repo are licensed under either of:

* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))

at your option.

## Code of Conduct

Contribution to the actix-net crate is organized under the terms of the
Contributor Covenant, the maintainer of actix-net, @fafhrd91, promises to
intervene to uphold that code of conduct.
Contribution to the actix-net repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.
@@ -1,49 +1,85 @@
# Changes

## Unreleased - 2020-xx-xx
* Upgrade `pin-project` to `1.0`.
## Unreleased

## 0.3.0 - 2020-08-23
* No changes from beta 2.
- Minimum supported Rust version (MSRV) is now 1.71.

## 0.3.0-beta.2 - 2020-08-19
* Remove unused type parameter from `Framed::replace_codec`.
## 0.5.2

## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec` `.encode()` performance
* Simplify `BytesCodec` `.decode()`
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` check emptiness of read buffer.
- Minimum supported Rust version (MSRV) is now 1.65.

## [0.2.0] - 2019-12-10
## 0.5.1

* Use specific futures dependencies
- Logs emitted now use the `tracing` crate with `log` compatibility.
- Minimum supported Rust version (MSRV) is now 1.49.

## [0.2.0-alpha.4]
## 0.5.0

* Fix buffer remaining capacity calculation
- Updated `tokio-util` dependency to `0.7.0`.

## [0.2.0-alpha.3]
## 0.4.2

* Use tokio 0.2
- No significant changes since `0.4.1`.

* Fix low/high watermark for write/read buffers
## 0.4.1

## [0.2.0-alpha.2]
- Added `LinesCodec`.
- `Framed::poll_ready` flushes when the buffer is full.

* Migrated to `std::future`
## 0.4.0

## [0.1.2] - 2019-03-27
- No significant changes since v0.4.0-beta.1.

* Added `Framed::map_io()` method.
## 0.4.0-beta.1

## [0.1.1] - 2019-03-06
- Replace `pin-project` with `pin-project-lite`.
- Upgrade `tokio` dependency to `1`.
- Upgrade `tokio-util` dependency to `0.6`.
- Upgrade `bytes` dependency to `1`.

* Added `FramedParts::with_read_buffer()` method.
## 0.3.0

## [0.1.0] - 2018-12-09
- No changes from beta 2.

* Move codec to separate crate
## 0.3.0-beta.2

- Remove unused type parameter from `Framed::replace_codec`.

## 0.3.0-beta.1

- Use `.advance()` instead of `.split_to()`.
- Upgrade `tokio-util` to `0.3`.
- Improve `BytesCodec::encode()` performance.
- Simplify `BytesCodec::decode()`.
- Rename methods on `Framed` to better describe their use.
- Add method on `Framed` to get a pinned reference to the underlying I/O.
- Add method on `Framed` check emptiness of read buffer.

## 0.2.0

- Use specific futures dependencies.

## 0.2.0-alpha.4

- Fix buffer remaining capacity calculation.

## 0.2.0-alpha.3

- Use tokio 0.2.
- Fix low/high watermark for write/read buffers.

## 0.2.0-alpha.2

- Migrated to `std::future`.

## 0.1.2

- Added `Framed::map_io()` method.

## 0.1.1

- Added `FramedParts::with_read_buffer()` method.

## 0.1.0

- Move codec to separate crate.
@@ -1,26 +1,36 @@
[package]
name = "actix-codec"
version = "0.3.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Codec utilities for working with framed protocols."
version = "0.5.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>", "Rob Ede <robjtede@icloud.com>"]
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec/"
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
edition.workspace = true
rust-version.workspace = true

[lib]
name = "actix_codec"
path = "src/lib.rs"
[package.metadata.cargo_check_external_types]
allowed_external_types = ["bytes::*", "futures_core::*", "futures_sink::*", "tokio::*", "tokio_util::*"]

[dependencies]
bitflags = "1.2.1"
bytes = "0.5.2"
futures-core = { version = "0.3.4", default-features = false }
futures-sink = { version = "0.3.4", default-features = false }
log = "0.4"
pin-project = "1.0.0"
tokio = { version = "0.2.5", default-features = false }
tokio-util = { version = "0.3.1", default-features = false, features = ["codec"] }
bitflags = "2"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
memchr = "2.3"
pin-project-lite = "0.2"
tokio = "1.23.1"
tokio-util = { version = "0.7", features = ["codec", "io"] }
tracing = { version = "0.1.30", default-features = false, features = ["log"] }

[dev-dependencies]
criterion = { version = "0.5", features = ["html_reports"] }
tokio-test = "0.4.2"

[[bench]]
name = "lines"
harness = false

[lints]
workspace = true
actix-codec/benches/lines.rs (new file, 59 lines)
@@ -0,0 +1,59 @@
#![allow(missing_docs)]

use bytes::BytesMut;
use criterion::{criterion_group, criterion_main, Criterion};

const INPUT: &[u8] = include_bytes!("./lorem.txt");

fn bench_lines_codec(c: &mut Criterion) {
    let mut decode_group = c.benchmark_group("lines decode");

    decode_group.bench_function("actix", |b| {
        b.iter(|| {
            use actix_codec::Decoder as _;

            let mut codec = actix_codec::LinesCodec::default();
            let mut buf = BytesMut::from(INPUT);
            while let Ok(Some(_bytes)) = codec.decode_eof(&mut buf) {}
        });
    });

    decode_group.bench_function("tokio", |b| {
        b.iter(|| {
            use tokio_util::codec::Decoder as _;

            let mut codec = tokio_util::codec::LinesCodec::new();
            let mut buf = BytesMut::from(INPUT);
            while let Ok(Some(_bytes)) = codec.decode_eof(&mut buf) {}
        });
    });

    decode_group.finish();

    let mut encode_group = c.benchmark_group("lines encode");

    encode_group.bench_function("actix", |b| {
        b.iter(|| {
            use actix_codec::Encoder as _;

            let mut codec = actix_codec::LinesCodec::default();
            let mut buf = BytesMut::new();
            codec.encode("123", &mut buf).unwrap();
        });
    });

    encode_group.bench_function("tokio", |b| {
        b.iter(|| {
            use tokio_util::codec::Encoder as _;

            let mut codec = tokio_util::codec::LinesCodec::new();
            let mut buf = BytesMut::new();
            codec.encode("123", &mut buf).unwrap();
        });
    });

    encode_group.finish();
}

criterion_group!(benches, bench_lines_codec);
criterion_main!(benches);
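Outside the benchmark above, these codecs are usually driven through `Framed` (whose diff appears further down) rather than by calling `decode_eof` directly. A minimal echo sketch, assuming an already-connected `tokio::net::TcpStream` and the `futures-util` crate for the `StreamExt`/`SinkExt` helpers (neither is pinned by this diff):

```rust
use actix_codec::{Framed, LinesCodec};
use futures_util::{SinkExt as _, StreamExt as _};
use tokio::net::TcpStream;

async fn echo_lines(stream: TcpStream) -> std::io::Result<()> {
    // `Framed` pairs the raw stream with the codec, exposing a `Stream` of
    // decoded lines and a `Sink` that encodes outgoing lines.
    let mut framed = Framed::new(stream, LinesCodec::default());

    while let Some(line) = framed.next().await {
        let line = line?;
        framed.send(line).await?;
    }

    Ok(())
}
```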
actix-codec/benches/lorem.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
(Five paragraphs of "Lorem ipsum" filler text, used as the input fixture for the lines benchmark.)
@@ -1,11 +1,10 @@
use bytes::{Buf, Bytes, BytesMut};
use std::io;

use bytes::{Buf, Bytes, BytesMut};

use super::{Decoder, Encoder};

/// Bytes codec.
///
/// Reads/Writes chunks of bytes from a stream.
/// Bytes codec. Reads/writes chunks of bytes from a stream.
#[derive(Debug, Copy, Clone)]
pub struct BytesCodec;

@@ -14,7 +13,7 @@ impl Encoder<Bytes> for BytesCodec {

    #[inline]
    fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> Result<(), Self::Error> {
        dst.extend_from_slice(item.bytes());
        dst.extend_from_slice(item.chunk());
        Ok(())
    }
}
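The change above only swaps `Buf::bytes()`, which `bytes` 1.x renamed to `Buf::chunk()`; behaviour is unchanged. A minimal sketch of the encoder side (illustrative, not taken from the repo's tests):

```rust
use actix_codec::{BytesCodec, Encoder as _};
use bytes::{Bytes, BytesMut};

fn main() {
    let mut codec = BytesCodec;
    let mut dst = BytesMut::new();

    // `encode` appends the item's bytes onto the destination buffer.
    codec
        .encode(Bytes::from_static(b"ping"), &mut dst)
        .unwrap();

    assert_eq!(&dst[..], b"ping");
}
```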
@@ -1,11 +1,14 @@
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use std::{fmt, io};
+use std::{
+    fmt, io,
+    pin::Pin,
+    task::{Context, Poll},
+};
 
+use bitflags::bitflags;
 use bytes::{Buf, BytesMut};
 use futures_core::{ready, Stream};
 use futures_sink::Sink;
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
 
 use crate::{AsyncRead, AsyncWrite, Decoder, Encoder};
 
@@ -14,29 +17,30 @@ const LW: usize = 1024;
 /// High-water mark
 const HW: usize = 8 * 1024;
 
-bitflags::bitflags! {
+bitflags! {
+    #[derive(Debug, Clone, Copy)]
     struct Flags: u8 {
         const EOF = 0b0001;
         const READABLE = 0b0010;
     }
 }
 
-/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
-/// the `Encoder` and `Decoder` traits to encode and decode frames.
-///
-/// Raw I/O objects work with byte sequences, but higher-level code usually
-/// wants to batch these into meaningful chunks, called "frames". This
-/// method layers framing on top of an I/O object, by using the `Encoder`/`Decoder`
-/// traits to handle encoding and decoding of message frames. Note that
-/// the incoming and outgoing frame types may be distinct.
-#[pin_project]
-pub struct Framed<T, U> {
-    #[pin]
-    io: T,
-    codec: U,
-    flags: Flags,
-    read_buf: BytesMut,
-    write_buf: BytesMut,
+pin_project! {
+    /// A unified `Stream` and `Sink` interface to an underlying I/O object, using the `Encoder` and
+    /// `Decoder` traits to encode and decode frames.
+    ///
+    /// Raw I/O objects work with byte sequences, but higher-level code usually wants to batch these
+    /// into meaningful chunks, called "frames". This method layers framing on top of an I/O object,
+    /// by using the `Encoder`/`Decoder` traits to handle encoding and decoding of message frames.
+    /// Note that the incoming and outgoing frame types may be distinct.
+    pub struct Framed<T, U> {
+        #[pin]
+        io: T,
+        codec: U,
+        flags: Flags,
+        read_buf: BytesMut,
+        write_buf: BytesMut,
+    }
 }
 
 impl<T, U> Framed<T, U>
@@ -44,10 +48,9 @@ where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
U: Decoder,
|
||||
{
|
||||
/// This function returns a *single* object that is both `Stream` and
|
||||
/// `Sink`; grouping this into a single object is often useful for layering
|
||||
/// things like gzip or TLS, which require both read and write access to the
|
||||
/// underlying object.
|
||||
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
|
||||
/// a single object is often useful for layering things like gzip or TLS, which require both
|
||||
/// read and write access to the underlying object.
|
||||
pub fn new(io: T, codec: U) -> Framed<T, U> {
|
||||
Framed {
|
||||
io,
|
||||
@@ -70,21 +73,18 @@ impl<T, U> Framed<T, U> {
|
||||
&mut self.codec
|
||||
}
|
||||
|
||||
/// Returns a reference to the underlying I/O stream wrapped by
|
||||
/// `Frame`.
|
||||
/// Returns a reference to the underlying I/O stream wrapped by `Frame`.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn io_ref(&self) -> &T {
|
||||
&self.io
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying I/O stream.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn io_mut(&mut self) -> &mut T {
|
||||
&mut self.io
|
||||
}
|
||||
@@ -157,7 +157,7 @@ impl<T, U> Framed<T, U> {
|
||||
}
|
||||
|
||||
impl<T, U> Framed<T, U> {
|
||||
/// Serialize item and Write to the inner buffer
|
||||
/// Serialize item and write to the inner buffer
|
||||
pub fn write<I>(mut self: Pin<&mut Self>, item: I) -> Result<(), <U as Encoder<I>>::Error>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
@@ -183,30 +183,29 @@ impl<T, U> Framed<T, U> {
|
||||
U: Decoder,
|
||||
{
|
||||
loop {
|
||||
let mut this = self.as_mut().project();
|
||||
// Repeatedly call `decode` or `decode_eof` as long as it is
|
||||
// "readable". Readable is defined as not having returned `None`. If
|
||||
// the upstream has returned EOF, and the decoder is no longer
|
||||
// readable, it can be assumed that the decoder will never become
|
||||
let this = self.as_mut().project();
|
||||
// Repeatedly call `decode` or `decode_eof` as long as it is "readable". Readable is
|
||||
// defined as not having returned `None`. If the upstream has returned EOF, and the
|
||||
// decoder is no longer readable, it can be assumed that the decoder will never become
|
||||
// readable again, at which point the stream is terminated.
|
||||
|
||||
if this.flags.contains(Flags::READABLE) {
|
||||
if this.flags.contains(Flags::EOF) {
|
||||
match this.codec.decode_eof(&mut this.read_buf) {
|
||||
match this.codec.decode_eof(this.read_buf) {
|
||||
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
|
||||
Ok(None) => return Poll::Ready(None),
|
||||
Err(e) => return Poll::Ready(Some(Err(e))),
|
||||
Err(err) => return Poll::Ready(Some(Err(err))),
|
||||
}
|
||||
}
|
||||
|
||||
log::trace!("attempting to decode a frame");
|
||||
tracing::trace!("attempting to decode a frame");
|
||||
|
||||
match this.codec.decode(&mut this.read_buf) {
|
||||
match this.codec.decode(this.read_buf) {
|
||||
Ok(Some(frame)) => {
|
||||
log::trace!("frame decoded from buffer");
|
||||
tracing::trace!("frame decoded from buffer");
|
||||
return Poll::Ready(Some(Ok(frame)));
|
||||
}
|
||||
Err(e) => return Poll::Ready(Some(Err(e))),
|
||||
Err(err) => return Poll::Ready(Some(Err(err))),
|
||||
_ => (), // Need more data
|
||||
}
|
||||
|
||||
@@ -215,14 +214,15 @@ impl<T, U> Framed<T, U> {
|
||||
|
||||
debug_assert!(!this.flags.contains(Flags::EOF));
|
||||
|
||||
// Otherwise, try to read more data and try again. Make sure we've got room
|
||||
// Otherwise, try to read more data and try again. Make sure we've got room.
|
||||
let remaining = this.read_buf.capacity() - this.read_buf.len();
|
||||
if remaining < LW {
|
||||
this.read_buf.reserve(HW - remaining)
|
||||
}
|
||||
let cnt = match this.io.poll_read_buf(cx, &mut this.read_buf) {
|
||||
|
||||
let cnt = match tokio_util::io::poll_read_buf(this.io, cx, this.read_buf) {
|
||||
Poll::Pending => return Poll::Pending,
|
||||
Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e.into()))),
|
||||
Poll::Ready(Err(err)) => return Poll::Ready(Some(Err(err.into()))),
|
||||
Poll::Ready(Ok(cnt)) => cnt,
|
||||
};
|
||||
|
||||
@@ -234,19 +234,16 @@ impl<T, U> Framed<T, U> {
|
||||
}
|
||||
|
||||
/// Flush write buffer to underlying I/O stream.
|
||||
pub fn flush<I>(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Result<(), U::Error>>
|
||||
pub fn flush<I>(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), U::Error>>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
U: Encoder<I>,
|
||||
{
|
||||
let mut this = self.as_mut().project();
|
||||
log::trace!("flushing framed transport");
|
||||
tracing::trace!("flushing framed transport");
|
||||
|
||||
while !this.write_buf.is_empty() {
|
||||
log::trace!("writing; remaining={}", this.write_buf.len());
|
||||
tracing::trace!("writing; remaining={}", this.write_buf.len());
|
||||
|
||||
let n = ready!(this.io.as_mut().poll_write(cx, this.write_buf))?;
|
||||
|
||||
@@ -265,15 +262,12 @@ impl<T, U> Framed<T, U> {
|
||||
// Try flushing the underlying IO
|
||||
ready!(this.io.poll_flush(cx))?;
|
||||
|
||||
log::trace!("framed transport flushed");
|
||||
tracing::trace!("framed transport flushed");
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
/// Flush write buffer and shutdown underlying I/O stream.
|
||||
pub fn close<I>(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Result<(), U::Error>>
|
||||
pub fn close<I>(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), U::Error>>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
U: Encoder<I>,
|
||||
@@ -305,11 +299,11 @@ where
|
||||
{
|
||||
type Error = U::Error;
|
||||
|
||||
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
if self.is_write_ready() {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Poll::Pending
|
||||
self.flush(cx)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -340,13 +334,12 @@ where
|
||||
}
|
||||
|
||||
impl<T, U> Framed<T, U> {
|
||||
/// This function returns a *single* object that is both `Stream` and
|
||||
/// `Sink`; grouping this into a single object is often useful for layering
|
||||
/// things like gzip or TLS, which require both read and write access to the
|
||||
/// underlying object.
|
||||
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
|
||||
/// a single object is often useful for layering things like gzip or TLS, which require both
|
||||
/// read and write access to the underlying object.
|
||||
///
|
||||
/// These objects take a stream, a read buffer and a write buffer. These
|
||||
/// fields can be obtained from an existing `Framed` with the `into_parts` method.
|
||||
/// These objects take a stream, a read buffer and a write buffer. These fields can be obtained
|
||||
/// from an existing `Framed` with the `into_parts` method.
|
||||
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
|
||||
Framed {
|
||||
io: parts.io,
|
||||
@@ -357,12 +350,11 @@ impl<T, U> Framed<T, U> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer
|
||||
/// with unprocessed data, and the codec.
|
||||
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer with unprocessed data,
|
||||
/// and the codec.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn into_parts(self) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io: self.io,
|
||||
@@ -375,14 +367,15 @@ impl<T, U> Framed<T, U> {
|
||||
}
|
||||
|
||||
/// `FramedParts` contains an export of the data of a Framed transport.
|
||||
/// It can be used to construct a new `Framed` with a different codec.
|
||||
/// It contains all current buffers and the inner transport.
|
||||
///
|
||||
/// It can be used to construct a new `Framed` with a different codec. It contains all current
|
||||
/// buffers and the inner transport.
|
||||
#[derive(Debug)]
|
||||
pub struct FramedParts<T, U> {
|
||||
/// The inner transport used to read bytes to and write bytes to
|
||||
/// The inner transport used to read bytes to and write bytes to.
|
||||
pub io: T,
|
||||
|
||||
/// The codec
|
||||
/// The codec object.
|
||||
pub codec: U,
|
||||
|
||||
/// The buffer with read but unprocessed data.
|
||||
@@ -395,7 +388,7 @@ pub struct FramedParts<T, U> {
|
||||
}
|
||||
|
||||
impl<T, U> FramedParts<T, U> {
|
||||
/// Create a new, default, `FramedParts`
|
||||
/// Creates a new default `FramedParts`.
|
||||
pub fn new(io: T, codec: U) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io,
|
||||
@@ -406,7 +399,7 @@ impl<T, U> FramedParts<T, U> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `FramedParts` with read buffer
|
||||
/// Creates a new `FramedParts` with read buffer.
|
||||
pub fn with_read_buf(io: T, codec: U, read_buf: BytesMut) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io,
|
||||
|
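The `Encoder`/`Decoder` pair is the extension point the `Framed` docs above describe: `decode` returns `Ok(None)` to signal "need more data", which is how the read loop decides whether to read again. A minimal sketch of a length-prefixed codec under those rules (the name `U32Codec` and the frame layout are illustrative, not part of the crate):

```rust
use std::io;

use actix_codec::{Decoder, Encoder};
use bytes::{Buf, BufMut, BytesMut};

/// Illustrative codec: each frame is a big-endian u32 length prefix followed by the payload.
struct U32Codec;

impl Encoder<Vec<u8>> for U32Codec {
    type Error = io::Error;

    fn encode(&mut self, item: Vec<u8>, dst: &mut BytesMut) -> io::Result<()> {
        dst.reserve(4 + item.len());
        dst.put_u32(item.len() as u32);
        dst.put_slice(&item);
        Ok(())
    }
}

impl Decoder for U32Codec {
    type Item = Vec<u8>;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> io::Result<Option<Vec<u8>>> {
        if src.len() < 4 {
            // Need more data: the frame header is incomplete.
            return Ok(None);
        }

        // Peek at the length prefix without consuming it.
        let mut header = &src[..4];
        let len = header.get_u32() as usize;

        if src.len() < 4 + len {
            // Need more data: the payload is incomplete.
            return Ok(None);
        }

        src.advance(4);
        Ok(Some(src.split_to(len).to_vec()))
    }
}
```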
@@ -1,22 +1,26 @@
-//! Utilities for encoding and decoding frames.
+//! Codec utilities for working with framed protocols.
 //!
-//! Contains adapters to go from streams of bytes, [`AsyncRead`] and
-//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`].
-//! Framed streams are also known as `transports`.
+//! Contains adapters to go from streams of bytes, [`AsyncRead`] and [`AsyncWrite`], to framed
+//! streams implementing [`Sink`] and [`Stream`]. Framed streams are also known as `transports`.
 //!
 //! [`AsyncRead`]: AsyncRead
 //! [`AsyncWrite`]: AsyncWrite
 //! [`Sink`]: futures_sink::Sink
 //! [`Stream`]: futures_core::Stream
 
 #![deny(rust_2018_idioms)]
 #![warn(missing_docs)]
 #![doc(html_logo_url = "https://actix.rs/img/logo.png")]
 #![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
 
+pub use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
+pub use tokio_util::{
+    codec::{Decoder, Encoder},
+    io::poll_read_buf,
+};
+
 mod bcodec;
 mod framed;
+mod lines;
 
-pub use self::bcodec::BytesCodec;
-pub use self::framed::{Framed, FramedParts};
-
-pub use tokio::io::{AsyncRead, AsyncWrite};
-pub use tokio_util::codec::{Decoder, Encoder};
+pub use self::{
+    bcodec::BytesCodec,
+    framed::{Framed, FramedParts},
+    lines::LinesCodec,
+};
actix-codec/src/lines.rs (new file, 158 lines)
@@ -0,0 +1,158 @@
|
||||
use std::io;
|
||||
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use memchr::memchr;
|
||||
|
||||
use super::{Decoder, Encoder};
|
||||
|
||||
/// Lines codec. Reads/writes line delimited strings.
|
||||
///
|
||||
/// Will split input up by LF or CRLF delimiters. Carriage return characters at the end of lines are
|
||||
/// not preserved.
|
||||
#[derive(Debug, Copy, Clone, Default)]
|
||||
#[non_exhaustive]
|
||||
pub struct LinesCodec;
|
||||
|
||||
impl<T: AsRef<str>> Encoder<T> for LinesCodec {
|
||||
type Error = io::Error;
|
||||
|
||||
#[inline]
|
||||
fn encode(&mut self, item: T, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
let item = item.as_ref();
|
||||
dst.reserve(item.len() + 1);
|
||||
dst.put_slice(item.as_bytes());
|
||||
dst.put_u8(b'\n');
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for LinesCodec {
|
||||
type Item = String;
|
||||
type Error = io::Error;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
if src.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let len = match memchr(b'\n', src) {
|
||||
Some(n) => n,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
// split up to new line char
|
||||
let mut buf = src.split_to(len);
|
||||
debug_assert_eq!(len, buf.len());
|
||||
|
||||
// remove new line char from source
|
||||
src.advance(1);
|
||||
|
||||
match buf.last() {
|
||||
// remove carriage returns at the end of buf
|
||||
Some(b'\r') => buf.truncate(len - 1),
|
||||
|
||||
// line is empty
|
||||
None => return Ok(Some(String::new())),
|
||||
|
||||
_ => {}
|
||||
}
|
||||
|
||||
try_into_utf8(buf.freeze())
|
||||
}
|
||||
|
||||
fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
match self.decode(src)? {
|
||||
Some(frame) => Ok(Some(frame)),
|
||||
None if src.is_empty() => Ok(None),
|
||||
None => {
|
||||
let buf = match src.last() {
|
||||
// if last line ends in a CR then take everything up to it
|
||||
Some(b'\r') => src.split_to(src.len() - 1),
|
||||
|
||||
// take all bytes from source
|
||||
_ => src.split(),
|
||||
};
|
||||
|
||||
if buf.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
try_into_utf8(buf.freeze())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Attempts to convert bytes into a `String`.
|
||||
fn try_into_utf8(buf: Bytes) -> io::Result<Option<String>> {
|
||||
String::from_utf8(buf.to_vec())
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
|
||||
.map(Some)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bytes::BufMut as _;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn lines_decoder() {
|
||||
let mut codec = LinesCodec::default();
|
||||
let mut buf = BytesMut::from("\nline 1\nline 2\r\nline 3\n\r\n\r");
|
||||
|
||||
assert_eq!("", codec.decode(&mut buf).unwrap().unwrap());
|
||||
assert_eq!("line 1", codec.decode(&mut buf).unwrap().unwrap());
|
||||
assert_eq!("line 2", codec.decode(&mut buf).unwrap().unwrap());
|
||||
assert_eq!("line 3", codec.decode(&mut buf).unwrap().unwrap());
|
||||
assert_eq!("", codec.decode(&mut buf).unwrap().unwrap());
|
||||
assert!(codec.decode(&mut buf).unwrap().is_none());
|
||||
assert!(codec.decode_eof(&mut buf).unwrap().is_none());
|
||||
|
||||
buf.put_slice(b"k");
|
||||
assert!(codec.decode(&mut buf).unwrap().is_none());
|
||||
assert_eq!("\rk", codec.decode_eof(&mut buf).unwrap().unwrap());
|
||||
|
||||
assert!(codec.decode(&mut buf).unwrap().is_none());
|
||||
assert!(codec.decode_eof(&mut buf).unwrap().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lines_encoder() {
|
||||
let mut codec = LinesCodec::default();
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
|
||||
codec.encode("", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"\n");
|
||||
|
||||
codec.encode("test", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"\ntest\n");
|
||||
|
||||
codec.encode("a\nb", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"\ntest\na\nb\n");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lines_encoder_no_overflow() {
|
||||
let mut codec = LinesCodec::default();
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
codec.encode("1234567", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"1234567\n");
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
codec.encode("12345678", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"12345678\n");
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
codec.encode("123456789111213", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"123456789111213\n");
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
codec.encode("1234567891112131", &mut buf).unwrap();
|
||||
assert_eq!(&buf[..], b"1234567891112131\n");
|
||||
}
|
||||
}
|
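Putting the pieces of this changeset together, a hedged sketch of using the new `LinesCodec` through `Framed` over a TCP connection; `SinkExt`/`StreamExt` come from the `futures` crates rather than this crate, and the function name is a placeholder:

```rust
use actix_codec::{Framed, LinesCodec};
use futures_util::{SinkExt as _, StreamExt as _};
use tokio::net::TcpStream;

async fn greet(stream: TcpStream) -> std::io::Result<()> {
    // `LinesCodec` is #[non_exhaustive], so construct it via `Default`.
    // `framed` is now both a `Stream` of `String` frames and a `Sink`
    // for anything that is `AsRef<str>`.
    let mut framed = Framed::new(stream, LinesCodec::default());

    // Sink side: encode a line into the write buffer and flush it.
    framed.send("hello").await?;

    // Stream side: wait for the next decoded line from the peer.
    if let Some(line) = framed.next().await {
        println!("peer said: {}", line?);
    }

    Ok(())
}
```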
actix-codec/tests/test_framed_sink.rs (new file, 224 lines)
@@ -0,0 +1,224 @@
|
||||
#![allow(missing_docs)]
|
||||
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
io::{self, Write},
|
||||
pin::Pin,
|
||||
task::{
|
||||
Context,
|
||||
Poll::{self, Pending, Ready},
|
||||
},
|
||||
};
|
||||
|
||||
use actix_codec::*;
|
||||
use bytes::{Buf as _, BufMut as _, BytesMut};
|
||||
use futures_sink::Sink;
|
||||
use tokio_test::{assert_ready, task};
|
||||
|
||||
macro_rules! bilateral {
|
||||
($($x:expr,)*) => {{
|
||||
let mut v = VecDeque::new();
|
||||
v.extend(vec![$($x),*]);
|
||||
Bilateral { calls: v }
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! assert_ready {
|
||||
($e:expr) => {{
|
||||
use core::task::Poll::*;
|
||||
match $e {
|
||||
Ready(v) => v,
|
||||
Pending => panic!("pending"),
|
||||
}
|
||||
}};
|
||||
($e:expr, $($msg:tt),+) => {{
|
||||
use core::task::Poll::*;
|
||||
match $e {
|
||||
Ready(v) => v,
|
||||
Pending => {
|
||||
let msg = format_args!($($msg),+);
|
||||
panic!("pending; {}", msg)
|
||||
}
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Bilateral {
|
||||
pub calls: VecDeque<io::Result<Vec<u8>>>,
|
||||
}
|
||||
|
||||
impl Write for Bilateral {
|
||||
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
|
||||
match self.calls.pop_front() {
|
||||
Some(Ok(data)) => {
|
||||
assert!(src.len() >= data.len());
|
||||
assert_eq!(&data[..], &src[..data.len()]);
|
||||
Ok(data.len())
|
||||
}
|
||||
Some(Err(err)) => Err(err),
|
||||
None => panic!("unexpected write; {:?}", src),
|
||||
}
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncWrite for Bilateral {
|
||||
fn poll_write(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<Result<usize, io::Error>> {
|
||||
match Pin::get_mut(self).write(buf) {
|
||||
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Pending,
|
||||
other => Ready(other),
|
||||
}
|
||||
}
|
||||
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
|
||||
match Pin::get_mut(self).flush() {
|
||||
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Pending,
|
||||
other => Ready(other),
|
||||
}
|
||||
}
|
||||
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncRead for Bilateral {
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<Result<(), std::io::Error>> {
|
||||
use io::ErrorKind::WouldBlock;
|
||||
|
||||
match self.calls.pop_front() {
|
||||
Some(Ok(data)) => {
|
||||
debug_assert!(buf.remaining() >= data.len());
|
||||
buf.put_slice(&data);
|
||||
Ready(Ok(()))
|
||||
}
|
||||
Some(Err(ref err)) if err.kind() == WouldBlock => Pending,
|
||||
Some(Err(err)) => Ready(Err(err)),
|
||||
None => Ready(Ok(())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct U32;
|
||||
|
||||
impl Encoder<u32> for U32 {
|
||||
type Error = io::Error;
|
||||
|
||||
fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> {
|
||||
// Reserve space
|
||||
dst.reserve(4);
|
||||
dst.put_u32(item);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for U32 {
|
||||
type Item = u32;
|
||||
type Error = io::Error;
|
||||
|
||||
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u32>> {
|
||||
if buf.len() < 4 {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let n = buf.split_to(4).get_u32();
|
||||
Ok(Some(n))
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_hits_highwater_mark() {
|
||||
// see here for what this test is based on:
|
||||
// https://github.com/tokio-rs/tokio/blob/75c07770bfbfea4e5fd914af819c741ed9c3fc36/tokio-util/tests/framed_write.rs#L69
|
||||
|
||||
const ITER: usize = 2 * 1024;
|
||||
|
||||
let mut bi = bilateral! {
|
||||
Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")),
|
||||
Ok(b"".to_vec()),
|
||||
};
|
||||
|
||||
for i in 0..=ITER {
|
||||
let mut b = BytesMut::with_capacity(4);
|
||||
b.put_u32(i as u32);
|
||||
|
||||
// Append to the end
|
||||
match bi.calls.back_mut().unwrap() {
|
||||
Ok(ref mut data) => {
|
||||
// Write in 2kb chunks
|
||||
if data.len() < ITER {
|
||||
data.extend_from_slice(&b[..]);
|
||||
continue;
|
||||
} // else fall through and create a new buffer
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
// Push a new chunk
|
||||
bi.calls.push_back(Ok(b[..].to_vec()));
|
||||
}
|
||||
|
||||
assert_eq!(bi.calls.len(), 6);
|
||||
let mut framed = Framed::new(bi, U32);
|
||||
// Send 8KB. This fills up FramedWrite2 buffer
|
||||
let mut task = task::spawn(());
|
||||
task.enter(|cx, _| {
|
||||
// Send 8KB. This fills up Framed buffer
|
||||
for i in 0..ITER {
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
|
||||
}
|
||||
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// write the buffer
|
||||
assert!(framed.start_send(i as u32).is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
|
||||
// Now we poll_ready which forces a flush. The bilateral pops the front message
|
||||
// and decides to block.
|
||||
assert!(framed.poll_ready(cx).is_pending());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// We poll again, forcing another flush, which this time succeeds
|
||||
// The whole 8KB buffer is flushed
|
||||
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// Send more data. This matches the final message expected by the bilateral
|
||||
assert!(framed.start_send(ITER as u32).is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// Flush the rest of the buffer
|
||||
assert!(assert_ready!(framed.poll_flush(cx)).is_ok());
|
||||
}
|
||||
|
||||
// Ensure the mock is empty
|
||||
assert_eq!(0, Pin::new(&framed).get_ref().io_ref().calls.len());
|
||||
});
|
||||
}
|
@@ -1,154 +0,0 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2020-xx-xx
|
||||
|
||||
|
||||
## 2.0.0 - 2020-09-02
|
||||
- No significant changes from `2.0.0-alpha.4`.
|
||||
|
||||
## 2.0.0-alpha.4 - 2020-08-17
|
||||
|
||||
### Changed
|
||||
|
||||
* Update `rustls` dependency to 0.18
|
||||
* Update `tokio-rustls` dependency to 0.14
|
||||
|
||||
|
||||
## [2.0.0-alpha.3] - 2020-05-08
|
||||
|
||||
### Fixed
|
||||
|
||||
* Corrected spelling of `ConnectError::Unresolverd` to `ConnectError::Unresolved`
|
||||
|
||||
## [2.0.0-alpha.2] - 2020-03-08
|
||||
|
||||
### Changed
|
||||
|
||||
* Update `trust-dns-proto` dependency to 0.19. [#116]
|
||||
* Update `trust-dns-resolver` dependency to 0.19. [#116]
|
||||
* `Address` trait is now required to have static lifetime. [#116]
|
||||
* `start_resolver` and `start_default_resolver` are now `async` and may return a `ConnectError`. [#116]
|
||||
|
||||
[#116]: https://github.com/actix/actix-net/pull/116
|
||||
|
||||
## [2.0.0-alpha.1] - 2020-03-03
|
||||
|
||||
### Changed
|
||||
|
||||
* Update `rustls` dependency to 0.17
|
||||
* Update `tokio-rustls` dependency to 0.13
|
||||
|
||||
## [1.0.2] - 2020-01-15
|
||||
|
||||
* Fix actix-service 1.0.3 compatibility
|
||||
|
||||
## [1.0.1] - 2019-12-15
|
||||
|
||||
* Fix trust-dns-resolver compilation
|
||||
|
||||
## [1.0.0] - 2019-12-11
|
||||
|
||||
* Release
|
||||
|
||||
## [1.0.0-alpha.3] - 2019-12-07
|
||||
|
||||
### Changed
|
||||
|
||||
* Migrate to tokio 0.2
|
||||
|
||||
|
||||
## [1.0.0-alpha.2] - 2019-12-02
|
||||
|
||||
### Changed
|
||||
|
||||
* Migrated to `std::future`
|
||||
|
||||
|
||||
## [0.3.0] - 2019-10-03
|
||||
|
||||
### Changed
|
||||
|
||||
* Update `rustls` to 0.16
|
||||
* Minimum required Rust version upped to 1.37.0
|
||||
|
||||
## [0.2.5] - 2019-09-05
|
||||
|
||||
* Add `TcpConnectService`
|
||||
|
||||
## [0.2.4] - 2019-09-02
|
||||
|
||||
* Use arbiter's storage for default async resolver
|
||||
|
||||
## [0.2.3] - 2019-08-05
|
||||
|
||||
* Add `ConnectService` and `OpensslConnectService`
|
||||
|
||||
## [0.2.2] - 2019-07-24
|
||||
|
||||
* Add `rustls` support
|
||||
|
||||
## [0.2.1] - 2019-07-17
|
||||
|
||||
### Added
|
||||
|
||||
* Expose Connect addrs #30
|
||||
|
||||
### Changed
|
||||
|
||||
* Update `derive_more` to 0.15
|
||||
|
||||
|
||||
## [0.2.0] - 2019-05-12
|
||||
|
||||
### Changed
|
||||
|
||||
* Upgrade to actix-service 0.4
|
||||
|
||||
|
||||
## [0.1.5] - 2019-04-19
|
||||
|
||||
### Added
|
||||
|
||||
* `Connect::set_addr()`
|
||||
|
||||
### Changed
|
||||
|
||||
* Use trust-dns-resolver 0.11.0
|
||||
|
||||
|
||||
## [0.1.4] - 2019-04-12
|
||||
|
||||
### Changed
|
||||
|
||||
* Do not start default resolver immediately for default connector.
|
||||
|
||||
|
||||
## [0.1.3] - 2019-04-11
|
||||
|
||||
### Changed
|
||||
|
||||
* Start trust-dns default resolver on first use
|
||||
|
||||
## [0.1.2] - 2019-04-04
|
||||
|
||||
### Added
|
||||
|
||||
* Log error if dns system config could not be loaded.
|
||||
|
||||
### Changed
|
||||
|
||||
* Rename connect Connector to TcpConnector #10
|
||||
|
||||
|
||||
## [0.1.1] - 2019-03-15
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fix error handling for single address
|
||||
|
||||
|
||||
## [0.1.0] - 2019-03-14
|
||||
|
||||
* Refactor resolver and connector services
|
||||
|
||||
* Rename crate
|
@@ -1,58 +0,0 @@
|
||||
[package]
|
||||
name = "actix-connect"
|
||||
version = "2.0.0"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "TCP connector service for Actix ecosystem."
|
||||
keywords = ["network", "framework", "async", "futures"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-connect/"
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
features = ["openssl", "rustls", "uri"]
|
||||
|
||||
[lib]
|
||||
name = "actix_connect"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[features]
|
||||
default = ["uri"]
|
||||
|
||||
# openssl
|
||||
openssl = ["open-ssl", "tokio-openssl"]
|
||||
|
||||
# rustls
|
||||
rustls = ["rust-tls", "tokio-rustls", "webpki"]
|
||||
|
||||
# support http::Uri as connect address
|
||||
uri = ["http"]
|
||||
|
||||
[dependencies]
|
||||
actix-service = "1.0.6"
|
||||
actix-codec = "0.3.0"
|
||||
actix-utils = "2.0.0"
|
||||
actix-rt = "1.1.1"
|
||||
|
||||
derive_more = "0.99.2"
|
||||
either = "1.5.3"
|
||||
futures-util = { version = "0.3.4", default-features = false }
|
||||
http = { version = "0.2.0", optional = true }
|
||||
log = "0.4"
|
||||
trust-dns-proto = { version = "0.19", default-features = false, features = ["tokio-runtime"] }
|
||||
trust-dns-resolver = { version = "0.19", default-features = false, features = ["tokio-runtime", "system-config"] }
|
||||
|
||||
# openssl
|
||||
open-ssl = { package = "openssl", version = "0.10", optional = true }
|
||||
tokio-openssl = { version = "0.4.0", optional = true }
|
||||
|
||||
# rustls
|
||||
rust-tls = { package = "rustls", version = "0.18.0", optional = true }
|
||||
tokio-rustls = { version = "0.14.0", optional = true }
|
||||
webpki = { version = "0.21", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
bytes = "0.5.3"
|
||||
actix-testing = "1.0.0"
|
@@ -1,282 +0,0 @@
|
||||
use std::collections::{vec_deque, VecDeque};
|
||||
use std::fmt;
|
||||
use std::iter::{FromIterator, FusedIterator};
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use either::Either;
|
||||
|
||||
/// Connect request
|
||||
pub trait Address: Unpin + 'static {
|
||||
/// Host name of the request
|
||||
fn host(&self) -> &str;
|
||||
|
||||
/// Port of the request
|
||||
fn port(&self) -> Option<u16>;
|
||||
}
|
||||
|
||||
impl Address for String {
|
||||
fn host(&self) -> &str {
|
||||
&self
|
||||
}
|
||||
|
||||
fn port(&self) -> Option<u16> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Address for &'static str {
|
||||
fn host(&self) -> &str {
|
||||
self
|
||||
}
|
||||
|
||||
fn port(&self) -> Option<u16> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Connect request
|
||||
#[derive(Eq, PartialEq, Debug, Hash)]
|
||||
pub struct Connect<T> {
|
||||
pub(crate) req: T,
|
||||
pub(crate) port: u16,
|
||||
pub(crate) addr: Option<Either<SocketAddr, VecDeque<SocketAddr>>>,
|
||||
}
|
||||
|
||||
impl<T: Address> Connect<T> {
|
||||
/// Create a `Connect` instance by splitting the string on ':' and converting the second part to a u16 port
|
||||
pub fn new(req: T) -> Connect<T> {
|
||||
let (_, port) = parse(req.host());
|
||||
Connect {
|
||||
req,
|
||||
port: port.unwrap_or(0),
|
||||
addr: None,
|
||||
}
|
||||
}
|
||||
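For illustration, a hedged sketch of what that splitting means in practice; it targets the published actix-connect 2.0 API shown in this file, and `example.com` is just a placeholder host:

```rust
use actix_connect::Connect;

fn main() {
    // A "host:port" string has its port split off and stored on the request.
    let req = Connect::new("example.com:8443");
    assert_eq!(req.port(), 8443);

    // A bare host keeps port 0 until one is supplied with `set_port`.
    let req = Connect::new("example.com").set_port(80);
    assert_eq!(req.port(), 80);
}
```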
|
||||
/// Create new `Connect` instance from host and address. Connector skips name resolution stage
|
||||
/// for such connect messages.
|
||||
pub fn with(req: T, addr: SocketAddr) -> Connect<T> {
|
||||
Connect {
|
||||
req,
|
||||
port: 0,
|
||||
addr: Some(Either::Left(addr)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Use port if address does not provide one.
|
||||
///
|
||||
/// By default it is set to 0.
|
||||
pub fn set_port(mut self, port: u16) -> Self {
|
||||
self.port = port;
|
||||
self
|
||||
}
|
||||
|
||||
/// Use address.
|
||||
pub fn set_addr(mut self, addr: Option<SocketAddr>) -> Self {
|
||||
if let Some(addr) = addr {
|
||||
self.addr = Some(Either::Left(addr));
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Use addresses.
|
||||
pub fn set_addrs<I>(mut self, addrs: I) -> Self
|
||||
where
|
||||
I: IntoIterator<Item = SocketAddr>,
|
||||
{
|
||||
let mut addrs = VecDeque::from_iter(addrs);
|
||||
self.addr = if addrs.len() < 2 {
|
||||
addrs.pop_front().map(Either::Left)
|
||||
} else {
|
||||
Some(Either::Right(addrs))
|
||||
};
|
||||
self
|
||||
}
|
||||
|
||||
/// Host name
|
||||
pub fn host(&self) -> &str {
|
||||
self.req.host()
|
||||
}
|
||||
|
||||
/// Port of the request
|
||||
pub fn port(&self) -> u16 {
|
||||
self.req.port().unwrap_or(self.port)
|
||||
}
|
||||
|
||||
/// Pre-resolved addresses of the request.
|
||||
pub fn addrs(&self) -> ConnectAddrsIter<'_> {
|
||||
let inner = match self.addr {
|
||||
None => Either::Left(None),
|
||||
Some(Either::Left(addr)) => Either::Left(Some(addr)),
|
||||
Some(Either::Right(ref addrs)) => Either::Right(addrs.iter()),
|
||||
};
|
||||
|
||||
ConnectAddrsIter { inner }
|
||||
}
|
||||
|
||||
/// Takes pre-resolved addresses of the request.
|
||||
pub fn take_addrs(&mut self) -> ConnectTakeAddrsIter {
|
||||
let inner = match self.addr.take() {
|
||||
None => Either::Left(None),
|
||||
Some(Either::Left(addr)) => Either::Left(Some(addr)),
|
||||
Some(Either::Right(addrs)) => Either::Right(addrs.into_iter()),
|
||||
};
|
||||
|
||||
ConnectTakeAddrsIter { inner }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> From<T> for Connect<T> {
|
||||
fn from(addr: T) -> Self {
|
||||
Connect::new(addr)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> fmt::Display for Connect<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}:{}", self.host(), self.port())
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterator over addresses in a [`Connect`](struct.Connect.html) request.
|
||||
#[derive(Clone)]
|
||||
pub struct ConnectAddrsIter<'a> {
|
||||
inner: Either<Option<SocketAddr>, vec_deque::Iter<'a, SocketAddr>>,
|
||||
}
|
||||
|
||||
impl Iterator for ConnectAddrsIter<'_> {
|
||||
type Item = SocketAddr;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
match self.inner {
|
||||
Either::Left(ref mut opt) => opt.take(),
|
||||
Either::Right(ref mut iter) => iter.next().copied(),
|
||||
}
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
match self.inner {
|
||||
Either::Left(Some(_)) => (1, Some(1)),
|
||||
Either::Left(None) => (0, Some(0)),
|
||||
Either::Right(ref iter) => iter.size_hint(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for ConnectAddrsIter<'_> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_list().entries(self.clone()).finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl ExactSizeIterator for ConnectAddrsIter<'_> {}
|
||||
|
||||
impl FusedIterator for ConnectAddrsIter<'_> {}
|
||||
|
||||
/// Owned iterator over addresses in a [`Connect`](struct.Connect.html) request.
|
||||
#[derive(Debug)]
|
||||
pub struct ConnectTakeAddrsIter {
|
||||
inner: Either<Option<SocketAddr>, vec_deque::IntoIter<SocketAddr>>,
|
||||
}
|
||||
|
||||
impl Iterator for ConnectTakeAddrsIter {
|
||||
type Item = SocketAddr;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
match self.inner {
|
||||
Either::Left(ref mut opt) => opt.take(),
|
||||
Either::Right(ref mut iter) => iter.next(),
|
||||
}
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
match self.inner {
|
||||
Either::Left(Some(_)) => (1, Some(1)),
|
||||
Either::Left(None) => (0, Some(0)),
|
||||
Either::Right(ref iter) => iter.size_hint(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ExactSizeIterator for ConnectTakeAddrsIter {}
|
||||
|
||||
impl FusedIterator for ConnectTakeAddrsIter {}
|
||||
|
||||
fn parse(host: &str) -> (&str, Option<u16>) {
|
||||
let mut parts_iter = host.splitn(2, ':');
|
||||
if let Some(host) = parts_iter.next() {
|
||||
let port_str = parts_iter.next().unwrap_or("");
|
||||
if let Ok(port) = port_str.parse::<u16>() {
|
||||
(host, Some(port))
|
||||
} else {
|
||||
(host, None)
|
||||
}
|
||||
} else {
|
||||
(host, None)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Connection<T, U> {
|
||||
io: U,
|
||||
req: T,
|
||||
}
|
||||
|
||||
impl<T, U> Connection<T, U> {
|
||||
pub fn new(io: U, req: T) -> Self {
|
||||
Self { io, req }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Connection<T, U> {
|
||||
/// Reconstruct from parts.
|
||||
pub fn from_parts(io: U, req: T) -> Self {
|
||||
Self { io, req }
|
||||
}
|
||||
|
||||
/// Deconstruct into parts.
|
||||
pub fn into_parts(self) -> (U, T) {
|
||||
(self.io, self.req)
|
||||
}
|
||||
|
||||
/// Replace the enclosed I/O object, returning the old object and a new `Connection`.
|
||||
pub fn replace<Y>(self, io: Y) -> (U, Connection<T, Y>) {
|
||||
(self.io, Connection { io, req: self.req })
|
||||
}
|
||||
|
||||
/// Returns a shared reference to the underlying stream.
|
||||
pub fn get_ref(&self) -> &U {
|
||||
&self.io
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying stream.
|
||||
pub fn get_mut(&mut self) -> &mut U {
|
||||
&mut self.io
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address, U> Connection<T, U> {
|
||||
/// Get request
|
||||
pub fn host(&self) -> &str {
|
||||
&self.req.host()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> std::ops::Deref for Connection<T, U> {
|
||||
type Target = U;
|
||||
|
||||
fn deref(&self) -> &U {
|
||||
&self.io
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> std::ops::DerefMut for Connection<T, U> {
|
||||
fn deref_mut(&mut self) -> &mut U {
|
||||
&mut self.io
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U: fmt::Debug> fmt::Debug for Connection<T, U> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "Stream {{{:?}}}", self.io)
|
||||
}
|
||||
}
|
@@ -1,172 +0,0 @@
|
||||
use std::collections::VecDeque;
|
||||
use std::future::Future;
|
||||
use std::io;
|
||||
use std::marker::PhantomData;
|
||||
use std::net::SocketAddr;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use futures_util::future::{err, ok, BoxFuture, Either, FutureExt, Ready};
|
||||
|
||||
use super::connect::{Address, Connect, Connection};
|
||||
use super::error::ConnectError;
|
||||
|
||||
/// TCP connector service factory
|
||||
#[derive(Debug)]
|
||||
pub struct TcpConnectorFactory<T>(PhantomData<T>);
|
||||
|
||||
impl<T> TcpConnectorFactory<T> {
|
||||
pub fn new() -> Self {
|
||||
TcpConnectorFactory(PhantomData)
|
||||
}
|
||||
|
||||
/// Create TCP connector service
|
||||
pub fn service(&self) -> TcpConnector<T> {
|
||||
TcpConnector(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for TcpConnectorFactory<T> {
|
||||
fn default() -> Self {
|
||||
TcpConnectorFactory(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for TcpConnectorFactory<T> {
|
||||
fn clone(&self) -> Self {
|
||||
TcpConnectorFactory(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> ServiceFactory for TcpConnectorFactory<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = Connection<T, TcpStream>;
|
||||
type Error = ConnectError;
|
||||
type Config = ();
|
||||
type Service = TcpConnector<T>;
|
||||
type InitError = ();
|
||||
type Future = Ready<Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
ok(self.service())
|
||||
}
|
||||
}
|
||||
|
||||
/// TCP connector service
|
||||
#[derive(Default, Debug)]
|
||||
pub struct TcpConnector<T>(PhantomData<T>);
|
||||
|
||||
impl<T> TcpConnector<T> {
|
||||
pub fn new() -> Self {
|
||||
TcpConnector(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for TcpConnector<T> {
|
||||
fn clone(&self) -> Self {
|
||||
TcpConnector(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> Service for TcpConnector<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = Connection<T, TcpStream>;
|
||||
type Error = ConnectError;
|
||||
#[allow(clippy::type_complexity)]
|
||||
type Future = Either<TcpConnectorResponse<T>, Ready<Result<Self::Response, Self::Error>>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Connect<T>) -> Self::Future {
|
||||
let port = req.port();
|
||||
let Connect { req, addr, .. } = req;
|
||||
|
||||
if let Some(addr) = addr {
|
||||
Either::Left(TcpConnectorResponse::new(req, port, addr))
|
||||
} else {
|
||||
error!("TCP connector: got unresolved address");
|
||||
Either::Right(err(ConnectError::Unresolved))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
/// TCP stream connector response future
|
||||
pub struct TcpConnectorResponse<T> {
|
||||
req: Option<T>,
|
||||
port: u16,
|
||||
addrs: Option<VecDeque<SocketAddr>>,
|
||||
stream: Option<BoxFuture<'static, Result<TcpStream, io::Error>>>,
|
||||
}
|
||||
|
||||
impl<T: Address> TcpConnectorResponse<T> {
|
||||
pub fn new(
|
||||
req: T,
|
||||
port: u16,
|
||||
addr: either::Either<SocketAddr, VecDeque<SocketAddr>>,
|
||||
) -> TcpConnectorResponse<T> {
|
||||
trace!(
|
||||
"TCP connector - connecting to {:?} port:{}",
|
||||
req.host(),
|
||||
port
|
||||
);
|
||||
|
||||
match addr {
|
||||
either::Either::Left(addr) => TcpConnectorResponse {
|
||||
req: Some(req),
|
||||
port,
|
||||
addrs: None,
|
||||
stream: Some(TcpStream::connect(addr).boxed()),
|
||||
},
|
||||
either::Either::Right(addrs) => TcpConnectorResponse {
|
||||
req: Some(req),
|
||||
port,
|
||||
addrs: Some(addrs),
|
||||
stream: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> Future for TcpConnectorResponse<T> {
|
||||
type Output = Result<Connection<T, TcpStream>, ConnectError>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
|
||||
// connect
|
||||
loop {
|
||||
if let Some(new) = this.stream.as_mut() {
|
||||
match new.as_mut().poll(cx) {
|
||||
Poll::Ready(Ok(sock)) => {
|
||||
let req = this.req.take().unwrap();
|
||||
trace!(
|
||||
"TCP connector - successfully connected to connecting to {:?} - {:?}",
|
||||
req.host(), sock.peer_addr()
|
||||
);
|
||||
return Poll::Ready(Ok(Connection::new(sock, req)));
|
||||
}
|
||||
Poll::Pending => return Poll::Pending,
|
||||
Poll::Ready(Err(err)) => {
|
||||
trace!(
|
||||
"TCP connector - failed to connect to connecting to {:?} port: {}",
|
||||
this.req.as_ref().unwrap().host(),
|
||||
this.port,
|
||||
);
|
||||
if this.addrs.is_none() || this.addrs.as_ref().unwrap().is_empty() {
|
||||
return Poll::Ready(Err(err.into()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// try to connect
|
||||
let addr = this.addrs.as_mut().unwrap().pop_front().unwrap();
|
||||
this.stream = Some(TcpStream::connect(addr).boxed());
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,26 +0,0 @@
|
||||
use std::io;
|
||||
|
||||
use derive_more::{Display, From};
|
||||
use trust_dns_resolver::error::ResolveError;
|
||||
|
||||
#[derive(Debug, From, Display)]
|
||||
pub enum ConnectError {
|
||||
/// Failed to resolve the hostname
|
||||
#[display(fmt = "Failed resolving hostname: {}", _0)]
|
||||
Resolver(ResolveError),
|
||||
|
||||
/// No dns records
|
||||
#[display(fmt = "No dns records found for the input")]
|
||||
NoRecords,
|
||||
|
||||
/// Invalid input
|
||||
InvalidInput,
|
||||
|
||||
/// Unresolved host name
|
||||
#[display(fmt = "Connector received `Connect` method with unresolved host")]
|
||||
Unresolved,
|
||||
|
||||
/// Connection IO error
|
||||
#[display(fmt = "{}", _0)]
|
||||
Io(io::Error),
|
||||
}
|
@@ -1,111 +0,0 @@
|
||||
//! TCP connector service for Actix ecosystem.
|
||||
//!
|
||||
//! ## Package features
|
||||
//!
|
||||
//! * `openssl` - enables TLS support via `openssl` crate
|
||||
//! * `rustls` - enables TLS support via `rustls` crate
|
||||
|
||||
#![deny(rust_2018_idioms)]
|
||||
#![recursion_limit = "128"]
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
mod connect;
|
||||
mod connector;
|
||||
mod error;
|
||||
mod resolve;
|
||||
mod service;
|
||||
pub mod ssl;
|
||||
|
||||
#[cfg(feature = "uri")]
|
||||
mod uri;
|
||||
|
||||
use actix_rt::{net::TcpStream, Arbiter};
|
||||
use actix_service::{pipeline, pipeline_factory, Service, ServiceFactory};
|
||||
use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
|
||||
use trust_dns_resolver::system_conf::read_system_conf;
|
||||
use trust_dns_resolver::TokioAsyncResolver as AsyncResolver;
|
||||
|
||||
pub mod resolver {
|
||||
pub use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
|
||||
pub use trust_dns_resolver::system_conf::read_system_conf;
|
||||
pub use trust_dns_resolver::{error::ResolveError, AsyncResolver};
|
||||
}
|
||||
|
||||
pub use self::connect::{Address, Connect, Connection};
|
||||
pub use self::connector::{TcpConnector, TcpConnectorFactory};
|
||||
pub use self::error::ConnectError;
|
||||
pub use self::resolve::{Resolver, ResolverFactory};
|
||||
pub use self::service::{ConnectService, ConnectServiceFactory, TcpConnectService};
|
||||
|
||||
pub async fn start_resolver(
|
||||
cfg: ResolverConfig,
|
||||
opts: ResolverOpts,
|
||||
) -> Result<AsyncResolver, ConnectError> {
|
||||
Ok(AsyncResolver::tokio(cfg, opts).await?)
|
||||
}
|
||||
|
||||
struct DefaultResolver(AsyncResolver);
|
||||
|
||||
pub(crate) async fn get_default_resolver() -> Result<AsyncResolver, ConnectError> {
|
||||
if Arbiter::contains_item::<DefaultResolver>() {
|
||||
Ok(Arbiter::get_item(|item: &DefaultResolver| item.0.clone()))
|
||||
} else {
|
||||
let (cfg, opts) = match read_system_conf() {
|
||||
Ok((cfg, opts)) => (cfg, opts),
|
||||
Err(e) => {
|
||||
log::error!("TRust-DNS can not load system config: {}", e);
|
||||
(ResolverConfig::default(), ResolverOpts::default())
|
||||
}
|
||||
};
|
||||
|
||||
let resolver = AsyncResolver::tokio(cfg, opts).await?;
|
||||
|
||||
Arbiter::set_item(DefaultResolver(resolver.clone()));
|
||||
Ok(resolver)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start_default_resolver() -> Result<AsyncResolver, ConnectError> {
|
||||
get_default_resolver().await
|
||||
}
|
||||
|
||||
/// Create TCP connector service.
|
||||
pub fn new_connector<T: Address + 'static>(
|
||||
resolver: AsyncResolver,
|
||||
) -> impl Service<Request = Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError>
|
||||
+ Clone {
|
||||
pipeline(Resolver::new(resolver)).and_then(TcpConnector::new())
|
||||
}
|
||||
|
||||
/// Create TCP connector service factory.
|
||||
pub fn new_connector_factory<T: Address + 'static>(
|
||||
resolver: AsyncResolver,
|
||||
) -> impl ServiceFactory<
|
||||
Config = (),
|
||||
Request = Connect<T>,
|
||||
Response = Connection<T, TcpStream>,
|
||||
Error = ConnectError,
|
||||
InitError = (),
|
||||
> + Clone {
|
||||
pipeline_factory(ResolverFactory::new(resolver)).and_then(TcpConnectorFactory::new())
|
||||
}
|
||||
|
||||
/// Create connector service with default parameters.
|
||||
pub fn default_connector<T: Address + 'static>(
|
||||
) -> impl Service<Request = Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError>
|
||||
+ Clone {
|
||||
pipeline(Resolver::default()).and_then(TcpConnector::new())
|
||||
}
|
||||
|
||||
/// Create connector service factory with default parameters.
|
||||
pub fn default_connector_factory<T: Address + 'static>() -> impl ServiceFactory<
|
||||
Config = (),
|
||||
Request = Connect<T>,
|
||||
Response = Connection<T, TcpStream>,
|
||||
Error = ConnectError,
|
||||
InitError = (),
|
||||
> + Clone {
|
||||
pipeline_factory(ResolverFactory::default()).and_then(TcpConnectorFactory::new())
|
||||
}
|
@@ -1,208 +0,0 @@
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::net::SocketAddr;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use futures_util::future::{ok, Either, Ready};
|
||||
use trust_dns_resolver::TokioAsyncResolver as AsyncResolver;
|
||||
use trust_dns_resolver::{error::ResolveError, lookup_ip::LookupIp};
|
||||
|
||||
use crate::connect::{Address, Connect};
|
||||
use crate::error::ConnectError;
|
||||
use crate::get_default_resolver;
|
||||
|
||||
/// DNS Resolver Service factory
|
||||
pub struct ResolverFactory<T> {
|
||||
resolver: Option<AsyncResolver>,
|
||||
_t: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> ResolverFactory<T> {
|
||||
/// Create new resolver instance with custom configuration and options.
|
||||
pub fn new(resolver: AsyncResolver) -> Self {
|
||||
ResolverFactory {
|
||||
resolver: Some(resolver),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn service(&self) -> Resolver<T> {
|
||||
Resolver {
|
||||
resolver: self.resolver.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for ResolverFactory<T> {
|
||||
fn default() -> Self {
|
||||
ResolverFactory {
|
||||
resolver: None,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for ResolverFactory<T> {
|
||||
fn clone(&self) -> Self {
|
||||
ResolverFactory {
|
||||
resolver: self.resolver.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> ServiceFactory for ResolverFactory<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = Connect<T>;
|
||||
type Error = ConnectError;
|
||||
type Config = ();
|
||||
type Service = Resolver<T>;
|
||||
type InitError = ();
|
||||
type Future = Ready<Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
ok(self.service())
|
||||
}
|
||||
}
|
||||
|
||||
/// DNS Resolver Service
|
||||
pub struct Resolver<T> {
|
||||
resolver: Option<AsyncResolver>,
|
||||
_t: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> Resolver<T> {
|
||||
/// Create new resolver instance with custom configuration and options.
|
||||
pub fn new(resolver: AsyncResolver) -> Self {
|
||||
Resolver {
|
||||
resolver: Some(resolver),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for Resolver<T> {
|
||||
fn default() -> Self {
|
||||
Resolver {
|
||||
resolver: None,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for Resolver<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Resolver {
|
||||
resolver: self.resolver.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> Service for Resolver<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = Connect<T>;
|
||||
type Error = ConnectError;
|
||||
#[allow(clippy::type_complexity)]
|
||||
type Future = Either<
|
||||
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>,
|
||||
Ready<Result<Connect<T>, Self::Error>>,
|
||||
>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, mut req: Connect<T>) -> Self::Future {
|
||||
if req.addr.is_some() {
|
||||
Either::Right(ok(req))
|
||||
} else if let Ok(ip) = req.host().parse() {
|
||||
req.addr = Some(either::Either::Left(SocketAddr::new(ip, req.port())));
|
||||
Either::Right(ok(req))
|
||||
} else {
|
||||
let resolver = self.resolver.as_ref().map(AsyncResolver::clone);
|
||||
Either::Left(Box::pin(async move {
|
||||
trace!("DNS resolver: resolving host {:?}", req.host());
|
||||
let resolver = if let Some(resolver) = resolver {
|
||||
resolver
|
||||
} else {
|
||||
get_default_resolver()
|
||||
.await
|
||||
.expect("Failed to get default resolver")
|
||||
};
|
||||
ResolverFuture::new(req, &resolver).await
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type LookupIpFuture = Pin<Box<dyn Future<Output = Result<LookupIp, ResolveError>>>>;
|
||||
|
||||
#[doc(hidden)]
|
||||
/// Resolver future
|
||||
pub struct ResolverFuture<T: Address> {
|
||||
req: Option<Connect<T>>,
|
||||
lookup: LookupIpFuture,
|
||||
}
|
||||
|
||||
impl<T: Address> ResolverFuture<T> {
|
||||
pub fn new(req: Connect<T>, resolver: &AsyncResolver) -> Self {
|
||||
let host = if let Some(host) = req.host().splitn(2, ':').next() {
|
||||
host
|
||||
} else {
|
||||
req.host()
|
||||
};
|
||||
|
||||
// Clone data to be moved to the lookup future
|
||||
let host_clone = host.to_owned();
|
||||
let resolver_clone = resolver.clone();
|
||||
|
||||
ResolverFuture {
|
||||
lookup: Box::pin(async move {
|
||||
let resolver = resolver_clone;
|
||||
resolver.lookup_ip(host_clone).await
|
||||
}),
|
||||
req: Some(req),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> Future for ResolverFuture<T> {
|
||||
type Output = Result<Connect<T>, ConnectError>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
|
||||
match Pin::new(&mut this.lookup).poll(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Ok(ips)) => {
|
||||
let req = this.req.take().unwrap();
|
||||
let port = req.port();
|
||||
let req = req.set_addrs(ips.iter().map(|ip| SocketAddr::new(ip, port)));
|
||||
|
||||
trace!(
|
||||
"DNS resolver: host {:?} resolved to {:?}",
|
||||
req.host(),
|
||||
req.addrs()
|
||||
);
|
||||
|
||||
if req.addr.is_none() {
|
||||
Poll::Ready(Err(ConnectError::NoRecords))
|
||||
} else {
|
||||
Poll::Ready(Ok(req))
|
||||
}
|
||||
}
|
||||
Poll::Ready(Err(e)) => {
|
||||
trace!(
|
||||
"DNS resolver: failed to resolve host {:?} err: {}",
|
||||
this.req.as_ref().unwrap().host(),
|
||||
e
|
||||
);
|
||||
Poll::Ready(Err(e.into()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,232 +0,0 @@
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use either::Either;
|
||||
use futures_util::future::{ok, Ready};
|
||||
use trust_dns_resolver::TokioAsyncResolver as AsyncResolver;
|
||||
|
||||
use crate::connect::{Address, Connect, Connection};
|
||||
use crate::connector::{TcpConnector, TcpConnectorFactory};
|
||||
use crate::error::ConnectError;
|
||||
use crate::resolve::{Resolver, ResolverFactory};
|
||||
|
||||
pub struct ConnectServiceFactory<T> {
|
||||
tcp: TcpConnectorFactory<T>,
|
||||
resolver: ResolverFactory<T>,
|
||||
}
|
||||
|
||||
impl<T> ConnectServiceFactory<T> {
|
||||
/// Construct new ConnectService factory
|
||||
pub fn new() -> Self {
|
||||
ConnectServiceFactory {
|
||||
tcp: TcpConnectorFactory::default(),
|
||||
resolver: ResolverFactory::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct new connect service with custom dns resolver
|
||||
pub fn with_resolver(resolver: AsyncResolver) -> Self {
|
||||
ConnectServiceFactory {
|
||||
tcp: TcpConnectorFactory::default(),
|
||||
resolver: ResolverFactory::new(resolver),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct new service
|
||||
pub fn service(&self) -> ConnectService<T> {
|
||||
ConnectService {
|
||||
tcp: self.tcp.service(),
|
||||
resolver: self.resolver.service(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct new tcp stream service
|
||||
pub fn tcp_service(&self) -> TcpConnectService<T> {
|
||||
TcpConnectService {
|
||||
tcp: self.tcp.service(),
|
||||
resolver: self.resolver.service(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for ConnectServiceFactory<T> {
|
||||
fn default() -> Self {
|
||||
ConnectServiceFactory {
|
||||
tcp: TcpConnectorFactory::default(),
|
||||
resolver: ResolverFactory::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for ConnectServiceFactory<T> {
|
||||
fn clone(&self) -> Self {
|
||||
ConnectServiceFactory {
|
||||
tcp: self.tcp.clone(),
|
||||
resolver: self.resolver.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> ServiceFactory for ConnectServiceFactory<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = Connection<T, TcpStream>;
|
||||
type Error = ConnectError;
|
||||
type Config = ();
|
||||
type Service = ConnectService<T>;
|
||||
type InitError = ();
|
||||
type Future = Ready<Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
ok(self.service())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct ConnectService<T> {
|
||||
tcp: TcpConnector<T>,
|
||||
resolver: Resolver<T>,
|
||||
}
|
||||
|
||||
impl<T: Address> Service for ConnectService<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = Connection<T, TcpStream>;
|
||||
type Error = ConnectError;
|
||||
type Future = ConnectServiceResponse<T>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Connect<T>) -> Self::Future {
|
||||
ConnectServiceResponse {
|
||||
state: ConnectState::Resolve(self.resolver.call(req)),
|
||||
tcp: self.tcp.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum ConnectState<T: Address> {
|
||||
Resolve(<Resolver<T> as Service>::Future),
|
||||
Connect(<TcpConnector<T> as Service>::Future),
|
||||
}
|
||||
|
||||
impl<T: Address> ConnectState<T> {
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn poll(
|
||||
&mut self,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Either<Poll<Result<Connection<T, TcpStream>, ConnectError>>, Connect<T>> {
|
||||
match self {
|
||||
ConnectState::Resolve(ref mut fut) => match Pin::new(fut).poll(cx) {
|
||||
Poll::Pending => Either::Left(Poll::Pending),
|
||||
Poll::Ready(Ok(res)) => Either::Right(res),
|
||||
Poll::Ready(Err(err)) => Either::Left(Poll::Ready(Err(err))),
|
||||
},
|
||||
ConnectState::Connect(ref mut fut) => Either::Left(Pin::new(fut).poll(cx)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ConnectServiceResponse<T: Address> {
|
||||
state: ConnectState<T>,
|
||||
tcp: TcpConnector<T>,
|
||||
}
|
||||
|
||||
impl<T: Address> Future for ConnectServiceResponse<T> {
|
||||
type Output = Result<Connection<T, TcpStream>, ConnectError>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let res = match self.state.poll(cx) {
|
||||
Either::Right(res) => {
|
||||
self.state = ConnectState::Connect(self.tcp.call(res));
|
||||
self.state.poll(cx)
|
||||
}
|
||||
Either::Left(res) => return res,
|
||||
};
|
||||
|
||||
match res {
|
||||
Either::Left(res) => res,
|
||||
Either::Right(_) => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct TcpConnectService<T> {
|
||||
tcp: TcpConnector<T>,
|
||||
resolver: Resolver<T>,
|
||||
}
|
||||
|
||||
impl<T: Address + 'static> Service for TcpConnectService<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = TcpStream;
|
||||
type Error = ConnectError;
|
||||
type Future = TcpConnectServiceResponse<T>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Connect<T>) -> Self::Future {
|
||||
TcpConnectServiceResponse {
|
||||
state: TcpConnectState::Resolve(self.resolver.call(req)),
|
||||
tcp: self.tcp.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum TcpConnectState<T: Address> {
|
||||
Resolve(<Resolver<T> as Service>::Future),
|
||||
Connect(<TcpConnector<T> as Service>::Future),
|
||||
}
|
||||
|
||||
impl<T: Address> TcpConnectState<T> {
|
||||
fn poll(
|
||||
&mut self,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Either<Poll<Result<TcpStream, ConnectError>>, Connect<T>> {
|
||||
match self {
|
||||
TcpConnectState::Resolve(ref mut fut) => match Pin::new(fut).poll(cx) {
|
||||
Poll::Pending => (),
|
||||
Poll::Ready(Ok(res)) => return Either::Right(res),
|
||||
Poll::Ready(Err(err)) => return Either::Left(Poll::Ready(Err(err))),
|
||||
},
|
||||
TcpConnectState::Connect(ref mut fut) => {
|
||||
if let Poll::Ready(res) = Pin::new(fut).poll(cx) {
|
||||
return match res {
|
||||
Ok(conn) => Either::Left(Poll::Ready(Ok(conn.into_parts().0))),
|
||||
Err(err) => Either::Left(Poll::Ready(Err(err))),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
Either::Left(Poll::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TcpConnectServiceResponse<T: Address> {
|
||||
state: TcpConnectState<T>,
|
||||
tcp: TcpConnector<T>,
|
||||
}
|
||||
|
||||
impl<T: Address> Future for TcpConnectServiceResponse<T> {
|
||||
type Output = Result<TcpStream, ConnectError>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let res = match self.state.poll(cx) {
|
||||
Either::Right(res) => {
|
||||
self.state = TcpConnectState::Connect(self.tcp.call(res));
|
||||
self.state.poll(cx)
|
||||
}
|
||||
Either::Left(res) => return res,
|
||||
};
|
||||
|
||||
match res {
|
||||
Either::Left(res) => res,
|
||||
Either::Right(_) => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
@ -1,7 +0,0 @@
//! SSL Services

#[cfg(feature = "openssl")]
pub mod openssl;

#[cfg(feature = "rustls")]
pub mod rustls;

@ -1,268 +0,0 @@
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{fmt, io};
|
||||
|
||||
pub use open_ssl::ssl::{Error as SslError, SslConnector, SslMethod};
|
||||
pub use tokio_openssl::{HandshakeError, SslStream};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use futures_util::future::{err, ok, Either, FutureExt, LocalBoxFuture, Ready};
|
||||
use trust_dns_resolver::TokioAsyncResolver as AsyncResolver;
|
||||
|
||||
use crate::{
|
||||
Address, Connect, ConnectError, ConnectService, ConnectServiceFactory, Connection,
|
||||
};
|
||||
|
||||
/// OpenSSL connector factory
|
||||
pub struct OpensslConnector<T, U> {
|
||||
connector: SslConnector,
|
||||
_t: PhantomData<(T, U)>,
|
||||
}
|
||||
|
||||
impl<T, U> OpensslConnector<T, U> {
|
||||
pub fn new(connector: SslConnector) -> Self {
|
||||
OpensslConnector {
|
||||
connector,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> OpensslConnector<T, U>
|
||||
where
|
||||
T: Address + 'static,
|
||||
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
|
||||
{
|
||||
pub fn service(connector: SslConnector) -> OpensslConnectorService<T, U> {
|
||||
OpensslConnectorService {
|
||||
connector,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Clone for OpensslConnector<T, U> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
connector: self.connector.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> ServiceFactory for OpensslConnector<T, U>
|
||||
where
|
||||
T: Address + 'static,
|
||||
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
|
||||
{
|
||||
type Request = Connection<T, U>;
|
||||
type Response = Connection<T, SslStream<U>>;
|
||||
type Error = io::Error;
|
||||
type Config = ();
|
||||
type Service = OpensslConnectorService<T, U>;
|
||||
type InitError = ();
|
||||
type Future = Ready<Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
ok(OpensslConnectorService {
|
||||
connector: self.connector.clone(),
|
||||
_t: PhantomData,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct OpensslConnectorService<T, U> {
|
||||
connector: SslConnector,
|
||||
_t: PhantomData<(T, U)>,
|
||||
}
|
||||
|
||||
impl<T, U> Clone for OpensslConnectorService<T, U> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
connector: self.connector.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Service for OpensslConnectorService<T, U>
|
||||
where
|
||||
T: Address + 'static,
|
||||
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
|
||||
{
|
||||
type Request = Connection<T, U>;
|
||||
type Response = Connection<T, SslStream<U>>;
|
||||
type Error = io::Error;
|
||||
#[allow(clippy::type_complexity)]
|
||||
type Future = Either<ConnectAsyncExt<T, U>, Ready<Result<Self::Response, Self::Error>>>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, stream: Connection<T, U>) -> Self::Future {
|
||||
trace!("SSL Handshake start for: {:?}", stream.host());
|
||||
let (io, stream) = stream.replace(());
|
||||
let host = stream.host().to_string();
|
||||
|
||||
match self.connector.configure() {
|
||||
Err(e) => Either::Right(err(io::Error::new(io::ErrorKind::Other, e))),
|
||||
Ok(config) => Either::Left(ConnectAsyncExt {
|
||||
fut: async move { tokio_openssl::connect(config, &host, io).await }
|
||||
.boxed_local(),
|
||||
stream: Some(stream),
|
||||
_t: PhantomData,
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ConnectAsyncExt<T, U> {
|
||||
fut: LocalBoxFuture<'static, Result<SslStream<U>, HandshakeError<U>>>,
|
||||
stream: Option<Connection<T, ()>>,
|
||||
_t: PhantomData<U>,
|
||||
}
|
||||
|
||||
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
|
||||
where
|
||||
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
|
||||
{
|
||||
type Output = Result<Connection<T, SslStream<U>>, io::Error>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
|
||||
match Pin::new(&mut this.fut).poll(cx) {
|
||||
Poll::Ready(Ok(stream)) => {
|
||||
let s = this.stream.take().unwrap();
|
||||
trace!("SSL Handshake success: {:?}", s.host());
|
||||
Poll::Ready(Ok(s.replace(stream).1))
|
||||
}
|
||||
Poll::Ready(Err(e)) => {
|
||||
trace!("SSL Handshake error: {:?}", e);
|
||||
Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, format!("{}", e))))
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct OpensslConnectServiceFactory<T> {
|
||||
tcp: ConnectServiceFactory<T>,
|
||||
openssl: OpensslConnector<T, TcpStream>,
|
||||
}
|
||||
|
||||
impl<T> OpensslConnectServiceFactory<T> {
|
||||
/// Construct new OpensslConnectService factory
|
||||
pub fn new(connector: SslConnector) -> Self {
|
||||
OpensslConnectServiceFactory {
|
||||
tcp: ConnectServiceFactory::default(),
|
||||
openssl: OpensslConnector::new(connector),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct new connect service with custom DNS resolver
|
||||
pub fn with_resolver(connector: SslConnector, resolver: AsyncResolver) -> Self {
|
||||
OpensslConnectServiceFactory {
|
||||
tcp: ConnectServiceFactory::with_resolver(resolver),
|
||||
openssl: OpensslConnector::new(connector),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct OpenSSL connect service
|
||||
pub fn service(&self) -> OpensslConnectService<T> {
|
||||
OpensslConnectService {
|
||||
tcp: self.tcp.service(),
|
||||
openssl: OpensslConnectorService {
|
||||
connector: self.openssl.connector.clone(),
|
||||
_t: PhantomData,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for OpensslConnectServiceFactory<T> {
|
||||
fn clone(&self) -> Self {
|
||||
OpensslConnectServiceFactory {
|
||||
tcp: self.tcp.clone(),
|
||||
openssl: self.openssl.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address + 'static> ServiceFactory for OpensslConnectServiceFactory<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = SslStream<TcpStream>;
|
||||
type Error = ConnectError;
|
||||
type Config = ();
|
||||
type Service = OpensslConnectService<T>;
|
||||
type InitError = ();
|
||||
type Future = Ready<Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
ok(self.service())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct OpensslConnectService<T> {
|
||||
tcp: ConnectService<T>,
|
||||
openssl: OpensslConnectorService<T, TcpStream>,
|
||||
}
|
||||
|
||||
impl<T: Address + 'static> Service for OpensslConnectService<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = SslStream<TcpStream>;
|
||||
type Error = ConnectError;
|
||||
type Future = OpensslConnectServiceResponse<T>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Connect<T>) -> Self::Future {
|
||||
OpensslConnectServiceResponse {
|
||||
fut1: Some(self.tcp.call(req)),
|
||||
fut2: None,
|
||||
openssl: self.openssl.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct OpensslConnectServiceResponse<T: Address + 'static> {
|
||||
fut1: Option<<ConnectService<T> as Service>::Future>,
|
||||
fut2: Option<<OpensslConnectorService<T, TcpStream> as Service>::Future>,
|
||||
openssl: OpensslConnectorService<T, TcpStream>,
|
||||
}
|
||||
|
||||
impl<T: Address> Future for OpensslConnectServiceResponse<T> {
|
||||
type Output = Result<SslStream<TcpStream>, ConnectError>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
if let Some(ref mut fut) = self.fut1 {
|
||||
match futures_util::ready!(Pin::new(fut).poll(cx)) {
|
||||
Ok(res) => {
|
||||
let _ = self.fut1.take();
|
||||
self.fut2 = Some(self.openssl.call(res));
|
||||
}
|
||||
Err(e) => return Poll::Ready(Err(e)),
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref mut fut) = self.fut2 {
|
||||
match futures_util::ready!(Pin::new(fut).poll(cx)) {
|
||||
Ok(connect) => Poll::Ready(Ok(connect.into_parts().0)),
|
||||
Err(e) => Poll::Ready(Err(ConnectError::Io(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
e,
|
||||
)))),
|
||||
}
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
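For illustration only (not part of the diff): a rough usage sketch of the OpenSSL connect service defined above. It assumes an `actix-connect`-era setup with the `openssl` feature enabled, `actix-service` and `actix-rt` as dependencies, and the module paths shown in this file; the host `example.com:443` is an arbitrary placeholder.

```rust
use actix_connect::ssl::openssl::{OpensslConnectServiceFactory, SslConnector, SslMethod};
use actix_service::{Service, ServiceFactory};

#[actix_rt::main]
async fn main() {
    // Build an OpenSSL connector with the default verification settings.
    let ssl = SslConnector::builder(SslMethod::tls()).unwrap().build();

    // Factory that resolves + TCP-connects first, then performs the TLS handshake.
    let factory = OpensslConnectServiceFactory::new(ssl);
    let mut svc = factory.new_service(()).await.unwrap();

    // `String` implements `Address`, so a `Connect` request can be built via `Into`.
    let res = svc.call(String::from("example.com:443").into()).await;
    println!("TLS connect succeeded: {}", res.is_ok());
}
```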
@ -1,136 +0,0 @@
|
||||
use std::fmt;
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
pub use rust_tls::Session;
|
||||
pub use tokio_rustls::{client::TlsStream, rustls::ClientConfig};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use futures_util::future::{ok, Ready};
|
||||
use tokio_rustls::{Connect, TlsConnector};
|
||||
use webpki::DNSNameRef;
|
||||
|
||||
use crate::{Address, Connection};
|
||||
|
||||
/// Rustls connector factory
|
||||
pub struct RustlsConnector<T, U> {
|
||||
connector: Arc<ClientConfig>,
|
||||
_t: PhantomData<(T, U)>,
|
||||
}
|
||||
|
||||
impl<T, U> RustlsConnector<T, U> {
|
||||
pub fn new(connector: Arc<ClientConfig>) -> Self {
|
||||
RustlsConnector {
|
||||
connector,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> RustlsConnector<T, U>
|
||||
where
|
||||
T: Address,
|
||||
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
|
||||
{
|
||||
pub fn service(connector: Arc<ClientConfig>) -> RustlsConnectorService<T, U> {
|
||||
RustlsConnectorService {
|
||||
connector,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Clone for RustlsConnector<T, U> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
connector: self.connector.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address, U> ServiceFactory for RustlsConnector<T, U>
|
||||
where
|
||||
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
|
||||
{
|
||||
type Request = Connection<T, U>;
|
||||
type Response = Connection<T, TlsStream<U>>;
|
||||
type Error = std::io::Error;
|
||||
type Config = ();
|
||||
type Service = RustlsConnectorService<T, U>;
|
||||
type InitError = ();
|
||||
type Future = Ready<Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
ok(RustlsConnectorService {
|
||||
connector: self.connector.clone(),
|
||||
_t: PhantomData,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RustlsConnectorService<T, U> {
|
||||
connector: Arc<ClientConfig>,
|
||||
_t: PhantomData<(T, U)>,
|
||||
}
|
||||
|
||||
impl<T, U> Clone for RustlsConnectorService<T, U> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
connector: self.connector.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address, U> Service for RustlsConnectorService<T, U>
|
||||
where
|
||||
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
|
||||
{
|
||||
type Request = Connection<T, U>;
|
||||
type Response = Connection<T, TlsStream<U>>;
|
||||
type Error = std::io::Error;
|
||||
type Future = ConnectAsyncExt<T, U>;
|
||||
|
||||
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, stream: Connection<T, U>) -> Self::Future {
|
||||
trace!("SSL Handshake start for: {:?}", stream.host());
|
||||
let (io, stream) = stream.replace(());
|
||||
let host = DNSNameRef::try_from_ascii_str(stream.host())
|
||||
.expect("rustls currently only handles hostname-based connections. See https://github.com/briansmith/webpki/issues/54");
|
||||
ConnectAsyncExt {
|
||||
fut: TlsConnector::from(self.connector.clone()).connect(host, io),
|
||||
stream: Some(stream),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ConnectAsyncExt<T, U> {
|
||||
fut: Connect<U>,
|
||||
stream: Option<Connection<T, ()>>,
|
||||
}
|
||||
|
||||
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
|
||||
where
|
||||
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
|
||||
{
|
||||
type Output = Result<Connection<T, TlsStream<U>>, std::io::Error>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
Poll::Ready(
|
||||
futures_util::ready!(Pin::new(&mut this.fut).poll(cx)).map(|stream| {
|
||||
let s = this.stream.take().unwrap();
|
||||
trace!("SSL Handshake success: {:?}", s.host());
|
||||
s.replace(stream).1
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
@ -1,37 +0,0 @@
use http::Uri;

use crate::Address;

impl Address for Uri {
    fn host(&self) -> &str {
        self.host().unwrap_or("")
    }

    fn port(&self) -> Option<u16> {
        if let Some(port) = self.port_u16() {
            Some(port)
        } else {
            port(self.scheme_str())
        }
    }
}

// TODO: load data from file
fn port(scheme: Option<&str>) -> Option<u16> {
    if let Some(scheme) = scheme {
        match scheme {
            "http" => Some(80),
            "https" => Some(443),
            "ws" => Some(80),
            "wss" => Some(443),
            "amqp" => Some(5672),
            "amqps" => Some(5671),
            "sb" => Some(5671),
            "mqtt" => Some(1883),
            "mqtts" => Some(8883),
            _ => None,
        }
    } else {
        None
    }
}

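For illustration only (not part of the diff): a minimal standalone sketch of the port-resolution behaviour that the `Address for Uri` impl above provides, using only the `http` crate. `default_port` is a local copy of the fallback table and not an API of this crate.

```rust
use http::Uri;

// Local copy of the scheme-based fallback shown in the impl above.
fn default_port(scheme: Option<&str>) -> Option<u16> {
    match scheme? {
        "http" | "ws" => Some(80),
        "https" | "wss" => Some(443),
        "amqp" => Some(5672),
        "amqps" | "sb" => Some(5671),
        "mqtt" => Some(1883),
        "mqtts" => Some(8883),
        _ => None,
    }
}

fn main() {
    let explicit: Uri = "http://example.com:8080".parse().unwrap();
    let implied: Uri = "https://example.com".parse().unwrap();

    // An explicit port wins; otherwise the scheme decides.
    assert_eq!(
        explicit.port_u16().or_else(|| default_port(explicit.scheme_str())),
        Some(8080)
    );
    assert_eq!(
        implied.port_u16().or_else(|| default_port(implied.scheme_str())),
        Some(443)
    );

    println!("port resolution matches the `Address for Uri` impl above");
}
```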
@ -1,127 +0,0 @@
|
||||
use std::io;
|
||||
|
||||
use actix_codec::{BytesCodec, Framed};
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{fn_service, Service, ServiceFactory};
|
||||
use actix_testing::TestServer;
|
||||
use bytes::Bytes;
|
||||
use futures_util::sink::SinkExt;
|
||||
|
||||
use actix_connect::resolver::{ResolverConfig, ResolverOpts};
|
||||
use actix_connect::Connect;
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
#[actix_rt::test]
|
||||
async fn test_string() {
|
||||
let srv = TestServer::with(|| {
|
||||
fn_service(|io: TcpStream| async {
|
||||
let mut framed = Framed::new(io, BytesCodec);
|
||||
framed.send(Bytes::from_static(b"test")).await?;
|
||||
Ok::<_, io::Error>(())
|
||||
})
|
||||
});
|
||||
|
||||
let mut conn = actix_connect::default_connector();
|
||||
let addr = format!("localhost:{}", srv.port());
|
||||
let con = conn.call(addr.into()).await.unwrap();
|
||||
assert_eq!(con.peer_addr().unwrap(), srv.addr());
|
||||
}
|
||||
|
||||
#[cfg(feature = "rustls")]
|
||||
#[actix_rt::test]
|
||||
async fn test_rustls_string() {
|
||||
let srv = TestServer::with(|| {
|
||||
fn_service(|io: TcpStream| async {
|
||||
let mut framed = Framed::new(io, BytesCodec);
|
||||
framed.send(Bytes::from_static(b"test")).await?;
|
||||
Ok::<_, io::Error>(())
|
||||
})
|
||||
});
|
||||
|
||||
let mut conn = actix_connect::default_connector();
|
||||
let addr = format!("localhost:{}", srv.port());
|
||||
let con = conn.call(addr.into()).await.unwrap();
|
||||
assert_eq!(con.peer_addr().unwrap(), srv.addr());
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_static_str() {
|
||||
let srv = TestServer::with(|| {
|
||||
fn_service(|io: TcpStream| async {
|
||||
let mut framed = Framed::new(io, BytesCodec);
|
||||
framed.send(Bytes::from_static(b"test")).await?;
|
||||
Ok::<_, io::Error>(())
|
||||
})
|
||||
});
|
||||
|
||||
let resolver = actix_connect::start_default_resolver().await.unwrap();
|
||||
let mut conn = actix_connect::new_connector(resolver.clone());
|
||||
|
||||
let con = conn.call(Connect::with("10", srv.addr())).await.unwrap();
|
||||
assert_eq!(con.peer_addr().unwrap(), srv.addr());
|
||||
|
||||
let connect = Connect::new(srv.host().to_owned());
|
||||
let mut conn = actix_connect::new_connector(resolver);
|
||||
let con = conn.call(connect).await;
|
||||
assert!(con.is_err());
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_new_service() {
|
||||
let srv = TestServer::with(|| {
|
||||
fn_service(|io: TcpStream| async {
|
||||
let mut framed = Framed::new(io, BytesCodec);
|
||||
framed.send(Bytes::from_static(b"test")).await?;
|
||||
Ok::<_, io::Error>(())
|
||||
})
|
||||
});
|
||||
|
||||
let resolver =
|
||||
actix_connect::start_resolver(ResolverConfig::default(), ResolverOpts::default())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let factory = actix_connect::new_connector_factory(resolver);
|
||||
|
||||
let mut conn = factory.new_service(()).await.unwrap();
|
||||
let con = conn.call(Connect::with("10", srv.addr())).await.unwrap();
|
||||
assert_eq!(con.peer_addr().unwrap(), srv.addr());
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "openssl", feature = "uri"))]
|
||||
#[actix_rt::test]
|
||||
async fn test_openssl_uri() {
|
||||
use std::convert::TryFrom;
|
||||
|
||||
let srv = TestServer::with(|| {
|
||||
fn_service(|io: TcpStream| async {
|
||||
let mut framed = Framed::new(io, BytesCodec);
|
||||
framed.send(Bytes::from_static(b"test")).await?;
|
||||
Ok::<_, io::Error>(())
|
||||
})
|
||||
});
|
||||
|
||||
let mut conn = actix_connect::default_connector();
|
||||
let addr = http::Uri::try_from(format!("https://localhost:{}", srv.port())).unwrap();
|
||||
let con = conn.call(addr.into()).await.unwrap();
|
||||
assert_eq!(con.peer_addr().unwrap(), srv.addr());
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "rustls", feature = "uri"))]
|
||||
#[actix_rt::test]
|
||||
async fn test_rustls_uri() {
|
||||
use std::convert::TryFrom;
|
||||
|
||||
let srv = TestServer::with(|| {
|
||||
fn_service(|io: TcpStream| async {
|
||||
let mut framed = Framed::new(io, BytesCodec);
|
||||
framed.send(Bytes::from_static(b"test")).await?;
|
||||
Ok::<_, io::Error>(())
|
||||
})
|
||||
});
|
||||
|
||||
let mut conn = actix_connect::default_connector();
|
||||
let addr = http::Uri::try_from(format!("https://localhost:{}", srv.port())).unwrap();
|
||||
let con = conn.call(addr.into()).await.unwrap();
|
||||
assert_eq!(con.peer_addr().unwrap(), srv.addr());
|
||||
}
|
@ -1,13 +1,53 @@
# CHANGES
# Changes

## 0.1.3 - 2020-12-3
## Unreleased

* Add `actix-reexport` feature
- Minimum supported Rust version (MSRV) is now 1.71.

## 0.1.2 - 2020-05-18
## 0.2.4

### Changed
- Update `syn` dependency to `2`.
- Minimum supported Rust version (MSRV) is now 1.65.

* Forward actix_rt::test arguments to test function [#127]
## 0.2.3

- Fix test macro in presence of other imports named "test". [#399]

[#399]: https://github.com/actix/actix-net/pull/399

## 0.2.2

- Improve error recovery potential when macro input is invalid. [#391]
- Allow custom `System`s on test macro. [#391]

[#391]: https://github.com/actix/actix-net/pull/391

## 0.2.1

- Add optional argument `system` to `main` macro which can be used to specify the path to `actix_rt::System` (useful for re-exports). [#363]

[#363]: https://github.com/actix/actix-net/pull/363

## 0.2.0

- Update to latest `actix_rt::System::new` signature. [#261]

[#261]: https://github.com/actix/actix-net/pull/261

## 0.2.0-beta.1

- Remove `actix-reexport` feature. [#218]

[#218]: https://github.com/actix/actix-net/pull/218

## 0.1.3

- Add `actix-reexport` feature. [#218]

[#218]: https://github.com/actix/actix-net/pull/218

## 0.1.2

- Forward actix_rt::test arguments to test function [#127]

[#127]: https://github.com/actix/actix-net/pull/127

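For illustration only (not part of the diff): a minimal sketch of the `system` argument mentioned in the 0.2.1 entry, mirroring the `main-04-system-path.rs` trybuild test added later in this diff. The `my_runtime` module name is hypothetical.

```rust
// Hypothetical re-export module standing in for a crate that wraps actix-rt.
mod my_runtime {
    pub use actix_rt::System;
}

// The `system` argument points the generated entry-point code at that re-exported `System` path.
#[actix_rt::main(system = "my_runtime::System")]
async fn main() {
    println!("running on the specified System");
}
```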
@ -1,26 +1,39 @@
[package]
name = "actix-macros"
version = "0.1.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix runtime macros"
version = "0.2.4"
authors = [
    "Nikolay Kim <fafhrd91@gmail.com>",
    "Ibraheem Ahmed <ibrah1440@gmail.com>",
    "Rob Ede <robjtede@icloud.com>",
]
description = "Macros for Actix system and runtime"
repository = "https://github.com/actix/actix-net"
documentation = "https://docs.rs/actix-macros/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
license.workspace = true
edition.workspace = true
rust-version.workspace = true

[package.metadata.cargo-machete]
ignored = [
    "proc_macro2", # specified for minimal versions compat
]

[lib]
proc-macro = true

[dependencies]
quote = "1.0.3"
syn = { version = "^1", features = ["full"] }
quote = "1"
syn = { version = "2", features = ["full"] }

[features]
actix-reexport = []
# minimal versions compat
[target.'cfg(any())'.dependencies]
proc-macro2 = "1.0.60"

[dev-dependencies]
actix-rt = "1.0"

futures-util = { version = "0.3", default-features = false }
actix-rt = "2"
futures-util = { version = "0.3.17", default-features = false }
rustversion-msrv = "0.100"
trybuild = "1"

[lints]
workspace = true

@ -1,72 +1,133 @@
|
||||
//! Macros for use with Tokio
|
||||
extern crate proc_macro;
|
||||
//! Macros for Actix system and runtime.
|
||||
//!
|
||||
//! The [`actix-rt`](https://docs.rs/actix-rt) crate must be available for macro output to compile.
|
||||
//!
|
||||
//! # Entry-point
|
||||
//! See docs for the [`#[main]`](macro@main) macro.
|
||||
//!
|
||||
//! # Tests
|
||||
//! See docs for the [`#[test]`](macro@test) macro.
|
||||
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
use proc_macro::TokenStream;
|
||||
use quote::quote;
|
||||
use syn::parse::Parser as _;
|
||||
|
||||
/// Marks async function to be executed by actix system.
|
||||
type AttributeArgs = syn::punctuated::Punctuated<syn::Meta, syn::Token![,]>;
|
||||
|
||||
/// Marks async entry-point function to be executed by Actix system.
|
||||
///
|
||||
/// ## Usage
|
||||
///
|
||||
/// ```rust
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// #[actix_rt::main]
|
||||
/// async fn main() {
|
||||
/// println!("Hello world");
|
||||
/// }
|
||||
/// ```
|
||||
#[allow(clippy::needless_doctest_main)]
|
||||
#[proc_macro_attribute]
|
||||
#[cfg(not(test))] // Work around for rust-lang/rust#62127
|
||||
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
|
||||
pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
|
||||
Ok(input) => input,
|
||||
// on parse err, make IDEs happy; see fn docs
|
||||
Err(err) => return input_and_compile_error(item, err),
|
||||
};
|
||||
|
||||
let parser = AttributeArgs::parse_terminated;
|
||||
let args = match parser.parse(args.clone()) {
|
||||
Ok(args) => args,
|
||||
Err(err) => return input_and_compile_error(args, err),
|
||||
};
|
||||
|
||||
let attrs = &input.attrs;
|
||||
let vis = &input.vis;
|
||||
let sig = &mut input.sig;
|
||||
let body = &input.block;
|
||||
let name = &sig.ident;
|
||||
|
||||
if sig.asyncness.is_none() {
|
||||
return syn::Error::new_spanned(sig.fn_token, "only async fn is supported")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
return syn::Error::new_spanned(
|
||||
sig.fn_token,
|
||||
"the async keyword is missing from the function declaration",
|
||||
)
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
|
||||
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
|
||||
|
||||
for arg in &args {
|
||||
match arg {
|
||||
syn::Meta::NameValue(syn::MetaNameValue {
|
||||
path,
|
||||
value:
|
||||
syn::Expr::Lit(syn::ExprLit {
|
||||
lit: syn::Lit::Str(lit),
|
||||
..
|
||||
}),
|
||||
..
|
||||
}) => match path
|
||||
.get_ident()
|
||||
.map(|i| i.to_string().to_lowercase())
|
||||
.as_deref()
|
||||
{
|
||||
Some("system") => match lit.parse() {
|
||||
Ok(path) => system = path,
|
||||
Err(_) => {
|
||||
return syn::Error::new_spanned(lit, "Expected path")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sig.asyncness = None;
|
||||
|
||||
if cfg!(feature = "actix-reexport") {
|
||||
(quote! {
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
actix::System::new(stringify!(#name))
|
||||
.block_on(async move { #body })
|
||||
}
|
||||
})
|
||||
.into()
|
||||
} else {
|
||||
(quote! {
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
actix_rt::System::new(stringify!(#name))
|
||||
.block_on(async move { #body })
|
||||
}
|
||||
})
|
||||
.into()
|
||||
}
|
||||
(quote! {
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
<#system>::new().block_on(async move { #body })
|
||||
}
|
||||
})
|
||||
.into()
|
||||
}
|
||||
|
||||
/// Marks async test function to be executed by actix runtime.
|
||||
/// Marks async test function to be executed in an Actix system.
|
||||
///
|
||||
/// ## Usage
|
||||
///
|
||||
/// ```no_run
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// #[actix_rt::test]
|
||||
/// async fn my_test() {
|
||||
/// assert!(true);
|
||||
/// }
|
||||
/// ```
|
||||
#[proc_macro_attribute]
|
||||
pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
|
||||
pub fn test(args: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
|
||||
Ok(input) => input,
|
||||
// on parse err, make IDEs happy; see fn docs
|
||||
Err(err) => return input_and_compile_error(item, err),
|
||||
};
|
||||
|
||||
let parser = AttributeArgs::parse_terminated;
|
||||
let args = match parser.parse(args.clone()) {
|
||||
Ok(args) => args,
|
||||
Err(err) => return input_and_compile_error(args, err),
|
||||
};
|
||||
|
||||
let attrs = &input.attrs;
|
||||
let vis = &input.vis;
|
||||
let sig = &mut input.sig;
|
||||
@ -74,7 +135,7 @@ pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut has_test_attr = false;
|
||||
|
||||
for attr in attrs {
|
||||
if attr.path.is_ident("test") {
|
||||
if attr.path().is_ident("test") {
|
||||
has_test_attr = true;
|
||||
}
|
||||
}
|
||||
@ -82,7 +143,7 @@ pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
if sig.asyncness.is_none() {
|
||||
return syn::Error::new_spanned(
|
||||
input.sig.fn_token,
|
||||
format!("only async fn is supported, {}", input.sig.ident),
|
||||
"the async keyword is missing from the function declaration",
|
||||
)
|
||||
.to_compile_error()
|
||||
.into();
|
||||
@ -90,24 +151,69 @@ pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
|
||||
|
||||
sig.asyncness = None;
|
||||
|
||||
let result = if has_test_attr {
|
||||
quote! {
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
actix_rt::System::new("test")
|
||||
.block_on(async { #body })
|
||||
}
|
||||
}
|
||||
let missing_test_attr = if has_test_attr {
|
||||
quote! {}
|
||||
} else {
|
||||
quote! {
|
||||
#[test]
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
actix_rt::System::new("test")
|
||||
.block_on(async { #body })
|
||||
}
|
||||
}
|
||||
quote! { #[::core::prelude::v1::test] }
|
||||
};
|
||||
|
||||
result.into()
|
||||
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
|
||||
|
||||
for arg in &args {
|
||||
match arg {
|
||||
syn::Meta::NameValue(syn::MetaNameValue {
|
||||
path,
|
||||
value:
|
||||
syn::Expr::Lit(syn::ExprLit {
|
||||
lit: syn::Lit::Str(lit),
|
||||
..
|
||||
}),
|
||||
..
|
||||
}) => match path
|
||||
.get_ident()
|
||||
.map(|i| i.to_string().to_lowercase())
|
||||
.as_deref()
|
||||
{
|
||||
Some("system") => match lit.parse() {
|
||||
Ok(path) => system = path,
|
||||
Err(_) => {
|
||||
return syn::Error::new_spanned(lit, "Expected path")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
(quote! {
|
||||
#missing_test_attr
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
<#system>::new().block_on(async { #body })
|
||||
}
|
||||
})
|
||||
.into()
|
||||
}
|
||||
|
||||
/// Converts the error to a token stream and appends it to the original input.
|
||||
///
|
||||
/// Returning the original input in addition to the error is good for IDEs which can gracefully
|
||||
/// recover and show more precise errors within the macro body.
|
||||
///
|
||||
/// See <https://github.com/rust-analyzer/rust-analyzer/issues/10468> for more info.
|
||||
fn input_and_compile_error(mut item: TokenStream, err: syn::Error) -> TokenStream {
|
||||
let compile_err = TokenStream::from(err.to_compile_error());
|
||||
item.extend(compile_err);
|
||||
item
|
||||
}
|
||||
|
@ -1,9 +1,21 @@
#![allow(missing_docs)]

#[rustversion_msrv::msrv]
#[test]
fn compile_macros() {
    let t = trybuild::TestCases::new();

    t.pass("tests/trybuild/main-01-basic.rs");
    t.compile_fail("tests/trybuild/main-02-only-async.rs");
    t.pass("tests/trybuild/main-03-fn-params.rs");
    t.pass("tests/trybuild/main-04-system-path.rs");
    t.compile_fail("tests/trybuild/main-05-system-expect-path.rs");
    t.compile_fail("tests/trybuild/main-06-unknown-attr.rs");

    t.pass("tests/trybuild/test-01-basic.rs");
    t.pass("tests/trybuild/test-02-keep-attrs.rs");
    t.compile_fail("tests/trybuild/test-03-only-async.rs");
    t.pass("tests/trybuild/test-04-system-path.rs");
    t.compile_fail("tests/trybuild/test-05-system-expect-path.rs");
    t.compile_fail("tests/trybuild/test-06-unknown-attr.rs");
}

@ -1,14 +1,11 @@
error: only async fn is supported
--> $DIR/main-02-only-async.rs:2:1
error: the async keyword is missing from the function declaration
--> tests/trybuild/main-02-only-async.rs:2:1
|
2 | fn main() {
| ^^

error[E0601]: `main` function not found in crate `$CRATE`
--> $DIR/main-02-only-async.rs:1:1
--> tests/trybuild/main-02-only-async.rs:4:2
|
1 | / #[actix_rt::main]
2 | | fn main() {
3 | | futures_util::future::ready(()).await
4 | | }
| |_^ consider adding a `main` function to `$DIR/tests/trybuild/main-02-only-async.rs`
4 | }
| ^ consider adding a `main` function to `$DIR/tests/trybuild/main-02-only-async.rs`

6
actix-macros/tests/trybuild/main-03-fn-params.rs
Normal file
@ -0,0 +1,6 @@
#[actix_rt::main]
async fn main2(_param: bool) {
    futures_util::future::ready(()).await
}

fn main() {}

8
actix-macros/tests/trybuild/main-04-system-path.rs
Normal file
@ -0,0 +1,8 @@
mod system {
    pub use actix_rt::System as MySystem;
}

#[actix_rt::main(system = "system::MySystem")]
async fn main() {
    futures_util::future::ready(()).await
}

@ -0,0 +1,4 @@
#[actix_rt::main(system = "!@#*&")]
async fn main2() {}

fn main() {}

@ -0,0 +1,5 @@
error: Expected path
--> $DIR/main-05-system-expect-path.rs:1:27
|
1 | #[actix_rt::main(system = "!@#*&")]
| ^^^^^^^

7
actix-macros/tests/trybuild/main-06-unknown-attr.rs
Normal file
@ -0,0 +1,7 @@
#[actix_rt::main(foo = "bar")]
async fn async_main() {}

#[actix_rt::main(bar::baz)]
async fn async_main2() {}

fn main() {}

11
actix-macros/tests/trybuild/main-06-unknown-attr.stderr
Normal file
@ -0,0 +1,11 @@
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:1:18
|
1 | #[actix_rt::main(foo = "bar")]
| ^^^^^^^^^^^

error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:4:18
|
4 | #[actix_rt::main(bar::baz)]
| ^^^^^^^^

6
actix-macros/tests/trybuild/test-03-only-async.rs
Normal file
@ -0,0 +1,6 @@
#[actix_rt::test]
fn my_test() {
    futures_util::future::ready(()).await
}

fn main() {}

5
actix-macros/tests/trybuild/test-03-only-async.stderr
Normal file
@ -0,0 +1,5 @@
error: the async keyword is missing from the function declaration
--> $DIR/test-03-only-async.rs:2:1
|
2 | fn my_test() {
| ^^

10
actix-macros/tests/trybuild/test-04-system-path.rs
Normal file
@ -0,0 +1,10 @@
mod system {
    pub use actix_rt::System as MySystem;
}

#[actix_rt::test(system = "system::MySystem")]
async fn my_test() {
    futures_util::future::ready(()).await
}

fn main() {}

@ -0,0 +1,4 @@
#[actix_rt::test(system = "!@#*&")]
async fn my_test() {}

fn main() {}

@ -0,0 +1,5 @@
error: Expected path
--> $DIR/test-05-system-expect-path.rs:1:27
|
1 | #[actix_rt::test(system = "!@#*&")]
| ^^^^^^^

7
actix-macros/tests/trybuild/test-06-unknown-attr.rs
Normal file
@ -0,0 +1,7 @@
#[actix_rt::test(foo = "bar")]
async fn my_test_1() {}

#[actix_rt::test(bar::baz)]
async fn my_test_2() {}

fn main() {}

11
actix-macros/tests/trybuild/test-06-unknown-attr.stderr
Normal file
@ -0,0 +1,11 @@
error: Unknown attribute specified
--> $DIR/test-06-unknown-attr.rs:1:18
|
1 | #[actix_rt::test(foo = "bar")]
| ^^^^^^^^^^^

error: Unknown attribute specified
--> $DIR/test-06-unknown-attr.rs:4:18
|
4 | #[actix_rt::test(bar::baz)]
| ^^^^^^^^

@ -1,124 +1,174 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2020-xx-xx
|
||||
## Unreleased
|
||||
|
||||
### Added
|
||||
- Minimum supported Rust version (MSRV) is now 1.71.
|
||||
|
||||
* Add `System::attach_to_tokio` method. [#173]
|
||||
## 2.10.0
|
||||
|
||||
## [1.1.1] - 2020-04-30
|
||||
- Relax `F`'s bound (`Fn => FnOnce`) on `{Arbiter, System}::with_tokio_rt()` functions.
|
||||
- Update `tokio-uring` dependency to `0.5`.
|
||||
- Minimum supported Rust version (MSRV) is now 1.70.
|
||||
|
||||
### Fixed
|
||||
## 2.9.0
|
||||
|
||||
* Fix memory leak due to [#94] (see [#129] for more detail)
|
||||
- Add `actix_rt::System::runtime()` method to retrieve the underlying `actix_rt::Runtime` runtime.
|
||||
- Add `actix_rt::Runtime::tokio_runtime()` method to retrieve the underlying Tokio runtime.
|
||||
- Minimum supported Rust version (MSRV) is now 1.65.
|
||||
|
||||
[#129]: https://github.com/actix/actix-net/issues/129
|
||||
## 2.8.0
|
||||
|
||||
## [1.1.0] - 2020-04-08
|
||||
- Add `#[track_caller]` attribute to `spawn` functions and methods.
|
||||
- Update `tokio-uring` dependency to `0.4`.
|
||||
- Minimum supported Rust version (MSRV) is now 1.59.
|
||||
|
||||
**This version has been yanked.**
|
||||
## 2.7.0
|
||||
|
||||
### Added
|
||||
- Update `tokio-uring` dependency to `0.3`.
|
||||
- Minimum supported Rust version (MSRV) is now 1.49.
|
||||
|
||||
* Expose `System::is_set` to check if current system has been started [#99]
|
||||
* Add `Arbiter::is_running` to check if event loop is running [#124]
|
||||
* Add `Arbiter::local_join` associated function to be able to `await` spawned futures [#94]
|
||||
## 2.6.0
|
||||
|
||||
[#94]: https://github.com/actix/actix-net/pull/94
|
||||
[#99]: https://github.com/actix/actix-net/pull/99
|
||||
[#124]: https://github.com/actix/actix-net/pull/124
|
||||
- Update `tokio-uring` dependency to `0.2`.
|
||||
|
||||
## [1.0.0] - 2019-12-11
|
||||
## 2.5.1
|
||||
|
||||
* Update dependencies
|
||||
- Expose `System::with_tokio_rt` and `Arbiter::with_tokio_rt`.
|
||||
|
||||
## [1.0.0-alpha.3] - 2019-12-07
|
||||
## 2.5.0
|
||||
|
||||
### Fixed
|
||||
- Add `System::run_with_code` to allow retrieving the exit code on stop.
|
||||
|
||||
* Fix compilation on non-unix platforms
|
||||
## 2.4.0
|
||||
|
||||
### Changed
|
||||
- Add `Arbiter::try_current` for situations where thread may or may not have Arbiter context.
|
||||
- Start io-uring with `System::new` when feature is enabled.
|
||||
|
||||
* Migrate to tokio 0.2
|
||||
## 2.3.0
|
||||
|
||||
- The `spawn` method can now resolve with non-unit outputs.
|
||||
- Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux.
|
||||
|
||||
## [1.0.0-alpha.2] - 2019-12-02
|
||||
## 2.2.0
|
||||
|
||||
Added
|
||||
- **BREAKING** `ActixStream::{poll_read_ready, poll_write_ready}` methods now return `Ready` object in ok variant.
|
||||
- Breakage is acceptable since `ActixStream` was not intended to be public.
|
||||
|
||||
* Export `main` and `test` attribute macros
|
||||
## 2.1.0
|
||||
|
||||
* Export `time` module (re-export of tokio-timer)
|
||||
- Add `ActixStream` extension trait to include readiness methods.
|
||||
- Re-export `tokio::net::TcpSocket` in `net` module
|
||||
|
||||
* Export `net` module (re-export of tokio-net)
|
||||
## 2.0.2
|
||||
|
||||
- Add `Arbiter::handle` to get a handle of an owned Arbiter.
|
||||
- Add `System::try_current` for situations where actix may or may not be running a System.
|
||||
|
||||
## [1.0.0-alpha.1] - 2019-11-22
|
||||
## 2.0.1
|
||||
|
||||
### Changed
|
||||
- Expose `JoinError` from Tokio.
|
||||
|
||||
* Migrate to std::future and tokio 0.2
|
||||
## 2.0.0
|
||||
|
||||
- Remove all Arbiter-local storage methods.
|
||||
- Re-export `tokio::pin`.
|
||||
|
||||
## [0.2.6] - 2019-11-14
|
||||
## 2.0.0-beta.3
|
||||
|
||||
### Fixed
|
||||
- Remove `run_in_tokio`, `attach_to_tokio` and `AsyncSystemRunner`.
|
||||
- Return `JoinHandle` from `actix_rt::spawn`.
|
||||
- Remove old `Arbiter::spawn`. Implementation is now inlined into `actix_rt::spawn`.
|
||||
- Rename `Arbiter::{send => spawn}` and `Arbiter::{exec_fn => spawn_fn}`.
|
||||
- Remove `Arbiter::exec`.
|
||||
- Remove deprecated `Arbiter::local_join` and `Arbiter::is_running`.
|
||||
- `Arbiter::spawn` now accepts !Unpin futures.
|
||||
- `System::new` no longer takes arguments.
|
||||
- Remove `System::with_current`.
|
||||
- Remove `Builder`.
|
||||
- Add `System::with_init` as replacement for `Builder::run`.
|
||||
- Rename `System::{is_set => is_registered}`.
|
||||
- Add `ArbiterHandle` for sending messages to non-current-thread arbiters.
|
||||
- `System::arbiter` now returns an `&ArbiterHandle`.
|
||||
- `Arbiter::current` now returns an `ArbiterHandle` instead.
|
||||
- `Arbiter::join` now takes self by value.
|
||||
|
||||
* Fix arbiter's thread panic message.
|
||||
## 2.0.0-beta.2
|
||||
|
||||
### Added
|
||||
- Add `task` mod with re-export of `tokio::task::{spawn_blocking, yield_now, JoinHandle}`
|
||||
- Add default "macros" feature to allow faster compile times when using `default-features=false`.
|
||||
|
||||
* Allow to join arbiter's thread. #60
|
||||
## 2.0.0-beta.1
|
||||
|
||||
- Add `System::attach_to_tokio` method.
|
||||
- Update `tokio` dependency to `1.0`.
|
||||
- Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep` to stay aligned with Tokio's naming.
|
||||
- Remove `'static` lifetime requirement for `Runtime::block_on` and `SystemRunner::block_on`.
|
||||
- These methods now accept `&self` when calling.
|
||||
- Remove `'static` lifetime requirement for `System::run` and `Builder::run`.
|
||||
- `Arbiter::spawn` now panics when `System` is not in scope.
|
||||
- Fix work load issue by removing `PENDING` thread local.
|
||||
|
||||
## [0.2.5] - 2019-09-02
|
||||
## 1.1.1
|
||||
|
||||
### Added
|
||||
- Fix memory leak due to
|
||||
|
||||
* Add arbiter specific storage
|
||||
## 1.1.0 _(YANKED)_
|
||||
|
||||
- Expose `System::is_set` to check if current system has been started
|
||||
- Add `Arbiter::is_running` to check if event loop is running
|
||||
- Add `Arbiter::local_join` associated function to be able to `await` spawned futures
|
||||
|
||||
## [0.2.4] - 2019-07-17
|
||||
## 1.0.0
|
||||
|
||||
### Changed
|
||||
- Update dependencies
|
||||
|
||||
* Avoid a copy of the Future when initializing the Box. #29
|
||||
## 1.0.0-alpha.3
|
||||
|
||||
- Migrate to tokio 0.2
|
||||
- Fix compilation on non-unix platforms
|
||||
|
||||
## [0.2.3] - 2019-06-22
|
||||
## 1.0.0-alpha.2
|
||||
|
||||
### Added
|
||||
- Export `main` and `test` attribute macros
|
||||
- Export `time` module (re-export of tokio-timer)
|
||||
- Export `net` module (re-export of tokio-net)
|
||||
|
||||
* Allow to start System using existing CurrentThread Handle #22
|
||||
## 1.0.0-alpha.1
|
||||
|
||||
- Migrate to std::future and tokio 0.2
|
||||
|
||||
## [0.2.2] - 2019-03-28
|
||||
## 0.2.6
|
||||
|
||||
### Changed
|
||||
- Allow to join arbiter's thread. #60
|
||||
- Fix arbiter's thread panic message.
|
||||
|
||||
* Moved `blocking` module to `actix-threadpool` crate
|
||||
## 0.2.5
|
||||
|
||||
- Add arbiter specific storage
|
||||
|
||||
## [0.2.1] - 2019-03-11
|
||||
## 0.2.4
|
||||
|
||||
### Added
|
||||
- Avoid a copy of the Future when initializing the Box. #29
|
||||
|
||||
* Added `blocking` module
|
||||
## 0.2.3
|
||||
|
||||
* Arbiter::exec_fn - execute fn on the arbiter's thread
|
||||
- Allow to start System using existing CurrentThread Handle #22
|
||||
|
||||
* Arbiter::exec - execute fn on the arbiter's thread and wait result
|
||||
## 0.2.2
|
||||
|
||||
- Moved `blocking` module to `actix-threadpool` crate
|
||||
|
||||
## [0.2.0] - 2019-03-06
|
||||
## 0.2.1
|
||||
|
||||
* `run` method returns `io::Result<()>`
|
||||
- Added `blocking` module
|
||||
- Added `Arbiter::exec_fn` - execute fn on the arbiter's thread
|
||||
- Added `Arbiter::exec` - execute fn on the arbiter's thread and wait result
|
||||
|
||||
* Removed `Handle`
|
||||
## 0.2.0
|
||||
|
||||
- `run` method returns `io::Result<()>`
|
||||
- Removed `Handle`
|
||||
|
||||
## [0.1.0] - 2018-12-09
|
||||
## 0.1.0
|
||||
|
||||
* Initial release
|
||||
- Initial release
|
||||
|
@ -1,27 +1,36 @@
[package]
name = "actix-rt"
version = "1.1.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix runtime"
keywords = ["network", "framework", "async", "futures"]
version = "2.10.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>", "Rob Ede <robjtede@icloud.com>"]
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
keywords = ["async", "futures", "io", "runtime"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-rt/"
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
edition.workspace = true
rust-version.workspace = true

[lib]
name = "actix_rt"
path = "src/lib.rs"
[package.metadata.cargo_check_external_types]
allowed_external_types = ["actix_macros::*", "tokio::*"]

[features]
default = ["macros"]
macros = ["actix-macros"]
io-uring = ["tokio-uring"]

[dependencies]
actix-macros = "0.1.0"
copyless = "0.1.4"
futures-channel = "0.3.4"
futures-util = { version = "0.3.4", default-features = false, features = ["alloc"] }
smallvec = "1"
tokio = { version = "0.2.6", default-features = false, features = ["rt-core", "rt-util", "io-driver", "tcp", "uds", "udp", "time", "signal", "stream"] }
actix-macros = { version = "0.2.3", optional = true }

futures-core = { version = "0.3", default-features = false }
tokio = { version = "1.23.1", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }

# runtime for `io-uring` feature
[target.'cfg(target_os = "linux")'.dependencies]
tokio-uring = { version = "0.5", optional = true }

[dev-dependencies]
tokio = { version = "0.2.6", features = ["full"] }
tokio = { version = "1.23.1", features = ["full"] }

[lints]
workspace = true

14
actix-rt/README.md
Normal file
@ -0,0 +1,14 @@
# actix-rt

> Tokio-based single-threaded async runtime for the Actix ecosystem.

[](https://crates.io/crates/actix-rt)
[](https://docs.rs/actix-rt/2.10.0)
[](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)

<br />
[](https://deps.rs/crate/actix-rt/2.10.0)

[](https://discord.gg/WghFtEH6Hb)

See crate documentation for more: https://docs.rs/actix-rt.

60
actix-rt/examples/multi_thread_system.rs
Normal file
60
actix-rt/examples/multi_thread_system.rs
Normal file
@ -0,0 +1,60 @@
|
||||
//! An example on how to build a multi-thread tokio runtime for Actix System.
|
||||
//! Then spawn async task that can make use of work stealing of tokio runtime.
|
||||
|
||||
use actix_rt::System;
|
||||
|
||||
fn main() {
|
||||
System::with_tokio_rt(|| {
|
||||
// build system with a multi-thread tokio runtime.
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.worker_threads(2)
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap()
|
||||
})
|
||||
.block_on(async_main());
|
||||
}
|
||||
|
||||
// async main function that acts like #[actix_web::main] or #[tokio::main]
|
||||
async fn async_main() {
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
|
||||
// get a handle to system arbiter and spawn async task on it
|
||||
System::current().arbiter().spawn(async {
|
||||
// use tokio::spawn to get inside the context of multi thread tokio runtime
|
||||
let h1 = tokio::spawn(async {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
});
|
||||
|
||||
// work stealing occurs for this task spawn
|
||||
let h2 = tokio::spawn(async {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
});
|
||||
|
||||
h1.await.unwrap();
|
||||
h2.await.unwrap();
|
||||
let _ = tx.send(());
|
||||
});
|
||||
|
||||
rx.await.unwrap();
|
||||
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
let now = std::time::Instant::now();
|
||||
|
||||
// without additional tokio::spawn, all spawned tasks run on single thread
|
||||
System::current().arbiter().spawn(async {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
let _ = tx.send(());
|
||||
});
|
||||
|
||||
// previous spawn task has blocked the system arbiter thread
|
||||
// so this task will wait for 2 seconds until it can be run
|
||||
System::current().arbiter().spawn(async move {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
assert!(now.elapsed() > std::time::Duration::from_secs(2));
|
||||
});
|
||||
|
||||
rx.await.unwrap();
|
||||
}
|
@ -1,40 +1,27 @@
|
||||
use std::any::{Any, TypeId};
|
||||
use std::cell::{Cell, RefCell};
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::task::{Context, Poll};
|
||||
use std::{fmt, thread};
|
||||
|
||||
use futures_channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender};
|
||||
use futures_channel::oneshot::{channel, Canceled, Sender};
|
||||
use futures_util::{
|
||||
future::{self, Future, FutureExt},
|
||||
stream::Stream,
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
fmt,
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
sync::atomic::{AtomicUsize, Ordering},
|
||||
task::{Context, Poll},
|
||||
thread,
|
||||
};
|
||||
|
||||
use crate::runtime::Runtime;
|
||||
use crate::system::System;
|
||||
use futures_core::ready;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use copyless::BoxHelper;
|
||||
|
||||
use smallvec::SmallVec;
|
||||
pub use tokio::task::JoinHandle;
|
||||
|
||||
thread_local!(
|
||||
static ADDR: RefCell<Option<Arbiter>> = RefCell::new(None);
|
||||
static RUNNING: Cell<bool> = Cell::new(false);
|
||||
static Q: RefCell<Vec<Pin<Box<dyn Future<Output = ()>>>>> = RefCell::new(Vec::new());
|
||||
static PENDING: RefCell<SmallVec<[JoinHandle<()>; 8]>> = RefCell::new(SmallVec::new());
|
||||
static STORAGE: RefCell<HashMap<TypeId, Box<dyn Any>>> = RefCell::new(HashMap::new());
|
||||
);
|
||||
use crate::system::{System, SystemCommand};
|
||||
|
||||
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
thread_local!(
|
||||
static HANDLE: RefCell<Option<ArbiterHandle>> = const { RefCell::new(None) };
|
||||
);
|
||||
|
||||
pub(crate) enum ArbiterCommand {
|
||||
Stop,
|
||||
Execute(Box<dyn Future<Output = ()> + Unpin + Send>),
|
||||
ExecuteFn(Box<dyn FnExec>),
|
||||
Execute(Pin<Box<dyn Future<Output = ()> + Send>>),
|
||||
}
|
||||
|
||||
impl fmt::Debug for ArbiterCommand {
|
||||
@ -42,427 +29,289 @@ impl fmt::Debug for ArbiterCommand {
|
||||
match self {
|
||||
ArbiterCommand::Stop => write!(f, "ArbiterCommand::Stop"),
|
||||
ArbiterCommand::Execute(_) => write!(f, "ArbiterCommand::Execute"),
|
||||
ArbiterCommand::ExecuteFn(_) => write!(f, "ArbiterCommand::ExecuteFn"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
/// Arbiters provide an asynchronous execution environment for actors, functions
|
||||
/// and futures. When an Arbiter is created, it spawns a new OS thread, and
|
||||
/// hosts an event loop. Some Arbiter functions execute on the current thread.
|
||||
pub struct Arbiter {
|
||||
sender: UnboundedSender<ArbiterCommand>,
|
||||
thread_handle: Option<thread::JoinHandle<()>>,
|
||||
/// A handle for sending spawn and stop messages to an [Arbiter].
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ArbiterHandle {
|
||||
tx: mpsc::UnboundedSender<ArbiterCommand>,
|
||||
}
|
||||
|
||||
impl Clone for Arbiter {
|
||||
fn clone(&self) -> Self {
|
||||
Self::with_sender(self.sender.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Arbiter {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Arbiter {
|
||||
pub(crate) fn new_system() -> Self {
|
||||
let (tx, rx) = unbounded();
|
||||
|
||||
let arb = Arbiter::with_sender(tx);
|
||||
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
|
||||
RUNNING.with(|cell| cell.set(false));
|
||||
STORAGE.with(|cell| cell.borrow_mut().clear());
|
||||
Arbiter::spawn(ArbiterController { stop: None, rx });
|
||||
|
||||
arb
|
||||
impl ArbiterHandle {
|
||||
pub(crate) fn new(tx: mpsc::UnboundedSender<ArbiterCommand>) -> Self {
|
||||
Self { tx }
|
||||
}
|
||||
|
||||
/// Returns the current thread's arbiter's address. If no Arbiter is present, then this
|
||||
/// function will panic!
|
||||
pub fn current() -> Arbiter {
|
||||
ADDR.with(|cell| match *cell.borrow() {
|
||||
Some(ref addr) => addr.clone(),
|
||||
None => panic!("Arbiter is not running"),
|
||||
})
|
||||
}
|
||||
|
||||
/// Check if current arbiter is running.
|
||||
pub fn is_running() -> bool {
|
||||
RUNNING.with(|cell| cell.get())
|
||||
}
|
||||
|
||||
/// Stop arbiter from continuing its event loop.
|
||||
pub fn stop(&self) {
|
||||
let _ = self.sender.unbounded_send(ArbiterCommand::Stop);
|
||||
}
|
||||
|
||||
/// Spawn new thread and run event loop in spawned thread.
|
||||
/// Returns address of newly created arbiter.
|
||||
pub fn new() -> Arbiter {
|
||||
let id = COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
let name = format!("actix-rt:worker:{}", id);
|
||||
let sys = System::current();
|
||||
let (arb_tx, arb_rx) = unbounded();
|
||||
let arb_tx2 = arb_tx.clone();
|
||||
|
||||
let handle = thread::Builder::new()
|
||||
.name(name.clone())
|
||||
.spawn(move || {
|
||||
let mut rt = Runtime::new().expect("Can not create Runtime");
|
||||
let arb = Arbiter::with_sender(arb_tx);
|
||||
|
||||
let (stop, stop_rx) = channel();
|
||||
RUNNING.with(|cell| cell.set(true));
|
||||
STORAGE.with(|cell| cell.borrow_mut().clear());
|
||||
|
||||
System::set_current(sys);
|
||||
|
||||
// start arbiter controller
|
||||
rt.spawn(ArbiterController {
|
||||
stop: Some(stop),
|
||||
rx: arb_rx,
|
||||
});
|
||||
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
|
||||
|
||||
// register arbiter
|
||||
let _ = System::current()
|
||||
.sys()
|
||||
.unbounded_send(SystemCommand::RegisterArbiter(id, arb));
|
||||
|
||||
// run loop
|
||||
let _ = rt.block_on(stop_rx).unwrap_or(1);
|
||||
|
||||
// unregister arbiter
|
||||
let _ = System::current()
|
||||
.sys()
|
||||
.unbounded_send(SystemCommand::UnregisterArbiter(id));
|
||||
})
|
||||
.unwrap_or_else(|err| {
|
||||
panic!("Cannot spawn an arbiter's thread {:?}: {:?}", &name, err)
|
||||
});
|
||||
|
||||
Arbiter {
|
||||
sender: arb_tx2,
|
||||
thread_handle: Some(handle),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn run_system(rt: Option<&Runtime>) {
|
||||
RUNNING.with(|cell| cell.set(true));
|
||||
Q.with(|cell| {
|
||||
let mut v = cell.borrow_mut();
|
||||
for fut in v.drain(..) {
|
||||
if let Some(rt) = rt {
|
||||
rt.spawn(fut);
|
||||
} else {
|
||||
tokio::task::spawn_local(fut);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
pub(crate) fn stop_system() {
|
||||
RUNNING.with(|cell| cell.set(false));
|
||||
}
|
||||
|
||||
/// Spawn a future on the current thread. This does not create a new Arbiter
|
||||
/// or Arbiter address, it is simply a helper for spawning futures on the current
|
||||
/// thread.
|
||||
pub fn spawn<F>(future: F)
|
||||
/// Send a future to the [Arbiter]'s thread and spawn it.
|
||||
///
|
||||
/// If you require a result, include a response channel in the future.
|
||||
///
|
||||
/// Returns true if future was sent successfully and false if the [Arbiter] has died.
|
||||
pub fn spawn<Fut>(&self, future: Fut) -> bool
|
||||
where
|
||||
F: Future<Output = ()> + 'static,
|
||||
Fut: Future<Output = ()> + Send + 'static,
|
||||
{
|
||||
RUNNING.with(move |cell| {
|
||||
if cell.get() {
|
||||
// Spawn the future on running executor
|
||||
let len = PENDING.with(move |cell| {
|
||||
let mut p = cell.borrow_mut();
|
||||
p.push(tokio::task::spawn_local(future));
|
||||
p.len()
|
||||
});
|
||||
if len > 7 {
|
||||
// Before reaching the inline size
|
||||
tokio::task::spawn_local(CleanupPending);
|
||||
}
|
||||
} else {
|
||||
// Box the future and push it to the queue, this results in double boxing
|
||||
// because the executor boxes the future again, but works for now
|
||||
Q.with(move |cell| {
|
||||
cell.borrow_mut().push(Pin::from(Box::alloc().init(future)))
|
||||
});
|
||||
}
|
||||
});
|
||||
self.tx
|
||||
.send(ArbiterCommand::Execute(Box::pin(future)))
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
/// Executes a future on the current thread. This does not create a new Arbiter
|
||||
/// or Arbiter address, it is simply a helper for executing futures on the current
|
||||
/// thread.
|
||||
pub fn spawn_fn<F, R>(f: F)
|
||||
where
|
||||
F: FnOnce() -> R + 'static,
|
||||
R: Future<Output = ()> + 'static,
|
||||
{
|
||||
Arbiter::spawn(future::lazy(|_| f()).flatten())
|
||||
}
|
||||
|
||||
/// Send a future to the Arbiter's thread, and spawn it.
|
||||
pub fn send<F>(&self, future: F)
|
||||
where
|
||||
F: Future<Output = ()> + Send + Unpin + 'static,
|
||||
{
|
||||
let _ = self
|
||||
.sender
|
||||
.unbounded_send(ArbiterCommand::Execute(Box::new(future)));
|
||||
}
|
||||
|
||||
/// Send a function to the Arbiter's thread, and execute it. Any result from the function
|
||||
/// is discarded.
|
||||
pub fn exec_fn<F>(&self, f: F)
|
||||
/// Send a function to the [Arbiter]'s thread and execute it.
|
||||
///
|
||||
/// Any result from the function is discarded. If you require a result, include a response
|
||||
/// channel in the function.
|
||||
///
|
||||
/// Returns true if function was sent successfully and false if the [Arbiter] has died.
|
||||
pub fn spawn_fn<F>(&self, f: F) -> bool
|
||||
where
|
||||
F: FnOnce() + Send + 'static,
|
||||
{
|
||||
let _ = self
|
||||
.sender
|
||||
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
|
||||
f();
|
||||
})));
|
||||
self.spawn(async { f() })
|
||||
}
|
||||
|
||||
/// Send a function to the Arbiter's thread. This function will be executed asynchronously.
|
||||
/// A future is created, and when resolved will contain the result of the function sent
|
||||
/// to the Arbiters thread.
|
||||
pub fn exec<F, R>(&self, f: F) -> impl Future<Output = Result<R, Canceled>>
|
||||
where
|
||||
F: FnOnce() -> R + Send + 'static,
|
||||
R: Send + 'static,
|
||||
{
|
||||
let (tx, rx) = channel();
|
||||
let _ = self
|
||||
.sender
|
||||
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
|
||||
if !tx.is_canceled() {
|
||||
let _ = tx.send(f());
|
||||
}
|
||||
})));
|
||||
rx
|
||||
}
|
||||
|
||||
/// Set item to arbiter storage
|
||||
pub fn set_item<T: 'static>(item: T) {
|
||||
STORAGE.with(move |cell| cell.borrow_mut().insert(TypeId::of::<T>(), Box::new(item)));
|
||||
}
|
||||
|
||||
/// Check if arbiter storage contains item
|
||||
pub fn contains_item<T: 'static>() -> bool {
|
||||
STORAGE.with(move |cell| cell.borrow().get(&TypeId::of::<T>()).is_some())
|
||||
}
|
||||
|
||||
/// Get a reference to a type previously inserted on this arbiter's storage.
|
||||
/// Instruct the [Arbiter] to stop processing its event loop.
|
||||
///
|
||||
/// Panics if the item has not been inserted.
|
||||
pub fn get_item<T: 'static, F, R>(mut f: F) -> R
|
||||
where
|
||||
F: FnMut(&T) -> R,
|
||||
{
|
||||
STORAGE.with(move |cell| {
|
||||
let st = cell.borrow();
|
||||
let item = st
|
||||
.get(&TypeId::of::<T>())
|
||||
.and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref())
|
||||
.unwrap();
|
||||
f(item)
|
||||
})
|
||||
/// Returns true if stop message was sent successfully and false if the [Arbiter] has
|
||||
/// been dropped.
|
||||
pub fn stop(&self) -> bool {
|
||||
self.tx.send(ArbiterCommand::Stop).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a mutable reference to a type previously inserted on this arbiter's storage.
|
||||
/// An Arbiter represents a thread that provides an asynchronous execution environment for futures
|
||||
/// and functions.
|
||||
///
|
||||
/// When an arbiter is created, it spawns a new [OS thread](thread), and hosts an event loop.
|
||||
#[derive(Debug)]
|
||||
pub struct Arbiter {
|
||||
tx: mpsc::UnboundedSender<ArbiterCommand>,
|
||||
thread_handle: thread::JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl Arbiter {
|
||||
/// Spawn a new Arbiter thread and start its event loop.
|
||||
///
|
||||
/// Panics if the item has not been inserted.
|
||||
pub fn get_mut_item<T: 'static, F, R>(mut f: F) -> R
|
||||
/// # Panics
|
||||
/// Panics if a [System] is not registered on the current thread.
|
||||
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Arbiter {
|
||||
Self::with_tokio_rt(|| {
|
||||
crate::runtime::default_tokio_runtime().expect("Cannot create new Arbiter's Runtime.")
|
||||
})
|
||||
}
|
||||
|
||||
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
|
||||
///
|
||||
/// [tokio-runtime]: tokio::runtime::Runtime
|
||||
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
|
||||
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
|
||||
where
|
||||
F: FnMut(&mut T) -> R,
|
||||
F: FnOnce() -> tokio::runtime::Runtime + Send + 'static,
|
||||
{
|
||||
STORAGE.with(move |cell| {
|
||||
let mut st = cell.borrow_mut();
|
||||
let item = st
|
||||
.get_mut(&TypeId::of::<T>())
|
||||
.and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut())
|
||||
.unwrap();
|
||||
f(item)
|
||||
})
|
||||
}
|
||||
let sys = System::current();
|
||||
let system_id = sys.id();
|
||||
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
fn with_sender(sender: UnboundedSender<ArbiterCommand>) -> Self {
|
||||
Self {
|
||||
sender,
|
||||
thread_handle: None,
|
||||
}
|
||||
}
|
||||
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
/// Wait for the event loop to stop by joining the underlying thread (if one is present).
|
||||
pub fn join(&mut self) -> thread::Result<()> {
|
||||
if let Some(thread_handle) = self.thread_handle.take() {
|
||||
thread_handle.join()
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
|
||||
|
||||
/// Returns a future that will be completed once all currently spawned futures
|
||||
/// have completed.
|
||||
pub fn local_join() -> impl Future<Output = ()> {
|
||||
PENDING.with(move |cell| {
|
||||
let current = cell.replace(SmallVec::new());
|
||||
future::join_all(current).map(|_| ())
|
||||
})
|
||||
}
|
||||
}
|
||||
let thread_handle = thread::Builder::new()
|
||||
.name(name.clone())
|
||||
.spawn({
|
||||
let tx = tx.clone();
|
||||
move || {
|
||||
let rt = crate::runtime::Runtime::from(runtime_factory());
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
/// Future used for cleaning-up already finished `JoinHandle`s
|
||||
/// from the `PENDING` list so the vector doesn't grow indefinitely
|
||||
struct CleanupPending;
|
||||
System::set_current(sys);
|
||||
|
||||
impl Future for CleanupPending {
|
||||
type Output = ();
|
||||
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
PENDING.with(move |cell| {
|
||||
let mut pending = cell.borrow_mut();
|
||||
let mut i = 0;
|
||||
while i != pending.len() {
|
||||
if let Poll::Ready(_) = Pin::new(&mut pending[i]).poll(cx) {
|
||||
pending.remove(i);
|
||||
} else {
|
||||
i += 1;
|
||||
// register arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
|
||||
|
||||
ready_tx.send(()).unwrap();
|
||||
|
||||
// run arbiter event processing loop
|
||||
rt.block_on(ArbiterRunner { rx });
|
||||
|
||||
// deregister arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::DeregisterArbiter(arb_id));
|
||||
}
|
||||
}
|
||||
});
|
||||
})
|
||||
.unwrap_or_else(|err| panic!("Cannot spawn Arbiter's thread: {name:?}: {err:?}"));
|
||||
|
||||
Poll::Ready(())
|
||||
ready_rx.recv().unwrap();
|
||||
|
||||
Arbiter { tx, thread_handle }
|
||||
}
|
||||
|
||||
/// Spawn a new Arbiter thread and start its event loop with `tokio-uring` runtime.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if a [System] is not registered on the current thread.
|
||||
#[cfg(all(target_os = "linux", feature = "io-uring"))]
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Arbiter {
|
||||
let sys = System::current();
|
||||
let system_id = sys.id();
|
||||
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
|
||||
|
||||
let thread_handle = thread::Builder::new()
|
||||
.name(name.clone())
|
||||
.spawn({
|
||||
let tx = tx.clone();
|
||||
move || {
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
System::set_current(sys);
|
||||
|
||||
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
|
||||
|
||||
// register arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
|
||||
|
||||
ready_tx.send(()).unwrap();
|
||||
|
||||
// run arbiter event processing loop
|
||||
tokio_uring::start(ArbiterRunner { rx });
|
||||
|
||||
// deregister arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::DeregisterArbiter(arb_id));
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|err| panic!("Cannot spawn Arbiter's thread: {name:?}: {err:?}"));
|
||||
|
||||
ready_rx.recv().unwrap();
|
||||
|
||||
Arbiter { tx, thread_handle }
|
||||
}
|
||||
|
||||
/// Sets up an Arbiter runner in a new System using the environment's local set.
|
||||
pub(crate) fn in_new_system() -> ArbiterHandle {
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
|
||||
|
||||
crate::spawn(ArbiterRunner { rx });
|
||||
|
||||
hnd
|
||||
}
|
||||
|
||||
/// Return a handle to this Arbiter's message sender.
|
||||
pub fn handle(&self) -> ArbiterHandle {
|
||||
ArbiterHandle::new(self.tx.clone())
|
||||
}
|
||||
|
||||
/// Return a handle to the current thread's Arbiter's message sender.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if no Arbiter is running on the current thread.
|
||||
pub fn current() -> ArbiterHandle {
|
||||
HANDLE.with(|cell| match *cell.borrow() {
|
||||
Some(ref hnd) => hnd.clone(),
|
||||
None => panic!("Arbiter is not running."),
|
||||
})
|
||||
}
|
||||
|
||||
/// Try to get current running arbiter handle.
|
||||
///
|
||||
/// Returns `None` if no Arbiter has been started.
|
||||
///
|
||||
/// Unlike [`current`](Self::current), this never panics.
|
||||
pub fn try_current() -> Option<ArbiterHandle> {
|
||||
HANDLE.with(|cell| cell.borrow().clone())
|
||||
}
|
||||
|
||||
/// Stop the Arbiter from continuing its event loop.
|
||||
///
|
||||
/// Returns true if stop message was sent successfully and false if the Arbiter has been dropped.
|
||||
pub fn stop(&self) -> bool {
|
||||
self.tx.send(ArbiterCommand::Stop).is_ok()
|
||||
}
|
||||
|
||||
/// Send a future to the Arbiter's thread and spawn it.
|
||||
///
|
||||
/// If you require a result, include a response channel in the future.
|
||||
///
|
||||
/// Returns true if future was sent successfully and false if the Arbiter has died.
|
||||
#[track_caller]
|
||||
pub fn spawn<Fut>(&self, future: Fut) -> bool
|
||||
where
|
||||
Fut: Future<Output = ()> + Send + 'static,
|
||||
{
|
||||
self.tx
|
||||
.send(ArbiterCommand::Execute(Box::pin(future)))
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
/// Send a function to the Arbiter's thread and execute it.
|
||||
///
|
||||
/// Any result from the function is discarded. If you require a result, include a response
|
||||
/// channel in the function.
|
||||
///
|
||||
/// Returns true if function was sent successfully and false if the Arbiter has died.
|
||||
#[track_caller]
|
||||
pub fn spawn_fn<F>(&self, f: F) -> bool
|
||||
where
|
||||
F: FnOnce() + Send + 'static,
|
||||
{
|
||||
self.spawn(async { f() })
|
||||
}
|
||||
|
||||
/// Wait for Arbiter's event loop to complete.
|
||||
///
|
||||
/// Joins the underlying OS thread handle. See [`JoinHandle::join`](thread::JoinHandle::join).
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.thread_handle.join()
|
||||
}
|
||||
}
|
||||
|
||||
struct ArbiterController {
|
||||
stop: Option<Sender<i32>>,
|
||||
rx: UnboundedReceiver<ArbiterCommand>,
|
||||
/// A persistent future that processes [Arbiter] commands.
|
||||
struct ArbiterRunner {
|
||||
rx: mpsc::UnboundedReceiver<ArbiterCommand>,
|
||||
}
|
||||
|
||||
impl Drop for ArbiterController {
|
||||
fn drop(&mut self) {
|
||||
if thread::panicking() {
|
||||
if System::current().stop_on_panic() {
|
||||
eprintln!("Panic in Arbiter thread, shutting down system.");
|
||||
System::current().stop_with_code(1)
|
||||
} else {
|
||||
eprintln!("Panic in Arbiter thread.");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for ArbiterController {
|
||||
impl Future for ArbiterRunner {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
// process all items currently buffered in channel
|
||||
loop {
|
||||
match Pin::new(&mut self.rx).poll_next(cx) {
|
||||
Poll::Ready(None) => return Poll::Ready(()),
|
||||
Poll::Ready(Some(item)) => match item {
|
||||
match ready!(self.rx.poll_recv(cx)) {
|
||||
// channel closed; no more messages can be received
|
||||
None => return Poll::Ready(()),
|
||||
|
||||
// process arbiter command
|
||||
Some(item) => match item {
|
||||
ArbiterCommand::Stop => {
|
||||
if let Some(stop) = self.stop.take() {
|
||||
let _ = stop.send(0);
|
||||
};
|
||||
return Poll::Ready(());
|
||||
}
|
||||
ArbiterCommand::Execute(fut) => {
|
||||
let len = PENDING.with(move |cell| {
|
||||
let mut p = cell.borrow_mut();
|
||||
p.push(tokio::task::spawn_local(fut));
|
||||
p.len()
|
||||
});
|
||||
if len > 7 {
|
||||
// Before reaching the inline size
|
||||
tokio::task::spawn_local(CleanupPending);
|
||||
}
|
||||
}
|
||||
ArbiterCommand::ExecuteFn(f) => {
|
||||
f.call_box();
|
||||
ArbiterCommand::Execute(task_fut) => {
|
||||
tokio::task::spawn_local(task_fut);
|
||||
}
|
||||
},
|
||||
Poll::Pending => return Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum SystemCommand {
|
||||
Exit(i32),
|
||||
RegisterArbiter(usize, Arbiter),
|
||||
UnregisterArbiter(usize),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct SystemArbiter {
|
||||
stop: Option<Sender<i32>>,
|
||||
commands: UnboundedReceiver<SystemCommand>,
|
||||
arbiters: HashMap<usize, Arbiter>,
|
||||
}
|
||||
|
||||
impl SystemArbiter {
|
||||
pub(crate) fn new(stop: Sender<i32>, commands: UnboundedReceiver<SystemCommand>) -> Self {
|
||||
SystemArbiter {
|
||||
commands,
|
||||
stop: Some(stop),
|
||||
arbiters: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for SystemArbiter {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
loop {
|
||||
match Pin::new(&mut self.commands).poll_next(cx) {
|
||||
Poll::Ready(None) => return Poll::Ready(()),
|
||||
Poll::Ready(Some(cmd)) => match cmd {
|
||||
SystemCommand::Exit(code) => {
|
||||
// stop arbiters
|
||||
for arb in self.arbiters.values() {
|
||||
arb.stop();
|
||||
}
|
||||
// stop event loop
|
||||
if let Some(stop) = self.stop.take() {
|
||||
let _ = stop.send(code);
|
||||
}
|
||||
}
|
||||
SystemCommand::RegisterArbiter(name, hnd) => {
|
||||
self.arbiters.insert(name, hnd);
|
||||
}
|
||||
SystemCommand::UnregisterArbiter(name) => {
|
||||
self.arbiters.remove(&name);
|
||||
}
|
||||
},
|
||||
Poll::Pending => return Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait FnExec: Send + 'static {
|
||||
fn call_box(self: Box<Self>);
|
||||
}
|
||||
|
||||
impl<F> FnExec for F
|
||||
where
|
||||
F: FnOnce() + Send + 'static,
|
||||
{
|
||||
#[allow(clippy::boxed_local)]
|
||||
fn call_box(self: Box<Self>) {
|
||||
(*self)()
|
||||
}
|
||||
}
|
||||
|
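The reworked `spawn`/`spawn_fn` docs above suggest including a response channel in the future when a result is needed. A minimal sketch of that pattern, assuming the actix-rt 2.x API introduced in this change (the channel and values are illustrative only):

```rust
use std::sync::mpsc;

use actix_rt::{Arbiter, System};

fn main() {
    // a System must be registered on this thread before arbiters are spawned
    let _ = System::new();

    let arbiter = Arbiter::new();
    let (tx, rx) = mpsc::channel::<u32>();

    // the response channel travels inside the spawned future
    let sent = arbiter.spawn(async move {
        tx.send(42).unwrap();
    });
    assert!(sent, "arbiter thread should still be alive");

    assert_eq!(rx.recv().unwrap(), 42);

    arbiter.stop();
    arbiter.join().unwrap();
}
```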
@ -1,191 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
use std::io;
|
||||
|
||||
use futures_channel::mpsc::unbounded;
|
||||
use futures_channel::oneshot::{channel, Receiver};
|
||||
use futures_util::future::{lazy, Future, FutureExt};
|
||||
use tokio::task::LocalSet;
|
||||
|
||||
use crate::arbiter::{Arbiter, SystemArbiter};
|
||||
use crate::runtime::Runtime;
|
||||
use crate::system::System;
|
||||
|
||||
/// Builder struct for a actix runtime.
|
||||
///
|
||||
/// Either use `Builder::build` to create a system and start actors.
|
||||
/// Alternatively, use `Builder::run` to start the tokio runtime and
|
||||
/// run a function in its context.
|
||||
pub struct Builder {
|
||||
/// Name of the System. Defaults to "actix" if unset.
|
||||
name: Cow<'static, str>,
|
||||
|
||||
/// Whether the Arbiter will stop the whole System on uncaught panic. Defaults to false.
|
||||
stop_on_panic: bool,
|
||||
}
|
||||
|
||||
impl Builder {
|
||||
pub(crate) fn new() -> Self {
|
||||
Builder {
|
||||
name: Cow::Borrowed("actix"),
|
||||
stop_on_panic: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the name of the System.
|
||||
pub fn name<T: Into<String>>(mut self, name: T) -> Self {
|
||||
self.name = Cow::Owned(name.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the option 'stop_on_panic' which controls whether the System is stopped when an
|
||||
/// uncaught panic is thrown from a worker thread.
|
||||
///
|
||||
/// Defaults to false.
|
||||
pub fn stop_on_panic(mut self, stop_on_panic: bool) -> Self {
|
||||
self.stop_on_panic = stop_on_panic;
|
||||
self
|
||||
}
|
||||
|
||||
/// Create new System.
|
||||
///
|
||||
/// This method panics if it can not create tokio runtime
|
||||
pub fn build(self) -> SystemRunner {
|
||||
self.create_runtime(|| {})
|
||||
}
|
||||
|
||||
/// Create new System that can run asynchronously.
|
||||
///
|
||||
/// This method panics if it cannot start the system arbiter
|
||||
pub(crate) fn build_async(self, local: &LocalSet) -> AsyncSystemRunner {
|
||||
self.create_async_runtime(local)
|
||||
}
|
||||
|
||||
/// This function will start the tokio runtime and will finish once
/// `System::stop()` is called.
/// The function `f` is called within the tokio runtime context.
|
||||
pub fn run<F>(self, f: F) -> io::Result<()>
|
||||
where
|
||||
F: FnOnce() + 'static,
|
||||
{
|
||||
self.create_runtime(f).run()
|
||||
}
|
||||
|
||||
fn create_async_runtime(self, local: &LocalSet) -> AsyncSystemRunner {
|
||||
let (stop_tx, stop) = channel();
|
||||
let (sys_sender, sys_receiver) = unbounded();
|
||||
|
||||
let system = System::construct(sys_sender, Arbiter::new_system(), self.stop_on_panic);
|
||||
|
||||
// system arbiter
|
||||
let arb = SystemArbiter::new(stop_tx, sys_receiver);
|
||||
|
||||
// start the system arbiter
|
||||
let _ = local.spawn_local(arb);
|
||||
|
||||
AsyncSystemRunner { stop, system }
|
||||
}
|
||||
|
||||
fn create_runtime<F>(self, f: F) -> SystemRunner
|
||||
where
|
||||
F: FnOnce() + 'static,
|
||||
{
|
||||
let (stop_tx, stop) = channel();
|
||||
let (sys_sender, sys_receiver) = unbounded();
|
||||
|
||||
let system = System::construct(sys_sender, Arbiter::new_system(), self.stop_on_panic);
|
||||
|
||||
// system arbiter
|
||||
let arb = SystemArbiter::new(stop_tx, sys_receiver);
|
||||
|
||||
let mut rt = Runtime::new().unwrap();
|
||||
rt.spawn(arb);
|
||||
|
||||
// init system arbiter and run configuration method
|
||||
rt.block_on(lazy(move |_| f()));
|
||||
|
||||
SystemRunner { rt, stop, system }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct AsyncSystemRunner {
|
||||
stop: Receiver<i32>,
|
||||
system: System,
|
||||
}
|
||||
|
||||
impl AsyncSystemRunner {
|
||||
/// This function will start event loop and returns a future that
|
||||
/// resolves once the `System::stop()` function is called.
|
||||
pub(crate) fn run_nonblocking(self) -> impl Future<Output = Result<(), io::Error>> + Send {
|
||||
let AsyncSystemRunner { stop, .. } = self;
|
||||
|
||||
// run loop
|
||||
lazy(|_| {
|
||||
Arbiter::run_system(None);
|
||||
async {
|
||||
let res = match stop.await {
|
||||
Ok(code) => {
|
||||
if code != 0 {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("Non-zero exit code: {}", code),
|
||||
))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
|
||||
};
|
||||
Arbiter::stop_system();
|
||||
res
|
||||
}
|
||||
})
|
||||
.flatten()
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper object that runs System's event loop
|
||||
#[must_use = "SystemRunner must be run"]
|
||||
#[derive(Debug)]
|
||||
pub struct SystemRunner {
|
||||
rt: Runtime,
|
||||
stop: Receiver<i32>,
|
||||
system: System,
|
||||
}
|
||||
|
||||
impl SystemRunner {
|
||||
/// This function will start event loop and will finish once the
|
||||
/// `System::stop()` function is called.
|
||||
pub fn run(self) -> io::Result<()> {
|
||||
let SystemRunner { mut rt, stop, .. } = self;
|
||||
|
||||
// run loop
|
||||
Arbiter::run_system(Some(&rt));
|
||||
let result = match rt.block_on(stop) {
|
||||
Ok(code) => {
|
||||
if code != 0 {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("Non-zero exit code: {}", code),
|
||||
))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
|
||||
};
|
||||
Arbiter::stop_system();
|
||||
result
|
||||
}
|
||||
|
||||
/// Execute a future and wait for result.
|
||||
pub fn block_on<F, O>(&mut self, fut: F) -> O
|
||||
where
|
||||
F: Future<Output = O> + 'static,
|
||||
{
|
||||
Arbiter::run_system(Some(&self.rt));
|
||||
let res = self.rt.block_on(fut);
|
||||
Arbiter::stop_system();
|
||||
res
|
||||
}
|
||||
}
|
@ -1,63 +1,207 @@
|
||||
//! A runtime implementation that runs everything on the current thread.
#![deny(rust_2018_idioms, warnings)]
#![allow(clippy::type_complexity)]
//! Tokio-based single-threaded async runtime for the Actix ecosystem.
//!
//! In most parts of the Actix ecosystem, it has been chosen to use !Send futures. For this
//! reason, a single-threaded runtime is appropriate since it is guaranteed that futures will not
//! be moved between threads. This can result in small performance improvements over cases where
//! atomics would otherwise be needed.
//!
//! To achieve similar performance to multi-threaded, work-stealing runtimes, applications
//! using `actix-rt` will create multiple, mostly disconnected, single-threaded runtimes.
//! This approach has good performance characteristics for workloads where the majority of tasks
//! have similar runtime expense.
//!
//! The disadvantage is that idle threads will not steal work from very busy, stuck or otherwise
//! backlogged threads. Tasks that are disproportionately expensive should be offloaded to the
//! blocking task thread-pool using [`task::spawn_blocking`].
//!
//! # Examples
//! ```no_run
//! use std::sync::mpsc;
//! use actix_rt::{Arbiter, System};
//!
//! let _ = System::new();
//!
//! let (tx, rx) = mpsc::channel::<u32>();
//!
//! let arbiter = Arbiter::new();
//! arbiter.spawn_fn(move || tx.send(42).unwrap());
//!
//! let num = rx.recv().unwrap();
//! assert_eq!(num, 42);
//!
//! arbiter.stop();
//! arbiter.join().unwrap();
//! ```
//!
//! # `io-uring` Support
//!
//! There is experimental support for using io-uring with this crate by enabling the
//! `io-uring` feature. For now, it is semver exempt.
//!
//! Note that there are currently some unimplemented parts of using `actix-rt` with `io-uring`.
//! In particular, when running a `System`, only `System::block_on` is supported.

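A hedged sketch of the offloading advice above, assuming the `task::spawn_blocking` re-export added later in this change; the file-reading helper is illustrative and not part of the crate:

```rust
use actix_rt::task;

async fn load_file(path: std::path::PathBuf) -> std::io::Result<Vec<u8>> {
    // run the blocking read on Tokio's blocking thread-pool and await the JoinHandle
    task::spawn_blocking(move || std::fs::read(path))
        .await
        .expect("blocking task panicked")
}
```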
#[cfg(not(test))] // Work around for rust-lang/rust#62127
|
||||
pub use actix_macros::{main, test};
|
||||
#![allow(clippy::type_complexity)]
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
#[cfg(all(not(target_os = "linux"), feature = "io-uring"))]
|
||||
compile_error!("io_uring is a linux only feature.");
|
||||
|
||||
use std::future::Future;
|
||||
|
||||
// Cannot define a main macro when compiled into test harness.
|
||||
// Workaround for https://github.com/rust-lang/rust/issues/62127.
|
||||
#[cfg(all(feature = "macros", not(test)))]
|
||||
pub use actix_macros::main;
|
||||
#[cfg(feature = "macros")]
|
||||
pub use actix_macros::test;
|
||||
|
||||
mod arbiter;
|
||||
mod builder;
|
||||
mod runtime;
|
||||
mod system;
|
||||
|
||||
pub use self::arbiter::Arbiter;
|
||||
pub use self::builder::{Builder, SystemRunner};
|
||||
pub use self::runtime::Runtime;
|
||||
pub use self::system::System;
|
||||
pub use tokio::pin;
|
||||
use tokio::task::JoinHandle;
|
||||
|
||||
/// Spawns a future on the current arbiter.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if actix system is not running.
|
||||
pub fn spawn<F>(f: F)
|
||||
where
|
||||
F: futures_util::future::Future<Output = ()> + 'static,
|
||||
{
|
||||
if !System::is_set() {
|
||||
panic!("System is not running");
|
||||
}
|
||||
pub use self::{
|
||||
arbiter::{Arbiter, ArbiterHandle},
|
||||
runtime::Runtime,
|
||||
system::{System, SystemRunner},
|
||||
};
|
||||
|
||||
Arbiter::spawn(f);
|
||||
}
|
||||
|
||||
/// Asynchronous signal handling
pub mod signal {
    //! Asynchronous signal handling (Tokio re-exports).

    #[cfg(unix)]
    pub mod unix {
        //! Unix specific signals (Tokio re-exports).
        pub use tokio::signal::unix::*;
    }
    pub use tokio::signal::ctrl_c;
}
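A small sketch of how the re-exported signal API might be used; the shutdown flow shown here is an assumption for illustration:

```rust
use actix_rt::System;

fn main() -> std::io::Result<()> {
    System::new().block_on(async {
        println!("waiting for Ctrl-C");
        actix_rt::signal::ctrl_c().await?;
        println!("received Ctrl-C, shutting down");
        Ok(())
    })
}
```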
|
||||
|
||||
/// TCP/UDP/Unix bindings
|
||||
pub mod net {
|
||||
pub use tokio::net::UdpSocket;
|
||||
pub use tokio::net::{TcpListener, TcpStream};
|
||||
//! TCP/UDP/Unix bindings (mostly Tokio re-exports).
|
||||
|
||||
use std::{
|
||||
future::Future,
|
||||
io,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use tokio::io::{AsyncRead, AsyncWrite, Interest};
|
||||
#[cfg(unix)]
|
||||
mod unix {
|
||||
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
|
||||
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
|
||||
pub use tokio::{
|
||||
io::Ready,
|
||||
net::{TcpListener, TcpSocket, TcpStream, UdpSocket},
|
||||
};
|
||||
|
||||
    /// Extension trait over async read+write types that can also signal readiness.
    #[doc(hidden)]
    pub trait ActixStream: AsyncRead + AsyncWrite + Unpin {
        /// Poll stream and check read readiness of Self.
        ///
        /// See [tokio::net::TcpStream::poll_read_ready] for detail on intended use.
        fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;

        /// Poll stream and check write readiness of Self.
        ///
        /// See [tokio::net::TcpStream::poll_write_ready] for detail on intended use.
        fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
    }

    impl ActixStream for TcpStream {
        fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
            let ready = self.ready(Interest::READABLE);
            tokio::pin!(ready);
            ready.poll(cx)
        }

        fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
            let ready = self.ready(Interest::WRITABLE);
            tokio::pin!(ready);
            ready.poll(cx)
        }
    }

    #[cfg(unix)]
    pub use self::unix::*;
    impl ActixStream for UnixStream {
        fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
            let ready = self.ready(Interest::READABLE);
            tokio::pin!(ready);
            ready.poll(cx)
        }

        fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
            let ready = self.ready(Interest::WRITABLE);
            tokio::pin!(ready);
            ready.poll(cx)
        }
    }

    impl<Io: ActixStream + ?Sized> ActixStream for Box<Io> {
        fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
            (**self).poll_read_ready(cx)
        }

        fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
            (**self).poll_write_ready(cx)
        }
    }
}
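A sketch of code written against the (doc-hidden) `ActixStream` bound defined above; the accept loop and handler are hypothetical names used only for illustration:

```rust
use actix_rt::net::{ActixStream, TcpListener};

// generic over any stream that satisfies the readiness contract
async fn handle<Io: ActixStream>(io: Io) {
    // a real handler would drive a protocol over `io`
    let _ = io;
}

async fn accept_loop() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:8080").await?;
    loop {
        // TcpStream implements ActixStream via the impl above
        let (stream, _peer_addr) = listener.accept().await?;
        actix_rt::spawn(handle(stream));
    }
}
```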
|
||||
|
||||
/// Utilities for tracking time.
|
||||
pub mod time {
|
||||
pub use tokio::time::Instant;
|
||||
pub use tokio::time::{delay_for, delay_until, Delay};
|
||||
pub use tokio::time::{interval, interval_at, Interval};
|
||||
pub use tokio::time::{timeout, Timeout};
|
||||
//! Utilities for tracking time (Tokio re-exports).
|
||||
|
||||
pub use tokio::time::{
|
||||
interval, interval_at, sleep, sleep_until, timeout, Instant, Interval, Sleep, Timeout,
|
||||
};
|
||||
}
|
||||
|
||||
pub mod task {
|
||||
//! Task management (Tokio re-exports).
|
||||
|
||||
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
|
||||
}
|
||||
|
||||
/// Spawns a future on the current thread as a new task.
|
||||
///
|
||||
/// If not immediately awaited, the task can be cancelled using [`JoinHandle::abort`].
|
||||
///
|
||||
/// The provided future is spawned as a new task; therefore, panics are caught.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if Actix system is not running.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// # use std::time::Duration;
|
||||
/// # actix_rt::Runtime::new().unwrap().block_on(async {
|
||||
/// // task resolves successfully
|
||||
/// assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
|
||||
///
|
||||
/// // task panics
|
||||
/// assert!(actix_rt::spawn(async {
|
||||
/// panic!("panic is caught at task boundary");
|
||||
/// })
|
||||
/// .await
|
||||
/// .unwrap_err()
|
||||
/// .is_panic());
|
||||
///
|
||||
/// // task is cancelled before completion
|
||||
/// let handle = actix_rt::spawn(actix_rt::time::sleep(Duration::from_secs(100)));
|
||||
/// handle.abort();
|
||||
/// assert!(handle.await.unwrap_err().is_cancelled());
|
||||
/// # });
|
||||
/// ```
|
||||
#[track_caller]
|
||||
#[inline]
|
||||
pub fn spawn<Fut>(f: Fut) -> JoinHandle<Fut::Output>
|
||||
where
|
||||
Fut: Future + 'static,
|
||||
Fut::Output: 'static,
|
||||
{
|
||||
tokio::task::spawn_local(f)
|
||||
}
|
||||
|
@ -1,28 +1,29 @@
|
||||
use std::future::Future;
|
||||
use std::io;
|
||||
use tokio::{runtime, task::LocalSet};
|
||||
use std::{future::Future, io};
|
||||
|
||||
/// Single-threaded runtime provides a way to start reactor
|
||||
/// and runtime on the current thread.
|
||||
use tokio::task::{JoinHandle, LocalSet};
|
||||
|
||||
/// A Tokio-based runtime proxy.
|
||||
///
|
||||
/// See [module level][mod] documentation for more details.
|
||||
///
|
||||
/// [mod]: index.html
|
||||
/// All spawned futures will be executed on the current thread. Therefore, there is no `Send` bound
|
||||
/// on submitted futures.
|
||||
#[derive(Debug)]
|
||||
pub struct Runtime {
|
||||
local: LocalSet,
|
||||
rt: runtime::Runtime,
|
||||
rt: tokio::runtime::Runtime,
|
||||
}
|
||||
|
||||
pub(crate) fn default_tokio_runtime() -> io::Result<tokio::runtime::Runtime> {
|
||||
tokio::runtime::Builder::new_current_thread()
|
||||
.enable_io()
|
||||
.enable_time()
|
||||
.build()
|
||||
}
|
||||
|
||||
impl Runtime {
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
/// Returns a new runtime initialized with default configuration values.
|
||||
pub fn new() -> io::Result<Runtime> {
|
||||
let rt = runtime::Builder::new()
|
||||
.enable_io()
|
||||
.enable_time()
|
||||
.basic_scheduler()
|
||||
.build()?;
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new() -> io::Result<Self> {
|
||||
let rt = default_tokio_runtime()?;
|
||||
|
||||
Ok(Runtime {
|
||||
rt,
|
||||
@ -30,62 +31,119 @@ impl Runtime {
|
||||
})
|
||||
}
|
||||
|
||||
/// Spawn a future onto the single-threaded runtime.
|
||||
/// Offload a future onto the single-threaded runtime.
|
||||
///
|
||||
/// See [module level][mod] documentation for more details.
|
||||
/// The returned join handle can be used to await the future's result.
|
||||
///
|
||||
/// [mod]: index.html
|
||||
/// See [crate root][crate] documentation for more details.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust,ignore
|
||||
/// # use futures::{future, Future, Stream};
|
||||
/// use actix_rt::Runtime;
|
||||
///
|
||||
/// # fn dox() {
|
||||
/// // Create the runtime
|
||||
/// let mut rt = Runtime::new().unwrap();
|
||||
/// ```
|
||||
/// let rt = actix_rt::Runtime::new().unwrap();
|
||||
///
|
||||
/// // Spawn a future onto the runtime
|
||||
/// rt.spawn(future::lazy(|_| {
|
||||
/// let handle = rt.spawn(async {
|
||||
/// println!("running on the runtime");
|
||||
/// }));
|
||||
/// # }
|
||||
/// # pub fn main() {}
|
||||
/// 42
|
||||
/// });
|
||||
///
|
||||
/// assert_eq!(rt.block_on(handle).unwrap(), 42);
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if the spawn fails. Failure occurs if the executor
|
||||
/// is currently at capacity and is unable to spawn a new future.
|
||||
pub fn spawn<F>(&self, future: F) -> &Self
|
||||
where
|
||||
F: Future<Output = ()> + 'static,
|
||||
{
|
||||
self.local.spawn_local(future);
|
||||
self
|
||||
}
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future
|
||||
/// completes.
|
||||
///
|
||||
/// This function can be used to synchronously block the current thread
|
||||
/// until the provided `future` has resolved either successfully or with an
|
||||
/// error. The result of the future is then returned from this function
|
||||
/// call.
|
||||
///
|
||||
/// Note that this function will **also** execute any spawned futures on the
|
||||
/// current thread, but will **not** block until these other spawned futures
|
||||
/// have completed. Once the function returns, any uncompleted futures
|
||||
/// remain pending in the `Runtime` instance. These futures will not run
|
||||
/// until `block_on` or `run` is called again.
|
||||
///
|
||||
/// The caller is responsible for ensuring that other spawned futures
|
||||
/// complete execution by calling `block_on` or `run`.
|
||||
pub fn block_on<F>(&mut self, f: F) -> F::Output
|
||||
/// This function panics if the spawn fails. Failure occurs if the executor is currently at
|
||||
/// capacity and is unable to spawn a new future.
|
||||
#[track_caller]
|
||||
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
|
||||
where
|
||||
F: Future + 'static,
|
||||
{
|
||||
self.local.block_on(&mut self.rt, f)
|
||||
self.local.spawn_local(future)
|
||||
}
|
||||
|
||||
/// Retrieves a reference to the underlying Tokio runtime associated with this instance.
|
||||
///
|
||||
/// The Tokio runtime is responsible for executing asynchronous tasks and managing
|
||||
/// the event loop for an asynchronous Rust program. This method allows accessing
|
||||
/// the runtime to interact with its features directly.
|
||||
///
|
||||
/// In a typical use case, you might need to share the same runtime between different
|
||||
/// modules of your project. For example, a module might require a `tokio::runtime::Handle`
|
||||
/// to spawn tasks on the same runtime, or the runtime itself to configure more complex
|
||||
/// behaviours.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use actix_rt::Runtime;
|
||||
///
|
||||
/// mod module_a {
|
||||
/// pub fn do_something(handle: tokio::runtime::Handle) {
|
||||
/// handle.spawn(async {
|
||||
/// // Some asynchronous task here
|
||||
/// });
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// mod module_b {
|
||||
/// pub fn do_something_else(rt: &tokio::runtime::Runtime) {
|
||||
/// rt.spawn(async {
|
||||
/// // Another asynchronous task here
|
||||
/// });
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// let actix_runtime = actix_rt::Runtime::new().unwrap();
|
||||
/// let tokio_runtime = actix_runtime.tokio_runtime();
|
||||
///
|
||||
/// let handle = tokio_runtime.handle().clone();
|
||||
///
|
||||
/// module_a::do_something(handle);
|
||||
/// module_b::do_something_else(tokio_runtime);
|
||||
/// ```
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// An immutable reference to the `tokio::runtime::Runtime` instance associated with this
|
||||
/// `Runtime` instance.
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// While this method provides an immutable reference to the Tokio runtime, which is safe to share across threads,
|
||||
/// be aware that spawning blocking tasks on the Tokio runtime could potentially impact the execution
|
||||
/// of the Actix runtime. This is because Tokio is responsible for driving the Actix system,
|
||||
/// and blocking tasks could delay or deadlock other tasks in the run loop.
|
||||
pub fn tokio_runtime(&self) -> &tokio::runtime::Runtime {
|
||||
&self.rt
|
||||
}
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future completes.
|
||||
///
|
||||
/// This function can be used to synchronously block the current thread until the provided
|
||||
/// `future` has resolved either successfully or with an error. The result of the future is
|
||||
/// then returned from this function call.
|
||||
///
|
||||
/// Note that this function will also execute any spawned futures on the current thread, but
|
||||
/// will not block until these other spawned futures have completed. Once the function returns,
|
||||
/// any uncompleted futures remain pending in the `Runtime` instance. These futures will not run
|
||||
/// until `block_on` or `run` is called again.
|
||||
///
|
||||
/// The caller is responsible for ensuring that other spawned futures complete execution by
|
||||
/// calling `block_on` or `run`.
|
||||
#[track_caller]
|
||||
pub fn block_on<F>(&self, f: F) -> F::Output
|
||||
where
|
||||
F: Future,
|
||||
{
|
||||
self.local.block_on(&self.rt, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<tokio::runtime::Runtime> for Runtime {
|
||||
fn from(rt: tokio::runtime::Runtime) -> Self {
|
||||
Self {
|
||||
local: LocalSet::new(),
|
||||
rt,
|
||||
}
|
||||
}
|
||||
}
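A brief sketch of the two construction paths above: the default current-thread runtime and wrapping a pre-built Tokio runtime via the `From` impl; the builder options are illustrative only:

```rust
fn main() -> std::io::Result<()> {
    // default: current-thread Tokio runtime with IO and time drivers enabled
    let rt = actix_rt::Runtime::new()?;
    assert_eq!(rt.block_on(async { 1 + 1 }), 2);

    // custom: any current-thread Tokio runtime can be wrapped
    let tokio_rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()?;
    let rt = actix_rt::Runtime::from(tokio_rt);
    rt.block_on(async { /* ... */ });

    Ok(())
}
```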
|
||||
|
@ -1,197 +1,119 @@
|
||||
use std::cell::RefCell;
|
||||
use std::future::Future;
|
||||
use std::io;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
collections::HashMap,
|
||||
future::Future,
|
||||
io,
|
||||
pin::Pin,
|
||||
sync::atomic::{AtomicUsize, Ordering},
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures_channel::mpsc::UnboundedSender;
|
||||
use tokio::task::LocalSet;
|
||||
use futures_core::ready;
|
||||
use tokio::sync::{mpsc, oneshot};
|
||||
|
||||
use crate::arbiter::{Arbiter, SystemCommand};
|
||||
use crate::builder::{Builder, SystemRunner};
|
||||
use crate::{arbiter::ArbiterHandle, Arbiter};
|
||||
|
||||
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
/// System is a runtime manager.
|
||||
thread_local!(
|
||||
static CURRENT: RefCell<Option<System>> = const { RefCell::new(None) };
|
||||
);
|
||||
|
||||
/// A manager for a per-thread distributed async runtime.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct System {
|
||||
id: usize,
|
||||
sys: UnboundedSender<SystemCommand>,
|
||||
arbiter: Arbiter,
|
||||
stop_on_panic: bool,
|
||||
sys_tx: mpsc::UnboundedSender<SystemCommand>,
|
||||
|
||||
/// Handle to the first [Arbiter] that is created with the System.
|
||||
arbiter_handle: ArbiterHandle,
|
||||
}
|
||||
|
||||
thread_local!(
|
||||
static CURRENT: RefCell<Option<System>> = RefCell::new(None);
|
||||
);
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
impl System {
|
||||
/// Create a new system.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the underlying Tokio runtime cannot be created.
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new() -> SystemRunner {
|
||||
Self::with_tokio_rt(|| {
|
||||
crate::runtime::default_tokio_runtime()
|
||||
.expect("Default Actix (Tokio) runtime could not be created.")
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
|
||||
///
|
||||
/// [tokio-runtime]: tokio::runtime::Runtime
|
||||
pub fn with_tokio_rt<F>(runtime_factory: F) -> SystemRunner
|
||||
where
|
||||
F: FnOnce() -> tokio::runtime::Runtime,
|
||||
{
|
||||
let (stop_tx, stop_rx) = oneshot::channel();
|
||||
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
|
||||
|
||||
let rt = crate::runtime::Runtime::from(runtime_factory());
|
||||
let sys_arbiter = rt.block_on(async { Arbiter::in_new_system() });
|
||||
let system = System::construct(sys_tx, sys_arbiter.clone());
|
||||
|
||||
system
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
|
||||
.unwrap();
|
||||
|
||||
// init background system arbiter
|
||||
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
|
||||
rt.spawn(sys_ctrl);
|
||||
|
||||
SystemRunner { rt, stop_rx }
|
||||
}
|
||||
}
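A sketch of `System::with_tokio_rt` as defined above, assuming a hand-built current-thread Tokio runtime supplied by the closure:

```rust
fn main() {
    let runner = actix_rt::System::with_tokio_rt(|| {
        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("failed to build Tokio runtime")
    });

    runner.block_on(async {
        // futures spawned here run on the runtime built by the closure above
        actix_rt::spawn(async { println!("hello from the system's thread") });
    });
}
```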
|
||||
|
||||
#[cfg(feature = "io-uring")]
|
||||
impl System {
|
||||
/// Create a new system.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the underlying Tokio runtime cannot be created.
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new() -> SystemRunner {
|
||||
SystemRunner
|
||||
}
|
||||
|
||||
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
|
||||
///
|
||||
/// [tokio-runtime]: tokio::runtime::Runtime
|
||||
#[doc(hidden)]
|
||||
pub fn with_tokio_rt<F>(_: F) -> SystemRunner
|
||||
where
|
||||
F: FnOnce() -> tokio::runtime::Runtime,
|
||||
{
|
||||
unimplemented!("System::with_tokio_rt is not implemented for io-uring feature yet")
|
||||
}
|
||||
}
|
||||
|
||||
impl System {
|
||||
/// Constructs new system and sets it as current
|
||||
/// Constructs new system and registers it on the current thread.
|
||||
pub(crate) fn construct(
|
||||
sys: UnboundedSender<SystemCommand>,
|
||||
arbiter: Arbiter,
|
||||
stop_on_panic: bool,
|
||||
sys_tx: mpsc::UnboundedSender<SystemCommand>,
|
||||
arbiter_handle: ArbiterHandle,
|
||||
) -> Self {
|
||||
let sys = System {
|
||||
sys,
|
||||
arbiter,
|
||||
stop_on_panic,
|
||||
sys_tx,
|
||||
arbiter_handle,
|
||||
id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst),
|
||||
};
|
||||
|
||||
System::set_current(sys.clone());
|
||||
|
||||
sys
|
||||
}
|
||||
|
||||
/// Build a new system with a customized tokio runtime.
|
||||
///
|
||||
/// This allows to customize the runtime. See struct level docs on
|
||||
/// `Builder` for more information.
|
||||
pub fn builder() -> Builder {
|
||||
Builder::new()
|
||||
}
|
||||
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
/// Create new system.
|
||||
///
|
||||
/// This method panics if it can not create tokio runtime
|
||||
pub fn new<T: Into<String>>(name: T) -> SystemRunner {
|
||||
Self::builder().name(name).build()
|
||||
}
|
||||
|
||||
/// Create new system using provided tokio `LocalSet`.
|
||||
///
|
||||
/// This method panics if it can not spawn system arbiter
|
||||
///
|
||||
/// Note: This method uses provided `LocalSet` to create a `System` future only.
|
||||
/// All the [`Arbiter`]s will be started in separate threads using their own tokio `Runtime`s.
|
||||
/// It means that using this method currently it is impossible to make `actix-rt` work in the
|
||||
/// alternative `tokio` `Runtime`s (e.g. provided by [`tokio_compat`]).
|
||||
///
|
||||
/// [`Arbiter`]: struct.Arbiter.html
|
||||
/// [`tokio_compat`]: https://crates.io/crates/tokio-compat
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use tokio::{runtime::Runtime, task::LocalSet};
|
||||
/// use actix_rt::System;
|
||||
/// use futures_util::future::try_join_all;
|
||||
///
|
||||
/// async fn run_application() {
|
||||
/// let first_task = tokio::spawn(async {
|
||||
/// // ...
|
||||
/// # println!("One task");
|
||||
/// # Ok::<(),()>(())
|
||||
/// });
|
||||
///
|
||||
/// let second_task = tokio::spawn(async {
|
||||
/// // ...
|
||||
/// # println!("Another task");
|
||||
/// # Ok::<(),()>(())
|
||||
/// });
|
||||
///
|
||||
/// try_join_all(vec![first_task, second_task])
|
||||
/// .await
|
||||
/// .expect("Some of the futures finished unexpectedly");
|
||||
/// }
|
||||
///
|
||||
///
|
||||
/// let mut runtime = tokio::runtime::Builder::new()
|
||||
/// .core_threads(2)
|
||||
/// .enable_all()
|
||||
/// .threaded_scheduler()
|
||||
/// .build()
|
||||
/// .unwrap();
|
||||
///
|
||||
///
|
||||
/// let actix_system_task = LocalSet::new();
|
||||
/// let sys = System::run_in_tokio("actix-main-system", &actix_system_task);
|
||||
/// actix_system_task.spawn_local(sys);
|
||||
///
|
||||
/// let rest_operations = run_application();
|
||||
/// runtime.block_on(actix_system_task.run_until(rest_operations));
|
||||
/// ```
|
||||
pub fn run_in_tokio<T: Into<String>>(
|
||||
name: T,
|
||||
local: &LocalSet,
|
||||
) -> impl Future<Output = io::Result<()>> {
|
||||
Self::builder()
|
||||
.name(name)
|
||||
.build_async(local)
|
||||
.run_nonblocking()
|
||||
}
|
||||
|
||||
/// Consume the provided tokio Runtime and start the `System` in it.
|
||||
/// This method will create a `LocalSet` object and occupy the current thread
|
||||
/// for the created `System` exclusively. All the other asynchronous tasks that
|
||||
/// should be executed as well must be aggregated into one future, provided as the last
|
||||
/// argument to this method.
|
||||
///
|
||||
/// Note: This method uses provided `Runtime` to create a `System` future only.
|
||||
/// All the [`Arbiter`]s will be started in separate threads using their own tokio `Runtime`s.
|
||||
/// It means that using this method currently it is impossible to make `actix-rt` work in the
|
||||
/// alternative `tokio` `Runtime`s (e.g. provided by `tokio_compat`).
|
||||
///
|
||||
/// [`Arbiter`]: struct.Arbiter.html
|
||||
/// [`tokio_compat`]: https://crates.io/crates/tokio-compat
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// - `name`: Name of the System
|
||||
/// - `runtime`: A tokio Runtime to run the system in.
|
||||
/// - `rest_operations`: A future to be executed in the runtime along with the System.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use tokio::runtime::Runtime;
|
||||
/// use actix_rt::System;
|
||||
/// use futures_util::future::try_join_all;
|
||||
///
|
||||
/// async fn run_application() {
|
||||
/// let first_task = tokio::spawn(async {
|
||||
/// // ...
|
||||
/// # println!("One task");
|
||||
/// # Ok::<(),()>(())
|
||||
/// });
|
||||
///
|
||||
/// let second_task = tokio::spawn(async {
|
||||
/// // ...
|
||||
/// # println!("Another task");
|
||||
/// # Ok::<(),()>(())
|
||||
/// });
|
||||
///
|
||||
/// try_join_all(vec![first_task, second_task])
|
||||
/// .await
|
||||
/// .expect("Some of the futures finished unexpectedly");
|
||||
/// }
|
||||
///
|
||||
///
|
||||
/// let runtime = tokio::runtime::Builder::new()
|
||||
/// .core_threads(2)
|
||||
/// .enable_all()
|
||||
/// .threaded_scheduler()
|
||||
/// .build()
|
||||
/// .unwrap();
|
||||
///
|
||||
/// let rest_operations = run_application();
|
||||
/// System::attach_to_tokio("actix-main-system", runtime, rest_operations);
|
||||
/// ```
|
||||
pub fn attach_to_tokio<Fut, R>(
|
||||
name: impl Into<String>,
|
||||
mut runtime: tokio::runtime::Runtime,
|
||||
rest_operations: Fut,
|
||||
) -> R
|
||||
where
|
||||
Fut: std::future::Future<Output = R>,
|
||||
{
|
||||
let actix_system_task = LocalSet::new();
|
||||
let sys = System::run_in_tokio(name.into(), &actix_system_task);
|
||||
actix_system_task.spawn_local(sys);
|
||||
|
||||
runtime.block_on(actix_system_task.run_until(rest_operations))
|
||||
}
|
||||
|
||||
/// Get current running system.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if no system is registered on the current thread.
|
||||
pub fn current() -> System {
|
||||
CURRENT.with(|cell| match *cell.borrow() {
|
||||
Some(ref sys) => sys.clone(),
|
||||
@ -199,67 +121,237 @@ impl System {
|
||||
})
|
||||
}
|
||||
|
||||
/// Check if current system is set, i.e., as already been started.
|
||||
pub fn is_set() -> bool {
|
||||
CURRENT.with(|cell| cell.borrow().is_some())
|
||||
/// Try to get current running system.
|
||||
///
|
||||
/// Returns `None` if no System has been started.
|
||||
///
|
||||
/// Unlike [`current`](Self::current), this never panics.
|
||||
pub fn try_current() -> Option<System> {
|
||||
CURRENT.with(|cell| cell.borrow().clone())
|
||||
}
|
||||
|
||||
/// Set current running system.
|
||||
/// Get a handle to the System's initial [Arbiter].
|
||||
pub fn arbiter(&self) -> &ArbiterHandle {
|
||||
&self.arbiter_handle
|
||||
}
|
||||
|
||||
/// Check if there is a System registered on the current thread.
|
||||
pub fn is_registered() -> bool {
|
||||
CURRENT.with(|sys| sys.borrow().is_some())
|
||||
}
|
||||
|
||||
/// Register given system on current thread.
|
||||
#[doc(hidden)]
|
||||
pub fn set_current(sys: System) {
|
||||
CURRENT.with(|s| {
|
||||
*s.borrow_mut() = Some(sys);
|
||||
CURRENT.with(|cell| {
|
||||
*cell.borrow_mut() = Some(sys);
|
||||
})
|
||||
}
|
||||
|
||||
/// Execute function with system reference.
|
||||
pub fn with_current<F, R>(f: F) -> R
|
||||
where
|
||||
F: FnOnce(&System) -> R,
|
||||
{
|
||||
CURRENT.with(|cell| match *cell.borrow() {
|
||||
Some(ref sys) => f(sys),
|
||||
None => panic!("System is not running"),
|
||||
})
|
||||
}
|
||||
|
||||
/// System id
|
||||
/// Numeric system identifier.
|
||||
///
|
||||
/// Useful when using multiple Systems.
|
||||
pub fn id(&self) -> usize {
|
||||
self.id
|
||||
}
|
||||
|
||||
/// Stop the system
|
||||
/// Stop the system (with code 0).
|
||||
pub fn stop(&self) {
|
||||
self.stop_with_code(0)
|
||||
}
|
||||
|
||||
/// Stop the system with a particular exit code.
|
||||
/// Stop the system with a given exit code.
|
||||
pub fn stop_with_code(&self, code: i32) {
|
||||
let _ = self.sys.unbounded_send(SystemCommand::Exit(code));
|
||||
let _ = self.sys_tx.send(SystemCommand::Exit(code));
|
||||
}
|
||||
|
||||
pub(crate) fn sys(&self) -> &UnboundedSender<SystemCommand> {
|
||||
&self.sys
|
||||
}
|
||||
|
||||
/// Return status of 'stop_on_panic' option which controls whether the System is stopped when an
|
||||
/// uncaught panic is thrown from a worker thread.
|
||||
pub fn stop_on_panic(&self) -> bool {
|
||||
self.stop_on_panic
|
||||
}
|
||||
|
||||
/// System arbiter
|
||||
pub fn arbiter(&self) -> &Arbiter {
|
||||
&self.arbiter
|
||||
}
|
||||
|
||||
/// This function will start the tokio runtime and will finish once
/// `System::stop()` is called.
/// The function `f` is called within the tokio runtime context.
|
||||
pub fn run<F>(f: F) -> io::Result<()>
|
||||
where
|
||||
F: FnOnce() + 'static,
|
||||
{
|
||||
Self::builder().run(f)
|
||||
pub(crate) fn tx(&self) -> &mpsc::UnboundedSender<SystemCommand> {
|
||||
&self.sys_tx
|
||||
}
|
||||
}
|
||||
|
||||
/// Runner that keeps a [System]'s event loop alive until stop message is received.
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[must_use = "A SystemRunner does nothing unless `run` is called."]
|
||||
#[derive(Debug)]
|
||||
pub struct SystemRunner {
|
||||
rt: crate::runtime::Runtime,
|
||||
stop_rx: oneshot::Receiver<i32>,
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
impl SystemRunner {
|
||||
/// Starts event loop and will return once [System] is [stopped](System::stop).
|
||||
pub fn run(self) -> io::Result<()> {
|
||||
let exit_code = self.run_with_code()?;
|
||||
|
||||
match exit_code {
|
||||
0 => Ok(()),
|
||||
nonzero => Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("Non-zero exit code: {}", nonzero),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Runs the event loop until [stopped](System::stop_with_code), returning the exit code.
|
||||
pub fn run_with_code(self) -> io::Result<i32> {
|
||||
let SystemRunner { rt, stop_rx, .. } = self;
|
||||
|
||||
// run loop
|
||||
rt.block_on(stop_rx)
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
|
||||
}
|
||||
|
||||
/// Retrieves a reference to the underlying [Actix runtime](crate::Runtime) associated with this
|
||||
/// `SystemRunner` instance.
|
||||
///
|
||||
/// The Actix runtime is responsible for managing the event loop for an Actix system and
|
||||
/// executing asynchronous tasks. This method provides access to the runtime, allowing direct
|
||||
/// interaction with its features.
|
||||
///
|
||||
/// In a typical use case, you might need to share the same runtime between different
|
||||
/// parts of your project. For example, some components might require a [`Runtime`] to spawn
|
||||
/// tasks on the same runtime.
|
||||
///
|
||||
/// Read more in the documentation for [`Runtime`].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// let system_runner = actix_rt::System::new();
|
||||
/// let actix_runtime = system_runner.runtime();
|
||||
///
|
||||
/// // Use the runtime to spawn an async task or perform other operations
|
||||
/// ```
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// While this method provides an immutable reference to the Actix runtime, which is safe to
|
||||
/// share across threads, be aware that spawning blocking tasks on the Actix runtime could
|
||||
/// potentially impact system performance. This is because the Actix runtime is responsible for
|
||||
/// driving the system, and blocking tasks could delay other tasks in the run loop.
|
||||
///
|
||||
/// [`Runtime`]: crate::Runtime
|
||||
pub fn runtime(&self) -> &crate::runtime::Runtime {
|
||||
&self.rt
|
||||
}
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future completes.
|
||||
#[track_caller]
|
||||
#[inline]
|
||||
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
|
||||
self.rt.block_on(fut)
|
||||
}
|
||||
}
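A sketch of how a `SystemRunner` is typically driven: queue work with `block_on`, then `run` the event loop until some task sends the stop message; the application logic is a placeholder:

```rust
fn main() -> std::io::Result<()> {
    let runner = actix_rt::System::new();

    runner.block_on(async {
        actix_rt::spawn(async {
            // ... application logic ...
            actix_rt::System::current().stop();
        });
    });

    // resolves once the stop message (exit code 0) is received
    runner.run()
}
```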
|
||||
|
||||
/// Runner that keeps a [System]'s event loop alive until stop message is received.
|
||||
#[cfg(feature = "io-uring")]
|
||||
#[must_use = "A SystemRunner does nothing unless `run` is called."]
|
||||
#[derive(Debug)]
|
||||
pub struct SystemRunner;
|
||||
|
||||
#[cfg(feature = "io-uring")]
|
||||
impl SystemRunner {
|
||||
/// Starts the event loop and returns once the [System] is [stopped](System::stop).
|
||||
pub fn run(self) -> io::Result<()> {
|
||||
unimplemented!("SystemRunner::run is not implemented for io-uring feature yet");
|
||||
}
|
||||
|
||||
/// Runs the event loop until [stopped](System::stop_with_code), returning the exit code.
|
||||
pub fn run_with_code(self) -> io::Result<i32> {
|
||||
unimplemented!("SystemRunner::run_with_code is not implemented for io-uring feature yet");
|
||||
}
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future completes.
|
||||
#[inline]
|
||||
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
|
||||
tokio_uring::start(async move {
|
||||
let (stop_tx, stop_rx) = oneshot::channel();
|
||||
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
|
||||
|
||||
let sys_arbiter = Arbiter::in_new_system();
|
||||
let system = System::construct(sys_tx, sys_arbiter.clone());
|
||||
|
||||
system
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
|
||||
.unwrap();
|
||||
|
||||
// init background system arbiter
|
||||
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
|
||||
tokio_uring::spawn(sys_ctrl);
|
||||
|
||||
let res = fut.await;
|
||||
drop(stop_rx);
|
||||
res
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum SystemCommand {
|
||||
Exit(i32),
|
||||
RegisterArbiter(usize, ArbiterHandle),
|
||||
DeregisterArbiter(usize),
|
||||
}
|
||||
|
||||
/// There is one `SystemController` per [System]. It runs in the background, keeping track of
|
||||
/// [Arbiter]s and is able to distribute a system-wide stop command.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct SystemController {
|
||||
stop_tx: Option<oneshot::Sender<i32>>,
|
||||
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
|
||||
arbiters: HashMap<usize, ArbiterHandle>,
|
||||
}
|
||||
|
||||
impl SystemController {
|
||||
pub(crate) fn new(
|
||||
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
|
||||
stop_tx: oneshot::Sender<i32>,
|
||||
) -> Self {
|
||||
SystemController {
|
||||
cmd_rx,
|
||||
stop_tx: Some(stop_tx),
|
||||
arbiters: HashMap::with_capacity(4),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for SystemController {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
// process all items currently buffered in channel
|
||||
loop {
|
||||
match ready!(self.cmd_rx.poll_recv(cx)) {
|
||||
// channel closed; no more messages can be received
|
||||
None => return Poll::Ready(()),
|
||||
|
||||
// process system command
|
||||
Some(cmd) => match cmd {
|
||||
SystemCommand::Exit(code) => {
|
||||
// stop all arbiters
|
||||
for arb in self.arbiters.values() {
|
||||
arb.stop();
|
||||
}
|
||||
|
||||
// stop event loop
|
||||
// will only fire once
|
||||
if let Some(stop_tx) = self.stop_tx.take() {
|
||||
let _ = stop_tx.send(code);
|
||||
}
|
||||
}
|
||||
|
||||
SystemCommand::RegisterArbiter(id, arb) => {
|
||||
self.arbiters.insert(id, arb);
|
||||
}
|
||||
|
||||
SystemCommand::DeregisterArbiter(id) => {
|
||||
self.arbiters.remove(&id);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
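
The controller above is what turns a single `System::stop()` call into a stop signal for every registered arbiter. A sketch of that behaviour from the caller's side, mirroring the `system_stop_stops_arbiters` test later in this diff (default, non-io-uring runner assumed):

```
use actix_rt::{Arbiter, System};

fn main() {
    let runner = System::new();
    let arbiter = Arbiter::new();

    // While the system is alive, the arbiter accepts work.
    assert!(arbiter.spawn_fn(|| ()));

    // Stopping the system sends SystemCommand::Exit to the SystemController,
    // which calls `stop()` on every registered ArbiterHandle.
    System::current().stop();
    runner.run().expect("system should stop cleanly");

    arbiter.join().unwrap();
}
```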
|
||||
|
@ -1,114 +0,0 @@
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[test]
|
||||
fn start_and_stop() {
|
||||
actix_rt::System::new("start_and_stop").block_on(async move {
|
||||
assert!(
|
||||
actix_rt::Arbiter::is_running(),
|
||||
"System doesn't seem to have started"
|
||||
);
|
||||
});
|
||||
assert!(
|
||||
!actix_rt::Arbiter::is_running(),
|
||||
"System doesn't seem to have stopped"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn await_for_timer() {
|
||||
let time = Duration::from_secs(2);
|
||||
let instant = Instant::now();
|
||||
actix_rt::System::new("test_wait_timer").block_on(async move {
|
||||
tokio::time::delay_for(time).await;
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() >= time,
|
||||
"Block on should poll awaited future to completion"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn join_another_arbiter() {
|
||||
let time = Duration::from_secs(2);
|
||||
let instant = Instant::now();
|
||||
actix_rt::System::new("test_join_another_arbiter").block_on(async move {
|
||||
let mut arbiter = actix_rt::Arbiter::new();
|
||||
arbiter.send(Box::pin(async move {
|
||||
tokio::time::delay_for(time).await;
|
||||
actix_rt::Arbiter::current().stop();
|
||||
}));
|
||||
arbiter.join().unwrap();
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() >= time,
|
||||
"Join on another arbiter should complete only when it calls stop"
|
||||
);
|
||||
|
||||
let instant = Instant::now();
|
||||
actix_rt::System::new("test_join_another_arbiter").block_on(async move {
|
||||
let mut arbiter = actix_rt::Arbiter::new();
|
||||
arbiter.exec_fn(move || {
|
||||
actix_rt::spawn(async move {
|
||||
tokio::time::delay_for(time).await;
|
||||
actix_rt::Arbiter::current().stop();
|
||||
});
|
||||
});
|
||||
arbiter.join().unwrap();
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() >= time,
|
||||
"Join on a arbiter that has used actix_rt::spawn should wait for said future"
|
||||
);
|
||||
|
||||
let instant = Instant::now();
|
||||
actix_rt::System::new("test_join_another_arbiter").block_on(async move {
|
||||
let mut arbiter = actix_rt::Arbiter::new();
|
||||
arbiter.send(Box::pin(async move {
|
||||
tokio::time::delay_for(time).await;
|
||||
actix_rt::Arbiter::current().stop();
|
||||
}));
|
||||
arbiter.stop();
|
||||
arbiter.join().unwrap();
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() < time,
|
||||
"Premature stop of arbiter should conclude regardless of it's current state"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn join_current_arbiter() {
|
||||
let time = Duration::from_secs(2);
|
||||
|
||||
let instant = Instant::now();
|
||||
actix_rt::System::new("test_join_current_arbiter").block_on(async move {
|
||||
actix_rt::spawn(async move {
|
||||
tokio::time::delay_for(time).await;
|
||||
actix_rt::Arbiter::current().stop();
|
||||
});
|
||||
actix_rt::Arbiter::local_join().await;
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() >= time,
|
||||
"Join on current arbiter should wait for all spawned futures"
|
||||
);
|
||||
|
||||
let large_timer = Duration::from_secs(20);
|
||||
let instant = Instant::now();
|
||||
actix_rt::System::new("test_join_current_arbiter").block_on(async move {
|
||||
actix_rt::spawn(async move {
|
||||
tokio::time::delay_for(time).await;
|
||||
actix_rt::Arbiter::current().stop();
|
||||
});
|
||||
let f = actix_rt::Arbiter::local_join();
|
||||
actix_rt::spawn(async move {
|
||||
tokio::time::delay_for(large_timer).await;
|
||||
actix_rt::Arbiter::current().stop();
|
||||
});
|
||||
f.await;
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() < large_timer,
|
||||
"local_join should await only for the already spawned futures"
|
||||
);
|
||||
}
|
17
actix-rt/tests/test-macro-import-conflict.rs
Normal file
@ -0,0 +1,17 @@
|
||||
//! Checks that test macro does not cause problems in the presence of imports named "test" that
|
||||
//! could be either a module with test items or the "test with runtime" macro itself.
|
||||
//!
|
||||
//! Before actix/actix-net#399 was implemented, this macro was running twice. The first run output
//! a bare `#[test]` which, since a `test` import was in scope, got expanded and run again.
|
||||
//!
|
||||
//! Prevented by using the fully-qualified test marker (`#[::core::prelude::v1::test]`).
|
||||
|
||||
#![cfg(feature = "macros")]
|
||||
|
||||
use actix_rt::time as test;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_naming_conflict() {
|
||||
use test as time;
|
||||
time::sleep(std::time::Duration::from_millis(2)).await;
|
||||
}
|
378
actix-rt/tests/tests.rs
Normal file
@ -0,0 +1,378 @@
|
||||
#![allow(missing_docs)]
|
||||
|
||||
use std::{
|
||||
future::Future,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use actix_rt::{task::JoinError, Arbiter, System};
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
use {
|
||||
std::{sync::mpsc::channel, thread},
|
||||
tokio::sync::oneshot,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn await_for_timer() {
|
||||
let time = Duration::from_secs(1);
|
||||
let instant = Instant::now();
|
||||
System::new().block_on(async move {
|
||||
tokio::time::sleep(time).await;
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() >= time,
|
||||
"Block on should poll awaited future to completion"
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn run_with_code() {
|
||||
let sys = System::new();
|
||||
System::current().stop_with_code(42);
|
||||
let exit_code = sys.run_with_code().expect("system stop should not error");
|
||||
assert_eq!(exit_code, 42);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn join_another_arbiter() {
|
||||
let time = Duration::from_secs(1);
|
||||
let instant = Instant::now();
|
||||
System::new().block_on(async move {
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn(Box::pin(async move {
|
||||
tokio::time::sleep(time).await;
|
||||
Arbiter::current().stop();
|
||||
}));
|
||||
arbiter.join().unwrap();
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() >= time,
|
||||
"Join on another arbiter should complete only when it calls stop"
|
||||
);
|
||||
|
||||
let instant = Instant::now();
|
||||
System::new().block_on(async move {
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn_fn(move || {
|
||||
actix_rt::spawn(async move {
|
||||
tokio::time::sleep(time).await;
|
||||
Arbiter::current().stop();
|
||||
});
|
||||
});
|
||||
arbiter.join().unwrap();
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() >= time,
|
||||
"Join on an arbiter that has used actix_rt::spawn should wait for said future"
|
||||
);
|
||||
|
||||
let instant = Instant::now();
|
||||
System::new().block_on(async move {
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn(Box::pin(async move {
|
||||
tokio::time::sleep(time).await;
|
||||
Arbiter::current().stop();
|
||||
}));
|
||||
arbiter.stop();
|
||||
arbiter.join().unwrap();
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() < time,
|
||||
"Premature stop of arbiter should conclude regardless of it's current state"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_static_block_on() {
|
||||
let string = String::from("test_str");
|
||||
let string = string.as_str();
|
||||
|
||||
let sys = System::new();
|
||||
|
||||
sys.block_on(async {
|
||||
actix_rt::time::sleep(Duration::from_millis(1)).await;
|
||||
assert_eq!("test_str", string);
|
||||
});
|
||||
|
||||
let rt = actix_rt::Runtime::new().unwrap();
|
||||
|
||||
rt.block_on(async {
|
||||
actix_rt::time::sleep(Duration::from_millis(1)).await;
|
||||
assert_eq!("test_str", string);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wait_for_spawns() {
|
||||
let rt = actix_rt::Runtime::new().unwrap();
|
||||
|
||||
let handle = rt.spawn(async {
|
||||
println!("running on the runtime");
|
||||
// panic is caught at task boundary
|
||||
panic!("intentional test panic");
|
||||
});
|
||||
|
||||
assert!(rt.block_on(handle).is_err());
|
||||
}
|
||||
|
||||
// Temporarily disabled tests for the io-uring feature.
|
||||
// They should be enabled when possible.
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_spawn_fn_runs() {
|
||||
let _ = System::new();
|
||||
|
||||
let (tx, rx) = channel::<u32>();
|
||||
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn_fn(move || tx.send(42).unwrap());
|
||||
|
||||
let num = rx.recv().unwrap();
|
||||
assert_eq!(num, 42);
|
||||
|
||||
arbiter.stop();
|
||||
arbiter.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_handle_spawn_fn_runs() {
|
||||
let sys = System::new();
|
||||
|
||||
let (tx, rx) = channel::<u32>();
|
||||
|
||||
let arbiter = Arbiter::new();
|
||||
let handle = arbiter.handle();
|
||||
drop(arbiter);
|
||||
|
||||
handle.spawn_fn(move || {
|
||||
tx.send(42).unwrap();
|
||||
System::current().stop()
|
||||
});
|
||||
|
||||
let num = rx.recv_timeout(Duration::from_secs(2)).unwrap();
|
||||
assert_eq!(num, 42);
|
||||
|
||||
handle.stop();
|
||||
sys.run().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_drop_no_panic_fn() {
|
||||
let _ = System::new();
|
||||
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn_fn(|| panic!("test"));
|
||||
|
||||
arbiter.stop();
|
||||
arbiter.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_drop_no_panic_fut() {
|
||||
let _ = System::new();
|
||||
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn(async { panic!("test") });
|
||||
|
||||
arbiter.stop();
|
||||
arbiter.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn system_arbiter_spawn() {
|
||||
let runner = System::new();
|
||||
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let sys = System::current();
|
||||
|
||||
thread::spawn(|| {
|
||||
// this thread will have no arbiter in its thread local so call will panic
|
||||
Arbiter::current();
|
||||
})
|
||||
.join()
|
||||
.unwrap_err();
|
||||
|
||||
let thread = thread::spawn(|| {
|
||||
// this thread will have no arbiter in its thread local so use the system handle instead
|
||||
System::set_current(sys);
|
||||
let sys = System::current();
|
||||
|
||||
let arb = sys.arbiter();
|
||||
arb.spawn(async move {
|
||||
tx.send(42u32).unwrap();
|
||||
System::current().stop();
|
||||
});
|
||||
});
|
||||
|
||||
assert_eq!(runner.block_on(rx).unwrap(), 42);
|
||||
thread.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn system_stop_stops_arbiters() {
|
||||
let sys = System::new();
|
||||
let arb = Arbiter::new();
|
||||
|
||||
// arbiter should be alive to receive spawn msg
|
||||
assert!(Arbiter::current().spawn_fn(|| {}));
|
||||
assert!(arb.spawn_fn(|| {}));
|
||||
|
||||
System::current().stop();
|
||||
sys.run().unwrap();
|
||||
|
||||
// account for slightly slow thread de-spawns
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
|
||||
// arbiter should be dead and return false
|
||||
assert!(!Arbiter::current().spawn_fn(|| {}));
|
||||
assert!(!arb.spawn_fn(|| {}));
|
||||
|
||||
arb.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn new_system_with_tokio() {
|
||||
let (tx, rx) = channel();
|
||||
|
||||
let res = System::with_tokio_rt(move || {
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_io()
|
||||
.enable_time()
|
||||
.thread_keep_alive(Duration::from_millis(1000))
|
||||
.worker_threads(2)
|
||||
.max_blocking_threads(2)
|
||||
.on_thread_start(|| {})
|
||||
.on_thread_stop(|| {})
|
||||
.build()
|
||||
.unwrap()
|
||||
})
|
||||
.block_on(async {
|
||||
actix_rt::time::sleep(Duration::from_millis(1)).await;
|
||||
|
||||
tokio::task::spawn(async move {
|
||||
tx.send(42).unwrap();
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
123usize
|
||||
});
|
||||
|
||||
assert_eq!(res, 123);
|
||||
assert_eq!(rx.recv().unwrap(), 42);
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn new_arbiter_with_tokio() {
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
};
|
||||
|
||||
let _ = System::new();
|
||||
|
||||
let arb = Arbiter::with_tokio_rt(|| {
|
||||
tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
let counter = Arc::new(AtomicBool::new(true));
|
||||
|
||||
let counter1 = counter.clone();
|
||||
let did_spawn = arb.spawn(async move {
|
||||
actix_rt::time::sleep(Duration::from_millis(1)).await;
|
||||
counter1.store(false, Ordering::SeqCst);
|
||||
Arbiter::current().stop();
|
||||
});
|
||||
|
||||
assert!(did_spawn);
|
||||
|
||||
arb.join().unwrap();
|
||||
|
||||
assert!(!counter.load(Ordering::SeqCst));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn no_system_current_panic() {
|
||||
System::current();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn no_system_arbiter_new_panic() {
|
||||
Arbiter::new();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn try_current_no_system() {
|
||||
assert!(System::try_current().is_none())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn try_current_with_system() {
|
||||
System::new().block_on(async { assert!(System::try_current().is_some()) });
|
||||
}
|
||||
|
||||
#[allow(clippy::unit_cmp)]
|
||||
#[test]
|
||||
fn spawn_local() {
|
||||
System::new().block_on(async {
|
||||
// demonstrate that spawn -> R is strictly more capable than spawn -> ()
|
||||
|
||||
assert_eq!(actix_rt::spawn(async {}).await.unwrap(), ());
|
||||
assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
|
||||
assert!(actix_rt::spawn(async { panic!("") }).await.is_err());
|
||||
|
||||
actix_rt::spawn(async { tokio::time::sleep(Duration::from_millis(50)).await })
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
fn g<F: Future<Output = Result<(), JoinError>>>(_f: F) {}
|
||||
g(actix_rt::spawn(async {}));
|
||||
// g(actix_rt::spawn(async { 1 })); // compile err
|
||||
|
||||
fn h<F: Future<Output = Result<R, JoinError>>, R>(_f: F) {}
|
||||
h(actix_rt::spawn(async {}));
|
||||
h(actix_rt::spawn(async { 1 }));
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(all(target_os = "linux", feature = "io-uring"))]
|
||||
#[test]
|
||||
fn tokio_uring_arbiter() {
|
||||
System::new().block_on(async {
|
||||
let (tx, rx) = std::sync::mpsc::channel();
|
||||
|
||||
Arbiter::new().spawn(async move {
|
||||
let handle = actix_rt::spawn(async move {
|
||||
let f = tokio_uring::fs::File::create("test.txt").await.unwrap();
|
||||
let buf = b"Hello World!";
|
||||
|
||||
let (res, _) = f.write_all_at(&buf[..], 0).await;
|
||||
assert!(res.is_ok());
|
||||
|
||||
f.sync_all().await.unwrap();
|
||||
f.close().await.unwrap();
|
||||
|
||||
std::fs::remove_file("test.txt").unwrap();
|
||||
});
|
||||
|
||||
handle.await.unwrap();
|
||||
tx.send(true).unwrap();
|
||||
});
|
||||
|
||||
assert!(rx.recv().unwrap());
|
||||
})
|
||||
}
|
@ -1,129 +1,232 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2020-xx-xx
|
||||
* Added explicit info log message on accept queue pause. [#215]
|
||||
## Unreleased
|
||||
|
||||
## 2.5.1
|
||||
|
||||
## 1.0.4 - 2020-09-12
|
||||
* Update actix-codec to 0.3.0.
|
||||
* Workers must be greater than 0. [#167]
|
||||
- Fix panic in test server.
|
||||
- Minimum supported Rust version (MSRV) is now 1.71.
|
||||
|
||||
[#167]: https://github.com/actix/actix-net/pull/167
|
||||
## 2.5.0
|
||||
|
||||
- Update `mio` dependency to `1`.
|
||||
|
||||
## 1.0.3 - 2020-05-19
|
||||
* Replace deprecated `net2` crate with `socket2` [#140]
|
||||
## 2.4.0
|
||||
|
||||
[#140]: https://github.com/actix/actix-net/pull/140
|
||||
- Update `tokio-uring` dependency to `0.5`.
|
||||
- Minimum supported Rust version (MSRV) is now 1.70.
|
||||
|
||||
## 2.3.0
|
||||
|
||||
## 1.0.2 - 2020-02-26
|
||||
* Avoid error by calling `reregister()` on Windows [#103]
|
||||
- Add support for MultiPath TCP (MPTCP) with `MpTcp` enum and `ServerBuilder::mptcp()` method.
|
||||
- Minimum supported Rust version (MSRV) is now 1.65.
|
||||
|
||||
[#103]: https://github.com/actix/actix-net/pull/103
|
||||
## 2.2.0
|
||||
|
||||
- Minimum supported Rust version (MSRV) is now 1.59.
|
||||
- Update `tokio-uring` dependency to `0.4`.
|
||||
|
||||
## 1.0.1 - 2019-12-29
|
||||
* Rename `.start()` method to `.run()`
|
||||
## 2.1.1
|
||||
|
||||
- No significant changes since `2.1.0`.
|
||||
|
||||
## 1.0.0 - 2019-12-11
|
||||
* Use actix-net releases
|
||||
## 2.1.0
|
||||
|
||||
- Update `tokio-uring` dependency to `0.3`.
|
||||
- Logs emitted now use the `tracing` crate with `log` compatibility.
|
||||
- Wait for accept thread to stop before sending completion signal.
|
||||
|
||||
## 1.0.0-alpha.4 - 2019-12-08
|
||||
* Use actix-service 1.0.0-alpha.4
|
||||
## 2.0.0
|
||||
|
||||
- No significant changes since `2.0.0-rc.4`.
|
||||
|
||||
## 1.0.0-alpha.3 - 2019-12-07
|
||||
* Migrate to tokio 0.2
|
||||
* Fix compilation on non-unix platforms
|
||||
* Better handling server configuration
|
||||
## 2.0.0-rc.4
|
||||
|
||||
- Update `tokio-uring` dependency to `0.2`.
|
||||
|
||||
## 1.0.0-alpha.2 - 2019-12-02
|
||||
* Simplify server service (remove actix-server-config)
|
||||
* Allow to wait on `Server` until server stops
|
||||
## 2.0.0-rc.3
|
||||
|
||||
- No significant changes since `2.0.0-rc.2`.
|
||||
|
||||
## 0.8.0-alpha.1 - 2019-11-22
|
||||
* Migrate to `std::future`
|
||||
## 2.0.0-rc.2
|
||||
|
||||
- Simplify `TestServer`.
|
||||
|
||||
## 0.7.0 - 2019-10-04
|
||||
* Update `rustls` to 0.16
|
||||
* Minimum required Rust version upped to 1.37.0
|
||||
## 2.0.0-rc.1
|
||||
|
||||
- Hide implementation details of `Server`.
|
||||
- `Server` now runs only after awaiting it.
|
||||
|
||||
## 0.6.1 - 2019-09-25
|
||||
* Add UDS listening support to `ServerBuilder`
|
||||
## 2.0.0-beta.9
|
||||
|
||||
- Restore `Arbiter` support lost in `beta.8`.
|
||||
|
||||
## 0.6.0 - 2019-07-18
|
||||
* Support Unix domain sockets #3
|
||||
## 2.0.0-beta.8
|
||||
|
||||
- Fix non-unix signal handler.
|
||||
|
||||
## 0.5.1 - 2019-05-18
|
||||
* ServerBuilder::shutdown_timeout() accepts u64
|
||||
## 2.0.0-beta.7
|
||||
|
||||
- Server can be started in regular Tokio runtime.
|
||||
- Expose new `Server` type whose `Future` impl resolves when server stops.
|
||||
- Rename `Server` to `ServerHandle`.
|
||||
- Add `Server::handle` to obtain handle to server.
|
||||
- Rename `ServerBuilder::{maxconn => max_concurrent_connections}`.
|
||||
- Deprecate crate-level `new` shortcut for server builder.
|
||||
- Minimum supported Rust version (MSRV) is now 1.52.
|
||||
|
||||
## 0.5.0 - 2019-05-12
|
||||
* Add `Debug` impl for `SslError`
|
||||
* Derive debug for `Server` and `ServerCommand`
|
||||
* Upgrade to actix-service 0.4
|
||||
## 2.0.0-beta.6
|
||||
|
||||
- Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux.
|
||||
- Server no longer listens to the `SIGHUP` signal. Previously, the received signal was not used, but it did block subsequent exit signals from working.
|
||||
- Remove `config` module. `ServiceConfig`, `ServiceRuntime` public types are removed due to this change.
|
||||
- Remove `ServerBuilder::configure`.
|
||||
|
||||
## 0.4.3 - 2019-04-16
|
||||
* Re-export `IoStream` trait
|
||||
* Depend on `ssl` and `rust-tls` features from actix-server-config
|
||||
## 2.0.0-beta.5
|
||||
|
||||
- Server shutdown notifies all workers to exit regardless if shutdown is graceful. This causes all workers to shutdown immediately in force shutdown case.
|
||||
|
||||
## 0.4.2 - 2019-03-30
|
||||
* Fix SIGINT force shutdown
|
||||
## 2.0.0-beta.4
|
||||
|
||||
- Prevent panic when `shutdown_timeout` is very large. [f9262db]
|
||||
|
||||
## 0.4.1 - 2019-03-14
|
||||
* `SystemRuntime::on_start()` - allow to run future before server service initialization
|
||||
## 2.0.0-beta.3
|
||||
|
||||
- Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`.
|
||||
- Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop.
|
||||
- Add `ServerBuilder::worker_max_blocking_threads` to customize blocking thread pool size.
|
||||
- Update `actix-rt` to `2.0.0`.
|
||||
|
||||
## 0.4.0 - 2019-03-12
|
||||
* Use `ServerConfig` for service factory
|
||||
* Wrap tcp socket to `Io` type
|
||||
* Upgrade actix-service
|
||||
## 2.0.0-beta.2
|
||||
|
||||
- Merge `actix-testing` to `actix-server` as `test_server` mod.
|
||||
|
||||
## 0.3.1 - 2019-03-04
|
||||
* Add `ServerBuilder::maxconnrate` sets the maximum per-worker number of concurrent connections
|
||||
* Add helper ssl error `SslError`
|
||||
* Rename `StreamServiceFactory` to `ServiceFactory`
|
||||
* Deprecate `StreamServiceFactory`
|
||||
## 2.0.0-beta.1
|
||||
|
||||
- Added explicit info log message on accept queue pause.
|
||||
- Prevent double registration of sockets when back-pressure is resolved.
|
||||
- Update `mio` dependency to `0.7.3`.
|
||||
- Remove `socket2` dependency.
|
||||
- `ServerBuilder::backlog` now accepts `u32` instead of `i32`.
|
||||
- Remove `AcceptNotify` type and pass `WakerQueue` to `Worker` to wake up `Accept`'s `Poll`.
|
||||
- Convert `mio::net::TcpStream` to `actix_rt::net::TcpStream`(`UnixStream` for uds) using `FromRawFd` and `IntoRawFd`(`FromRawSocket` and `IntoRawSocket` on windows).
|
||||
- Remove `AsyncRead` and `AsyncWrite` trait bound for `socket::FromStream` trait.
|
||||
|
||||
## 0.3.0 - 2019-03-02
|
||||
* Use new `NewService` trait
|
||||
## 1.0.4
|
||||
|
||||
- Update actix-codec to 0.3.0.
|
||||
- Workers must be greater than 0.
|
||||
|
||||
## 0.2.1 - 2019-02-09
|
||||
* Drop service response
|
||||
## 1.0.3
|
||||
|
||||
- Replace deprecated `net2` crate with `socket2`.
|
||||
|
||||
## 0.2.0 - 2019-02-01
|
||||
* Migrate to actix-service 0.2
|
||||
* Updated rustls dependency
|
||||
## 1.0.2
|
||||
|
||||
- Avoid error by calling `reregister()` on Windows.
|
||||
|
||||
## 0.1.3 - 2018-12-21
|
||||
* Fix max concurrent connections handling
|
||||
## 1.0.1
|
||||
|
||||
- Rename `.start()` method to `.run()`
|
||||
|
||||
## 0.1.2 - 2018-12-12
|
||||
* rename ServiceConfig::rt() to ServiceConfig::apply()
|
||||
* Fix back-pressure for concurrent ssl handshakes
|
||||
## 1.0.0
|
||||
|
||||
- Use actix-net releases
|
||||
|
||||
## 0.1.1 - 2018-12-11
|
||||
* Fix signal handling on windows
|
||||
## 1.0.0-alpha.4
|
||||
|
||||
- Use actix-service 1.0.0-alpha.4
|
||||
|
||||
## 0.1.0 - 2018-12-09
|
||||
* Move server to separate crate
|
||||
## 1.0.0-alpha.3
|
||||
|
||||
- Migrate to tokio 0.2
|
||||
- Fix compilation on non-unix platforms
|
||||
- Better handling server configuration
|
||||
|
||||
## 1.0.0-alpha.2
|
||||
|
||||
- Simplify server service (remove actix-server-config)
|
||||
- Allow to wait on `Server` until server stops
|
||||
|
||||
## 0.8.0-alpha.1
|
||||
|
||||
- Migrate to `std::future`
|
||||
|
||||
## 0.7.0
|
||||
|
||||
- Update `rustls` to 0.16
|
||||
- Minimum required Rust version upped to 1.37.0
|
||||
|
||||
## 0.6.1
|
||||
|
||||
- Add UDS listening support to `ServerBuilder`
|
||||
|
||||
## 0.6.0
|
||||
|
||||
- Support Unix domain sockets #3
|
||||
|
||||
## 0.5.1
|
||||
|
||||
- ServerBuilder::shutdown_timeout() accepts u64
|
||||
|
||||
## 0.5.0
|
||||
|
||||
- Add `Debug` impl for `SslError`
|
||||
- Derive debug for `Server` and `ServerCommand`
|
||||
- Upgrade to actix-service 0.4
|
||||
|
||||
## 0.4.3
|
||||
|
||||
- Re-export `IoStream` trait
|
||||
- Depend on `ssl` and `rust-tls` features from actix-server-config
|
||||
|
||||
## 0.4.2
|
||||
|
||||
- Fix SIGINT force shutdown
|
||||
|
||||
## 0.4.1
|
||||
|
||||
- `SystemRuntime::on_start()` - allow to run future before server service initialization
|
||||
|
||||
## 0.4.0
|
||||
|
||||
- Use `ServerConfig` for service factory
|
||||
- Wrap tcp socket to `Io` type
|
||||
- Upgrade actix-service
|
||||
|
||||
## 0.3.1
|
||||
|
||||
- Add `ServerBuilder::maxconnrate` sets the maximum per-worker number of concurrent connections
|
||||
- Add helper ssl error `SslError`
|
||||
- Rename `StreamServiceFactory` to `ServiceFactory`
|
||||
- Deprecate `StreamServiceFactory`
|
||||
|
||||
## 0.3.0
|
||||
|
||||
- Use new `NewService` trait
|
||||
|
||||
## 0.2.1
|
||||
|
||||
- Drop service response
|
||||
|
||||
## 0.2.0
|
||||
|
||||
- Migrate to actix-service 0.2
|
||||
- Updated rustls dependency
|
||||
|
||||
## 0.1.3
|
||||
|
||||
- Fix max concurrent connections handling
|
||||
|
||||
## 0.1.2
|
||||
|
||||
- rename ServiceConfig::rt() to ServiceConfig::apply()
|
||||
- Fix back-pressure for concurrent ssl handshakes
|
||||
|
||||
## 0.1.1
|
||||
|
||||
- Fix signal handling on windows
|
||||
|
||||
## 0.1.0
|
||||
|
||||
- Move server to separate crate
|
||||
|
@ -1,44 +1,50 @@
|
||||
[package]
|
||||
name = "actix-server"
|
||||
version = "1.0.4"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
version = "2.5.1"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"Rob Ede <robjtede@icloud.com>",
|
||||
"Ali MJ Al-Nasrawy <alimjalnasrawy@gmail.com>",
|
||||
]
|
||||
description = "General purpose TCP server built for the Actix ecosystem"
|
||||
keywords = ["network", "framework", "async", "futures"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-server/"
|
||||
keywords = ["network", "tcp", "server", "framework", "async"]
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net/tree/master/actix-server"
|
||||
license = "MIT OR Apache-2.0"
|
||||
exclude = [".gitignore", ".cargo/config"]
|
||||
edition = "2018"
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[lib]
|
||||
name = "actix_server"
|
||||
path = "src/lib.rs"
|
||||
[package.metadata.cargo_check_external_types]
|
||||
allowed_external_types = ["tokio::*"]
|
||||
|
||||
[features]
|
||||
default = []
|
||||
io-uring = ["tokio-uring", "actix-rt/io-uring"]
|
||||
|
||||
[dependencies]
|
||||
actix-service = "1.0.6"
|
||||
actix-rt = "1.1.1"
|
||||
actix-codec = "0.3.0"
|
||||
actix-utils = "2.0.0"
|
||||
actix-rt = { version = "2.10", default-features = false }
|
||||
actix-service = "2"
|
||||
actix-utils = "3"
|
||||
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] }
|
||||
futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] }
|
||||
mio = { version = "1", features = ["os-poll", "net"] }
|
||||
socket2 = "0.5"
|
||||
tokio = { version = "1.23.1", features = ["sync"] }
|
||||
tracing = { version = "0.1.30", default-features = false, features = ["log"] }
|
||||
|
||||
log = "0.4"
|
||||
num_cpus = "1.13"
|
||||
mio = "0.6.19"
|
||||
socket2 = "0.3"
|
||||
futures-channel = { version = "0.3.4", default-features = false }
|
||||
futures-util = { version = "0.3.4", default-features = false, features = ["sink"] }
|
||||
slab = "0.4"
|
||||
|
||||
# unix domain sockets
|
||||
# FIXME: Remove it and use mio own uds feature once mio 0.7 is released
|
||||
mio-uds = { version = "0.6.7" }
|
||||
# runtime for `io-uring` feature
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
tokio-uring = { version = "0.5", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
bytes = "0.5"
|
||||
env_logger = "0.7"
|
||||
actix-testing = "1.0.0"
|
||||
tokio = { version = "0.2", features = ["io-util"] }
|
||||
actix-codec = "0.5"
|
||||
actix-rt = "2.8"
|
||||
|
||||
bytes = "1"
|
||||
futures-util = { version = "0.3.17", default-features = false, features = ["sink", "async-await-macro"] }
|
||||
pretty_env_logger = "0.5"
|
||||
tokio = { version = "1.23.1", features = ["io-util", "rt-multi-thread", "macros", "fs"] }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
21
actix-server/README.md
Normal file
@ -0,0 +1,21 @@
|
||||
# actix-server
|
||||
|
||||
> General purpose TCP server built for the Actix ecosystem.
|
||||
|
||||
<!-- prettier-ignore-start -->
|
||||
|
||||
[](https://crates.io/crates/actix-server)
|
||||
[](https://docs.rs/actix-server/2.5.1)
|
||||
[](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
|
||||

|
||||
<br />
|
||||
[](https://deps.rs/crate/actix-server/2.5.1)
|
||||

|
||||
[](https://discord.gg/NWpN5mmg3x)
|
||||
|
||||
<!-- prettier-ignore-end -->
|
||||
|
||||
## Resources
|
||||
|
||||
- [Library Documentation](https://docs.rs/actix-server)
|
||||
- [Examples](/actix-server/examples)
|
98
actix-server/examples/file-reader.rs
Normal file
@ -0,0 +1,98 @@
|
||||
//! Simple file-reader TCP server with framed stream.
|
||||
//!
|
||||
//! Using the following command:
|
||||
//!
|
||||
//! ```sh
|
||||
//! nc 127.0.0.1 8080
|
||||
//! ```
|
||||
//!
|
||||
//! Follow the prompt and enter a file path, relative or absolute.
|
||||
|
||||
#![allow(missing_docs)]
|
||||
|
||||
use std::io;
|
||||
|
||||
use actix_codec::{Framed, LinesCodec};
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_server::Server;
|
||||
use actix_service::{fn_service, ServiceFactoryExt as _};
|
||||
use futures_util::{SinkExt as _, StreamExt as _};
|
||||
use tokio::{fs::File, io::AsyncReadExt as _};
|
||||
|
||||
async fn run() -> io::Result<()> {
|
||||
pretty_env_logger::formatted_timed_builder()
|
||||
.parse_env(pretty_env_logger::env_logger::Env::default().default_filter_or("info"));
|
||||
|
||||
let addr = ("127.0.0.1", 8080);
|
||||
tracing::info!("starting server on port: {}", &addr.0);
|
||||
|
||||
// Bind socket address and start worker(s). By default, the server uses the number of physical
|
||||
// CPU cores as the worker count. For this reason, the closure passed to bind needs to return
|
||||
// a service *factory*; so it can be created once per worker.
|
||||
Server::build()
|
||||
.bind("file-reader", addr, move || {
|
||||
fn_service(move |stream: TcpStream| async move {
|
||||
// set up codec to use with I/O resource
|
||||
let mut framed = Framed::new(stream, LinesCodec::default());
|
||||
|
||||
loop {
|
||||
// prompt for file name
|
||||
framed.send("Type file name to return:").await?;
|
||||
|
||||
// wait for next line
|
||||
match framed.next().await {
|
||||
Some(Ok(line)) => {
|
||||
match File::open(&line).await {
|
||||
Ok(mut file) => {
|
||||
tracing::info!("reading file: {}", &line);
|
||||
|
||||
// read file into String buffer
|
||||
let mut buf = String::new();
|
||||
file.read_to_string(&mut buf).await?;
|
||||
|
||||
// send String into framed object
|
||||
framed.send(buf).await?;
|
||||
|
||||
// break out of loop and close the connection
|
||||
break;
|
||||
}
|
||||
Err(err) => {
|
||||
tracing::error!("{}", err);
|
||||
framed
|
||||
.send("File not found or not readable. Try again.")
|
||||
.await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// not being able to read a line from the stream is unrecoverable
|
||||
Some(Err(err)) => return Err(err),
|
||||
|
||||
// This EOF won't be hit.
|
||||
None => continue,
|
||||
}
|
||||
}
|
||||
|
||||
// close connection after file has been copied to TCP stream
|
||||
Ok(())
|
||||
})
|
||||
.map_err(|err| tracing::error!("service error: {:?}", err))
|
||||
})?
|
||||
.workers(2)
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
run().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// alternatively:
|
||||
// #[actix_rt::main]
|
||||
// async fn main() -> io::Result<()> {
|
||||
// run().await?;
|
||||
// Ok(())
|
||||
// }
|
@ -9,39 +9,39 @@
|
||||
//! Start typing. When you press enter the typed line will be echoed back. The server will log
|
||||
//! the length of each line it echoes and the total size of data sent when the connection is closed.
|
||||
|
||||
use std::sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
use std::{
|
||||
io,
|
||||
sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
use std::{env, io};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_server::Server;
|
||||
use actix_service::pipeline_factory;
|
||||
use actix_service::{fn_service, ServiceFactoryExt as _};
|
||||
use bytes::BytesMut;
|
||||
use futures_util::future::ok;
|
||||
use log::{error, info};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env::set_var("RUST_LOG", "actix=trace,basic=trace");
|
||||
env_logger::init();
|
||||
async fn run() -> io::Result<()> {
|
||||
pretty_env_logger::formatted_timed_builder()
|
||||
.parse_env(pretty_env_logger::env_logger::Env::default().default_filter_or("info"));
|
||||
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
let addr = ("127.0.0.1", 8080);
|
||||
info!("starting server on port: {}", &addr.0);
|
||||
tracing::info!("starting server on port: {}", &addr.0);
|
||||
|
||||
// Bind socket address and start worker(s). By default, the server uses the number of available
|
||||
// logical CPU cores as the worker count. For this reason, the closure passed to bind needs
|
||||
// to return a service *factory*; so it can be created once per worker.
|
||||
// Bind socket address and start worker(s). By default, the server uses the number of physical
|
||||
// CPU cores as the worker count. For this reason, the closure passed to bind needs to return
|
||||
// a service *factory*; so it can be created once per worker.
|
||||
Server::build()
|
||||
.bind("echo", addr, move || {
|
||||
let count = Arc::clone(&count);
|
||||
let num2 = Arc::clone(&count);
|
||||
|
||||
pipeline_factory(move |mut stream: TcpStream| {
|
||||
fn_service(move |mut stream: TcpStream| {
|
||||
let count = Arc::clone(&count);
|
||||
|
||||
async move {
|
||||
@ -58,14 +58,14 @@ async fn main() -> io::Result<()> {
|
||||
|
||||
// more bytes to process
|
||||
Ok(bytes_read) => {
|
||||
info!("[{}] read {} bytes", num, bytes_read);
|
||||
tracing::info!("[{}] read {} bytes", num, bytes_read);
|
||||
stream.write_all(&buf[size..]).await.unwrap();
|
||||
size += bytes_read;
|
||||
}
|
||||
|
||||
// stream error; bail from loop with error
|
||||
Err(err) => {
|
||||
error!("Stream Error: {:?}", err);
|
||||
tracing::error!("stream error: {:?}", err);
|
||||
return Err(());
|
||||
}
|
||||
}
|
||||
@ -75,14 +75,27 @@ async fn main() -> io::Result<()> {
|
||||
Ok((buf.freeze(), size))
|
||||
}
|
||||
})
|
||||
.map_err(|err| error!("Service Error: {:?}", err))
|
||||
.map_err(|err| tracing::error!("service error: {:?}", err))
|
||||
.and_then(move |(_, size)| {
|
||||
let num = num2.load(Ordering::SeqCst);
|
||||
info!("[{}] total bytes read: {}", num, size);
|
||||
tracing::info!("[{}] total bytes read: {}", num, size);
|
||||
ok(size)
|
||||
})
|
||||
})?
|
||||
.workers(1)
|
||||
.workers(2)
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
run().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// alternatively:
|
||||
// #[actix_rt::main]
|
||||
// async fn main() -> io::Result<()> {
|
||||
// run().await?;
|
||||
// Ok(())
|
||||
// }
|
@ -1,479 +1,462 @@
|
||||
use std::sync::mpsc as sync_mpsc;
|
||||
use std::time::Duration;
|
||||
use std::{io, thread};
|
||||
use std::{io, thread, time::Duration};
|
||||
|
||||
use actix_rt::time::{delay_until, Instant};
|
||||
use actix_rt::System;
|
||||
use log::{error, info};
|
||||
use slab::Slab;
|
||||
use actix_rt::time::Instant;
|
||||
use mio::{Interest, Poll, Token as MioToken};
|
||||
use tracing::{debug, error, info};
|
||||
|
||||
use crate::server::Server;
|
||||
use crate::socket::{SocketAddr, SocketListener, StdListener};
|
||||
use crate::worker::{Conn, WorkerClient};
|
||||
use crate::Token;
|
||||
use crate::{
|
||||
availability::Availability,
|
||||
socket::MioListener,
|
||||
waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN},
|
||||
worker::{Conn, ServerWorker, WorkerHandleAccept, WorkerHandleServer},
|
||||
ServerBuilder, ServerHandle,
|
||||
};
|
||||
|
||||
pub(crate) enum Command {
|
||||
Pause,
|
||||
Resume,
|
||||
Stop,
|
||||
Worker(WorkerClient),
|
||||
}
|
||||
const TIMEOUT_DURATION_ON_ERROR: Duration = Duration::from_millis(510);
|
||||
|
||||
struct ServerSocketInfo {
|
||||
addr: SocketAddr,
|
||||
token: Token,
|
||||
sock: SocketListener,
|
||||
timeout: Option<Instant>,
|
||||
token: usize,
|
||||
|
||||
lst: MioListener,
|
||||
|
||||
/// Timeout is used to mark the deadline when this socket's listener should be registered again
|
||||
/// after an error.
|
||||
timeout: Option<actix_rt::time::Instant>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct AcceptNotify(mio::SetReadiness);
|
||||
|
||||
impl AcceptNotify {
|
||||
pub(crate) fn new(ready: mio::SetReadiness) -> Self {
|
||||
AcceptNotify(ready)
|
||||
}
|
||||
|
||||
pub(crate) fn notify(&self) {
|
||||
let _ = self.0.set_readiness(mio::Ready::readable());
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for AcceptNotify {
|
||||
fn default() -> Self {
|
||||
AcceptNotify::new(mio::Registration::new2().1)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct AcceptLoop {
|
||||
cmd_reg: Option<mio::Registration>,
|
||||
cmd_ready: mio::SetReadiness,
|
||||
notify_reg: Option<mio::Registration>,
|
||||
notify_ready: mio::SetReadiness,
|
||||
tx: sync_mpsc::Sender<Command>,
|
||||
rx: Option<sync_mpsc::Receiver<Command>>,
|
||||
srv: Option<Server>,
|
||||
}
|
||||
|
||||
impl AcceptLoop {
|
||||
pub fn new(srv: Server) -> AcceptLoop {
|
||||
let (tx, rx) = sync_mpsc::channel();
|
||||
let (cmd_reg, cmd_ready) = mio::Registration::new2();
|
||||
let (notify_reg, notify_ready) = mio::Registration::new2();
|
||||
|
||||
AcceptLoop {
|
||||
tx,
|
||||
cmd_ready,
|
||||
cmd_reg: Some(cmd_reg),
|
||||
notify_ready,
|
||||
notify_reg: Some(notify_reg),
|
||||
rx: Some(rx),
|
||||
srv: Some(srv),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send(&self, msg: Command) {
|
||||
let _ = self.tx.send(msg);
|
||||
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
|
||||
}
|
||||
|
||||
pub fn get_notify(&self) -> AcceptNotify {
|
||||
AcceptNotify::new(self.notify_ready.clone())
|
||||
}
|
||||
|
||||
pub(crate) fn start(
|
||||
&mut self,
|
||||
socks: Vec<(Token, StdListener)>,
|
||||
workers: Vec<WorkerClient>,
|
||||
) {
|
||||
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
|
||||
|
||||
Accept::start(
|
||||
self.rx.take().expect("Can not re-use AcceptInfo"),
|
||||
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
|
||||
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
|
||||
socks,
|
||||
srv,
|
||||
workers,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
struct Accept {
|
||||
poll: mio::Poll,
|
||||
rx: sync_mpsc::Receiver<Command>,
|
||||
sockets: Slab<ServerSocketInfo>,
|
||||
workers: Vec<WorkerClient>,
|
||||
srv: Server,
|
||||
timer: (mio::Registration, mio::SetReadiness),
|
||||
/// Poll instance of the server.
|
||||
pub(crate) struct Accept {
|
||||
poll: Poll,
|
||||
waker_queue: WakerQueue,
|
||||
handles: Vec<WorkerHandleAccept>,
|
||||
srv: ServerHandle,
|
||||
next: usize,
|
||||
backpressure: bool,
|
||||
}
|
||||
|
||||
const DELTA: usize = 100;
|
||||
const CMD: mio::Token = mio::Token(0);
|
||||
const TIMER: mio::Token = mio::Token(1);
|
||||
const NOTIFY: mio::Token = mio::Token(2);
|
||||
|
||||
/// This function defines errors that are per-connection. Which basically
|
||||
/// means that if we get this error from `accept()` system call it means
|
||||
/// next connection might be ready to be accepted.
|
||||
///
|
||||
/// All other errors will incur a timeout before next `accept()` is performed.
|
||||
/// The timeout is useful to handle resource exhaustion errors like ENFILE
|
||||
/// and EMFILE. Otherwise, could enter into tight loop.
|
||||
fn connection_error(e: &io::Error) -> bool {
|
||||
e.kind() == io::ErrorKind::ConnectionRefused
|
||||
|| e.kind() == io::ErrorKind::ConnectionAborted
|
||||
|| e.kind() == io::ErrorKind::ConnectionReset
|
||||
avail: Availability,
|
||||
/// use the smallest duration from sockets timeout.
|
||||
timeout: Option<Duration>,
|
||||
paused: bool,
|
||||
}
|
||||
|
||||
impl Accept {
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn start(
|
||||
rx: sync_mpsc::Receiver<Command>,
|
||||
cmd_reg: mio::Registration,
|
||||
notify_reg: mio::Registration,
|
||||
socks: Vec<(Token, StdListener)>,
|
||||
srv: Server,
|
||||
workers: Vec<WorkerClient>,
|
||||
) {
|
||||
let sys = System::current();
|
||||
sockets: Vec<(usize, MioListener)>,
|
||||
builder: &ServerBuilder,
|
||||
) -> io::Result<(WakerQueue, Vec<WorkerHandleServer>, thread::JoinHandle<()>)> {
|
||||
let handle_server = ServerHandle::new(builder.cmd_tx.clone());
|
||||
|
||||
// start accept thread
|
||||
let _ = thread::Builder::new()
|
||||
.name("actix-server accept loop".to_owned())
|
||||
.spawn(move || {
|
||||
System::set_current(sys);
|
||||
let mut accept = Accept::new(rx, socks, workers, srv);
|
||||
// construct poll instance and its waker
|
||||
let poll = Poll::new()?;
|
||||
let waker_queue = WakerQueue::new(poll.registry())?;
|
||||
|
||||
// Start listening for incoming commands
|
||||
if let Err(err) = accept.poll.register(
|
||||
&cmd_reg,
|
||||
CMD,
|
||||
mio::Ready::readable(),
|
||||
mio::PollOpt::edge(),
|
||||
) {
|
||||
panic!("Can not register Registration: {}", err);
|
||||
}
|
||||
// start workers and collect handles
|
||||
let (handles_accept, handles_server) = (0..builder.threads)
|
||||
.map(|idx| {
|
||||
// clone service factories
|
||||
let factories = builder
|
||||
.factories
|
||||
.iter()
|
||||
.map(|f| f.clone_factory())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Start listening for notify updates
|
||||
if let Err(err) = accept.poll.register(
|
||||
¬ify_reg,
|
||||
NOTIFY,
|
||||
mio::Ready::readable(),
|
||||
mio::PollOpt::edge(),
|
||||
) {
|
||||
panic!("Can not register Registration: {}", err);
|
||||
}
|
||||
// start worker using service factories
|
||||
ServerWorker::start(idx, factories, waker_queue.clone(), builder.worker_config)
|
||||
})
|
||||
.collect::<io::Result<Vec<_>>>()?
|
||||
.into_iter()
|
||||
.unzip();
|
||||
|
||||
accept.poll();
|
||||
});
|
||||
let (mut accept, mut sockets) = Accept::new_with_sockets(
|
||||
poll,
|
||||
waker_queue.clone(),
|
||||
sockets,
|
||||
handles_accept,
|
||||
handle_server,
|
||||
)?;
|
||||
|
||||
let accept_handle = thread::Builder::new()
|
||||
.name("actix-server acceptor".to_owned())
|
||||
.spawn(move || accept.poll_with(&mut sockets))
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
|
||||
|
||||
Ok((waker_queue, handles_server, accept_handle))
|
||||
}
|
||||
|
||||
fn new(
|
||||
rx: sync_mpsc::Receiver<Command>,
|
||||
socks: Vec<(Token, StdListener)>,
|
||||
workers: Vec<WorkerClient>,
|
||||
srv: Server,
|
||||
) -> Accept {
|
||||
// Create a poll instance
|
||||
let poll = match mio::Poll::new() {
|
||||
Ok(poll) => poll,
|
||||
Err(err) => panic!("Can not create mio::Poll: {}", err),
|
||||
fn new_with_sockets(
|
||||
poll: Poll,
|
||||
waker_queue: WakerQueue,
|
||||
sockets: Vec<(usize, MioListener)>,
|
||||
accept_handles: Vec<WorkerHandleAccept>,
|
||||
server_handle: ServerHandle,
|
||||
) -> io::Result<(Accept, Box<[ServerSocketInfo]>)> {
|
||||
let sockets = sockets
|
||||
.into_iter()
|
||||
.map(|(token, mut lst)| {
|
||||
// Start listening for incoming connections
|
||||
poll.registry()
|
||||
.register(&mut lst, MioToken(token), Interest::READABLE)?;
|
||||
|
||||
Ok(ServerSocketInfo {
|
||||
token,
|
||||
lst,
|
||||
timeout: None,
|
||||
})
|
||||
})
|
||||
.collect::<io::Result<_>>()?;
|
||||
|
||||
let mut avail = Availability::default();
|
||||
|
||||
// Assume all handles are avail at construct time.
|
||||
avail.set_available_all(&accept_handles);
|
||||
|
||||
let accept = Accept {
|
||||
poll,
|
||||
waker_queue,
|
||||
handles: accept_handles,
|
||||
srv: server_handle,
|
||||
next: 0,
|
||||
avail,
|
||||
timeout: None,
|
||||
paused: false,
|
||||
};
|
||||
|
||||
// Start accept
|
||||
let mut sockets = Slab::new();
|
||||
for (hnd_token, lst) in socks.into_iter() {
|
||||
let addr = lst.local_addr();
|
||||
|
||||
let server = lst.into_listener();
|
||||
let entry = sockets.vacant_entry();
|
||||
let token = entry.key();
|
||||
|
||||
// Start listening for incoming connections
|
||||
if let Err(err) = poll.register(
|
||||
&server,
|
||||
mio::Token(token + DELTA),
|
||||
mio::Ready::readable(),
|
||||
mio::PollOpt::edge(),
|
||||
) {
|
||||
panic!("Can not register io: {}", err);
|
||||
}
|
||||
|
||||
entry.insert(ServerSocketInfo {
|
||||
addr,
|
||||
token: hnd_token,
|
||||
sock: server,
|
||||
timeout: None,
|
||||
});
|
||||
}
|
||||
|
||||
// Timer
|
||||
let (tm, tmr) = mio::Registration::new2();
|
||||
if let Err(err) =
|
||||
poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
|
||||
{
|
||||
panic!("Can not register Registration: {}", err);
|
||||
}
|
||||
|
||||
Accept {
|
||||
poll,
|
||||
rx,
|
||||
sockets,
|
||||
workers,
|
||||
srv,
|
||||
next: 0,
|
||||
timer: (tm, tmr),
|
||||
backpressure: false,
|
||||
}
|
||||
Ok((accept, sockets))
|
||||
}
|
||||
|
||||
fn poll(&mut self) {
|
||||
// Create storage for events
|
||||
let mut events = mio::Events::with_capacity(128);
|
||||
/// blocking wait for readiness events triggered by mio
|
||||
fn poll_with(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
let mut events = mio::Events::with_capacity(256);
|
||||
|
||||
loop {
|
||||
if let Err(err) = self.poll.poll(&mut events, None) {
|
||||
panic!("Poll error: {}", err);
|
||||
if let Err(err) = self.poll.poll(&mut events, self.timeout) {
|
||||
match err.kind() {
|
||||
io::ErrorKind::Interrupted => {}
|
||||
_ => panic!("Poll error: {}", err),
|
||||
}
|
||||
}
|
||||
|
||||
for event in events.iter() {
|
||||
let token = event.token();
|
||||
match token {
|
||||
CMD => {
|
||||
if !self.process_cmd() {
|
||||
WAKER_TOKEN => {
|
||||
let exit = self.handle_waker(sockets);
|
||||
if exit {
|
||||
info!("accept thread stopped");
|
||||
return;
|
||||
}
|
||||
}
|
||||
TIMER => self.process_timer(),
|
||||
NOTIFY => self.backpressure(false),
|
||||
_ => {
|
||||
let token = usize::from(token);
|
||||
if token < DELTA {
|
||||
continue;
|
||||
}
|
||||
self.accept(token - DELTA);
|
||||
self.accept(sockets, token);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check for timeout and re-register sockets
|
||||
self.process_timeout(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
fn process_timer(&mut self) {
|
||||
let now = Instant::now();
|
||||
for (token, info) in self.sockets.iter_mut() {
|
||||
if let Some(inst) = info.timeout.take() {
|
||||
if now > inst {
|
||||
if let Err(err) = self.poll.register(
|
||||
&info.sock,
|
||||
mio::Token(token + DELTA),
|
||||
mio::Ready::readable(),
|
||||
mio::PollOpt::edge(),
|
||||
) {
|
||||
error!("Can not register server socket {}", err);
|
||||
} else {
|
||||
info!("Resume accepting connections on {}", info.addr);
|
||||
}
|
||||
} else {
|
||||
info.timeout = Some(inst);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_cmd(&mut self) -> bool {
|
||||
fn handle_waker(&mut self, sockets: &mut [ServerSocketInfo]) -> bool {
|
||||
// This is a loop because the command handling in the previous version drained the
// command channel in a loop. It is not yet known whether it is necessary or good
// practice to actively drain the waker queue in the same way.
|
||||
loop {
|
||||
match self.rx.try_recv() {
|
||||
Ok(cmd) => match cmd {
|
||||
Command::Pause => {
|
||||
for (_, info) in self.sockets.iter_mut() {
|
||||
if let Err(err) = self.poll.deregister(&info.sock) {
|
||||
error!("Can not deregister server socket {}", err);
|
||||
} else {
|
||||
info!("Paused accepting connections on {}", info.addr);
|
||||
}
|
||||
}
|
||||
// Take guard with every iteration so no new interests can be added until the current
|
||||
// task is done. Take care not to take the guard again inside this loop.
|
||||
let mut guard = self.waker_queue.guard();
|
||||
|
||||
#[allow(clippy::significant_drop_in_scrutinee)]
|
||||
match guard.pop_front() {
|
||||
// Worker notified it became available.
|
||||
Some(WakerInterest::WorkerAvailable(idx)) => {
|
||||
drop(guard);
|
||||
|
||||
self.avail.set_available(idx, true);
|
||||
|
||||
if !self.paused {
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
Command::Resume => {
|
||||
for (token, info) in self.sockets.iter() {
|
||||
if let Err(err) = self.register(token, info) {
|
||||
error!("Can not resume socket accept process: {}", err);
|
||||
} else {
|
||||
info!(
|
||||
"Accepting connections on {} has been resumed",
|
||||
info.addr
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A new worker thread has been created so store its handle.
|
||||
Some(WakerInterest::Worker(handle)) => {
|
||||
drop(guard);
|
||||
|
||||
self.avail.set_available(handle.idx(), true);
|
||||
self.handles.push(handle);
|
||||
|
||||
if !self.paused {
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
Command::Stop => {
|
||||
for (_, info) in self.sockets.iter() {
|
||||
let _ = self.poll.deregister(&info.sock);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
Some(WakerInterest::Pause) => {
|
||||
drop(guard);
|
||||
|
||||
if !self.paused {
|
||||
self.paused = true;
|
||||
|
||||
self.deregister_all(sockets);
|
||||
}
|
||||
Command::Worker(worker) => {
|
||||
self.backpressure(false);
|
||||
self.workers.push(worker);
|
||||
}
|
||||
|
||||
Some(WakerInterest::Resume) => {
|
||||
drop(guard);
|
||||
|
||||
if self.paused {
|
||||
self.paused = false;
|
||||
|
||||
sockets.iter_mut().for_each(|info| {
|
||||
self.register_logged(info);
|
||||
});
|
||||
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
},
|
||||
Err(err) => match err {
|
||||
sync_mpsc::TryRecvError::Empty => break,
|
||||
sync_mpsc::TryRecvError::Disconnected => {
|
||||
for (_, info) in self.sockets.iter() {
|
||||
let _ = self.poll.deregister(&info.sock);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
Some(WakerInterest::Stop) => {
|
||||
if !self.paused {
|
||||
self.deregister_all(sockets);
|
||||
}
|
||||
},
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// waker queue is drained
|
||||
None => {
|
||||
// Reset the WakerQueue before break so it does not grow infinitely
|
||||
WakerQueue::reset(&mut guard);
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
fn process_timeout(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
// always remove old timeouts
|
||||
if self.timeout.take().is_some() {
|
||||
let now = Instant::now();
|
||||
|
||||
sockets
|
||||
.iter_mut()
|
||||
// Only sockets that had an associated timeout were deregistered.
|
||||
.filter(|info| info.timeout.is_some())
|
||||
.for_each(|info| {
|
||||
let inst = info.timeout.take().unwrap();
|
||||
|
||||
if now < inst {
|
||||
// still timed out; try to set new timeout
|
||||
info.timeout = Some(inst);
|
||||
self.set_timeout(inst - now);
|
||||
} else if !self.paused {
|
||||
// timeout expired; register socket again
|
||||
self.register_logged(info);
|
||||
}
|
||||
|
||||
// Drop the timeout if server is paused and socket timeout is expired.
|
||||
// When server recovers from pause it will register all sockets without
|
||||
// a timeout value so this socket register will be delayed till then.
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Update accept timeout with `duration` if it is shorter than current timeout.
|
||||
fn set_timeout(&mut self, duration: Duration) {
|
||||
match self.timeout {
|
||||
Some(ref mut timeout) => {
|
||||
if *timeout > duration {
|
||||
*timeout = duration;
|
||||
}
|
||||
}
|
||||
None => self.timeout = Some(duration),
|
||||
}
|
||||
}
|
||||
|
||||
    #[cfg(not(target_os = "windows"))]
    fn register(&self, token: usize, info: &ServerSocketInfo) -> io::Result<()> {
        self.poll.register(
            &info.sock,
            mio::Token(token + DELTA),
            mio::Ready::readable(),
            mio::PollOpt::edge(),
        )
    }

    #[cfg(not(target_os = "windows"))]
    fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
        let token = MioToken(info.token);
        self.poll
            .registry()
            .register(&mut info.lst, token, Interest::READABLE)
    }

    #[cfg(target_os = "windows")]
    fn register(&self, token: usize, info: &ServerSocketInfo) -> io::Result<()> {
        // On Windows, calling register without deregister causes an error.
        // See https://github.com/actix/actix-web/issues/905
        // Calling reregister seems to fix the issue.
        self.poll
            .register(
                &info.sock,
                mio::Token(token + DELTA),
                mio::Ready::readable(),
                mio::PollOpt::edge(),
            )
            .or_else(|_| {
                self.poll.reregister(
                    &info.sock,
                    mio::Token(token + DELTA),
                    mio::Ready::readable(),
                    mio::PollOpt::edge(),
                )
            })
    }

    #[cfg(target_os = "windows")]
    fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
        // On Windows, calling register without deregister causes an error.
        // See https://github.com/actix/actix-web/issues/905
        // Calling reregister seems to fix the issue.
        let token = MioToken(info.token);
        self.poll
            .registry()
            .register(&mut info.lst, token, Interest::READABLE)
            .or_else(|_| {
                self.poll
                    .registry()
                    .reregister(&mut info.lst, token, Interest::READABLE)
            })
    }
|
||||
|
||||
fn backpressure(&mut self, on: bool) {
|
||||
if self.backpressure {
|
||||
if !on {
|
||||
self.backpressure = false;
|
||||
for (token, info) in self.sockets.iter() {
|
||||
if let Err(err) = self.register(token, info) {
|
||||
error!("Can not resume socket accept process: {}", err);
|
||||
} else {
|
||||
info!("Accepting connections on {} has been resumed", info.addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if on {
|
||||
self.backpressure = true;
|
||||
for (_, info) in self.sockets.iter() {
|
||||
let _ = self.poll.deregister(&info.sock);
|
||||
info!("Accepting connections on {} has been paused", info.addr);
|
||||
fn register_logged(&self, info: &mut ServerSocketInfo) {
|
||||
match self.register(info) {
|
||||
Ok(_) => debug!("resume accepting connections on {}", info.lst.local_addr()),
|
||||
Err(err) => error!("can not register server socket {}", err),
|
||||
}
|
||||
}
|
||||
|
||||
fn deregister_logged(&self, info: &mut ServerSocketInfo) {
|
||||
match self.poll.registry().deregister(&mut info.lst) {
|
||||
Ok(_) => debug!("paused accepting connections on {}", info.lst.local_addr()),
|
||||
Err(err) => {
|
||||
error!("can not deregister server socket {}", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn accept_one(&mut self, mut msg: Conn) {
|
||||
if self.backpressure {
|
||||
while !self.workers.is_empty() {
|
||||
match self.workers[self.next].send(msg) {
|
||||
Ok(_) => (),
|
||||
Err(tmp) => {
|
||||
self.srv.worker_faulted(self.workers[self.next].idx);
|
||||
msg = tmp;
|
||||
self.workers.swap_remove(self.next);
|
||||
if self.workers.is_empty() {
|
||||
error!("No workers");
|
||||
return;
|
||||
} else if self.workers.len() <= self.next {
|
||||
self.next = 0;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
fn deregister_all(&self, sockets: &mut [ServerSocketInfo]) {
|
||||
// This is a best-effort implementation with the following limitation:
|
||||
//
|
||||
// Every ServerSocketInfo with an associated timeout will be skipped and its timeout is
|
||||
// removed in the process.
|
||||
//
|
||||
// Therefore WakerInterest::Pause followed by WakerInterest::Resume in a very short gap
|
||||
// (less than 500ms) would cause all timing out ServerSocketInfos be re-registered before
|
||||
// expected timing.
|
||||
sockets
|
||||
.iter_mut()
|
||||
// Take all timeout.
|
||||
// This is to prevent Accept::process_timer method re-register a socket afterwards.
|
||||
.map(|info| (info.timeout.take(), info))
|
||||
// Socket info with a timeout is already deregistered so skip them.
|
||||
.filter(|(timeout, _)| timeout.is_none())
|
||||
.for_each(|(_, info)| self.deregister_logged(info));
|
||||
}
|
||||
|
||||
// Send connection to worker and handle error.
|
||||
fn send_connection(&mut self, conn: Conn) -> Result<(), Conn> {
|
||||
let next = self.next();
|
||||
match next.send(conn) {
|
||||
Ok(_) => {
|
||||
// Increment counter of WorkerHandle.
|
||||
// Set the worker to unavailable when it hits max (returns false).
|
||||
if !next.inc_counter() {
|
||||
let idx = next.idx();
|
||||
self.avail.set_available(idx, false);
|
||||
}
|
||||
self.next = (self.next + 1) % self.workers.len();
|
||||
break;
|
||||
self.set_next();
|
||||
Ok(())
|
||||
}
|
||||
} else {
|
||||
let mut idx = 0;
|
||||
while idx < self.workers.len() {
|
||||
idx += 1;
|
||||
if self.workers[self.next].available() {
|
||||
match self.workers[self.next].send(msg) {
|
||||
Ok(_) => {
|
||||
self.next = (self.next + 1) % self.workers.len();
|
||||
return;
|
||||
}
|
||||
Err(tmp) => {
|
||||
self.srv.worker_faulted(self.workers[self.next].idx);
|
||||
msg = tmp;
|
||||
self.workers.swap_remove(self.next);
|
||||
if self.workers.is_empty() {
|
||||
error!("No workers");
|
||||
self.backpressure(true);
|
||||
return;
|
||||
} else if self.workers.len() <= self.next {
|
||||
self.next = 0;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
Err(conn) => {
|
||||
// Worker thread has errored and could be gone.
|
||||
// Remove worker handle and notify `ServerBuilder`.
|
||||
self.remove_next();
|
||||
|
||||
if self.handles.is_empty() {
|
||||
error!("no workers");
|
||||
// All workers are gone and Conn is nowhere to be sent.
|
||||
// Treat this situation as Ok and drop Conn.
|
||||
return Ok(());
|
||||
} else if self.handles.len() <= self.next {
|
||||
self.next = 0;
|
||||
}
|
||||
self.next = (self.next + 1) % self.workers.len();
|
||||
|
||||
Err(conn)
|
||||
}
|
||||
// enable backpressure
|
||||
self.backpressure(true);
|
||||
self.accept_one(msg);
|
||||
}
|
||||
}
|
||||
|
||||
fn accept(&mut self, token: usize) {
|
||||
fn accept_one(&mut self, mut conn: Conn) {
|
||||
loop {
|
||||
let msg = if let Some(info) = self.sockets.get_mut(token) {
|
||||
match info.sock.accept() {
|
||||
Ok(Some((io, addr))) => Conn {
|
||||
io,
|
||||
token: info.token,
|
||||
peer: Some(addr),
|
||||
},
|
||||
Ok(None) => return,
|
||||
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
|
||||
Err(ref e) if connection_error(e) => continue,
|
||||
Err(e) => {
|
||||
error!("Error accepting connection: {}", e);
|
||||
if let Err(err) = self.poll.deregister(&info.sock) {
|
||||
error!("Can not deregister server socket {}", err);
|
||||
}
|
||||
let next = self.next();
|
||||
let idx = next.idx();
|
||||
|
||||
// sleep after error
|
||||
info.timeout = Some(Instant::now() + Duration::from_millis(500));
|
||||
|
||||
let r = self.timer.1.clone();
|
||||
System::current().arbiter().send(Box::pin(async move {
|
||||
delay_until(Instant::now() + Duration::from_millis(510)).await;
|
||||
let _ = r.set_readiness(mio::Ready::readable());
|
||||
}));
|
||||
return;
|
||||
}
|
||||
if self.avail.get_available(idx) {
|
||||
match self.send_connection(conn) {
|
||||
Ok(_) => return,
|
||||
Err(c) => conn = c,
|
||||
}
|
||||
} else {
|
||||
return;
|
||||
};
|
||||
self.avail.set_available(idx, false);
|
||||
self.set_next();
|
||||
|
||||
self.accept_one(msg);
|
||||
if !self.avail.available() {
|
||||
while let Err(c) = self.send_connection(conn) {
|
||||
conn = c;
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn accept(&mut self, sockets: &mut [ServerSocketInfo], token: usize) {
|
||||
while self.avail.available() {
|
||||
let info = &mut sockets[token];
|
||||
|
||||
match info.lst.accept() {
|
||||
Ok(io) => {
|
||||
let conn = Conn { io, token };
|
||||
self.accept_one(conn);
|
||||
}
|
||||
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return,
|
||||
Err(ref err) if connection_error(err) => continue,
|
||||
Err(err) => {
|
||||
error!("error accepting connection: {}", err);
|
||||
|
||||
// deregister listener temporarily
|
||||
self.deregister_logged(info);
|
||||
|
||||
// Sleep after error. Write the timeout to the socket info so that later
// the poll knows which socket's listener should be re-registered and when.
|
||||
info.timeout = Some(Instant::now() + Duration::from_millis(500));
|
||||
self.set_timeout(TIMEOUT_DURATION_ON_ERROR);
|
||||
|
||||
return;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
fn accept_all(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
sockets
|
||||
.iter_mut()
|
||||
.map(|info| info.token)
|
||||
.collect::<Vec<_>>()
|
||||
.into_iter()
|
||||
.for_each(|idx| self.accept(sockets, idx))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn next(&self) -> &WorkerHandleAccept {
|
||||
&self.handles[self.next]
|
||||
}
|
||||
|
||||
/// Set next worker handle that would accept connection.
|
||||
#[inline(always)]
|
||||
fn set_next(&mut self) {
|
||||
self.next = (self.next + 1) % self.handles.len();
|
||||
}
|
||||
|
||||
/// Remove next worker handle that fail to accept connection.
|
||||
fn remove_next(&mut self) {
|
||||
let handle = self.handles.swap_remove(self.next);
|
||||
let idx = handle.idx();
|
||||
// A message is sent to `ServerBuilder` future to notify it a new worker
|
||||
// should be made.
|
||||
self.srv.worker_faulted(idx);
|
||||
self.avail.set_available(idx, false);
|
||||
}
|
||||
}
|
||||
|
||||
/// This function defines errors that are per-connection; if we get this error from the `accept()`
|
||||
/// system call it means the next connection might be ready to be accepted.
|
||||
///
|
||||
/// All other errors will incur a timeout before next `accept()` call is attempted. The timeout is
|
||||
/// useful to handle resource exhaustion errors like `ENFILE` and `EMFILE`. Otherwise, it could
|
||||
/// enter into a temporary spin loop.
|
||||
fn connection_error(e: &io::Error) -> bool {
|
||||
e.kind() == io::ErrorKind::ConnectionRefused
|
||||
|| e.kind() == io::ErrorKind::ConnectionAborted
|
||||
|| e.kind() == io::ErrorKind::ConnectionReset
|
||||
}
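// A minimal sketch (not part of this diff) exercising `connection_error` directly:
// per-connection errors let the accept loop continue with the next connection,
// while any other error kind sends the listener into the 500ms back-off above.
#[cfg(test)]
mod connection_error_example {
    use std::io;

    use super::*;

    #[test]
    fn classifies_error_kinds() {
        // Peer-caused errors: the loop just tries the next connection.
        assert!(connection_error(&io::Error::from(io::ErrorKind::ConnectionRefused)));
        assert!(connection_error(&io::Error::from(io::ErrorKind::ConnectionReset)));

        // Resource exhaustion (e.g. EMFILE) and everything else: back off via timeout.
        assert!(!connection_error(&io::Error::from(io::ErrorKind::Other)));
    }
}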
|
||||
|
actix-server/src/availability.rs (new file)
@ -0,0 +1,121 @@
|
||||
use crate::worker::WorkerHandleAccept;
|
||||
|
||||
/// Array of u128 with every bit as marker for a worker handle's availability.
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct Availability([u128; 4]);
|
||||
|
||||
impl Availability {
|
||||
/// Check if any worker handle is available
|
||||
#[inline(always)]
|
||||
pub(crate) fn available(&self) -> bool {
|
||||
self.0.iter().any(|a| *a != 0)
|
||||
}
|
||||
|
||||
/// Check if worker handle is available by index
|
||||
#[inline(always)]
|
||||
pub(crate) fn get_available(&self, idx: usize) -> bool {
|
||||
let (offset, idx) = Self::offset(idx);
|
||||
|
||||
self.0[offset] & (1 << idx as u128) != 0
|
||||
}
|
||||
|
||||
/// Set worker handle available state by index.
|
||||
pub(crate) fn set_available(&mut self, idx: usize, avail: bool) {
|
||||
let (offset, idx) = Self::offset(idx);
|
||||
|
||||
let off = 1 << idx as u128;
|
||||
if avail {
|
||||
self.0[offset] |= off;
|
||||
} else {
|
||||
self.0[offset] &= !off
|
||||
}
|
||||
}
|
||||
|
||||
/// Set all worker handle to available state.
|
||||
/// This would result in a re-check on all workers' availability.
|
||||
pub(crate) fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
|
||||
handles.iter().for_each(|handle| {
|
||||
self.set_available(handle.idx(), true);
|
||||
})
|
||||
}
|
||||
|
||||
/// Get offset and adjusted index of given worker handle index.
|
||||
pub(crate) fn offset(idx: usize) -> (usize, usize) {
|
||||
if idx < 128 {
|
||||
(0, idx)
|
||||
} else if idx < 128 * 2 {
|
||||
(1, idx - 128)
|
||||
} else if idx < 128 * 3 {
|
||||
(2, idx - 128 * 2)
|
||||
} else if idx < 128 * 4 {
|
||||
(3, idx - 128 * 3)
|
||||
} else {
|
||||
panic!("Max WorkerHandle count is 512")
|
||||
}
|
||||
}
|
||||
}
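// Worked example (illustrative, not part of this diff): worker index 130 lands in
// u128 word 1, bit 2, so marking it available flips exactly that bit.
#[cfg(test)]
mod offset_example {
    use super::*;

    #[test]
    fn index_maps_to_word_and_bit() {
        assert_eq!(Availability::offset(130), (1, 2));

        let mut avail = Availability::default();
        avail.set_available(130, true);
        assert_eq!(avail.0[1], 1 << 2);
    }
}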
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn single(aval: &mut Availability, idx: usize) {
|
||||
aval.set_available(idx, true);
|
||||
assert!(aval.available());
|
||||
|
||||
aval.set_available(idx, true);
|
||||
|
||||
aval.set_available(idx, false);
|
||||
assert!(!aval.available());
|
||||
|
||||
aval.set_available(idx, false);
|
||||
assert!(!aval.available());
|
||||
}
|
||||
|
||||
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
|
||||
idx.iter().for_each(|idx| aval.set_available(*idx, true));
|
||||
|
||||
assert!(aval.available());
|
||||
|
||||
while let Some(idx) = idx.pop() {
|
||||
assert!(aval.available());
|
||||
aval.set_available(idx, false);
|
||||
}
|
||||
|
||||
assert!(!aval.available());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn availability() {
|
||||
let mut aval = Availability::default();
|
||||
|
||||
single(&mut aval, 1);
|
||||
single(&mut aval, 128);
|
||||
single(&mut aval, 256);
|
||||
single(&mut aval, 511);
|
||||
|
||||
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
|
||||
|
||||
multi(&mut aval, idx);
|
||||
|
||||
multi(&mut aval, (0..511).collect())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn overflow() {
|
||||
let mut aval = Availability::default();
|
||||
single(&mut aval, 512);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pin_point() {
|
||||
let mut aval = Availability::default();
|
||||
|
||||
aval.set_available(438, true);
|
||||
|
||||
aval.set_available(479, true);
|
||||
|
||||
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
|
||||
}
|
||||
}
|
@ -1,43 +1,47 @@
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use std::{io, mem, net};
|
||||
use std::{io, num::NonZeroUsize, time::Duration};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_rt::time::{delay_until, Instant};
|
||||
use actix_rt::{spawn, System};
|
||||
use futures_channel::mpsc::{unbounded, UnboundedReceiver};
|
||||
use futures_channel::oneshot;
|
||||
use futures_util::future::ready;
|
||||
use futures_util::stream::FuturesUnordered;
|
||||
use futures_util::{future::Future, ready, stream::Stream, FutureExt, StreamExt};
|
||||
use log::{error, info};
|
||||
use socket2::{Domain, Protocol, Socket, Type};
|
||||
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
|
||||
|
||||
use crate::accept::{AcceptLoop, AcceptNotify, Command};
|
||||
use crate::config::{ConfiguredService, ServiceConfig};
|
||||
use crate::server::{Server, ServerCommand};
|
||||
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
|
||||
use crate::signals::{Signal, Signals};
|
||||
use crate::socket::StdListener;
|
||||
use crate::worker::{self, Worker, WorkerAvailability, WorkerClient};
|
||||
use crate::Token;
|
||||
use crate::{
|
||||
server::ServerCommand,
|
||||
service::{InternalServiceFactory, ServerServiceFactory, StreamNewService},
|
||||
socket::{create_mio_tcp_listener, MioListener, MioTcpListener, StdTcpListener, ToSocketAddrs},
|
||||
worker::ServerWorkerConfig,
|
||||
Server,
|
||||
};
|
||||
|
||||
/// Server builder
|
||||
/// Multipath TCP (MPTCP) preference.
|
||||
///
|
||||
/// Currently only useful on Linux.
|
||||
///
|
||||
#[cfg_attr(target_os = "linux", doc = "Also see [`ServerBuilder::mptcp()`].")]
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum MpTcp {
|
||||
/// MPTCP will not be used when binding sockets.
|
||||
Disabled,
|
||||
|
||||
/// MPTCP will be attempted when binding sockets. If errors occur, regular TCP will be
|
||||
/// attempted, too.
|
||||
TcpFallback,
|
||||
|
||||
/// MPTCP will be used when binding sockets (with no fallback).
|
||||
NoFallback,
|
||||
}
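// A minimal sketch (not part of this diff) of opting into MPTCP; it assumes a Linux
// target with `net.mptcp.enabled=1`. The service name and bind address are
// placeholders and the service simply drops each connection.
#[cfg(target_os = "linux")]
#[allow(dead_code)]
fn mptcp_example() -> std::io::Result<ServerBuilder> {
    Server::build()
        .mptcp(MpTcp::TcpFallback)
        .bind("mptcp-demo", ("127.0.0.1", 8080), || {
            actix_service::fn_service(|_: TcpStream| async { Ok::<_, ()>(()) })
        })
}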
|
||||
|
||||
/// [Server] builder.
|
||||
pub struct ServerBuilder {
|
||||
threads: usize,
|
||||
token: Token,
|
||||
backlog: i32,
|
||||
workers: Vec<(usize, WorkerClient)>,
|
||||
services: Vec<Box<dyn InternalServiceFactory>>,
|
||||
sockets: Vec<(Token, String, StdListener)>,
|
||||
accept: AcceptLoop,
|
||||
exit: bool,
|
||||
shutdown_timeout: Duration,
|
||||
no_signals: bool,
|
||||
cmd: UnboundedReceiver<ServerCommand>,
|
||||
server: Server,
|
||||
notify: Vec<oneshot::Sender<()>>,
|
||||
pub(crate) threads: usize,
|
||||
pub(crate) token: usize,
|
||||
pub(crate) backlog: u32,
|
||||
pub(crate) factories: Vec<Box<dyn InternalServiceFactory>>,
|
||||
pub(crate) sockets: Vec<(usize, String, MioListener)>,
|
||||
pub(crate) mptcp: MpTcp,
|
||||
pub(crate) exit: bool,
|
||||
pub(crate) listen_os_signals: bool,
|
||||
pub(crate) cmd_tx: UnboundedSender<ServerCommand>,
|
||||
pub(crate) cmd_rx: UnboundedReceiver<ServerCommand>,
|
||||
pub(crate) worker_config: ServerWorkerConfig,
|
||||
}
|
||||
|
||||
impl Default for ServerBuilder {
|
||||
@ -49,452 +53,326 @@ impl Default for ServerBuilder {
|
||||
impl ServerBuilder {
|
||||
/// Create new Server builder instance
|
||||
pub fn new() -> ServerBuilder {
|
||||
let (tx, rx) = unbounded();
|
||||
let server = Server::new(tx);
|
||||
let (cmd_tx, cmd_rx) = unbounded_channel();
|
||||
|
||||
ServerBuilder {
|
||||
threads: num_cpus::get(),
|
||||
token: Token(0),
|
||||
workers: Vec::new(),
|
||||
services: Vec::new(),
|
||||
threads: std::thread::available_parallelism().map_or(2, NonZeroUsize::get),
|
||||
token: 0,
|
||||
factories: Vec::new(),
|
||||
sockets: Vec::new(),
|
||||
accept: AcceptLoop::new(server.clone()),
|
||||
backlog: 2048,
|
||||
mptcp: MpTcp::Disabled,
|
||||
exit: false,
|
||||
shutdown_timeout: Duration::from_secs(30),
|
||||
no_signals: false,
|
||||
cmd: rx,
|
||||
notify: Vec::new(),
|
||||
server,
|
||||
listen_os_signals: true,
|
||||
cmd_tx,
|
||||
cmd_rx,
|
||||
worker_config: ServerWorkerConfig::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set number of workers to start.
|
||||
/// Sets number of workers to start.
|
||||
///
|
||||
/// By default, the server uses the number of available logical CPUs as the worker count.
/// Workers must be greater than 0.
|
||||
/// See [`bind()`](Self::bind()) for more details on how worker count affects the number of
|
||||
/// server factory instantiations.
|
||||
///
|
||||
/// The default worker count is determined by [`std::thread::available_parallelism()`]. See
|
||||
/// its documentation to determine what behavior you should expect when server is run.
|
||||
///
|
||||
/// `num` must be greater than 0.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if `num` is 0.
|
||||
pub fn workers(mut self, num: usize) -> Self {
|
||||
assert_ne!(num, 0, "workers must be greater than 0");
|
||||
self.threads = num;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set max number of threads for each worker's blocking task thread pool.
|
||||
///
|
||||
/// One thread pool is set up **per worker**; not shared across workers.
|
||||
///
|
||||
/// # Examples:
|
||||
/// ```
|
||||
/// # use actix_server::ServerBuilder;
|
||||
/// let builder = ServerBuilder::new()
|
||||
/// .workers(4) // server has 4 worker thread.
|
||||
/// .worker_max_blocking_threads(4); // every worker has 4 max blocking threads.
|
||||
/// ```
|
||||
///
|
||||
/// See [tokio::runtime::Builder::max_blocking_threads] for behavior reference.
|
||||
pub fn worker_max_blocking_threads(mut self, num: usize) -> Self {
|
||||
self.worker_config.max_blocking_threads(num);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the maximum number of pending connections.
|
||||
///
|
||||
/// This refers to the number of clients that can be waiting to be served.
|
||||
/// Exceeding this number results in the client getting an error when
|
||||
/// attempting to connect. It should only affect servers under significant
|
||||
/// load.
|
||||
/// This refers to the number of clients that can be waiting to be served. Exceeding this number
|
||||
/// results in the client getting an error when attempting to connect. It should only affect
|
||||
/// servers under significant load.
|
||||
///
|
||||
/// Generally set in the 64-2048 range. Default value is 2048.
|
||||
///
|
||||
/// This method should be called before `bind()` method call.
|
||||
pub fn backlog(mut self, num: i32) -> Self {
|
||||
pub fn backlog(mut self, num: u32) -> Self {
|
||||
self.backlog = num;
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets MultiPath TCP (MPTCP) preference on bound sockets.
|
||||
///
|
||||
/// Multipath TCP (MPTCP) builds on top of TCP to improve connection redundancy and performance
|
||||
/// by sharing a network data stream across multiple underlying TCP sessions. See [mptcp.dev]
|
||||
/// for more info about MPTCP itself.
|
||||
///
|
||||
/// MPTCP is available on Linux kernel version 5.6 and higher. In addition, you'll also need to
|
||||
/// ensure the kernel option is enabled using `sysctl net.mptcp.enabled=1`.
|
||||
///
|
||||
/// This method will have no effect if called after a `bind()`.
|
||||
///
|
||||
/// [mptcp.dev]: https://www.mptcp.dev
|
||||
#[cfg(target_os = "linux")]
|
||||
pub fn mptcp(mut self, mptcp_enabled: MpTcp) -> Self {
|
||||
self.mptcp = mptcp_enabled;
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the maximum per-worker number of concurrent connections.
|
||||
///
|
||||
/// All socket listeners will stop accepting connections when this limit is
|
||||
/// reached for each worker.
|
||||
/// All socket listeners will stop accepting connections when this limit is reached for
|
||||
/// each worker.
|
||||
///
|
||||
/// By default max connections is set to a 25k per worker.
|
||||
pub fn maxconn(self, num: usize) -> Self {
|
||||
worker::max_concurrent_connections(num);
|
||||
pub fn max_concurrent_connections(mut self, num: usize) -> Self {
|
||||
self.worker_config.max_concurrent_connections(num);
|
||||
self
|
||||
}
|
||||
|
||||
/// Stop actix system.
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "2.0.0", note = "Renamed to `max_concurrent_connections`.")]
|
||||
pub fn maxconn(self, num: usize) -> Self {
|
||||
self.max_concurrent_connections(num)
|
||||
}
|
||||
|
||||
/// Sets flag to stop Actix `System` after server shutdown.
|
||||
///
|
||||
/// This has no effect when server is running in a Tokio-only runtime.
|
||||
pub fn system_exit(mut self) -> Self {
|
||||
self.exit = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Disable signal handling
|
||||
/// Disables OS signal handling.
|
||||
pub fn disable_signals(mut self) -> Self {
|
||||
self.no_signals = true;
|
||||
self.listen_os_signals = false;
|
||||
self
|
||||
}
|
||||
|
||||
/// Timeout for graceful workers shutdown in seconds.
|
||||
///
|
||||
/// After receiving a stop signal, workers have this much time to finish
|
||||
/// serving requests. Workers still alive after the timeout are force
|
||||
/// dropped.
|
||||
/// After receiving a stop signal, workers have this much time to finish serving requests.
|
||||
/// Workers still alive after the timeout are force dropped.
|
||||
///
|
||||
/// By default, the shutdown timeout is set to 30 seconds.
|
||||
pub fn shutdown_timeout(mut self, sec: u64) -> Self {
|
||||
self.shutdown_timeout = Duration::from_secs(sec);
|
||||
self.worker_config
|
||||
.shutdown_timeout(Duration::from_secs(sec));
|
||||
self
|
||||
}
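    // A small tuning sketch (illustrative only; the numbers are placeholders, not
    // recommendations) combining the worker, backlog, connection-limit, and shutdown
    // settings exposed by this builder.
    #[allow(dead_code)]
    fn tuning_example() -> ServerBuilder {
        Server::build()
            .workers(4) // worker threads
            .backlog(1024) // pending-connection queue per listener
            .max_concurrent_connections(10_000) // per-worker connection cap
            .shutdown_timeout(60) // graceful shutdown grace period, in seconds
    }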
|
||||
|
||||
/// Execute external configuration as part of the server building
|
||||
/// process.
|
||||
/// Adds new service to the server.
|
||||
///
|
||||
/// This function is useful for moving parts of configuration to a
|
||||
/// different module or even library.
|
||||
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
|
||||
/// Note that, if a DNS lookup is required, resolving hostnames is a blocking operation.
|
||||
///
|
||||
/// # Worker Count
|
||||
///
|
||||
/// The `factory` will be instantiated multiple times in most scenarios. The number of
|
||||
/// instantiations is number of [`workers`](Self::workers()) × number of sockets resolved by
|
||||
/// `addrs`.
|
||||
///
|
||||
/// For example, if you've manually set [`workers`](Self::workers()) to 2, and use `127.0.0.1`
|
||||
/// as the bind `addrs`, then `factory` will be instantiated twice. However, using `localhost`
|
||||
/// as the bind `addrs` can often resolve to both `127.0.0.1` (IPv4) _and_ `::1` (IPv6), causing
|
||||
/// the `factory` to be instantiated 4 times (2 workers × 2 bind addresses).
|
||||
///
|
||||
/// Using a bind address of `0.0.0.0`, which signals to use all interfaces, may also multiply
|
||||
/// the number of instantiations in a similar way.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an `io::Error` if:
|
||||
/// - `addrs` cannot be resolved into one or more socket addresses;
|
||||
/// - all the resolved socket addresses are already bound.
|
||||
pub fn bind<F, U, N>(mut self, name: N, addrs: U, factory: F) -> io::Result<Self>
|
||||
where
|
||||
F: Fn(&mut ServiceConfig) -> io::Result<()>,
|
||||
F: ServerServiceFactory<TcpStream>,
|
||||
U: ToSocketAddrs,
|
||||
N: AsRef<str>,
|
||||
{
|
||||
let mut cfg = ServiceConfig::new(self.threads, self.backlog);
|
||||
let sockets = bind_addr(addrs, self.backlog, &self.mptcp)?;
|
||||
|
||||
f(&mut cfg)?;
|
||||
|
||||
if let Some(apply) = cfg.apply {
|
||||
let mut srv = ConfiguredService::new(apply);
|
||||
for (name, lst) in cfg.services {
|
||||
let token = self.token.next();
|
||||
srv.stream(token, name.clone(), lst.local_addr()?);
|
||||
self.sockets.push((token, name, StdListener::Tcp(lst)));
|
||||
}
|
||||
self.services.push(Box::new(srv));
|
||||
}
|
||||
self.threads = cfg.threads;
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add new service to the server.
|
||||
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
|
||||
where
|
||||
F: ServiceFactory<TcpStream>,
|
||||
U: net::ToSocketAddrs,
|
||||
{
|
||||
let sockets = bind_addr(addr, self.backlog)?;
|
||||
tracing::trace!("binding server to: {sockets:?}");
|
||||
|
||||
for lst in sockets {
|
||||
let token = self.token.next();
|
||||
self.services.push(StreamNewService::create(
|
||||
let token = self.next_token();
|
||||
|
||||
self.factories.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory.clone(),
|
||||
lst.local_addr()?,
|
||||
));
|
||||
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), StdListener::Tcp(lst)));
|
||||
.push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
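    // Worked example of the instantiation count documented on `bind` (a sketch; the
    // service name and address are placeholders): with 2 workers and a hostname that
    // resolves to both 127.0.0.1 and ::1, the factory closure below is built
    // 2 workers x 2 sockets = 4 times.
    #[allow(dead_code)]
    fn bind_instantiation_example() -> io::Result<ServerBuilder> {
        Server::build()
            .workers(2)
            .bind("worker-count-demo", "localhost:8080", || {
                actix_service::fn_service(|_: TcpStream| async { Ok::<_, ()>(()) })
            })
    }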
|
||||
|
||||
#[cfg(all(unix))]
|
||||
/// Add new unix domain service to the server.
|
||||
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
|
||||
where
|
||||
F: ServiceFactory<actix_rt::net::UnixStream>,
|
||||
N: AsRef<str>,
|
||||
U: AsRef<std::path::Path>,
|
||||
{
|
||||
use std::os::unix::net::UnixListener;
|
||||
|
||||
// The path must not exist when we try to bind.
|
||||
// Try to remove it to avoid bind error.
|
||||
if let Err(e) = std::fs::remove_file(addr.as_ref()) {
|
||||
// NotFound is expected and not an issue. Anything else is.
|
||||
if e.kind() != std::io::ErrorKind::NotFound {
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
|
||||
let lst = UnixListener::bind(addr)?;
|
||||
self.listen_uds(name, lst, factory)
|
||||
}
|
||||
|
||||
#[cfg(all(unix))]
|
||||
/// Add new unix domain service to the server.
|
||||
/// Useful when running as a systemd service and
|
||||
/// a socket FD can be acquired using the systemd crate.
|
||||
pub fn listen_uds<F, N: AsRef<str>>(
|
||||
/// Adds service to the server using a socket listener already bound.
|
||||
///
|
||||
/// # Worker Count
|
||||
///
|
||||
/// The `factory` will be instantiated multiple times in most scenarios. The number of
|
||||
/// instantiations is: number of [`workers`](Self::workers()).
|
||||
pub fn listen<F, N: AsRef<str>>(
|
||||
mut self,
|
||||
name: N,
|
||||
lst: std::os::unix::net::UnixListener,
|
||||
lst: StdTcpListener,
|
||||
factory: F,
|
||||
) -> io::Result<Self>
|
||||
where
|
||||
F: ServiceFactory<actix_rt::net::UnixStream>,
|
||||
F: ServerServiceFactory<TcpStream>,
|
||||
{
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
let token = self.token.next();
|
||||
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
|
||||
self.services.push(StreamNewService::create(
|
||||
lst.set_nonblocking(true)?;
|
||||
let addr = lst.local_addr()?;
|
||||
|
||||
let token = self.next_token();
|
||||
self.factories.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory,
|
||||
addr,
|
||||
));
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), StdListener::Uds(lst)));
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add new service to the server.
|
||||
pub fn listen<F, N: AsRef<str>>(
|
||||
mut self,
|
||||
name: N,
|
||||
lst: net::TcpListener,
|
||||
factory: F,
|
||||
) -> io::Result<Self>
|
||||
where
|
||||
F: ServiceFactory<TcpStream>,
|
||||
{
|
||||
let token = self.token.next();
|
||||
self.services.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory,
|
||||
lst.local_addr()?,
|
||||
));
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), StdListener::Tcp(lst)));
|
||||
Ok(self)
|
||||
}
|
||||
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
|
||||
|
||||
#[doc(hidden)]
|
||||
pub fn start(self) -> Server {
|
||||
self.run()
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Starts processing incoming connections and return server controller.
|
||||
pub fn run(mut self) -> Server {
|
||||
pub fn run(self) -> Server {
|
||||
if self.sockets.is_empty() {
|
||||
panic!("Server should have at least one bound socket");
|
||||
} else {
|
||||
info!("Starting {} workers", self.threads);
|
||||
|
||||
// start workers
|
||||
let workers = (0..self.threads)
|
||||
.map(|idx| {
|
||||
let worker = self.start_worker(idx, self.accept.get_notify());
|
||||
self.workers.push((idx, worker.clone()));
|
||||
|
||||
worker
|
||||
})
|
||||
.collect();
|
||||
|
||||
// start accept thread
|
||||
for sock in &self.sockets {
|
||||
info!("Starting \"{}\" service on {}", sock.1, sock.2);
|
||||
}
|
||||
self.accept.start(
|
||||
mem::take(&mut self.sockets)
|
||||
.into_iter()
|
||||
.map(|t| (t.0, t.2))
|
||||
.collect(),
|
||||
workers,
|
||||
);
|
||||
|
||||
// handle signals
|
||||
if !self.no_signals {
|
||||
Signals::start(self.server.clone()).unwrap();
|
||||
}
|
||||
|
||||
// start http server actor
|
||||
let server = self.server.clone();
|
||||
spawn(self);
|
||||
server
|
||||
tracing::info!("starting {} workers", self.threads);
|
||||
Server::new(self)
|
||||
}
|
||||
}
|
||||
|
||||
fn start_worker(&self, idx: usize, notify: AcceptNotify) -> WorkerClient {
|
||||
let avail = WorkerAvailability::new(notify);
|
||||
let services: Vec<Box<dyn InternalServiceFactory>> =
|
||||
self.services.iter().map(|v| v.clone_factory()).collect();
|
||||
|
||||
Worker::start(idx, services, avail, self.shutdown_timeout)
|
||||
}
|
||||
|
||||
fn handle_cmd(&mut self, item: ServerCommand) {
|
||||
match item {
|
||||
ServerCommand::Pause(tx) => {
|
||||
self.accept.send(Command::Pause);
|
||||
let _ = tx.send(());
|
||||
}
|
||||
ServerCommand::Resume(tx) => {
|
||||
self.accept.send(Command::Resume);
|
||||
let _ = tx.send(());
|
||||
}
|
||||
ServerCommand::Signal(sig) => {
|
||||
// Signals support
|
||||
// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
|
||||
match sig {
|
||||
Signal::Int => {
|
||||
info!("SIGINT received, exiting");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
Signal::Term => {
|
||||
info!("SIGTERM received, stopping");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: true,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
Signal::Quit => {
|
||||
info!("SIGQUIT received, exiting");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
ServerCommand::Notify(tx) => {
|
||||
self.notify.push(tx);
|
||||
}
|
||||
ServerCommand::Stop {
|
||||
graceful,
|
||||
completion,
|
||||
} => {
|
||||
let exit = self.exit;
|
||||
|
||||
// stop accept thread
|
||||
self.accept.send(Command::Stop);
|
||||
let notify = std::mem::take(&mut self.notify);
|
||||
|
||||
// stop workers
|
||||
if !self.workers.is_empty() && graceful {
|
||||
spawn(
|
||||
self.workers
|
||||
.iter()
|
||||
.map(move |worker| worker.1.stop(graceful))
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.then(move |_| {
|
||||
if let Some(tx) = completion {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
for tx in notify {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
if exit {
|
||||
spawn(
|
||||
async {
|
||||
delay_until(
|
||||
Instant::now() + Duration::from_millis(300),
|
||||
)
|
||||
.await;
|
||||
System::current().stop();
|
||||
}
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
ready(())
|
||||
}),
|
||||
)
|
||||
} else {
|
||||
// we need to stop system if server was spawned
|
||||
if self.exit {
|
||||
spawn(
|
||||
delay_until(Instant::now() + Duration::from_millis(300)).then(
|
||||
|_| {
|
||||
System::current().stop();
|
||||
ready(())
|
||||
},
|
||||
),
|
||||
);
|
||||
}
|
||||
if let Some(tx) = completion {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
for tx in notify {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
}
|
||||
}
|
||||
ServerCommand::WorkerFaulted(idx) => {
|
||||
let mut found = false;
|
||||
for i in 0..self.workers.len() {
|
||||
if self.workers[i].0 == idx {
|
||||
self.workers.swap_remove(i);
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
error!("Worker has died {:?}, restarting", idx);
|
||||
|
||||
let mut new_idx = self.workers.len();
|
||||
'found: loop {
|
||||
for i in 0..self.workers.len() {
|
||||
if self.workers[i].0 == new_idx {
|
||||
new_idx += 1;
|
||||
continue 'found;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
let worker = self.start_worker(new_idx, self.accept.get_notify());
|
||||
self.workers.push((new_idx, worker.clone()));
|
||||
self.accept.send(Command::Worker(worker));
|
||||
}
|
||||
}
|
||||
}
|
||||
fn next_token(&mut self) -> usize {
|
||||
let token = self.token;
|
||||
self.token += 1;
|
||||
token
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for ServerBuilder {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
loop {
|
||||
match ready!(Pin::new(&mut self.cmd).poll_next(cx)) {
|
||||
Some(it) => self.as_mut().get_mut().handle_cmd(it),
|
||||
None => {
|
||||
return Poll::Pending;
|
||||
}
|
||||
#[cfg(unix)]
|
||||
impl ServerBuilder {
|
||||
/// Adds new service to the server using a UDS (unix domain socket) address.
|
||||
///
|
||||
/// # Worker Count
|
||||
///
|
||||
/// The `factory` will be instantiated multiple times in most scenarios. The number of
|
||||
/// instantiations is: number of [`workers`](Self::workers()).
|
||||
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
|
||||
where
|
||||
F: ServerServiceFactory<actix_rt::net::UnixStream>,
|
||||
N: AsRef<str>,
|
||||
U: AsRef<std::path::Path>,
|
||||
{
|
||||
// The path must not exist when we try to bind.
|
||||
// Try to remove it to avoid bind error.
|
||||
if let Err(err) = std::fs::remove_file(addr.as_ref()) {
|
||||
// NotFound is expected and not an issue. Anything else is.
|
||||
if err.kind() != std::io::ErrorKind::NotFound {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
|
||||
let lst = crate::socket::StdUnixListener::bind(addr)?;
|
||||
self.listen_uds(name, lst, factory)
|
||||
}
|
||||
|
||||
/// Adds new service to the server using a UDS (unix domain socket) listener already bound.
|
||||
///
|
||||
/// Useful when running as a systemd service and a socket FD is acquired externally.
|
||||
///
|
||||
/// # Worker Count
|
||||
///
|
||||
/// The `factory` will be instantiated multiple times in most scenarios. The number of
|
||||
/// instantiations is: number of [`workers`](Self::workers()).
|
||||
pub fn listen_uds<F, N: AsRef<str>>(
|
||||
mut self,
|
||||
name: N,
|
||||
lst: crate::socket::StdUnixListener,
|
||||
factory: F,
|
||||
) -> io::Result<Self>
|
||||
where
|
||||
F: ServerServiceFactory<actix_rt::net::UnixStream>,
|
||||
{
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
|
||||
lst.set_nonblocking(true)?;
|
||||
|
||||
let token = self.next_token();
|
||||
let addr = crate::socket::StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
|
||||
|
||||
self.factories.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory,
|
||||
addr,
|
||||
));
|
||||
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
|
||||
|
||||
Ok(self)
|
||||
}
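    // A minimal UDS sketch (unix only; the socket path and service name are
    // placeholders): a pre-bound std listener is handed over, mirroring the
    // systemd socket-activation use case described above.
    #[allow(dead_code)]
    fn listen_uds_example() -> io::Result<ServerBuilder> {
        let lst = crate::socket::StdUnixListener::bind("/tmp/actix-uds-demo.socket")?;

        Server::build().listen_uds("uds-demo", lst, || {
            actix_service::fn_service(|_: actix_rt::net::UnixStream| async { Ok::<_, ()>(()) })
        })
    }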
|
||||
}
|
||||
|
||||
pub(super) fn bind_addr<S: net::ToSocketAddrs>(
|
||||
pub(super) fn bind_addr<S: ToSocketAddrs>(
|
||||
addr: S,
|
||||
backlog: i32,
|
||||
) -> io::Result<Vec<net::TcpListener>> {
|
||||
let mut err = None;
|
||||
let mut succ = false;
|
||||
backlog: u32,
|
||||
mptcp: &MpTcp,
|
||||
) -> io::Result<Vec<MioTcpListener>> {
|
||||
let mut opt_err = None;
|
||||
let mut success = false;
|
||||
let mut sockets = Vec::new();
|
||||
|
||||
for addr in addr.to_socket_addrs()? {
|
||||
match create_tcp_listener(addr, backlog) {
|
||||
match create_mio_tcp_listener(addr, backlog, mptcp) {
|
||||
Ok(lst) => {
|
||||
succ = true;
|
||||
success = true;
|
||||
sockets.push(lst);
|
||||
}
|
||||
Err(e) => err = Some(e),
|
||||
Err(err) => opt_err = Some(err),
|
||||
}
|
||||
}
|
||||
|
||||
if !succ {
|
||||
if let Some(e) = err.take() {
|
||||
Err(e)
|
||||
} else {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Can not bind to address.",
|
||||
))
|
||||
}
|
||||
} else {
|
||||
if success {
|
||||
Ok(sockets)
|
||||
} else if let Some(err) = opt_err.take() {
|
||||
Err(err)
|
||||
} else {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Can not bind to address.",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn create_tcp_listener(addr: net::SocketAddr, backlog: i32) -> io::Result<net::TcpListener> {
|
||||
let domain = match addr {
|
||||
net::SocketAddr::V4(_) => Domain::ipv4(),
|
||||
net::SocketAddr::V6(_) => Domain::ipv6(),
|
||||
};
|
||||
let socket = Socket::new(domain, Type::stream(), Some(Protocol::tcp()))?;
|
||||
socket.set_reuse_address(true)?;
|
||||
socket.bind(&addr.into())?;
|
||||
socket.listen(backlog)?;
|
||||
Ok(socket.into_tcp_listener())
|
||||
}
|
||||
|
@ -1,285 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::{fmt, io, net};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service as actix;
|
||||
use actix_utils::counter::CounterGuard;
|
||||
use futures_util::future::{ok, Future, FutureExt, LocalBoxFuture};
|
||||
use log::error;
|
||||
|
||||
use super::builder::bind_addr;
|
||||
use super::service::{
|
||||
BoxedServerService, InternalServiceFactory, ServerMessage, StreamService,
|
||||
};
|
||||
use super::Token;
|
||||
|
||||
pub struct ServiceConfig {
|
||||
pub(crate) services: Vec<(String, net::TcpListener)>,
|
||||
pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
|
||||
pub(crate) threads: usize,
|
||||
pub(crate) backlog: i32,
|
||||
}
|
||||
|
||||
impl ServiceConfig {
|
||||
pub(super) fn new(threads: usize, backlog: i32) -> ServiceConfig {
|
||||
ServiceConfig {
|
||||
threads,
|
||||
backlog,
|
||||
services: Vec::new(),
|
||||
apply: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set number of workers to start.
|
||||
///
|
||||
/// By default, the server uses the number of available logical CPUs as the worker count.
|
||||
pub fn workers(&mut self, num: usize) {
|
||||
self.threads = num;
|
||||
}
|
||||
|
||||
/// Add new service to server
|
||||
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
|
||||
where
|
||||
U: net::ToSocketAddrs,
|
||||
{
|
||||
let sockets = bind_addr(addr, self.backlog)?;
|
||||
|
||||
for lst in sockets {
|
||||
self.listen(name.as_ref(), lst);
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add new service to server
|
||||
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: net::TcpListener) -> &mut Self {
|
||||
if self.apply.is_none() {
|
||||
self.apply = Some(Box::new(not_configured));
|
||||
}
|
||||
self.services.push((name.as_ref().to_string(), lst));
|
||||
self
|
||||
}
|
||||
|
||||
/// Register a service configuration function. This function is called
/// during worker runtime configuration and is executed in the worker thread.
|
||||
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
|
||||
where
|
||||
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
|
||||
{
|
||||
self.apply = Some(Box::new(f));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) struct ConfiguredService {
|
||||
rt: Box<dyn ServiceRuntimeConfiguration>,
|
||||
names: HashMap<Token, (String, net::SocketAddr)>,
|
||||
topics: HashMap<String, Token>,
|
||||
services: Vec<Token>,
|
||||
}
|
||||
|
||||
impl ConfiguredService {
|
||||
pub(super) fn new(rt: Box<dyn ServiceRuntimeConfiguration>) -> Self {
|
||||
ConfiguredService {
|
||||
rt,
|
||||
names: HashMap::new(),
|
||||
topics: HashMap::new(),
|
||||
services: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn stream(&mut self, token: Token, name: String, addr: net::SocketAddr) {
|
||||
self.names.insert(token, (name.clone(), addr));
|
||||
self.topics.insert(name, token);
|
||||
self.services.push(token);
|
||||
}
|
||||
}
|
||||
|
||||
impl InternalServiceFactory for ConfiguredService {
|
||||
fn name(&self, token: Token) -> &str {
|
||||
&self.names[&token].0
|
||||
}
|
||||
|
||||
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
|
||||
Box::new(Self {
|
||||
rt: self.rt.clone(),
|
||||
names: self.names.clone(),
|
||||
topics: self.topics.clone(),
|
||||
services: self.services.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
|
||||
// configure services
|
||||
let mut rt = ServiceRuntime::new(self.topics.clone());
|
||||
self.rt.configure(&mut rt);
|
||||
rt.validate();
|
||||
let mut names = self.names.clone();
|
||||
let tokens = self.services.clone();
|
||||
|
||||
// construct services
|
||||
async move {
|
||||
let mut services = rt.services;
|
||||
// TODO: Proper error handling here
|
||||
for f in rt.onstart.into_iter() {
|
||||
f.await;
|
||||
}
|
||||
let mut res = vec![];
|
||||
for token in tokens {
|
||||
if let Some(srv) = services.remove(&token) {
|
||||
let newserv = srv.new_service(());
|
||||
match newserv.await {
|
||||
Ok(serv) => {
|
||||
res.push((token, serv));
|
||||
}
|
||||
Err(_) => {
|
||||
error!("Can not construct service");
|
||||
return Err(());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let name = names.remove(&token).unwrap().0;
|
||||
res.push((
|
||||
token,
|
||||
Box::new(StreamService::new(actix::fn_service(
|
||||
move |_: TcpStream| {
|
||||
error!("Service {:?} is not configured", name);
|
||||
ok::<_, ()>(())
|
||||
},
|
||||
))),
|
||||
));
|
||||
};
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
.boxed_local()
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) trait ServiceRuntimeConfiguration: Send {
|
||||
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration>;
|
||||
|
||||
fn configure(&self, rt: &mut ServiceRuntime);
|
||||
}
|
||||
|
||||
impl<F> ServiceRuntimeConfiguration for F
|
||||
where
|
||||
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
|
||||
{
|
||||
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
|
||||
fn configure(&self, rt: &mut ServiceRuntime) {
|
||||
(self)(rt)
|
||||
}
|
||||
}
|
||||
|
||||
fn not_configured(_: &mut ServiceRuntime) {
|
||||
error!("Service is not configured");
|
||||
}
|
||||
|
||||
pub struct ServiceRuntime {
|
||||
names: HashMap<String, Token>,
|
||||
services: HashMap<Token, BoxedNewService>,
|
||||
onstart: Vec<LocalBoxFuture<'static, ()>>,
|
||||
}
|
||||
|
||||
impl ServiceRuntime {
|
||||
fn new(names: HashMap<String, Token>) -> Self {
|
||||
ServiceRuntime {
|
||||
names,
|
||||
services: HashMap::new(),
|
||||
onstart: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn validate(&self) {
|
||||
for (name, token) in &self.names {
|
||||
if !self.services.contains_key(&token) {
|
||||
error!("Service {:?} is not configured", name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Register service.
|
||||
///
|
||||
/// Name of the service must be registered during configuration stage with
|
||||
/// *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
|
||||
pub fn service<T, F>(&mut self, name: &str, service: F)
|
||||
where
|
||||
F: actix::IntoServiceFactory<T>,
|
||||
T: actix::ServiceFactory<Config = (), Request = TcpStream> + 'static,
|
||||
T::Future: 'static,
|
||||
T::Service: 'static,
|
||||
T::InitError: fmt::Debug,
|
||||
{
|
||||
// let name = name.to_owned();
|
||||
if let Some(token) = self.names.get(name) {
|
||||
self.services.insert(
|
||||
*token,
|
||||
Box::new(ServiceFactory {
|
||||
inner: service.into_factory(),
|
||||
}),
|
||||
);
|
||||
} else {
|
||||
panic!("Unknown service: {:?}", name);
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute future before services initialization.
|
||||
pub fn on_start<F>(&mut self, fut: F)
|
||||
where
|
||||
F: Future<Output = ()> + 'static,
|
||||
{
|
||||
self.onstart.push(fut.boxed_local())
|
||||
}
|
||||
}
|
||||
|
||||
type BoxedNewService = Box<
|
||||
dyn actix::ServiceFactory<
|
||||
Request = (Option<CounterGuard>, ServerMessage),
|
||||
Response = (),
|
||||
Error = (),
|
||||
InitError = (),
|
||||
Config = (),
|
||||
Service = BoxedServerService,
|
||||
Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>,
|
||||
>,
|
||||
>;
|
||||
|
||||
struct ServiceFactory<T> {
|
||||
inner: T,
|
||||
}
|
||||
|
||||
impl<T> actix::ServiceFactory for ServiceFactory<T>
|
||||
where
|
||||
T: actix::ServiceFactory<Config = (), Request = TcpStream>,
|
||||
T::Future: 'static,
|
||||
T::Service: 'static,
|
||||
T::Error: 'static,
|
||||
T::InitError: fmt::Debug + 'static,
|
||||
{
|
||||
type Request = (Option<CounterGuard>, ServerMessage);
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type InitError = ();
|
||||
type Config = ();
|
||||
type Service = BoxedServerService;
|
||||
type Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
let fut = self.inner.new_service(());
|
||||
async move {
|
||||
match fut.await {
|
||||
Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
|
||||
Err(e) => {
|
||||
error!("Can not construct service: {:?}", e);
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
}
|
||||
.boxed_local()
|
||||
}
|
||||
}
|
actix-server/src/handle.rs (new file)
@ -0,0 +1,56 @@
|
||||
use std::future::Future;
|
||||
|
||||
use tokio::sync::{mpsc::UnboundedSender, oneshot};
|
||||
|
||||
use crate::server::ServerCommand;
|
||||
|
||||
/// Server handle.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ServerHandle {
|
||||
cmd_tx: UnboundedSender<ServerCommand>,
|
||||
}
|
||||
|
||||
impl ServerHandle {
|
||||
pub(crate) fn new(cmd_tx: UnboundedSender<ServerCommand>) -> Self {
|
||||
ServerHandle { cmd_tx }
|
||||
}
|
||||
|
||||
pub(crate) fn worker_faulted(&self, idx: usize) {
|
||||
let _ = self.cmd_tx.send(ServerCommand::WorkerFaulted(idx));
|
||||
}
|
||||
|
||||
/// Pause accepting incoming connections.
|
||||
///
|
||||
/// May drop pending connections on the socket. All open connections remain active.
|
||||
pub fn pause(&self) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.cmd_tx.send(ServerCommand::Pause(tx));
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Resume accepting incoming connections.
|
||||
pub fn resume(&self) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.cmd_tx.send(ServerCommand::Resume(tx));
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop incoming connection processing, stop all workers and exit.
|
||||
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
let _ = self.cmd_tx.send(ServerCommand::Stop {
|
||||
graceful,
|
||||
completion: Some(tx),
|
||||
force_system_stop: false,
|
||||
});
|
||||
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
}
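// A minimal usage sketch (not part of this diff): drive the handle from a separate
// task while the `Server` future itself is awaited to completion.
#[allow(dead_code)]
async fn handle_example(server: crate::Server) -> std::io::Result<()> {
    let handle = server.handle();

    actix_rt::spawn(async move {
        handle.pause().await; // stop accepting new connections
        handle.resume().await; // start accepting again
        handle.stop(true).await; // begin graceful shutdown
    });

    // Resolves once shutdown has completed.
    server.await
}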
|
actix-server/src/join_all.rs (new file)
@ -0,0 +1,78 @@
|
||||
use std::{
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures_core::future::BoxFuture;
|
||||
|
||||
// a poor man's join future. joined future is only used when starting/stopping the server.
|
||||
// pin_project and pinned futures are overkill for this task.
|
||||
pub(crate) struct JoinAll<T> {
|
||||
fut: Vec<JoinFuture<T>>,
|
||||
}
|
||||
|
||||
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + Send + 'static>) -> JoinAll<T> {
|
||||
let fut = fut
|
||||
.into_iter()
|
||||
.map(|f| JoinFuture::Future(Box::pin(f)))
|
||||
.collect();
|
||||
|
||||
JoinAll { fut }
|
||||
}
|
||||
|
||||
enum JoinFuture<T> {
|
||||
Future(BoxFuture<'static, T>),
|
||||
Result(Option<T>),
|
||||
}
|
||||
|
||||
impl<T> Unpin for JoinAll<T> {}
|
||||
|
||||
impl<T> Future for JoinAll<T> {
|
||||
type Output = Vec<T>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let mut ready = true;
|
||||
|
||||
let this = self.get_mut();
|
||||
for fut in this.fut.iter_mut() {
|
||||
if let JoinFuture::Future(f) = fut {
|
||||
match f.as_mut().poll(cx) {
|
||||
Poll::Ready(t) => {
|
||||
*fut = JoinFuture::Result(Some(t));
|
||||
}
|
||||
Poll::Pending => ready = false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ready {
|
||||
let mut res = Vec::new();
|
||||
for fut in this.fut.iter_mut() {
|
||||
if let JoinFuture::Result(f) = fut {
|
||||
res.push(f.take().unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Ready(res)
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use actix_utils::future::ready;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_join_all() {
|
||||
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
|
||||
let mut res = join_all(futs).await.into_iter();
|
||||
assert_eq!(Ok(1), res.next().unwrap());
|
||||
assert_eq!(Err(3), res.next().unwrap());
|
||||
assert_eq!(Ok(9), res.next().unwrap());
|
||||
}
|
||||
}
|
@ -1,37 +1,34 @@
|
||||
//! General purpose TCP server.
|
||||
|
||||
#![deny(rust_2018_idioms)]
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
mod accept;
|
||||
mod availability;
|
||||
mod builder;
|
||||
mod config;
|
||||
mod handle;
|
||||
mod join_all;
|
||||
mod server;
|
||||
mod service;
|
||||
mod signals;
|
||||
mod socket;
|
||||
mod test_server;
|
||||
mod waker_queue;
|
||||
mod worker;
|
||||
|
||||
pub use self::builder::ServerBuilder;
|
||||
pub use self::config::{ServiceConfig, ServiceRuntime};
|
||||
pub use self::server::Server;
|
||||
pub use self::service::ServiceFactory;
|
||||
|
||||
#[doc(hidden)]
|
||||
pub use self::socket::FromStream;
|
||||
|
||||
/// Socket ID token
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
|
||||
pub(crate) struct Token(usize);
|
||||
|
||||
impl Token {
|
||||
pub(crate) fn next(&mut self) -> Token {
|
||||
let token = Token(self.0);
|
||||
self.0 += 1;
|
||||
token
|
||||
}
|
||||
}
|
||||
pub use self::{
|
||||
builder::{MpTcp, ServerBuilder},
|
||||
handle::ServerHandle,
|
||||
server::Server,
|
||||
service::ServerServiceFactory,
|
||||
test_server::TestServer,
|
||||
};
|
||||
|
||||
/// Start server building process
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "2.0.0", note = "Use `Server::build()`.")]
|
||||
pub fn new() -> ServerBuilder {
|
||||
ServerBuilder::default()
|
||||
}
|
||||
|
@ -1,108 +1,368 @@
|
||||
use std::future::Future;
|
||||
use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{
|
||||
future::Future,
|
||||
io, mem,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
thread,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use futures_channel::mpsc::UnboundedSender;
|
||||
use futures_channel::oneshot;
|
||||
use futures_util::FutureExt;
|
||||
use actix_rt::{time::sleep, System};
|
||||
use futures_core::{future::BoxFuture, Stream};
|
||||
use futures_util::stream::StreamExt as _;
|
||||
use tokio::sync::{mpsc::UnboundedReceiver, oneshot};
|
||||
use tracing::{error, info};
|
||||
|
||||
use crate::builder::ServerBuilder;
|
||||
use crate::signals::Signal;
|
||||
use crate::{
|
||||
accept::Accept,
|
||||
builder::ServerBuilder,
|
||||
join_all::join_all,
|
||||
service::InternalServiceFactory,
|
||||
signals::{SignalKind, Signals},
|
||||
waker_queue::{WakerInterest, WakerQueue},
|
||||
worker::{ServerWorker, ServerWorkerConfig, WorkerHandleServer},
|
||||
ServerHandle,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum ServerCommand {
|
||||
/// Worker failed to accept connection, indicating a probable panic.
|
||||
///
|
||||
/// Contains index of faulted worker.
|
||||
WorkerFaulted(usize),
|
||||
|
||||
/// Pause accepting connections.
|
||||
///
|
||||
/// Contains return channel to notify caller of successful state change.
|
||||
Pause(oneshot::Sender<()>),
|
||||
|
||||
/// Resume accepting connections.
|
||||
///
|
||||
/// Contains return channel to notify caller of successful state change.
|
||||
Resume(oneshot::Sender<()>),
|
||||
Signal(Signal),
|
||||
/// Whether to try and shut down gracefully
|
||||
|
||||
/// Stop accepting connections and begin shutdown procedure.
|
||||
Stop {
|
||||
/// True if shut down should be graceful.
|
||||
graceful: bool,
|
||||
|
||||
/// Return channel to notify caller that shutdown is complete.
|
||||
completion: Option<oneshot::Sender<()>>,
|
||||
|
||||
/// Force System exit when true, overriding `ServerBuilder::system_exit()` if it is false.
|
||||
force_system_stop: bool,
|
||||
},
|
||||
/// Notify of server stop
|
||||
Notify(oneshot::Sender<()>),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Server(
|
||||
UnboundedSender<ServerCommand>,
|
||||
Option<oneshot::Receiver<()>>,
|
||||
);
|
||||
/// General purpose TCP server that runs services receiving Tokio `TcpStream`s.
|
||||
///
|
||||
/// Handles creating worker threads, restarting faulted workers, connection accepting, and
|
||||
/// back-pressure logic.
|
||||
///
|
||||
/// Creates a worker per CPU core (or the number specified in [`ServerBuilder::workers`]) and
|
||||
/// distributes connections with a round-robin strategy.
|
||||
///
|
||||
/// The [Server] must be awaited or polled in order to start running. It will resolve when the
|
||||
/// server has fully shut down.
|
||||
///
|
||||
/// # Shutdown Signals
|
||||
/// On UNIX systems, `SIGTERM` will start a graceful shutdown and `SIGQUIT` or `SIGINT` will start a
|
||||
/// forced shutdown. On Windows, a Ctrl-C signal will start a forced shutdown.
|
||||
///
|
||||
/// A graceful shutdown will wait for all workers to stop first.
|
||||
///
|
||||
/// # Examples
|
||||
/// The following is a TCP echo server. Test using `telnet 127.0.0.1 8080`.
|
||||
///
|
||||
/// ```no_run
|
||||
/// use std::io;
|
||||
///
|
||||
/// use actix_rt::net::TcpStream;
|
||||
/// use actix_server::Server;
|
||||
/// use actix_service::{fn_service, ServiceFactoryExt as _};
|
||||
/// use bytes::BytesMut;
|
||||
/// use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
|
||||
///
|
||||
/// #[actix_rt::main]
|
||||
/// async fn main() -> io::Result<()> {
|
||||
/// let bind_addr = ("127.0.0.1", 8080);
|
||||
///
|
||||
/// Server::build()
|
||||
/// .bind("echo", bind_addr, move || {
|
||||
/// fn_service(move |mut stream: TcpStream| {
|
||||
/// async move {
|
||||
/// let mut size = 0;
|
||||
/// let mut buf = BytesMut::new();
|
||||
///
|
||||
/// loop {
|
||||
/// match stream.read_buf(&mut buf).await {
|
||||
/// // end of stream; bail from loop
|
||||
/// Ok(0) => break,
|
||||
///
|
||||
/// // write bytes back to stream
|
||||
/// Ok(bytes_read) => {
|
||||
/// stream.write_all(&buf[size..]).await.unwrap();
|
||||
/// size += bytes_read;
|
||||
/// }
|
||||
///
|
||||
/// Err(err) => {
|
||||
/// eprintln!("Stream Error: {:?}", err);
|
||||
/// return Err(());
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// })
|
||||
/// .map_err(|err| eprintln!("Service Error: {:?}", err))
|
||||
/// })?
|
||||
/// .run()
|
||||
/// .await
|
||||
/// }
|
||||
/// ```
|
||||
#[must_use = "Server does nothing unless you `.await` or poll it"]
|
||||
pub struct Server {
|
||||
handle: ServerHandle,
|
||||
fut: BoxFuture<'static, io::Result<()>>,
|
||||
}
|
||||
|
||||
impl Server {
|
||||
pub(crate) fn new(tx: UnboundedSender<ServerCommand>) -> Self {
|
||||
Server(tx, None)
|
||||
}
|
||||
|
||||
/// Start server building process
|
||||
/// Create server build.
|
||||
pub fn build() -> ServerBuilder {
|
||||
ServerBuilder::default()
|
||||
}
|
||||
|
||||
pub(crate) fn signal(&self, sig: Signal) {
|
||||
let _ = self.0.unbounded_send(ServerCommand::Signal(sig));
|
||||
pub(crate) fn new(builder: ServerBuilder) -> Self {
|
||||
Server {
|
||||
handle: ServerHandle::new(builder.cmd_tx.clone()),
|
||||
fut: Box::pin(ServerInner::run(builder)),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn worker_faulted(&self, idx: usize) {
|
||||
let _ = self.0.unbounded_send(ServerCommand::WorkerFaulted(idx));
|
||||
}
|
||||
|
||||
/// Pause accepting incoming connections
|
||||
/// Get a `Server` handle that can be used to issue commands and change its state.
|
||||
///
|
||||
/// If the socket contains pending connections, they might be dropped.
|
||||
/// All open connections remain active.
|
||||
pub fn pause(&self) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.0.unbounded_send(ServerCommand::Pause(tx));
|
||||
rx.map(|_| ())
|
||||
}
|
||||
|
||||
/// Resume accepting incoming connections
|
||||
pub fn resume(&self) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.0.unbounded_send(ServerCommand::Resume(tx));
|
||||
rx.map(|_| ())
|
||||
}
|
||||
|
||||
/// Stop incoming connection processing, stop all workers and exit.
|
||||
///
|
||||
/// If the server was started with the `spawn()` method, then the spawned thread gets terminated.
|
||||
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.0.unbounded_send(ServerCommand::Stop {
|
||||
graceful,
|
||||
completion: Some(tx),
|
||||
});
|
||||
rx.map(|_| ())
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Server {
|
||||
fn clone(&self) -> Self {
|
||||
Self(self.0.clone(), None)
|
||||
/// See [ServerHandle](ServerHandle) for usage.
|
||||
pub fn handle(&self) -> ServerHandle {
|
||||
self.handle.clone()
|
||||
}
|
||||
}
|
||||
|
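The `handle()` accessor above returns the cloneable `ServerHandle` that drives the `Pause`, `Resume`, and `Stop` commands handled further down in `ServerInner::handle_cmd`. A minimal sketch of driving those commands from outside the server future, assuming the `actix-server`, `actix-service`, and `actix-rt` crates from this diff (the "app" service name, bind address, and worker count are illustrative):

```rust
use std::io;

use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> io::Result<()> {
    let server = Server::build()
        .workers(2)
        .bind("app", ("127.0.0.1", 8080), || {
            fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run();

    // Grab a cloneable handle before awaiting the server future.
    let handle = server.handle();

    actix_rt::spawn(async move {
        // Temporarily stop accepting new connections, then resume.
        handle.pause().await;
        handle.resume().await;

        // Graceful stop: waits for workers to finish before the server future resolves.
        handle.stop(true).await;
    });

    server.await
}
```

Each handle method only queues a `ServerCommand` on the unbounded channel, so calling them from any task or thread is cheap.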
||||
impl Future for Server {
|
||||
type Output = io::Result<()>;
|
||||
|
||||
#[inline]
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
Pin::new(&mut Pin::into_inner(self).fut).poll(cx)
|
||||
}
|
||||
}
|
||||
|
||||
if this.1.is_none() {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
if this.0.unbounded_send(ServerCommand::Notify(tx)).is_err() {
|
||||
return Poll::Ready(Ok(()));
|
||||
pub struct ServerInner {
|
||||
worker_handles: Vec<WorkerHandleServer>,
|
||||
accept_handle: Option<thread::JoinHandle<()>>,
|
||||
worker_config: ServerWorkerConfig,
|
||||
services: Vec<Box<dyn InternalServiceFactory>>,
|
||||
waker_queue: WakerQueue,
|
||||
system_stop: bool,
|
||||
stopping: bool,
|
||||
}
|
||||
|
||||
impl ServerInner {
|
||||
async fn run(builder: ServerBuilder) -> io::Result<()> {
|
||||
let (mut this, mut mux) = Self::run_sync(builder)?;
|
||||
|
||||
while let Some(cmd) = mux.next().await {
|
||||
this.handle_cmd(cmd).await;
|
||||
|
||||
if this.stopping {
|
||||
break;
|
||||
}
|
||||
this.1 = Some(rx);
|
||||
}
|
||||
|
||||
match Pin::new(this.1.as_mut().unwrap()).poll(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
|
||||
Poll::Ready(Err(_)) => Poll::Ready(Ok(())),
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn run_sync(mut builder: ServerBuilder) -> io::Result<(Self, ServerEventMultiplexer)> {
|
||||
// Log which runtime will be used.
|
||||
let is_actix = actix_rt::System::try_current().is_some();
|
||||
let is_tokio = tokio::runtime::Handle::try_current().is_ok();
|
||||
|
||||
match (is_actix, is_tokio) {
|
||||
(true, _) => info!("Actix runtime found; starting in Actix runtime"),
|
||||
(_, true) => info!("Tokio runtime found; starting in existing Tokio runtime"),
|
||||
(_, false) => panic!("Actix or Tokio runtime not found; halting"),
|
||||
}
|
||||
|
||||
for (_, name, lst) in &builder.sockets {
|
||||
info!(
|
||||
r#"starting service: "{}", workers: {}, listening on: {}"#,
|
||||
name,
|
||||
builder.threads,
|
||||
lst.local_addr()
|
||||
);
|
||||
}
|
||||
|
||||
let sockets = mem::take(&mut builder.sockets)
|
||||
.into_iter()
|
||||
.map(|t| (t.0, t.2))
|
||||
.collect();
|
||||
|
||||
let (waker_queue, worker_handles, accept_handle) = Accept::start(sockets, &builder)?;
|
||||
|
||||
let mux = ServerEventMultiplexer {
|
||||
signal_fut: (builder.listen_os_signals).then(Signals::new),
|
||||
cmd_rx: builder.cmd_rx,
|
||||
};
|
||||
|
||||
let server = ServerInner {
|
||||
waker_queue,
|
||||
accept_handle: Some(accept_handle),
|
||||
worker_handles,
|
||||
worker_config: builder.worker_config,
|
||||
services: builder.factories,
|
||||
system_stop: builder.exit,
|
||||
stopping: false,
|
||||
};
|
||||
|
||||
Ok((server, mux))
|
||||
}
|
||||
|
||||
async fn handle_cmd(&mut self, item: ServerCommand) {
|
||||
match item {
|
||||
ServerCommand::Pause(tx) => {
|
||||
self.waker_queue.wake(WakerInterest::Pause);
|
||||
let _ = tx.send(());
|
||||
}
|
||||
|
||||
ServerCommand::Resume(tx) => {
|
||||
self.waker_queue.wake(WakerInterest::Resume);
|
||||
let _ = tx.send(());
|
||||
}
|
||||
|
||||
ServerCommand::Stop {
|
||||
graceful,
|
||||
completion,
|
||||
force_system_stop,
|
||||
} => {
|
||||
self.stopping = true;
|
||||
|
||||
// Signal accept thread to stop.
|
||||
// Signal is non-blocking; we wait for thread to stop later.
|
||||
self.waker_queue.wake(WakerInterest::Stop);
|
||||
|
||||
// send stop signal to workers
|
||||
let workers_stop = self
|
||||
.worker_handles
|
||||
.iter()
|
||||
.map(|worker| worker.stop(graceful))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if graceful {
|
||||
// wait for all workers to shut down
|
||||
let _ = join_all(workers_stop).await;
|
||||
}
|
||||
|
||||
// wait for the accept thread to stop
|
||||
self.accept_handle
|
||||
.take()
|
||||
.unwrap()
|
||||
.join()
|
||||
.expect("Accept thread must not panic in any case");
|
||||
|
||||
if let Some(tx) = completion {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
|
||||
if self.system_stop || force_system_stop {
|
||||
sleep(Duration::from_millis(300)).await;
|
||||
System::try_current().as_ref().map(System::stop);
|
||||
}
|
||||
}
|
||||
|
||||
ServerCommand::WorkerFaulted(idx) => {
|
||||
// TODO: maybe just return with a warning log if not found?
|
||||
assert!(self.worker_handles.iter().any(|wrk| wrk.idx == idx));
|
||||
|
||||
error!("worker {} has died; restarting", idx);
|
||||
|
||||
let factories = self
|
||||
.services
|
||||
.iter()
|
||||
.map(|service| service.clone_factory())
|
||||
.collect();
|
||||
|
||||
match ServerWorker::start(
|
||||
idx,
|
||||
factories,
|
||||
self.waker_queue.clone(),
|
||||
self.worker_config,
|
||||
) {
|
||||
Ok((handle_accept, handle_server)) => {
|
||||
*self
|
||||
.worker_handles
|
||||
.iter_mut()
|
||||
.find(|wrk| wrk.idx == idx)
|
||||
.unwrap() = handle_server;
|
||||
|
||||
self.waker_queue.wake(WakerInterest::Worker(handle_accept));
|
||||
}
|
||||
|
||||
Err(err) => error!("can not restart worker {}: {}", idx, err),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn map_signal(signal: SignalKind) -> ServerCommand {
|
||||
match signal {
|
||||
SignalKind::Int => {
|
||||
info!("SIGINT received; starting forced shutdown");
|
||||
ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
force_system_stop: true,
|
||||
}
|
||||
}
|
||||
|
||||
SignalKind::Term => {
|
||||
info!("SIGTERM received; starting graceful shutdown");
|
||||
ServerCommand::Stop {
|
||||
graceful: true,
|
||||
completion: None,
|
||||
force_system_stop: true,
|
||||
}
|
||||
}
|
||||
|
||||
SignalKind::Quit => {
|
||||
info!("SIGQUIT received; starting forced shutdown");
|
||||
ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
force_system_stop: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ServerEventMultiplexer {
|
||||
cmd_rx: UnboundedReceiver<ServerCommand>,
|
||||
signal_fut: Option<Signals>,
|
||||
}
|
||||
|
||||
impl Stream for ServerEventMultiplexer {
|
||||
type Item = ServerCommand;
|
||||
|
||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let this = Pin::into_inner(self);
|
||||
|
||||
if let Some(signal_fut) = &mut this.signal_fut {
|
||||
if let Poll::Ready(signal) = Pin::new(signal_fut).poll(cx) {
|
||||
this.signal_fut = None;
|
||||
return Poll::Ready(Some(ServerInner::map_signal(signal)));
|
||||
}
|
||||
}
|
||||
|
||||
this.cmd_rx.poll_recv(cx)
|
||||
}
|
||||
}
|
||||
|
@ -1,118 +1,106 @@
|
||||
use std::marker::PhantomData;
|
||||
use std::net::SocketAddr;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use std::{
|
||||
marker::PhantomData,
|
||||
net::SocketAddr,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use actix_rt::spawn;
|
||||
use actix_service::{self as actix, Service, ServiceFactory as ActixServiceFactory};
|
||||
use actix_utils::counter::CounterGuard;
|
||||
use futures_util::future::{err, ok, LocalBoxFuture, Ready};
|
||||
use futures_util::{FutureExt, TryFutureExt};
|
||||
use log::error;
|
||||
use actix_service::{Service, ServiceFactory as BaseServiceFactory};
|
||||
use actix_utils::future::{ready, Ready};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use tracing::error;
|
||||
|
||||
use super::Token;
|
||||
use crate::socket::{FromStream, StdStream};
|
||||
use crate::{
|
||||
socket::{FromStream, MioStream},
|
||||
worker::WorkerCounterGuard,
|
||||
};
|
||||
|
||||
/// Server message
|
||||
pub(crate) enum ServerMessage {
|
||||
/// New stream
|
||||
Connect(StdStream),
|
||||
|
||||
/// Gracefully shutdown
|
||||
Shutdown(Duration),
|
||||
|
||||
/// Force shutdown
|
||||
ForceShutdown,
|
||||
}
|
||||
|
||||
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
|
||||
type Factory: actix::ServiceFactory<Config = (), Request = Stream>;
|
||||
#[doc(hidden)]
|
||||
pub trait ServerServiceFactory<Stream: FromStream>: Send + Clone + 'static {
|
||||
type Factory: BaseServiceFactory<Stream, Config = ()>;
|
||||
|
||||
fn create(&self) -> Self::Factory;
|
||||
}
|
||||
|
||||
pub(crate) trait InternalServiceFactory: Send {
|
||||
fn name(&self, token: Token) -> &str;
|
||||
fn name(&self, token: usize) -> &str;
|
||||
|
||||
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
|
||||
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>;
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>;
|
||||
}
|
||||
|
||||
pub(crate) type BoxedServerService = Box<
|
||||
dyn Service<
|
||||
Request = (Option<CounterGuard>, ServerMessage),
|
||||
(WorkerCounterGuard, MioStream),
|
||||
Response = (),
|
||||
Error = (),
|
||||
Future = Ready<Result<(), ()>>,
|
||||
>,
|
||||
>;
|
||||
|
||||
pub(crate) struct StreamService<T> {
|
||||
service: T,
|
||||
pub(crate) struct StreamService<S, I> {
|
||||
service: S,
|
||||
_phantom: PhantomData<I>,
|
||||
}
|
||||
|
||||
impl<T> StreamService<T> {
|
||||
pub(crate) fn new(service: T) -> Self {
|
||||
StreamService { service }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, I> Service for StreamService<T>
|
||||
where
|
||||
T: Service<Request = I>,
|
||||
T::Future: 'static,
|
||||
T::Error: 'static,
|
||||
I: FromStream,
|
||||
{
|
||||
type Request = (Option<CounterGuard>, ServerMessage);
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type Future = Ready<Result<(), ()>>;
|
||||
|
||||
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.service.poll_ready(ctx).map_err(|_| ())
|
||||
}
|
||||
|
||||
fn call(&mut self, (guard, req): (Option<CounterGuard>, ServerMessage)) -> Self::Future {
|
||||
match req {
|
||||
ServerMessage::Connect(stream) => {
|
||||
let stream = FromStream::from_stdstream(stream).map_err(|e| {
|
||||
error!("Can not convert to an async tcp stream: {}", e);
|
||||
});
|
||||
|
||||
if let Ok(stream) = stream {
|
||||
let f = self.service.call(stream);
|
||||
spawn(async move {
|
||||
let _ = f.await;
|
||||
drop(guard);
|
||||
});
|
||||
ok(())
|
||||
} else {
|
||||
err(())
|
||||
}
|
||||
}
|
||||
_ => ok(()),
|
||||
impl<S, I> StreamService<S, I> {
|
||||
pub(crate) fn new(service: S) -> Self {
|
||||
StreamService {
|
||||
service,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
|
||||
impl<S, I> Service<(WorkerCounterGuard, MioStream)> for StreamService<S, I>
|
||||
where
|
||||
S: Service<I>,
|
||||
S::Future: 'static,
|
||||
S::Error: 'static,
|
||||
I: FromStream,
|
||||
{
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type Future = Ready<Result<(), ()>>;
|
||||
|
||||
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.service.poll_ready(ctx).map_err(|_| ())
|
||||
}
|
||||
|
||||
fn call(&self, (guard, req): (WorkerCounterGuard, MioStream)) -> Self::Future {
|
||||
ready(match FromStream::from_mio(req) {
|
||||
Ok(stream) => {
|
||||
let f = self.service.call(stream);
|
||||
actix_rt::spawn(async move {
|
||||
let _ = f.await;
|
||||
drop(guard);
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
Err(err) => {
|
||||
error!("can not convert to an async TCP stream: {err}");
|
||||
Err(())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct StreamNewService<F: ServerServiceFactory<Io>, Io: FromStream> {
|
||||
name: String,
|
||||
inner: F,
|
||||
token: Token,
|
||||
token: usize,
|
||||
addr: SocketAddr,
|
||||
_t: PhantomData<Io>,
|
||||
}
|
||||
|
||||
impl<F, Io> StreamNewService<F, Io>
|
||||
where
|
||||
F: ServiceFactory<Io>,
|
||||
F: ServerServiceFactory<Io>,
|
||||
Io: FromStream + Send + 'static,
|
||||
{
|
||||
pub(crate) fn create(
|
||||
name: String,
|
||||
token: Token,
|
||||
token: usize,
|
||||
inner: F,
|
||||
addr: SocketAddr,
|
||||
) -> Box<dyn InternalServiceFactory> {
|
||||
@ -128,10 +116,10 @@ where
|
||||
|
||||
impl<F, Io> InternalServiceFactory for StreamNewService<F, Io>
|
||||
where
|
||||
F: ServiceFactory<Io>,
|
||||
F: ServerServiceFactory<Io>,
|
||||
Io: FromStream + Send + 'static,
|
||||
{
|
||||
fn name(&self, _: Token) -> &str {
|
||||
fn name(&self, _: usize) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
@ -145,38 +133,25 @@ where
|
||||
})
|
||||
}
|
||||
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>> {
|
||||
let token = self.token;
|
||||
self.inner
|
||||
.create()
|
||||
.new_service(())
|
||||
.map_err(|_| ())
|
||||
.map_ok(move |inner| {
|
||||
let service: BoxedServerService = Box::new(StreamService::new(inner));
|
||||
vec![(token, service)]
|
||||
})
|
||||
.boxed_local()
|
||||
let fut = self.inner.create().new_service(());
|
||||
Box::pin(async move {
|
||||
match fut.await {
|
||||
Ok(inner) => {
|
||||
let service = Box::new(StreamService::new(inner)) as _;
|
||||
Ok((token, service))
|
||||
}
|
||||
Err(_) => Err(()),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl InternalServiceFactory for Box<dyn InternalServiceFactory> {
|
||||
fn name(&self, token: Token) -> &str {
|
||||
self.as_ref().name(token)
|
||||
}
|
||||
|
||||
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
|
||||
self.as_ref().clone_factory()
|
||||
}
|
||||
|
||||
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
|
||||
self.as_ref().create()
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, T, I> ServiceFactory<I> for F
|
||||
impl<F, T, I> ServerServiceFactory<I> for F
|
||||
where
|
||||
F: Fn() -> T + Send + Clone + 'static,
|
||||
T: actix::ServiceFactory<Config = (), Request = I>,
|
||||
T: BaseServiceFactory<I, Config = ()>,
|
||||
I: FromStream,
|
||||
{
|
||||
type Factory = T;
|
||||
|
@ -1,102 +1,107 @@
|
||||
use std::future::Future;
|
||||
use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{
|
||||
fmt,
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures_util::future::lazy;
|
||||
use tracing::trace;
|
||||
|
||||
use crate::server::Server;
|
||||
|
||||
/// Different types of process signals
|
||||
#[allow(dead_code)]
|
||||
#[derive(PartialEq, Clone, Copy, Debug)]
|
||||
pub(crate) enum Signal {
|
||||
/// SIGHUP
|
||||
Hup,
|
||||
/// SIGINT
|
||||
/// Types of process signals.
|
||||
// #[allow(dead_code)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
#[allow(dead_code)] // variants are never constructed on non-unix
|
||||
pub(crate) enum SignalKind {
|
||||
/// `SIGINT`
|
||||
Int,
|
||||
/// SIGTERM
|
||||
|
||||
/// `SIGTERM`
|
||||
Term,
|
||||
/// SIGQUIT
|
||||
|
||||
/// `SIGQUIT`
|
||||
Quit,
|
||||
}
|
||||
|
||||
impl fmt::Display for SignalKind {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str(match self {
|
||||
SignalKind::Int => "SIGINT",
|
||||
SignalKind::Term => "SIGTERM",
|
||||
SignalKind::Quit => "SIGQUIT",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Process signal listener.
|
||||
pub(crate) struct Signals {
|
||||
srv: Server,
|
||||
#[cfg(not(unix))]
|
||||
stream: Pin<Box<dyn Future<Output = io::Result<()>>>>,
|
||||
signals: futures_core::future::BoxFuture<'static, std::io::Result<()>>,
|
||||
|
||||
#[cfg(unix)]
|
||||
streams: Vec<(Signal, actix_rt::signal::unix::Signal)>,
|
||||
signals: Vec<(SignalKind, actix_rt::signal::unix::Signal)>,
|
||||
}
|
||||
|
||||
impl Signals {
|
||||
pub(crate) fn start(srv: Server) -> io::Result<()> {
|
||||
actix_rt::spawn(lazy(|_| {
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
actix_rt::spawn(Signals {
|
||||
srv,
|
||||
stream: Box::pin(actix_rt::signal::ctrl_c()),
|
||||
});
|
||||
/// Constructs an OS signal listening future.
|
||||
pub(crate) fn new() -> Self {
|
||||
trace!("setting up OS signal listener");
|
||||
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
Signals {
|
||||
signals: Box::pin(actix_rt::signal::ctrl_c()),
|
||||
}
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use actix_rt::signal::unix;
|
||||
}
|
||||
|
||||
let mut streams = Vec::new();
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use actix_rt::signal::unix;
|
||||
|
||||
let sig_map = [
|
||||
(unix::SignalKind::interrupt(), Signal::Int),
|
||||
(unix::SignalKind::hangup(), Signal::Hup),
|
||||
(unix::SignalKind::terminate(), Signal::Term),
|
||||
(unix::SignalKind::quit(), Signal::Quit),
|
||||
];
|
||||
let sig_map = [
|
||||
(unix::SignalKind::interrupt(), SignalKind::Int),
|
||||
(unix::SignalKind::terminate(), SignalKind::Term),
|
||||
(unix::SignalKind::quit(), SignalKind::Quit),
|
||||
];
|
||||
|
||||
for (kind, sig) in sig_map.iter() {
|
||||
match unix::signal(*kind) {
|
||||
Ok(stream) => streams.push((*sig, stream)),
|
||||
Err(e) => log::error!(
|
||||
"Can not initialize stream handler for {:?} err: {}",
|
||||
sig,
|
||||
e
|
||||
),
|
||||
}
|
||||
}
|
||||
let signals = sig_map
|
||||
.iter()
|
||||
.filter_map(|(kind, sig)| {
|
||||
unix::signal(*kind)
|
||||
.map(|tokio_sig| (*sig, tokio_sig))
|
||||
.map_err(|e| {
|
||||
tracing::error!(
|
||||
"can not initialize stream handler for {:?} err: {}",
|
||||
sig,
|
||||
e
|
||||
)
|
||||
})
|
||||
.ok()
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
actix_rt::spawn(Signals { srv, streams })
|
||||
}
|
||||
}));
|
||||
|
||||
Ok(())
|
||||
Signals { signals }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for Signals {
|
||||
type Output = ();
|
||||
type Output = SignalKind;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
#[cfg(not(unix))]
|
||||
match Pin::new(&mut self.stream).poll(cx) {
|
||||
Poll::Ready(_) => {
|
||||
self.srv.signal(Signal::Int);
|
||||
Poll::Ready(())
|
||||
}
|
||||
Poll::Pending => return Poll::Pending,
|
||||
{
|
||||
self.signals.as_mut().poll(cx).map(|_| SignalKind::Int)
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
for idx in 0..self.streams.len() {
|
||||
loop {
|
||||
match self.streams[idx].1.poll_recv(cx) {
|
||||
Poll::Ready(None) => return Poll::Ready(()),
|
||||
Poll::Pending => break,
|
||||
Poll::Ready(Some(_)) => {
|
||||
let sig = self.streams[idx].0;
|
||||
self.srv.signal(sig);
|
||||
}
|
||||
}
|
||||
for (sig, fut) in self.signals.iter_mut() {
|
||||
if fut.poll_recv(cx).is_ready() {
|
||||
trace!("{} received", sig);
|
||||
return Poll::Ready(*sig);
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
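Applications that opt out of this built-in listener via `ServerBuilder::disable_signals()` can reproduce part of its behaviour themselves. A hedged sketch, assuming a `Server` built as in the earlier example and that `actix_rt::signal` re-exports Tokio's signal API (the `run_with_ctrl_c` helper is illustrative, not part of this diff):

```rust
use actix_server::Server;

/// Illustrative helper: translate Ctrl-C into a graceful stop for a server
/// that was built with `.disable_signals()`.
async fn run_with_ctrl_c(server: Server) -> std::io::Result<()> {
    let handle = server.handle();

    actix_rt::spawn(async move {
        // `actix_rt::signal` re-exports Tokio's signal handling.
        actix_rt::signal::ctrl_c().await.expect("install Ctrl-C handler");

        // Mirror the SIGTERM branch of `map_signal`: request a graceful shutdown.
        handle.stop(true).await;
    });

    server.await
}
```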
@ -1,135 +1,81 @@
|
||||
use std::{fmt, io, net};
|
||||
pub(crate) use std::net::{
|
||||
SocketAddr as StdSocketAddr, TcpListener as StdTcpListener, ToSocketAddrs,
|
||||
};
|
||||
use std::{fmt, io};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_rt::net::TcpStream;
|
||||
pub(crate) use mio::net::TcpListener as MioTcpListener;
|
||||
use mio::{event::Source, Interest, Registry, Token};
|
||||
#[cfg(unix)]
|
||||
pub(crate) use {
|
||||
mio::net::UnixListener as MioUnixListener, std::os::unix::net::UnixListener as StdUnixListener,
|
||||
};
|
||||
|
||||
pub(crate) enum StdListener {
|
||||
Tcp(net::TcpListener),
|
||||
#[cfg(all(unix))]
|
||||
Uds(std::os::unix::net::UnixListener),
|
||||
use crate::builder::MpTcp;
|
||||
|
||||
pub(crate) enum MioListener {
|
||||
Tcp(MioTcpListener),
|
||||
#[cfg(unix)]
|
||||
Uds(MioUnixListener),
|
||||
}
|
||||
|
||||
pub(crate) enum SocketAddr {
|
||||
Tcp(net::SocketAddr),
|
||||
#[cfg(all(unix))]
|
||||
Uds(std::os::unix::net::SocketAddr),
|
||||
}
|
||||
|
||||
impl fmt::Display for SocketAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
|
||||
#[cfg(all(unix))]
|
||||
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for SocketAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
|
||||
#[cfg(all(unix))]
|
||||
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for StdListener {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
StdListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
|
||||
#[cfg(all(unix))]
|
||||
StdListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StdListener {
|
||||
impl MioListener {
|
||||
pub(crate) fn local_addr(&self) -> SocketAddr {
|
||||
match self {
|
||||
StdListener::Tcp(lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
|
||||
#[cfg(all(unix))]
|
||||
StdListener::Uds(lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn into_listener(self) -> SocketListener {
|
||||
match self {
|
||||
StdListener::Tcp(lst) => SocketListener::Tcp(
|
||||
mio::net::TcpListener::from_std(lst)
|
||||
.expect("Can not create mio::net::TcpListener"),
|
||||
),
|
||||
#[cfg(all(unix))]
|
||||
StdListener::Uds(lst) => SocketListener::Uds(
|
||||
mio_uds::UnixListener::from_listener(lst)
|
||||
.expect("Can not create mio_uds::UnixListener"),
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum StdStream {
|
||||
Tcp(std::net::TcpStream),
|
||||
#[cfg(all(unix))]
|
||||
Uds(std::os::unix::net::UnixStream),
|
||||
}
|
||||
|
||||
pub(crate) enum SocketListener {
|
||||
Tcp(mio::net::TcpListener),
|
||||
#[cfg(all(unix))]
|
||||
Uds(mio_uds::UnixListener),
|
||||
}
|
||||
|
||||
impl SocketListener {
|
||||
pub(crate) fn accept(&self) -> io::Result<Option<(StdStream, SocketAddr)>> {
|
||||
match *self {
|
||||
SocketListener::Tcp(ref lst) => lst
|
||||
.accept_std()
|
||||
.map(|(stream, addr)| Some((StdStream::Tcp(stream), SocketAddr::Tcp(addr)))),
|
||||
#[cfg(all(unix))]
|
||||
SocketListener::Uds(ref lst) => lst.accept_std().map(|res| {
|
||||
res.map(|(stream, addr)| (StdStream::Uds(stream), SocketAddr::Uds(addr)))
|
||||
}),
|
||||
MioListener::Tcp(ref lst) => lst
|
||||
.local_addr()
|
||||
.map(SocketAddr::Tcp)
|
||||
.unwrap_or(SocketAddr::Unknown),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref lst) => lst
|
||||
.local_addr()
|
||||
.map(SocketAddr::Uds)
|
||||
.unwrap_or(SocketAddr::Unknown),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn accept(&self) -> io::Result<MioStream> {
|
||||
match *self {
|
||||
MioListener::Tcp(ref lst) => lst.accept().map(|(stream, _)| MioStream::Tcp(stream)),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref lst) => lst.accept().map(|(stream, _)| MioStream::Uds(stream)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl mio::Evented for SocketListener {
|
||||
impl Source for MioListener {
|
||||
fn register(
|
||||
&self,
|
||||
poll: &mio::Poll,
|
||||
token: mio::Token,
|
||||
interest: mio::Ready,
|
||||
opts: mio::PollOpt,
|
||||
&mut self,
|
||||
registry: &Registry,
|
||||
token: Token,
|
||||
interests: Interest,
|
||||
) -> io::Result<()> {
|
||||
match *self {
|
||||
SocketListener::Tcp(ref lst) => lst.register(poll, token, interest, opts),
|
||||
#[cfg(all(unix))]
|
||||
SocketListener::Uds(ref lst) => lst.register(poll, token, interest, opts),
|
||||
MioListener::Tcp(ref mut lst) => lst.register(registry, token, interests),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref mut lst) => lst.register(registry, token, interests),
|
||||
}
|
||||
}
|
||||
|
||||
fn reregister(
|
||||
&self,
|
||||
poll: &mio::Poll,
|
||||
token: mio::Token,
|
||||
interest: mio::Ready,
|
||||
opts: mio::PollOpt,
|
||||
&mut self,
|
||||
registry: &Registry,
|
||||
token: Token,
|
||||
interests: Interest,
|
||||
) -> io::Result<()> {
|
||||
match *self {
|
||||
SocketListener::Tcp(ref lst) => lst.reregister(poll, token, interest, opts),
|
||||
#[cfg(all(unix))]
|
||||
SocketListener::Uds(ref lst) => lst.reregister(poll, token, interest, opts),
|
||||
MioListener::Tcp(ref mut lst) => lst.reregister(registry, token, interests),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref mut lst) => lst.reregister(registry, token, interests),
|
||||
}
|
||||
}
|
||||
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
|
||||
|
||||
fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
|
||||
match *self {
|
||||
SocketListener::Tcp(ref lst) => lst.deregister(poll),
|
||||
#[cfg(all(unix))]
|
||||
SocketListener::Uds(ref lst) => {
|
||||
let res = lst.deregister(poll);
|
||||
MioListener::Tcp(ref mut lst) => lst.deregister(registry),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref mut lst) => {
|
||||
let res = lst.deregister(registry);
|
||||
|
||||
// cleanup file path
|
||||
if let Ok(addr) = lst.local_addr() {
|
||||
@ -143,28 +89,206 @@ impl mio::Evented for SocketListener {
|
||||
}
|
||||
}
|
||||
|
||||
pub trait FromStream: AsyncRead + AsyncWrite + Sized {
|
||||
fn from_stdstream(sock: StdStream) -> io::Result<Self>;
|
||||
impl From<StdTcpListener> for MioListener {
|
||||
fn from(lst: StdTcpListener) -> Self {
|
||||
MioListener::Tcp(MioTcpListener::from_std(lst))
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStream for TcpStream {
|
||||
fn from_stdstream(sock: StdStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
StdStream::Tcp(stream) => TcpStream::from_std(stream),
|
||||
#[cfg(all(unix))]
|
||||
StdStream::Uds(_) => {
|
||||
panic!("Should not happen, bug in server impl");
|
||||
#[cfg(unix)]
|
||||
impl From<StdUnixListener> for MioListener {
|
||||
fn from(lst: StdUnixListener) -> Self {
|
||||
MioListener::Uds(MioUnixListener::from_std(lst))
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for MioListener {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for MioListener {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
|
||||
#[cfg(unix)]
|
||||
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) enum SocketAddr {
|
||||
Unknown,
|
||||
Tcp(StdSocketAddr),
|
||||
#[cfg(unix)]
|
||||
Uds(std::os::unix::net::SocketAddr),
|
||||
}
|
||||
|
||||
impl fmt::Display for SocketAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
Self::Unknown => write!(f, "Unknown SocketAddr"),
|
||||
Self::Tcp(ref addr) => write!(f, "{}", addr),
|
||||
#[cfg(unix)]
|
||||
Self::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for SocketAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
Self::Unknown => write!(f, "Unknown SocketAddr"),
|
||||
Self::Tcp(ref addr) => write!(f, "{:?}", addr),
|
||||
#[cfg(unix)]
|
||||
Self::Uds(ref addr) => write!(f, "{:?}", addr),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum MioStream {
|
||||
Tcp(mio::net::TcpStream),
|
||||
#[cfg(unix)]
|
||||
Uds(mio::net::UnixStream),
|
||||
}
|
||||
|
||||
/// Helper trait for converting a Mio stream into a Tokio stream.
|
||||
pub trait FromStream: Sized {
|
||||
/// Creates stream from a `mio` stream.
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self>;
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
mod win_impl {
|
||||
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
|
||||
|
||||
use super::*;
|
||||
|
||||
// TODO: This is a workaround and we need an efficient way to convert between Mio and Tokio stream
|
||||
impl FromStream for TcpStream {
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
MioStream::Tcp(mio) => {
|
||||
let raw = IntoRawSocket::into_raw_socket(mio);
|
||||
// SAFETY: This is an in-place conversion from Mio stream to Tokio stream.
|
||||
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(unix))]
|
||||
impl FromStream for actix_rt::net::UnixStream {
|
||||
fn from_stdstream(sock: StdStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
StdStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
|
||||
StdStream::Uds(stream) => actix_rt::net::UnixStream::from_std(stream),
|
||||
#[cfg(unix)]
|
||||
mod unix_impl {
|
||||
use std::os::unix::io::{FromRawFd, IntoRawFd};
|
||||
|
||||
use actix_rt::net::UnixStream;
|
||||
|
||||
use super::*;
|
||||
|
||||
// HACK: This is a workaround and we need an efficient way to convert between Mio and Tokio stream
|
||||
impl FromStream for TcpStream {
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
MioStream::Tcp(mio) => {
|
||||
let raw = IntoRawFd::into_raw_fd(mio);
|
||||
// SAFETY: This is an in-place conversion from Mio stream to Tokio stream.
|
||||
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
|
||||
}
|
||||
MioStream::Uds(_) => {
|
||||
panic!("Should not happen, bug in server impl");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// HACK: This is a workaround and we need an efficient way to convert between Mio and Tokio stream
|
||||
impl FromStream for UnixStream {
|
||||
fn from_mio(sock: MioStream) -> io::Result<Self> {
|
||||
match sock {
|
||||
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
|
||||
MioStream::Uds(mio) => {
|
||||
let raw = IntoRawFd::into_raw_fd(mio);
|
||||
// SAFETY: This is an in-place conversion from Mio stream to Tokio stream.
|
||||
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn create_mio_tcp_listener(
|
||||
addr: StdSocketAddr,
|
||||
backlog: u32,
|
||||
mptcp: &MpTcp,
|
||||
) -> io::Result<MioTcpListener> {
|
||||
use socket2::{Domain, Protocol, Socket, Type};
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
let protocol = Protocol::TCP;
|
||||
#[cfg(target_os = "linux")]
|
||||
let protocol = if matches!(mptcp, MpTcp::Disabled) {
|
||||
Protocol::TCP
|
||||
} else {
|
||||
Protocol::MPTCP
|
||||
};
|
||||
|
||||
let socket = match Socket::new(Domain::for_address(addr), Type::STREAM, Some(protocol)) {
|
||||
Ok(sock) => sock,
|
||||
|
||||
Err(err) if matches!(mptcp, MpTcp::TcpFallback) => {
|
||||
tracing::warn!("binding socket as MPTCP failed: {err}");
|
||||
tracing::warn!("falling back to TCP");
|
||||
Socket::new(Domain::for_address(addr), Type::STREAM, Some(Protocol::TCP))?
|
||||
}
|
||||
|
||||
Err(err) => return Err(err),
|
||||
};
|
||||
|
||||
socket.set_reuse_address(true)?;
|
||||
socket.set_nonblocking(true)?;
|
||||
socket.bind(&addr.into())?;
|
||||
socket.listen(backlog as i32)?;
|
||||
|
||||
Ok(MioTcpListener::from_std(StdTcpListener::from(socket)))
|
||||
}
|
||||
|
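The same socket2 recipe used by `create_mio_tcp_listener` also works for callers that want to prepare a standard-library listener themselves and hand it to `ServerBuilder::listen`. A hedged sketch (the "custom-socket" service name, address, and backlog value are illustrative):

```rust
use std::{io, net};

use actix_server::Server;
use actix_service::fn_service;
use socket2::{Domain, Protocol, Socket, Type};

#[actix_rt::main]
async fn main() -> io::Result<()> {
    let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap();

    // Same pattern as `create_mio_tcp_listener`, but producing a std listener.
    let socket = Socket::new(Domain::for_address(addr), Type::STREAM, Some(Protocol::TCP))?;
    socket.set_reuse_address(true)?;
    socket.set_nonblocking(true)?;
    socket.bind(&addr.into())?;
    socket.listen(1024)?;
    let listener = net::TcpListener::from(socket);

    Server::build()
        .listen("custom-socket", listener, || {
            fn_service(|_stream: actix_rt::net::TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run()
        .await
}
```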
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn socket_addr() {
|
||||
let addr = SocketAddr::Tcp("127.0.0.1:8080".parse().unwrap());
|
||||
assert!(format!("{:?}", addr).contains("127.0.0.1:8080"));
|
||||
assert_eq!(format!("{}", addr), "127.0.0.1:8080");
|
||||
|
||||
let addr: StdSocketAddr = "127.0.0.1:0".parse().unwrap();
|
||||
let lst = create_mio_tcp_listener(addr, 128, &MpTcp::Disabled).unwrap();
|
||||
let lst = MioListener::Tcp(lst);
|
||||
assert!(format!("{:?}", lst).contains("TcpListener"));
|
||||
assert!(format!("{}", lst).contains("127.0.0.1"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn uds() {
|
||||
let _ = std::fs::remove_file("/tmp/sock.xxxxx");
|
||||
if let Ok(socket) = MioUnixListener::bind("/tmp/sock.xxxxx") {
|
||||
let addr = socket.local_addr().expect("Couldn't get local address");
|
||||
let a = SocketAddr::Uds(addr);
|
||||
assert!(format!("{:?}", a).contains("/tmp/sock.xxxxx"));
|
||||
assert!(format!("{}", a).contains("/tmp/sock.xxxxx"));
|
||||
|
||||
let lst = MioListener::Uds(socket);
|
||||
assert!(format!("{:?}", lst).contains("/tmp/sock.xxxxx"));
|
||||
assert!(format!("{}", lst).contains("/tmp/sock.xxxxx"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
actix-server/src/test_server.rs (new file, 155 lines)
@ -0,0 +1,155 @@
|
||||
use std::{io, net, sync::mpsc, thread};
|
||||
|
||||
use actix_rt::{net::TcpStream, System};
|
||||
|
||||
use crate::{Server, ServerBuilder, ServerHandle, ServerServiceFactory};
|
||||
|
||||
/// A testing server.
|
||||
///
|
||||
/// `TestServer` is a very simple test server that simplifies the process of writing integration tests for
|
||||
/// network applications.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use actix_service::fn_service;
|
||||
/// use actix_server::TestServer;
|
||||
///
|
||||
/// #[actix_rt::main]
|
||||
/// async fn main() {
|
||||
/// let srv = TestServer::start(|| fn_service(
|
||||
/// |sock| async move {
|
||||
/// println!("New connection: {:?}", sock);
|
||||
/// Ok::<_, ()>(())
|
||||
/// }
|
||||
/// ));
|
||||
///
|
||||
/// println!("SOCKET: {:?}", srv.connect());
|
||||
/// }
|
||||
/// ```
|
||||
pub struct TestServer;
|
||||
|
||||
/// Test server handle.
|
||||
pub struct TestServerHandle {
|
||||
addr: net::SocketAddr,
|
||||
host: String,
|
||||
port: u16,
|
||||
server_handle: ServerHandle,
|
||||
thread_handle: Option<thread::JoinHandle<io::Result<()>>>,
|
||||
}
|
||||
|
||||
impl TestServer {
|
||||
/// Start a new `TestServer` using the application factory and default server config.
|
||||
pub fn start(factory: impl ServerServiceFactory<TcpStream>) -> TestServerHandle {
|
||||
Self::start_with_builder(Server::build(), factory)
|
||||
}
|
||||
|
||||
/// Start a new `TestServer` using the application factory and a server builder.
|
||||
pub fn start_with_builder(
|
||||
server_builder: ServerBuilder,
|
||||
factory: impl ServerServiceFactory<TcpStream>,
|
||||
) -> TestServerHandle {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
// run server in separate thread
|
||||
let thread_handle = thread::spawn(move || {
|
||||
let lst = net::TcpListener::bind("127.0.0.1:0").unwrap();
|
||||
let local_addr = lst.local_addr().unwrap();
|
||||
|
||||
System::new().block_on(async {
|
||||
let server = server_builder
|
||||
.listen("test", lst, factory)
|
||||
.unwrap()
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.run();
|
||||
|
||||
tx.send((server.handle(), local_addr)).unwrap();
|
||||
server.await
|
||||
})
|
||||
});
|
||||
|
||||
let (server_handle, addr) = rx.recv().unwrap();
|
||||
|
||||
let host = format!("{}", addr.ip());
|
||||
let port = addr.port();
|
||||
|
||||
TestServerHandle {
|
||||
addr,
|
||||
host,
|
||||
port,
|
||||
server_handle,
|
||||
thread_handle: Some(thread_handle),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the first available unused local address.
|
||||
pub fn unused_addr() -> net::SocketAddr {
|
||||
use socket2::{Domain, Protocol, Socket, Type};
|
||||
|
||||
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
|
||||
let domain = Domain::for_address(addr);
|
||||
let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)).unwrap();
|
||||
|
||||
socket.set_reuse_address(true).unwrap();
|
||||
socket.set_nonblocking(true).unwrap();
|
||||
socket.bind(&addr.into()).unwrap();
|
||||
socket.listen(1024).unwrap();
|
||||
|
||||
net::TcpListener::from(socket).local_addr().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl TestServerHandle {
|
||||
/// Test server host.
|
||||
pub fn host(&self) -> &str {
|
||||
&self.host
|
||||
}
|
||||
|
||||
/// Test server port.
|
||||
pub fn port(&self) -> u16 {
|
||||
self.port
|
||||
}
|
||||
|
||||
/// Get test server address.
|
||||
pub fn addr(&self) -> net::SocketAddr {
|
||||
self.addr
|
||||
}
|
||||
|
||||
/// Stop server.
|
||||
fn stop(&mut self) {
|
||||
drop(self.server_handle.stop(false));
|
||||
self.thread_handle.take().unwrap().join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
/// Connect to server, returning a Tokio `TcpStream`.
|
||||
pub fn connect(&self) -> io::Result<TcpStream> {
|
||||
let stream = net::TcpStream::connect(self.addr)?;
|
||||
stream.set_nonblocking(true)?;
|
||||
TcpStream::from_std(stream)
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TestServerHandle {
|
||||
fn drop(&mut self) {
|
||||
self.stop()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use actix_service::fn_service;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn connect_in_tokio_runtime() {
|
||||
let srv = TestServer::start(|| fn_service(|_sock| async move { Ok::<_, ()>(()) }));
|
||||
assert!(srv.connect().is_ok());
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn connect_in_actix_runtime() {
|
||||
let srv = TestServer::start(|| fn_service(|_sock| async move { Ok::<_, ()>(()) }));
|
||||
assert!(srv.connect().is_ok());
|
||||
}
|
||||
}
|
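Beyond the connectivity checks in the module's own tests, `TestServer` can also back tests that exercise real I/O through `connect()`. A hedged sketch (the `reads_greeting` test name and the greeting payload are illustrative):

```rust
use actix_rt::net::TcpStream;
use actix_server::TestServer;
use actix_service::fn_service;
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};

#[actix_rt::test]
async fn reads_greeting() {
    // Single-worker test server that writes a greeting and closes the connection.
    let srv = TestServer::start(|| {
        fn_service(|mut stream: TcpStream| async move {
            stream.write_all(b"hello").await.map_err(|_| ())?;
            Ok::<_, ()>(())
        })
    });

    let mut conn = srv.connect().expect("connect to test server");

    let mut buf = [0u8; 5];
    conn.read_exact(&mut buf).await.expect("read greeting");
    assert_eq!(&buf, b"hello");

    // Dropping `srv` stops the server thread; see `Drop for TestServerHandle`.
}
```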
actix-server/src/waker_queue.rs (new file, 84 lines)
@ -0,0 +1,84 @@
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
ops::Deref,
|
||||
sync::{Arc, Mutex, MutexGuard},
|
||||
};
|
||||
|
||||
use mio::{Registry, Token as MioToken, Waker};
|
||||
|
||||
use crate::worker::WorkerHandleAccept;
|
||||
|
||||
/// Waker token for `mio::Poll` instance.
|
||||
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);
|
||||
|
||||
/// A `mio::Waker` with a queue, used to wake up `Accept`'s `Poll` and carry the `WakerInterest`s
|
||||
/// that the `Poll` should look into.
|
||||
pub(crate) struct WakerQueue(Arc<(Waker, Mutex<VecDeque<WakerInterest>>)>);
|
||||
|
||||
impl Clone for WakerQueue {
|
||||
fn clone(&self) -> Self {
|
||||
Self(self.0.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for WakerQueue {
|
||||
type Target = (Waker, Mutex<VecDeque<WakerInterest>>);
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
self.0.deref()
|
||||
}
|
||||
}
|
||||
|
||||
impl WakerQueue {
|
||||
/// Construct a waker queue with the given `Poll`'s `Registry`.
|
||||
///
|
||||
/// A fixed `WAKER_TOKEN` is used to identify the wake interest and the `Poll` needs to match
|
||||
/// the event's token for it to properly handle `WakerInterest`.
|
||||
pub(crate) fn new(registry: &Registry) -> std::io::Result<Self> {
|
||||
let waker = Waker::new(registry, WAKER_TOKEN)?;
|
||||
let queue = Mutex::new(VecDeque::with_capacity(16));
|
||||
|
||||
Ok(Self(Arc::new((waker, queue))))
|
||||
}
|
||||
|
||||
/// Push a new interest to the queue and wake up the accept poll afterwards.
|
||||
pub(crate) fn wake(&self, interest: WakerInterest) {
|
||||
let (waker, queue) = self.deref();
|
||||
|
||||
queue
|
||||
.lock()
|
||||
.expect("Failed to lock WakerQueue")
|
||||
.push_back(interest);
|
||||
|
||||
waker
|
||||
.wake()
|
||||
.unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e));
|
||||
}
|
||||
|
||||
/// Get a MutexGuard of the waker queue.
|
||||
pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
|
||||
self.deref().1.lock().expect("Failed to lock WakerQueue")
|
||||
}
|
||||
|
||||
/// Reset the waker queue so it does not grow infinitely.
|
||||
pub(crate) fn reset(queue: &mut VecDeque<WakerInterest>) {
|
||||
std::mem::swap(&mut VecDeque::<WakerInterest>::with_capacity(16), queue);
|
||||
}
|
||||
}
|
||||
|
||||
/// Types of interests we would look into when `Accept`'s `Poll` is woken up by the waker.
|
||||
///
|
||||
/// These interests should not be confused with `mio::Interest`; they are mostly not I/O related.
|
||||
pub(crate) enum WakerInterest {
|
||||
/// `WorkerAvailable` is an interest from a `Worker` notifying `Accept` that there is a worker
|
||||
/// available that can accept new tasks.
|
||||
WorkerAvailable(usize),
|
||||
/// The `Pause`, `Resume`, and `Stop` interests come from the `ServerBuilder` future. It listens to
|
||||
/// `ServerCommand` and notifies `Accept` to do exactly these tasks.
|
||||
Pause,
|
||||
Resume,
|
||||
Stop,
|
||||
/// `Worker` is an interest that is triggered after a worker faults. This is determined by
|
||||
/// trying to send work to it. `Accept` is woken up to add the new `WorkerHandleAccept`.
|
||||
Worker(WorkerHandleAccept),
|
||||
}
|
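The consumer of this queue is `Accept`'s poll loop, referenced throughout these comments but not shown here. The producer/consumer pattern itself can be shown standalone with `mio`'s public API: push an interest, wake the poll, and drain the queue when the waker token fires. A hedged sketch, assuming the `mio` crate with its `os-poll` and `os-ext` features enabled (the `Interest` enum below is only an illustrative stand-in for the `WakerInterest` above):

```rust
use std::{
    collections::VecDeque,
    sync::{Arc, Mutex},
    thread,
    time::Duration,
};

use mio::{Events, Poll, Token, Waker};

// Illustrative stand-in for the crate's `WakerInterest`.
#[derive(Debug)]
enum Interest {
    Pause,
    Resume,
    Stop,
}

const WAKER_TOKEN: Token = Token(usize::MAX);

fn main() -> std::io::Result<()> {
    let mut poll = Poll::new()?;
    let waker = Arc::new(Waker::new(poll.registry(), WAKER_TOKEN)?);
    let queue = Arc::new(Mutex::new(VecDeque::<Interest>::new()));

    // Producer side: push an interest, then wake the poll loop (same order as
    // `WakerQueue::wake` above).
    {
        let waker = Arc::clone(&waker);
        let queue = Arc::clone(&queue);
        thread::spawn(move || {
            for interest in [Interest::Pause, Interest::Resume, Interest::Stop] {
                queue.lock().unwrap().push_back(interest);
                waker.wake().unwrap();
                thread::sleep(Duration::from_millis(50));
            }
        });
    }

    // Consumer side: drain the queue whenever the waker token shows up.
    let mut events = Events::with_capacity(8);
    loop {
        poll.poll(&mut events, None)?;

        for event in events.iter() {
            if event.token() == WAKER_TOKEN {
                let mut guard = queue.lock().unwrap();
                while let Some(interest) = guard.pop_front() {
                    println!("handling {:?}", interest);
                    if matches!(interest, Interest::Stop) {
                        return Ok(());
                    }
                }
            }
        }
    }
}
```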
(File diff suppressed because it is too large.)
actix-server/tests/server.rs (new file, 537 lines)
@ -0,0 +1,537 @@
|
||||
#![allow(clippy::let_underscore_future, missing_docs)]
|
||||
|
||||
use std::{
|
||||
net,
|
||||
sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
mpsc, Arc,
|
||||
},
|
||||
thread,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use actix_rt::{net::TcpStream, time::sleep};
|
||||
use actix_server::{Server, TestServer};
|
||||
use actix_service::fn_service;
|
||||
|
||||
fn unused_addr() -> net::SocketAddr {
|
||||
TestServer::unused_addr()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bind() {
|
||||
let addr = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
actix_rt::System::new().block_on(async {
|
||||
let srv = Server::build()
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.shutdown_timeout(3600)
|
||||
.bind("test", addr, move || {
|
||||
fn_service(|_| async { Ok::<_, ()>(()) })
|
||||
})?
|
||||
.run();
|
||||
|
||||
tx.send(srv.handle()).unwrap();
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
let srv = rx.recv().unwrap();
|
||||
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
|
||||
net::TcpStream::connect(addr).unwrap();
|
||||
|
||||
let _ = srv.stop(true);
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_listen() {
|
||||
let addr = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let lst = net::TcpListener::bind(addr).unwrap();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
actix_rt::System::new().block_on(async {
|
||||
let srv = Server::build()
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.shutdown_timeout(3600)
|
||||
.listen("test", lst, move || {
|
||||
fn_service(|_| async { Ok::<_, ()>(()) })
|
||||
})?
|
||||
.run();
|
||||
|
||||
tx.send(srv.handle()).unwrap();
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
let srv = rx.recv().unwrap();
|
||||
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
|
||||
net::TcpStream::connect(addr).unwrap();
|
||||
|
||||
let _ = srv.stop(true);
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn plain_tokio_runtime() {
|
||||
let addr = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
let rt = tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
rt.block_on(async {
|
||||
let srv = Server::build()
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.bind("test", addr, move || {
|
||||
fn_service(|_| async { Ok::<_, ()>(()) })
|
||||
})?
|
||||
.run();
|
||||
|
||||
tx.send(srv.handle()).unwrap();
|
||||
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
let srv = rx.recv().unwrap();
|
||||
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
|
||||
let _ = srv.stop(true);
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg(unix)]
|
||||
fn test_start() {
|
||||
use std::io::Read;
|
||||
|
||||
use actix_codec::{BytesCodec, Framed};
|
||||
use bytes::Bytes;
|
||||
use futures_util::sink::SinkExt;
|
||||
|
||||
let addr = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
actix_rt::System::new().block_on(async {
|
||||
let srv = Server::build()
|
||||
.backlog(100)
|
||||
.disable_signals()
|
||||
.bind("test", addr, move || {
|
||||
fn_service(|io: TcpStream| async move {
|
||||
let mut f = Framed::new(io, BytesCodec);
|
||||
f.send(Bytes::from_static(b"test")).await.unwrap();
|
||||
Ok::<_, ()>(())
|
||||
})
|
||||
})?
|
||||
.run();
|
||||
|
||||
let _ = tx.send((srv.handle(), actix_rt::System::current()));
|
||||
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
let (srv, sys) = rx.recv().unwrap();
|
||||
|
||||
let mut buf = [1u8; 4];
|
||||
let mut conn = net::TcpStream::connect(addr).unwrap();
|
||||
let _ = conn.read_exact(&mut buf);
|
||||
assert_eq!(buf, b"test"[..]);
|
||||
|
||||
// pause
|
||||
let _ = srv.pause();
|
||||
thread::sleep(Duration::from_millis(200));
|
||||
let mut conn = net::TcpStream::connect(addr).unwrap();
|
||||
conn.set_read_timeout(Some(Duration::from_millis(100)))
|
||||
.unwrap();
|
||||
let res = conn.read_exact(&mut buf);
|
||||
assert!(res.is_err());
|
||||
|
||||
// resume
|
||||
let _ = srv.resume();
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
assert!(net::TcpStream::connect(addr).is_ok());
|
||||
|
||||
let mut buf = [0u8; 4];
|
||||
let mut conn = net::TcpStream::connect(addr).unwrap();
|
||||
let _ = conn.read_exact(&mut buf);
|
||||
assert_eq!(buf, b"test"[..]);
|
||||
|
||||
// stop
|
||||
let _ = srv.stop(false);
|
||||
sys.stop();
|
||||
h.join().unwrap().unwrap();
|
||||
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
assert!(net::TcpStream::connect(addr).is_err());
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_max_concurrent_connections() {
|
||||
// Note:
|
||||
// A TCP listener accepts connections based on its backlog setting.
|
||||
//
|
||||
// This test, on the other hand, only limits the number of concurrent TCP streams that a
|
||||
// worker thread will accept.
|
||||
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
let addr = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
let counter = Arc::new(AtomicUsize::new(0));
|
||||
let counter_clone = counter.clone();
|
||||
|
||||
let max_conn = 3;
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
actix_rt::System::new().block_on(async {
|
||||
let srv = Server::build()
|
||||
// Set a relatively higher backlog.
|
||||
.backlog(12)
|
||||
// The max connection count per worker is 3.
|
||||
.max_concurrent_connections(max_conn)
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.bind("test", addr, move || {
|
||||
let counter = counter.clone();
|
||||
fn_service(move |_io: TcpStream| {
|
||||
let counter = counter.clone();
|
||||
async move {
|
||||
counter.fetch_add(1, Ordering::SeqCst);
|
||||
sleep(Duration::from_secs(20)).await;
|
||||
counter.fetch_sub(1, Ordering::SeqCst);
|
||||
Ok::<(), ()>(())
|
||||
}
|
||||
})
|
||||
})?
|
||||
.run();
|
||||
|
||||
let _ = tx.send((srv.handle(), actix_rt::System::current()));
|
||||
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
let (srv, sys) = rx.recv().unwrap();
|
||||
|
||||
let mut conns = vec![];
|
||||
|
||||
for _ in 0..12 {
|
||||
let conn = tokio::net::TcpStream::connect(addr).await.unwrap();
|
||||
conns.push(conn);
|
||||
}
|
||||
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
|
||||
// The counter remains at 3 even with 12 successful connections;
|
||||
// the other 9 remain in the backlog.
|
||||
assert_eq!(max_conn, counter_clone.load(Ordering::SeqCst));
|
||||
|
||||
for mut conn in conns {
|
||||
conn.shutdown().await.unwrap();
|
||||
}
|
||||
|
||||
srv.stop(false).await;
|
||||
sys.stop();
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
// TODO: race-y failures detected due to integer underflow when calling Counter::total
|
||||
#[actix_rt::test]
|
||||
async fn test_service_restart() {
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_service::{fn_factory, Service};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
struct TestService(Arc<AtomicUsize>);
|
||||
|
||||
impl Service<TcpStream> for TestService {
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
let TestService(ref counter) = self;
|
||||
let c = counter.fetch_add(1, Ordering::SeqCst);
|
||||
// Force the service to restart on first readiness check.
|
||||
if c > 0 {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Poll::Ready(Err(()))
|
||||
}
|
||||
}
|
||||
|
||||
fn call(&self, _: TcpStream) -> Self::Future {
|
||||
Box::pin(async { Ok(()) })
|
||||
}
|
||||
}
|
||||
|
||||
let addr1 = unused_addr();
|
||||
let addr2 = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let num = Arc::new(AtomicUsize::new(0));
|
||||
let num2 = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
let num_clone = num.clone();
|
||||
let num2_clone = num2.clone();
|
||||
|
||||
let h = thread::spawn(move || {
|
||||
let num = num.clone();
|
||||
actix_rt::System::new().block_on(async {
|
||||
let srv = Server::build()
|
||||
.backlog(1)
|
||||
.disable_signals()
|
||||
.bind("addr1", addr1, move || {
|
||||
let num = num.clone();
|
||||
fn_factory(move || {
|
||||
let num = num.clone();
|
||||
async move { Ok::<_, ()>(TestService(num)) }
|
||||
})
|
||||
})?
|
||||
.bind("addr2", addr2, move || {
|
||||
let num2 = num2.clone();
|
||||
fn_factory(move || {
|
||||
let num2 = num2.clone();
|
||||
async move { Ok::<_, ()>(TestService(num2)) }
|
||||
})
|
||||
})?
|
||||
.workers(1)
|
||||
.run();
|
||||
|
||||
let _ = tx.send(srv.handle());
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
let srv = rx.recv().unwrap();
|
||||
|
||||
for _ in 0..5 {
|
||||
TcpStream::connect(addr1)
|
||||
.await
|
||||
.unwrap()
|
||||
.shutdown()
|
||||
.await
|
||||
.unwrap();
|
||||
TcpStream::connect(addr2)
|
||||
.await
|
||||
.unwrap()
|
||||
.shutdown()
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
assert!(num_clone.load(Ordering::SeqCst) > 5);
|
||||
assert!(num2_clone.load(Ordering::SeqCst) > 5);
|
||||
|
||||
let _ = srv.stop(false);
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
#[ignore] // non-deterministic on CI
|
||||
#[actix_rt::test]
|
||||
async fn worker_restart() {
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
|
||||
struct TestServiceFactory(Arc<AtomicUsize>);
|
||||
|
||||
impl ServiceFactory<TcpStream> for TestServiceFactory {
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type Config = ();
|
||||
type Service = TestService;
|
||||
type InitError = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: Self::Config) -> Self::Future {
|
||||
let counter = self.0.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
Box::pin(async move { Ok(TestService(counter)) })
|
||||
}
|
||||
}
|
||||
|
||||
struct TestService(usize);
|
||||
|
||||
impl Service<TcpStream> for TestService {
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
actix_service::always_ready!();
|
||||
|
||||
fn call(&self, stream: TcpStream) -> Self::Future {
|
||||
let counter = self.0;
|
||||
|
||||
let mut stream = stream.into_std().unwrap();
|
||||
use std::io::Write;
|
||||
let str = counter.to_string();
|
||||
let buf = str.as_bytes();
|
||||
|
||||
let mut written = 0;
|
||||
|
||||
while written < buf.len() {
|
||||
if let Ok(n) = stream.write(&buf[written..]) {
|
||||
written += n;
|
||||
}
|
||||
}
|
||||
stream.flush().unwrap();
|
||||
stream.shutdown(net::Shutdown::Write).unwrap();
|
||||
|
||||
// force worker 2 to restart service once.
|
||||
if counter == 2 {
|
||||
panic!("panic on purpose")
|
||||
} else {
|
||||
Box::pin(async { Ok(()) })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let addr = unused_addr();
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
let counter = Arc::new(AtomicUsize::new(1));
|
||||
let h = thread::spawn(move || {
|
||||
let counter = counter.clone();
|
||||
actix_rt::System::new().block_on(async {
|
||||
let srv = Server::build()
|
||||
.disable_signals()
|
||||
.bind("addr", addr, move || TestServiceFactory(counter.clone()))?
|
||||
.workers(2)
|
||||
.run();
|
||||
|
||||
let _ = tx.send(srv.handle());
|
||||
|
||||
srv.await
|
||||
})
|
||||
});
|
||||
|
||||
let srv = rx.recv().unwrap();
|
||||
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
let mut buf = [0; 8];
|
||||
|
||||
// worker 1 does not restart and returns its id consistently.
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("1", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// worker 2 dies after returning its response.
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("2", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// request to worker 1
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("1", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// TODO: Remove sleep if it can pass CI.
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
// worker 2 is restarting, so work goes to worker 1.
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("1", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// TODO: Remove sleep if it can pass CI.
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
// worker 2 has restarted, but worker 1 is still the next to accept a connection.
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("1", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
// TODO: Remove sleep if it can pass CI.
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
|
||||
// worker 2 accepts connections again, but its id is now 3.
|
||||
let mut stream = TcpStream::connect(addr).await.unwrap();
|
||||
let n = stream.read(&mut buf).await.unwrap();
|
||||
let id = String::from_utf8_lossy(&buf[0..n]);
|
||||
assert_eq!("3", id);
|
||||
stream.shutdown().await.unwrap();
|
||||
|
||||
let _ = srv.stop(false);
|
||||
h.join().unwrap().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_runtime_on_init() {
|
||||
use std::{thread::sleep, time::Duration};
|
||||
|
||||
let addr = unused_addr();
|
||||
let counter = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
let mut srv = Server::build()
|
||||
.workers(2)
|
||||
.disable_signals()
|
||||
.bind("test", addr, {
|
||||
let counter = counter.clone();
|
||||
move || {
|
||||
counter.fetch_add(1, Ordering::SeqCst);
|
||||
fn_service(|_| async { Ok::<_, ()>(()) })
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
.run();
|
||||
|
||||
fn is_send<T: Send>(_: &T) {}
|
||||
is_send(&srv);
|
||||
is_send(&srv.handle());
|
||||
|
||||
sleep(Duration::from_millis(1_000));
|
||||
assert_eq!(counter.load(Ordering::SeqCst), 0);
|
||||
|
||||
let rt = tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
rt.block_on(async move {
|
||||
let _ = futures_util::poll!(&mut srv);
|
||||
|
||||
// available after the first poll
|
||||
sleep(Duration::from_millis(500));
|
||||
assert_eq!(counter.load(Ordering::SeqCst), 2);
|
||||
|
||||
let _ = srv.handle().stop(true);
|
||||
srv.await
|
||||
})
|
||||
.unwrap();
|
||||
}
|
@@ -1,182 +0,0 @@
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::sync::{mpsc, Arc};
use std::{net, thread, time};

use actix_server::Server;
use actix_service::fn_service;
use futures_util::future::{lazy, ok};
use socket2::{Domain, Protocol, Socket, Type};

fn unused_addr() -> net::SocketAddr {
    let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
    let socket = Socket::new(Domain::ipv4(), Type::stream(), Some(Protocol::tcp())).unwrap();
    socket.bind(&addr.into()).unwrap();
    socket.set_reuse_address(true).unwrap();
    let tcp = socket.into_tcp_listener();
    tcp.local_addr().unwrap()
}

#[test]
fn test_bind() {
    let addr = unused_addr();
    let (tx, rx) = mpsc::channel();

    let h = thread::spawn(move || {
        let sys = actix_rt::System::new("test");
        let srv = Server::build()
            .workers(1)
            .disable_signals()
            .bind("test", addr, move || fn_service(|_| ok::<_, ()>(())))
            .unwrap()
            .start();
        let _ = tx.send((srv, actix_rt::System::current()));
        let _ = sys.run();
    });
    let (_, sys) = rx.recv().unwrap();

    thread::sleep(time::Duration::from_millis(500));
    assert!(net::TcpStream::connect(addr).is_ok());
    sys.stop();
    let _ = h.join();
}

#[test]
fn test_listen() {
    let addr = unused_addr();
    let (tx, rx) = mpsc::channel();

    let h = thread::spawn(move || {
        let sys = actix_rt::System::new("test");
        let lst = net::TcpListener::bind(addr).unwrap();
        Server::build()
            .disable_signals()
            .workers(1)
            .listen("test", lst, move || fn_service(|_| ok::<_, ()>(())))
            .unwrap()
            .start();
        let _ = tx.send(actix_rt::System::current());
        let _ = sys.run();
    });
    let sys = rx.recv().unwrap();

    thread::sleep(time::Duration::from_millis(500));
    assert!(net::TcpStream::connect(addr).is_ok());
    sys.stop();
    let _ = h.join();
}

#[test]
#[cfg(unix)]
fn test_start() {
    use actix_codec::{BytesCodec, Framed};
    use actix_rt::net::TcpStream;
    use bytes::Bytes;
    use futures_util::sink::SinkExt;
    use std::io::Read;

    let addr = unused_addr();
    let (tx, rx) = mpsc::channel();

    let h = thread::spawn(move || {
        let sys = actix_rt::System::new("test");
        let srv: Server = Server::build()
            .backlog(100)
            .disable_signals()
            .bind("test", addr, move || {
                fn_service(|io: TcpStream| async move {
                    let mut f = Framed::new(io, BytesCodec);
                    f.send(Bytes::from_static(b"test")).await.unwrap();
                    Ok::<_, ()>(())
                })
            })
            .unwrap()
            .start();

        let _ = tx.send((srv, actix_rt::System::current()));
        let _ = sys.run();
    });
    let (srv, sys) = rx.recv().unwrap();

    let mut buf = [1u8; 4];
    let mut conn = net::TcpStream::connect(addr).unwrap();
    let _ = conn.read_exact(&mut buf);
    assert_eq!(buf, b"test"[..]);

    // pause
    let _ = srv.pause();
    thread::sleep(time::Duration::from_millis(200));
    let mut conn = net::TcpStream::connect(addr).unwrap();
    conn.set_read_timeout(Some(time::Duration::from_millis(100)))
        .unwrap();
    let res = conn.read_exact(&mut buf);
    assert!(res.is_err());

    // resume
    let _ = srv.resume();
    thread::sleep(time::Duration::from_millis(100));
    assert!(net::TcpStream::connect(addr).is_ok());
    assert!(net::TcpStream::connect(addr).is_ok());
    assert!(net::TcpStream::connect(addr).is_ok());

    let mut buf = [0u8; 4];
    let mut conn = net::TcpStream::connect(addr).unwrap();
    let _ = conn.read_exact(&mut buf);
    assert_eq!(buf, b"test"[..]);

    // stop
    let _ = srv.stop(false);
    thread::sleep(time::Duration::from_millis(100));
    assert!(net::TcpStream::connect(addr).is_err());

    thread::sleep(time::Duration::from_millis(100));
    sys.stop();
    let _ = h.join();
}

#[test]
fn test_configure() {
    let addr1 = unused_addr();
    let addr2 = unused_addr();
    let addr3 = unused_addr();
    let (tx, rx) = mpsc::channel();
    let num = Arc::new(AtomicUsize::new(0));
    let num2 = num.clone();

    let h = thread::spawn(move || {
        let num = num2.clone();
        let sys = actix_rt::System::new("test");
        let srv = Server::build()
            .disable_signals()
            .configure(move |cfg| {
                let num = num.clone();
                let lst = net::TcpListener::bind(addr3).unwrap();
                cfg.bind("addr1", addr1)
                    .unwrap()
                    .bind("addr2", addr2)
                    .unwrap()
                    .listen("addr3", lst)
                    .apply(move |rt| {
                        let num = num.clone();
                        rt.service("addr1", fn_service(|_| ok::<_, ()>(())));
                        rt.service("addr3", fn_service(|_| ok::<_, ()>(())));
                        rt.on_start(lazy(move |_| {
                            let _ = num.fetch_add(1, Relaxed);
                        }))
                    })
            })
            .unwrap()
            .workers(1)
            .start();
        let _ = tx.send((srv, actix_rt::System::current()));
        let _ = sys.run();
    });
    let (_, sys) = rx.recv().unwrap();
    thread::sleep(time::Duration::from_millis(500));

    assert!(net::TcpStream::connect(addr1).is_ok());
    assert!(net::TcpStream::connect(addr2).is_ok());
    assert!(net::TcpStream::connect(addr3).is_ok());
    assert_eq!(num.load(Relaxed), 1);
    sys.stop();
    let _ = h.join();
}

actix-server/tests/testing_server.rs (new file, 77 lines)
@@ -0,0 +1,77 @@
#![allow(missing_docs)]

use std::net;

use actix_rt::net::TcpStream;
use actix_server::{Server, TestServer};
use actix_service::fn_service;
use bytes::BytesMut;
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};

macro_rules! await_timeout_ms {
    ($fut:expr, $limit:expr) => {
        ::actix_rt::time::timeout(::std::time::Duration::from_millis($limit), $fut)
            .await
            .unwrap()
            .unwrap();
    };
}

#[tokio::test]
async fn testing_server_echo() {
    let srv = TestServer::start(|| {
        fn_service(move |mut stream: TcpStream| async move {
            let mut size = 0;
            let mut buf = BytesMut::new();

            match stream.read_buf(&mut buf).await {
                Ok(0) => return Err(()),

                Ok(bytes_read) => {
                    stream.write_all(&buf[size..]).await.unwrap();
                    size += bytes_read;
                }

                Err(_) => return Err(()),
            }

            Ok((buf.freeze(), size))
        })
    });

    let mut conn = srv.connect().unwrap();

    await_timeout_ms!(conn.write_all(b"test"), 200);

    let mut buf = Vec::new();
    await_timeout_ms!(conn.read_to_end(&mut buf), 200);

    assert_eq!(&buf, b"test".as_ref());
}

#[tokio::test]
async fn new_with_builder() {
    let alt_addr = TestServer::unused_addr();

    let srv = TestServer::start_with_builder(
        Server::build()
            .bind("alt", alt_addr, || {
                fn_service(|_| async { Ok::<_, ()>(()) })
            })
            .unwrap(),
        || {
            fn_service(|mut sock: TcpStream| async move {
                let mut buf = [0u8; 16];
                sock.read_exact(&mut buf).await
            })
        },
    );

    // connect to test server
    srv.connect().unwrap();

    // connect to alt service defined in custom ServerBuilder
    let stream = net::TcpStream::connect(alt_addr).unwrap();
    stream.set_nonblocking(true).unwrap();
    TcpStream::from_std(stream).unwrap();
}

@@ -1,229 +1,193 @@
# Changes

## Unreleased - 2020-xx-xx
* Upgrade `pin-project` to `1.0`.
## Unreleased

## 1.0.6 - 2020-08-09
## 2.0.3

### Fixed
- Minimum supported Rust version (MSRV) is now 1.71.

* Removed unsound custom Cell implementation that allowed obtaining several mutable references to the same data, which is undefined behavior in Rust and could lead to violations of memory safety. External code could obtain several mutable references to the same data through service combinators. Attempts to acquire several mutable references to the same data will instead result in a panic.
## 2.0.2

## [1.0.5] - 2020-01-16
- Service types can now be `Send` and `'static` regardless of request, response, and config types, etc. [#397]

### Fixed
[#397]: https://github.com/actix/actix-net/pull/397

* Fixed unsoundness in .and_then()/.then() service combinators
## 2.0.1

## [1.0.4] - 2020-01-15
- Documentation fix. [#388]

### Fixed
[#388]: https://github.com/actix/actix-net/pull/388

* Revert 1.0.3 change
## 2.0.0

## [1.0.3] - 2020-01-15
- Removed pipeline and related structs/functions. [#335]

### Fixed
[#335]: https://github.com/actix/actix-net/pull/335

* Fixed unsoundness in `AndThenService` impl
## 2.0.0-beta.5

## [1.0.2] - 2020-01-08
- Add default `Service` trait impl for `Rc<S: Service>` and `&S: Service`. [#288]
- Add `boxed::rc_service` function for constructing `boxed::RcService` type [#290]

### Added
[#288]: https://github.com/actix/actix-net/pull/288
[#290]: https://github.com/actix/actix-net/pull/290

* Add `into_service` helper function
## 2.0.0-beta.4

- `Service::poll_ready` and `Service::call` receive `&self`. [#247]
- `apply_fn` and `apply_fn_factory` now receive `Fn(Req, &Service)` function type. [#247]
- `apply_cfg` and `apply_cfg_factory` now receive `Fn(Req, &Service)` function type. [#247]
- `fn_service` and friends now receive `Fn(Req)` function type. [#247]

## [1.0.1] - 2019-12-22
[#247]: https://github.com/actix/actix-net/pull/247

### Changed
## 2.0.0-beta.3

* `map_config()` and `unit_config()` accept `IntoServiceFactory` type
- The `forward_ready!` macro converts errors. [#246]

[#246]: https://github.com/actix/actix-net/pull/246

## [1.0.0] - 2019-12-11
## 2.0.0-beta.2

### Added
- Remove redundant type parameter from `map_config`.

* Add Clone impl for Apply service
## 2.0.0-beta.1

- `Service`, other traits, and many type signatures now take the request type as a type parameter instead of an associated type. [#232]
- Add `always_ready!` and `forward_ready!` macros. [#233]
- Crate is now `no_std`. [#233]
- Migrate pin projections to `pin-project-lite`. [#233]
- Remove `AndThenApplyFn` and Pipeline `and_then_apply_fn`. Use the `.and_then(apply_fn(...))` construction. [#233]
- Move non-vital methods to `ServiceExt` and `ServiceFactoryExt` extension traits. [#235]

## [1.0.0-alpha.4] - 2019-12-08
[#232]: https://github.com/actix/actix-net/pull/232
[#233]: https://github.com/actix/actix-net/pull/233
[#235]: https://github.com/actix/actix-net/pull/235

### Changed
## 1.0.6

* Renamed `service_fn` to `fn_service`
- Removed unsound custom Cell implementation that allowed obtaining several mutable references to the same data, which is undefined behavior in Rust and could lead to violations of memory safety. External code could obtain several mutable references to the same data through service combinators. Attempts to acquire several mutable references to the same data will instead result in a panic.

* Renamed `factory_fn` to `fn_factory`
## 1.0.5

* Renamed `factory_fn_cfg` to `fn_factory_with_config`
- Fixed unsoundness in .and_then()/.then() service combinators.

## 1.0.4

## [1.0.0-alpha.3] - 2019-12-06
- Revert 1.0.3 change

### Changed
## 1.0.3

* Add missing Clone impls
- Fixed unsoundness in `AndThenService` impl.

* Restore `Transform::map_init_err()` combinator
## 1.0.2

* Restore `Service/Factory::apply_fn()` in form of `Pipeline/Factory::and_then_apply_fn()`
- Add `into_service` helper function.

* Optimize service combinators and futures memory layout
## 1.0.1

- `map_config()` and `unit_config()` now accept `IntoServiceFactory` type.

## [1.0.0-alpha.2] - 2019-12-02
## 1.0.0

### Changed
- Add Clone impl for Apply service

* Use owned config value for service factory
## 1.0.0-alpha.4

* Renamed BoxedNewService/BoxedService to BoxServiceFactory/BoxService
- Renamed `service_fn` to `fn_service`
- Renamed `factory_fn` to `fn_factory`
- Renamed `factory_fn_cfg` to `fn_factory_with_config`

## 1.0.0-alpha.3

## [1.0.0-alpha.1] - 2019-11-25
- Add missing Clone impls
- Restore `Transform::map_init_err()` combinator
- Restore `Service/Factory::apply_fn()` in form of `Pipeline/Factory::and_then_apply_fn()`
- Optimize service combinators and futures memory layout

### Changed
## 1.0.0-alpha.2

* Migrated to `std::future`
- Use owned config value for service factory
- Renamed BoxedNewService/BoxedService to BoxServiceFactory/BoxService

* `NewService` renamed to `ServiceFactory`
## 1.0.0-alpha.1

* Added `pipeline` and `pipeline_factory` function
- Migrated to `std::future`
- `NewService` renamed to `ServiceFactory`
- Added `pipeline` and `pipeline_factory` function

## 0.4.2

## [0.4.2] - 2019-08-27
- Check service readiness for `new_apply_cfg` combinator

### Fixed
## 0.4.1

* Check service readiness for `new_apply_cfg` combinator
- Add `new_apply_cfg` function

## 0.4.0

## [0.4.1] - 2019-06-06
- Add `NewService::map_config` and `NewService::unit_config` combinators.
- Use associated type for `NewService` config.
- Change `apply_cfg` function.
- Renamed helper functions.

### Added
## 0.3.6

* Add `new_apply_cfg` function
- Poll boxed service call result immediately

## [0.4.0] - 2019-05-12
## 0.3.5

### Changed
- Add `impl<S: Service> Service for Rc<RefCell<S>>`.

* Use associated type for `NewService` config
## 0.3.4

* Change `apply_cfg` function
- Add `Transform::from_err()` combinator
- Add `apply_fn` helper
- Add `apply_fn_factory` helper
- Add `apply_transform` helper
- Add `apply_cfg` helper

* Renamed helper functions
## 0.3.3

### Added
- Add `ApplyTransform` new service for transform and new service.
- Add `NewService::apply_cfg()` combinator, allows using nested `NewService` with different config parameter.
- Revert IntoFuture change

* Add `NewService::map_config` and `NewService::unit_config` combinators
## 0.3.2

- Change `NewService::Future` and `Transform::Future` to the `IntoFuture` trait.
- Export `AndThenTransform` type

## [0.3.6] - 2019-04-07
## 0.3.1

### Changed
- Simplify Transform trait

* Poll boxed service call result immediately
## 0.3.0

- Added boxed NewService and Service.
- Added `Config` parameter to `NewService` trait.
- Added `Config` parameter to `NewTransform` trait.

## [0.3.5] - 2019-03-29
## 0.2.2

### Added
- Added `NewService` impl for `Rc<S> where S: NewService`
- Added `NewService` impl for `Arc<S> where S: NewService`

* Add `impl<S: Service> Service for Rc<RefCell<S>>`
## 0.2.1

- Generalize `.apply` combinator with Transform trait

## [0.3.4] - 2019-03-12

### Added

* Add `Transform::from_err()` combinator

* Add `apply_fn` helper

* Add `apply_fn_factory` helper

* Add `apply_transform` helper

* Add `apply_cfg` helper

## [0.3.3] - 2019-03-09

### Added

* Add `ApplyTransform` new service for transform and new service.

* Add `NewService::apply_cfg()` combinator, allows using nested `NewService` with different config parameter.

### Changed

* Revert IntoFuture change

## [0.3.2] - 2019-03-04

### Changed

* Change `NewService::Future` and `Transform::Future` to the `IntoFuture` trait.

* Export `AndThenTransform` type

## [0.3.1] - 2019-03-04

### Changed

* Simplify Transform trait

## [0.3.0] - 2019-03-02

### Added

* Added boxed NewService and Service.

### Changed

* Added `Config` parameter to `NewService` trait.

* Added `Config` parameter to `NewTransform` trait.

## [0.2.2] - 2019-02-19

### Added

* Added `NewService` impl for `Rc<S> where S: NewService`

* Added `NewService` impl for `Arc<S> where S: NewService`

## [0.2.1] - 2019-02-03

### Changed

* Generalize `.apply` combinator with Transform trait

## [0.2.0] - 2019-02-01

### Changed

* Use associated type instead of generic for Service definition.

* Before:
## 0.2.0

- Use associated type instead of generic for Service definition.
- Before:
```rust
impl Service<Request> for Client {
    type Response = Response;
    // ...
}
```
* After:

- After:
```rust
impl Service for Client {
    type Request = Request;
@@ -232,51 +196,31 @@
}
```

## 0.1.6

## [0.1.6] - 2019-01-24
- Use `FnMut` instead of `Fn` for .apply() and .map() combinators and `FnService` type
- Change `.apply()` error semantic, new service's error is `From<Self::Error>`

### Changed
## 0.1.5

* Use `FnMut` instead of `Fn` for .apply() and .map() combinators and `FnService` type
- Make `Out::Error` convertible from `T::Error` for apply combinator

* Change `.apply()` error semantic, new service's error is `From<Self::Error>`
## 0.1.4

- Use `FnMut` instead of `Fn` for `FnService`

## [0.1.5] - 2019-01-13
## 0.1.3

### Changed
- Split service combinators to separate trait

* Make `Out::Error` convertible from `T::Error` for apply combinator
## 0.1.2

- Release future early for `.and_then()` and `.then()` combinators

## [0.1.4] - 2019-01-11
## 0.1.1

### Changed
- Added Service impl for `Box<S: Service>`

* Use `FnMut` instead of `Fn` for `FnService`
## 0.1.0

## [0.1.3] - 2018-12-12

### Changed

* Split service combinators to separate trait

## [0.1.2] - 2018-12-12

### Fixed

* Release future early for `.and_then()` and `.then()` combinators

## [0.1.1] - 2018-12-09

### Added

* Added Service impl for Box<S: Service>

## [0.1.0] - 2018-12-09

* Initial import
- Initial import

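As a rough illustration of the v2 API described in the changelog entries above (the request type as a type parameter on `Service`, and `&self` receivers for `poll_ready` and `call`), a minimal hand-written service might look like the sketch below. The `Echo` type and its trivial behaviour are invented for illustration and are not part of the crate.

```rust
use std::{
    future::{ready, Ready},
    task::{Context, Poll},
};

use actix_service::Service;

// Hypothetical example type, not taken from the crate.
struct Echo;

// Under the 2.0 API the request type is a type parameter and both
// methods take `&self` rather than `&mut self`.
impl Service<String> for Echo {
    type Response = String;
    type Error = ();
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // always ready; real services may gate readiness on resources
        Poll::Ready(Ok(()))
    }

    fn call(&self, req: String) -> Self::Future {
        // echo the request back as the response
        ready(Ok(req))
    }
}
```
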
@@ -1,33 +1,23 @@
[package]
name = "actix-service"
version = "1.0.6"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
version = "2.0.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>", "Rob Ede <robjtede@icloud.com>"]
description = "Service trait and combinators for representing asynchronous request/response operations."
keywords = ["network", "framework", "async", "futures", "service"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-service"
readme = "README.md"
categories = ["network-programming", "asynchronous"]
categories = ["network-programming", "asynchronous", "no-std"]
repository = "https://github.com/actix/actix-net"
license = "MIT OR Apache-2.0"
edition = "2018"

[lib]
name = "actix_service"
path = "src/lib.rs"
edition.workspace = true
rust-version.workspace = true

[dependencies]
futures-util = "0.3.1"
pin-project = "1.0.0"
futures-core = { version = "0.3.17", default-features = false }
pin-project-lite = "0.2"

[dev-dependencies]
actix-rt = "1.0.0"
criterion = "0.3"
actix-rt = "2"
actix-utils = "3"
futures-util = { version = "0.3.17", default-features = false }

[[bench]]
name = "unsafecell_vs_refcell"
harness = false

[[bench]]
name = "and_then"
harness = false
[lints]
workspace = true

@@ -2,6 +2,12 @@

> Service trait and combinators for representing asynchronous request/response operations.

See documentation for detailed explanations of these components: [https://docs.rs/actix-service][docs].
[crates.io](https://crates.io/crates/actix-service)
[Documentation](https://docs.rs/actix-service/2.0.3)
[Rust version](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
[Dependency status](https://deps.rs/crate/actix-service/2.0.3)
[Chat on Discord](https://discord.gg/NWpN5mmg3x)

[docs]: https://docs.rs/actix-service
See documentation for detailed explanations of these components: https://docs.rs/actix-service.

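To complement the crate description above, here is a minimal sketch of building and calling a service with `fn_service`; the greeting service and its strings are invented for illustration and are not taken from the crate's documentation.

```rust
use actix_service::{fn_service, Service as _, ServiceFactory as _};

#[actix_rt::main]
async fn main() {
    // build a service factory from an async function
    let factory = fn_service(|name: String| async move { Ok::<_, ()>(format!("hello, {name}")) });

    // construct a service instance with unit config, then call it
    let svc = factory.new_service(()).await.unwrap();
    let greeting = svc.call("actix".to_owned()).await.unwrap();
    assert_eq!(greeting, "hello, actix");
}
```
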
Some files were not shown because too many files have changed in this diff.