mirror of https://github.com/fafhrd91/actix-net synced 2025-08-12 15:37:05 +02:00

Compare commits


535 Commits

Author SHA1 Message Date
Rob Ede
16ba77c4c8 prepare next set of betas (#273) 2021-02-06 19:24:52 +00:00
Rob Ede
b4a3f51659 prepare rt release 2.0.1 2021-02-06 15:54:11 +00:00
Riley
9d0901e07f actix-rt: expose JoinError (#271) 2021-02-06 15:50:38 +00:00
fakeshadow
ebb9cd055f use static dispatch on signal handling. reduce allocation (#272) 2021-02-06 03:38:11 +00:00
Rob Ede
a77b70aed2 prepare service 2.0.0-beta.4 release (#269) 2021-02-04 20:44:13 +00:00
Rob Ede
c918da906b use reexported tls crates when possible 2021-02-04 15:23:06 +00:00
Rob Ede
b5399c5631 use reusable box future in tls connector 2021-02-04 15:23:06 +00:00
fakeshadow
7f0eddd794 add blocking thread customize (#265) 2021-02-04 15:01:51 +00:00
shuo
db3385e865 retry on EINTR in accept loop (#264)
Co-authored-by: lishuo <lishuo.03@bytedance.com>
2021-02-04 10:20:37 +00:00
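
The EINTR retry referenced above follows a standard accept-loop pattern; a minimal sketch using std's blocking `TcpListener` (not actix-server's internal mio-based loop) might look like:

```rust
use std::io;
use std::net::TcpListener;

// Minimal sketch, not actix-server internals: an interrupted accept() call
// (EINTR, surfaced as io::ErrorKind::Interrupted) is retried rather than
// treated as a fatal error that would stop the accept loop.
fn accept_loop(listener: &TcpListener) -> io::Result<()> {
    loop {
        match listener.accept() {
            Ok((_stream, peer)) => println!("accepted connection from {}", peer),
            Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
}
```
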
Rob Ede
4a8693d000 readme grammar 2021-02-03 11:18:35 +00:00
Rob Ede
4ec358575e prepare actix-rt v2.0.0 release (#262) 2021-02-03 10:25:31 +00:00
Rob Ede
66bd5bf4a2 prepare macros v0.2.0 release (#261) 2021-02-02 02:07:58 +00:00
Rob Ede
057e7cd7c9 prepare rt v2.0.0-beta.3 2021-01-31 05:19:30 +00:00
Rob Ede
0b656f51e1 deprecate rt TLS item storage 2021-01-31 04:48:03 +00:00
Rob Ede
0eb68d1c7b Revert "remove arbiter TLS item storage"
This reverts commit 3e6f69885c.
2021-01-31 04:45:27 +00:00
Rob Ede
3e6f69885c remove arbiter TLS item storage 2021-01-31 04:43:35 +00:00
Rob Ede
2fa60b07ae prevent arbiter leaks by waiting for registration 2021-01-31 04:41:28 +00:00
Rob Ede
b75254403a remove builder and introduce worker handle (#257) 2021-01-31 03:34:07 +00:00
Rob Ede
1b35ff8ee6 express spawn fn as spawn fut (#256) 2021-01-29 15:16:30 +00:00
Rob Ede
2924419905 prevent spawn_fn panic bubbling (#255) 2021-01-29 14:16:10 +00:00
Rob Ede
6b86b5efc5 rename arbiter to worker (#254) 2021-01-29 04:08:14 +00:00
Rob Ede
ba39c8436d remove tokio runners (#253) 2021-01-29 02:21:06 +00:00
fakeshadow
feac376c17 fix actix-tls build (#252) 2021-01-28 10:31:57 +00:00
Rob Ede
a633d2353c fix addr iterator 2021-01-27 11:23:28 +00:00
Rob Ede
45edff625e add rt tests and doc tests 2021-01-26 09:46:14 +00:00
Rob Ede
cff9deb729 attribute nits 2021-01-26 09:45:43 +00:00
Rob Ede
eaefe21b98 add tests for custom resolver 2021-01-26 08:05:19 +00:00
fakeshadow
636cef8868 service trait takes shared self reference (#247) 2021-01-23 03:06:22 +00:00
fakeshadow
874e5f2e50 change default name resolver and allow custom resolvers (#248) 2021-01-23 01:33:50 +00:00
Rob Ede
6112a47529 update local deps 2021-01-09 15:19:16 +00:00
Rob Ede
a2e03700e7 update rt changelog 2021-01-09 15:16:31 +00:00
Rob Ede
6edf9b8278 prepare rt 2.0.0-beta.2 release 2021-01-09 15:12:59 +00:00
Rob Ede
f07d807707 remove actix-threadpool crate 2021-01-09 15:04:55 +00:00
Rob Ede
d4c46b7da9 fix macros code 2021-01-09 14:58:15 +00:00
Rob Ede
b0a8f8411b prepare macros 0.2.0-beta.1 release 2021-01-09 14:56:07 +00:00
Rob Ede
46bfe5de36 prepare service 2.0.0-beta.3 release 2021-01-09 14:28:33 +00:00
Rob Ede
a95afe2800 prepare router release 0.2.6 2021-01-09 14:18:20 +00:00
Rob Ede
f751cf5acb use convert err on forward_ready! (#246) 2021-01-09 14:13:16 +00:00
fakeshadow
a1982bdbad add actix-rt::task (#245) 2021-01-03 18:16:57 +00:00
Rob Ede
147c4f4f2c test bytestring with ahash 2021-01-03 04:42:08 +00:00
Rob Ede
5285656bdc prepare next beta releases 2021-01-03 04:39:37 +00:00
Rob Ede
296294061f update readme 2020-12-31 02:52:55 +00:00
Rob Ede
93865de848 move router to actix-router 2020-12-31 02:29:27 +00:00
Rob Ede
6bcf6d8160 use bytestring crate name as dir name 2020-12-31 02:21:50 +00:00
Rob Ede
14ff379150 prepare bytestring release 1.0.0 (#243) 2020-12-31 02:20:49 +00:00
fakeshadow
647817ef14 tokio 1.0 and mio 0.7 (#204) 2020-12-30 22:11:50 +00:00
fakeshadow
b5eefb4d42 merge actix-testing into actix-server (#242) 2020-12-29 21:20:24 +00:00
fakeshadow
03eb96d6d4 fix actix-tls tests (#241) 2020-12-29 11:36:17 +00:00
Rob Ede
0934078947 prepare tls beta release 2020-12-29 01:04:21 +00:00
Rob Ede
5759c9e144 merge -connect and -tls and upgrade to rt v2 (#238) 2020-12-29 00:38:41 +00:00
Rob Ede
3c6de3a81b use correct service version for tracing 2020-12-29 00:08:59 +00:00
Rob Ede
ef83647ac9 prepare testing beta release 2020-12-28 23:54:21 +00:00
Rob Ede
98a17081b8 prepare server beta release 2020-12-28 23:50:00 +00:00
fakeshadow
b7202db8fd update actix-server and actix-testing to tokio 1.0 (#239) 2020-12-28 23:44:53 +00:00
Rob Ede
a09f9abfcb prepare utils release 3.0.0-beta.1 2020-12-28 03:32:28 +00:00
Rob Ede
e4a44b77e6 prepare codec release 0.4.0-beta.1 2020-12-28 03:24:43 +00:00
fakeshadow
2ee8f45f5d update actix-codec and actix-utils to tokio 1.0 (#237) 2020-12-28 03:16:37 +00:00
Rob Ede
f48e3f4cb0 prepare release for rt and service 2020-12-28 01:58:31 +00:00
Rob Ede
3d3bd60368 fix rt override 2020-12-28 01:53:11 +00:00
Rob Ede
d684128831 fix rt override 2020-12-28 01:48:19 +00:00
fakeshadow
0c12930796 update to tokio 1.0 for actix-rt (#236) 2020-12-28 01:40:22 +00:00
Rob Ede
ba44ea7d0b remove futures-util from service deps (#235) 2020-12-27 18:24:57 +00:00
Rob Ede
8a58a341a4 service improvements (#233) 2020-12-27 14:15:42 +00:00
Rob Ede
33c9aa6988 bump msrv to 1.46 2020-12-27 04:36:08 +00:00
Rob Ede
3ab8c3eb69 service trait takes request type parameter (#232) 2020-12-27 04:28:00 +00:00
fakeshadow
518bf3f6a6 remove RUNNING Q PENDING thread locals from actix-rt (#207) 2020-12-26 23:26:02 +00:00
fakeshadow
43ce25cda1 Remove unused mods in actix-utils (#229) 2020-12-26 21:27:59 +00:00
Yuki Okushi
4e4122b702 Disable PR comment from codecov 2020-12-17 21:42:21 +09:00
Aravinth Manivannan
b296d0f254 Intradoc links conversion (#227)
* intra doc conversion

* rm trailing blank comment
2020-12-14 08:22:30 +00:00
Juan Aguilar
02a902068f Refactor LocalWaker (#224) 2020-12-13 19:26:57 +00:00
fakeshadow
049795662f remove ServerMessage type. remove one unused InternalServiceFactory impl (#225) 2020-12-13 00:46:32 +00:00
Rob Ede
4e43216b99 standardise compiler lints across all crates (#226) 2020-12-12 23:24:00 +00:00
Rob Ede
93889776c4 prevent double registration of sockets when backpressure is resolved (#223) 2020-12-12 17:19:20 +00:00
Yuki Okushi
ab496a71b5 Fix release date 2020-12-03 08:59:59 +09:00
Yuki Okushi
76d956e25c macros: Add actix-reexport feature (#218) 2020-12-03 08:59:13 +09:00
Ivan Babrou
89e56cf661 Notify about paused accept loop (#215) 2020-11-29 15:30:13 +00:00
Rob Ede
8aca8d4d07 fix clippy warnings (#214)
and make my spelling checker happy
2020-11-25 01:41:14 +00:00
fakeshadow
e0dd2a3d76 remove actix-threadpool re-export from actix-rt (#212) 2020-11-24 17:03:09 +00:00
Rob Ede
59e976aaca address clippy error (#213) 2020-11-24 16:35:47 +00:00
Zura Benashvili
4cc1c87724 docs(transform): remove extra generic parameter (#211) 2020-11-20 22:45:57 +00:00
Yuki Okushi
ca39917d2c Update CoC contact information 2020-10-31 12:08:06 +09:00
ghizzo01
704af672b9 Bump pin-project to 1.0 (#202) 2020-10-25 19:42:40 +09:00
Rob Ede
242bef269f delete ioframe removed package readme
closes #199
2020-09-22 12:29:07 +01:00
Rob Ede
6c65e2a79f prepare router 0.2.5 release (#198) 2020-09-21 22:46:59 +01:00
nujz
e5ca271764 actix-router: fix from_hex error (#196) 2020-09-20 18:04:18 +01:00
nujz
98a2197a09 fix doc error (#195) 2020-09-19 23:12:41 +09:00
Rob Ede
fb0aa02b3c move and update server+tls examples (#190) 2020-09-13 10:12:07 +01:00
Rob Ede
681eeb497d prepare server release 1.0.4 (#188) 2020-09-12 15:28:17 +01:00
Igor Aleksanov
3e04b87311 actix-service: Fix broken link in readme (#189) 2020-09-12 15:08:03 +01:00
Rob Ede
77b7826658 prepare tls v2 release (#186) 2020-09-08 18:00:07 +01:00
Igor Aleksanov
b7a9cb7bb4 actix-rt: Make the process of running System in existing Runtime more clear (#173) 2020-09-06 11:01:24 +01:00
Robert Gabriel Jakabosky
88d99ac89c Fix clippy errors. (#187) 2020-09-06 10:41:42 +01:00
Rob Ede
7632f51509 prepare connect v2 stable release (#185) 2020-09-02 22:14:07 +01:00
Rob Ede
d28687d0d7 promote codec/utils out of beta (#184) 2020-08-24 09:18:37 +01:00
Rob Ede
27c6be9881 remove unused type parameter from Framed::replace_codec (#183) 2020-08-20 00:30:26 +01:00
Rob Ede
119dc39f5b prepare codec and utils betas (#182) 2020-08-19 11:00:12 +01:00
Rob Ede
b3010c13e0 solve framed integration with actix-http (#179) 2020-08-18 23:27:37 +01:00
Adrian Wechner
fecdfcd8d4 assert workers greater than zero (#167) 2020-08-18 16:44:22 +01:00
Yuki Okushi
578a560853 connect,tls: Bump up to next alpha versions (#181) 2020-08-17 15:39:17 +01:00
Rob Ede
fb098536ee bump MSRV to 1.42 (#180) 2020-08-17 15:37:57 +01:00
Rob Ede
5d28be9ad6 fix actix-service readme reference (#176) 2020-08-11 12:20:09 +01:00
Rob Ede
a5a6b6704c prepare actix-service 1.0.6 release (#175) 2020-08-09 16:10:58 +01:00
Igor Aleksanov
afb0a3c9fc actix-service: Fix clippy warning in benches (#174) 2020-08-07 17:16:45 +09:00
Miloas
02aaa75591 fix actix-service doc error (#172) 2020-08-06 11:21:51 +01:00
Yuki Okushi
ed4b708c66 Fix CI on MSRV check (#171) 2020-08-05 09:02:41 +09:00
Yuki Okushi
235a76dcd4 GHA: Switch action to the official setup-msys2 (#169) 2020-07-29 08:47:32 +09:00
Matt Kantor
0c5f1da625 Remove garbled doc comment for actix_router::IntoPattern::is_single (#168) 2020-07-29 05:46:53 +09:00
Yuki Okushi
8ace9264b7 Check code style with rustfmt on CI (#164) 2020-07-22 12:32:13 +09:00
Yuki Okushi
0dca1a705a actix-utils: Remove unsound custom Cell as well (#161) 2020-07-22 01:14:32 +01:00
Juan Aguilar
5d6d309e66 Simplify bcodec decode (#162) 2020-07-20 23:09:24 +09:00
Juan Aguilar
8d0bd7ce1c Improve bcodec encode performance (#157) 2020-07-19 22:36:51 +01:00
Sergey "Shnatsel" Davidoff
a67e38b4a0 Remove unsound custom Cell (#158) 2020-07-20 06:05:36 +09:00
Rob Ede
334c98575a Upgrade tokio utils to 0.3 (#138) 2020-07-20 05:44:26 +09:00
Rob Ede
a9b5a7b070 Create PULL_REQUEST_TEMPLATE.md (#159) 2020-07-20 03:01:09 +09:00
Yuki Okushi
61176f6410 Update rustls-related dependencies (#154) 2020-07-14 11:14:06 +01:00
Yuki Okushi
10b4c30a06 Use OR instead of deprecated / in license field (#155) 2020-07-14 11:11:30 +01:00
Yuki Okushi
7f550bcf0f threadpool: Bump up to 0.3.3 (#156) 2020-07-14 11:10:15 +01:00
Yuki Okushi
887f11f787 Merge pull request #153 from actix/tweak-actions
Tweak actions trigger events
2020-07-08 09:04:05 +09:00
Yuki Okushi
e2a6d352b0 Tweak actions trigger events 2020-07-08 08:38:24 +09:00
Yuki Okushi
f6c697a2dd Merge pull request #152 from paolobarbolini/pl-011
Update parking_lot to 0.11
2020-07-04 03:20:08 +09:00
Paolo Barbolini
5ecdfd684a Update parking_lot to 0.11 2020-07-03 17:37:10 +02:00
Yuki Okushi
7140c04c44 Merge pull request #149 from taiki-e/pin-project
Remove uses of pin_project::project attribute
2020-06-07 02:01:08 +09:00
Taiki Endo
9528df4486 Remove uses of pin_project::project attribute
pin-project will deprecate the project attribute due to some unfixable
limitations.

Refs: https://github.com/taiki-e/pin-project/issues/225
2020-06-06 06:42:45 +09:00
Pen Tree
755a8bb9d1 fix codec doc links (#148) 2020-06-02 18:05:39 +01:00
Yuki Okushi
f3cb6efc30 Merge pull request #146 from actix/cache-v2
Update `actions/cache` to v2
2020-05-28 04:59:34 +09:00
Yuki Okushi
87b857705c Update actions/cache to v2 2020-05-28 03:14:01 +09:00
Yuki Okushi
c897c5d3eb Merge pull request #145 from JohnTitor/new-threalpool
threadpool: Bump up to 0.3.2
2020-05-20 15:24:39 +09:00
Yuki Okushi
134e76b8b4 threadpool: Bump up to 0.3.2 2020-05-20 14:19:16 +09:00
Yuki Okushi
f3a401c23b Merge pull request #144 from JohnTitor/codecov-config
Add codecov config
2020-05-20 11:03:31 +09:00
Yuki Okushi
f7e8a912b3 Add codecov config 2020-05-19 14:45:39 +09:00
Yuki Okushi
11a1e11858 Merge pull request #143 from JohnTitor/new-testing
testing: Bump up to 1.0.1
2020-05-19 14:37:54 +09:00
Yuki Okushi
d0b27ee7e6 testing: Bump up to 1.0.1 2020-05-19 14:08:08 +09:00
Yuki Okushi
2d2b0591a2 Merge pull request #142 from JohnTitor/new-server
server: Bump up to 1.0.3
2020-05-19 13:58:39 +09:00
Yuki Okushi
abbc5f715f server: Bump up to 1.0.3 2020-05-19 10:23:17 +09:00
Yuki Okushi
140a6c76e3 Merge pull request #141 from actix/fix-ci
Only check compilation on mingw CI
2020-05-19 09:39:03 +09:00
Yuki Okushi
2395b28c5e Only check compilation on mingw CI
Disabled running tests since linking with OpenSSL is somehow broken.
2020-05-19 09:11:27 +09:00
Yuki Okushi
aad4812ba6 Merge pull request #140 from JohnTitor/replace-net2
Replace deprecated `net2` crate with `socket2`
2020-05-19 08:58:40 +09:00
Yuki Okushi
ac6c78c476 testing: Replace net2 crate with socket2 2020-05-19 08:21:40 +09:00
Yuki Okushi
8218a098e8 server: Replace net2 crate with socket2 2020-05-19 08:17:44 +09:00
Yuki Okushi
49a6f525be Merge pull request #139 from JohnTitor/next-macros
macros: Bump up to 0.1.2
2020-05-19 07:50:46 +09:00
Yuki Okushi
f59ff82395 macros: Bump up to 0.1.2 2020-05-18 15:36:23 +09:00
Yuki Okushi
f7cc62564d Merge pull request #136 from JohnTitor/connect-alpha-3
actix-connect: Bump up to 2.0.0-alpha.3
2020-05-08 01:36:16 +09:00
Yuki Okushi
b125e2bdce actix-connect: Bump up to 2.0.0-alpha.3 2020-05-08 01:07:57 +09:00
Yuki Okushi
a5c185e80e Merge pull request #135 from actix/fix/unresolverd
correct spelling of ConnectError::Unresolved
2020-05-06 14:45:30 +09:00
Rob Ede
523cee0351 correct spelling of ConnectError::Unresolved 2020-05-03 23:14:22 +01:00
Yuki Okushi
343b3c09fc Merge pull request #134 from JohnTitor/new-rt
Bump up `actix-rt` to 1.1.1
2020-04-30 14:34:17 +09:00
Yuki Okushi
8a10580663 Bump up actix-rt to 1.1.1 2020-04-30 03:07:12 +09:00
Yuki Okushi
1b4a117063 Merge pull request #128 from Jonathas-Conceicao/topic/fix_memory_leak
actix-rt: Spawn future to cleanup pending JoinHandles
2020-04-30 02:58:13 +09:00
Yuki Okushi
700997fe48 Merge pull request #133 from actix/macro-compile-testing
add macro compile tests
2020-04-29 15:33:00 +09:00
Rob Ede
4c5568ed70 add trybuild compile tests 2020-04-26 20:11:16 +01:00
Yuki Okushi
7d0cfe1b4d Merge pull request #131 from danpintara/pull-1
actix-macros: Simplify test macros by using original signature
2020-04-23 02:33:52 +09:00
Daniel Pintara
e35c261c9f actix-macros: test: Simplify by using #sig instead of #name(#inputs) #ret 2020-04-22 00:13:32 +07:00
Yuki Okushi
115ef3fcb3 Merge pull request #130 from JohnTitor/dont-clone
Remove unnecessary clone usage
2020-04-20 08:37:10 +09:00
Yuki Okushi
c0482e2532 Remove unnecessary clone usage 2020-04-20 08:02:08 +09:00
Jonathas-Conceicao
6906f25e01 actix-rt: Set threshold size for arbiter's pending futures list
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-16 03:12:05 -03:00
Jonathas-Conceicao
06bca19524 actix-rt: Spawn future to cleanup pending JoinHandles
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-09 20:36:44 -03:00
Yuki Okushi
e9e2185296 Merge pull request #127 from rubdos/test-fixture-integration
Forward actix_rt::test arguments to test function.
2020-04-09 17:45:17 +09:00
Ruben De Smet
aae52a80ab Forward actix_rt::test arguments to test function.
Previously,

```rust
async fn foo(_a: u32) {}
```

would compile to

```rust
fn foo() {/* something */}
```

This patch changes this behaviour to

```rust
fn foo(_a: u32) {/* something */}
```

by simply forwarding the input arguments.

This allows any test fixture library (e.g. `rstest`, cf.
https://github.com/la10736/rstest/issues/85) to integrate with
actix_rt::test.
2020-04-08 16:48:10 +02:00
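
A simplified, hypothetical illustration of what the forwarding enables (not the real macro output; `futures::executor::block_on` stands in for the runtime the real attribute sets up):

```rust
// Hypothetical expansion for illustration only: the generated synchronous
// wrapper keeps the original parameter list, so a fixture macro layered on
// top can still supply the arguments.
async fn foo(_a: u32) {}

fn foo_test(_a: u32) {
    // stand-in for the actix runtime the real #[actix_rt::test] macro starts
    futures::executor::block_on(foo(_a));
}
```
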
Yuki Okushi
65e2e8052e Release actix-rt 1.1.0 (#126)
* Release actix-rt 1.1.0

* Update actix-rt/CHANGES.md
2020-04-08 16:34:07 +09:00
Jonathas-Conceicao
783880bb0a actix-rt: Add Arbiter::is_running helper and fix System::is_set doc
`Arbiter::is_running` can be used to check whether the current event loop is
running, and it continues to work after the system has stopped. The `System::is_set`
docs were updated to reflect what it actually does: it tells whether the event loop
has started, which alone cannot tell whether it has since stopped.

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-05 21:00:54 -03:00
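
A minimal sketch of the distinction, assuming the actix-rt 1.1-era API (`System::new(name)`, `System::is_set()`, and the new `Arbiter::is_running()`); signatures are assumed, not quoted from the crate:

```rust
// Sketch under assumed actix-rt 1.1 signatures; not taken from the repository.
fn main() {
    assert!(!actix_rt::System::is_set()); // no event loop has been set up yet

    let _runner = actix_rt::System::new("example");
    // is_set() now reports that the event loop has started for this thread,
    // but it alone cannot tell whether the loop has since stopped.
    println!("system set: {}", actix_rt::System::is_set());
    // is_running() is the new helper aimed at that question, and is described
    // above as continuing to work after the system has stopped.
    println!("arbiter running: {}", actix_rt::Arbiter::is_running());
}
```
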
Jonathas-Conceicao
69e8df9d62 actix-rt: Run rustfmt
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-05 21:00:54 -03:00
Yuki Okushi
9addf1a36b Merge pull request #125 from actix/fix/noisy-check
fix noisy check warning
2020-04-05 13:20:25 +09:00
Rob Ede
187a58472d fix noisy check warning 2020-04-04 23:57:52 +01:00
Nikolay Kim
30aa0b7bb6 add serde support to bytestring 2020-03-30 11:54:40 +06:00
Yuki Okushi
e775d08d76 Merge pull request #122 from actix/JohnTitor-patch-1
Upload coverage on PRs
2020-03-18 05:31:59 +09:00
Yuki Okushi
d5f95b54b7 Upload coverage on PRs 2020-03-18 05:03:37 +09:00
Yuki Okushi
904f90abc2 Merge pull request #121 from actix/revert-115-JohnTitor-patch-2
Revert "Disable windows-mingw builder temporarily"
2020-03-16 18:06:42 +09:00
Yuki Okushi
950c73077c Revert "Disable windows-mingw builder temporarily" 2020-03-16 17:31:10 +09:00
Yuki Okushi
732731a9c8 Merge pull request #120 from kornelski/err
std Error for BlockingError
2020-03-14 00:14:42 +09:00
Kornel Lesiński
0dd5a7ce1d std Error for BlockingError
#93
2020-03-13 12:35:20 +00:00
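
What the change amounts to, sketched for a stand-in error type of the same shape (the real `BlockingError` lives in actix-threadpool):

```rust
use std::{error::Error, fmt};

// Stand-in type for illustration; not the actual actix-threadpool definition.
#[derive(Debug)]
enum BlockingError<E: fmt::Debug> {
    Error(E),
    Canceled,
}

impl<E: fmt::Debug> fmt::Display for BlockingError<E> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            BlockingError::Error(e) => write!(f, "blocking task error: {:?}", e),
            BlockingError::Canceled => write!(f, "blocking task canceled"),
        }
    }
}

// The addition: with std::error::Error implemented, the type can be boxed as
// dyn Error and used with `?` and other standard error-handling machinery.
impl<E: fmt::Debug> Error for BlockingError<E> {}
```
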
Yuki Okushi
7105091e51 Merge pull request #119 from JohnTitor/futures
Minimize `futures-*` dependencies
2020-03-13 05:12:37 +09:00
Yuki Okushi
08959dfc21 actix-tracing: Minimize futures-util dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
2792433ad6 actix-codec: Minimize futures-* dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
437a7b05c6 actix-rt: Fix build 2020-03-12 07:13:32 +09:00
Yuki Okushi
3d125c5381 actix-testing: Remove unused deps 2020-03-12 07:13:32 +09:00
Yuki Okushi
fbf7d6ef33 Update examples 2020-03-12 07:13:32 +09:00
Yuki Okushi
e6b6f08369 actix-utils: Minimize futures-* dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
4e806b3e3f actix-tls: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
f5b07053fc actix-server: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
dd3bec83bf actix-ioframe: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
f955e49930 actix-connect: Minimize futures-* dependencies 2020-03-12 04:22:38 +09:00
Yuki Okushi
4be11b541b Merge pull request #117 from actix/new-connect
Release actix-http v2.0.0-alpha.2
2020-03-08 15:13:52 +09:00
Yuki Okushi
baba533407 Update actix-http dependency 2020-03-08 14:38:07 +09:00
Yuki Okushi
2bf50826b0 Bump up to 2.0.0-alpha.2 2020-03-08 14:37:33 +09:00
Yuki Okushi
41b2a3b2e2 Merge pull request #116 from Jonathas-Conceicao/topic/upgrade_trust_dns
actix-connect: Upgrade versions of trust-dns
2020-03-08 14:31:07 +09:00
Jonathas-Conceicao
7fdd4a1118 actix-connect: Upgrade versions of trust-dns
- The `Address` trait is now required to have a `'static` lifetime;
- `start_resolver` and `start_default_resolver` are now `async` and may return
  a `ConnectError`.

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-03-07 14:52:41 -03:00
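
A hedged sketch of the second point, assuming the constructors are exposed at the actix-connect crate root under the names the commit uses (exact paths and return types may differ):

```rust
// Assumed, simplified usage: the resolver constructor is now async and fallible.
async fn init_resolver() -> Result<(), actix_connect::ConnectError> {
    let _resolver = actix_connect::start_default_resolver().await?;
    Ok(())
}
```
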
Jonathas-Conceicao
cb30f9e86a actix-connect: Run cargo fmt
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-03-07 14:37:39 -03:00
Yuki Okushi
873f69be51 Merge pull request #115 from actix/JohnTitor-patch-2
Disable windows-mingw builder temporarily
2020-03-06 14:11:50 +09:00
Yuki Okushi
0967061f30 Merge pull request #114 from actix/JohnTitor-patch-1
Unpin quote version
2020-03-06 14:11:28 +09:00
Yuki Okushi
59902cb3a3 Disable windows-mingw builder temporarily 2020-03-06 13:48:55 +09:00
Yuki Okushi
857e50120b Unpin quote version 2020-03-06 13:45:21 +09:00
Yuki Okushi
36a2edf1cd Merge pull request #111 from dunnock/master
Fix build with failing quote
2020-03-05 23:05:19 +09:00
Maksym Vorobiov
346bd072d3 fix build with failing quote 2020-03-05 14:58:44 +02:00
Yuki Okushi
8d3d58b3b7 Merge pull request #110 from Aaron1011/fix/better-pin
Replace calls to `Pin::new_unchecked` with `pin_project`.
2020-03-05 21:52:55 +09:00
Aaron Hill
c41b5d8dd4 Replace calls to Pin::new_unchecked with pin_project.
This is a breaking change, as it changes some public methods to take
`Pin<&mut Self>` rather than `&mut self`.

This brings these methods into line with `Stream::poll_next`, which also
takes a `Pin<&mut Self>`
2020-03-04 12:08:52 -05:00
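
A generic illustration of the technique (not the actix-codec code itself): `#[pin_project]` generates safe field projections, so `poll` can take `Pin<&mut Self>` without any `Pin::new_unchecked`:

```rust
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

use pin_project::pin_project;

// Generic wrapper future, used only to show the projection pattern.
#[pin_project]
struct Wrapper<F> {
    #[pin]
    inner: F,
}

impl<F: Future> Future for Wrapper<F> {
    type Output = F::Output;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // project() safely yields Pin<&mut F> for the #[pin]-annotated field.
        self.project().inner.poll(cx)
    }
}
```
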
Yuki Okushi
693d5132a9 Merge pull request #109 from JohnTitor/new-tls
actix-tls: Bump up to 2.0.0-alpha.1
2020-03-03 22:29:08 +09:00
Yuki Okushi
f7dac3feb4 Bump up to 2.0.0-alpha.1 2020-03-03 19:47:40 +09:00
Yuki Okushi
ebc11d03f2 Merge pull request #108 from JohnTitor/new-connect
Release `actix-connect` v2.0.0-alpha.1
2020-03-03 18:33:08 +09:00
Yuki Okushi
e3ad5de270 Update actix-connect dependency 2020-03-03 17:24:41 +09:00
Yuki Okushi
91118bb2ce Bump up to 2.0.0-alpha.1 2020-03-03 17:24:25 +09:00
Yuki Okushi
6628688bcf Merge pull request #107 from JohnTitor/rustls-017
Update `rustls` and `tokio-rustls`
2020-03-01 23:48:13 +09:00
Yuki Okushi
b9567359fd actix-tls: Update rustls and tokio-rustls 2020-03-01 12:08:14 +09:00
Yuki Okushi
7dbc0264b1 actix-connect: Update rustls and tokio-rustls 2020-03-01 12:08:14 +09:00
Erich Gubler
1b7c969f6a actix-rt: minimize futures dependencies to futures-{channel,util} with default features off (#104)
* build(deps): minimize `futures` deps by using `futures-channel` and `futures-util` directly

* style(actix-rt): enforce spaces around equals in `Cargo.toml`
2020-02-27 01:15:21 +09:00
Jonathas-Conceicao
f1685d8253 Add Arbiter::local_join associated function
The Arbiter::local_join function can be used to await futures spawned
on the current arbiter.

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-02-26 12:59:46 -03:00
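
A hedged usage sketch of the helper, assuming the actix-rt 1.1-era `Arbiter::spawn` associated function alongside the new `local_join` (signatures assumed, not quoted):

```rust
// Assumed usage sketch; not code from the repository.
async fn run_background_work() {
    actix_rt::Arbiter::spawn(async {
        // work spawned onto the current arbiter
    });

    // Wait for everything spawned on this arbiter so far to complete.
    actix_rt::Arbiter::local_join().await;
}
```
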
Jonathas-Conceicao
e3b6a33b97 Add integration tests
These initial tests validate basic usage with timed futures for:
- `System::block_on`;
- `Arbiter::new`;
- `Arbiter::stop`;
- `Arbiter::join`;

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-02-26 12:59:46 -03:00
Yuki Okushi
13b503435f Merge pull request #106 from JohnTitor/server-102
Release actix-server 1.0.2
2020-02-26 20:53:00 +09:00
Yuki Okushi
98f0290f65 actix-server: Bump up to 1.0.2 2020-02-26 19:48:52 +09:00
Yuki Okushi
b8f66f5e7f Update changelog 2020-02-26 19:48:41 +09:00
Yuki Okushi
dd59ee498e Add FIXME comment 2020-02-26 19:48:27 +09:00
Dany Laporte
83320efa31 Avoid error by register() on Windows (#103) 2020-02-26 18:40:31 +09:00
Yuki Okushi
c69bc11e3e Merge pull request #105 from actix/bench
Add action to check benchmark
2020-02-26 17:33:37 +09:00
Yuki Okushi
aad5c42ad7 Add action to check benchmark 2020-02-26 17:11:46 +09:00
Maxim Vorobjov
4d37858fc6 Benchmarks for actix-service: focused around UnsafeCell usage (#98)
* add benchmark comparing unsafecell vs refcell

* fix syntax

* add benches for and_then implementation options

* repeat benches to stabilize
2020-02-26 16:45:23 +09:00
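
The kind of micro-benchmark described might look like the following sketch (criterion is assumed as the harness; the measured bodies are illustrative, not the actix-service internals):

```rust
use std::cell::{RefCell, UnsafeCell};

use criterion::{black_box, criterion_group, criterion_main, Criterion};

// Illustrative comparison only; not the benchmark code from the PR.
fn bench_cells(c: &mut Criterion) {
    let rc = RefCell::new(0u64);
    let uc = UnsafeCell::new(0u64);

    c.bench_function("refcell_inc", |b| {
        b.iter(|| *rc.borrow_mut() += black_box(1))
    });

    c.bench_function("unsafecell_inc", |b| {
        // SAFETY: single-threaded benchmark; no other reference to the cell's
        // contents exists while this mutable access is performed.
        b.iter(|| unsafe { *uc.get() += black_box(1) })
    });
}

criterion_group!(benches, bench_cells);
criterion_main!(benches);
```
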
Yuki Okushi
d402f08bb5 Merge pull request #102 from JohnTitor/single-import
Remove single import
2020-02-25 19:11:04 +09:00
Yuki Okushi
fa25e30427 Remove single import 2020-02-25 18:41:15 +09:00
Bo Yao
602db1779e Expose is_set (#99)
* Expose is_set

* Update doc and changes.md
2020-02-25 02:55:02 -03:00
Yuki Okushi
4f2910c6b3 Merge pull request #96 from actix/JohnTitor-patch-1
Disable coverage for PRs
2020-02-15 01:55:20 +09:00
Yuki Okushi
9f7d6bc068 Disable coverage for PRs 2020-02-14 07:30:21 +09:00
Yuki Okushi
6908b58943 Merge pull request #92 from actix/bye-travis
Move script from Travis to Actions
2020-02-02 06:28:42 +09:00
Yuki Okushi
043057ecbd Fix import scopes 2020-02-01 23:32:08 +09:00
Yuki Okushi
e12bf9200b Clean up metadata 2020-01-31 02:21:25 +09:00
Yuki Okushi
03d431e663 Add badges on README 2020-01-31 00:01:47 +09:00
Yuki Okushi
f0d352604e Remove travis config 2020-01-31 00:01:34 +09:00
Yuki Okushi
2f67e4f563 Use markdown format 2020-01-31 00:01:24 +09:00
Yuki Okushi
d1155d60ec Tweak Actions 2020-01-31 00:01:11 +09:00
Yuki Okushi
28d9c6a760 Merge pull request #90 from actix/fix-ci
Tweak GitHub Actions
2020-01-30 00:46:21 +09:00
Yuki Okushi
a970c2c997 Remove AppVeyor config 2020-01-29 12:05:55 +09:00
Yuki Okushi
d5a6c83207 Suppress/fix clippy warnings 2020-01-29 12:05:55 +09:00
Yuki Okushi
ee0db9a617 Tweak GitHub Actions 2020-01-29 12:05:55 +09:00
zero-systems
e5b5df1261 Optimize vector fill in builder. (#89)
* optimize vector fill
2020-01-22 06:35:22 +09:00
Nikolay Kim
dbfa13d6be Fixed unsoundness in .and_then()/.then() service combinators 2020-01-16 16:58:11 -08:00
Nikolay Kim
e7c2439543 prep release 2020-01-15 13:35:07 -08:00
Nikolay Kim
3116db5168 revert 1.0.3 changes 2020-01-15 13:24:38 -08:00
Nikolay Kim
5940731ef0 Fix actix-service 1.0.3 compatibility 2020-01-15 11:58:06 -08:00
Rajasekharan Vengalil
aed5fecc8a Add support for tokio tracing for actix Service. (#86)
* Add support for tokio tracing for actix Service.

* Address comments

* Change trace's return type to ApplyTransform

* Remove redundant type args

* Remove reference to MakeSpan from docs
2020-01-15 11:43:52 -08:00
Nikolay Kim
a751899aad Fixed unsoundness in AndThenService impl #83 2020-01-15 11:40:15 -08:00
Nikolay Kim
fa800aeba3 Fix AsRef<str> impl 2020-01-14 15:06:02 -08:00
Nikolay Kim
2f89483635 Merge branch 'master' of github.com:actix/actix-net 2020-01-14 00:42:29 -08:00
Nikolay Kim
3048073919 Add PartialEq<T: AsRef<str>>, AsRef<[u8]> impls 2020-01-13 11:58:31 +06:00
amosonn
4bbba803c1 Fix Service documentation (#85) 2020-01-12 07:44:01 +09:00
Sven-Hendrik Haase
4dcdeb6795 Merge pull request #84 from currency-engineering/master
Minor grammatical fix to docs.
2020-01-10 15:28:19 +01:00
Eric Findlay
3b4f222242 Minor grammatical fix to docs. 2020-01-10 20:52:49 +09:00
Nikolay Kim
7c5fa25b23 Add into_service helper function 2020-01-08 18:31:50 +06:00
Nikolay Kim
3551d6674d Add Clone impl for condition::Waiter 2020-01-08 11:18:56 +06:00
Nikolay Kim
9f00daea80 add Condition and Pool 2020-01-08 10:59:27 +06:00
Nikolay Kim
7dddeab2a8 Add ResourceDef::resource_path_named() path generation method 2019-12-31 18:02:43 +06:00
Nikolay Kim
dcbcc40da2 Revert "Support named parameters for ResourceDef::resource_path() in form of ((&k, &v), ...)"
This reverts commit b0d44198ba.
2019-12-31 15:14:53 +06:00
Nikolay Kim
b0d44198ba Support named parameters for ResourceDef::resource_path() in form of ((&k, &v), ...) 2019-12-31 14:53:30 +06:00
Nikolay Kim
974bd6b01e leak string instead of rc 2019-12-31 12:04:35 +06:00
Nikolay Kim
5779da0f49 refactor service and state manahement 2019-12-29 13:42:42 +06:00
Nikolay Kim
1918c8d4f8 rename .run to .start() 2019-12-29 10:07:46 +06:00
Nikolay Kim
e21c58930b Add impl IntoPattern for &String 2019-12-25 21:34:14 +04:00
Nikolay Kim
59c5e9be6a Use IntoPattern for RouterBuilder::path() 2019-12-25 21:01:07 +04:00
Nikolay Kim
a2a9d9764d introduce IntoPattern trait 2019-12-25 19:54:20 +04:00
Nikolay Kim
bf0a9d2f6e Add IntoPatterns trait 2019-12-25 15:34:21 +04:00
Nikolay Kim
119027f822 fmt 2019-12-25 15:10:13 +04:00
Nikolay Kim
0fe8038d23 allow specify set of resource patters 2019-12-25 15:10:01 +04:00
Nikolay Kim
b599bc4a0c map_config() and unit_config() accepts IntoServiceFactory type 2019-12-22 16:30:49 +04:00
Nikolay Kim
a80e1f8370 fix new() method and make from_static and from_bytes_unchecked methods const 2019-12-22 16:24:28 +04:00
Nikolay Kim
5fe759cc02 Merge branch 'master' of github.com:actix/actix-net 2019-12-20 09:15:19 +06:00
Nikolay Kim
05549f0b42 Add methods to check LocalWaker registration state 2019-12-20 09:13:11 +06:00
Yuki Okushi
b1430eaded Run tests for all features as possible (#78) 2019-12-19 16:31:32 +09:00
Nikolay Kim
0d3f9e74c5 Use .advance() intead of .split_to() 2019-12-19 09:50:31 +06:00
Nikolay Kim
cab73791ed pin trsut-dns-proto 2019-12-15 13:04:26 +06:00
Nikolay Kim
a7ac1a76ed add license files to actix-macros 2019-12-14 23:01:55 +06:00
Nikolay Kim
37bedff6fb use parking_lot 0.10 2019-12-12 06:57:40 +06:00
Nikolay Kim
33fd6adc11 better InOrder test 2019-12-12 06:56:45 +06:00
Nikolay Kim
4305cdba2c Revert InOrder service changes 2019-12-11 23:10:02 +06:00
Nikolay Kim
52ecb4bcc5 Add oneshot::Sender::is_canceled() method 2019-12-11 20:52:57 +06:00
Nikolay Kim
b28f32e82c Allow to create framed::Dispatcher with custom mpsc::Receiver 2019-12-11 20:23:14 +06:00
Nikolay Kim
081205a02f Disconnect callback accepts owned state 2019-12-11 18:57:43 +06:00
Nikolay Kim
8bb81c0768 optimize InOrder service 2019-12-11 18:55:53 +06:00
Nikolay Kim
c7a8743bf9 remove E param 2019-12-11 16:44:09 +06:00
Nikolay Kim
f26fcc703b prep release 2019-12-11 14:56:05 +06:00
Nikolay Kim
ce4587df82 prepare actix-tls release 2019-12-11 14:53:58 +06:00
Nikolay Kim
9957f28137 prepare actix-testing release 2019-12-11 14:49:26 +06:00
Nikolay Kim
9d84d14ef4 update deps 2019-12-11 14:47:30 +06:00
Nikolay Kim
60bfa1bfb1 prepare actix-server release 2019-12-11 14:43:26 +06:00
Nikolay Kim
2c81c22b3e refactor ioframe dispatcher 2019-12-11 14:36:11 +06:00
Nikolay Kim
dded482514 allow to close mpsc sender 2019-12-11 14:36:00 +06:00
Nikolay Kim
631cb86947 refactor framed and stream dispatchers 2019-12-11 12:42:07 +06:00
Nikolay Kim
2e5e69c9ba Simplify oneshot and mpsc implementations 2019-12-11 11:28:09 +06:00
Nikolay Kim
e315cf2893 prep actix-rt release; update deps 2019-12-11 10:34:50 +06:00
Nikolay Kim
13fd615966 actix-macros release 2019-12-11 10:32:01 +06:00
Nikolay Kim
c094f84b85 prepare actix-service release 2019-12-11 10:29:34 +06:00
Nikolay Kim
25012d290a update actix-codec dependencies 2019-12-11 10:23:01 +06:00
Nikolay Kim
32202188cc prepare actix-codec release 2019-12-11 10:18:11 +06:00
Nikolay Kim
bf734a31dc update docs 2019-12-10 21:34:51 +06:00
Nikolay Kim
d29e7c4ba6 Merge branch 'master' of github.com:actix/actix-net 2019-12-10 21:14:18 +06:00
Nikolay Kim
7163e2c2a2 update doc strings 2019-12-10 21:14:06 +06:00
Nikolay Kim
1d810b4561 re-export AlpnError 2019-12-10 12:15:27 +06:00
daxpedda
0913badd61 Macro improvements. (#74)
* Macro improvements.

* Fix usage in `fn main`.
2019-12-10 08:47:35 +06:00
Nikolay Kim
8b3062cd6e Fix buffer remaining capacity calcualtion 2019-12-09 21:50:36 +06:00
Nikolay Kim
35218a4df1 add Clone impl for Apply service 2019-12-09 14:07:20 +06:00
Nikolay Kim
d47f1fb730 prepare actix-service release 2019-12-08 19:49:35 +06:00
Nikolay Kim
1ad0bbfb7f rename fn service helpers 2019-12-08 19:05:05 +06:00
Nikolay Kim
c38a25f102 fix hash impl 2019-12-07 11:51:47 +06:00
Nikolay Kim
110457477a update changes 2019-12-07 11:04:53 +06:00
Nikolay Kim
a899b1e04d bump actix-ioframe version 2019-12-07 10:55:54 +06:00
Nikolay Kim
393cf1ab25 add unsafe from_bytes_unchecked 2019-12-07 10:48:22 +06:00
Nikolay Kim
40fbbb9c32 fix crate name 2019-12-07 10:39:33 +06:00
Nikolay Kim
99fef4f06b add helper conversions 2019-12-07 10:22:08 +06:00
Nikolay Kim
fc0825fcdd update tokio to 0.2.4 2019-12-07 10:15:26 +06:00
Nikolay Kim
6c00ab8296 add string crate 2019-12-07 09:59:39 +06:00
Nikolay Kim
cbdbc05dbd update tokio verion and prep alpha3 release 2019-12-07 09:57:43 +06:00
Yuki Okushi
5674840c01 Stop running tests for all features (#73) 2019-12-07 08:54:58 +06:00
Nikolay Kim
6f07c9d72a update trust-dns 2019-12-06 14:08:11 +06:00
Nikolay Kim
fa48ddcfa1 fix non unix signals support 2019-12-06 14:06:14 +06:00
Max Gortman
f89a992daf eager drop in then, and_then, and_then_apply_fn (#72) 2019-12-06 10:34:44 +06:00
Nikolay Kim
e670a32ff3 inclide stream feature 2019-12-06 01:34:13 +06:00
Nikolay Kim
021c742d22 use string crate from master 2019-12-06 00:10:27 +06:00
Nikolay Kim
88a60ffa66 reexport ssl types 2019-12-05 23:09:44 +06:00
Nikolay Kim
cb2845cb26 fix dependencies 2019-12-05 20:58:28 +06:00
Nikolay Kim
b18fbc98d5 move rustls and nativetls acceptor services to actix-tls 2019-12-05 20:52:37 +06:00
Nikolay Kim
3a858feaec migrate to tokio 0.2.2 2019-12-05 16:40:24 +06:00
Nikolay Kim
d49aca9595 use bitflags for internal flags; use tokio 0.2 2019-12-05 13:11:56 +06:00
Nikolay Kim
6f41b80cb4 optimize service combinators memory layout 2019-12-05 12:37:26 +06:00
Nikolay Kim
c6eb318536 Fix low/high watermark for write/read buffers; fix oneshot impl 2019-12-05 01:36:31 +06:00
Nikolay Kim
21dcc22e53 refactor server configurations 2019-12-04 21:35:27 +06:00
Nikolay Kim
de84663768 fix initial worker service state 2019-12-04 15:52:49 +06:00
Nikolay Kim
c4e2051327 refactor server worker 2019-12-04 15:12:02 +06:00
Nikolay Kim
0a4fe22003 Restore Service/Factory::apply_fn() in form of Pipeline/Factory::and_then_apply_fn() 2019-12-03 19:59:28 +06:00
Nikolay Kim
eb773c8b8c Merge branch 'master' of github.com:actix/actix-net 2019-12-03 18:34:32 +06:00
Nikolay Kim
db0bc1e156 Restore Transform::map_init_err() combinator 2019-12-03 18:32:02 +06:00
Yuki Okushi
9eb12e0467 Use GitHub Actions (#71) 2019-12-03 20:00:16 +09:00
Nikolay Kim
eb33f0ecbe add Clone for apply combinator 2019-12-03 16:15:06 +06:00
Nikolay Kim
cbc5da8625 update changes 2019-12-03 14:10:36 +06:00
Nikolay Kim
ec8dca8d69 Merge branch 'master' of github.com:actix/actix-net 2019-12-03 14:09:35 +06:00
Nikolay Kim
6a9df026e7 Add missing Clone impl for factory_fn_cfg 2019-12-03 14:05:23 +06:00
Aaron Housh
2756bedc3d Fix for non Unix OS (#69) 2019-12-03 10:07:54 +06:00
Nikolay Kim
bd4c4cda8b update threadpool 2019-12-02 22:49:02 +06:00
Nikolay Kim
c0ede65317 restore 0.1 behavior 2019-12-02 22:47:49 +06:00
Nikolay Kim
9f575418c1 clippy warnings 2019-12-02 22:30:09 +06:00
Nikolay Kim
9ed35cca7a use owned value for service factory config 2019-12-02 21:27:48 +06:00
Nikolay Kim
3385682e09 remove server feature 2019-12-02 17:04:42 +06:00
Nikolay Kim
f55f96bc77 fix dependencies 2019-12-02 11:49:42 +06:00
Nikolay Kim
a08b1eba87 update tests 2019-12-02 11:43:52 +06:00
Nikolay Kim
d81e72cf06 remove deprecaed crate 2019-12-02 11:30:52 +06:00
Nikolay Kim
9fbe6a1f6d refactor server configuration and tls support 2019-12-02 11:30:27 +06:00
Nikolay Kim
16ff283fb2 add metadata 2019-12-01 20:30:24 +06:00
Nikolay Kim
503c2feb08 re-export net primitives 2019-12-01 10:56:25 +06:00
Nikolay Kim
bec4efc699 add extra methods to pipeline 2019-11-29 13:51:00 +06:00
Nikolay Kim
5e5ae2ddec restore stream dispatcher 2019-11-29 10:41:09 +06:00
Nikolay Kim
a02064592b disable rustls 2019-11-27 21:03:26 +06:00
Nikolay Kim
af72005159 move BoxFuture to boxed mod 2019-11-27 20:59:36 +06:00
Nikolay Kim
c254bb978c allow to wait on Server until server stops; restore signal handling 2019-11-26 17:03:52 +06:00
Nikolay Kim
009f8e2e7c allow to wait server exit 2019-11-26 16:33:45 +06:00
Nikolay Kim
f5aecdee8f work around to rust#62127 2019-11-26 10:14:21 +06:00
Nikolay Kim
4546774f4e inclide fn ident to err message 2019-11-26 10:04:46 +06:00
Nikolay Kim
2cf140a869 inclide fn token to err message 2019-11-26 10:01:46 +06:00
Nikolay Kim
e76ea8e80c re-export timeout 2019-11-26 09:04:14 +06:00
Nikolay Kim
52d03fa18c use actix deps instead of tokio 2019-11-26 08:26:22 +06:00
Nikolay Kim
5efac449b1 re-export time utils 2019-11-26 08:12:16 +06:00
Nikolay Kim
4ceac79f2c add test and main macros 2019-11-25 21:49:11 +06:00
Nikolay Kim
1fddd1e75b renamed boxed service 2019-11-25 18:18:00 +06:00
Nikolay Kim
905d058454 upgrade derive_more 2019-11-25 17:54:47 +06:00
Nikolay Kim
5265714f68 prep alpha.1 release 2019-11-21 19:58:55 +06:00
Nikolay Kim
ae4394c0f2 fix uds server support 2019-11-21 00:35:44 +06:00
Nikolay Kim
d3c5518646 fix rustls acceptor 2019-11-19 18:54:36 +06:00
Nikolay Kim
3bf83c1d98 cleanup Unpin constraint; simplify Framed impl 2019-11-19 14:51:40 +06:00
Nikolay Kim
617e40a7e9 fix framed_read 2019-11-19 11:06:55 +06:00
Nikolay Kim
3105cde168 add Service impl for RefCell<S> 2019-11-19 08:45:09 +06:00
Nikolay Kim
5b74c79cf9 Simplify transform trait, remove map_init_err 2019-11-19 06:51:43 +06:00
Nikolay Kim
8bf8ad86d6 add IntoServiceFactory impl for servie_fn 2019-11-18 20:46:49 +06:00
Nikolay Kim
877f89eeb7 use service types for ssl connectors 2019-11-18 20:20:56 +06:00
Nikolay Kim
1354946460 remove pin-project; update Unpin consrtaint 2019-11-18 18:28:54 +06:00
Nikolay Kim
7404d82a9b use concrete types 2019-11-18 14:30:04 +06:00
Nikolay Kim
c1cdc9908a update deps and fix definitions 2019-11-15 16:06:44 +06:00
Yuki Okushi
be7904fd57 Fix code style (#65)
* Fix clippy warnings

* cargo fmt

* Remove redundant lifetime
2019-11-15 00:28:29 +09:00
Nikolay Kim
13049b80ca Migrate actix-net to std::future (#64)
* Migrate actix-codec, actix-rt, and actix-threadpool to std::future

* update to latest tokio alpha and futures-rs

* Migrate actix-service to std::future,

This is a squash of ~8 commits, since it included a lot of experimentation. To see the commits,
look into the semtexzv/std-future-service-tmp branch.

* update futures-rs and tokio

* Migrate actix-threadpool to std::future (#59)

* Migrate actix-threadpool to std::future

* Cosmetic refactor

- turn log::error! into log::warn! as it doesn't throw any error
- add Clone and Copy impls for Cancelled making it cheap to operate with
- apply rustfmt

* Bump up crate version to 0.2.0 and pre-fill its changelog

* Disable patching 'actix-threadpool' crate in global workspace as unnecessary

* Revert patching and fix 'actix-rt'

* Migrate actix-rt to std::future (#47)

* remove Pin from Service::poll_ready(); simplify combinators api; make code compile

* disable tests

* update travis config

* refactor naming

* drop IntoFuture trait

* Migrate actix-server to std::future (#50)

Still not finished; this is more of a WIP. It is an aggregation of several commits, which
can be found in the semtexzv/std-future-server-tmp branch.

* update actix-server

* rename Factor to ServiceFactory

* start server worker in start method

* update actix-utils

* remove IntoTransform trait

* Migrate actix-server::ssl::nativetls to std futures (#61)

* Refactor 'nativetls' module

* Migrate 'actix-server-config' to std futures

- remove "uds" feature
- disable features by default

* Switch NativeTlsAcceptor to use 'tokio-tls' crate

* Bikeshed features names and remove unnecessary dependencies for 'actix-server-config' crate

* update openssl impl

* migrate actix-connect to std::future

* migrate actix-ioframe to std::future

* update version to alpha.1

* fix boxed service

* migrate server rustls support

* migrate openssl and rustls connectors

* store the thread's handle with arbiter (#62)

* update ssl connect tests

* restore service tests

* update readme
2019-11-14 18:38:24 +06:00
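
For orientation, the std::future-era service shape that this migration moves toward (and that the `poll_ready` bullet above refers to) looks roughly like this simplified sketch; it is not the exact trait as shipped:

```rust
use std::future::Future;
use std::task::{Context, Poll};

// Simplified sketch of a std::future-style Service trait: poll_ready() takes
// plain &mut self (no Pin), and call() returns an ordinary Future.
trait Service {
    type Request;
    type Response;
    type Error;
    type Future: Future<Output = Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;

    fn call(&mut self, req: Self::Request) -> Self::Future;
}
```
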
Nikolay Kim
9fa2a36b4e prepare actix-rt release 2019-11-14 17:33:28 +06:00
Ivan Ladelshchikov
ed5023128b store the thread's handle with arbiter (#60) 2019-11-14 15:07:33 +06:00
Nikolay Kim
2e8c2c7733 Re-register task on every future poll 2019-10-14 17:55:52 +06:00
Nikolay Kim
115e82329f fix arbiter thread panic message 2019-10-14 11:19:08 +06:00
Nikolay Kim
0b0060fe47 update deps 2019-10-14 10:37:48 +06:00
Nikolay Kim
35e32d8e55 prepare actix-testing release 2019-10-14 10:30:27 +06:00
Nikolay Kim
9982a9498d register current task in counters available method. 2019-10-08 15:02:43 +06:00
Nikolay Kim
fa72975f34 extra trace logging 2019-10-08 14:46:22 +06:00
Sven-Hendrik Haase
fe5de2510d Merge pull request #56 from actix/fix-52
Add an error message if we receive a non-hostname-based dest
2019-10-04 13:48:20 +02:00
Yuki Okushi
e3155957a8 Prepare actix-server release (#55) 2019-10-04 17:36:23 +09:00
Sven-Hendrik Haase
f6f9e1fcdb Add an error message if we receive a non-hostname-based dest
This is more helpful than an unwrap and at least points users at the right location.
Upstream issue is https://github.com/briansmith/webpki/issues/54
2019-10-04 07:30:13 +02:00
Yuki Okushi
2667850d60 Prepare actix-server-config release (#54)
* Prepare actix-server-config release

* Bump up actix-server-config to 0.2.0
2019-10-04 06:13:33 +06:00
Yuki Okushi
fba2002702 Prepare actix-connect release (#53) 2019-10-04 06:21:59 +09:00
Jerome Gravel-Niquet
e733c562d9 Update rustls, tokio-rustls and webpki across the board (#42)
* Update rustls, tokio-rustls and webpki across the board

* bump minimum rust version to 1.37

* updated readme and changelogs to reflect changes and minimum required rust version
2019-10-04 03:32:32 +09:00
Yuki Okushi
8f05986a9f Use map() instead of and_then() (#51) 2019-10-03 14:55:44 +09:00
Nikolay Kim
aa9bbe2114 prepare actix-ioframe release 2019-09-25 10:47:06 +06:00
Nikolay Kim
4837a901e2 prepare actix-server release 2019-09-25 10:35:15 +06:00
Nikolay Kim
a02ff17cb1 remove actix-tower from workspace 2019-09-25 10:11:17 +06:00
Nikolay Kim
dbf566928c drop tower intergration 2019-09-25 10:01:08 +06:00
Nikolay Kim
ca982b2467 update workspace deps for tests 2019-09-25 10:00:54 +06:00
Nikolay Kim
c859d13e3b use actix-testing instead of test server 2019-09-25 09:51:28 +06:00
Nikolay Kim
41e49e8093 update changes 2019-09-25 09:32:33 +06:00
Nikolay Kim
715a770d7a deprecate test server 2019-09-25 09:31:52 +06:00
Nikolay Kim
5469d8c910 prep actix-testing release 2019-09-25 09:26:12 +06:00
Nikolay Kim
8be5f773f4 add actix-testing crate 2019-09-17 16:04:20 +06:00
karlri
b686b4c34e Feature uds: Add listen_uds to ServerBuilder (#43)
Allows directly passing a Unix listener instead of a path. This is useful,
for example, when running as a daemon under systemd with the systemd
crate.
2019-09-16 11:07:46 +06:00
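
A std-only sketch of the scenario described above: obtain an already-bound Unix listener and hand the listener itself (rather than a path) to the server builder; the actix-server call is only named in a comment because its exact signature is not shown here:

```rust
// Unix-only sketch. In a real daemon the listener would typically be inherited
// via systemd socket activation (the systemd crate) instead of bound locally.
#[cfg(unix)]
fn bound_listener() -> std::io::Result<std::os::unix::net::UnixListener> {
    std::os::unix::net::UnixListener::bind("/tmp/example.sock")
}

#[cfg(unix)]
fn main() -> std::io::Result<()> {
    let _listener = bound_listener()?;
    // Hypothetically: Server::build().listen_uds("example", _listener, factory)? ...
    Ok(())
}

#[cfg(not(unix))]
fn main() {}
```
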
Nikolay Kim
34a7b7f05a add TcpStreamService 2019-09-05 16:34:48 -07:00
Nikolay Kim
b1d9b06a87 Use arbiters storage for default async resolver 2019-09-02 15:15:55 -07:00
Nikolay Kim
94e673b50b Add arbiter specific storage 2019-09-02 15:03:03 -07:00
Nikolay Kim
1a644c6bb1 Check service readiness for new_apply_cfg combinator 2019-08-27 05:28:15 +06:00
Yuki Okushi
aad013f559 Fix clippy warnings (#40)
Add explicit `dyn`s

Remove let binding

Use +=

Return false

Derive Default for TcpConnector

Squash if/else

Remove unnecessary return keywords

Use is_empty()

Fix clippy attribute

Allow mut_from_ref
2019-08-17 05:15:51 +09:00
Aron Heinecke
7a18d9da26 Add check for minimum rust version (#39) 2019-08-07 17:49:29 -07:00
Aron Heinecke
d59b8ce62e Fix invalid minimum version (#38) 2019-08-07 17:48:31 -07:00
Nikolay Kim
3821d511d0 prep actix-threadpool release 2019-08-05 09:54:49 -07:00
Nikolay Kim
62e429cb0c Merge branch 'master' of github.com:actix/actix-net 2019-08-05 09:53:03 -07:00
Nikolay Kim
a2643d475a Add ConnectService and OpensslConnectService 2019-08-05 09:52:50 -07:00
Sven-Hendrik Haase
34c259a8b5 Merge pull request #35 from ignatenkobrain/parking_lot
threadpool: Update parking_lot to 0.9
2019-08-05 17:30:19 +02:00
Igor Gnatenko
8b398c3386 threadpool: Update parking_lot to 0.9
Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
2019-08-04 15:46:14 +02:00
Neil Locketz
0baceb0e56 Fix typo in desc (#34) 2019-07-30 09:35:57 -07:00
Michael Snoyman
6be1f37f6c Minor typo corrections in docs (#33) 2019-07-25 11:46:11 +06:00
Nikolay Kim
a742768feb bump version 2019-07-24 14:16:25 +06:00
Marat Safin
f913872159 add rustls support for connect (#31) 2019-07-24 14:14:26 +06:00
Nikolay Kim
41145040e1 remove ClonableService 2019-07-19 11:03:16 +06:00
Nikolay Kim
311bb14d97 add unix domain sockets support #3 2019-07-18 17:05:40 +06:00
Nikolay Kim
2955e49d78 add unix domain sockets support 2019-07-18 16:43:42 +06:00
Nikolay Kim
9d1b428b34 undeprecate framed transport 2019-07-17 13:31:00 +06:00
Nikolay Kim
42d526bced mark some fn as unsafe 2019-07-17 11:16:38 +06:00
Nikolay Kim
23a230a83b deprecate ClonableService and FramedTransport 2019-07-17 10:57:52 +06:00
Nikolay Kim
411e31786f update actix-connect changes 2019-07-17 10:33:47 +06:00
Nikolay Kim
b491d373b1 update actix-rt changes 2019-07-17 10:30:59 +06:00
Jeff Muizelaar
9271b95c87 Avoid a copy of the Future when initializing the Box. (#29)
Futures can be pretty big (> 1500 bytes), so this is probably worth doing.

I confirmed with memcpy-find that this did in fact eliminate two ~1500
byte copies from the actix-web basic example.
2019-07-17 10:29:22 +06:00
Jan Michael Auer
1b3cd0d88c Expose Connect addrs (#30) 2019-07-17 06:17:51 +06:00
Nikolay Kim
da302d4b7a fix disconnect callback 2019-07-03 13:02:03 +06:00
Nikolay Kim
922a919572 simple callback 2019-07-02 12:35:27 +06:00
Nikolay Kim
5a62175b6e add disconnect callback 2019-07-02 12:10:05 +06:00
Nikolay Kim
5445e341c3 give access to io object during connect stage 2019-07-01 22:37:59 +06:00
Nikolay Kim
1b17d274a0 refactor connect stage 2019-07-01 11:20:24 +06:00
Nikolay Kim
9d8b3e6275 impl Stream and Sink for Connect 2019-06-30 22:58:23 +06:00
Nikolay Kim
27baf03f64 Do not block on sink drop for FramedTransport 2019-06-26 15:20:56 +06:00
Nikolay Kim
205cac82ce add custom framed dispatcher service 2019-06-26 15:19:40 +06:00
Nikolay Kim
07708c5e9a prepare rt release 2019-06-22 09:02:17 +06:00
Nikolay Kim
1c04ad3238 Merge pull request #22 from GeorgeHahn/with-external-runtime
Allow Actix to be started on an external CurrentThread runtime
2019-06-22 08:53:19 +06:00
Nikolay Kim
66aa21740c Merge pull request #28 from ignatenkobrain/deps
chore: Update derive_more to 0.15
2019-06-18 22:26:37 +06:00
Igor Gnatenko
b183cb3324 chore: Update derive_more to 0.15
Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
2019-06-18 10:25:01 +02:00
Nikolay Kim
158482cd2f Add new_apply_cfg function 2019-06-06 14:28:07 +06:00
George Hahn
9e61f62871 new_async -> run_in_executor and return future directly + builder cleanup 2019-06-05 12:51:59 -05:00
Nikolay Kim
7051888289 prepare actix-threadpool release 2019-06-05 08:09:46 +06:00
Nikolay Kim
0caa47fc47 Merge pull request #27 from ignatenkobrain/license
Include license files into all sub-crates
2019-06-01 16:48:42 +06:00
Nikolay Kim
6d1cbb2d2f Merge pull request #26 from ignatenkobrain/master
chore: Update parking_lot to 0.8
2019-06-01 16:47:56 +06:00
Igor Gnatenko
ca289ddf7f Include license files into all sub-crates
Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
2019-05-30 20:38:44 +02:00
Igor Gnatenko
ad9a197916 chore: Update parking_lot to 0.8
Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
2019-05-30 20:30:43 +02:00
George Hahn
c4f05e033f fixup: fix new_async doc comment 2019-05-24 10:29:52 -05:00
George Hahn
048314913c Enable System to be executed on an external CurrentThread runtime 2019-05-23 13:34:47 -05:00
Nikolay Kim
c1b183e1ce Merge branch 'master' of github.com:actix/actix-net 2019-05-18 10:56:51 -07:00
Nikolay Kim
87bc3dacd9 use u64 for shutdown_timeout 2019-05-18 10:56:41 -07:00
Nikolay Kim
0156f479a0 Merge pull request #19 from pka/patch-1
Fix typo
2019-05-15 13:12:26 -07:00
Pirmin Kalberer
139fa3b9a2 Fix typo 2019-05-15 20:51:24 +02:00
Nikolay Kim
a14f612382 remove debug prints 2019-05-15 10:29:10 -07:00
Nikolay Kim
059e2ad042 Fix checked resource match 2019-05-15 10:21:29 -07:00
Nikolay Kim
fdf2a6f422 prepare actix-utils release 2019-05-15 08:31:40 -07:00
Nikolay Kim
fc2631c852 merge remote 2019-05-14 17:37:14 -07:00
Nikolay Kim
d51b210ae7 Merge branch 'master' of github.com:actix/actix-net 2019-05-14 17:36:18 -07:00
Nikolay Kim
0a6cded975 change Either constructor 2019-05-14 17:32:50 -07:00
Nikolay Kim
14e3933d8b Merge pull request #17 from neoeinstein/tower-interop
Second iteration on tower interop
2019-05-12 20:03:42 -07:00
Nikolay Kim
837504c10f update deps 2019-05-12 08:40:42 -07:00
Nikolay Kim
802d808aca prepare actix-connect release 2019-05-12 08:15:18 -07:00
Nikolay Kim
7712de3d8e update deps 2019-05-12 08:10:30 -07:00
Nikolay Kim
f1d0d5f6f9 prepare actix-server release 2019-05-12 08:03:16 -07:00
Nikolay Kim
a76fcaf4d8 prepare actix-utils release 2019-05-12 08:00:23 -07:00
Nikolay Kim
a2134035d6 prepare actix-service release 2019-05-12 07:53:26 -07:00
Nikolay Kim
5f8599faf1 merge master 2019-05-12 06:06:45 -07:00
Nikolay Kim
f0776fca94 Use associated type for NewService config 2019-05-12 06:03:50 -07:00
Marcus Griep
c7676df697 Add documentation and doctests 2019-05-03 10:08:49 -04:00
Marcus Griep
ecf7a11a20 Add convenience methods to wrap with middleware 2019-05-02 17:47:37 -04:00
Marcus Griep
686958fe0c Add reciprical compatibility layer for tower 2019-05-02 17:22:22 -04:00
Nikolay Kim
49ade171f6 Update CHANGES.md 2019-05-01 23:20:00 -07:00
Nikolay Kim
0a2a520c35 Merge pull request #16 from boustrophedon/derive_debug
Derive debug for Server and ServerCommand
2019-05-01 23:19:16 -07:00
Harry Stern
b0c37dfc87 Derive debug for Server and ServerCommand 2019-05-02 01:31:04 -04:00
Nikolay Kim
91e28a4312 Merge pull request #15 from neoeinstein/tower-interop
Add compatibility layer for tower-service
2019-04-29 10:21:32 -07:00
Marcus Griep
508dce8bf1 Add compatibility crate for tower-service
Introduces a new crate `actix-tower`, which makes it easier for
services built on the `tower-service` abstraction to be used with
`actix-service`.
2019-04-29 12:39:16 -04:00
Nikolay Kim
8ed1099a2e Merge pull request #14 from Bobo1239/master
Increase compiler recursion limit
2019-04-23 13:04:32 -07:00
Boris-Chengbiao Zhou
83544bd971 Increase compiler recursion limit
Fixes `cargo doc` on Windows. (actix/actix#189)
2019-04-23 22:02:37 +02:00
Nikolay Kim
76c317e0b2 Added support for remainder match 2019-04-22 21:19:22 -07:00
Nikolay Kim
3b314e4c8c Connect::set_addr() 2019-04-19 17:43:52 -07:00
Nikolay Kim
ae27b87641 IoStream trait and impls for TcpStream, SslStream and TlsStream 2019-04-16 08:32:12 -07:00
Nikolay Kim
fc2dcadc7a use stable version of trust-dns-resolver 2019-04-14 20:46:36 -07:00
Nikolay Kim
54f62b5035 prep release 2019-04-12 12:30:55 -07:00
Nikolay Kim
d3208bf7a8 Do not start default resolver immediately for default connector. 2019-04-12 12:28:18 -07:00
Nikolay Kim
21507d3da1 add TestServerRuntime::run_on() method 2019-04-12 12:26:47 -07:00
Nikolay Kim
b9d8a215b4 Start trust-dns default resolver on first use 2019-04-11 09:57:21 -07:00
Nikolay Kim
51c4dfe5cb Allow to reset Path instance; export Quoter type 2019-04-07 22:48:18 -07:00
Nikolay Kim
a60112c71e Poll boxed service call result immediately 2019-04-07 20:48:40 -07:00
Nikolay Kim
bd814d6f80 re-export trust-dns types 2019-04-05 10:36:57 -07:00
Nikolay Kim
a4e0c71baa Merge branch 'master' of github.com:actix/actix-net 2019-04-04 15:41:50 -07:00
Nikolay Kim
b9ea445e70 Log error if dns system config could not be loaded 2019-04-04 15:41:05 -07:00
Nikolay Kim
ba2901269d Merge pull request #11 from Dowwie/master
added docs for trait Service, trait Transform
2019-04-04 11:06:02 -07:00
dowwie
5cbc29306a updated as per comments 2019-04-04 14:02:53 -04:00
Nikolay Kim
810fa869ae remove unneeded static 2019-04-04 10:04:19 -07:00
dowwie
33cd51aabf added docs for trait Service, trait Transform 2019-04-04 11:40:28 -04:00
Nikolay Kim
629ed05f82 Get dynamic segment by name instead of iterator 2019-04-03 21:40:21 -07:00
Nikolay Kim
5e8ae210f7 Rename connect Connector to TcpConnector #10 2019-03-31 19:14:13 -07:00
Nikolay Kim
3add90628f Fix SIGINT force shutdown 2019-03-30 12:09:02 -07:00
Nikolay Kim
02ab804e0b prepare actix-service release 2019-03-29 11:16:40 -07:00
Nikolay Kim
feac0b43d9 add impl Service for Rc<RefCell<S>> 2019-03-29 10:21:17 -07:00
Nikolay Kim
1441355d4f use release 2019-03-28 04:02:39 -07:00
Nikolay Kim
7c5afc09a6 move threadpool to separate crate 2019-03-28 03:56:52 -07:00
Nikolay Kim
16856c7d3f Merge branch 'master' of github.com:actix/actix-net 2019-03-27 17:30:54 -07:00
Nikolay Kim
95d02659d5 Added Framed::map_io() method 2019-03-27 17:30:37 -07:00
Juan Aguilar Santillana
bcbd7e6ddf Fix unnecessary arbiter clone at builder rt 2019-03-23 09:46:08 +03:00
Nikolay Kim
e0d3581239 allow to send messages to framed transport via mpsc channel 2019-03-20 09:44:23 -07:00
Nikolay Kim
ef1bdb2eb2 update travis config 2019-03-17 10:25:24 -07:00
Nikolay Kim
10301ff49d temp tarpaulin fix 2019-03-17 08:53:50 -07:00
Nikolay Kim
27c28d6597 Fix error handling for single address 2019-03-15 11:37:51 -07:00
Nikolay Kim
b290273e81 prepare actix-connect release 2019-03-14 22:39:49 -07:00
Nikolay Kim
720230b852 Merge pull request #8 from Firstyear/2019-03-15-arbiter-docs
Improve Arbiter documentation
2019-03-14 20:58:33 -07:00
Nikolay Kim
44c2639fd6 prepare actix-server release 2019-03-14 20:55:55 -07:00
Nikolay Kim
9a5705d1b6 merge travis branch 2019-03-14 20:53:56 -07:00
Nikolay Kim
7ff923a58f stop tests threads 2019-03-14 20:52:17 -07:00
Nikolay Kim
6659b192d3 travis config 2019-03-14 20:52:13 -07:00
Nikolay Kim
1146d9cf30 fix init order 2019-03-14 20:48:58 -07:00
Nikolay Kim
b7b76c47e5 rename method 2019-03-14 20:23:49 -07:00
Nikolay Kim
d23dc6f6af Allow to run future before server service initialization 2019-03-14 20:09:34 -07:00
William Brown
9b6a955da4 Improve Arbiter documentation 2019-03-15 10:24:27 +10:00
Nikolay Kim
f3aa48309f travis config 2019-03-14 11:55:39 -07:00
Nikolay Kim
c9b86712e5 reinstall tarpaulin 2019-03-14 11:31:32 -07:00
Nikolay Kim
0f74f280f9 impl Address for http::Uri 2019-03-14 11:15:32 -07:00
Nikolay Kim
eb37e15554 use specific version of nightly 2019-03-14 10:43:36 -07:00
Nikolay Kim
ad007b8b42 Merge branch 'master' of github.com:actix/actix-net 2019-03-14 10:28:47 -07:00
Nikolay Kim
7c0d1f2273 update travis config 2019-03-14 10:25:34 -07:00
Nikolay Kim
d82bc7c52b Merge pull request #7 from najamelan/fix/compiler_warnings
Fix compiler warnings.
2019-03-14 07:24:37 -07:00
Nikolay Kim
265229b44b allow to override port 2019-03-13 22:55:01 -07:00
Nikolay Kim
38545dedc7 refactor Connect type and add tests 2019-03-13 22:51:31 -07:00
Nikolay Kim
6ebff22601 simplify name 2019-03-13 16:38:08 -07:00
Nikolay Kim
2c9b91b366 add specific constructors 2019-03-13 15:55:20 -07:00
Nikolay Kim
b483200037 remove generic 2019-03-13 15:52:51 -07:00
Nikolay Kim
a73600fbcd remove generic E 2019-03-13 15:51:21 -07:00
Nikolay Kim
084a28ca07 add Connect::with_request 2019-03-13 15:49:31 -07:00
Nikolay Kim
a7c74c53ea store request in Connect request 2019-03-13 15:37:12 -07:00
Nikolay Kim
3e7d737e73 update travis 2019-03-13 14:39:02 -07:00
Nikolay Kim
87db4bf741 changes 2019-03-13 14:38:33 -07:00
Nikolay Kim
8b0fe6f796 rename crate 2019-03-13 12:41:41 -07:00
Nikolay Kim
52a45fda53 redesign actix-connector 2019-03-13 12:40:11 -07:00
Naja Melan
2c7de7e0fb Fix compiler warnings.
Compiles in stable and nightly
2019-03-13 08:41:26 +01:00
Nikolay Kim
1fcc0734b5 prep release 2019-03-12 17:01:02 -07:00
183 changed files with 12012 additions and 10413 deletions

AppVeyor configuration (deleted file, 41 lines)

@@ -1,41 +0,0 @@
environment:
global:
PROJECT_NAME: actix-net
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test

.github/PULL_REQUEST_TEMPLATE.md (new file, 24 lines)

@@ -0,0 +1,24 @@
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
INSERT_PR_TYPE
## PR Checklist
Check your PR fulfills the following:
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt
## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->
<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->

.github/workflows/clippy-fmt.yml (new file, 34 lines)

@@ -0,0 +1,34 @@
on:
pull_request:
types: [opened, synchronize, reopened]
name: Clippy and rustfmt Check
jobs:
clippy_check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
components: rustfmt
profile: minimal
override: true
- name: Check with rustfmt
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
components: clippy
profile: minimal
override: true
- name: Check with Clippy
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --tests

.github/workflows/linux.yml (new file, 82 lines)

@@ -0,0 +1,82 @@
name: CI (Linux)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- 1.46.0
- stable
- nightly
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with:
command: generate-lockfile
- name: Cache cargo dirs
uses: actions/cache@v2
with:
path:
~/.cargo/registry
~/.cargo/git
~/.cargo/bin
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo build
uses: actions/cache@v2
with:
path: target
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-build-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture
- name: Generate coverage file
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml --workspace
- name: Upload to Codecov
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
uses: codecov/codecov-action@v1
with:
file: cobertura.xml
- name: Clear the cargo caches
run: |
rustup update stable
rustup override set stable
cargo install cargo-cache --no-default-features --features ci-autoclean
cargo-cache

43
.github/workflows/macos.yml vendored Normal file
View File

@@ -0,0 +1,43 @@
name: CI (macOS)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-apple-darwin
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture

45
.github/workflows/windows-mingw.yml vendored Normal file
View File

@@ -0,0 +1,45 @@
name: CI (Windows-mingw)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-pc-windows-gnu
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-pc-windows-gnu
profile: minimal
override: true
- name: Install MSYS2
uses: msys2/setup-msys2@v2
- name: Install packages
run: |
msys2 -c 'pacman -Sy --noconfirm pacman'
msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests

69
.github/workflows/windows.yml vendored Normal file
View File

@@ -0,0 +1,69 @@
name: CI (Windows)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
target:
- x86_64-pc-windows-msvc
- i686-pc-windows-msvc
name: ${{ matrix.version }} - ${{ matrix.target }}
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target }}
profile: minimal
override: true
- name: Install OpenSSL (x64)
if: matrix.target == 'x86_64-pc-windows-msvc'
run: |
vcpkg integrate install
vcpkg install openssl:x64-windows
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
- name: Install OpenSSL (x86)
if: matrix.target == 'i686-pc-windows-msvc'
run: |
vcpkg integrate install
vcpkg install openssl:x86-windows
Get-ChildItem C:\vcpkg\installed\x86-windows\bin
Get-ChildItem C:\vcpkg\installed\x86-windows\lib
Copy-Item C:\vcpkg\installed\x86-windows\bin\libcrypto-1_1.dll C:\vcpkg\installed\x86-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x86-windows\bin\libssl-1_1.dll C:\vcpkg\installed\x86-windows\bin\libssl.dll
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture

2
.gitignore vendored
View File

@@ -12,3 +12,5 @@ guide/build/
# These are backup files generated by rustfmt
**/*.rs.bk
.idea

View File

@@ -1,57 +0,0 @@
language: rust
sudo: required
dist: trusty
cache:
cargo: true
apt: true
matrix:
include:
- rust: stable
- rust: beta
- rust: nightly
allow_failures:
- rust: nightly
env:
global:
- RUSTFLAGS="-C link-dead-code"
- OPENSSL_VERSION=openssl-1.0.2
before_install:
- sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
- sudo apt-get update -qq
- sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
# Add clippy
before_script:
- export PATH=$PATH:~/.cargo/bin
script:
- |
if [[ "$TRAVIS_RUST_VERSION" != "nightly" ]]; then
cargo clean
cargo test --features="ssl,tls,rust-tls" -- --nocapture
cd actix-codec && cargo test && cd ..
cd actix-service && cargo test && cd ..
cd actix-server && cargo test --all-features -- --nocapture && cd ..
cd actix-rt && cargo test && cd ..
cd actix-connector && cargo test && cd ..
cd actix-utils && cargo test && cd ..
cd router && cargo test && cd ..
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin
cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml
bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage"
cd actix-service && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
cd actix-rt && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
cd actix-connector && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
cd actix-codec && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
cd actix-server && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
cd actix-utils && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
cd router && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
fi

View File

@@ -1,68 +0,0 @@
# Changes
## [0.3.0] - xxx
* Split `Service` trait to separate crate
* Use new `Service<Request>` trait
## [0.2.4] - 2018-11-21
### Added
* Allow skipping the name resolution stage in Connector
## [0.2.3] - 2018-11-17
### Added
* Framed::is_write_buf_empty() checks if write buffer is flushed
## [0.2.2] - 2018-11-14
### Added
* Add low/high caps to Framed
### Changed
* Refactor Connector and Resolver services
### Fixed
* Fix wrong service to socket binding
## [0.2.0] - 2018-11-08
### Added
* Timeout service
* Added ServiceConfig and ServiceRuntime for server service configuration
### Changed
* Connector has been refactored
* timer and LowResTimer renamed to time and LowResTime
* Refactored `Server::configure()` method
## [0.1.1] - 2018-10-10
### Changed
- Set actix min version - 0.7.5
- Set trust-dns min version
## [0.1.0] - 2018-10-08
* Initial impl

View File

@@ -34,10 +34,13 @@ This Code of Conduct applies both within project spaces and in public spaces whe
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
[@robjtede]: https://github.com/robjtede
[@JohnTitor]: https://github.com/JohnTitor
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

View File

@@ -1,38 +1,25 @@
[package]
name = "actix-net"
version = "0.3.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix net - framework for the compisible network services for Rust"
readme = "README.md"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-net/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018"
[workspace]
members = [
"actix-codec",
"actix-connector",
"actix-macros",
"actix-router",
"actix-rt",
"actix-service",
"actix-server",
"actix-server-config",
"actix-test-server",
"actix-service",
"actix-tls",
"actix-tracing",
"actix-utils",
"router",
"bytestring",
]
[dev-dependencies]
actix-service = "0.3.3"
actix-codec = "0.1.1"
actix-rt = "0.2.0"
actix-server = { path="actix-server", features=["ssl"] }
env_logger = "0.6"
futures = "0.1.25"
openssl = "0.10"
tokio-tcp = "0.1"
tokio-openssl = "0.3"
[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-macros = { path = "actix-macros" }
actix-router = { path = "actix-router" }
actix-rt = { path = "actix-rt" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }

View File

@@ -1,60 +1,26 @@
# Actix net [![Build Status](https://travis-ci.org/actix/actix-net.svg?branch=master)](https://travis-ci.org/actix/actix-net) [![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net) [![crates.io](https://meritbadge.herokuapp.com/actix-net)](https://crates.io/crates/actix-net) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Actix Net
Actix net - framework for composable network services
> A collection of lower-level libraries for composable network services.
## Documentation & community resources
![Apache 2.0 or MIT licensed](https://img.shields.io/crates/l/actix-server)
[![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
* [API Documentation (Development)](https://actix.rs/actix-net/actix_net/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-net](https://crates.io/crates/actix-net)
* Minimum supported Rust version: 1.32 or later
## Build statuses
| Platform | Build Status |
| ---------------- | ------------ |
| Linux | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Linux)") |
| macOS | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28macOS%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(macOS)") |
| Windows | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Windows%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows)") |
| Windows (MinGW) | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Windows-mingw%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows-mingw)") |
## Example
See `actix-server/examples` and `actix-tls/examples` for some basic examples.
```rust
fn main() -> io::Result<()> {
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder.set_private_key_file("./examples/key.pem", SslFiletype::PEM).unwrap();
builder.set_certificate_chain_file("./examples/cert.pem").unwrap();
let acceptor = builder.build();
let num = Arc::new(AtomicUsize::new(0));
// Bind socket address and start workers. By default the server uses the number of
// available logical CPUs as the thread count. Actix Net starts separate
// instances of the service pipeline in each worker.
Server::build()
.bind(
// configure service pipeline
"basic", "0.0.0.0:8443",
move || {
let num = num.clone();
let acceptor = acceptor.clone();
// service for converting incoming TcpStream to a SslStream<TcpStream>
fn_service(move |stream: Io<tokio_tcp::TcpStream>| {
SslAcceptorExt::accept_async(&acceptor, stream.into_parts().0)
.map_err(|e| println!("Openssl error: {}", e))
})
// .and_then() combinator uses other service to convert incoming `Request` to a
// `Response` and then uses that response as an input for next
// service. in this case, on success we use `logger` service
.and_then(fn_service(logger))
// Next service counts number of connections
.and_then(move |_| {
let num = num.fetch_add(1, Ordering::Relaxed);
println!("got ssl connection {:?}", num);
future::ok(())
})
},
)?
.run()
}
```
### MSRV
This repo's Minimum Supported Rust Version (MSRV) is 1.46.0.
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
@@ -64,6 +30,5 @@ at your option.
## Code of Conduct
Contribution to the actix-net crate is organized under the terms of the
Contributor Covenant, the maintainer of actix-net, @fafhrd91, promises to
intervene to uphold that code of conduct.
Contribution to the actix-net repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.
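A rough sketch of what one of the basic examples mentioned above can look like against the 2.0-beta builder API; the exact signatures, crate versions, and feature flags used here are assumptions and may differ from the shipped `actix-server/examples`:
```rust
// Sketch only: a one-worker TCP echo built with the Server::build() builder.
// Assumes actix-rt 2, actix-server 2.0-beta, actix-service 2.0-beta and
// tokio's "io-util" feature; adjust to the exact beta versions in use.
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;
use tokio::io::{AsyncReadExt, AsyncWriteExt};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .bind("echo", ("127.0.0.1", 8080), || {
            // The factory closure builds one service per worker; each accepted
            // connection is then passed to that service as a TcpStream.
            fn_service(|mut stream: TcpStream| async move {
                let mut buf = [0u8; 1024];
                let n = stream.read(&mut buf).await?;
                stream.write_all(&buf[..n]).await?;
                Ok::<_, std::io::Error>(())
            })
        })?
        .workers(1)
        .run()
        .await
}
```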

View File

@@ -1,10 +1,59 @@
# Changes
## [0.1.0] - 2019-03-06
## Unreleased - 2021-xx-xx
## 0.4.0-beta.1 - 2020-12-28
* Replace `pin-project` with `pin-project-lite`. [#237]
* Upgrade `tokio` dependency to `1`. [#237]
* Upgrade `tokio-util` dependency to `0.6`. [#237]
* Upgrade `bytes` dependency to `1`. [#237]
[#237]: https://github.com/actix/actix-net/pull/237
## 0.3.0 - 2020-08-23
* No changes from beta 2.
## 0.3.0-beta.2 - 2020-08-19
* Remove unused type parameter from `Framed::replace_codec`.
## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec` `.encode()` performance
* Simplify `BytesCodec` `.decode()`
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` to check emptiness of read buffer.
## 0.2.0 - 2019-12-10
* Use specific futures dependencies
## 0.2.0-alpha.4
* Fix buffer remaining capacity calculation
## 0.2.0-alpha.3
* Use tokio 0.2
* Fix low/high watermark for write/read buffers
## 0.2.0-alpha.2
* Migrated to `std::future`
## 0.1.2 - 2019-03-27
* Added `Framed::map_io()` method.
## 0.1.1 - 2019-03-06
* Added `FramedParts::with_read_buffer()` method.
## [0.1.0] - 2018-12-09
## 0.1.0 - 2018-12-09
* Move codec to separate crate
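A small sketch of the renamed accessors and the new buffer checks listed above, assuming the 0.4 `Framed` API that appears later in this diff:
```rust
// Sketch only: get_ref()/get_codec() are now io_ref()/codec_ref(), and the
// read-buffer emptiness check is the addition noted in 0.3.0-beta.1 above.
use actix_codec::{AsyncRead, AsyncWrite, BytesCodec, Framed};

fn describe<T: AsyncRead + AsyncWrite>(framed: &Framed<T, BytesCodec>) -> String {
    format!(
        "codec: {:?}, read buf empty: {}, write buf empty: {}",
        framed.codec_ref(),
        framed.is_read_buf_empty(),
        framed.is_write_buf_empty(),
    )
}
```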

View File

@@ -1,25 +1,26 @@
[package]
name = "actix-codec"
version = "0.1.1"
version = "0.4.0-beta.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Utilities for encoding and decoding frames"
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec/"
documentation = "https://docs.rs/actix-codec"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
edition = "2018"
workspace = "../"
[lib]
name = "actix_codec"
path = "src/lib.rs"
[dependencies]
bytes = "0.4"
futures = "0.1.24"
tokio-io = "0.1"
tokio-codec = "0.1"
log = "0.4"
bitflags = "1.2.1"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
log = "0.4"
pin-project-lite = "0.2"
tokio = "1"
tokio-util = { version = "0.6", features = ["codec", "io"] }

1
actix-codec/LICENSE-APACHE Symbolic link
View File

@@ -0,0 +1 @@
../LICENSE-APACHE

1
actix-codec/LICENSE-MIT Symbolic link
View File

@@ -0,0 +1 @@
../LICENSE-MIT

View File

@@ -1,7 +1,7 @@
use bytes::{Buf, Bytes, BytesMut};
use std::io;
use bytes::{Bytes, BytesMut};
use tokio_codec::{Decoder, Encoder};
use super::{Decoder, Encoder};
/// Bytes codec.
///
@@ -9,12 +9,12 @@ use tokio_codec::{Decoder, Encoder};
#[derive(Debug, Copy, Clone)]
pub struct BytesCodec;
impl Encoder for BytesCodec {
type Item = Bytes;
impl Encoder<Bytes> for BytesCodec {
type Error = io::Error;
#[inline]
fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> Result<(), Self::Error> {
dst.extend_from_slice(&item[..]);
dst.extend_from_slice(item.chunk());
Ok(())
}
}
@@ -27,7 +27,7 @@ impl Decoder for BytesCodec {
if src.is_empty() {
Ok(None)
} else {
Ok(Some(src.take()))
Ok(Some(src.split()))
}
}
}
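A minimal sketch exercising the updated codec in isolation, assuming the `Encoder<Bytes>`/`Decoder` impls above and a `bytes` 1.x dependency:
```rust
// Sketch only: round-trip a payload through BytesCodec without any I/O.
use actix_codec::{BytesCodec, Decoder, Encoder};
use bytes::{Bytes, BytesMut};

fn main() -> std::io::Result<()> {
    let mut codec = BytesCodec;
    let mut buf = BytesMut::new();

    // encode() appends the payload to the destination buffer.
    codec.encode(Bytes::from_static(b"ping"), &mut buf)?;

    // decode() splits off everything currently buffered.
    let frame = codec.decode(&mut buf)?;
    assert_eq!(frame.as_deref(), Some(&b"ping"[..]));

    Ok(())
}
```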

View File

@@ -1,118 +1,73 @@
#![allow(deprecated)]
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io};
use std::fmt;
use std::io::{self, Read, Write};
use bytes::{Buf, BytesMut};
use futures_core::{ready, Stream};
use futures_sink::Sink;
use bytes::BytesMut;
use futures::{Poll, Sink, StartSend, Stream};
use tokio_codec::{Decoder, Encoder};
use tokio_io::{AsyncRead, AsyncWrite};
use super::framed_read::{framed_read2, framed_read2_with_buffer, FramedRead2};
use super::framed_write::{framed_write2, framed_write2_with_buffer, FramedWrite2};
use crate::{AsyncRead, AsyncWrite, Decoder, Encoder};
/// Low-water mark
const LW: usize = 1024;
/// High-water mark
const HW: usize = 8 * 1024;
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
/// the `Encoder` and `Decoder` traits to encode and decode frames.
///
/// You can create a `Framed` instance by using the `AsyncRead::framed` adapter.
pub struct Framed<T, U> {
inner: FramedRead2<FramedWrite2<Fuse<T, U>>>,
bitflags::bitflags! {
struct Flags: u8 {
const EOF = 0b0001;
const READABLE = 0b0010;
}
}
pub struct Fuse<T, U>(pub T, pub U);
pin_project_lite::pin_project! {
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
/// the `Encoder` and `Decoder` traits to encode and decode frames.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Encoder`/`Decoder`
/// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
pub struct Framed<T, U> {
#[pin]
io: T,
codec: U,
flags: Flags,
read_buf: BytesMut,
write_buf: BytesMut,
}
}
impl<T, U> Framed<T, U>
where
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder,
U: Decoder,
{
/// Provides a `Stream` and `Sink` interface for reading and writing to this
/// `Io` object, using `Decode` and `Encode` to read and write the raw data.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Codec`
/// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
///
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// If you want to work more directly with the streams and sink, consider
/// calling `split` on the `Framed` returned by this method, which will
/// break them into separate objects, allowing them to interact more easily.
pub fn new(inner: T, codec: U) -> Framed<T, U> {
pub fn new(io: T, codec: U) -> Framed<T, U> {
Framed {
inner: framed_read2(framed_write2(Fuse(inner, codec), LW, HW)),
io,
codec,
flags: Flags::empty(),
read_buf: BytesMut::with_capacity(HW),
write_buf: BytesMut::with_capacity(HW),
}
}
/// Same as `Framed::new()` with ability to specify write buffer low/high capacity watermarks.
pub fn new_with_caps(inner: T, codec: U, lw: usize, hw: usize) -> Framed<T, U> {
debug_assert!((lw < hw) && hw != 0);
Framed {
inner: framed_read2(framed_write2(Fuse(inner, codec), lw, hw)),
}
}
/// Force send item
pub fn force_send(
&mut self,
item: <U as Encoder>::Item,
) -> Result<(), <U as Encoder>::Error> {
self.inner.get_mut().force_send(item)
}
}
impl<T, U> Framed<T, U> {
/// Provides a `Stream` and `Sink` interface for reading and writing to this
/// `Io` object, using `Decode` and `Encode` to read and write the raw data.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Codec`
/// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
///
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// This object takes a stream, a read buffer and a write buffer. These
/// fields can be obtained from an existing `Framed` with the
/// `into_parts` method.
///
/// If you want to work more directly with the streams and sink, consider
/// calling `split` on the `Framed` returned by this method, which will
/// break them into separate objects, allowing them to interact more easily.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
inner: framed_read2_with_buffer(
framed_write2_with_buffer(
Fuse(parts.io, parts.codec),
parts.write_buf,
parts.write_buf_lw,
parts.write_buf_hw,
),
parts.read_buf,
),
}
}
/// Returns a reference to the underlying codec.
pub fn get_codec(&self) -> &U {
&self.inner.get_ref().get_ref().1
pub fn codec_ref(&self) -> &U {
&self.codec
}
/// Returns a mutable reference to the underlying codec.
pub fn get_codec_mut(&mut self) -> &mut U {
&mut self.inner.get_mut().get_mut().1
pub fn codec_mut(&mut self) -> &mut U {
&mut self.codec
}
/// Returns a reference to the underlying I/O stream wrapped by
@@ -121,65 +76,285 @@ impl<T, U> Framed<T, U> {
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_ref(&self) -> &T {
&self.inner.get_ref().get_ref().0
pub fn io_ref(&self) -> &T {
&self.io
}
/// Returns a mutable reference to the underlying I/O stream wrapped by
/// `Frame`.
/// Returns a mutable reference to the underlying I/O stream.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner.get_mut().get_mut().0
pub fn io_mut(&mut self) -> &mut T {
&mut self.io
}
/// Returns a `Pin` of a mutable reference to the underlying I/O stream.
pub fn io_pin(self: Pin<&mut Self>) -> Pin<&mut T> {
self.project().io
}
/// Check if read buffer is empty.
pub fn is_read_buf_empty(&self) -> bool {
self.read_buf.is_empty()
}
/// Check if write buffer is empty.
pub fn is_write_buf_empty(&self) -> bool {
self.inner.get_ref().is_empty()
self.write_buf.is_empty()
}
/// Check if write buffer is full.
pub fn is_write_buf_full(&self) -> bool {
self.inner.get_ref().is_full()
self.write_buf.len() >= HW
}
/// Consumes the `Frame`, returning its underlying I/O stream.
/// Check if framed is able to write more data.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_inner(self) -> T {
self.inner.into_inner().into_inner().0
/// `Framed` object is considered ready if there is free space in the write buffer.
pub fn is_write_ready(&self) -> bool {
self.write_buf.len() < HW
}
/// Consume the `Frame`, returning `Frame` with different codec.
pub fn into_framed<U2>(self, codec: U2) -> Framed<T, U2> {
let (inner, read_buf) = self.inner.into_parts();
let (inner, write_buf, lw, hw) = inner.into_parts();
pub fn replace_codec<U2>(self, codec: U2) -> Framed<T, U2> {
Framed {
inner: framed_read2_with_buffer(
framed_write2_with_buffer(Fuse(inner.0, codec), write_buf, lw, hw),
read_buf,
),
codec,
io: self.io,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
/// Consume the `Frame`, returning `Frame` with different io.
pub fn into_map_io<F, T2>(self, f: F) -> Framed<T2, U>
where
F: Fn(T) -> T2,
{
Framed {
io: f(self.io),
codec: self.codec,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
/// Consume the `Frame`, returning `Frame` with different codec.
pub fn map_codec<F, U2>(self, f: F) -> Framed<T, U2>
pub fn into_map_codec<F, U2>(self, f: F) -> Framed<T, U2>
where
F: Fn(U) -> U2,
{
let (inner, read_buf) = self.inner.into_parts();
let (inner, write_buf, lw, hw) = inner.into_parts();
Framed {
inner: framed_read2_with_buffer(
framed_write2_with_buffer(Fuse(inner.0, f(inner.1)), write_buf, lw, hw),
read_buf,
),
io: self.io,
codec: f(self.codec),
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
}
impl<T, U> Framed<T, U> {
/// Serialize item and write it to the inner buffer.
pub fn write<I>(mut self: Pin<&mut Self>, item: I) -> Result<(), <U as Encoder<I>>::Error>
where
T: AsyncWrite,
U: Encoder<I>,
{
let this = self.as_mut().project();
let remaining = this.write_buf.capacity() - this.write_buf.len();
if remaining < LW {
this.write_buf.reserve(HW - remaining);
}
this.codec.encode(item, this.write_buf)?;
Ok(())
}
/// Try to read underlying I/O stream and decode item.
pub fn next_item(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<<U as Decoder>::Item, U::Error>>>
where
T: AsyncRead,
U: Decoder,
{
loop {
let mut this = self.as_mut().project();
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if this.flags.contains(Flags::READABLE) {
if this.flags.contains(Flags::EOF) {
match this.codec.decode_eof(&mut this.read_buf) {
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
Ok(None) => return Poll::Ready(None),
Err(e) => return Poll::Ready(Some(Err(e))),
}
}
log::trace!("attempting to decode a frame");
match this.codec.decode(&mut this.read_buf) {
Ok(Some(frame)) => {
log::trace!("frame decoded from buffer");
return Poll::Ready(Some(Ok(frame)));
}
Err(e) => return Poll::Ready(Some(Err(e))),
_ => (), // Need more data
}
this.flags.remove(Flags::READABLE);
}
debug_assert!(!this.flags.contains(Flags::EOF));
// Otherwise, try to read more data and try again. Make sure we've got room
let remaining = this.read_buf.capacity() - this.read_buf.len();
if remaining < LW {
this.read_buf.reserve(HW - remaining)
}
let cnt = match tokio_util::io::poll_read_buf(this.io, cx, this.read_buf) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e.into()))),
Poll::Ready(Ok(cnt)) => cnt,
};
if cnt == 0 {
this.flags.insert(Flags::EOF);
}
this.flags.insert(Flags::READABLE);
}
}
/// Flush write buffer to underlying I/O stream.
pub fn flush<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
{
let mut this = self.as_mut().project();
log::trace!("flushing framed transport");
while !this.write_buf.is_empty() {
log::trace!("writing; remaining={}", this.write_buf.len());
let n = ready!(this.io.as_mut().poll_write(cx, this.write_buf))?;
if n == 0 {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)
.into()));
}
// remove written data
this.write_buf.advance(n);
}
// Try flushing the underlying IO
ready!(this.io.poll_flush(cx))?;
log::trace!("framed transport flushed");
Poll::Ready(Ok(()))
}
/// Flush write buffer and shutdown underlying I/O stream.
pub fn close<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
{
let mut this = self.as_mut().project();
ready!(this.io.as_mut().poll_flush(cx))?;
ready!(this.io.as_mut().poll_shutdown(cx))?;
Poll::Ready(Ok(()))
}
}
impl<T, U> Stream for Framed<T, U>
where
T: AsyncRead,
U: Decoder,
{
type Item = Result<U::Item, U::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.next_item(cx)
}
}
impl<T, U, I> Sink<I> for Framed<T, U>
where
T: AsyncWrite,
U: Encoder<I>,
U::Error: From<io::Error>,
{
type Error = U::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.is_write_ready() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> {
self.write(item)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.close(cx)
}
}
impl<T, U> fmt::Debug for Framed<T, U>
where
T: fmt::Debug,
U: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Framed")
.field("io", &self.io)
.field("codec", &self.codec)
.finish()
}
}
impl<T, U> Framed<T, U> {
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// These objects take a stream, a read buffer and a write buffer. These
/// fields can be obtained from an existing `Framed` with the `into_parts` method.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
io: parts.io,
codec: parts.codec,
flags: parts.flags,
write_buf: parts.write_buf,
read_buf: parts.read_buf,
}
}
@@ -190,124 +365,16 @@ impl<T, U> Framed<T, U> {
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_parts(self) -> FramedParts<T, U> {
let (inner, read_buf) = self.inner.into_parts();
let (inner, write_buf, write_buf_lw, write_buf_hw) = inner.into_parts();
FramedParts {
io: inner.0,
codec: inner.1,
read_buf,
write_buf,
write_buf_lw,
write_buf_hw,
_priv: (),
io: self.io,
codec: self.codec,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
}
impl<T, U> Stream for Framed<T, U>
where
T: AsyncRead,
U: Decoder,
{
type Item = U::Item;
type Error = U::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.inner.poll()
}
}
impl<T, U> Sink for Framed<T, U>
where
T: AsyncWrite,
U: Encoder,
U::Error: From<io::Error>,
{
type SinkItem = U::Item;
type SinkError = U::Error;
fn start_send(
&mut self,
item: Self::SinkItem,
) -> StartSend<Self::SinkItem, Self::SinkError> {
self.inner.get_mut().start_send(item)
}
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
self.inner.get_mut().poll_complete()
}
fn close(&mut self) -> Poll<(), Self::SinkError> {
self.inner.get_mut().close()
}
}
impl<T, U> fmt::Debug for Framed<T, U>
where
T: fmt::Debug,
U: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Framed")
.field("io", &self.inner.get_ref().get_ref().0)
.field("codec", &self.inner.get_ref().get_ref().1)
.finish()
}
}
// ===== impl Fuse =====
impl<T: Read, U> Read for Fuse<T, U> {
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
self.0.read(dst)
}
}
impl<T: AsyncRead, U> AsyncRead for Fuse<T, U> {
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
self.0.prepare_uninitialized_buffer(buf)
}
}
impl<T: Write, U> Write for Fuse<T, U> {
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
self.0.write(src)
}
fn flush(&mut self) -> io::Result<()> {
self.0.flush()
}
}
impl<T: AsyncWrite, U> AsyncWrite for Fuse<T, U> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.0.shutdown()
}
}
impl<T, U: Decoder> Decoder for Fuse<T, U> {
type Item = U::Item;
type Error = U::Error;
fn decode(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
self.1.decode(buffer)
}
fn decode_eof(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
self.1.decode_eof(buffer)
}
}
impl<T, U: Encoder> Encoder for Fuse<T, U> {
type Item = U::Item;
type Error = U::Error;
fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
self.1.encode(item, dst)
}
}
/// `FramedParts` contains an export of the data of a Framed transport.
/// It can be used to construct a new `Framed` with a different codec.
/// It contains all current buffers and the inner transport.
@@ -325,15 +392,7 @@ pub struct FramedParts<T, U> {
/// A buffer with unprocessed data which are not written yet.
pub write_buf: BytesMut,
/// A buffer low watermark capacity
pub write_buf_lw: usize,
/// A buffer high watermark capacity
pub write_buf_hw: usize,
/// This private field allows us to add additional fields in the future in a
/// backwards compatible way.
_priv: (),
flags: Flags,
}
impl<T, U> FramedParts<T, U> {
@@ -342,11 +401,9 @@ impl<T, U> FramedParts<T, U> {
FramedParts {
io,
codec,
flags: Flags::empty(),
read_buf: BytesMut::new(),
write_buf: BytesMut::new(),
write_buf_lw: LW,
write_buf_hw: HW,
_priv: (),
}
}
@@ -356,10 +413,8 @@ impl<T, U> FramedParts<T, U> {
io,
codec,
read_buf,
flags: Flags::empty(),
write_buf: BytesMut::new(),
write_buf_lw: LW,
write_buf_hw: HW,
_priv: (),
}
}
}
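A rough usage sketch of the rewritten `Framed` as a combined `Stream`/`Sink`; it assumes an actix-rt 2 runtime, `futures-util` for the `send`/`next` combinators, and tokio's `io-util` feature for the in-memory duplex pipe, none of which are requirements stated in this diff:
```rust
// Sketch only: drive Framed over an in-memory pipe instead of a real socket.
use actix_codec::{BytesCodec, Framed};
use bytes::Bytes;
use futures_util::{SinkExt, StreamExt};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    let (a, b) = tokio::io::duplex(64);
    let mut tx = Framed::new(a, BytesCodec);
    let mut rx = Framed::new(b, BytesCodec);

    // send() encodes into the write buffer and flushes it to the underlying I/O.
    tx.send(Bytes::from_static(b"hello")).await?;

    // next() fills the read buffer from the underlying I/O and decodes one frame.
    let frame = rx.next().await.expect("expected one frame")?;
    assert_eq!(&frame[..], b"hello");

    Ok(())
}
```
The same pattern applies over a real `TcpStream`; only the transport type changes.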

View File

@@ -1,218 +0,0 @@
use std::fmt;
use bytes::BytesMut;
use futures::{try_ready, Async, Poll, Sink, StartSend, Stream};
use log::trace;
use tokio_codec::Decoder;
use tokio_io::AsyncRead;
use super::framed::Fuse;
/// A `Stream` of messages decoded from an `AsyncRead`.
pub struct FramedRead<T, D> {
inner: FramedRead2<Fuse<T, D>>,
}
pub struct FramedRead2<T> {
inner: T,
eof: bool,
is_readable: bool,
buffer: BytesMut,
}
const INITIAL_CAPACITY: usize = 8 * 1024;
// ===== impl FramedRead =====
impl<T, D> FramedRead<T, D>
where
T: AsyncRead,
D: Decoder,
{
/// Creates a new `FramedRead` with the given `decoder`.
pub fn new(inner: T, decoder: D) -> FramedRead<T, D> {
FramedRead {
inner: framed_read2(Fuse(inner, decoder)),
}
}
}
impl<T, D> FramedRead<T, D> {
/// Returns a reference to the underlying I/O stream wrapped by
/// `FramedRead`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_ref(&self) -> &T {
&self.inner.inner.0
}
/// Returns a mutable reference to the underlying I/O stream wrapped by
/// `FramedRead`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner.inner.0
}
/// Consumes the `FramedRead`, returning its underlying I/O stream.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_inner(self) -> T {
self.inner.inner.0
}
/// Returns a reference to the underlying decoder.
pub fn decoder(&self) -> &D {
&self.inner.inner.1
}
/// Returns a mutable reference to the underlying decoder.
pub fn decoder_mut(&mut self) -> &mut D {
&mut self.inner.inner.1
}
}
impl<T, D> Stream for FramedRead<T, D>
where
T: AsyncRead,
D: Decoder,
{
type Item = D::Item;
type Error = D::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.inner.poll()
}
}
impl<T, D> Sink for FramedRead<T, D>
where
T: Sink,
{
type SinkItem = T::SinkItem;
type SinkError = T::SinkError;
fn start_send(
&mut self,
item: Self::SinkItem,
) -> StartSend<Self::SinkItem, Self::SinkError> {
self.inner.inner.0.start_send(item)
}
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
self.inner.inner.0.poll_complete()
}
fn close(&mut self) -> Poll<(), Self::SinkError> {
self.inner.inner.0.close()
}
}
impl<T, D> fmt::Debug for FramedRead<T, D>
where
T: fmt::Debug,
D: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("FramedRead")
.field("inner", &self.inner.inner.0)
.field("decoder", &self.inner.inner.1)
.field("eof", &self.inner.eof)
.field("is_readable", &self.inner.is_readable)
.field("buffer", &self.inner.buffer)
.finish()
}
}
// ===== impl FramedRead2 =====
pub fn framed_read2<T>(inner: T) -> FramedRead2<T> {
FramedRead2 {
inner,
eof: false,
is_readable: false,
buffer: BytesMut::with_capacity(INITIAL_CAPACITY),
}
}
pub fn framed_read2_with_buffer<T>(inner: T, mut buf: BytesMut) -> FramedRead2<T> {
if buf.capacity() < INITIAL_CAPACITY {
let bytes_to_reserve = INITIAL_CAPACITY - buf.capacity();
buf.reserve(bytes_to_reserve);
}
FramedRead2 {
inner,
eof: false,
is_readable: !buf.is_empty(),
buffer: buf,
}
}
impl<T> FramedRead2<T> {
pub fn get_ref(&self) -> &T {
&self.inner
}
pub fn into_inner(self) -> T {
self.inner
}
pub fn into_parts(self) -> (T, BytesMut) {
(self.inner, self.buffer)
}
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
}
impl<T> Stream for FramedRead2<T>
where
T: AsyncRead + Decoder,
{
type Item = T::Item;
type Error = T::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
loop {
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if self.is_readable {
if self.eof {
let frame = self.inner.decode_eof(&mut self.buffer)?;
return Ok(Async::Ready(frame));
}
trace!("attempting to decode a frame");
if let Some(frame) = self.inner.decode(&mut self.buffer)? {
trace!("frame decoded from buffer");
return Ok(Async::Ready(Some(frame)));
}
self.is_readable = false;
}
assert!(!self.eof);
// Otherwise, try to read more data and try again. Make sure we've
// got room for at least one byte to read to ensure that we don't
// get a spurious 0 that looks like EOF
self.buffer.reserve(1);
if 0 == try_ready!(self.inner.read_buf(&mut self.buffer)) {
self.eof = true;
}
self.is_readable = true;
}
}
}

View File

@@ -1,303 +0,0 @@
use std::fmt;
use std::io::{self, Read};
use bytes::BytesMut;
use futures::{try_ready, Async, AsyncSink, Poll, Sink, StartSend, Stream};
use log::trace;
use tokio_codec::{Decoder, Encoder};
use tokio_io::{AsyncRead, AsyncWrite};
use super::framed::Fuse;
/// A `Sink` of frames encoded to an `AsyncWrite`.
pub struct FramedWrite<T, E> {
inner: FramedWrite2<Fuse<T, E>>,
}
pub struct FramedWrite2<T> {
inner: T,
buffer: BytesMut,
low_watermark: usize,
high_watermark: usize,
}
impl<T, E> FramedWrite<T, E>
where
T: AsyncWrite,
E: Encoder,
{
/// Creates a new `FramedWrite` with the given `encoder`.
pub fn new(inner: T, encoder: E, lw: usize, hw: usize) -> FramedWrite<T, E> {
FramedWrite {
inner: framed_write2(Fuse(inner, encoder), lw, hw),
}
}
}
impl<T, E> FramedWrite<T, E> {
/// Returns a reference to the underlying I/O stream wrapped by
/// `FramedWrite`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_ref(&self) -> &T {
&self.inner.inner.0
}
/// Returns a mutable reference to the underlying I/O stream wrapped by
/// `FramedWrite`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner.inner.0
}
/// Consumes the `FramedWrite`, returning its underlying I/O stream.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_inner(self) -> T {
self.inner.inner.0
}
/// Returns a reference to the underlying decoder.
pub fn encoder(&self) -> &E {
&self.inner.inner.1
}
/// Returns a mutable reference to the underlying decoder.
pub fn encoder_mut(&mut self) -> &mut E {
&mut self.inner.inner.1
}
/// Check if write buffer is full
pub fn is_full(&self) -> bool {
self.inner.is_full()
}
/// Check if write buffer is empty.
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl<T, E> FramedWrite<T, E>
where
E: Encoder,
{
/// Force send item
pub fn force_send(&mut self, item: E::Item) -> Result<(), E::Error> {
self.inner.force_send(item)
}
}
impl<T, E> Sink for FramedWrite<T, E>
where
T: AsyncWrite,
E: Encoder,
{
type SinkItem = E::Item;
type SinkError = E::Error;
fn start_send(&mut self, item: E::Item) -> StartSend<E::Item, E::Error> {
self.inner.start_send(item)
}
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
self.inner.poll_complete()
}
fn close(&mut self) -> Poll<(), Self::SinkError> {
Ok(self.inner.close()?)
}
}
impl<T, D> Stream for FramedWrite<T, D>
where
T: Stream,
{
type Item = T::Item;
type Error = T::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.inner.inner.0.poll()
}
}
impl<T, U> fmt::Debug for FramedWrite<T, U>
where
T: fmt::Debug,
U: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("FramedWrite")
.field("inner", &self.inner.get_ref().0)
.field("encoder", &self.inner.get_ref().1)
.field("buffer", &self.inner.buffer)
.finish()
}
}
// ===== impl FramedWrite2 =====
pub fn framed_write2<T>(
inner: T,
low_watermark: usize,
high_watermark: usize,
) -> FramedWrite2<T> {
FramedWrite2 {
inner,
low_watermark,
high_watermark,
buffer: BytesMut::with_capacity(high_watermark),
}
}
pub fn framed_write2_with_buffer<T>(
inner: T,
mut buffer: BytesMut,
low_watermark: usize,
high_watermark: usize,
) -> FramedWrite2<T> {
if buffer.capacity() < high_watermark {
let bytes_to_reserve = high_watermark - buffer.capacity();
buffer.reserve(bytes_to_reserve);
}
FramedWrite2 {
inner,
buffer,
low_watermark,
high_watermark,
}
}
impl<T> FramedWrite2<T> {
pub fn get_ref(&self) -> &T {
&self.inner
}
pub fn into_inner(self) -> T {
self.inner
}
pub fn into_parts(self) -> (T, BytesMut, usize, usize) {
(
self.inner,
self.buffer,
self.low_watermark,
self.high_watermark,
)
}
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
pub fn is_full(&self) -> bool {
self.buffer.len() >= self.high_watermark
}
pub fn is_empty(&self) -> bool {
self.buffer.is_empty()
}
}
impl<T> FramedWrite2<T>
where
T: Encoder,
{
pub fn force_send(&mut self, item: T::Item) -> Result<(), T::Error> {
let len = self.buffer.len();
if len < self.low_watermark {
self.buffer.reserve(self.high_watermark - len)
}
self.inner.encode(item, &mut self.buffer)?;
Ok(())
}
}
impl<T> Sink for FramedWrite2<T>
where
T: AsyncWrite + Encoder,
{
type SinkItem = T::Item;
type SinkError = T::Error;
fn start_send(&mut self, item: T::Item) -> StartSend<T::Item, T::Error> {
// Check the buffer capacity
let len = self.buffer.len();
if len >= self.high_watermark {
return Ok(AsyncSink::NotReady(item));
}
if len < self.low_watermark {
self.buffer.reserve(self.high_watermark - len)
}
self.inner.encode(item, &mut self.buffer)?;
Ok(AsyncSink::Ready)
}
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
trace!("flushing framed transport");
while !self.buffer.is_empty() {
trace!("writing; remaining={}", self.buffer.len());
let n = try_ready!(self.inner.poll_write(&self.buffer));
if n == 0 {
return Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to \
write frame to transport",
)
.into());
}
// TODO: Add a way to `bytes` to do this w/o returning the drained
// data.
let _ = self.buffer.split_to(n);
}
// Try flushing the underlying IO
try_ready!(self.inner.poll_flush());
trace!("framed transport flushed");
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), Self::SinkError> {
try_ready!(self.poll_complete());
Ok(self.inner.shutdown()?)
}
}
impl<T: Decoder> Decoder for FramedWrite2<T> {
type Item = T::Item;
type Error = T::Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<T::Item>, T::Error> {
self.inner.decode(src)
}
fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<T::Item>, T::Error> {
self.inner.decode_eof(src)
}
}
impl<T: Read> Read for FramedWrite2<T> {
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
self.inner.read(dst)
}
}
impl<T: AsyncRead> AsyncRead for FramedWrite2<T> {
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
self.inner.prepare_uninitialized_buffer(buf)
}
}

View File

@@ -1,24 +1,23 @@
//! Utilities for encoding and decoding frames.
//! Codec utilities for working with framed protocols.
//!
//! Contains adapters to go from streams of bytes, [`AsyncRead`] and
//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`].
//! Framed streams are also known as [transports].
//! Framed streams are also known as `transports`.
//!
//! [`AsyncRead`]: #
//! [`AsyncWrite`]: #
//! [`Sink`]: #
//! [`Stream`]: #
//! [transports]: #
//! [`Sink`]: futures_sink::Sink
//! [`Stream`]: futures_core::Stream
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod bcodec;
mod framed;
mod framed_read;
mod framed_write;
pub use self::bcodec::BytesCodec;
pub use self::framed::{Framed, FramedParts};
pub use self::framed_read::FramedRead;
pub use self::framed_write::FramedWrite;
pub use tokio_codec::{Decoder, Encoder};
pub use tokio_io::{AsyncRead, AsyncWrite};
pub use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
pub use tokio_util::codec::{Decoder, Encoder};
pub use tokio_util::io::poll_read_buf;
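As a sketch of how the re-exported traits compose, here is a hypothetical newline-delimited codec; `LineCodec` is illustrative only and not part of this crate:
```rust
// Sketch only: frame '\n'-delimited UTF-8 lines with the re-exported traits.
use actix_codec::{Decoder, Encoder};
use bytes::{Buf, BufMut, BytesMut};
use std::io;

struct LineCodec;

impl Decoder for LineCodec {
    type Item = String;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<String>, io::Error> {
        // Leave the buffer untouched until a complete line has arrived.
        match src.iter().position(|&b| b == b'\n') {
            Some(idx) => {
                let line = src.split_to(idx);
                src.advance(1); // drop the delimiter
                String::from_utf8(line.to_vec())
                    .map(Some)
                    .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
            }
            None => Ok(None), // need more data
        }
    }
}

impl Encoder<String> for LineCodec {
    type Error = io::Error;

    fn encode(&mut self, item: String, dst: &mut BytesMut) -> Result<(), io::Error> {
        dst.reserve(item.len() + 1);
        dst.put_slice(item.as_bytes());
        dst.put_u8(b'\n');
        Ok(())
    }
}
```
Such a codec plugs straight into `Framed::new(io, LineCodec)` in the same way `BytesCodec` does.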

View File

@@ -1,28 +0,0 @@
# Changes
## [0.3.0] - 2019-03-02
### Changed
* Migrate to actix-service 0.3
## [0.2.0] - 2019-02-01
### Changed
* Migrate to actix-service 0.2
* Upgrade trust-dns-resolver
* Use tokio-current-thread instead of direct actix-rt dependency
## [0.1.1] - 2019-01-13
* Upgrade trust-dns-proto
## [0.1.0] - 2018-12-09
* Move server to separate crate

View File

@@ -1,39 +0,0 @@
[package]
name = "actix-connector"
version = "0.3.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Connector - tcp connector service"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-net/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018"
workspace = ".."
[package.metadata.docs.rs]
features = ["ssl"]
[lib]
name = "actix_connector"
path = "src/lib.rs"
[features]
default = []
# openssl
ssl = ["openssl", "tokio-openssl"]
[dependencies]
actix-service = "0.3.3"
actix-codec = "0.1.1"
futures = "0.1"
tokio-tcp = "0.1"
tokio-current-thread = "0.1"
trust-dns-resolver = { version="0.11.0-alpha.2", default-features = false }
# openssl
openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.3", optional = true }

View File

@@ -1,386 +0,0 @@
use std::collections::VecDeque;
use std::marker::PhantomData;
use std::net::{IpAddr, SocketAddr};
use std::time::Duration;
use std::{fmt, io};
use actix_service::{fn_factory, NewService, Service};
use futures::future::{ok, Either};
use futures::{try_ready, Async, Future, Poll};
use tokio_tcp::{ConnectFuture, TcpStream};
use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
use trust_dns_resolver::system_conf::read_system_conf;
use super::resolver::{RequestHost, ResolveError, Resolver, ResolverFuture};
/// Port of the request
pub trait RequestPort {
fn port(&self) -> u16;
}
// #[derive(Fail, Debug)]
#[derive(Debug)]
pub enum ConnectorError {
/// Failed to resolve the hostname
// #[fail(display = "Failed resolving hostname: {}", _0)]
Resolver(ResolveError),
/// No dns records
// #[fail(display = "No dns records found for the input")]
NoRecords,
/// Connecting took too long
// #[fail(display = "Timeout out while establishing connection")]
Timeout,
/// Invalid input
InvalidInput,
/// Connection io error
// #[fail(display = "{}", _0)]
IoError(io::Error),
}
impl From<ResolveError> for ConnectorError {
fn from(err: ResolveError) -> Self {
ConnectorError::Resolver(err)
}
}
impl From<io::Error> for ConnectorError {
fn from(err: io::Error) -> Self {
ConnectorError::IoError(err)
}
}
/// Connect request
#[derive(Eq, PartialEq, Debug, Hash)]
pub struct Connect {
pub kind: ConnectKind,
pub timeout: Duration,
}
#[derive(Eq, PartialEq, Debug, Hash)]
pub enum ConnectKind {
Host { host: String, port: u16 },
Addr { host: String, addr: SocketAddr },
}
impl Connect {
/// Create new `Connect` instance.
pub fn new<T: AsRef<str>>(host: T, port: u16) -> Connect {
Connect {
kind: ConnectKind::Host {
host: host.as_ref().to_owned(),
port,
},
timeout: Duration::from_secs(1),
}
}
/// Create a `Connect` instance by splitting the string at ':' and converting the second part to a u16
pub fn with<T: AsRef<str>>(host: T) -> Result<Connect, ConnectorError> {
let mut parts_iter = host.as_ref().splitn(2, ':');
let host = parts_iter.next().ok_or(ConnectorError::InvalidInput)?;
let port_str = parts_iter.next().unwrap_or("");
let port = port_str
.parse::<u16>()
.map_err(|_| ConnectorError::InvalidInput)?;
Ok(Connect {
kind: ConnectKind::Host {
host: host.to_owned(),
port,
},
timeout: Duration::from_secs(1),
})
}
/// Create a new `Connect` instance from a host and address. The connector skips the name resolution stage for such connect messages.
pub fn with_address<T: Into<String>>(host: T, addr: SocketAddr) -> Connect {
Connect {
kind: ConnectKind::Addr {
addr,
host: host.into(),
},
timeout: Duration::from_secs(1),
}
}
/// Set connect timeout
///
/// By default the timeout is set to 1 second.
pub fn timeout(mut self, timeout: Duration) -> Connect {
self.timeout = timeout;
self
}
}
impl RequestHost for Connect {
fn host(&self) -> &str {
match self.kind {
ConnectKind::Host { ref host, .. } => host,
ConnectKind::Addr { ref host, .. } => host,
}
}
}
impl RequestPort for Connect {
fn port(&self) -> u16 {
match self.kind {
ConnectKind::Host { port, .. } => port,
ConnectKind::Addr { addr, .. } => addr.port(),
}
}
}
impl fmt::Display for Connect {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}:{}", self.host(), self.port())
}
}
/// Tcp connector
pub struct Connector {
resolver: Resolver<Connect>,
}
impl Default for Connector {
fn default() -> Self {
let (cfg, opts) = if let Ok((cfg, opts)) = read_system_conf() {
(cfg, opts)
} else {
(ResolverConfig::default(), ResolverOpts::default())
};
Connector::new(cfg, opts)
}
}
impl Connector {
/// Create new connector with resolver configuration
pub fn new(cfg: ResolverConfig, opts: ResolverOpts) -> Self {
Connector {
resolver: Resolver::new(cfg, opts),
}
}
/// Create new connector with custom resolver
pub fn with_resolver(
resolver: Resolver<Connect>,
) -> impl Service<Request = Connect, Response = (Connect, TcpStream), Error = ConnectorError>
+ Clone {
Connector { resolver }
}
/// Create new default connector service
pub fn new_service_with_config<E>(
cfg: ResolverConfig,
opts: ResolverOpts,
) -> impl NewService<
(),
Request = Connect,
Response = (Connect, TcpStream),
Error = ConnectorError,
InitError = E,
> + Clone {
fn_factory(move || ok(Connector::new(cfg.clone(), opts)))
}
}
impl Clone for Connector {
fn clone(&self) -> Self {
Connector {
resolver: self.resolver.clone(),
}
}
}
impl Service for Connector {
type Request = Connect;
type Response = (Connect, TcpStream);
type Error = ConnectorError;
type Future = Either<ConnectorFuture, ConnectorTcpFuture>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.kind {
ConnectKind::Host { .. } => Either::A(ConnectorFuture {
fut: self.resolver.call(req),
fut2: None,
}),
ConnectKind::Addr { addr, .. } => {
let mut addrs = VecDeque::new();
addrs.push_back(addr.ip());
Either::B(ConnectorTcpFuture {
fut: TcpConnectorResponse::new(req, addrs),
})
}
}
}
}
#[doc(hidden)]
pub struct ConnectorFuture {
fut: ResolverFuture<Connect>,
fut2: Option<TcpConnectorResponse<Connect>>,
}
impl Future for ConnectorFuture {
type Item = (Connect, TcpStream);
type Error = ConnectorError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(ref mut fut) = self.fut2 {
return fut.poll().map_err(ConnectorError::from);
}
match self.fut.poll().map_err(ConnectorError::from)? {
Async::Ready((req, addrs)) => {
if addrs.is_empty() {
Err(ConnectorError::NoRecords)
} else {
self.fut2 = Some(TcpConnectorResponse::new(req, addrs));
self.poll()
}
}
Async::NotReady => Ok(Async::NotReady),
}
}
}
#[doc(hidden)]
pub struct ConnectorTcpFuture {
fut: TcpConnectorResponse<Connect>,
}
impl Future for ConnectorTcpFuture {
type Item = (Connect, TcpStream);
type Error = ConnectorError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.fut.poll().map_err(ConnectorError::IoError)
}
}
/// Tcp stream connector service
pub struct TcpConnector<T: RequestPort>(PhantomData<T>);
impl<T: RequestPort> Default for TcpConnector<T> {
fn default() -> TcpConnector<T> {
TcpConnector(PhantomData)
}
}
impl<T: RequestPort> Service for TcpConnector<T> {
type Request = (T, VecDeque<IpAddr>);
type Response = (T, TcpStream);
type Error = io::Error;
type Future = TcpConnectorResponse<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, (req, addrs): (T, VecDeque<IpAddr>)) -> Self::Future {
TcpConnectorResponse::new(req, addrs)
}
}
#[doc(hidden)]
/// Tcp stream connector response future
pub struct TcpConnectorResponse<T: RequestPort> {
port: u16,
req: Option<T>,
addr: Option<SocketAddr>,
addrs: VecDeque<IpAddr>,
stream: Option<ConnectFuture>,
}
impl<T: RequestPort> TcpConnectorResponse<T> {
pub fn new(req: T, addrs: VecDeque<IpAddr>) -> TcpConnectorResponse<T> {
TcpConnectorResponse {
addrs,
port: req.port(),
req: Some(req),
addr: None,
stream: None,
}
}
}
impl<T: RequestPort> Future for TcpConnectorResponse<T> {
type Item = (T, TcpStream);
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
// connect
loop {
if let Some(new) = self.stream.as_mut() {
match new.poll() {
Ok(Async::Ready(sock)) => {
return Ok(Async::Ready((self.req.take().unwrap(), sock)));
}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => {
if self.addrs.is_empty() {
return Err(err);
}
}
}
}
// try to connect
let addr = SocketAddr::new(self.addrs.pop_front().unwrap(), self.port);
self.stream = Some(TcpStream::connect(&addr));
self.addr = Some(addr)
}
}
}
#[derive(Clone)]
pub struct DefaultConnector(Connector);
impl Default for DefaultConnector {
fn default() -> Self {
DefaultConnector(Connector::default())
}
}
impl DefaultConnector {
pub fn new(cfg: ResolverConfig, opts: ResolverOpts) -> Self {
DefaultConnector(Connector::new(cfg, opts))
}
}
impl Service for DefaultConnector {
type Request = Connect;
type Response = TcpStream;
type Error = ConnectorError;
type Future = DefaultConnectorFuture;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.0.poll_ready()
}
fn call(&mut self, req: Connect) -> Self::Future {
DefaultConnectorFuture {
fut: self.0.call(req),
}
}
}
#[doc(hidden)]
pub struct DefaultConnectorFuture {
fut: Either<ConnectorFuture, ConnectorTcpFuture>,
}
impl Future for DefaultConnectorFuture {
type Item = TcpStream;
type Error = ConnectorError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
Ok(Async::Ready(try_ready!(self.fut.poll()).1))
}
}


@@ -1,16 +0,0 @@
//! Actix Connector - tcp connector service
//!
//! ## Package feature
//!
//! * `tls` - enables ssl support via `native-tls` crate
//! * `ssl` - enables ssl support via `openssl` crate
//! * `rust-tls` - enables ssl support via `rustls` crate
mod connector;
mod resolver;
pub mod ssl;
pub use self::connector::{
Connect, Connector, ConnectorError, DefaultConnector, RequestPort, TcpConnector,
};
pub use self::resolver::{RequestHost, Resolver};


@@ -1,129 +0,0 @@
use std::collections::VecDeque;
use std::marker::PhantomData;
use std::net::IpAddr;
use actix_service::Service;
use futures::{Async, Future, Poll};
use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
pub use trust_dns_resolver::error::ResolveError;
use trust_dns_resolver::lookup_ip::LookupIpFuture;
use trust_dns_resolver::system_conf::read_system_conf;
use trust_dns_resolver::{AsyncResolver, Background};
/// Host name of the request
pub trait RequestHost {
fn host(&self) -> &str;
}
impl RequestHost for String {
fn host(&self) -> &str {
self.as_ref()
}
}
pub struct Resolver<T = String> {
resolver: AsyncResolver,
req: PhantomData<T>,
}
impl<T: RequestHost> Default for Resolver<T> {
fn default() -> Self {
let (cfg, opts) = if let Ok((cfg, opts)) = read_system_conf() {
(cfg, opts)
} else {
(ResolverConfig::default(), ResolverOpts::default())
};
Resolver::new(cfg, opts)
}
}
impl<T: RequestHost> Resolver<T> {
/// Create new resolver instance with custom configuration and options.
pub fn new(cfg: ResolverConfig, opts: ResolverOpts) -> Self {
let (resolver, bg) = AsyncResolver::new(cfg, opts);
tokio_current_thread::spawn(bg);
Resolver {
resolver,
req: PhantomData,
}
}
/// Change type of resolver request.
pub fn into_request<T2: RequestHost>(&self) -> Resolver<T2> {
Resolver {
resolver: self.resolver.clone(),
req: PhantomData,
}
}
}
impl<T> Clone for Resolver<T> {
fn clone(&self) -> Self {
Resolver {
resolver: self.resolver.clone(),
req: PhantomData,
}
}
}
impl<T: RequestHost> Service for Resolver<T> {
type Request = T;
type Response = (T, VecDeque<IpAddr>);
type Error = ResolveError;
type Future = ResolverFuture<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: T) -> Self::Future {
if let Ok(ip) = req.host().parse() {
let mut addrs = VecDeque::new();
addrs.push_back(ip);
ResolverFuture::new(req, &self.resolver, Some(addrs))
} else {
ResolverFuture::new(req, &self.resolver, None)
}
}
}
#[doc(hidden)]
/// Resolver future
pub struct ResolverFuture<T> {
req: Option<T>,
lookup: Option<Background<LookupIpFuture>>,
addrs: Option<VecDeque<IpAddr>>,
}
impl<T: RequestHost> ResolverFuture<T> {
pub fn new(addr: T, resolver: &AsyncResolver, addrs: Option<VecDeque<IpAddr>>) -> Self {
// we need to do dns resolution
let lookup = Some(resolver.lookup_ip(addr.host()));
ResolverFuture {
lookup,
addrs,
req: Some(addr),
}
}
}
impl<T: RequestHost> Future for ResolverFuture<T> {
type Item = (T, VecDeque<IpAddr>);
type Error = ResolveError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(addrs) = self.addrs.take() {
Ok(Async::Ready((self.req.take().unwrap(), addrs)))
} else {
match self.lookup.as_mut().unwrap().poll() {
Ok(Async::NotReady) => Ok(Async::NotReady),
Ok(Async::Ready(ips)) => Ok(Async::Ready((
self.req.take().unwrap(),
ips.iter().collect(),
))),
Err(err) => Err(err),
}
}
}
}


@@ -1,6 +0,0 @@
//! SSL Services
#[cfg(feature = "ssl")]
mod openssl;
#[cfg(feature = "ssl")]
pub use self::openssl::OpensslConnector;


@@ -1,107 +0,0 @@
use std::marker::PhantomData;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use openssl::ssl::{HandshakeError, SslConnector};
use tokio_openssl::{ConnectAsync, SslConnectorExt, SslStream};
use crate::resolver::RequestHost;
/// Openssl connector factory
pub struct OpensslConnector<R, T, E> {
connector: SslConnector,
_t: PhantomData<(R, T, E)>,
}
impl<R, T, E> OpensslConnector<R, T, E> {
pub fn new(connector: SslConnector) -> Self {
OpensslConnector {
connector,
_t: PhantomData,
}
}
}
impl<R: RequestHost, T: AsyncRead + AsyncWrite> OpensslConnector<R, T, ()> {
pub fn service(
connector: SslConnector,
) -> impl Service<Request = (R, T), Response = (R, SslStream<T>), Error = HandshakeError<T>>
{
OpensslConnectorService {
connector: connector,
_t: PhantomData,
}
}
}
impl<R, T, E> Clone for OpensslConnector<R, T, E> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<R: RequestHost, T: AsyncRead + AsyncWrite, E> NewService<()>
for OpensslConnector<R, T, E>
{
type Request = (R, T);
type Response = (R, SslStream<T>);
type Error = HandshakeError<T>;
type Service = OpensslConnectorService<R, T>;
type InitError = E;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
ok(OpensslConnectorService {
connector: self.connector.clone(),
_t: PhantomData,
})
}
}
pub struct OpensslConnectorService<R, T> {
connector: SslConnector,
_t: PhantomData<(R, T)>,
}
impl<R: RequestHost, T: AsyncRead + AsyncWrite> Service for OpensslConnectorService<R, T> {
type Request = (R, T);
type Response = (R, SslStream<T>);
type Error = HandshakeError<T>;
type Future = ConnectAsyncExt<R, T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, (req, stream): (R, T)) -> Self::Future {
ConnectAsyncExt {
fut: SslConnectorExt::connect_async(&self.connector, req.host(), stream),
req: Some(req),
}
}
}
pub struct ConnectAsyncExt<R, T> {
req: Option<R>,
fut: ConnectAsync<T>,
}
impl<R, T> Future for ConnectAsyncExt<R, T>
where
R: RequestHost,
T: AsyncRead + AsyncWrite,
{
type Item = (R, SslStream<T>);
type Error = HandshakeError<T>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::Ready(stream) => Ok(Async::Ready((self.req.take().unwrap(), stream))),
Async::NotReady => Ok(Async::NotReady),
}
}
}

actix-macros/.gitignore

@@ -0,0 +1 @@
/wip

actix-macros/CHANGES.md

@@ -0,0 +1,27 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.2.0 - 2021-02-02
* Update to latest `actix_rt::System::new` signature. [#261]
[#261]: https://github.com/actix/actix-net/pull/261
## 0.2.0-beta.1 - 2021-01-09
* Remove `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.3 - 2020-12-03
* Add `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.2 - 2020-05-18
* Forward actix_rt::test arguments to test function [#127]
[#127]: https://github.com/actix/actix-net/pull/127

actix-macros/Cargo.toml

@@ -0,0 +1,23 @@
[package]
name = "actix-macros"
version = "0.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Macros for Actix system and runtime"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-macros"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
proc-macro = true
[dependencies]
quote = "1.0.3"
syn = { version = "^1", features = ["full"] }
[dev-dependencies]
actix-rt = "2.0.0"
futures-util = { version = "0.3", default-features = false }
trybuild = "1"

actix-macros/LICENSE-APACHE (symbolic link)

@@ -0,0 +1 @@
../LICENSE-APACHE

actix-macros/LICENSE-MIT (symbolic link)

@@ -0,0 +1 @@
../LICENSE-MIT

actix-macros/src/lib.rs

@@ -0,0 +1,108 @@
//! Macros for Actix system and runtime.
//!
//! The [`actix-rt`](https://docs.rs/actix-rt) crate must be available for macro output to compile.
//!
//! # Entry-point
//! See docs for the [`#[main]`](macro@main) macro.
//!
//! # Tests
//! See docs for the [`#[test]`](macro@test) macro.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use proc_macro::TokenStream;
use quote::quote;
/// Marks async entry-point function to be executed by Actix system.
///
/// # Examples
/// ```
/// #[actix_rt::main]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
#[allow(clippy::needless_doctest_main)]
#[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
if sig.asyncness.is_none() {
return syn::Error::new_spanned(
sig.fn_token,
"the async keyword is missing from the function declaration",
)
.to_compile_error()
.into();
}
sig.asyncness = None;
(quote! {
#(#attrs)*
#vis #sig {
actix_rt::System::new()
.block_on(async move { #body })
}
})
.into()
}
/// Marks async test function to be executed in an Actix system.
///
/// # Examples
/// ```
/// #[actix_rt::test]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
#[proc_macro_attribute]
pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
let mut has_test_attr = false;
for attr in attrs {
if attr.path.is_ident("test") {
has_test_attr = true;
}
}
if sig.asyncness.is_none() {
return syn::Error::new_spanned(
input.sig.fn_token,
"the async keyword is missing from the function declaration",
)
.to_compile_error()
.into();
}
sig.asyncness = None;
let missing_test_attr = if has_test_attr {
quote!()
} else {
quote!(#[test])
};
(quote! {
#missing_test_attr
#(#attrs)*
#vis #sig {
actix_rt::System::new()
.block_on(async { #body })
}
})
.into()
}
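
For orientation, both attributes remove the `async` keyword and wrap the original function body in `System::new().block_on(...)`. A rough, illustrative expansion of the basic `main` example above (hand-written sketch, not actual macro output):

    fn main() {
        actix_rt::System::new()
            .block_on(async move {
                println!("Hello world");
            })
    }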


@@ -0,0 +1,11 @@
#[test]
fn compile_macros() {
let t = trybuild::TestCases::new();
t.pass("tests/trybuild/main-01-basic.rs");
t.compile_fail("tests/trybuild/main-02-only-async.rs");
t.pass("tests/trybuild/main-03-fn-params.rs");
t.pass("tests/trybuild/test-01-basic.rs");
t.pass("tests/trybuild/test-02-keep-attrs.rs");
t.compile_fail("tests/trybuild/test-03-only-async.rs");
}


@@ -0,0 +1,4 @@
#[actix_rt::main]
async fn main() {
println!("Hello world");
}


@@ -0,0 +1,4 @@
#[actix_rt::main]
fn main() {
futures_util::future::ready(()).await
}


@@ -0,0 +1,14 @@
error: the async keyword is missing from the function declaration
--> $DIR/main-02-only-async.rs:2:1
|
2 | fn main() {
| ^^
error[E0601]: `main` function not found in crate `$CRATE`
--> $DIR/main-02-only-async.rs:1:1
|
1 | / #[actix_rt::main]
2 | | fn main() {
3 | | futures_util::future::ready(()).await
4 | | }
| |_^ consider adding a `main` function to `$DIR/tests/trybuild/main-02-only-async.rs`


@@ -0,0 +1,6 @@
#[actix_rt::main]
async fn main2(_param: bool) {
futures_util::future::ready(()).await
}
fn main() {}


@@ -0,0 +1,6 @@
#[actix_rt::test]
async fn my_test() {
assert!(true);
}
fn main() {}


@@ -0,0 +1,7 @@
#[actix_rt::test]
#[should_panic]
async fn my_test() {
todo!()
}
fn main() {}


@@ -0,0 +1,6 @@
#[actix_rt::test]
fn my_test() {
futures_util::future::ready(()).await
}
fn main() {}


@@ -0,0 +1,5 @@
error: the async keyword is missing from the function declaration
--> $DIR/test-03-only-async.rs:2:1
|
2 | fn my_test() {
| ^^

actix-router/CHANGES.md

@@ -0,0 +1,64 @@
# Changes
## Unreleased - 2021-xx-xx
* Add `Router::recognize_checked` [#247]
[#247]: https://github.com/actix/actix-net/pull/247
## 0.2.6 - 2021-01-09
* Use `bytestring` version range compatible with Bytes v1.0. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 0.2.5 - 2020-09-20
* Fix `from_hex()` method
## 0.2.4 - 2019-12-31
* Add `ResourceDef::resource_path_named()` path generation method
## 0.2.3 - 2019-12-25
* Add impl `IntoPattern` for `&String`
## 0.2.2 - 2019-12-25
* Use `IntoPattern` for `RouterBuilder::path()`
## 0.2.1 - 2019-12-25
* Add `IntoPattern` trait
* Add multi-pattern resources
## 0.2.0 - 2019-12-07
* Update http to 0.2
* Update regex to 1.3
* Use bytestring instead of string
## 0.1.5 - 2019-05-15
* Remove debug prints
## 0.1.4 - 2019-05-15
* Fix checked resource match
## 0.1.3 - 2019-04-22
* Added support for `remainder match` (i.e "/path/{tail}*")
## 0.1.2 - 2019-04-07
* Export `Quoter` type
* Allow to reset `Path` instance
## 0.1.1 - 2019-04-03
* Get dynamic segment by name instead of iterator.
## 0.1.0 - 2019-03-09
* Initial release

actix-router/Cargo.toml

@@ -0,0 +1,29 @@
[package]
name = "actix-router"
version = "0.2.6"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Resource path matching library"
keywords = ["actix"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-router"
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_router"
path = "src/lib.rs"
[features]
default = ["http"]
[dependencies]
regex = "1.3.1"
serde = "1.0.104"
bytestring = ">=0.1.5, <2"
log = "0.4.8"
http = { version = "0.2.2", optional = true }
[dev-dependencies]
http = "0.2.2"
serde_derive = "1.0"

actix-router/LICENSE-APACHE (symbolic link)

@@ -0,0 +1 @@
../LICENSE-APACHE

actix-router/LICENSE-MIT (symbolic link)

@@ -0,0 +1 @@
../LICENSE-MIT


@@ -7,9 +7,13 @@ use crate::ResourcePath;
macro_rules! unsupported_type {
($trait_fn:ident, $name:expr) => {
fn $trait_fn<V>(self, _: V) -> Result<V::Value, Self::Error>
where V: Visitor<'de>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom(concat!("unsupported type: ", $name)))
Err(de::value::Error::custom(concat!(
"unsupported type: ",
$name
)))
}
};
}
@@ -17,23 +21,28 @@ macro_rules! unsupported_type {
macro_rules! parse_single_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: Visitor<'de>
where
V: Visitor<'de>,
{
if self.path.len() != 1 {
Err(de::value::Error::custom(
format!("wrong number of parameters: {} expected 1",
self.path.len()).as_str()))
format!("wrong number of parameters: {} expected 1", self.path.len())
.as_str(),
))
} else {
let v = self.path[0].parse().map_err(
|_| de::value::Error::custom(
format!("can not parse {:?} to a {}", &self.path[0], $tp)))?;
let v = self.path[0].parse().map_err(|_| {
de::value::Error::custom(format!(
"can not parse {:?} to a {}",
&self.path[0], $tp
))
})?;
visitor.$visit_fn(v)
}
}
}
};
}
pub struct PathDeserializer<'de, T: ResourcePath + 'de> {
pub struct PathDeserializer<'de, T: ResourcePath> {
path: &'de Path<T>,
}
@@ -151,10 +160,8 @@ impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T>
where
V: Visitor<'de>,
{
if self.path.len() < 1 {
Err(de::value::Error::custom(
"expeceted at least one parameters",
))
if self.path.is_empty() {
Err(de::value::Error::custom("expected at least one parameters"))
} else {
visitor.visit_enum(ValueEnum {
value: &self.path[0],
@@ -268,14 +275,15 @@ impl<'de> Deserializer<'de> for Key<'de> {
macro_rules! parse_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: Visitor<'de>
where
V: Visitor<'de>,
{
let v = self.value.parse().map_err(
|_| de::value::Error::custom(
format!("can not parse {:?} to a {}", self.value, $tp)))?;
let v = self.value.parse().map_err(|_| {
de::value::Error::custom(format!("can not parse {:?} to a {}", self.value, $tp))
})?;
visitor.$visit_fn(v)
}
}
};
}
struct Value<'de> {
@@ -492,7 +500,7 @@ mod tests {
#[derive(Deserialize)]
struct Id {
id: String,
_id: String,
}
#[derive(Debug, Deserialize)]

actix-router/src/lib.rs

@@ -0,0 +1,152 @@
//! Resource path matching library.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod de;
mod path;
mod resource;
mod router;
pub use self::de::PathDeserializer;
pub use self::path::Path;
pub use self::resource::ResourceDef;
pub use self::router::{ResourceInfo, Router, RouterBuilder};
pub trait Resource<T: ResourcePath> {
fn resource_path(&mut self) -> &mut Path<T>;
}
pub trait ResourcePath {
fn path(&self) -> &str;
}
impl ResourcePath for String {
fn path(&self) -> &str {
self.as_str()
}
}
impl<'a> ResourcePath for &'a str {
fn path(&self) -> &str {
self
}
}
impl ResourcePath for bytestring::ByteString {
fn path(&self) -> &str {
&*self
}
}
/// Helper trait for type that could be converted to path pattern
pub trait IntoPattern {
fn is_single(&self) -> bool;
fn patterns(&self) -> Vec<String>;
}
impl IntoPattern for String {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![self.clone()]
}
}
impl<'a> IntoPattern for &'a String {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![self.as_str().to_string()]
}
}
impl<'a> IntoPattern for &'a str {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![(*self).to_string()]
}
}
impl<T: AsRef<str>> IntoPattern for Vec<T> {
fn is_single(&self) -> bool {
self.len() == 1
}
fn patterns(&self) -> Vec<String> {
self.iter().map(|v| v.as_ref().to_string()).collect()
}
}
macro_rules! array_patterns (($tp:ty, $num:tt) => {
impl IntoPattern for [$tp; $num] {
fn is_single(&self) -> bool {
$num == 1
}
fn patterns(&self) -> Vec<String> {
self.iter().map(|v| v.to_string()).collect()
}
}
});
array_patterns!(&str, 1);
array_patterns!(&str, 2);
array_patterns!(&str, 3);
array_patterns!(&str, 4);
array_patterns!(&str, 5);
array_patterns!(&str, 6);
array_patterns!(&str, 7);
array_patterns!(&str, 8);
array_patterns!(&str, 9);
array_patterns!(&str, 10);
array_patterns!(&str, 11);
array_patterns!(&str, 12);
array_patterns!(&str, 13);
array_patterns!(&str, 14);
array_patterns!(&str, 15);
array_patterns!(&str, 16);
array_patterns!(String, 1);
array_patterns!(String, 2);
array_patterns!(String, 3);
array_patterns!(String, 4);
array_patterns!(String, 5);
array_patterns!(String, 6);
array_patterns!(String, 7);
array_patterns!(String, 8);
array_patterns!(String, 9);
array_patterns!(String, 10);
array_patterns!(String, 11);
array_patterns!(String, 12);
array_patterns!(String, 13);
array_patterns!(String, 14);
array_patterns!(String, 15);
array_patterns!(String, 16);
#[cfg(feature = "http")]
mod url;
#[cfg(feature = "http")]
pub use self::url::{Quoter, Url};
#[cfg(feature = "http")]
mod http_support {
use super::ResourcePath;
use http::Uri;
impl ResourcePath for Uri {
fn path(&self) -> &str {
self.path()
}
}
}
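
The `IntoPattern` impls above let a single pattern or a collection of patterns be passed anywhere a path pattern is expected. A small usage sketch (the function name and paths are illustrative, assuming the `ResourceDef` API shown later in this compare view):

    use actix_router::{IntoPattern, ResourceDef};

    fn demo() {
        // A single &str is a single pattern...
        assert!("/user/{id}".is_single());

        // ...while a Vec of patterns produces a multi-pattern (regex set) resource.
        let patterns = vec!["/user/{id}", "/profile/{id}"];
        assert!(!patterns.is_single());

        let def = ResourceDef::new(patterns);
        assert!(def.is_match("/user/123"));
        assert!(def.is_match("/profile/123"));
    }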


@@ -1,5 +1,4 @@
use std::ops::Index;
use std::rc::Rc;
use serde::de;
@@ -19,7 +18,7 @@ pub(crate) enum PathItem {
pub struct Path<T> {
path: T,
pub(crate) skip: u16,
pub(crate) segments: Vec<(Rc<String>, PathItem)>,
pub(crate) segments: Vec<(&'static str, PathItem)>,
}
impl<T: Default> Default for Path<T> {
@@ -52,16 +51,19 @@ impl<T: ResourcePath> Path<T> {
}
/// Get reference to inner path instance
#[inline]
pub fn get_ref(&self) -> &T {
&self.path
}
/// Get mutable reference to inner path instance
#[inline]
pub fn get_mut(&mut self) -> &mut T {
&mut self.path
}
/// Path
#[inline]
pub fn path(&self) -> &str {
let skip = self.skip as usize;
let path = self.path.path();
@@ -72,19 +74,28 @@ impl<T: ResourcePath> Path<T> {
}
}
/// Reset inner path
/// Set new path
#[inline]
pub fn set(&mut self, path: T) {
self.skip = 0;
self.path = path;
self.segments.clear();
}
/// Skip first `n` chars in path
pub fn skip(&mut self, n: u16) {
self.skip = self.skip + n;
/// Reset state
#[inline]
pub fn reset(&mut self) {
self.skip = 0;
self.segments.clear();
}
pub(crate) fn add(&mut self, name: Rc<String>, value: PathItem) {
/// Skip first `n` chars in path
#[inline]
pub fn skip(&mut self, n: u16) {
self.skip += n;
}
pub(crate) fn add(&mut self, name: &'static str, value: PathItem) {
match value {
PathItem::Static(s) => self.segments.push((name, PathItem::Static(s))),
PathItem::Segment(begin, end) => self
@@ -94,17 +105,18 @@ impl<T: ResourcePath> Path<T> {
}
#[doc(hidden)]
pub fn add_static(&mut self, name: &str, value: &'static str) {
self.segments
.push((Rc::new(name.to_string()), PathItem::Static(value)));
pub fn add_static(&mut self, name: &'static str, value: &'static str) {
self.segments.push((name, PathItem::Static(value)));
}
/// Check if there are any matched patterns
#[inline]
pub fn is_empty(&self) -> bool {
self.segments.is_empty()
}
/// Check number of extracted parameters
#[inline]
pub fn len(&self) -> usize {
self.segments.len()
}
@@ -112,7 +124,7 @@ impl<T: ResourcePath> Path<T> {
/// Get matched parameter by name without type conversion
pub fn get(&self, key: &str) -> Option<&str> {
for item in self.segments.iter() {
if key == item.0.as_str() {
if key == item.0 {
return match item.1 {
PathItem::Static(ref s) => Some(&s),
PathItem::Segment(s, e) => {
@@ -146,7 +158,7 @@ impl<T: ResourcePath> Path<T> {
}
/// Return iterator to items in parameter container
pub fn iter(&self) -> PathIter<T> {
pub fn iter(&self) -> PathIter<'_, T> {
PathIter {
idx: 0,
params: self,


@@ -0,0 +1,947 @@
use std::cmp::min;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use regex::{escape, Regex, RegexSet};
use crate::path::{Path, PathItem};
use crate::{IntoPattern, Resource, ResourcePath};
const MAX_DYNAMIC_SEGMENTS: usize = 16;
/// ResourceDef describes an entry in resources table
///
/// Resource definition can contain only 16 dynamic segments
#[derive(Clone, Debug)]
pub struct ResourceDef {
id: u16,
tp: PatternType,
name: String,
pattern: String,
elements: Vec<PatternElement>,
}
#[derive(Debug, Clone, PartialEq)]
enum PatternElement {
Str(String),
Var(String),
}
#[derive(Clone, Debug)]
#[allow(clippy::large_enum_variant)]
enum PatternType {
Static(String),
Prefix(String),
Dynamic(Regex, Vec<&'static str>, usize),
DynamicSet(RegexSet, Vec<(Regex, Vec<&'static str>, usize)>),
}
impl ResourceDef {
/// Parse path pattern and create new `Pattern` instance.
///
/// Panics if path pattern is malformed.
pub fn new<T: IntoPattern>(path: T) -> Self {
if path.is_single() {
let patterns = path.patterns();
ResourceDef::with_prefix(&patterns[0], false)
} else {
let set = path.patterns();
let mut data = Vec::new();
let mut re_set = Vec::new();
for path in set {
let (pattern, _, _, len) = ResourceDef::parse(&path, false);
let re = match Regex::new(&pattern) {
Ok(re) => re,
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
};
// actix creates one router per thread
let names: Vec<_> = re
.capture_names()
.filter_map(|name| {
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
})
.collect();
data.push((re, names, len));
re_set.push(pattern);
}
ResourceDef {
id: 0,
tp: PatternType::DynamicSet(RegexSet::new(re_set).unwrap(), data),
elements: Vec::new(),
name: String::new(),
pattern: "".to_owned(),
}
}
}
/// Parse path pattern and create new `Pattern` instance.
///
/// Use `prefix` type instead of `static`.
///
/// Panics if path regex pattern is malformed.
pub fn prefix(path: &str) -> Self {
ResourceDef::with_prefix(path, true)
}
/// Parse path pattern and create new `Pattern` instance.
/// Inserts `/` at the beginning of the pattern.
///
///
/// Use `prefix` type instead of `static`.
///
/// Panics if path regex pattern is malformed.
pub fn root_prefix(path: &str) -> Self {
ResourceDef::with_prefix(&insert_slash(path), true)
}
/// Resource id
pub fn id(&self) -> u16 {
self.id
}
/// Set resource id
pub fn set_id(&mut self, id: u16) {
self.id = id;
}
/// Parse path pattern and create new `Pattern` instance with custom prefix
fn with_prefix(path: &str, for_prefix: bool) -> Self {
let path = path.to_owned();
let (pattern, elements, is_dynamic, len) = ResourceDef::parse(&path, for_prefix);
let tp = if is_dynamic {
let re = match Regex::new(&pattern) {
Ok(re) => re,
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
};
// actix creates one router per thread
let names = re
.capture_names()
.filter_map(|name| {
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
})
.collect();
PatternType::Dynamic(re, names, len)
} else if for_prefix {
PatternType::Prefix(pattern)
} else {
PatternType::Static(pattern)
};
ResourceDef {
tp,
elements,
id: 0,
name: String::new(),
pattern: path,
}
}
/// Resource pattern name
pub fn name(&self) -> &str {
&self.name
}
/// Mutable reference to a name of a resource definition.
pub fn name_mut(&mut self) -> &mut String {
&mut self.name
}
/// Path pattern of the resource
pub fn pattern(&self) -> &str {
&self.pattern
}
/// Check if path matches this pattern.
#[inline]
pub fn is_match(&self, path: &str) -> bool {
match self.tp {
PatternType::Static(ref s) => s == path,
PatternType::Prefix(ref s) => path.starts_with(s),
PatternType::Dynamic(ref re, _, _) => re.is_match(path),
PatternType::DynamicSet(ref re, _) => re.is_match(path),
}
}
/// Is prefix path a match against this resource.
pub fn is_prefix_match(&self, path: &str) -> Option<usize> {
let p_len = path.len();
let path = if path.is_empty() { "/" } else { path };
match self.tp {
PatternType::Static(ref s) => {
if s == path {
Some(p_len)
} else {
None
}
}
PatternType::Dynamic(ref re, _, len) => {
if let Some(captures) = re.captures(path) {
let mut pos = 0;
let mut passed = false;
for capture in captures.iter() {
if let Some(ref m) = capture {
if !passed {
passed = true;
continue;
}
pos = m.end();
}
}
Some(pos + len)
} else {
None
}
}
PatternType::Prefix(ref s) => {
let len = if path == s {
s.len()
} else if path.starts_with(s)
&& (s.ends_with('/') || path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return None;
};
Some(min(p_len, len))
}
PatternType::DynamicSet(ref re, ref params) => {
if let Some(idx) = re.matches(path).into_iter().next() {
let (ref pattern, _, len) = params[idx];
if let Some(captures) = pattern.captures(path) {
let mut pos = 0;
let mut passed = false;
for capture in captures.iter() {
if let Some(ref m) = capture {
if !passed {
passed = true;
continue;
}
pos = m.end();
}
}
Some(pos + len)
} else {
None
}
} else {
None
}
}
}
}
/// Is the given path and parameters a match against this pattern.
pub fn match_path<T: ResourcePath>(&self, path: &mut Path<T>) -> bool {
match self.tp {
PatternType::Static(ref s) => {
if s == path.path() {
path.skip(path.len() as u16);
true
} else {
false
}
}
PatternType::Prefix(ref s) => {
let r_path = path.path();
let len = if s == r_path {
s.len()
} else if r_path.starts_with(s)
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return false;
};
let r_path_len = r_path.len();
path.skip(min(r_path_len, len) as u16);
true
}
PatternType::Dynamic(ref re, ref names, len) => {
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = re.captures(path.path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
}
PatternType::DynamicSet(ref re, ref params) => {
if let Some(idx) = re.matches(path.path()).into_iter().next() {
let (ref pattern, ref names, len) = params[idx];
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = pattern.captures(path.path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] =
PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
} else {
false
}
}
}
}
/// Is the given path and parameters a match against this pattern?
pub fn match_path_checked<R, T, F, U>(
&self,
res: &mut R,
check: &F,
user_data: &Option<U>,
) -> bool
where
T: ResourcePath,
R: Resource<T>,
F: Fn(&R, &Option<U>) -> bool,
{
match self.tp {
PatternType::Static(ref s) => {
if s == res.resource_path().path() && check(res, user_data) {
let path = res.resource_path();
path.skip(path.len() as u16);
true
} else {
false
}
}
PatternType::Prefix(ref s) => {
let len = {
let r_path = res.resource_path().path();
if s == r_path {
s.len()
} else if r_path.starts_with(s)
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return false;
}
};
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
path.skip(min(path.path().len(), len) as u16);
true
}
PatternType::Dynamic(ref re, ref names, len) => {
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = re.captures(res.resource_path().path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
}
PatternType::DynamicSet(ref re, ref params) => {
let path = res.resource_path().path();
if let Some(idx) = re.matches(path).into_iter().next() {
let (ref pattern, ref names, len) = params[idx];
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = pattern.captures(path) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] =
PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
} else {
false
}
}
}
}
/// Build resource path from elements. Returns `true` on success.
pub fn resource_path<U, I>(&self, path: &mut String, elements: &mut U) -> bool
where
U: Iterator<Item = I>,
I: AsRef<str>,
{
match self.tp {
PatternType::Prefix(ref p) => path.push_str(p),
PatternType::Static(ref p) => path.push_str(p),
PatternType::Dynamic(..) => {
for el in &self.elements {
match *el {
PatternElement::Str(ref s) => path.push_str(s),
PatternElement::Var(_) => {
if let Some(val) = elements.next() {
path.push_str(val.as_ref())
} else {
return false;
}
}
}
}
}
PatternType::DynamicSet(..) => {
return false;
}
}
true
}
/// Build resource path from elements. Returns `true` on success.
pub fn resource_path_named<K, V, S>(
&self,
path: &mut String,
elements: &HashMap<K, V, S>,
) -> bool
where
K: std::borrow::Borrow<str> + Eq + Hash,
V: AsRef<str>,
S: std::hash::BuildHasher,
{
match self.tp {
PatternType::Prefix(ref p) => path.push_str(p),
PatternType::Static(ref p) => path.push_str(p),
PatternType::Dynamic(..) => {
for el in &self.elements {
match *el {
PatternElement::Str(ref s) => path.push_str(s),
PatternElement::Var(ref name) => {
if let Some(val) = elements.get(name) {
path.push_str(val.as_ref())
} else {
return false;
}
}
}
}
}
PatternType::DynamicSet(..) => {
return false;
}
}
true
}
fn parse_param(pattern: &str) -> (PatternElement, String, &str, bool) {
const DEFAULT_PATTERN: &str = "[^/]+";
const DEFAULT_PATTERN_TAIL: &str = ".*";
let mut params_nesting = 0usize;
let close_idx = pattern
.find(|c| match c {
'{' => {
params_nesting += 1;
false
}
'}' => {
params_nesting -= 1;
params_nesting == 0
}
_ => false,
})
.expect("malformed dynamic segment");
let (mut param, mut rem) = pattern.split_at(close_idx + 1);
param = &param[1..param.len() - 1]; // Remove outer brackets
let tail = rem == "*";
let (name, pattern) = match param.find(':') {
Some(idx) => {
if tail {
panic!("Custom regex is not supported for remainder match");
}
let (name, pattern) = param.split_at(idx);
(name, &pattern[1..])
}
None => (
param,
if tail {
rem = &rem[1..];
DEFAULT_PATTERN_TAIL
} else {
DEFAULT_PATTERN
},
),
};
(
PatternElement::Var(name.to_string()),
format!(r"(?P<{}>{})", &name, &pattern),
rem,
tail,
)
}
fn parse(
mut pattern: &str,
mut for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
if pattern.find('{').is_none() {
// TODO: MSRV: 1.45
#[allow(clippy::manual_strip)]
return if pattern.ends_with('*') {
let path = &pattern[..pattern.len() - 1];
let re = String::from("^") + path + "(.*)";
(re, vec![PatternElement::Str(String::from(path))], true, 0)
} else {
(
String::from(pattern),
vec![PatternElement::Str(String::from(pattern))],
false,
pattern.chars().count(),
)
};
}
let mut elements = Vec::new();
let mut re = String::from("^");
let mut dyn_elements = 0;
while let Some(idx) = pattern.find('{') {
let (prefix, rem) = pattern.split_at(idx);
elements.push(PatternElement::Str(String::from(prefix)));
re.push_str(&escape(prefix));
let (param_pattern, re_part, rem, tail) = Self::parse_param(rem);
if tail {
for_prefix = true;
}
elements.push(param_pattern);
re.push_str(&re_part);
pattern = rem;
dyn_elements += 1;
}
elements.push(PatternElement::Str(String::from(pattern)));
re.push_str(&escape(pattern));
if dyn_elements > MAX_DYNAMIC_SEGMENTS {
panic!(
"Only {} dynamic segments are allowed, provided: {}",
MAX_DYNAMIC_SEGMENTS, dyn_elements
);
}
if !for_prefix {
re.push('$');
}
(re, elements, true, pattern.chars().count())
}
}
impl Eq for ResourceDef {}
impl PartialEq for ResourceDef {
fn eq(&self, other: &ResourceDef) -> bool {
self.pattern == other.pattern
}
}
impl Hash for ResourceDef {
fn hash<H: Hasher>(&self, state: &mut H) {
self.pattern.hash(state);
}
}
impl<'a> From<&'a str> for ResourceDef {
fn from(path: &'a str) -> ResourceDef {
ResourceDef::new(path)
}
}
impl From<String> for ResourceDef {
fn from(path: String) -> ResourceDef {
ResourceDef::new(path)
}
}
pub(crate) fn insert_slash(path: &str) -> String {
let mut path = path.to_owned();
if !path.is_empty() && !path.starts_with('/') {
path.insert(0, '/');
};
path
}
#[cfg(test)]
mod tests {
use super::*;
use http::Uri;
use std::convert::TryFrom;
#[test]
fn test_parse_static() {
let re = ResourceDef::new("/");
assert!(re.is_match("/"));
assert!(!re.is_match("/a"));
let re = ResourceDef::new("/name");
assert!(re.is_match("/name"));
assert!(!re.is_match("/name1"));
assert!(!re.is_match("/name/"));
assert!(!re.is_match("/name~"));
assert_eq!(re.is_prefix_match("/name"), Some(5));
assert_eq!(re.is_prefix_match("/name1"), None);
assert_eq!(re.is_prefix_match("/name/"), None);
assert_eq!(re.is_prefix_match("/name~"), None);
let re = ResourceDef::new("/name/");
assert!(re.is_match("/name/"));
assert!(!re.is_match("/name"));
assert!(!re.is_match("/name/gs"));
let re = ResourceDef::new("/user/profile");
assert!(re.is_match("/user/profile"));
assert!(!re.is_match("/user/profile/profile"));
}
#[test]
fn test_parse_param() {
let re = ResourceDef::new("/user/{id}");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let mut path = Path::new("/user/profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/1245125");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "1245125");
let re = ResourceDef::new("/v{version}/resource/{id}");
assert!(re.is_match("/v1/resource/320120"));
assert!(!re.is_match("/v/resource/1"));
assert!(!re.is_match("/resource"));
let mut path = Path::new("/v151/resource/adage32");
assert!(re.match_path(&mut path));
assert_eq!(path.get("version").unwrap(), "151");
assert_eq!(path.get("id").unwrap(), "adage32");
let re = ResourceDef::new("/{id:[[:digit:]]{6}}");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let mut path = Path::new("/012345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "012345");
}
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_dynamic_set() {
let re = ResourceDef::new(vec![
"/user/{id}",
"/v{version}/resource/{id}",
"/{id:[[:digit:]]{6}}",
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let mut path = Path::new("/user/profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/1245125");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "1245125");
assert!(re.is_match("/v1/resource/320120"));
assert!(!re.is_match("/v/resource/1"));
assert!(!re.is_match("/resource"));
let mut path = Path::new("/v151/resource/adage32");
assert!(re.match_path(&mut path));
assert_eq!(path.get("version").unwrap(), "151");
assert_eq!(path.get("id").unwrap(), "adage32");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let mut path = Path::new("/012345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "012345");
let re = ResourceDef::new([
"/user/{id}",
"/v{version}/resource/{id}",
"/{id:[[:digit:]]{6}}",
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let re = ResourceDef::new([
"/user/{id}".to_string(),
"/v{version}/resource/{id}".to_string(),
"/{id:[[:digit:]]{6}}".to_string(),
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
}
#[test]
fn test_parse_tail() {
let re = ResourceDef::new("/user/-{id}*");
let mut path = Path::new("/user/-profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/-2345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let mut path = Path::new("/user/-2345/");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345/");
let mut path = Path::new("/user/-2345/sdg");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345/sdg");
}
#[test]
fn test_static_tail() {
let re = ResourceDef::new("/user*");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(re.is_match("/user/2345/"));
assert!(re.is_match("/user/2345/sdg"));
let re = ResourceDef::new("/user/*");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(re.is_match("/user/2345/"));
assert!(re.is_match("/user/2345/sdg"));
}
#[test]
fn test_parse_urlencoded_param() {
let re = ResourceDef::new("/user/{id}/test");
let mut path = Path::new("/user/2345/test");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let mut path = Path::new("/user/qwe%25/test");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%25");
let uri = Uri::try_from("/user/qwe%25/test").unwrap();
let mut path = Path::new(uri);
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%25");
}
#[test]
fn test_resource_prefix() {
let re = ResourceDef::prefix("/name");
assert!(re.is_match("/name"));
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/test/test"));
assert!(re.is_match("/name1"));
assert!(re.is_match("/name~"));
assert_eq!(re.is_prefix_match("/name"), Some(5));
assert_eq!(re.is_prefix_match("/name/"), Some(5));
assert_eq!(re.is_prefix_match("/name/test/test"), Some(5));
assert_eq!(re.is_prefix_match("/name1"), None);
assert_eq!(re.is_prefix_match("/name~"), None);
let re = ResourceDef::prefix("/name/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
let re = ResourceDef::root_prefix("name/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
}
#[test]
fn test_resource_prefix_dynamic() {
let re = ResourceDef::prefix("/{name}/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
assert_eq!(re.is_prefix_match("/name/"), Some(6));
assert_eq!(re.is_prefix_match("/name/gs"), Some(6));
assert_eq!(re.is_prefix_match("/name"), None);
let mut path = Path::new("/test2/");
assert!(re.match_path(&mut path));
assert_eq!(&path["name"], "test2");
assert_eq!(&path[0], "test2");
let mut path = Path::new("/test2/subpath1/subpath2/index.html");
assert!(re.match_path(&mut path));
assert_eq!(&path["name"], "test2");
assert_eq!(&path[0], "test2");
}
#[test]
fn test_resource_path() {
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/test");
assert!(resource.resource_path(&mut s, &mut (&["user1"]).iter()));
assert_eq!(s, "/user/user1/test");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}/test");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/test");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}/");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/");
let mut s = String::new();
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
let mut s = String::new();
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/");
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
let mut s = String::new();
assert!(resource.resource_path(&mut s, &mut vec!["item", "item2"].into_iter()));
assert_eq!(s, "/user/item/item2/");
let mut map = HashMap::new();
map.insert("item1", "item");
let mut s = String::new();
assert!(!resource.resource_path_named(&mut s, &map));
let mut s = String::new();
map.insert("item2", "item2");
assert!(resource.resource_path_named(&mut s, &map));
assert_eq!(s, "/user/item/item2/");
}
}
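
For reference, `parse` and `parse_param` above compile the bracket syntax into anchored regexes. An illustrative mapping under the defaults shown (`[^/]+` per segment, `.*` for a trailing `{tail}*`); the example paths are made up:

    // Pattern              =>  Compiled regex (sketch)
    // "/user/{id}"         =>  ^/user/(?P<id>[^/]+)$
    // "/user/{id:\d+}"     =>  ^/user/(?P<id>\d+)$
    // "/path/{tail}*"      =>  ^/path/(?P<tail>.*)    (remainder match; no trailing `$`)
    // "/user*"             =>  ^/user(.*)             (static remainder; no named capture)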


@@ -1,4 +1,4 @@
use crate::{Resource, ResourceDef, ResourcePath};
use crate::{IntoPattern, Resource, ResourceDef, ResourcePath};
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ResourceId(pub u16);
@@ -19,35 +19,53 @@ impl<T, U> Router<T, U> {
}
}
pub fn recognize<R, P>(&self, path: &mut R) -> Option<(&T, ResourceId)>
pub fn recognize<R, P>(&self, resource: &mut R) -> Option<(&T, ResourceId)>
where
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter() {
if item.0.match_path(path.resource_path()) {
if item.0.match_path(resource.resource_path()) {
return Some((&item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_mut<R, P>(&mut self, res: &mut R) -> Option<(&mut T, ResourceId)>
pub fn recognize_mut<R, P>(&mut self, resource: &mut R) -> Option<(&mut T, ResourceId)>
where
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter_mut() {
if item.0.match_path(res.resource_path()) {
if item.0.match_path(resource.resource_path()) {
return Some((&mut item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_checked<R, P, F>(
&self,
resource: &mut R,
check: F,
) -> Option<(&T, ResourceId)>
where
F: Fn(&R, &Option<U>) -> bool,
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter() {
if item.0.match_path_checked(resource, &check, &item.2) {
return Some((&item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_mut_checked<R, P, F>(
&mut self,
res: &mut R,
resource: &mut R,
check: F,
) -> Option<(&mut T, ResourceId)>
where
@@ -56,7 +74,7 @@ impl<T, U> Router<T, U> {
P: ResourcePath,
{
for item in self.0.iter_mut() {
if item.0.match_path(res.resource_path()) && check(res, &item.2) {
if item.0.match_path_checked(resource, &check, &item.2) {
return Some((&mut item.1, ResourceId(item.0.id())));
}
}
@@ -70,7 +88,11 @@ pub struct RouterBuilder<T, U = ()> {
impl<T, U> RouterBuilder<T, U> {
/// Register resource for specified path.
pub fn path(&mut self, path: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) {
pub fn path<P: IntoPattern>(
&mut self,
path: P,
resource: T,
) -> &mut (ResourceDef, T, Option<U>) {
self.resources
.push((ResourceDef::new(path), resource, None));
self.resources.last_mut().unwrap()
@@ -100,6 +122,7 @@ mod tests {
use crate::path::Path;
use crate::router::{ResourceId, Router};
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_recognizer_1() {
let mut router = Router::<usize>::build();
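
The newly added `recognize_checked` mirrors `recognize_mut_checked` but borrows the router immutably. A hypothetical usage sketch; the resource values, user data, and guard closure are invented, and `finish()` plus the `Resource` impl for `Path` are assumed from the crate's existing API (not shown in this excerpt):

    use actix_router::{Path, Router};

    fn demo() {
        let mut builder = Router::<&'static str, bool>::build();
        // Attach user data to the registered resource.
        builder.path("/name/{id}", "named").2 = Some(true);
        let router = builder.finish();

        let mut path = Path::new("/name/42");
        // Only match resources whose user data is Some(true).
        let found = router.recognize_checked(&mut path, |_res, user_data| {
            matches!(user_data, Some(true))
        });
        assert!(found.is_some());
    }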


@@ -1,5 +1,3 @@
use std::rc::Rc;
use crate::ResourcePath;
#[allow(dead_code)]
@@ -33,13 +31,13 @@ fn set_bit(array: &mut [u8], ch: u8) {
}
thread_local! {
static DEFAULT_QUOTER: Quoter = { Quoter::new(b"@:", b"/+") };
static DEFAULT_QUOTER: Quoter = Quoter::new(b"@:", b"/+");
}
#[derive(Default, Clone, Debug)]
pub struct Url {
uri: http::Uri,
path: Option<Rc<String>>,
path: Option<String>,
}
impl Url {
@@ -49,6 +47,13 @@ impl Url {
Url { uri, path }
}
pub fn with_quoter(uri: http::Uri, quoter: &Quoter) -> Url {
Url {
path: quoter.requote(uri.path().as_bytes()),
uri,
}
}
pub fn uri(&self) -> &http::Uri {
&self.uri
}
@@ -61,19 +66,27 @@ impl Url {
}
}
#[inline]
pub fn update(&mut self, uri: &http::Uri) {
self.uri = uri.clone();
self.path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
}
#[inline]
pub fn update_with_quoter(&mut self, uri: &http::Uri, quoter: &Quoter) {
self.uri = uri.clone();
self.path = quoter.requote(uri.path().as_bytes());
}
}
impl ResourcePath for Url {
#[inline]
fn path(&self) -> &str {
self.path()
}
}
pub(crate) struct Quoter {
pub struct Quoter {
safe_table: [u8; 16],
protected_table: [u8; 16],
}
@@ -108,7 +121,7 @@ impl Quoter {
q
}
pub fn requote(&self, val: &[u8]) -> Option<Rc<String>> {
pub fn requote(&self, val: &[u8]) -> Option<String> {
let mut has_pct = 0;
let mut pct = [b'%', 0, 0];
let mut idx = 0;
@@ -160,7 +173,7 @@ impl Quoter {
if let Some(data) = cloned {
// Unsafe: we get data from http::Uri, which does utf-8 checks already
// this code only decodes valid pct encoded values
Some(Rc::new(unsafe { String::from_utf8_unchecked(data) }))
Some(unsafe { String::from_utf8_unchecked(data) })
} else {
None
}
@@ -169,11 +182,11 @@ impl Quoter {
#[inline]
fn from_hex(v: u8) -> Option<u8> {
if v >= b'0' && v <= b'9' {
if (b'0'..=b'9').contains(&v) {
Some(v - 0x30) // ord('0') == 0x30
} else if v >= b'A' && v <= b'F' {
} else if (b'A'..=b'F').contains(&v) {
Some(v - 0x41 + 10) // ord('A') == 0x41
} else if v > b'a' && v <= b'f' {
} else if (b'a'..=b'f').contains(&v) {
Some(v - 0x61 + 10) // ord('a') == 0x61
} else {
None
@@ -182,12 +195,13 @@ fn from_hex(v: u8) -> Option<u8> {
#[inline]
fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
from_hex(d1).and_then(|d1| from_hex(d2).and_then(move |d2| Some(d1 << 4 | d2)))
from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| d1 << 4 | d2))
}
#[cfg(test)]
mod tests {
use http::{HttpTryFrom, Uri};
use http::Uri;
use std::convert::TryFrom;
use super::*;
use crate::{Path, ResourceDef};
@@ -211,4 +225,25 @@ mod tests {
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%rty");
}
#[test]
fn test_from_hex() {
let hex = b"0123456789abcdefABCDEF";
for i in 0..256 {
let c = i as u8;
if hex.contains(&c) {
assert!(from_hex(c).is_some())
} else {
assert!(from_hex(c).is_none())
}
}
let expected = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15,
];
for i in 0..hex.len() {
assert_eq!(from_hex(hex[i]).unwrap(), expected[i]);
}
}
}
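
The `test_from_hex` case above exercises the same digit math that `restore_ch` uses to rebuild a byte from two hex characters. A standalone illustration (re-implemented here only for the example, since the real helpers are private to the module):

    fn from_hex(v: u8) -> Option<u8> {
        match v {
            b'0'..=b'9' => Some(v - b'0'),
            b'A'..=b'F' => Some(v - b'A' + 10),
            b'a'..=b'f' => Some(v - b'a' + 10),
            _ => None,
        }
    }

    fn main() {
        // "%2F" decodes to '/': (0x2 << 4) | 0xF == 0x2F.
        let byte = (from_hex(b'2').unwrap() << 4) | from_hex(b'F').unwrap();
        assert_eq!(byte, b'/');
    }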


@@ -1,5 +1,166 @@
# Changes
## Unreleased - 2021-xx-xx
## 2.0.1 - 2021-02-06
* Expose `JoinError` from Tokio. [#271]
[#271]: https://github.com/actix/actix-net/pull/271
## 2.0.0 - 2021-02-02
* Remove all Arbiter-local storage methods. [#262]
* Re-export `tokio::pin`. [#262]
[#262]: https://github.com/actix/actix-net/pull/262
## 2.0.0-beta.3 - 2021-01-31
* Remove `run_in_tokio`, `attach_to_tokio` and `AsyncSystemRunner`. [#253]
* Return `JoinHandle` from `actix_rt::spawn`. [#253]
* Remove old `Arbiter::spawn`. Implementation is now inlined into `actix_rt::spawn`. [#253]
* Rename `Arbiter::{send => spawn}` and `Arbiter::{exec_fn => spawn_fn}`. [#253]
* Remove `Arbiter::exec`. [#253]
* Remove deprecated `Arbiter::local_join` and `Arbiter::is_running`. [#253]
* `Arbiter::spawn` now accepts !Unpin futures. [#256]
* `System::new` no longer takes arguments. [#257]
* Remove `System::with_current`. [#257]
* Remove `Builder`. [#257]
* Add `System::with_init` as replacement for `Builder::run`. [#257]
* Rename `System::{is_set => is_registered}`. [#257]
* Add `ArbiterHandle` for sending messages to non-current-thread arbiters. [#257].
* `System::arbiter` now returns an `&ArbiterHandle`. [#257]
* `Arbiter::current` now returns an `ArbiterHandle` instead. [#257]
* `Arbiter::join` now takes self by value. [#257]
[#253]: https://github.com/actix/actix-net/pull/253
[#254]: https://github.com/actix/actix-net/pull/254
[#256]: https://github.com/actix/actix-net/pull/256
[#257]: https://github.com/actix/actix-net/pull/257
## 2.0.0-beta.2 - 2021-01-09
* Add `task` mod with re-export of `tokio::task::{spawn_blocking, yield_now, JoinHandle}` [#245]
* Add default "macros" feature to allow faster compile times when using `default-features=false`.
[#245]: https://github.com/actix/actix-net/pull/245
## 2.0.0-beta.1 - 2020-12-28
### Added
* Add `System::attach_to_tokio` method. [#173]
### Changed
* Update `tokio` dependency to `1.0`. [#236]
* Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep`
to stay aligned with Tokio's naming. [#236]
* Remove `'static` lifetime requirement for `Runtime::block_on` and `SystemRunner::block_on`.
* These methods now accept `&self` when calling. [#236]
* Remove `'static` lifetime requirement for `System::run` and `Builder::run`. [#236]
* `Arbiter::spawn` now panics when `System` is not in scope. [#207]
### Fixed
* Fix work load issue by removing `PENDING` thread local. [#207]
[#207]: https://github.com/actix/actix-net/pull/207
[#236]: https://github.com/actix/actix-net/pull/236
## [1.1.1] - 2020-04-30
### Fixed
* Fix memory leak due to [#94] (see [#129] for more detail)
[#129]: https://github.com/actix/actix-net/issues/129
## [1.1.0] - 2020-04-08
**This version has been yanked.**
### Added
* Expose `System::is_set` to check if current system has been started [#99]
* Add `Arbiter::is_running` to check if event loop is running [#124]
* Add `Arbiter::local_join` associated function
to be able to `await` spawned futures [#94]
[#94]: https://github.com/actix/actix-net/pull/94
[#99]: https://github.com/actix/actix-net/pull/99
[#124]: https://github.com/actix/actix-net/pull/124
## [1.0.0] - 2019-12-11
* Update dependencies
## [1.0.0-alpha.3] - 2019-12-07
### Fixed
* Fix compilation on non-unix platforms
### Changed
* Migrate to tokio 0.2
## [1.0.0-alpha.2] - 2019-12-02
Added
* Export `main` and `test` attribute macros
* Export `time` module (re-export of tokio-timer)
* Export `net` module (re-export of tokio-net)
## [1.0.0-alpha.1] - 2019-11-22
### Changed
* Migrate to std::future and tokio 0.2
## [0.2.6] - 2019-11-14
### Fixed
* Fix arbiter's thread panic message.
### Added
* Allow to join arbiter's thread. #60
## [0.2.5] - 2019-09-02
### Added
* Add arbiter specific storage
## [0.2.4] - 2019-07-17
### Changed
* Avoid a copy of the Future when initializing the Box. #29
## [0.2.3] - 2019-06-22
### Added
* Allow to start System using existing CurrentThread Handle #22
## [0.2.2] - 2019-03-28
### Changed
* Moved `blocking` module to `actix-threadpool` crate
## [0.2.1] - 2019-03-11
### Added
@@ -10,12 +171,14 @@
* Arbiter::exec - execute fn on the arbiter's thread and wait result
## [0.2.0] - 2019-03-06
* `run` method returns `io::Result<()>`
* Removed `Handle`
## [0.1.0] - 2018-12-09
* Initial release
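
Taken together, the 2.x entries above describe a noticeably different API surface than 1.x. A minimal, assumption-laden sketch of how the pieces fit (method names follow the changelog entries; signatures are not verified against the released docs):

    fn main() {
        actix_rt::System::new().block_on(async {
            // `actix_rt::spawn` now returns a `JoinHandle`.
            let handle = actix_rt::spawn(async { 40 + 2 });
            assert_eq!(handle.await.unwrap(), 42);

            // Arbiters run on their own threads; futures are sent to them to be spawned.
            let arbiter = actix_rt::Arbiter::new();
            arbiter.spawn(async {
                println!("running on the arbiter thread");
            });
            arbiter.stop();
        });
    }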


@@ -1,32 +1,33 @@
[package]
name = "actix-rt"
version = "0.2.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix runtime"
keywords = ["network", "framework", "async", "futures"]
version = "2.0.1"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
keywords = ["async", "futures", "io", "runtime"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-rt/"
documentation = "https://docs.rs/actix-rt"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
edition = "2018"
workspace = "../"
[lib]
name = "actix_rt"
path = "src/lib.rs"
[features]
default = ["macros"]
macros = ["actix-macros"]
[dependencies]
bytes = "0.4"
derive_more = "0.14"
futures = "0.1.25"
parking_lot = "0.7"
lazy_static = "1.2"
log = "0.4"
num_cpus = "1.10"
threadpool = "1.7"
tokio-current-thread = "0.1"
tokio-executor = "0.1.5"
tokio-reactor = "0.1.7"
tokio-timer = "0.2.8"
actix-macros = { version = "0.2.0", optional = true }
futures-core = { version = "0.3", default-features = false }
tokio = { version = "1.2", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
[dev-dependencies]
tokio = { version = "1.2", features = ["full"] }
hyper = { version = "0.14", default-features = false, features = ["server", "tcp", "http1"] }

actix-rt/LICENSE-APACHE (symbolic link)

@@ -0,0 +1 @@
../LICENSE-APACHE

actix-rt/LICENSE-MIT (symbolic link)

@@ -0,0 +1 @@
../LICENSE-MIT

actix-rt/README.md

@@ -0,0 +1,5 @@
# actix-rt
> Tokio-based single-threaded async runtime for the Actix ecosystem.
See crate documentation for more: https://docs.rs/actix-rt.


@@ -0,0 +1,28 @@
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};
use std::convert::Infallible;
use std::net::SocketAddr;
async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
Ok(Response::new(Body::from("Hello World")))
}
fn main() {
actix_rt::System::with_tokio_rt(|| {
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
})
.block_on(async {
let make_service =
make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) });
let server =
Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))).serve(make_service);
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
})
}


@@ -1,306 +1,255 @@
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{fmt, thread};
use std::{
cell::RefCell,
fmt,
future::Future,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
thread,
};
use futures::sync::mpsc::{unbounded, UnboundedReceiver, UnboundedSender};
use futures::sync::oneshot::{channel, Canceled, Sender};
use futures::{future, Async, Future, IntoFuture, Poll, Stream};
use tokio_current_thread::spawn;
use futures_core::ready;
use tokio::{sync::mpsc, task::LocalSet};
use crate::builder::Builder;
use crate::system::System;
thread_local!(
static ADDR: RefCell<Option<Arbiter>> = RefCell::new(None);
static RUNNING: Cell<bool> = Cell::new(false);
static Q: RefCell<Vec<Box<Future<Item = (), Error = ()>>>> = RefCell::new(Vec::new());
);
use crate::{
runtime::{default_tokio_runtime, Runtime},
system::{System, SystemCommand},
};
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static HANDLE: RefCell<Option<ArbiterHandle>> = RefCell::new(None);
);
pub(crate) enum ArbiterCommand {
Stop,
Execute(Box<Future<Item = (), Error = ()> + Send>),
ExecuteFn(Box<FnExec>),
Execute(Pin<Box<dyn Future<Output = ()> + Send>>),
}
impl fmt::Debug for ArbiterCommand {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ArbiterCommand::Stop => write!(f, "ArbiterCommand::Stop"),
ArbiterCommand::Execute(_) => write!(f, "ArbiterCommand::Execute"),
ArbiterCommand::ExecuteFn(_) => write!(f, "ArbiterCommand::ExecuteFn"),
}
}
}
/// A handle for sending spawn and stop messages to an [Arbiter].
#[derive(Debug, Clone)]
pub struct Arbiter(UnboundedSender<ArbiterCommand>);
impl Default for Arbiter {
fn default() -> Self {
Self::new()
}
pub struct ArbiterHandle {
tx: mpsc::UnboundedSender<ArbiterCommand>,
}
impl Arbiter {
pub(crate) fn new_system() -> Self {
let (tx, rx) = unbounded();
let arb = Arbiter(tx);
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
RUNNING.with(|cell| cell.set(false));
Arbiter::spawn(ArbiterController { stop: None, rx });
arb
impl ArbiterHandle {
pub(crate) fn new(tx: mpsc::UnboundedSender<ArbiterCommand>) -> Self {
Self { tx }
}
/// Returns current arbiter's address
pub fn current() -> Arbiter {
ADDR.with(|cell| match *cell.borrow() {
Some(ref addr) => addr.clone(),
None => panic!("Arbiter is not running"),
})
}
/// Stop arbiter
pub fn stop(&self) {
let _ = self.0.unbounded_send(ArbiterCommand::Stop);
}
/// Spawn new thread and run event loop in spawned thread.
/// Returns address of newly created arbiter.
pub fn new() -> Arbiter {
let id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt:worker:{}", id);
let sys = System::current();
let (arb_tx, arb_rx) = unbounded();
let arb_tx2 = arb_tx.clone();
let _ = thread::Builder::new().name(name.clone()).spawn(move || {
let mut rt = Builder::new().build_rt().expect("Can not create Runtime");
let arb = Arbiter(arb_tx);
let (stop, stop_rx) = channel();
RUNNING.with(|cell| cell.set(true));
System::set_current(sys);
// start arbiter controller
rt.spawn(ArbiterController {
stop: Some(stop),
rx: arb_rx,
});
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
// register arbiter
let _ = System::current()
.sys()
.unbounded_send(SystemCommand::RegisterArbiter(id, arb.clone()));
// run loop
let _ = match rt.block_on(stop_rx) {
Ok(code) => code,
Err(_) => 1,
};
// unregister arbiter
let _ = System::current()
.sys()
.unbounded_send(SystemCommand::UnregisterArbiter(id));
});
Arbiter(arb_tx2)
}
pub(crate) fn run_system() {
RUNNING.with(|cell| cell.set(true));
Q.with(|cell| {
let mut v = cell.borrow_mut();
for fut in v.drain(..) {
spawn(fut);
}
});
}
pub(crate) fn stop_system() {
RUNNING.with(|cell| cell.set(false));
}
/// Spawn a future on the current thread.
pub fn spawn<F>(future: F)
/// Send a future to the [Arbiter]'s thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the [Arbiter] has died.
pub fn spawn<Fut>(&self, future: Fut) -> bool
where
F: Future<Item = (), Error = ()> + 'static,
Fut: Future<Output = ()> + Send + 'static,
{
RUNNING.with(move |cell| {
if cell.get() {
spawn(Box::new(future));
} else {
Q.with(move |cell| cell.borrow_mut().push(Box::new(future)));
}
});
self.tx
.send(ArbiterCommand::Execute(Box::pin(future)))
.is_ok()
}
/// Executes a future on the current thread.
pub fn spawn_fn<F, R>(f: F)
where
F: FnOnce() -> R + 'static,
R: IntoFuture<Item = (), Error = ()> + 'static,
{
Arbiter::spawn(future::lazy(f))
}
/// Send a future on the arbiter's thread and spawn.
pub fn send<F>(&self, future: F)
where
F: Future<Item = (), Error = ()> + Send + 'static,
{
let _ = self
.0
.unbounded_send(ArbiterCommand::Execute(Box::new(future)));
}
/// Send a function to the arbiter's thread and execute it.
pub fn exec_fn<F>(&self, f: F)
/// Send a function to the [Arbiter]'s thread and execute it.
///
/// Any result from the function is discarded. If you require a result, include a response
/// channel in the function.
///
/// Returns true if function was sent successfully and false if the [Arbiter] has died.
pub fn spawn_fn<F>(&self, f: F) -> bool
where
F: FnOnce() + Send + 'static,
{
let _ = self
.0
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
let _ = f();
})));
self.spawn(async { f() })
}
/// Send a function to the arbiter's thread, execute it, and return the result.
pub fn exec<F, R>(&self, f: F) -> impl Future<Item = R, Error = Canceled>
/// Instruct [Arbiter] to stop processing its event loop.
///
/// Returns true if stop message was sent successfully and false if the [Arbiter] has
/// been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
}
/// An Arbiter represents a thread that provides an asynchronous execution environment for futures
/// and functions.
///
/// When an arbiter is created, it spawns a new [OS thread](thread), and hosts an event loop.
#[derive(Debug)]
pub struct Arbiter {
tx: mpsc::UnboundedSender<ArbiterCommand>,
thread_handle: thread::JoinHandle<()>,
}
impl Arbiter {
/// Spawn a new Arbiter thread and start its event loop.
///
/// # Panics
/// Panics if a [System] is not registered on the current thread.
#[allow(clippy::new_without_default)]
pub fn new() -> Arbiter {
Self::with_tokio_rt(|| {
default_tokio_runtime().expect("Cannot create new Arbiter's Runtime.")
})
}
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
F: Fn() -> tokio::runtime::Runtime + Send + 'static,
{
let (tx, rx) = channel();
let _ = self
.0
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
if !tx.is_canceled() {
let _ = tx.send(f());
let sys = System::current();
let system_id = sys.id();
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
let (tx, rx) = mpsc::unbounded_channel();
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
let thread_handle = thread::Builder::new()
.name(name.clone())
.spawn({
let tx = tx.clone();
move || {
let rt = Runtime::from(runtime_factory());
let hnd = ArbiterHandle::new(tx);
System::set_current(sys);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
// register arbiter
let _ = System::current()
.tx()
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
ready_tx.send(()).unwrap();
// run arbiter event processing loop
rt.block_on(ArbiterRunner { rx });
// deregister arbiter
let _ = System::current()
.tx()
.send(SystemCommand::DeregisterArbiter(arb_id));
}
})));
rx
})
.unwrap_or_else(|err| {
panic!("Cannot spawn Arbiter's thread: {:?}. {:?}", &name, err)
});
ready_rx.recv().unwrap();
Arbiter { tx, thread_handle }
}
/// Sets up an Arbiter runner in a new System using the provided runtime local task set.
pub(crate) fn in_new_system(local: &LocalSet) -> ArbiterHandle {
let (tx, rx) = mpsc::unbounded_channel();
let hnd = ArbiterHandle::new(tx);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
local.spawn_local(ArbiterRunner { rx });
hnd
}
/// Return a handle to the current thread's Arbiter's message sender.
///
/// # Panics
/// Panics if no Arbiter is running on the current thread.
pub fn current() -> ArbiterHandle {
HANDLE.with(|cell| match *cell.borrow() {
Some(ref addr) => addr.clone(),
None => panic!("Arbiter is not running."),
})
}
/// Stop Arbiter from continuing its event loop.
///
/// Returns true if stop message was sent successfully and false if the Arbiter has been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
/// Send a future to the Arbiter's thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the Arbiter has died.
pub fn spawn<Fut>(&self, future: Fut) -> bool
where
Fut: Future<Output = ()> + Send + 'static,
{
self.tx
.send(ArbiterCommand::Execute(Box::pin(future)))
.is_ok()
}
/// Send a function to the Arbiter's thread and execute it.
///
/// Any result from the function is discarded. If you require a result, include a response
/// channel in the function.
///
/// Returns true if function was sent successfully and false if the Arbiter has died.
pub fn spawn_fn<F>(&self, f: F) -> bool
where
F: FnOnce() + Send + 'static,
{
self.spawn(async { f() })
}
/// Wait for Arbiter's event loop to complete.
///
/// Joins the underlying OS thread handle. See [`JoinHandle::join`](thread::JoinHandle::join).
pub fn join(self) -> thread::Result<()> {
self.thread_handle.join()
}
}
struct ArbiterController {
stop: Option<Sender<i32>>,
rx: UnboundedReceiver<ArbiterCommand>,
/// A persistent future that processes [Arbiter] commands.
struct ArbiterRunner {
rx: mpsc::UnboundedReceiver<ArbiterCommand>,
}
impl Drop for ArbiterController {
fn drop(&mut self) {
if thread::panicking() {
eprintln!("Panic in Arbiter thread, shutting down system.");
if System::current().stop_on_panic() {
System::current().stop_with_code(1)
}
}
}
}
impl Future for ArbiterRunner {
type Output = ();
impl Future for ArbiterController {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match self.rx.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::Ready(Some(item))) => match item {
match ready!(Pin::new(&mut self.rx).poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process arbiter command
Some(item) => match item {
ArbiterCommand::Stop => {
if let Some(stop) = self.stop.take() {
let _ = stop.send(0);
};
return Ok(Async::Ready(()));
return Poll::Ready(());
}
ArbiterCommand::Execute(fut) => {
spawn(fut);
}
ArbiterCommand::ExecuteFn(f) => {
f.call_box();
ArbiterCommand::Execute(task_fut) => {
tokio::task::spawn_local(task_fut);
}
},
Ok(Async::NotReady) => return Ok(Async::NotReady),
}
}
}
}
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, Arbiter),
UnregisterArbiter(usize),
}
#[derive(Debug)]
pub(crate) struct SystemArbiter {
stop: Option<Sender<i32>>,
commands: UnboundedReceiver<SystemCommand>,
arbiters: HashMap<usize, Arbiter>,
}
impl SystemArbiter {
pub(crate) fn new(stop: Sender<i32>, commands: UnboundedReceiver<SystemCommand>) -> Self {
SystemArbiter {
commands,
stop: Some(stop),
arbiters: HashMap::new(),
}
}
}
impl Future for SystemArbiter {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
match self.commands.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::Ready(Some(cmd))) => match cmd {
SystemCommand::Exit(code) => {
// stop arbiters
for arb in self.arbiters.values() {
arb.stop();
}
// stop event loop
if let Some(stop) = self.stop.take() {
let _ = stop.send(code);
}
}
SystemCommand::RegisterArbiter(name, hnd) => {
self.arbiters.insert(name, hnd);
}
SystemCommand::UnregisterArbiter(name) => {
self.arbiters.remove(&name);
}
},
Ok(Async::NotReady) => return Ok(Async::NotReady),
}
}
}
}
pub trait FnExec: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnExec for F
where
F: FnOnce() + Send + 'static,
{
#[cfg_attr(feature = "cargo-clippy", allow(boxed_local))]
fn call_box(self: Box<Self>) {
(*self)()
}
}


@@ -1,88 +0,0 @@
//! Thread pool for blocking operations
use std::fmt;
use derive_more::Display;
use futures::sync::oneshot;
use futures::{Async, Future, Poll};
use parking_lot::Mutex;
use threadpool::ThreadPool;
/// Env variable for default cpu pool size
const ENV_CPU_POOL_VAR: &str = "ACTIX_CPU_POOL";
lazy_static::lazy_static! {
pub(crate) static ref DEFAULT_POOL: Mutex<ThreadPool> = {
let default = match std::env::var(ENV_CPU_POOL_VAR) {
Ok(val) => {
if let Ok(val) = val.parse() {
val
} else {
log::error!("Can not parse ACTIX_CPU_POOL value");
num_cpus::get() * 5
}
}
Err(_) => num_cpus::get() * 5,
};
Mutex::new(
threadpool::Builder::new()
.thread_name("actix-web".to_owned())
.num_threads(default)
.build(),
)
};
}
thread_local! {
static POOL: ThreadPool = {
DEFAULT_POOL.lock().clone()
};
}
/// Blocking operation execution error
#[derive(Debug, Display)]
pub enum BlockingError<E: fmt::Debug> {
#[display(fmt = "{:?}", _0)]
Error(E),
#[display(fmt = "Thread pool is gone")]
Canceled,
}
/// Execute blocking function on a thread pool, returns future that resolves
/// to result of the function execution.
pub fn run<F, I, E>(f: F) -> CpuFuture<I, E>
where
F: FnOnce() -> Result<I, E> + Send + 'static,
I: Send + 'static,
E: Send + fmt::Debug + 'static,
{
let (tx, rx) = oneshot::channel();
POOL.with(|pool| {
pool.execute(move || {
if !tx.is_canceled() {
let _ = tx.send(f());
}
})
});
CpuFuture { rx }
}
/// Blocking operation completion future. It resolves with results
/// of blocking function execution.
pub struct CpuFuture<I, E> {
rx: oneshot::Receiver<Result<I, E>>,
}
impl<I, E: fmt::Debug> Future for CpuFuture<I, E> {
type Item = I;
type Error = BlockingError<E>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let res = futures::try_ready!(self.rx.poll().map_err(|_| BlockingError::Canceled));
match res {
Ok(val) => Ok(Async::Ready(val)),
Err(err) => Err(BlockingError::Error(err)),
}
}
}


@@ -1,184 +0,0 @@
use std::borrow::Cow;
use std::io;
use futures::future::{lazy, Future};
use futures::sync::mpsc::unbounded;
use futures::sync::oneshot::{channel, Receiver};
use tokio_current_thread::CurrentThread;
use tokio_reactor::Reactor;
use tokio_timer::clock::Clock;
use tokio_timer::timer::Timer;
use crate::arbiter::{Arbiter, SystemArbiter};
use crate::runtime::Runtime;
use crate::system::System;
/// Builder struct for an Actix runtime.
///
/// Either use `Builder::build` to create a system and start actors.
/// Alternatively, use `Builder::run` to start the tokio runtime and
/// run a function in its context.
pub struct Builder {
/// Name of the System. Defaults to "actix" if unset.
name: Cow<'static, str>,
/// The clock to use
clock: Clock,
/// Whether the Arbiter will stop the whole System on uncaught panic. Defaults to false.
stop_on_panic: bool,
}
impl Builder {
pub(crate) fn new() -> Self {
Builder {
name: Cow::Borrowed("actix"),
clock: Clock::new(),
stop_on_panic: false,
}
}
/// Sets the name of the System.
pub fn name<T: Into<String>>(mut self, name: T) -> Self {
self.name = Cow::Owned(name.into());
self
}
/// Set the Clock instance that will be used by this System.
///
/// Defaults to the system clock.
pub fn clock(mut self, clock: Clock) -> Self {
self.clock = clock;
self
}
/// Sets the option 'stop_on_panic' which controls whether the System is stopped when an
/// uncaught panic is thrown from a worker thread.
///
/// Defaults to false.
pub fn stop_on_panic(mut self, stop_on_panic: bool) -> Self {
self.stop_on_panic = stop_on_panic;
self
}
/// Create new System.
///
/// This method panics if it can not create tokio runtime
pub fn build(self) -> SystemRunner {
self.create_runtime(|| {})
}
/// This function will start the Tokio runtime and will finish once the
/// `System::stop()` message gets called.
/// Function `f` gets called within the Tokio runtime context.
pub fn run<F>(self, f: F) -> io::Result<()>
where
F: FnOnce() + 'static,
{
self.create_runtime(f).run()
}
fn create_runtime<F>(self, f: F) -> SystemRunner
where
F: FnOnce() + 'static,
{
let (stop_tx, stop) = channel();
let (sys_sender, sys_receiver) = unbounded();
let arbiter = Arbiter::new_system();
let system = System::construct(sys_sender, arbiter.clone(), self.stop_on_panic);
// system arbiter
let arb = SystemArbiter::new(stop_tx, sys_receiver);
let mut rt = self.build_rt().unwrap();
rt.spawn(arb);
// init system arbiter and run configuration method
let _ = rt.block_on(lazy(move || {
f();
Ok::<_, ()>(())
}));
SystemRunner { rt, stop, system }
}
pub(crate) fn build_rt(&self) -> io::Result<Runtime> {
// We need a reactor to receive events about IO objects from kernel
let reactor = Reactor::new()?;
let reactor_handle = reactor.handle();
// Place a timer wheel on top of the reactor. If there are no timeouts to fire, it'll let the
// reactor pick up some new external events.
let timer = Timer::new_with_now(reactor, self.clock.clone());
let timer_handle = timer.handle();
// And now put a single-threaded executor on top of the timer. When there are no futures ready
// to do something, it'll let the timer or the reactor to generate some new stimuli for the
// futures to continue in their life.
let executor = CurrentThread::new_with_park(timer);
Ok(Runtime::new2(
reactor_handle,
timer_handle,
self.clock.clone(),
executor,
))
}
}
/// Helper object that runs System's event loop
#[must_use = "SystemRunner must be run"]
#[derive(Debug)]
pub struct SystemRunner {
rt: Runtime,
stop: Receiver<i32>,
system: System,
}
impl SystemRunner {
/// This function will start event loop and will finish once the
/// `System::stop()` function is called.
pub fn run(self) -> io::Result<()> {
let SystemRunner { mut rt, stop, .. } = self;
// run loop
let _ = rt.block_on(lazy(move || {
Arbiter::run_system();
Ok::<_, ()>(())
}));
let result = match rt.block_on(stop) {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
};
Arbiter::stop_system();
result
}
/// Execute a future and wait for result.
pub fn block_on<F, I, E>(&mut self, fut: F) -> Result<I, E>
where
F: Future<Item = I, Error = E>,
{
let _ = self.rt.block_on(lazy(move || {
Arbiter::run_system();
Ok::<_, ()>(())
}));
let res = self.rt.block_on(fut);
let _ = self.rt.block_on(lazy(move || {
Arbiter::stop_system();
Ok::<_, ()>(())
}));
res
}
}


@@ -1,28 +1,107 @@
//! A runtime implementation that runs everything on the current thread.
//! Tokio-based single-threaded async runtime for the Actix ecosystem.
//!
//! In most parts of the Actix ecosystem, it has been chosen to use !Send futures. For this
//! reason, a single-threaded runtime is appropriate since it is guaranteed that futures will not
//! be moved between threads. This can result in small performance improvements over cases where
//! atomics would otherwise be needed.
//!
//! To achieve similar performance to multi-threaded, work-stealing runtimes, applications
//! using `actix-rt` will create multiple, mostly disconnected, single-threaded runtimes.
//! This approach has good performance characteristics for workloads where the majority of tasks
//! have similar runtime expense.
//!
//! The disadvantage is that idle threads will not steal work from very busy, stuck or otherwise
//! backlogged threads. Tasks that are disproportionately expensive should be offloaded to the
//! blocking task thread-pool using [`task::spawn_blocking`].
//!
//! # Examples
//! ```
//! use std::sync::mpsc;
//! use actix_rt::{Arbiter, System};
//!
//! let _ = System::new();
//!
//! let (tx, rx) = mpsc::channel::<u32>();
//!
//! let arbiter = Arbiter::new();
//! arbiter.spawn_fn(move || tx.send(42).unwrap());
//!
//! let num = rx.recv().unwrap();
//! assert_eq!(num, 42);
//!
//! arbiter.stop();
//! arbiter.join().unwrap();
//! ```
#![deny(rust_2018_idioms, nonstandard_style)]
#![allow(clippy::type_complexity)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use std::future::Future;
use tokio::task::JoinHandle;
// Cannot define a main macro when compiled into test harness.
// Workaround for https://github.com/rust-lang/rust/issues/62127.
#[cfg(all(feature = "macros", not(test)))]
pub use actix_macros::{main, test};
mod arbiter;
pub mod blocking;
mod builder;
mod runtime;
mod system;
pub use self::arbiter::Arbiter;
pub use self::builder::{Builder, SystemRunner};
pub use self::arbiter::{Arbiter, ArbiterHandle};
pub use self::runtime::Runtime;
pub use self::system::System;
pub use self::system::{System, SystemRunner};
/// Spawns a future on the current arbiter.
pub use tokio::pin;
pub mod signal {
//! Asynchronous signal handling (Tokio re-exports).
#[cfg(unix)]
pub mod unix {
//! Unix specific signals (Tokio re-exports).
pub use tokio::signal::unix::*;
}
pub use tokio::signal::ctrl_c;
}
pub mod net {
//! TCP/UDP/Unix bindings (Tokio re-exports).
pub use tokio::net::UdpSocket;
pub use tokio::net::{TcpListener, TcpStream};
#[cfg(unix)]
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
}
pub mod time {
//! Utilities for tracking time (Tokio re-exports).
pub use tokio::time::Instant;
pub use tokio::time::{interval, interval_at, Interval};
pub use tokio::time::{sleep, sleep_until, Sleep};
pub use tokio::time::{timeout, Timeout};
}
pub mod task {
//! Task management (Tokio re-exports).
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
}
/// Spawns a future on the current thread.
///
/// # Panics
///
/// This function panics if actix system is not running.
pub fn spawn<F>(f: F)
/// Panics if Actix system is not running.
#[inline]
pub fn spawn<Fut>(f: Fut) -> JoinHandle<()>
where
F: futures::Future<Item = (), Error = ()> + 'static,
Fut: Future<Output = ()> + 'static,
{
if !System::is_set() {
panic!("System is not running");
}
Arbiter::spawn(f);
tokio::task::spawn_local(f)
}


@@ -1,92 +0,0 @@
//! A runtime implementation that runs everything on the current thread.
//!
//! [`current_thread::Runtime`][rt] is similar to the primary
//! [`Runtime`][concurrent-rt] except that it runs all components on the current
//! thread instead of using a thread pool. This means that it is able to spawn
//! futures that do not implement `Send`.
//!
//! Same as the default [`Runtime`][concurrent-rt], the
//! [`current_thread::Runtime`][rt] includes:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! Note that [`current_thread::Runtime`][rt] does not implement `Send` itself
//! and cannot be safely moved to other threads.
//!
//! # Spawning from other threads
//!
//! While [`current_thread::Runtime`][rt] does not implement `Send` and cannot
//! safely be moved to other threads, it provides a `Handle` that can be sent
//! to other threads and allows to spawn new tasks from there.
//!
//! For example:
//!
//! ```
//! # extern crate tokio;
//! # extern crate futures;
//! use tokio::runtime::current_thread::Runtime;
//! use tokio::prelude::*;
//! use std::thread;
//!
//! # fn main() {
//! let mut runtime = Runtime::new().unwrap();
//! let handle = runtime.handle();
//!
//! thread::spawn(move || {
//! handle.spawn(future::ok(()));
//! }).join().unwrap();
//!
//! # /*
//! runtime.run().unwrap();
//! # */
//! # }
//! ```
//!
//! # Examples
//!
//! Creating a new `Runtime` and running a future `f` until its completion and
//! returning its result.
//!
//! ```
//! use tokio::runtime::current_thread::Runtime;
//! use tokio::prelude::*;
//!
//! let mut runtime = Runtime::new().unwrap();
//!
//! // Use the runtime...
//! // runtime.block_on(f); // where f is a future
//! ```
//!
//! [rt]: struct.Runtime.html
//! [concurrent-rt]: ../struct.Runtime.html
//! [chan]: https://docs.rs/futures/0.1/futures/sync/mpsc/fn.channel.html
//! [reactor]: ../../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]: ../../timer/index.html
mod builder;
mod runtime;
pub use self::builder::Builder;
pub use self::runtime::{Runtime, Handle};
pub use tokio_current_thread::spawn;
pub use tokio_current_thread::TaskExecutor;
use futures::Future;
/// Run the provided future to completion using a runtime running on the current thread.
///
/// This first creates a new [`Runtime`], and calls [`Runtime::block_on`] with the provided future,
/// which blocks the current thread until the provided future completes. It then calls
/// [`Runtime::run`] to wait for any other spawned futures to resolve.
pub fn block_on_all<F>(future: F) -> Result<F::Item, F::Error>
where
F: Future,
{
let mut r = Runtime::new().expect("failed to start runtime on current thread");
let v = r.block_on(future)?;
r.run().expect("failed to resolve remaining futures");
Ok(v)
}


@@ -1,174 +1,96 @@
use std::error::Error;
use std::{fmt, io};
use std::{future::Future, io};
use futures::Future;
use tokio_current_thread::{self as current_thread, CurrentThread};
use tokio_executor;
use tokio_reactor::{self, Reactor};
use tokio_timer::clock::{self, Clock};
use tokio_timer::timer::{self, Timer};
use tokio::task::{JoinHandle, LocalSet};
use crate::builder::Builder;
/// Single-threaded runtime provides a way to start reactor
/// and executor on the current thread.
/// A Tokio-based runtime proxy.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// All spawned futures will be executed on the current thread. Therefore, there is no `Send` bound
/// on submitted futures.
#[derive(Debug)]
pub struct Runtime {
reactor_handle: tokio_reactor::Handle,
timer_handle: timer::Handle,
clock: Clock,
executor: CurrentThread<Timer<Reactor>>,
local: LocalSet,
rt: tokio::runtime::Runtime,
}
/// Error returned by the `run` function.
#[derive(Debug)]
pub struct RunError {
inner: current_thread::RunError,
}
impl fmt::Display for RunError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.inner)
}
}
impl Error for RunError {
fn description(&self) -> &str {
self.inner.description()
}
fn cause(&self) -> Option<&Error> {
self.inner.cause()
}
pub(crate) fn default_tokio_runtime() -> io::Result<tokio::runtime::Runtime> {
tokio::runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()
}
impl Runtime {
#[allow(clippy::new_ret_no_self)]
/// Returns a new runtime initialized with default configuration values.
pub fn new() -> io::Result<Runtime> {
Builder::new().build_rt()
#[allow(clippy::new_ret_no_self)]
pub fn new() -> io::Result<Self> {
let rt = default_tokio_runtime()?;
Ok(Runtime {
rt,
local: LocalSet::new(),
})
}
pub(super) fn new2(
reactor_handle: tokio_reactor::Handle,
timer_handle: timer::Handle,
clock: Clock,
executor: CurrentThread<Timer<Reactor>>,
) -> Runtime {
Runtime {
reactor_handle,
timer_handle,
clock,
executor,
}
/// Reference to local task set.
pub(crate) fn local_set(&self) -> &LocalSet {
&self.local
}
/// Spawn a future onto the single-threaded Tokio runtime.
/// Offload a future onto the single-threaded runtime.
///
/// See [module level][mod] documentation for more details.
/// The returned join handle can be used to await the future's result.
///
/// [mod]: index.html
/// See [crate root][crate] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # use futures::{future, Future, Stream};
/// use actix_rt::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
/// ```
/// let rt = actix_rt::Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// let handle = rt.spawn(async {
/// println!("running on the runtime");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// 42
/// });
///
/// assert_eq!(rt.block_on(handle).unwrap(), 42);
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
/// This function panics if the spawn fails. Failure occurs if the executor is currently at
/// capacity and is unable to spawn a new future.
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: Future<Item = (), Error = ()> + 'static,
F: Future + 'static,
{
self.executor.spawn(future);
self
self.local.spawn_local(future)
}
/// Runs the provided future, blocking the current thread until the future
/// completes.
/// Runs the provided future, blocking the current thread until the future completes.
///
/// This function can be used to synchronously block the current thread
/// until the provided `future` has resolved either successfully or with an
/// error. The result of the future is then returned from this function
/// call.
/// This function can be used to synchronously block the current thread until the provided
/// `future` has resolved either successfully or with an error. The result of the future is
/// then returned from this function call.
///
/// Note that this function will **also** execute any spawned futures on the
/// current thread, but will **not** block until these other spawned futures
/// have completed. Once the function returns, any uncompleted futures
/// remain pending in the `Runtime` instance. These futures will not run
/// Note that this function will also execute any spawned futures on the current thread, but
/// will not block until these other spawned futures have completed. Once the function returns,
/// any uncompleted futures remain pending in the `Runtime` instance. These futures will not run
/// until `block_on` or `run` is called again.
///
/// The caller is responsible for ensuring that other spawned futures
/// complete execution by calling `block_on` or `run`.
pub fn block_on<F>(&mut self, f: F) -> Result<F::Item, F::Error>
/// The caller is responsible for ensuring that other spawned futures complete execution by
/// calling `block_on` or `run`.
pub fn block_on<F>(&self, f: F) -> F::Output
where
F: Future,
{
self.enter(|executor| {
// Run the provided future
let ret = executor.block_on(f);
ret.map_err(|e| e.into_inner().expect("unexpected execution error"))
})
}
/// Run the executor to completion, blocking the thread until **all**
/// spawned futures have completed.
pub fn run(&mut self) -> Result<(), RunError> {
self.enter(|executor| executor.run())
.map_err(|e| RunError { inner: e })
}
fn enter<F, R>(&mut self, f: F) -> R
where
F: FnOnce(&mut current_thread::Entered<Timer<Reactor>>) -> R,
{
let Runtime {
ref reactor_handle,
ref timer_handle,
ref clock,
ref mut executor,
..
} = *self;
// Binds an executor to this thread
let mut enter = tokio_executor::enter().expect("Multiple executors at once");
// This will set the default handle and timer to use inside the closure
// and run the future.
tokio_reactor::with_default(&reactor_handle, &mut enter, |enter| {
clock::with_default(clock, enter, |enter| {
timer::with_default(&timer_handle, enter, |enter| {
// The TaskExecutor is a fake executor that looks into the
// current single-threaded executor when used. This is a trick,
// because we need two mutable references to the executor (one
// to run the provided future, another to install as the default
// one). We use the fake one here as the default one.
let mut default_executor = current_thread::TaskExecutor::current();
tokio_executor::with_default(&mut default_executor, enter, |enter| {
let mut executor = executor.enter(enter);
f(&mut executor)
})
})
})
})
self.local.block_on(&self.rt, f)
}
}
impl From<tokio::runtime::Runtime> for Runtime {
fn from(rt: tokio::runtime::Runtime) -> Self {
Self {
local: LocalSet::new(),
rt,
}
}
}
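A short sketch of the `From<tokio::runtime::Runtime>` conversion above; the wrapped runtime is paired with a fresh `LocalSet`, so `spawn` still places futures on the current thread:

```rust
use actix_rt::Runtime;

fn main() {
    // Build a current-thread Tokio runtime by hand, then wrap it in the proxy.
    let tokio_rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap();
    let rt = Runtime::from(tokio_rt);

    // Spawned futures run on the internal LocalSet while `block_on` drives it.
    let handle = rt.spawn(async { 1 + 1 });
    assert_eq!(rt.block_on(handle).unwrap(), 2);
}
```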


@@ -1,61 +1,98 @@
use std::cell::RefCell;
use std::io;
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::{
cell::RefCell,
collections::HashMap,
future::Future,
io,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
};
use futures::sync::mpsc::UnboundedSender;
use futures_core::ready;
use tokio::sync::{mpsc, oneshot};
use crate::arbiter::{Arbiter, SystemCommand};
use crate::builder::{Builder, SystemRunner};
use crate::{arbiter::ArbiterHandle, runtime::default_tokio_runtime, Arbiter, Runtime};
static SYSTEM_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
/// System is a runtime manager.
#[derive(Clone, Debug)]
pub struct System {
id: usize,
sys: UnboundedSender<SystemCommand>,
arbiter: Arbiter,
stop_on_panic: bool,
}
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static CURRENT: RefCell<Option<System>> = RefCell::new(None);
);
/// A manager for a per-thread distributed async runtime.
#[derive(Clone, Debug)]
pub struct System {
id: usize,
sys_tx: mpsc::UnboundedSender<SystemCommand>,
/// Handle to the first [Arbiter] that is created with the System.
arbiter_handle: ArbiterHandle,
}
impl System {
/// Constructs new system and sets it as current
/// Create a new system.
///
/// # Panics
/// Panics if underlying Tokio runtime can not be created.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> SystemRunner {
Self::with_tokio_rt(|| {
default_tokio_runtime()
.expect("Default Actix (Tokio) runtime could not be created.")
})
}
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> SystemRunner
where
F: Fn() -> tokio::runtime::Runtime,
{
let (stop_tx, stop_rx) = oneshot::channel();
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
let rt = Runtime::from(runtime_factory());
let sys_arbiter = Arbiter::in_new_system(rt.local_set());
let system = System::construct(sys_tx, sys_arbiter.clone());
system
.tx()
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
.unwrap();
// init background system arbiter
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
rt.spawn(sys_ctrl);
SystemRunner {
rt,
stop_rx,
system,
}
}
/// Constructs new system and registers it on the current thread.
pub(crate) fn construct(
sys: UnboundedSender<SystemCommand>,
arbiter: Arbiter,
stop_on_panic: bool,
sys_tx: mpsc::UnboundedSender<SystemCommand>,
arbiter_handle: ArbiterHandle,
) -> Self {
let sys = System {
sys,
arbiter,
stop_on_panic,
sys_tx,
arbiter_handle,
id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst),
};
System::set_current(sys.clone());
sys
}
/// Build a new system with a customized tokio runtime.
///
/// This allows to customize the runtime. See struct level docs on
/// `Builder` for more information.
pub fn builder() -> Builder {
Builder::new()
}
#[allow(clippy::new_ret_no_self)]
/// Create new system.
///
/// This method panics if it can not create tokio runtime
pub fn new<T: Into<String>>(name: T) -> SystemRunner {
Self::builder().name(name).build()
}
/// Get current running system.
///
/// # Panics
/// Panics if no system is registered on the current thread.
pub fn current() -> System {
CURRENT.with(|cell| match *cell.borrow() {
Some(ref sys) => sys.clone(),
@@ -63,67 +100,147 @@ impl System {
})
}
/// Set current running system.
pub(crate) fn is_set() -> bool {
CURRENT.with(|cell| cell.borrow().is_some())
/// Get a handle to the System's initial [Arbiter].
pub fn arbiter(&self) -> &ArbiterHandle {
&self.arbiter_handle
}
/// Set current running system.
/// Check if there is a System registered on the current thread.
pub fn is_registered() -> bool {
CURRENT.with(|sys| sys.borrow().is_some())
}
/// Register given system on current thread.
#[doc(hidden)]
pub fn set_current(sys: System) {
CURRENT.with(|s| {
*s.borrow_mut() = Some(sys);
CURRENT.with(|cell| {
*cell.borrow_mut() = Some(sys);
})
}
/// Execute function with system reference.
pub fn with_current<F, R>(f: F) -> R
where
F: FnOnce(&System) -> R,
{
CURRENT.with(|cell| match *cell.borrow() {
Some(ref sys) => f(sys),
None => panic!("System is not running"),
})
}
/// System id
/// Numeric system identifier.
///
/// Useful when using multiple Systems.
pub fn id(&self) -> usize {
self.id
}
/// Stop the system
/// Stop the system (with code 0).
pub fn stop(&self) {
self.stop_with_code(0)
}
/// Stop the system with a particular exit code.
/// Stop the system with a given exit code.
pub fn stop_with_code(&self, code: i32) {
let _ = self.sys.unbounded_send(SystemCommand::Exit(code));
let _ = self.sys_tx.send(SystemCommand::Exit(code));
}
pub(crate) fn sys(&self) -> &UnboundedSender<SystemCommand> {
&self.sys
}
/// Return status of 'stop_on_panic' option which controls whether the System is stopped when an
/// uncaught panic is thrown from a worker thread.
pub fn stop_on_panic(&self) -> bool {
self.stop_on_panic
}
/// System arbiter
pub fn arbiter(&self) -> &Arbiter {
&self.arbiter
}
/// This function will start the Tokio runtime and will finish once the
/// `System::stop()` message gets called.
/// Function `f` gets called within the Tokio runtime context.
pub fn run<F>(f: F) -> io::Result<()>
where
F: FnOnce() + 'static,
{
Self::builder().run(f)
pub(crate) fn tx(&self) -> &mpsc::UnboundedSender<SystemCommand> {
&self.sys_tx
}
}
/// Runner that keeps a [System]'s event loop alive until stop message is received.
#[must_use = "A SystemRunner does nothing unless `run` is called."]
#[derive(Debug)]
pub struct SystemRunner {
rt: Runtime,
stop_rx: oneshot::Receiver<i32>,
system: System,
}
impl SystemRunner {
/// Starts event loop and will return once [System] is [stopped](System::stop).
pub fn run(self) -> io::Result<()> {
let SystemRunner { rt, stop_rx, .. } = self;
// run loop
match rt.block_on(stop_rx) {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
}
}
/// Runs the provided future, blocking the current thread until the future completes.
#[inline]
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
self.rt.block_on(fut)
}
}
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, ArbiterHandle),
DeregisterArbiter(usize),
}
/// There is one `SystemController` per [System]. It runs in the background, keeping track of
/// [Arbiter]s and is able to distribute a system-wide stop command.
#[derive(Debug)]
pub(crate) struct SystemController {
stop_tx: Option<oneshot::Sender<i32>>,
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
arbiters: HashMap<usize, ArbiterHandle>,
}
impl SystemController {
pub(crate) fn new(
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
stop_tx: oneshot::Sender<i32>,
) -> Self {
SystemController {
cmd_rx,
stop_tx: Some(stop_tx),
arbiters: HashMap::with_capacity(4),
}
}
}
impl Future for SystemController {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match ready!(Pin::new(&mut self.cmd_rx).poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process system command
Some(cmd) => match cmd {
SystemCommand::Exit(code) => {
// stop all arbiters
for arb in self.arbiters.values() {
arb.stop();
}
// stop event loop
// will only fire once
if let Some(stop_tx) = self.stop_tx.take() {
let _ = stop_tx.send(code);
}
}
SystemCommand::RegisterArbiter(id, arb) => {
self.arbiters.insert(id, arb);
}
SystemCommand::DeregisterArbiter(id) => {
self.arbiters.remove(&id);
}
},
}
}
}
}

actix-rt/tests/tests.rs Normal file

@@ -0,0 +1,268 @@
use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::channel,
Arc,
},
thread,
time::{Duration, Instant},
};
use actix_rt::{Arbiter, System};
use tokio::sync::oneshot;
#[test]
fn await_for_timer() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
tokio::time::sleep(time).await;
});
assert!(
instant.elapsed() >= time,
"Block on should poll awaited future to completion"
);
}
#[test]
fn join_another_arbiter() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on another arbiter should complete only when it calls stop"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || {
actix_rt::spawn(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
});
});
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on an arbiter that has used actix_rt::spawn should wait for said future"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.stop();
arbiter.join().unwrap();
});
assert!(
instant.elapsed() < time,
"Premature stop of arbiter should conclude regardless of it's current state"
);
}
#[test]
fn non_static_block_on() {
let string = String::from("test_str");
let string = string.as_str();
let sys = System::new();
sys.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
let rt = actix_rt::Runtime::new().unwrap();
rt.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
}
#[test]
fn wait_for_spawns() {
let rt = actix_rt::Runtime::new().unwrap();
let handle = rt.spawn(async {
println!("running on the runtime");
// assertion panic is caught at task boundary
assert_eq!(1, 2);
});
assert!(rt.block_on(handle).is_err());
}
#[test]
fn arbiter_spawn_fn_runs() {
let _ = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || tx.send(42).unwrap());
let num = rx.recv().unwrap();
assert_eq!(num, 42);
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
fn arbiter_drop_no_panic_fn() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn_fn(|| panic!("test"));
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
fn arbiter_drop_no_panic_fut() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn(async { panic!("test") });
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
#[should_panic]
fn no_system_current_panic() {
System::current();
}
#[test]
#[should_panic]
fn no_system_arbiter_new_panic() {
Arbiter::new();
}
#[test]
fn system_arbiter_spawn() {
let runner = System::new();
let (tx, rx) = oneshot::channel();
let sys = System::current();
thread::spawn(|| {
// this thread will have no arbiter in its thread local so call will panic
Arbiter::current();
})
.join()
.unwrap_err();
let thread = thread::spawn(|| {
// this thread will have no arbiter in its thread local so use the system handle instead
System::set_current(sys);
let sys = System::current();
let arb = sys.arbiter();
arb.spawn(async move {
tx.send(42u32).unwrap();
System::current().stop();
});
});
assert_eq!(runner.block_on(rx).unwrap(), 42);
thread.join().unwrap();
}
#[test]
fn system_stop_stops_arbiters() {
let sys = System::new();
let arb = Arbiter::new();
// arbiter should be alive to receive spawn msg
assert!(Arbiter::current().spawn_fn(|| {}));
assert!(arb.spawn_fn(|| {}));
System::current().stop();
sys.run().unwrap();
// account for slightly slow thread de-spawns (only observed on windows)
thread::sleep(Duration::from_millis(100));
// arbiter should be dead and return false
assert!(!Arbiter::current().spawn_fn(|| {}));
assert!(!arb.spawn_fn(|| {}));
arb.join().unwrap();
}
#[test]
fn new_system_with_tokio() {
let (tx, rx) = channel();
let res = System::with_tokio_rt(move || {
tokio::runtime::Builder::new_multi_thread()
.enable_io()
.enable_time()
.thread_keep_alive(Duration::from_millis(1000))
.worker_threads(2)
.max_blocking_threads(2)
.on_thread_start(|| {})
.on_thread_stop(|| {})
.build()
.unwrap()
})
.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
tokio::task::spawn(async move {
tx.send(42).unwrap();
})
.await
.unwrap();
123usize
});
assert_eq!(res, 123);
assert_eq!(rx.recv().unwrap(), 42);
}
#[test]
fn new_arbiter_with_tokio() {
let _ = System::new();
let arb = Arbiter::with_tokio_rt(|| {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
});
let counter = Arc::new(AtomicBool::new(true));
let counter1 = counter.clone();
let did_spawn = arb.spawn(async move {
actix_rt::time::sleep(Duration::from_millis(1)).await;
counter1.store(false, Ordering::SeqCst);
Arbiter::current().stop();
});
assert!(did_spawn);
arb.join().unwrap();
assert_eq!(false, counter.load(Ordering::SeqCst));
}


@@ -1,17 +0,0 @@
[package]
name = "actix-server-config"
version = "0.1.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix server config utils"
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_server_config"
path = "src/lib.rs"
[dependencies]
futures = "0.1.25"


@@ -1,132 +0,0 @@
use std::cell::Cell;
use std::fmt;
use std::net::SocketAddr;
use std::rc::Rc;
#[derive(Debug, Clone)]
pub struct ServerConfig {
addr: SocketAddr,
secure: Rc<Cell<bool>>,
}
impl ServerConfig {
pub fn new(addr: SocketAddr) -> Self {
ServerConfig {
addr,
secure: Rc::new(Cell::new(false)),
}
}
/// Returns the address of the local half of this TCP server socket
pub fn local_addr(&self) -> SocketAddr {
self.addr
}
/// Returns true if connection is secure (tls enabled)
pub fn secure(&self) -> bool {
self.secure.as_ref().get()
}
/// Set secure flag
pub fn set_secure(&self) {
self.secure.as_ref().set(true)
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum Protocol {
Unknown,
Http10,
Http11,
Http2,
Proto1,
Proto2,
Proto3,
Proto4,
Proto5,
Proto6,
}
pub struct Io<T, P = ()> {
io: T,
proto: Protocol,
params: P,
}
impl<T> Io<T, ()> {
pub fn new(io: T) -> Self {
Self {
io,
proto: Protocol::Unknown,
params: (),
}
}
}
impl<T, P> Io<T, P> {
/// Reconstruct from parts.
pub fn from_parts(io: T, params: P, proto: Protocol) -> Self {
Self { io, params, proto }
}
/// Deconstruct into parts.
pub fn into_parts(self) -> (T, P, Protocol) {
(self.io, self.params, self.proto)
}
/// Returns a shared reference to the underlying stream.
pub fn get_ref(&self) -> &T {
&self.io
}
/// Returns a mutable reference to the underlying stream.
pub fn get_mut(&mut self) -> &mut T {
&mut self.io
}
/// Get selected protocol
pub fn protocol(&self) -> Protocol {
self.proto
}
/// Return new Io object with new parameter.
pub fn set<U>(self, params: U) -> Io<T, U> {
Io {
io: self.io,
proto: self.proto,
params: params,
}
}
/// Maps an Io<_, P> to Io<_, U> by applying a function to a contained value.
pub fn map<U, F>(self, op: F) -> Io<T, U>
where
F: FnOnce(P) -> U,
{
Io {
io: self.io,
proto: self.proto,
params: op(self.params),
}
}
}
impl<T, P> std::ops::Deref for Io<T, P> {
type Target = T;
fn deref(&self) -> &T {
&self.io
}
}
impl<T, P> std::ops::DerefMut for Io<T, P> {
fn deref_mut(&mut self) -> &mut T {
&mut self.io
}
}
impl<T: fmt::Debug, P> fmt::Debug for Io<T, P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Io {{{:?}}}", self.io)
}
}


@@ -1,79 +1,162 @@
# Changes
## [0.4.0] - 2019-03-12
## Unreleased - 2021-xx-xx
### Changed
## 2.0.0-beta.3 - 2021-02-06
* Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`. [#246]
* Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop. [#264]
* Add `ServerBuilder::worker_max_blocking_threads` to customize the blocking thread pool size (see the sketch after this entry). [#265]
* Update `actix-rt` to `2.0.0`. [#273]
[#246]: https://github.com/actix/actix-net/pull/246
[#264]: https://github.com/actix/actix-net/pull/264
[#265]: https://github.com/actix/actix-net/pull/265
[#273]: https://github.com/actix/actix-net/pull/273
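A hedged sketch of how these builder options might compose (the exact `worker_max_blocking_threads` signature is assumed from the changelog entry above, not verified against a released API; the rest mirrors the echo example later in this diff):

```rust
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::pipeline_factory;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .bind("noop", ("127.0.0.1", 8080), || {
            // accept a connection and immediately finish with it
            pipeline_factory(|_stream: TcpStream| async { Ok::<_, ()>(()) })
        })?
        // assumed signature: per-worker cap on the blocking thread pool (added in #265)
        .worker_max_blocking_threads(4)
        .workers(1)
        .run()
        .await
}
```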
## 2.0.0-beta.2 - 2021-01-03
* Merge `actix-testing` to `actix-server` as `test_server` mod. [#242]
[#242]: https://github.com/actix/actix-net/pull/242
## 2.0.0-beta.1 - 2020-12-28
* Added explicit info log message on accept queue pause. [#215]
* Prevent double registration of sockets when back-pressure is resolved. [#223]
* Update `mio` dependency to `0.7.3`. [#239]
* Remove `socket2` dependency. [#239]
* `ServerBuilder::backlog` now accepts `u32` instead of `i32`. [#239]
* Remove `AcceptNotify` type and pass `WakerQueue` to `Worker` to wake up `Accept`'s `Poll`. [#239]
* Convert `mio::net::TcpStream` to `actix_rt::net::TcpStream` (`UnixStream` for UDS) using
  `FromRawFd` and `IntoRawFd` (`FromRawSocket` and `IntoRawSocket` on Windows). [#239]
* Remove `AsyncRead` and `AsyncWrite` trait bound for `socket::FromStream` trait. [#239]
[#215]: https://github.com/actix/actix-net/pull/215
[#223]: https://github.com/actix/actix-net/pull/223
[#239]: https://github.com/actix/actix-net/pull/239
## 1.0.4 - 2020-09-12
* Update actix-codec to 0.3.0.
* Workers must be greater than 0. [#167]
[#167]: https://github.com/actix/actix-net/pull/167
## 1.0.3 - 2020-05-19
* Replace deprecated `net2` crate with `socket2` [#140]
[#140]: https://github.com/actix/actix-net/pull/140
## 1.0.2 - 2020-02-26
* Avoid error by calling `reregister()` on Windows [#103]
[#103]: https://github.com/actix/actix-net/pull/103
## 1.0.1 - 2019-12-29
* Rename `.start()` method to `.run()`
## 1.0.0 - 2019-12-11
* Use actix-net releases
## 1.0.0-alpha.4 - 2019-12-08
* Use actix-service 1.0.0-alpha.4
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to tokio 0.2
* Fix compilation on non-unix platforms
* Better handling of server configuration
## 1.0.0-alpha.2 - 2019-12-02
* Simplify server service (remove actix-server-config)
* Allow waiting on `Server` until the server stops
## 0.8.0-alpha.1 - 2019-11-22
* Migrate to `std::future`
## 0.7.0 - 2019-10-04
* Update `rustls` to 0.16
* Minimum required Rust version upped to 1.37.0
## 0.6.1 - 2019-09-25
* Add UDS listening support to `ServerBuilder`
## 0.6.0 - 2019-07-18
* Support Unix domain sockets #3
## 0.5.1 - 2019-05-18
* ServerBuilder::shutdown_timeout() accepts u64
## 0.5.0 - 2019-05-12
* Add `Debug` impl for `SslError`
* Derive debug for `Server` and `ServerCommand`
* Upgrade to actix-service 0.4
## 0.4.3 - 2019-04-16
* Re-export `IoStream` trait
* Depend on `ssl` and `rust-tls` features from actix-server-config
## 0.4.2 - 2019-03-30
* Fix SIGINT force shutdown
## 0.4.1 - 2019-03-14
* `SystemRuntime::on_start()` - allow running a future before server service initialization
## 0.4.0 - 2019-03-12
* Use `ServerConfig` for service factory
* Wrap tcp socket to `Io` type
* Upgrade actix-service
## [0.3.1] - 2019-03-04
### Added
## 0.3.1 - 2019-03-04
* Add `ServerBuilder::maxconnrate`, which sets the maximum per-worker number of concurrent connections
* Add helper SSL error `SslError`
### Changed
* Rename `StreamServiceFactory` to `ServiceFactory`
* Deprecate `StreamServiceFactory`
## [0.3.0] - 2019-03-02
### Changed
## 0.3.0 - 2019-03-02
* Use new `NewService` trait
## [0.2.1] - 2019-02-09
### Changed
## 0.2.1 - 2019-02-09
* Drop service response
## [0.2.0] - 2019-02-01
### Changed
## 0.2.0 - 2019-02-01
* Migrate to actix-service 0.2
* Updated rustls dependency
## [0.1.3] - 2018-12-21
### Fixed
## 0.1.3 - 2018-12-21
* Fix max concurrent connections handling
## [0.1.2] - 2018-12-12
### Changed
## 0.1.2 - 2018-12-12
* Rename `ServiceConfig::rt()` to `ServiceConfig::apply()`
### Fixed
* Fix back-pressure for concurrent ssl handshakes
## [0.1.1] - 2018-12-11
## 0.1.1 - 2018-12-11
* Fix signal handling on windows
## [0.1.0] - 2018-12-09
## 0.1.0 - 2018-12-09
* Move server to separate crate

actix-server/Cargo.toml Normal file → Executable file

@@ -1,20 +1,19 @@
[package]
name = "actix-server"
version = "0.4.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix server - General purpose tcp server"
version = "2.0.0-beta.2"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
]
description = "General purpose TCP server built for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-server/"
documentation = "https://docs.rs/actix-server"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
exclude = [".gitignore", ".cargo/config"]
edition = "2018"
workspace = ".."
[package.metadata.docs.rs]
features = ["ssl", "tls", "rust-tls"]
[lib]
name = "actix_server"
@@ -23,47 +22,22 @@ path = "src/lib.rs"
[features]
default = []
# tls
tls = ["native-tls"]
# openssl
ssl = ["openssl", "tokio-openssl"]
# rustls
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"]
[dependencies]
actix-rt = "0.2.1"
actix-service = "0.3.4"
actix-server-config = "0.1.0"
actix-codec = "0.4.0-beta.1"
actix-rt = { version = "2.0.0", default-features = false }
actix-service = "2.0.0-beta.4"
actix-utils = "3.0.0-beta.2"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
log = "0.4"
num_cpus = "1.0"
mio = "^0.6.13"
net2 = "0.2"
futures = "0.1"
mio = { version = "0.7.6", features = ["os-poll", "net"] }
num_cpus = "1.13"
slab = "0.4"
tokio-io = "0.1"
tokio-tcp = "0.1"
tokio-timer = "0.2.8"
tokio-reactor = "0.1"
tokio-signal = "0.2"
# native-tls
native-tls = { version="0.2", optional = true }
# openssl
openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.3", optional = true }
#rustls
rustls = { version = "^0.15", optional = true }
tokio-rustls = { version = "^0.9", optional = true }
webpki = { version = "0.19", optional = true }
webpki-roots = { version = "0.16", optional = true }
tokio = { version = "1", features = ["sync"] }
[dev-dependencies]
bytes = "0.4"
actix-codec = "0.1.0"
env_logger = "0.6"
actix-rt = "2.0.0"
bytes = "1"
env_logger = "0.8"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
tokio = { version = "1", features = ["io-util"] }

actix-server/LICENSE-APACHE Symbolic link

@@ -0,0 +1 @@
../LICENSE-APACHE

actix-server/LICENSE-MIT Symbolic link

@@ -0,0 +1 @@
../LICENSE-MIT


@@ -0,0 +1,88 @@
//! Simple composite-service TCP echo server.
//!
//! Using the following command:
//!
//! ```sh
//! nc 127.0.0.1 8080
//! ```
//!
//! Start typing. When you press enter, the typed line will be echoed back. The server will log
//! the length of each line it echoes and the total size of data sent when the connection is closed.
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::{env, io};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::pipeline_factory;
use bytes::BytesMut;
use futures_util::future::ok;
use log::{error, info};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "actix=trace,basic=trace");
env_logger::init();
let count = Arc::new(AtomicUsize::new(0));
let addr = ("127.0.0.1", 8080);
info!("starting server on port: {}", &addr.0);
// Bind socket address and start worker(s). By default, the server uses the number of available
// logical CPU cores as the worker count. For this reason, the closure passed to bind needs
// to return a service *factory*; so it can be created once per worker.
Server::build()
.bind("echo", addr, move || {
let count = Arc::clone(&count);
let num2 = Arc::clone(&count);
pipeline_factory(move |mut stream: TcpStream| {
let count = Arc::clone(&count);
async move {
let num = count.fetch_add(1, Ordering::SeqCst);
let num = num + 1;
let mut size = 0;
let mut buf = BytesMut::new();
loop {
match stream.read_buf(&mut buf).await {
// end of stream; bail from loop
Ok(0) => break,
// more bytes to process
Ok(bytes_read) => {
info!("[{}] read {} bytes", num, bytes_read);
stream.write_all(&buf[size..]).await.unwrap();
size += bytes_read;
}
// stream error; bail from loop with error
Err(err) => {
error!("Stream Error: {:?}", err);
return Err(());
}
}
}
// send data down service pipeline
Ok((buf.freeze(), size))
}
})
.map_err(|err| error!("Service Error: {:?}", err))
.and_then(move |(_, size)| {
let num = num2.load(Ordering::SeqCst);
info!("[{}] total bytes read: {}", num, size);
ok(size)
})
})?
.workers(1)
.run()
.await
}


@@ -1,121 +1,88 @@
use std::sync::mpsc as sync_mpsc;
use std::time::{Duration, Instant};
use std::{io, net, thread};
use std::time::Duration;
use std::{io, thread};
use actix_rt::System;
use futures::future::{lazy, Future};
use actix_rt::{
time::{sleep_until, Instant},
System,
};
use log::{error, info};
use mio;
use mio::{Interest, Poll, Token as MioToken};
use slab::Slab;
use tokio_timer::Delay;
use super::server::Server;
use super::worker::{Conn, WorkerClient};
use super::Token;
pub(crate) enum Command {
Pause,
Resume,
Stop,
Worker(WorkerClient),
}
use crate::server::Server;
use crate::socket::{MioListener, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
use crate::worker::{Conn, WorkerHandle};
use crate::Token;
struct ServerSocketInfo {
addr: net::SocketAddr,
// address of the socket; mainly used for logging.
addr: SocketAddr,
// beware: this is the crate-level token used to identify the socket and should not be
// confused with mio::Token
token: Token,
sock: mio::net::TcpListener,
lst: MioListener,
// timeout is used to mark the deadline when this socket's listener should be registered again
// after an error.
timeout: Option<Instant>,
}
#[derive(Clone)]
pub(crate) struct AcceptNotify(mio::SetReadiness);
impl AcceptNotify {
pub(crate) fn new(ready: mio::SetReadiness) -> Self {
AcceptNotify(ready)
}
pub(crate) fn notify(&self) {
let _ = self.0.set_readiness(mio::Ready::readable());
}
}
impl Default for AcceptNotify {
fn default() -> Self {
AcceptNotify::new(mio::Registration::new2().1)
}
}
/// The accept loop lives alongside `ServerBuilder`.
///
/// It is tasked with constructing the `Poll` instance and the `WakerQueue`, which are distributed
/// to `Accept` and `Worker`.
///
/// It also listens for `ServerCommand` and pushes interests to the `WakerQueue`.
pub(crate) struct AcceptLoop {
cmd_reg: Option<mio::Registration>,
cmd_ready: mio::SetReadiness,
notify_reg: Option<mio::Registration>,
notify_ready: mio::SetReadiness,
tx: sync_mpsc::Sender<Command>,
rx: Option<sync_mpsc::Receiver<Command>>,
srv: Option<Server>,
poll: Option<Poll>,
waker: WakerQueue,
}
impl AcceptLoop {
pub fn new(srv: Server) -> AcceptLoop {
let (tx, rx) = sync_mpsc::channel();
let (cmd_reg, cmd_ready) = mio::Registration::new2();
let (notify_reg, notify_ready) = mio::Registration::new2();
pub fn new(srv: Server) -> Self {
let poll = Poll::new().unwrap_or_else(|e| panic!("Can not create `mio::Poll`: {}", e));
let waker = WakerQueue::new(poll.registry())
.unwrap_or_else(|e| panic!("Can not create `mio::Waker`: {}", e));
AcceptLoop {
tx,
cmd_ready,
cmd_reg: Some(cmd_reg),
notify_ready,
notify_reg: Some(notify_reg),
rx: Some(rx),
Self {
srv: Some(srv),
poll: Some(poll),
waker,
}
}
pub fn send(&self, msg: Command) {
let _ = self.tx.send(msg);
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
pub(crate) fn waker_owned(&self) -> WakerQueue {
self.waker.clone()
}
pub fn get_notify(&self) -> AcceptNotify {
AcceptNotify::new(self.notify_ready.clone())
pub fn wake(&self, i: WakerInterest) {
self.waker.wake(i);
}
pub(crate) fn start(
&mut self,
socks: Vec<(Token, net::TcpListener)>,
workers: Vec<WorkerClient>,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
) {
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
let poll = self.poll.take().unwrap();
let waker = self.waker.clone();
Accept::start(
self.rx.take().expect("Can not re-use AcceptInfo"),
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
socks,
srv,
workers,
);
Accept::start(poll, waker, socks, srv, handles);
}
}
/// poll instance of the server.
struct Accept {
poll: mio::Poll,
rx: sync_mpsc::Receiver<Command>,
sockets: Slab<ServerSocketInfo>,
workers: Vec<WorkerClient>,
poll: Poll,
waker: WakerQueue,
handles: Vec<WorkerHandle>,
srv: Server,
timer: (mio::Registration, mio::SetReadiness),
next: usize,
backpressure: bool,
}
const DELTA: usize = 100;
const CMD: mio::Token = mio::Token(0);
const TIMER: mio::Token = mio::Token(1);
const NOTIFY: mio::Token = mio::Token(2);
/// This function defines errors that are per-connection. Which basically
/// means that if we get this error from `accept()` system call it means
/// next connection might be ready to be accepted.
@@ -130,324 +97,324 @@ fn connection_error(e: &io::Error) -> bool {
}
impl Accept {
#![allow(clippy::too_many_arguments)]
pub(crate) fn start(
rx: sync_mpsc::Receiver<Command>,
cmd_reg: mio::Registration,
notify_reg: mio::Registration,
socks: Vec<(Token, net::TcpListener)>,
poll: Poll,
waker: WakerQueue,
socks: Vec<(Token, MioListener)>,
srv: Server,
workers: Vec<WorkerClient>,
handles: Vec<WorkerHandle>,
) {
// Accept runs in its own thread and may want to spawn additional futures onto the current
// actix system.
let sys = System::current();
// start accept thread
let _ = thread::Builder::new()
thread::Builder::new()
.name("actix-server accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let mut accept = Accept::new(rx, socks, workers, srv);
// Start listening for incoming commands
if let Err(err) = accept.poll.register(
&cmd_reg,
CMD,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
// Start listening for notify updates
if let Err(err) = accept.poll.register(
&notify_reg,
NOTIFY,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
accept.poll();
});
let (mut accept, sockets) =
Accept::new_with_sockets(poll, waker, socks, handles, srv);
accept.poll_with(sockets);
})
.unwrap();
}
fn new(
rx: sync_mpsc::Receiver<Command>,
socks: Vec<(Token, net::TcpListener)>,
workers: Vec<WorkerClient>,
fn new_with_sockets(
poll: Poll,
waker: WakerQueue,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
srv: Server,
) -> Accept {
// Create a poll instance
let poll = match mio::Poll::new() {
Ok(poll) => poll,
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Start accept
) -> (Accept, Slab<ServerSocketInfo>) {
let mut sockets = Slab::new();
for (hnd_token, lst) in socks.into_iter() {
let addr = lst.local_addr().unwrap();
let server = mio::net::TcpListener::from_std(lst)
.expect("Can not create mio::net::TcpListener");
for (hnd_token, mut lst) in socks.into_iter() {
let addr = lst.local_addr();
let entry = sockets.vacant_entry();
let token = entry.key();
// Start listening for incoming connections
if let Err(err) = poll.register(
&server,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register io: {}", err);
}
poll.registry()
.register(&mut lst, MioToken(token), Interest::READABLE)
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
entry.insert(ServerSocketInfo {
addr,
token: hnd_token,
sock: server,
lst,
timeout: None,
});
}
// Timer
let (tm, tmr) = mio::Registration::new2();
if let Err(err) =
poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
{
panic!("Can not register Registration: {}", err);
}
Accept {
let accept = Accept {
poll,
rx,
sockets,
workers,
waker,
handles,
srv,
next: 0,
timer: (tm, tmr),
backpressure: false,
}
};
(accept, sockets)
}
fn poll(&mut self) {
// Create storage for events
fn poll_with(&mut self, mut sockets: Slab<ServerSocketInfo>) {
let mut events = mio::Events::with_capacity(128);
loop {
if let Err(err) = self.poll.poll(&mut events, None) {
panic!("Poll error: {}", err);
if let Err(e) = self.poll.poll(&mut events, None) {
match e.kind() {
std::io::ErrorKind::Interrupted => {
continue;
}
_ => {
panic!("Poll error: {}", e);
}
}
}
for event in events.iter() {
let token = event.token();
match token {
CMD => {
if !self.process_cmd() {
return;
// This is a loop because, in the previous version, command interests were handled by a
// loop that drained the command channel. It is not yet known whether actively draining
// the waker queue is necessary or good practice.
WAKER_TOKEN => 'waker: loop {
// take the guard on every iteration so no new interest can be added
// until the current task is done.
let mut guard = self.waker.guard();
match guard.pop_front() {
// a worker notifies that it has become available; we may want to recover
// from backpressure.
Some(WakerInterest::WorkerAvailable) => {
drop(guard);
self.maybe_backpressure(&mut sockets, false);
}
// a new worker thread has been made and its handle should be added
// to Accept
Some(WakerInterest::Worker(handle)) => {
drop(guard);
// maybe we want to recover from a backpressure.
self.maybe_backpressure(&mut sockets, false);
self.handles.push(handle);
}
// got timer interest; it's time to try registering the socket(s)
// again.
Some(WakerInterest::Timer) => {
drop(guard);
self.process_timer(&mut sockets)
}
Some(WakerInterest::Pause) => {
drop(guard);
sockets.iter_mut().for_each(|(_, info)| {
match self.deregister(info) {
Ok(_) => info!(
"Paused accepting connections on {}",
info.addr
),
Err(e) => {
error!("Can not deregister server socket {}", e)
}
}
});
}
Some(WakerInterest::Resume) => {
drop(guard);
sockets.iter_mut().for_each(|(token, info)| {
self.register_logged(token, info);
});
}
Some(WakerInterest::Stop) => {
return self.deregister_all(&mut sockets);
}
// waker queue is drained.
None => {
// Reset the WakerQueue before break so it does not grow
// infinitely.
WakerQueue::reset(&mut guard);
break 'waker;
}
}
}
TIMER => self.process_timer(),
NOTIFY => self.backpressure(false),
},
_ => {
let token = usize::from(token);
if token < DELTA {
continue;
}
self.accept(token - DELTA);
self.accept(&mut sockets, token);
}
}
}
}
}
fn process_timer(&mut self) {
fn process_timer(&self, sockets: &mut Slab<ServerSocketInfo>) {
let now = Instant::now();
for (token, info) in self.sockets.iter_mut() {
sockets.iter_mut().for_each(|(token, info)| {
// only ServerSocketInfo entries with an associated timeout value were deregistered.
if let Some(inst) = info.timeout.take() {
if now > inst {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not register server socket {}", err);
} else {
info!("Resume accepting connections on {}", info.addr);
}
self.register_logged(token, info);
} else {
info.timeout = Some(inst);
}
}
});
}
#[cfg(not(target_os = "windows"))]
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
self.poll
.registry()
.register(&mut info.lst, MioToken(token), Interest::READABLE)
}
#[cfg(target_os = "windows")]
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
// On Windows, calling register without deregister causes an error.
// See https://github.com/actix/actix-web/issues/905
// Calling reregister seems to fix the issue.
self.poll
.registry()
.register(&mut info.lst, mio::Token(token), Interest::READABLE)
.or_else(|_| {
self.poll.registry().reregister(
&mut info.lst,
mio::Token(token),
Interest::READABLE,
)
})
}
fn register_logged(&self, token: usize, info: &mut ServerSocketInfo) {
match self.register(token, info) {
Ok(_) => info!("Resume accepting connections on {}", info.addr),
Err(e) => error!("Can not register server socket {}", e),
}
}
fn process_cmd(&mut self) -> bool {
loop {
match self.rx.try_recv() {
Ok(cmd) => match cmd {
Command::Pause => {
for (_, info) in self.sockets.iter_mut() {
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
} else {
info!("Paused accepting connections on {}", info.addr);
}
}
}
Command::Resume => {
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!(
"Accepting connections on {} has been resumed",
info.addr
);
}
}
}
Command::Stop => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
Command::Worker(worker) => {
self.backpressure(false);
self.workers.push(worker);
}
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => break,
sync_mpsc::TryRecvError::Disconnected => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
},
}
}
true
fn deregister(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
self.poll.registry().deregister(&mut info.lst)
}
fn backpressure(&mut self, on: bool) {
fn deregister_all(&self, sockets: &mut Slab<ServerSocketInfo>) {
sockets.iter_mut().for_each(|(_, info)| {
info!("Accepting connections on {} has been paused", info.addr);
let _ = self.deregister(info);
});
}
fn maybe_backpressure(&mut self, sockets: &mut Slab<ServerSocketInfo>, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed", info.addr);
for (token, info) in sockets.iter_mut() {
if info.timeout.is_some() {
// socket will attempt to re-register itself when its timeout completes
continue;
}
self.register_logged(token, info);
}
}
} else if on {
self.backpressure = true;
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
self.deregister_all(sockets);
}
}
fn accept_one(&mut self, mut msg: Conn) {
fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut msg: Conn) {
if self.backpressure {
while !self.workers.is_empty() {
match self.workers[self.next].send(msg) {
Ok(_) => (),
while !self.handles.is_empty() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.set_next();
break;
}
Err(tmp) => {
self.srv.worker_died(self.workers[self.next].idx);
// the worker has lost contact and could be gone. A message is sent to the
// `ServerBuilder` future to notify it that a new worker should be made.
// After that, remove the faulty worker.
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
return;
} else if self.workers.len() <= self.next {
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
self.next = (self.next + 1) % self.workers.len();
break;
}
} else {
let mut idx = 0;
while idx < self.workers.len() {
while idx < self.handles.len() {
idx += 1;
if self.workers[self.next].available() {
match self.workers[self.next].send(msg) {
if self.handles[self.next].available() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.next = (self.next + 1) % self.workers.len();
self.set_next();
return;
}
// the worker has lost contact and could be gone. A message is sent to the
// `ServerBuilder` future to notify it that a new worker should be made.
// After that, remove the faulty worker and enter backpressure if necessary.
Err(tmp) => {
self.srv.worker_died(self.workers[self.next].idx);
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
self.backpressure(true);
self.maybe_backpressure(sockets, true);
return;
} else if self.workers.len() <= self.next {
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.next = (self.next + 1) % self.workers.len();
self.set_next();
}
// enable backpressure
self.backpressure(true);
self.accept_one(msg);
self.maybe_backpressure(sockets, true);
self.accept_one(sockets, msg);
}
}
fn accept(&mut self, token: usize) {
// set the next worker handle that will accept work.
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
}
fn accept(&mut self, sockets: &mut Slab<ServerSocketInfo>, token: usize) {
loop {
let msg = if let Some(info) = self.sockets.get_mut(token) {
match info.sock.accept_std() {
Ok((io, addr)) => Conn {
let msg = if let Some(info) = sockets.get_mut(token) {
match info.lst.accept() {
Ok(Some((io, addr))) => Conn {
io,
token: info.token,
peer: Some(addr),
},
Ok(None) => return,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
// temporarily deregister the listener
error!("Error accepting connection: {}", e);
if let Err(err) = self.poll.deregister(&info.sock) {
if let Err(err) = self.deregister(info) {
error!("Can not deregister server socket {}", err);
}
// sleep after error
// sleep after an error. Write the timeout to the socket info so that the poll loop
// later knows which socket's listener should be re-registered and when.
info.timeout = Some(Instant::now() + Duration::from_millis(500));
let r = self.timer.1.clone();
System::current().arbiter().send(lazy(move || {
Delay::new(Instant::now() + Duration::from_millis(510))
.map_err(|_| ())
.and_then(move |_| {
let _ = r.set_readiness(mio::Ready::readable());
Ok(())
})
}));
// after the sleep a Timer interest is sent to Accept Poll
let waker = self.waker.clone();
System::current().arbiter().spawn(async move {
sleep_until(Instant::now() + Duration::from_millis(510)).await;
waker.wake(WakerInterest::Timer);
});
return;
}
}
@@ -455,7 +422,7 @@ impl Accept {
return;
};
self.accept_one(msg);
self.accept_one(sockets, msg);
}
}
}
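
The `WakerQueue` used above replaces the old `mio::Registration`/`SetReadiness` pair: other threads push an interest onto a shared queue and then wake the poll loop, which drains the queue under a guard. A rough illustration of that pattern using plain mio 0.7 (with the `os-poll` feature) and a mutex-guarded deque; these are not the crate's actual types:

```rust
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use std::{io, thread, time::Duration};

use mio::{Events, Poll, Token, Waker};

#[derive(Debug)]
enum WakerInterest {
    Pause,
    Resume,
    Stop,
}

const WAKER: Token = Token(0);

fn main() -> io::Result<()> {
    let mut poll = Poll::new()?;
    let waker = Arc::new(Waker::new(poll.registry(), WAKER)?);
    let queue: Arc<Mutex<VecDeque<WakerInterest>>> = Arc::new(Mutex::new(VecDeque::new()));

    // Producer side (e.g. a worker or the builder): push interests, then wake the poll.
    {
        let (waker, queue) = (Arc::clone(&waker), Arc::clone(&queue));
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(100));
            queue.lock().unwrap().push_back(WakerInterest::Pause);
            queue.lock().unwrap().push_back(WakerInterest::Resume);
            queue.lock().unwrap().push_back(WakerInterest::Stop);
            waker.wake().expect("failed to wake poll");
        });
    }

    // Consumer side: an accept-style loop that drains the queue on waker events.
    let mut events = Events::with_capacity(8);
    loop {
        poll.poll(&mut events, None)?;
        for event in events.iter() {
            if event.token() == WAKER {
                // Drain every queued interest before polling again.
                while let Some(interest) = queue.lock().unwrap().pop_front() {
                    match interest {
                        WakerInterest::Stop => return Ok(()),
                        other => println!("handling {:?}", other),
                    }
                }
            }
        }
    }
}
```

Holding the lock while draining mirrors the "take the guard on every iteration" comment above: no new interest can be enqueued until the current batch has been handled.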


@@ -1,38 +1,42 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::{io, mem, net};
use std::{io, mem};
use actix_rt::{spawn, Arbiter, System};
use futures::future::{lazy, ok};
use futures::stream::futures_unordered;
use futures::sync::mpsc::{unbounded, UnboundedReceiver};
use futures::{Async, Future, Poll, Stream};
use actix_rt::net::TcpStream;
use actix_rt::time::{sleep_until, Instant};
use actix_rt::{self as rt, System};
use log::{error, info};
use net2::TcpBuilder;
use num_cpus;
use tokio_timer::sleep;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::sync::oneshot;
use crate::accept::{AcceptLoop, AcceptNotify, Command};
use crate::accept::AcceptLoop;
use crate::config::{ConfiguredService, ServiceConfig};
use crate::server::{Server, ServerCommand};
use crate::service_config::{ConfiguredService, ServiceConfig};
use crate::services::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::signals::{Signal, Signals};
use crate::worker::{self, Worker, WorkerAvailability, WorkerClient};
use crate::{ssl, Token};
use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::socket::{MioTcpListener, MioTcpSocket};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::worker::{self, ServerWorker, ServerWorkerConfig, WorkerAvailability, WorkerHandle};
use crate::{join_all, Token};
/// Server builder
pub struct ServerBuilder {
threads: usize,
token: Token,
backlog: i32,
workers: Vec<(usize, WorkerClient)>,
services: Vec<Box<InternalServiceFactory>>,
sockets: Vec<(Token, net::TcpListener)>,
backlog: u32,
handles: Vec<(usize, WorkerHandle)>,
services: Vec<Box<dyn InternalServiceFactory>>,
sockets: Vec<(Token, String, MioListener)>,
accept: AcceptLoop,
exit: bool,
shutdown_timeout: Duration,
no_signals: bool,
cmd: UnboundedReceiver<ServerCommand>,
server: Server,
notify: Vec<oneshot::Sender<()>>,
worker_config: ServerWorkerConfig,
}
impl Default for ServerBuilder {
@@ -44,34 +48,54 @@ impl Default for ServerBuilder {
impl ServerBuilder {
/// Create new Server builder instance
pub fn new() -> ServerBuilder {
let (tx, rx) = unbounded();
let (tx, rx) = unbounded_channel();
let server = Server::new(tx);
ServerBuilder {
threads: num_cpus::get(),
token: Token(0),
workers: Vec::new(),
token: Token::default(),
handles: Vec::new(),
services: Vec::new(),
sockets: Vec::new(),
accept: AcceptLoop::new(server.clone()),
backlog: 2048,
exit: false,
shutdown_timeout: Duration::from_secs(30),
no_signals: false,
cmd: rx,
notify: Vec::new(),
server,
worker_config: ServerWorkerConfig::default(),
}
}
/// Set number of workers to start.
///
/// By default the server uses the number of available logical CPUs as the worker
/// count.
/// count. Workers must be greater than 0.
pub fn workers(mut self, num: usize) -> Self {
assert_ne!(num, 0, "workers must be greater than 0");
self.threads = num;
self
}
/// Set max number of threads for each worker's blocking task thread pool.
///
/// One thread pool is set up **per worker**; not shared across workers.
///
/// # Examples:
/// ```
/// # use actix_server::ServerBuilder;
/// let builder = ServerBuilder::new()
/// .workers(4) // server has 4 worker threads.
/// .worker_max_blocking_threads(4); // every worker has 4 max blocking threads.
/// ```
///
/// See [tokio::runtime::Builder::max_blocking_threads] for behavior reference.
pub fn worker_max_blocking_threads(mut self, num: usize) -> Self {
self.worker_config.max_blocking_threads(num);
self
}
/// Set the maximum number of pending connections.
///
/// This refers to the number of clients that can be waiting to be served.
@@ -82,7 +106,7 @@ impl ServerBuilder {
/// Generally set in the 64-2048 range. Default value is 2048.
///
/// This method should be called before `bind()` method call.
pub fn backlog(mut self, num: i32) -> Self {
pub fn backlog(mut self, num: u32) -> Self {
self.backlog = num;
self
}
@@ -98,17 +122,6 @@ impl ServerBuilder {
self
}
/// Sets the maximum per-worker concurrent connection establish process.
///
/// All listeners will stop accepting connections when this limit is reached. It
/// can be used to limit the global SSL CPU usage.
///
/// By default max connections is set to a 256.
pub fn maxconnrate(self, num: usize) -> Self {
ssl::max_concurrent_ssl_connect(num);
self
}
/// Stop actix system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
@@ -128,8 +141,9 @@ impl ServerBuilder {
/// dropped.
///
/// By default the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u16) -> Self {
self.shutdown_timeout = Duration::from_secs(u64::from(sec));
pub fn shutdown_timeout(mut self, sec: u64) -> Self {
self.worker_config
.shutdown_timeout(Duration::from_secs(sec));
self
}
@@ -150,8 +164,8 @@ impl ServerBuilder {
let mut srv = ConfiguredService::new(apply);
for (name, lst) in cfg.services {
let token = self.token.next();
srv.stream(token, name, lst.local_addr()?);
self.sockets.push((token, lst));
srv.stream(token, name.clone(), lst.local_addr()?);
self.sockets.push((token, name, MioListener::Tcp(lst)));
}
self.services.push(Box::new(srv));
}
@@ -163,8 +177,8 @@ impl ServerBuilder {
/// Add new service to the server.
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: ServiceFactory,
U: net::ToSocketAddrs,
F: ServiceFactory<TcpStream>,
U: ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
@@ -176,77 +190,115 @@ impl ServerBuilder {
factory.clone(),
lst.local_addr()?,
));
self.sockets.push((token, lst));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
}
Ok(self)
}
/// Add new unix domain service to the server.
#[cfg(unix)]
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: ServiceFactory<actix_rt::net::UnixStream>,
N: AsRef<str>,
U: AsRef<std::path::Path>,
{
// The path must not exist when we try to bind.
// Try to remove it to avoid bind error.
if let Err(e) = std::fs::remove_file(addr.as_ref()) {
// NotFound is expected and not an issue. Anything else is.
if e.kind() != std::io::ErrorKind::NotFound {
return Err(e);
}
}
let lst = crate::socket::StdUnixListener::bind(addr)?;
self.listen_uds(name, lst, factory)
}
/// Add new unix domain service to the server.
/// Useful when running as a systemd service and
/// a socket FD can be acquired using the systemd crate.
#[cfg(unix)]
pub fn listen_uds<F, N: AsRef<str>>(
mut self,
name: N,
lst: crate::socket::StdUnixListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory<actix_rt::net::UnixStream>,
{
use std::net::{IpAddr, Ipv4Addr};
lst.set_nonblocking(true)?;
let token = self.token.next();
let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
addr,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
/// Add new service to the server.
pub fn listen<F, N: AsRef<str>>(
mut self,
name: N,
lst: net::TcpListener,
lst: StdTcpListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory,
F: ServiceFactory<TcpStream>,
{
lst.set_nonblocking(true)?;
let addr = lst.local_addr()?;
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
lst.local_addr()?,
addr,
));
self.sockets.push((token, lst));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
/// Spawn new thread and start listening for incoming connections.
///
/// This method spawns new thread and starts new actix system. Other than
/// that it is similar to `start()` method. This method blocks.
///
/// This methods panics if no socket addresses get bound.
///
/// ```rust,ignore
/// use actix_web::*;
///
/// fn main() -> std::io::Result<()> {
/// Server::new().
/// .service(
/// HttpServer::new(|| App::new().service(web::service("/").to(|| HttpResponse::Ok())))
/// .bind("127.0.0.1:0")
/// .run()
/// }
/// ```
pub fn run(self) -> io::Result<()> {
let sys = System::new("http-server");
self.start();
sys.run()
}
/// Starts processing incoming connections and return server controller.
pub fn start(mut self) -> Server {
pub fn run(mut self) -> Server {
if self.sockets.is_empty() {
panic!("Server should have at least one bound socket");
} else {
info!("Starting {} workers", self.threads);
// start workers
let mut workers = Vec::new();
for idx in 0..self.threads {
let worker = self.start_worker(idx, self.accept.get_notify());
workers.push(worker.clone());
self.workers.push((idx, worker));
}
let handles = (0..self.threads)
.map(|idx| {
let handle = self.start_worker(idx, self.accept.waker_owned());
self.handles.push((idx, handle.clone()));
handle
})
.collect();
// start accept thread
for sock in &self.sockets {
info!("Starting server on {}", sock.1.local_addr().ok().unwrap());
info!("Starting \"{}\" service on {}", sock.1, sock.2);
}
self.accept
.start(mem::replace(&mut self.sockets, Vec::new()), workers);
self.accept.start(
mem::take(&mut self.sockets)
.into_iter()
.map(|t| (t.0, t.2))
.collect(),
handles,
);
// handle signals
if !self.no_signals {
@@ -255,36 +307,26 @@ impl ServerBuilder {
// start http server actor
let server = self.server.clone();
spawn(self);
rt::spawn(self);
server
}
}
fn start_worker(&self, idx: usize, notify: AcceptNotify) -> WorkerClient {
let (tx1, rx1) = unbounded();
let (tx2, rx2) = unbounded();
let timeout = self.shutdown_timeout;
let avail = WorkerAvailability::new(notify);
let worker = WorkerClient::new(idx, tx1, tx2, avail.clone());
let services: Vec<Box<InternalServiceFactory>> =
self.services.iter().map(|v| v.clone_factory()).collect();
fn start_worker(&self, idx: usize, waker: WakerQueue) -> WorkerHandle {
let avail = WorkerAvailability::new(waker);
let services = self.services.iter().map(|v| v.clone_factory()).collect();
Arbiter::new().send(lazy(move || {
Worker::start(rx1, rx2, services, avail, timeout);
Ok::<_, ()>(())
}));
worker
ServerWorker::start(idx, services, avail, self.worker_config)
}
fn handle_cmd(&mut self, item: ServerCommand) {
match item {
ServerCommand::Pause(tx) => {
self.accept.send(Command::Pause);
self.accept.wake(WakerInterest::Pause);
let _ = tx.send(());
}
ServerCommand::Resume(tx) => {
self.accept.send(Command::Resume);
self.accept.wake(WakerInterest::Resume);
let _ = tx.send(());
}
ServerCommand::Signal(sig) => {
@@ -318,6 +360,9 @@ impl ServerBuilder {
_ => (),
}
}
ServerCommand::Notify(tx) => {
self.notify.push(tx);
}
ServerCommand::Stop {
graceful,
completion,
@@ -325,48 +370,55 @@ impl ServerBuilder {
let exit = self.exit;
// stop accept thread
self.accept.send(Command::Stop);
self.accept.wake(WakerInterest::Stop);
let notify = std::mem::take(&mut self.notify);
// stop workers
if !self.workers.is_empty() {
spawn(
futures_unordered(
self.workers
.iter()
.map(move |worker| worker.1.stop(graceful)),
)
.collect()
.then(move |_| {
if let Some(tx) = completion {
let _ = tx.send(());
}
if exit {
spawn(sleep(Duration::from_millis(300)).then(|_| {
System::current().stop();
ok(())
}));
}
ok(())
}),
)
if !self.handles.is_empty() && graceful {
let iter = self
.handles
.iter()
.map(move |worker| worker.1.stop(graceful))
.collect();
let fut = join_all(iter);
rt::spawn(async move {
let _ = fut.await;
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
if exit {
rt::spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
});
}
});
} else {
// we need to stop system if server was spawned
if self.exit {
spawn(sleep(Duration::from_millis(300)).then(|_| {
rt::spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
ok(())
}));
});
}
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
}
}
ServerCommand::WorkerDied(idx) => {
ServerCommand::WorkerFaulted(idx) => {
let mut found = false;
for i in 0..self.workers.len() {
if self.workers[i].0 == idx {
self.workers.swap_remove(i);
for i in 0..self.handles.len() {
if self.handles[i].0 == idx {
self.handles.swap_remove(i);
found = true;
break;
}
@@ -375,10 +427,10 @@ impl ServerBuilder {
if found {
error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.workers.len();
let mut new_idx = self.handles.len();
'found: loop {
for i in 0..self.workers.len() {
if self.workers[i].0 == new_idx {
for i in 0..self.handles.len() {
if self.handles[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
@@ -386,9 +438,9 @@ impl ServerBuilder {
break;
}
let worker = self.start_worker(new_idx, self.accept.get_notify());
self.workers.push((new_idx, worker.clone()));
self.accept.send(Command::Worker(worker));
let handle = self.start_worker(new_idx, self.accept.waker_owned());
self.handles.push((new_idx, handle.clone()));
self.accept.wake(WakerInterest::Worker(handle));
}
}
}
@@ -396,24 +448,22 @@ impl ServerBuilder {
}
impl Future for ServerBuilder {
type Item = ();
type Error = ();
type Output = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match self.cmd.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Ok(Async::Ready(Some(item))) => self.handle_cmd(item),
match Pin::new(&mut self.cmd).poll_recv(cx) {
Poll::Ready(Some(it)) => self.as_mut().get_mut().handle_cmd(it),
_ => return Poll::Pending,
}
}
}
}
pub(super) fn bind_addr<S: net::ToSocketAddrs>(
pub(super) fn bind_addr<S: ToSocketAddrs>(
addr: S,
backlog: i32,
) -> io::Result<Vec<net::TcpListener>> {
backlog: u32,
) -> io::Result<Vec<MioTcpListener>> {
let mut err = None;
let mut succ = false;
let mut sockets = Vec::new();
@@ -441,12 +491,13 @@ pub(super) fn bind_addr<S: net::ToSocketAddrs>(
}
}
fn create_tcp_listener(addr: net::SocketAddr, backlog: i32) -> io::Result<net::TcpListener> {
let builder = match addr {
net::SocketAddr::V4(_) => TcpBuilder::new_v4()?,
net::SocketAddr::V6(_) => TcpBuilder::new_v6()?,
fn create_tcp_listener(addr: StdSocketAddr, backlog: u32) -> io::Result<MioTcpListener> {
let socket = match addr {
StdSocketAddr::V4(_) => MioTcpSocket::new_v4()?,
StdSocketAddr::V6(_) => MioTcpSocket::new_v6()?,
};
builder.reuse_address(true)?;
builder.bind(addr)?;
Ok(builder.listen(backlog)?)
socket.set_reuseaddr(true)?;
socket.bind(addr)?;
socket.listen(backlog)
}
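
Taken together, the builder changes look like this from the caller's side. A hedged usage sketch that uses only the methods visible in this diff; the service name, address, and values are placeholders:

```rust
use std::io;

use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> io::Result<()> {
    Server::build()
        .workers(2)                     // must be > 0 (see the assert above)
        .worker_max_blocking_threads(4) // per-worker blocking thread pool cap
        .backlog(1024)                  // now a u32 rather than i32
        .shutdown_timeout(60)           // seconds, forwarded to the worker config
        .bind("app", ("127.0.0.1", 8082), || {
            fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run()
        .await
}
```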

actix-server/src/config.rs Normal file

@@ -0,0 +1,287 @@
use std::collections::HashMap;
use std::future::Future;
use std::{fmt, io};
use actix_rt::net::TcpStream;
use actix_service::{
fn_service, IntoServiceFactory as IntoBaseServiceFactory,
ServiceFactory as BaseServiceFactory,
};
use actix_utils::counter::CounterGuard;
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::builder::bind_addr;
use crate::service::{BoxedServerService, InternalServiceFactory, StreamService};
use crate::socket::{MioStream, MioTcpListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::{ready, Token};
pub struct ServiceConfig {
pub(crate) services: Vec<(String, MioTcpListener)>,
pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
pub(crate) threads: usize,
pub(crate) backlog: u32,
}
impl ServiceConfig {
pub(super) fn new(threads: usize, backlog: u32) -> ServiceConfig {
ServiceConfig {
threads,
backlog,
services: Vec::new(),
apply: None,
}
}
/// Set number of workers to start.
///
/// By default the server uses the number of available logical CPUs as the worker
/// count.
pub fn workers(&mut self, num: usize) {
self.threads = num;
}
/// Add new service to server
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
where
U: ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
self._listen(name.as_ref(), lst);
}
Ok(self)
}
/// Add new service to server
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: StdTcpListener) -> &mut Self {
self._listen(name, MioTcpListener::from_std(lst))
}
/// Register a service configuration function. This function is called during worker runtime
/// configuration and is executed in the worker thread.
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
self.apply = Some(Box::new(f));
Ok(())
}
fn _listen<N: AsRef<str>>(&mut self, name: N, lst: MioTcpListener) -> &mut Self {
if self.apply.is_none() {
self.apply = Some(Box::new(not_configured));
}
self.services.push((name.as_ref().to_string(), lst));
self
}
}
pub(super) struct ConfiguredService {
rt: Box<dyn ServiceRuntimeConfiguration>,
names: HashMap<Token, (String, StdSocketAddr)>,
topics: HashMap<String, Token>,
services: Vec<Token>,
}
impl ConfiguredService {
pub(super) fn new(rt: Box<dyn ServiceRuntimeConfiguration>) -> Self {
ConfiguredService {
rt,
names: HashMap::new(),
topics: HashMap::new(),
services: Vec::new(),
}
}
pub(super) fn stream(&mut self, token: Token, name: String, addr: StdSocketAddr) {
self.names.insert(token, (name.clone(), addr));
self.topics.insert(name, token);
self.services.push(token);
}
}
impl InternalServiceFactory for ConfiguredService {
fn name(&self, token: Token) -> &str {
&self.names[&token].0
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
rt: self.rt.clone(),
names: self.names.clone(),
topics: self.topics.clone(),
services: self.services.clone(),
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
// configure services
let mut rt = ServiceRuntime::new(self.topics.clone());
self.rt.configure(&mut rt);
rt.validate();
let mut names = self.names.clone();
let tokens = self.services.clone();
// construct services
Box::pin(async move {
let mut services = rt.services;
// TODO: Proper error handling here
for f in rt.onstart.into_iter() {
f.await;
}
let mut res = vec![];
for token in tokens {
if let Some(srv) = services.remove(&token) {
let newserv = srv.new_service(());
match newserv.await {
Ok(serv) => {
res.push((token, serv));
}
Err(_) => {
error!("Can not construct service");
return Err(());
}
}
} else {
let name = names.remove(&token).unwrap().0;
res.push((
token,
Box::new(StreamService::new(fn_service(move |_: TcpStream| {
error!("Service {:?} is not configured", name);
ready::<Result<_, ()>>(Ok(()))
}))),
));
};
}
Ok(res)
})
}
}
pub(super) trait ServiceRuntimeConfiguration: Send {
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration>;
fn configure(&self, rt: &mut ServiceRuntime);
}
impl<F> ServiceRuntimeConfiguration for F
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration> {
Box::new(self.clone())
}
fn configure(&self, rt: &mut ServiceRuntime) {
(self)(rt)
}
}
fn not_configured(_: &mut ServiceRuntime) {
error!("Service is not configured");
}
pub struct ServiceRuntime {
names: HashMap<String, Token>,
services: HashMap<Token, BoxedNewService>,
onstart: Vec<LocalBoxFuture<'static, ()>>,
}
impl ServiceRuntime {
fn new(names: HashMap<String, Token>) -> Self {
ServiceRuntime {
names,
services: HashMap::new(),
onstart: Vec::new(),
}
}
fn validate(&self) {
for (name, token) in &self.names {
if !self.services.contains_key(&token) {
error!("Service {:?} is not configured", name);
}
}
}
/// Register service.
///
/// The name of the service must have been registered during the configuration stage with the
/// *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
pub fn service<T, F>(&mut self, name: &str, service: F)
where
F: IntoBaseServiceFactory<T, TcpStream>,
T: BaseServiceFactory<TcpStream, Config = ()> + 'static,
T::Future: 'static,
T::Service: 'static,
T::InitError: fmt::Debug,
{
// let name = name.to_owned();
if let Some(token) = self.names.get(name) {
self.services.insert(
*token,
Box::new(ServiceFactory {
inner: service.into_factory(),
}),
);
} else {
panic!("Unknown service: {:?}", name);
}
}
/// Execute future before services initialization.
pub fn on_start<F>(&mut self, fut: F)
where
F: Future<Output = ()> + 'static,
{
self.onstart.push(Box::pin(fut))
}
}
type BoxedNewService = Box<
dyn BaseServiceFactory<
(Option<CounterGuard>, MioStream),
Response = (),
Error = (),
InitError = (),
Config = (),
Service = BoxedServerService,
Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>,
>,
>;
struct ServiceFactory<T> {
inner: T,
}
impl<T> BaseServiceFactory<(Option<CounterGuard>, MioStream)> for ServiceFactory<T>
where
T: BaseServiceFactory<TcpStream, Config = ()>,
T::Future: 'static,
T::Service: 'static,
T::Error: 'static,
T::InitError: fmt::Debug + 'static,
{
type Response = ();
type Error = ();
type Config = ();
type Service = BoxedServerService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>;
fn new_service(&self, _: ()) -> Self::Future {
let fut = self.inner.new_service(());
Box::pin(async move {
match fut.await {
Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
Err(e) => {
error!("Can not construct service: {:?}", e);
Err(())
}
}
})
}
}
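
A hedged sketch of how `ServiceConfig`/`ServiceRuntime` are meant to be used from the builder side. It assumes the builder's `configure` method (only part of whose body is visible in the hunks above) takes a closure over `&mut ServiceConfig` returning `io::Result<()>`; the socket name and address are placeholders:

```rust
use std::io;

use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> io::Result<()> {
    Server::build()
        .configure(|cfg| {
            // Bind a named socket at configuration time...
            cfg.bind("db", ("127.0.0.1", 8083))?;
            // ...and attach its service later, inside each worker thread.
            cfg.apply(|rt| {
                rt.service(
                    "db",
                    fn_service(|_conn: TcpStream| async { Ok::<_, ()>(()) }),
                );
            })
        })?
        .workers(1)
        .run()
        .await
}
```

If a bound name is never given a service in `apply`, the `not_configured` fallback above logs "Service ... is not configured" instead of failing silently.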


@@ -1,80 +0,0 @@
use std::cell::Cell;
use std::rc::Rc;
use futures::task::AtomicTask;
#[derive(Clone)]
/// Simple counter with ability to notify task on reaching specific number
///
/// Counter could be cloned, total ncount is shared across all clones.
pub struct Counter(Rc<CounterInner>);
#[derive(Debug)]
struct CounterInner {
count: Cell<usize>,
capacity: usize,
task: AtomicTask,
}
impl Counter {
/// Create `Counter` instance and set max value.
pub fn new(capacity: usize) -> Self {
Counter(Rc::new(CounterInner {
capacity,
count: Cell::new(0),
task: AtomicTask::new(),
}))
}
pub fn get(&self) -> CounterGuard {
CounterGuard::new(self.0.clone())
}
/// Check if counter is not at capacity
pub fn available(&self) -> bool {
self.0.available()
}
/// Get total number of acquired counts
pub fn total(&self) -> usize {
self.0.count.get()
}
}
#[derive(Debug)]
pub struct CounterGuard(Rc<CounterInner>);
impl CounterGuard {
fn new(inner: Rc<CounterInner>) -> Self {
inner.inc();
CounterGuard(inner)
}
}
impl Drop for CounterGuard {
fn drop(&mut self) {
self.0.dec();
}
}
impl CounterInner {
fn inc(&self) {
self.count.set(self.count.get() + 1);
}
fn dec(&self) {
let num = self.count.get();
self.count.set(num - 1);
if num == self.capacity {
self.task.notify();
}
}
fn available(&self) -> bool {
let avail = self.count.get() < self.capacity;
if !avail {
self.task.register();
}
avail
}
}


@@ -1,32 +1,50 @@
//! General purpose tcp server
//! General purpose TCP server.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod accept;
mod builder;
mod counter;
mod config;
mod server;
mod service_config;
mod services;
mod service;
mod signals;
pub mod ssl;
mod socket;
mod test_server;
mod waker_queue;
mod worker;
pub use actix_server_config::{Io, Protocol, ServerConfig};
pub use self::builder::ServerBuilder;
pub use self::config::{ServiceConfig, ServiceRuntime};
pub use self::server::Server;
pub use self::service_config::{ServiceConfig, ServiceRuntime};
pub use self::services::ServiceFactory;
pub use self::service::ServiceFactory;
pub use self::test_server::TestServer;
#[doc(hidden)]
pub use self::services::ServiceFactory as StreamServiceFactory;
pub use self::socket::FromStream;
/// Socket id token
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Socket ID token
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) struct Token(usize);
impl Default for Token {
fn default() -> Self {
Self::new()
}
}
impl Token {
fn new() -> Self {
Self(0)
}
pub(crate) fn next(&mut self) -> Token {
let token = Token(self.0 + 1);
let token = Token(self.0);
self.0 += 1;
token
}
@@ -36,3 +54,90 @@ impl Token {
pub fn new() -> ServerBuilder {
ServerBuilder::default()
}
// temporary Ready type for std::future::{ready, Ready}; can be removed when the MSRV surpasses 1.48
#[doc(hidden)]
pub struct Ready<T>(Option<T>);
pub(crate) fn ready<T>(t: T) -> Ready<T> {
Ready(Some(t))
}
impl<T> Unpin for Ready<T> {}
impl<T> Future for Ready<T> {
type Output = T;
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(self.get_mut().0.take().unwrap())
}
}
// A poor man's join future. The joined future is only used when starting/stopping the server;
// pin_project and pinned futures are overkill for this task.
pub(crate) struct JoinAll<T> {
fut: Vec<JoinFuture<T>>,
}
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + 'static>) -> JoinAll<T> {
let fut = fut
.into_iter()
.map(|f| JoinFuture::Future(Box::pin(f)))
.collect();
JoinAll { fut }
}
enum JoinFuture<T> {
Future(Pin<Box<dyn Future<Output = T>>>),
Result(Option<T>),
}
impl<T> Unpin for JoinAll<T> {}
impl<T> Future for JoinAll<T> {
type Output = Vec<T>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut ready = true;
let this = self.get_mut();
for fut in this.fut.iter_mut() {
if let JoinFuture::Future(f) = fut {
match f.as_mut().poll(cx) {
Poll::Ready(t) => {
*fut = JoinFuture::Result(Some(t));
}
Poll::Pending => ready = false,
}
}
}
if ready {
let mut res = Vec::new();
for fut in this.fut.iter_mut() {
if let JoinFuture::Result(f) = fut {
res.push(f.take().unwrap());
}
}
Poll::Ready(res)
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[actix_rt::test]
async fn test_join_all() {
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
let mut res = join_all(futs).await.into_iter();
assert_eq!(Ok(1), res.next().unwrap());
assert_eq!(Err(3), res.next().unwrap());
assert_eq!(Ok(9), res.next().unwrap());
}
}
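
Note the changed `Token::next` semantics above: the first token handed out is now `Token(0)` rather than `Token(1)`. A standalone restatement of the new counter behavior:

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct Token(usize);

impl Token {
    fn next(&mut self) -> Token {
        let token = Token(self.0); // return the *current* value...
        self.0 += 1;               // ...then advance for the next caller
        token
    }
}

fn main() {
    let mut counter = Token(0);
    assert_eq!(counter.next(), Token(0)); // previously the first issued token was Token(1)
    assert_eq!(counter.next(), Token(1));
}
```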


@@ -1,12 +1,17 @@
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot;
use futures::Future;
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot;
use crate::builder::ServerBuilder;
use crate::signals::Signal;
#[derive(Debug)]
pub(crate) enum ServerCommand {
WorkerDied(usize),
WorkerFaulted(usize),
Pause(oneshot::Sender<()>),
Resume(oneshot::Sender<()>),
Signal(Signal),
@@ -15,14 +20,19 @@ pub(crate) enum ServerCommand {
graceful: bool,
completion: Option<oneshot::Sender<()>>,
},
/// Notify of server stop
Notify(oneshot::Sender<()>),
}
#[derive(Clone)]
pub struct Server(UnboundedSender<ServerCommand>);
#[derive(Debug)]
pub struct Server(
UnboundedSender<ServerCommand>,
Option<oneshot::Receiver<()>>,
);
impl Server {
pub(crate) fn new(tx: UnboundedSender<ServerCommand>) -> Self {
Server(tx)
Server(tx, None)
}
/// Start server building process
@@ -31,39 +41,72 @@ impl Server {
}
pub(crate) fn signal(&self, sig: Signal) {
let _ = self.0.unbounded_send(ServerCommand::Signal(sig));
let _ = self.0.send(ServerCommand::Signal(sig));
}
pub(crate) fn worker_died(&self, idx: usize) {
let _ = self.0.unbounded_send(ServerCommand::WorkerDied(idx));
pub(crate) fn worker_faulted(&self, idx: usize) {
let _ = self.0.send(ServerCommand::WorkerFaulted(idx));
}
/// Pause accepting incoming connections
///
/// If the socket contains pending connections, they might be dropped.
/// All open connections remain active.
pub fn pause(&self) -> impl Future<Item = (), Error = ()> {
pub fn pause(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Pause(tx));
rx.map_err(|_| ())
let _ = self.0.send(ServerCommand::Pause(tx));
async {
let _ = rx.await;
}
}
/// Resume accepting incoming connections
pub fn resume(&self) -> impl Future<Item = (), Error = ()> {
pub fn resume(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Resume(tx));
rx.map_err(|_| ())
let _ = self.0.send(ServerCommand::Resume(tx));
async {
let _ = rx.await;
}
}
/// Stop incoming connection processing, stop all workers and exit.
///
/// If the server was started with the `spawn()` method, the spawned thread gets terminated.
pub fn stop(&self, graceful: bool) -> impl Future<Item = (), Error = ()> {
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Stop {
let _ = self.0.send(ServerCommand::Stop {
graceful,
completion: Some(tx),
});
rx.map_err(|_| ())
async {
let _ = rx.await;
}
}
}
impl Clone for Server {
fn clone(&self) -> Self {
Self(self.0.clone(), None)
}
}
impl Future for Server {
type Output = io::Result<()>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
if this.1.is_none() {
let (tx, rx) = oneshot::channel();
if this.0.send(ServerCommand::Notify(tx)).is_err() {
return Poll::Ready(Ok(()));
}
this.1 = Some(rx);
}
match Pin::new(this.1.as_mut().unwrap()).poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(_) => Poll::Ready(Ok(())),
}
}
}
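
From the user's side, the reworked `Server` handle is both a control channel and a future that resolves once the server stops. A hedged sketch of driving it; the address and delay are placeholders, and `actix_rt::time::sleep` is assumed from actix-rt 2.0:

```rust
use std::io;
use std::time::Duration;

use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> io::Result<()> {
    let server = Server::build()
        .bind("noop", ("127.0.0.1", 8084), || {
            fn_service(|_: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .workers(1)
        .run();

    // The handle is cheap to clone; control methods go through the command channel.
    let handle = server.clone();
    actix_rt::spawn(async move {
        actix_rt::time::sleep(Duration::from_secs(1)).await;
        handle.pause().await;    // stop accepting new connections
        handle.resume().await;   // start accepting again
        handle.stop(true).await; // graceful shutdown
    });

    // Awaiting the Server handle itself returns once shutdown completes.
    server.await
}
```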

actix-server/src/service.rs Normal file

@@ -0,0 +1,157 @@
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory as BaseServiceFactory};
use actix_utils::counter::CounterGuard;
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::socket::{FromStream, MioStream};
use crate::{ready, Ready, Token};
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
type Factory: BaseServiceFactory<Stream, Config = ()>;
fn create(&self) -> Self::Factory;
}
pub(crate) trait InternalServiceFactory: Send {
fn name(&self, token: Token) -> &str;
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>;
}
pub(crate) type BoxedServerService = Box<
dyn Service<
(Option<CounterGuard>, MioStream),
Response = (),
Error = (),
Future = Ready<Result<(), ()>>,
>,
>;
pub(crate) struct StreamService<S, I> {
service: S,
_phantom: PhantomData<I>,
}
impl<S, I> StreamService<S, I> {
pub(crate) fn new(service: S) -> Self {
StreamService {
service,
_phantom: PhantomData,
}
}
}
impl<S, I> Service<(Option<CounterGuard>, MioStream)> for StreamService<S, I>
where
S: Service<I>,
S::Future: 'static,
S::Error: 'static,
I: FromStream,
{
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(|_| ())
}
fn call(&self, (guard, req): (Option<CounterGuard>, MioStream)) -> Self::Future {
ready(match FromStream::from_mio(req) {
Ok(stream) => {
let f = self.service.call(stream);
actix_rt::spawn(async move {
let _ = f.await;
drop(guard);
});
Ok(())
}
Err(e) => {
error!("Can not convert to an async tcp stream: {}", e);
Err(())
}
})
}
}
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
name: String,
inner: F,
token: Token,
addr: SocketAddr,
_t: PhantomData<Io>,
}
impl<F, Io> StreamNewService<F, Io>
where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
pub(crate) fn create(
name: String,
token: Token,
inner: F,
addr: SocketAddr,
) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
name,
token,
inner,
addr,
_t: PhantomData,
})
}
}
impl<F, Io> InternalServiceFactory for StreamNewService<F, Io>
where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
fn name(&self, _: Token) -> &str {
&self.name
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
name: self.name.clone(),
inner: self.inner.clone(),
token: self.token,
addr: self.addr,
_t: PhantomData,
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
let token = self.token;
let fut = self.inner.create().new_service(());
Box::pin(async move {
match fut.await {
Ok(inner) => {
let service = Box::new(StreamService::new(inner)) as _;
Ok(vec![(token, service)])
}
Err(_) => Err(()),
}
})
}
}
impl<F, T, I> ServiceFactory<I> for F
where
F: Fn() -> T + Send + Clone + 'static,
T: BaseServiceFactory<I, Config = ()>,
I: FromStream,
{
type Factory = T;
fn create(&self) -> T {
(self)()
}
}
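
The blanket impl at the bottom is what lets `ServerBuilder::bind` accept a plain closure. A compile-time sketch of those bounds; the helper function is illustrative only, not part of the crate:

```rust
use actix_rt::net::TcpStream;
use actix_server::{FromStream, ServiceFactory};
use actix_service::fn_service;

// Helper with the same bounds `StreamNewService` places on its factory.
fn assert_server_factory<F, Io>(_: F)
where
    F: ServiceFactory<Io>,
    Io: FromStream,
{
}

fn main() {
    // A capture-free closure returning a base service factory with `Config = ()`
    // is `Send + Clone + 'static`, so the blanket impl applies to it.
    let factory = || fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) });
    assert_server_factory::<_, TcpStream>(factory);
}
```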


@@ -1,233 +0,0 @@
use std::collections::HashMap;
use std::{fmt, io, net};
use actix_server_config::{Io, ServerConfig};
use actix_service::{IntoNewService, NewService};
use futures::future::{join_all, Future};
use log::error;
use tokio_tcp::TcpStream;
use crate::counter::CounterGuard;
use super::builder::bind_addr;
use super::services::{
BoxedServerService, InternalServiceFactory, ServerMessage, StreamService,
};
use super::Token;
pub struct ServiceConfig {
pub(crate) services: Vec<(String, net::TcpListener)>,
pub(crate) apply: Option<Box<ServiceRuntimeConfiguration>>,
pub(crate) threads: usize,
pub(crate) backlog: i32,
}
impl ServiceConfig {
pub(super) fn new(threads: usize, backlog: i32) -> ServiceConfig {
ServiceConfig {
threads,
backlog,
services: Vec::new(),
apply: None,
}
}
/// Set number of workers to start.
///
/// By default server uses number of available logical cpu as workers
/// count.
pub fn workers(&mut self, num: usize) {
self.threads = num;
}
/// Add new service to server
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
where
U: net::ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
self.listen(name.as_ref(), lst);
}
Ok(self)
}
/// Add new service to server
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: net::TcpListener) -> &mut Self {
if self.apply.is_none() {
self.apply = Some(Box::new(not_configured));
}
self.services.push((name.as_ref().to_string(), lst));
self
}
/// Register service configuration function. This function get called
/// during worker runtime configuration. It get executed in worker thread.
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
self.apply = Some(Box::new(f));
Ok(())
}
}
pub(super) struct ConfiguredService {
rt: Box<ServiceRuntimeConfiguration>,
names: HashMap<Token, (String, net::SocketAddr)>,
services: HashMap<String, Token>,
}
impl ConfiguredService {
pub(super) fn new(rt: Box<ServiceRuntimeConfiguration>) -> Self {
ConfiguredService {
rt,
names: HashMap::new(),
services: HashMap::new(),
}
}
pub(super) fn stream(&mut self, token: Token, name: String, addr: net::SocketAddr) {
self.names.insert(token, (name.clone(), addr));
self.services.insert(name, token);
}
}
impl InternalServiceFactory for ConfiguredService {
fn name(&self, token: Token) -> &str {
&self.names[&token].0
}
fn clone_factory(&self) -> Box<InternalServiceFactory> {
Box::new(Self {
rt: self.rt.clone(),
names: self.names.clone(),
services: self.services.clone(),
})
}
fn create(&self) -> Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
// configure services
let mut rt = ServiceRuntime::new(self.services.clone());
self.rt.configure(&mut rt);
rt.validate();
// construct services
let mut fut = Vec::new();
for (token, ns) in rt.services {
let config = ServerConfig::new(self.names[&token].1);
fut.push(ns.new_service(&config).map(move |service| (token, service)));
}
Box::new(join_all(fut).map_err(|e| {
error!("Can not construct service: {:?}", e);
}))
}
}
pub(super) trait ServiceRuntimeConfiguration: Send {
fn clone(&self) -> Box<ServiceRuntimeConfiguration>;
fn configure(&self, rt: &mut ServiceRuntime);
}
impl<F> ServiceRuntimeConfiguration for F
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
fn clone(&self) -> Box<ServiceRuntimeConfiguration> {
Box::new(self.clone())
}
fn configure(&self, rt: &mut ServiceRuntime) {
(self)(rt)
}
}
fn not_configured(_: &mut ServiceRuntime) {
error!("Service is not configured");
}
pub struct ServiceRuntime {
names: HashMap<String, Token>,
services: HashMap<Token, BoxedNewService>,
}
impl ServiceRuntime {
fn new(names: HashMap<String, Token>) -> Self {
ServiceRuntime {
names,
services: HashMap::new(),
}
}
fn validate(&self) {
for (name, token) in &self.names {
if !self.services.contains_key(&token) {
error!("Service {:?} is not configured", name);
}
}
}
pub fn service<T, F>(&mut self, name: &str, service: F)
where
F: IntoNewService<T, ServerConfig>,
T: NewService<ServerConfig, Request = Io<TcpStream>> + 'static,
T::Future: 'static,
T::Service: 'static,
T::InitError: fmt::Debug,
{
// let name = name.to_owned();
if let Some(token) = self.names.get(name) {
self.services.insert(
token.clone(),
Box::new(ServiceFactory {
inner: service.into_new_service(),
}),
);
} else {
panic!("Unknown service: {:?}", name);
}
}
}
type BoxedNewService = Box<
NewService<
ServerConfig,
Request = (Option<CounterGuard>, ServerMessage),
Response = (),
Error = (),
InitError = (),
Service = BoxedServerService,
Future = Box<Future<Item = BoxedServerService, Error = ()>>,
>,
>;
struct ServiceFactory<T> {
inner: T,
}
impl<T> NewService<ServerConfig> for ServiceFactory<T>
where
T: NewService<ServerConfig, Request = Io<TcpStream>>,
T::Future: 'static,
T::Service: 'static,
T::Error: 'static,
T::InitError: fmt::Debug + 'static,
{
type Request = (Option<CounterGuard>, ServerMessage);
type Response = ();
type Error = ();
type InitError = ();
type Service = BoxedServerService;
type Future = Box<Future<Item = BoxedServerService, Error = ()>>;
fn new_service(&self, cfg: &ServerConfig) -> Self::Future {
Box::new(self.inner.new_service(cfg).map_err(|_| ()).map(|s| {
let service: BoxedServerService = Box::new(StreamService::new(s));
service
}))
}
}


@@ -1,179 +0,0 @@
use std::net::{self, SocketAddr};
use std::time::Duration;
use actix_rt::spawn;
use actix_server_config::{Io, ServerConfig};
use actix_service::{NewService, Service};
use futures::future::{err, ok, FutureResult};
use futures::{Future, Poll};
use log::error;
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
use super::Token;
use crate::counter::CounterGuard;
/// Server message
pub(crate) enum ServerMessage {
/// New stream
Connect(net::TcpStream),
/// Gracefull shutdown
Shutdown(Duration),
/// Force shutdown
ForceShutdown,
}
pub trait ServiceFactory: Send + Clone + 'static {
type NewService: NewService<ServerConfig, Request = Io<TcpStream>>;
fn create(&self) -> Self::NewService;
}
pub(crate) trait InternalServiceFactory: Send {
fn name(&self, token: Token) -> &str;
fn clone_factory(&self) -> Box<InternalServiceFactory>;
fn create(&self) -> Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>>;
}
pub(crate) type BoxedServerService = Box<
Service<
Request = (Option<CounterGuard>, ServerMessage),
Response = (),
Error = (),
Future = FutureResult<(), ()>,
>,
>;
pub(crate) struct StreamService<T> {
service: T,
}
impl<T> StreamService<T> {
pub(crate) fn new(service: T) -> Self {
StreamService { service }
}
}
impl<T> Service for StreamService<T>
where
T: Service<Request = Io<TcpStream>>,
T::Future: 'static,
T::Error: 'static,
{
type Request = (Option<CounterGuard>, ServerMessage);
type Response = ();
type Error = ();
type Future = FutureResult<(), ()>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.service.poll_ready().map_err(|_| ())
}
fn call(&mut self, (guard, req): (Option<CounterGuard>, ServerMessage)) -> Self::Future {
match req {
ServerMessage::Connect(stream) => {
let stream = TcpStream::from_std(stream, &Handle::default()).map_err(|e| {
error!("Can not convert to an async tcp stream: {}", e);
});
if let Ok(stream) = stream {
spawn(self.service.call(Io::new(stream)).then(move |res| {
drop(guard);
res.map_err(|_| ()).map(|_| ())
}));
ok(())
} else {
err(())
}
}
_ => ok(()),
}
}
}
pub(crate) struct StreamNewService<F: ServiceFactory> {
name: String,
inner: F,
token: Token,
addr: SocketAddr,
}
impl<F> StreamNewService<F>
where
F: ServiceFactory,
{
pub(crate) fn create(
name: String,
token: Token,
inner: F,
addr: SocketAddr,
) -> Box<InternalServiceFactory> {
Box::new(Self {
name,
token,
inner,
addr,
})
}
}
impl<F> InternalServiceFactory for StreamNewService<F>
where
F: ServiceFactory,
{
fn name(&self, _: Token) -> &str {
&self.name
}
fn clone_factory(&self) -> Box<InternalServiceFactory> {
Box::new(Self {
name: self.name.clone(),
inner: self.inner.clone(),
token: self.token,
addr: self.addr,
})
}
fn create(&self) -> Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
let token = self.token;
let config = ServerConfig::new(self.addr);
Box::new(
self.inner
.create()
.new_service(&config)
.map_err(|_| ())
.map(move |inner| {
let service: BoxedServerService = Box::new(StreamService::new(inner));
vec![(token, service)]
}),
)
}
}
impl InternalServiceFactory for Box<InternalServiceFactory> {
fn name(&self, token: Token) -> &str {
self.as_ref().name(token)
}
fn clone_factory(&self) -> Box<InternalServiceFactory> {
self.as_ref().clone_factory()
}
fn create(&self) -> Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
self.as_ref().create()
}
}
impl<F, T> ServiceFactory for F
where
F: Fn() -> T + Send + Clone + 'static,
T: NewService<ServerConfig, Request = Io<TcpStream>>,
{
type NewService = T;
fn create(&self) -> T {
(self)()
}
}


@@ -1,12 +1,11 @@
use std::io;
use actix_rt::spawn;
use futures::stream::futures_unordered;
use futures::{Async, Future, Poll, Stream};
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use crate::server::Server;
/// Different types of process signals
#[allow(dead_code)]
#[derive(PartialEq, Clone, Copy, Debug)]
pub(crate) enum Signal {
/// SIGHUP
@@ -22,97 +21,74 @@ pub(crate) enum Signal {
pub(crate) struct Signals {
srv: Server,
#[cfg(not(unix))]
stream: SigStream,
signals: futures_core::future::LocalBoxFuture<'static, std::io::Result<()>>,
#[cfg(unix)]
streams: Vec<SigStream>,
signals: Vec<(Signal, actix_rt::signal::unix::Signal)>,
}
type SigStream = Box<Stream<Item = Signal, Error = io::Error>>;
impl Signals {
pub(crate) fn start(srv: Server) {
let fut = {
#[cfg(not(unix))]
{
tokio_signal::ctrl_c()
.map_err(|_| ())
.and_then(move |stream| Signals {
srv,
stream: Box::new(stream.map(|_| Signal::Int)),
})
}
#[cfg(not(unix))]
{
actix_rt::spawn(Signals {
srv,
signals: Box::pin(actix_rt::signal::ctrl_c()),
});
}
#[cfg(unix)]
{
use actix_rt::signal::unix;
#[cfg(unix)]
{
use tokio_signal::unix;
let sig_map = [
(unix::SignalKind::interrupt(), Signal::Int),
(unix::SignalKind::hangup(), Signal::Hup),
(unix::SignalKind::terminate(), Signal::Term),
(unix::SignalKind::quit(), Signal::Quit),
];
let mut sigs: Vec<Box<Future<Item = SigStream, Error = io::Error>>> =
Vec::new();
sigs.push(Box::new(
tokio_signal::unix::Signal::new(tokio_signal::unix::SIGINT).map(|stream| {
let s: SigStream = Box::new(stream.map(|_| Signal::Int));
s
}),
));
sigs.push(Box::new(
tokio_signal::unix::Signal::new(tokio_signal::unix::SIGHUP).map(
|stream: unix::Signal| {
let s: SigStream = Box::new(stream.map(|_| Signal::Hup));
s
},
),
));
sigs.push(Box::new(
tokio_signal::unix::Signal::new(tokio_signal::unix::SIGTERM).map(
|stream| {
let s: SigStream = Box::new(stream.map(|_| Signal::Term));
s
},
),
));
sigs.push(Box::new(
tokio_signal::unix::Signal::new(tokio_signal::unix::SIGQUIT).map(
|stream| {
let s: SigStream = Box::new(stream.map(|_| Signal::Quit));
s
},
),
));
futures_unordered(sigs)
.collect()
.map_err(|_| ())
.and_then(move |streams| Signals { srv, streams })
}
};
spawn(fut);
let signals = sig_map
.iter()
.filter_map(|(kind, sig)| {
unix::signal(*kind)
.map(|tokio_sig| (*sig, tokio_sig))
.map_err(|e| {
log::error!(
"Can not initialize stream handler for {:?} err: {}",
sig,
e
)
})
.ok()
})
.collect::<Vec<_>>();
actix_rt::spawn(Signals { srv, signals });
}
}
}
impl Future for Signals {
type Item = ();
type Error = ();
type Output = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
#[cfg(not(unix))]
loop {
match self.stream.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::Ready(Some(sig))) => self.srv.signal(sig),
Ok(Async::NotReady) => return Ok(Async::NotReady),
match self.signals.as_mut().poll(cx) {
Poll::Ready(_) => {
self.srv.signal(Signal::Int);
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
#[cfg(unix)]
{
for s in &mut self.streams {
loop {
match s.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::NotReady) => break,
Ok(Async::Ready(Some(sig))) => self.srv.signal(sig),
}
for (sig, fut) in self.signals.iter_mut() {
if Pin::new(fut).poll_recv(cx).is_ready() {
let sig = *sig;
self.srv.signal(sig);
return Poll::Ready(());
}
}
Ok(Async::NotReady)
Poll::Pending
}
}
}
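
The rewrite above replaces the old `tokio-signal` streams with the Tokio 1.0 signal API re-exported through `actix-rt`. As orientation, here is a minimal, hedged sketch of that API used on its own (Unix-only, editor-added, not part of the diff):

```rust
use actix_rt::signal::unix::{signal, SignalKind};

#[actix_rt::main]
async fn main() {
    // Register interest in SIGTERM, the same primitive the new Signals future polls above.
    let mut term = signal(SignalKind::terminate()).expect("failed to register SIGTERM handler");

    // recv() resolves once the process receives the signal.
    term.recv().await;
    println!("received SIGTERM, shutting down");
}
```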

actix-server/src/socket.rs Normal file

@@ -0,0 +1,254 @@
pub(crate) use std::net::{
SocketAddr as StdSocketAddr, TcpListener as StdTcpListener, ToSocketAddrs,
};
pub(crate) use mio::net::{TcpListener as MioTcpListener, TcpSocket as MioTcpSocket};
#[cfg(unix)]
pub(crate) use {
mio::net::UnixListener as MioUnixListener,
std::os::unix::net::UnixListener as StdUnixListener,
};
use std::{fmt, io};
use actix_rt::net::TcpStream;
use mio::event::Source;
use mio::net::TcpStream as MioTcpStream;
use mio::{Interest, Registry, Token};
#[cfg(windows)]
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
#[cfg(unix)]
use {
actix_rt::net::UnixStream,
mio::net::{SocketAddr as MioSocketAddr, UnixStream as MioUnixStream},
std::os::unix::io::{FromRawFd, IntoRawFd},
};
pub(crate) enum MioListener {
Tcp(MioTcpListener),
#[cfg(unix)]
Uds(MioUnixListener),
}
impl MioListener {
pub(crate) fn local_addr(&self) -> SocketAddr {
match *self {
MioListener::Tcp(ref lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
#[cfg(unix)]
MioListener::Uds(ref lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
}
}
pub(crate) fn accept(&self) -> io::Result<Option<(MioStream, SocketAddr)>> {
match *self {
MioListener::Tcp(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Tcp(stream), SocketAddr::Tcp(addr)))),
#[cfg(unix)]
MioListener::Uds(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Uds(stream), SocketAddr::Uds(addr)))),
}
}
}
impl Source for MioListener {
fn register(
&mut self,
registry: &Registry,
token: Token,
interests: Interest,
) -> io::Result<()> {
match *self {
MioListener::Tcp(ref mut lst) => lst.register(registry, token, interests),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => lst.register(registry, token, interests),
}
}
fn reregister(
&mut self,
registry: &Registry,
token: Token,
interests: Interest,
) -> io::Result<()> {
match *self {
MioListener::Tcp(ref mut lst) => lst.reregister(registry, token, interests),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => lst.reregister(registry, token, interests),
}
}
fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
match *self {
MioListener::Tcp(ref mut lst) => lst.deregister(registry),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => {
let res = lst.deregister(registry);
// cleanup file path
if let Ok(addr) = lst.local_addr() {
if let Some(path) = addr.as_pathname() {
let _ = std::fs::remove_file(path);
}
}
res
}
}
}
}
impl From<StdTcpListener> for MioListener {
fn from(lst: StdTcpListener) -> Self {
MioListener::Tcp(MioTcpListener::from_std(lst))
}
}
#[cfg(unix)]
impl From<StdUnixListener> for MioListener {
fn from(lst: StdUnixListener) -> Self {
MioListener::Uds(MioUnixListener::from_std(lst))
}
}
impl fmt::Debug for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
#[cfg(all(unix))]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
}
}
}
impl fmt::Display for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
#[cfg(unix)]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
}
}
}
pub(crate) enum SocketAddr {
Tcp(StdSocketAddr),
#[cfg(unix)]
Uds(MioSocketAddr),
}
impl fmt::Display for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
#[cfg(unix)]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
impl fmt::Debug for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
#[cfg(unix)]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
#[derive(Debug)]
pub enum MioStream {
Tcp(MioTcpStream),
#[cfg(unix)]
Uds(MioUnixStream),
}
/// helper trait for converting mio stream to tokio stream.
pub trait FromStream: Sized {
fn from_mio(sock: MioStream) -> io::Result<Self>;
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(unix)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(windows)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(unix)]
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn socket_addr() {
let addr = SocketAddr::Tcp("127.0.0.1:8080".parse().unwrap());
assert!(format!("{:?}", addr).contains("127.0.0.1:8080"));
assert_eq!(format!("{}", addr), "127.0.0.1:8080");
let addr: StdSocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = MioTcpSocket::new_v4().unwrap();
socket.set_reuseaddr(true).unwrap();
socket.bind(addr).unwrap();
let tcp = socket.listen(128).unwrap();
let lst = MioListener::Tcp(tcp);
assert!(format!("{:?}", lst).contains("TcpListener"));
assert!(format!("{}", lst).contains("127.0.0.1"));
}
#[test]
#[cfg(unix)]
fn uds() {
let _ = std::fs::remove_file("/tmp/sock.xxxxx");
if let Ok(socket) = MioUnixListener::bind("/tmp/sock.xxxxx") {
let addr = socket.local_addr().expect("Couldn't get local address");
let a = SocketAddr::Uds(addr);
assert!(format!("{:?}", a).contains("/tmp/sock.xxxxx"));
assert!(format!("{}", a).contains("/tmp/sock.xxxxx"));
let lst = MioListener::Uds(socket);
assert!(format!("{:?}", lst).contains("/tmp/sock.xxxxx"));
assert!(format!("{}", lst).contains("/tmp/sock.xxxxx"));
}
}
}


@@ -1,41 +0,0 @@
//! SSL Services
use std::sync::atomic::{AtomicUsize, Ordering};
use crate::counter::Counter;
#[cfg(feature = "ssl")]
mod openssl;
#[cfg(feature = "ssl")]
pub use self::openssl::OpensslAcceptor;
#[cfg(feature = "tls")]
mod nativetls;
#[cfg(feature = "tls")]
pub use self::nativetls::{NativeTlsAcceptor, TlsStream};
#[cfg(feature = "rust-tls")]
mod rustls;
#[cfg(feature = "rust-tls")]
pub use self::rustls::RustlsAcceptor;
/// Sets the maximum number of concurrent SSL connection handshakes per worker.
///
/// All listeners will stop accepting connections when this limit is
/// reached. It can be used to limit the global SSL CPU usage.
///
/// By default the limit is set to 256.
pub fn max_concurrent_ssl_connect(num: usize) {
MAX_CONN.store(num, Ordering::Relaxed);
}
pub(crate) static MAX_CONN: AtomicUsize = AtomicUsize::new(256);
thread_local! {
static MAX_CONN_COUNTER: Counter = Counter::new(MAX_CONN.load(Ordering::Relaxed));
}
/// SSL error combined with service error.
pub enum SslError<E1, E2> {
Ssl(E1),
Service(E2),
}


@@ -1,180 +0,0 @@
use std::io;
use std::marker::PhantomData;
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use native_tls::{self, Error, HandshakeError, TlsAcceptor};
use tokio_io::{AsyncRead, AsyncWrite};
use crate::counter::{Counter, CounterGuard};
use crate::ssl::MAX_CONN_COUNTER;
use crate::{Io, Protocol, ServerConfig};
/// Support `SSL` connections via native-tls package
///
/// `tls` feature enables `NativeTlsAcceptor` type
pub struct NativeTlsAcceptor<T, P = ()> {
acceptor: TlsAcceptor,
io: PhantomData<(T, P)>,
}
impl<T: AsyncRead + AsyncWrite, P> NativeTlsAcceptor<T, P> {
/// Create `NativeTlsAcceptor` instance
pub fn new(acceptor: TlsAcceptor) -> Self {
NativeTlsAcceptor {
acceptor,
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite, P> Clone for NativeTlsAcceptor<T, P> {
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite, P> NewService<ServerConfig> for NativeTlsAcceptor<T, P> {
type Request = Io<T, P>;
type Response = Io<TlsStream<T>, P>;
type Error = Error;
type Service = NativeTlsAcceptorService<T, P>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, cfg: &ServerConfig) -> Self::Future {
cfg.set_secure();
MAX_CONN_COUNTER.with(|conns| {
ok(NativeTlsAcceptorService {
acceptor: self.acceptor.clone(),
conns: conns.clone(),
io: PhantomData,
})
})
}
}
pub struct NativeTlsAcceptorService<T, P> {
acceptor: TlsAcceptor,
io: PhantomData<(T, P)>,
conns: Counter,
}
impl<T: AsyncRead + AsyncWrite, P> Service for NativeTlsAcceptorService<T, P> {
type Request = Io<T, P>;
type Response = Io<TlsStream<T>, P>;
type Error = Error;
type Future = Accept<T, P>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
if self.conns.available() {
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
}
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let (io, params, _) = req.into_parts();
Accept {
_guard: self.conns.get(),
inner: Some(self.acceptor.accept(io)),
params: Some(params),
}
}
}
/// A wrapper around an underlying raw stream which implements the TLS or SSL
/// protocol.
///
/// A `TlsStream<S>` represents a handshake that has been completed successfully
/// and both the server and the client are ready for receiving and sending
/// data. Bytes read from a `TlsStream` are decrypted from `S` and bytes written
/// to a `TlsStream` are encrypted when passing through to `S`.
#[derive(Debug)]
pub struct TlsStream<S> {
inner: native_tls::TlsStream<S>,
}
/// Future returned from `NativeTlsAcceptor::accept` which will resolve
/// once the accept handshake has finished.
pub struct Accept<S, P> {
inner: Option<Result<native_tls::TlsStream<S>, HandshakeError<S>>>,
params: Option<P>,
_guard: CounterGuard,
}
impl<T: AsyncRead + AsyncWrite, P> Future for Accept<T, P> {
type Item = Io<TlsStream<T>, P>;
type Error = Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.inner.take().expect("cannot poll MidHandshake twice") {
Ok(stream) => Ok(Async::Ready(Io::from_parts(
TlsStream { inner: stream },
self.params.take().unwrap(),
Protocol::Unknown,
))),
Err(HandshakeError::Failure(e)) => Err(e),
Err(HandshakeError::WouldBlock(s)) => match s.handshake() {
Ok(stream) => Ok(Async::Ready(Io::from_parts(
TlsStream { inner: stream },
self.params.take().unwrap(),
Protocol::Unknown,
))),
Err(HandshakeError::Failure(e)) => Err(e),
Err(HandshakeError::WouldBlock(s)) => {
self.inner = Some(Err(HandshakeError::WouldBlock(s)));
Ok(Async::NotReady)
}
},
}
}
}
impl<S> TlsStream<S> {
/// Get access to the internal `native_tls::TlsStream` stream which also
/// transitively allows access to `S`.
pub fn get_ref(&self) -> &native_tls::TlsStream<S> {
&self.inner
}
/// Get mutable access to the internal `native_tls::TlsStream` stream which
/// also transitively allows mutable access to `S`.
pub fn get_mut(&mut self) -> &mut native_tls::TlsStream<S> {
&mut self.inner
}
}
impl<S: io::Read + io::Write> io::Read for TlsStream<S> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
impl<S: io::Read + io::Write> io::Write for TlsStream<S> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl<S: AsyncRead + AsyncWrite> AsyncRead for TlsStream<S> {}
impl<S: AsyncRead + AsyncWrite> AsyncWrite for TlsStream<S> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
match self.inner.shutdown() {
Ok(_) => (),
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (),
Err(e) => return Err(e),
}
self.inner.get_mut().shutdown()
}
}


@@ -1,129 +0,0 @@
use std::marker::PhantomData;
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use openssl::ssl::{HandshakeError, SslAcceptor};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_openssl::{AcceptAsync, SslAcceptorExt, SslStream};
use crate::counter::{Counter, CounterGuard};
use crate::ssl::MAX_CONN_COUNTER;
use crate::{Io, Protocol, ServerConfig};
/// Support `SSL` connections via openssl package
///
/// `ssl` feature enables `OpensslAcceptor` type
pub struct OpensslAcceptor<T: AsyncRead + AsyncWrite, P = ()> {
acceptor: SslAcceptor,
io: PhantomData<(T, P)>,
}
impl<T: AsyncRead + AsyncWrite, P> OpensslAcceptor<T, P> {
/// Create default `OpensslAcceptor`
pub fn new(acceptor: SslAcceptor) -> Self {
OpensslAcceptor {
acceptor,
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite, P> Clone for OpensslAcceptor<T, P> {
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite, P> NewService<ServerConfig> for OpensslAcceptor<T, P> {
type Request = Io<T, P>;
type Response = Io<SslStream<T>, P>;
type Error = HandshakeError<T>;
type Service = OpensslAcceptorService<T, P>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, cfg: &ServerConfig) -> Self::Future {
cfg.set_secure();
MAX_CONN_COUNTER.with(|conns| {
ok(OpensslAcceptorService {
acceptor: self.acceptor.clone(),
conns: conns.clone(),
io: PhantomData,
})
})
}
}
pub struct OpensslAcceptorService<T, P> {
acceptor: SslAcceptor,
conns: Counter,
io: PhantomData<(T, P)>,
}
impl<T: AsyncRead + AsyncWrite, P> Service for OpensslAcceptorService<T, P> {
type Request = Io<T, P>;
type Response = Io<SslStream<T>, P>;
type Error = HandshakeError<T>;
type Future = OpensslAcceptorServiceFut<T, P>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
if self.conns.available() {
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
}
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let (io, params, _) = req.into_parts();
OpensslAcceptorServiceFut {
_guard: self.conns.get(),
fut: SslAcceptorExt::accept_async(&self.acceptor, io),
params: Some(params),
}
}
}
pub struct OpensslAcceptorServiceFut<T, P>
where
T: AsyncRead + AsyncWrite,
{
fut: AcceptAsync<T>,
params: Option<P>,
_guard: CounterGuard,
}
impl<T: AsyncRead + AsyncWrite, P> Future for OpensslAcceptorServiceFut<T, P> {
type Item = Io<SslStream<T>, P>;
type Error = HandshakeError<T>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let io = futures::try_ready!(self.fut.poll());
let proto = if let Some(protos) = io.get_ref().ssl().selected_alpn_protocol() {
const H2: &[u8] = b"\x02h2";
const HTTP10: &[u8] = b"\x08http/1.0";
const HTTP11: &[u8] = b"\x08http/1.1";
if protos.windows(3).any(|window| window == H2) {
Protocol::Http2
} else if protos.windows(9).any(|window| window == HTTP11) {
Protocol::Http11
} else if protos.windows(9).any(|window| window == HTTP10) {
Protocol::Http10
} else {
Protocol::Unknown
}
} else {
Protocol::Unknown
};
Ok(Async::Ready(Io::from_parts(
io,
self.params.take().unwrap(),
proto,
)))
}
}


@@ -1,114 +0,0 @@
use std::io;
use std::marker::PhantomData;
use std::sync::Arc;
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use rustls::{ServerConfig, ServerSession};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
use crate::counter::{Counter, CounterGuard};
use crate::ssl::MAX_CONN_COUNTER;
use crate::{Io, Protocol, ServerConfig as SrvConfig};
/// Support `SSL` connections via rustls package
///
/// `rust-tls` feature enables `RustlsAcceptor` type
pub struct RustlsAcceptor<T, P = ()> {
config: Arc<ServerConfig>,
io: PhantomData<(T, P)>,
}
impl<T: AsyncRead + AsyncWrite, P> RustlsAcceptor<T, P> {
/// Create `RustlsAcceptor` new service
pub fn new(config: ServerConfig) -> Self {
RustlsAcceptor {
config: Arc::new(config),
io: PhantomData,
}
}
}
impl<T, P> Clone for RustlsAcceptor<T, P> {
fn clone(&self) -> Self {
Self {
config: self.config.clone(),
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite, P> NewService<SrvConfig> for RustlsAcceptor<T, P> {
type Request = Io<T, P>;
type Response = Io<TlsStream<T, ServerSession>, P>;
type Error = io::Error;
type Service = RustlsAcceptorService<T, P>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, cfg: &SrvConfig) -> Self::Future {
cfg.set_secure();
MAX_CONN_COUNTER.with(|conns| {
ok(RustlsAcceptorService {
acceptor: self.config.clone().into(),
conns: conns.clone(),
io: PhantomData,
})
})
}
}
pub struct RustlsAcceptorService<T, P> {
acceptor: TlsAcceptor,
io: PhantomData<(T, P)>,
conns: Counter,
}
impl<T: AsyncRead + AsyncWrite, P> Service for RustlsAcceptorService<T, P> {
type Request = Io<T, P>;
type Response = Io<TlsStream<T, ServerSession>, P>;
type Error = io::Error;
type Future = RustlsAcceptorServiceFut<T, P>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
if self.conns.available() {
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
}
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let (io, params, _) = req.into_parts();
RustlsAcceptorServiceFut {
_guard: self.conns.get(),
fut: self.acceptor.accept(io),
params: Some(params),
}
}
}
pub struct RustlsAcceptorServiceFut<T, P>
where
T: AsyncRead + AsyncWrite,
{
fut: Accept<T>,
params: Option<P>,
_guard: CounterGuard,
}
impl<T: AsyncRead + AsyncWrite, P> Future for RustlsAcceptorServiceFut<T, P> {
type Item = Io<TlsStream<T, ServerSession>, P>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let io = futures::try_ready!(self.fut.poll());
Ok(Async::Ready(Io::from_parts(
io,
self.params.take().unwrap(),
Protocol::Unknown,
)))
}
}


@@ -1,14 +1,9 @@
//! Various helpers for Actix applications to use during testing.
use std::sync::mpsc;
use std::{net, thread};
use actix_rt::{Runtime, System};
use actix_server::{Server, StreamServiceFactory};
use actix_rt::{net::TcpStream, System};
use futures::Future;
use net2::TcpBuilder;
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
use crate::{Server, ServerBuilder, ServiceFactory};
/// The `TestServer` type.
///
@@ -17,13 +12,14 @@ use tokio_tcp::TcpStream;
///
/// # Examples
///
/// ```rust
/// use actix_service::{fn_service, IntoNewService};
/// use actix_test_server::TestServer;
/// ```
/// use actix_service::fn_service;
/// use actix_server::TestServer;
///
/// fn main() {
/// #[actix_rt::main]
/// async fn main() {
/// let srv = TestServer::with(|| fn_service(
/// |sock| {
/// |sock| async move {
/// println!("New connection: {:?}", sock);
/// Ok::<_, ()>(())
/// }
@@ -34,78 +30,87 @@ use tokio_tcp::TcpStream;
/// ```
pub struct TestServer;
/// Test server runstime
/// Test server runtime
pub struct TestServerRuntime {
addr: net::SocketAddr,
host: String,
port: u16,
rt: Runtime,
system: System,
}
impl TestServer {
/// Start new test server with application factory
pub fn with<F: StreamServiceFactory>(factory: F) -> TestServerRuntime {
/// Start new server with server builder
pub fn start<F>(mut factory: F) -> TestServerRuntime
where
F: FnMut(ServerBuilder) -> ServerBuilder + Send + 'static,
{
let (tx, rx) = mpsc::channel();
// run server in separate thread
thread::spawn(move || {
let sys = System::new("actix-test-server");
let sys = System::new();
factory(Server::build()).workers(1).disable_signals().run();
tx.send(System::current()).unwrap();
sys.run()
});
let system = rx.recv().unwrap();
TestServerRuntime {
system,
addr: "127.0.0.1:0".parse().unwrap(),
host: "127.0.0.1".to_string(),
port: 0,
}
}
/// Start new test server with application factory
pub fn with<F: ServiceFactory<TcpStream>>(factory: F) -> TestServerRuntime {
let (tx, rx) = mpsc::channel();
// run server in separate thread
thread::spawn(move || {
let sys = System::new();
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
Server::build()
.listen("test", tcp, factory)?
.workers(1)
.disable_signals()
.start();
tx.send((System::current(), local_addr)).unwrap();
sys.block_on(async {
Server::build()
.listen("test", tcp, factory)
.unwrap()
.workers(1)
.disable_signals()
.run();
tx.send((System::current(), local_addr)).unwrap();
});
sys.run()
});
let (system, addr) = rx.recv().unwrap();
System::set_current(system);
let rt = Runtime::new().unwrap();
let host = format!("{}", addr.ip());
let port = addr.port();
TestServerRuntime {
system,
addr,
rt,
host,
port,
}
}
/// Get firat available unused local address
/// Get first available unused local address
pub fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = TcpBuilder::new_v4().unwrap();
socket.bind(&addr).unwrap();
socket.reuse_address(true).unwrap();
let tcp = socket.to_tcp_listener().unwrap();
let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let tcp = socket.listen(1024).unwrap();
tcp.local_addr().unwrap()
}
}
impl TestServerRuntime {
/// Execute future on current runtime
pub fn block_on<F, I, E>(&mut self, fut: F) -> Result<I, E>
where
F: Future<Item = I, Error = E>,
{
self.rt.block_on(fut)
}
/// Spawn future to the current runtime
pub fn spawn<F>(&mut self, fut: F)
where
F: Future<Item = (), Error = ()> + 'static,
{
self.rt.spawn(fut);
}
/// Test server host
pub fn host(&self) -> &str {
&self.host
@@ -123,12 +128,12 @@ impl TestServerRuntime {
/// Stop http server
fn stop(&mut self) {
System::current().stop();
self.system.stop();
}
/// Connect to server, return tokio TcpStream
pub fn connect(&self) -> std::io::Result<TcpStream> {
TcpStream::from_std(net::TcpStream::connect(self.addr)?, &Handle::default())
TcpStream::from_std(net::TcpStream::connect(self.addr)?)
}
}


@@ -0,0 +1,89 @@
use std::{
collections::VecDeque,
ops::Deref,
sync::{Arc, Mutex, MutexGuard},
};
use mio::{Registry, Token as MioToken, Waker};
use crate::worker::WorkerHandle;
/// waker token for `mio::Poll` instance
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);
/// A `mio::Waker` paired with a queue that wakes up `Accept`'s `Poll` and carries the
/// `WakerInterest`s the poll loop should look into.
pub(crate) struct WakerQueue(Arc<(Waker, Mutex<VecDeque<WakerInterest>>)>);
impl Clone for WakerQueue {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl Deref for WakerQueue {
type Target = (Waker, Mutex<VecDeque<WakerInterest>>);
fn deref(&self) -> &Self::Target {
self.0.deref()
}
}
impl WakerQueue {
/// Construct a waker queue with the given `Poll`'s `Registry`.
///
/// A fixed `WAKER_TOKEN` is used to identify the wake interest, and the `Poll` needs to match
/// the event's token for it to properly handle `WakerInterest`.
pub(crate) fn new(registry: &Registry) -> std::io::Result<Self> {
let waker = Waker::new(registry, WAKER_TOKEN)?;
let queue = Mutex::new(VecDeque::with_capacity(16));
Ok(Self(Arc::new((waker, queue))))
}
/// push a new interest to the queue and wake up the accept poll afterwards.
pub(crate) fn wake(&self, interest: WakerInterest) {
let (waker, queue) = self.deref();
queue
.lock()
.expect("Failed to lock WakerQueue")
.push_back(interest);
waker
.wake()
.unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e));
}
/// get a MutexGuard of the waker queue.
pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
self.deref().1.lock().expect("Failed to lock WakerQueue")
}
/// reset the waker queue so it does not grow infinitely.
pub(crate) fn reset(queue: &mut VecDeque<WakerInterest>) {
std::mem::swap(&mut VecDeque::<WakerInterest>::with_capacity(16), queue);
}
}
/// Types of interest the accept loop looks into when its `Poll` is woken up by the waker.
///
/// Note: these interests should not be confused with `mio::Interest`; they are mostly not I/O related.
pub(crate) enum WakerInterest {
/// `WorkerAvailable` is an interest from `Worker` notifying `Accept` that a worker is
/// available again and can accept new tasks.
WorkerAvailable,
/// `Pause`, `Resume`, `Stop` Interest are from `ServerBuilder` future. It listens to
/// `ServerCommand` and notify `Accept` to do exactly these tasks.
Pause,
Resume,
Stop,
/// `Timer` is an interest sent by a delayed future. When an error happens while accepting a
/// connection, `Accept` deregisters the socket listeners temporarily and, once the delayed
/// future resolves, wakes up the poll to register them again.
Timer,
/// `Worker` is an interest that arises after a worker runs into a faulted state (determined by
/// whether work can be sent to it successfully). `Accept` is woken up to add the new
/// `WorkerHandle`.
Worker(WorkerHandle),
}
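
These types are crate-internal. For orientation only, the sketch below illustrates the intended wake/drain/reset cycle using the methods defined above; it is not the actual accept-loop code, and the match arms are placeholders.

```rust
// Hypothetical helper showing how the accept loop is expected to drain the queue
// after the waker fires.
fn drain_waker_queue(waker_queue: &WakerQueue) {
    let mut guard = waker_queue.guard();

    while let Some(interest) = guard.pop_front() {
        match interest {
            WakerInterest::WorkerAvailable => { /* resume accepting on paused listeners */ }
            WakerInterest::Pause => { /* deregister all listeners */ }
            WakerInterest::Resume => { /* re-register all listeners */ }
            WakerInterest::Stop => { /* exit the accept loop */ }
            WakerInterest::Timer => { /* re-register listeners after the error backoff */ }
            WakerInterest::Worker(_handle) => { /* store the restarted worker's handle */ }
        }
    }

    // Shrink the queue back to its initial capacity so it does not grow unbounded.
    WakerQueue::reset(&mut guard);
}
```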


@@ -1,18 +1,22 @@
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::{mem, net, time};
use std::task::{Context, Poll};
use std::time::Duration;
use actix_rt::time::{sleep_until, Instant, Sleep};
use actix_rt::{spawn, Arbiter};
use futures::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use futures::sync::oneshot;
use futures::{future, Async, Future, Poll, Stream};
use actix_utils::counter::Counter;
use futures_core::future::LocalBoxFuture;
use log::{error, info, trace};
use tokio_timer::{sleep, Delay};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot;
use crate::accept::AcceptNotify;
use crate::counter::Counter;
use crate::services::{BoxedServerService, InternalServiceFactory, ServerMessage};
use crate::Token;
use crate::service::{BoxedServerService, InternalServiceFactory};
use crate::socket::{MioStream, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::{join_all, Token};
pub(crate) struct WorkerCommand(Conn);
@@ -25,9 +29,9 @@ pub(crate) struct StopCommand {
#[derive(Debug)]
pub(crate) struct Conn {
pub io: net::TcpStream,
pub io: MioStream,
pub token: Token,
pub peer: Option<net::SocketAddr>,
pub peer: Option<SocketAddr>,
}
static MAX_CONNS: AtomicUsize = AtomicUsize::new(25600);
@@ -42,31 +46,33 @@ pub fn max_concurrent_connections(num: usize) {
MAX_CONNS.store(num, Ordering::Relaxed);
}
pub(crate) fn num_connections() -> usize {
MAX_CONNS_COUNTER.with(|conns| conns.total())
}
thread_local! {
static MAX_CONNS_COUNTER: Counter =
Counter::new(MAX_CONNS.load(Ordering::Relaxed));
}
pub(crate) fn num_connections() -> usize {
MAX_CONNS_COUNTER.with(|conns| conns.total())
}
// a handle to worker that can send message to worker and share the availability of worker to other
// thread.
#[derive(Clone)]
pub(crate) struct WorkerClient {
pub(crate) struct WorkerHandle {
pub idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
}
impl WorkerClient {
impl WorkerHandle {
pub fn new(
idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
) -> Self {
WorkerClient {
WorkerHandle {
idx,
tx1,
tx2,
@@ -75,9 +81,7 @@ impl WorkerClient {
}
pub fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx1
.unbounded_send(WorkerCommand(msg))
.map_err(|msg| msg.into_inner().0)
self.tx1.send(WorkerCommand(msg)).map_err(|msg| msg.0 .0)
}
pub fn available(&self) -> bool {
@@ -86,21 +90,21 @@ impl WorkerClient {
pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (result, rx) = oneshot::channel();
let _ = self.tx2.unbounded_send(StopCommand { graceful, result });
let _ = self.tx2.send(StopCommand { graceful, result });
rx
}
}
#[derive(Clone)]
pub(crate) struct WorkerAvailability {
notify: AcceptNotify,
waker: WakerQueue,
available: Arc<AtomicBool>,
}
impl WorkerAvailability {
pub fn new(notify: AcceptNotify) -> Self {
pub fn new(waker: WakerQueue) -> Self {
WorkerAvailability {
notify,
waker,
available: Arc::new(AtomicBool::new(false)),
}
}
@@ -111,113 +115,205 @@ impl WorkerAvailability {
pub fn set(&self, val: bool) {
let old = self.available.swap(val, Ordering::Release);
// notify the accept on switched to available.
if !old && val {
self.notify.notify()
self.waker.wake(WakerInterest::WorkerAvailable);
}
}
}
/// Service worker
/// Service worker.
///
/// Worker accepts Socket objects via unbounded channel and starts stream
/// processing.
pub(crate) struct Worker {
/// Worker accepts Socket objects via unbounded channel and starts stream processing.
pub(crate) struct ServerWorker {
rx: UnboundedReceiver<WorkerCommand>,
rx2: UnboundedReceiver<StopCommand>,
services: Vec<Option<(usize, BoxedServerService)>>,
services: Vec<WorkerService>,
availability: WorkerAvailability,
conns: Counter,
factories: Vec<Box<InternalServiceFactory>>,
factories: Vec<Box<dyn InternalServiceFactory>>,
state: WorkerState,
shutdown_timeout: time::Duration,
config: ServerWorkerConfig,
}
impl Worker {
pub(crate) fn start(
rx: UnboundedReceiver<WorkerCommand>,
rx2: UnboundedReceiver<StopCommand>,
factories: Vec<Box<InternalServiceFactory>>,
availability: WorkerAvailability,
shutdown_timeout: time::Duration,
) {
availability.set(false);
let mut wrk = MAX_CONNS_COUNTER.with(|conns| Worker {
rx,
rx2,
availability,
factories,
shutdown_timeout,
services: Vec::new(),
conns: conns.clone(),
state: WorkerState::Unavailable(Vec::new()),
});
struct WorkerService {
factory: usize,
status: WorkerServiceStatus,
service: BoxedServerService,
}
let mut fut = Vec::new();
for (idx, factory) in wrk.factories.iter().enumerate() {
fut.push(factory.create().map(move |res| {
res.into_iter()
.map(|(t, s)| (idx, t, s))
.collect::<Vec<_>>()
}));
impl WorkerService {
fn created(&mut self, service: BoxedServerService) {
self.service = service;
self.status = WorkerServiceStatus::Unavailable;
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum WorkerServiceStatus {
Available,
Unavailable,
Failed,
Restarting,
Stopping,
Stopped,
}
/// Config for worker behavior passed down from server builder.
#[derive(Copy, Clone)]
pub(crate) struct ServerWorkerConfig {
shutdown_timeout: Duration,
max_blocking_threads: usize,
}
impl Default for ServerWorkerConfig {
fn default() -> Self {
// 512 is the default max blocking thread count of tokio runtime.
let max_blocking_threads = std::cmp::max(512 / num_cpus::get(), 1);
Self {
shutdown_timeout: Duration::from_secs(30),
max_blocking_threads,
}
spawn(
future::join_all(fut)
.map_err(|e| {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
}
}
impl ServerWorkerConfig {
pub(crate) fn max_blocking_threads(&mut self, num: usize) {
self.max_blocking_threads = num;
}
pub(crate) fn shutdown_timeout(&mut self, dur: Duration) {
self.shutdown_timeout = dur;
}
}
impl ServerWorker {
pub(crate) fn start(
idx: usize,
factories: Vec<Box<dyn InternalServiceFactory>>,
availability: WorkerAvailability,
config: ServerWorkerConfig,
) -> WorkerHandle {
let (tx1, rx) = unbounded_channel();
let (tx2, rx2) = unbounded_channel();
let avail = availability.clone();
// every worker runs in its own arbiter.
// use a custom tokio runtime builder to change the settings of runtime.
Arbiter::with_tokio_rt(move || {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.max_blocking_threads(config.max_blocking_threads)
.build()
.unwrap()
})
.spawn(async move {
availability.set(false);
let mut wrk = MAX_CONNS_COUNTER.with(move |conns| ServerWorker {
rx,
rx2,
availability,
factories,
config,
services: Vec::new(),
conns: conns.clone(),
state: WorkerState::Unavailable,
});
let fut = wrk
.factories
.iter()
.enumerate()
.map(|(idx, factory)| {
let fut = factory.create();
async move {
fut.await.map(|r| {
r.into_iter().map(|(t, s)| (idx, t, s)).collect::<Vec<_>>()
})
}
})
.and_then(move |services| {
for item in services {
for (idx, token, service) in item {
while token.0 >= wrk.services.len() {
wrk.services.push(None);
.collect::<Vec<_>>();
// a second spawn to make sure the worker future runs as a non-boxed future,
// since Arbiter::spawn would box the future before sending it to the arbiter.
spawn(async move {
let res: Result<Vec<_>, _> = join_all(fut).await.into_iter().collect();
match res {
Ok(services) => {
for item in services {
for (factory, token, service) in item {
assert_eq!(token.0, wrk.services.len());
wrk.services.push(WorkerService {
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
}
wrk.services[token.0] = Some((idx, service));
}
}
wrk
}),
);
Err(e) => {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
}
}
wrk.await
});
});
WorkerHandle::new(idx, tx1, tx2, avail)
}
fn shutdown(&mut self, force: bool) {
if force {
self.services.iter_mut().for_each(|h| {
if let Some(h) = h {
let _ = h.1.call((None, ServerMessage::ForceShutdown));
self.services.iter_mut().for_each(|srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopped;
}
});
} else {
let timeout = self.shutdown_timeout;
self.services.iter_mut().for_each(move |h| {
if let Some(h) = h {
let _ = h.1.call((None, ServerMessage::Shutdown(timeout)));
self.services.iter_mut().for_each(move |srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopping;
}
});
}
}
fn check_readiness(&mut self, trace: bool) -> Result<bool, (Token, usize)> {
let mut ready = self.conns.available();
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (Token, usize)> {
let mut ready = self.conns.available(cx);
let mut failed = None;
for (token, service) in &mut self.services.iter_mut().enumerate() {
if let Some(service) = service {
match service.1.poll_ready() {
Ok(Async::Ready(_)) => {
if trace {
for (idx, srv) in self.services.iter_mut().enumerate() {
if srv.status == WorkerServiceStatus::Available
|| srv.status == WorkerServiceStatus::Unavailable
{
match srv.service.poll_ready(cx) {
Poll::Ready(Ok(_)) => {
if srv.status == WorkerServiceStatus::Unavailable {
trace!(
"Service {:?} is available",
self.factories[service.0].name(Token(token))
self.factories[srv.factory].name(Token(idx))
);
srv.status = WorkerServiceStatus::Available;
}
}
Ok(Async::NotReady) => ready = false,
Err(_) => {
Poll::Pending => {
ready = false;
if srv.status == WorkerServiceStatus::Available {
trace!(
"Service {:?} is unavailable",
self.factories[srv.factory].name(Token(idx))
);
srv.status = WorkerServiceStatus::Unavailable;
}
}
Poll::Ready(Err(_)) => {
error!(
"Service {:?} readiness check returned error, restarting",
self.factories[service.0].name(Token(token))
self.factories[srv.factory].name(Token(idx))
);
failed = Some((Token(token), service.0));
failed = Some((Token(idx), srv.factory));
srv.status = WorkerServiceStatus::Failed;
}
}
}
@@ -231,207 +327,158 @@ impl Worker {
}
enum WorkerState {
None,
Available,
Unavailable(Vec<Conn>),
Unavailable,
Restarting(
usize,
Token,
Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>>,
LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>,
),
Shutdown(
Pin<Box<Sleep>>,
Pin<Box<Sleep>>,
Option<oneshot::Sender<bool>>,
),
Shutdown(Delay, Delay, oneshot::Sender<bool>),
}
impl Future for Worker {
type Item = ();
type Error = ();
impl Future for ServerWorker {
type Output = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// `StopWorker` message handler
if let Ok(Async::Ready(Some(StopCommand { graceful, result }))) = self.rx2.poll() {
if let Poll::Ready(Some(StopCommand { graceful, result })) =
Pin::new(&mut self.rx2).poll_recv(cx)
{
self.availability.set(false);
let num = num_connections();
if num == 0 {
info!("Shutting down worker, 0 connections");
let _ = result.send(true);
return Ok(Async::Ready(()));
return Poll::Ready(());
} else if graceful {
self.shutdown(false);
let num = num_connections();
if num != 0 {
info!("Graceful worker shutdown, {} connections", num);
self.state = WorkerState::Shutdown(
sleep(time::Duration::from_secs(1)),
sleep(self.shutdown_timeout),
result,
Box::pin(sleep_until(Instant::now() + Duration::from_secs(1))),
Box::pin(sleep_until(Instant::now() + self.config.shutdown_timeout)),
Some(result),
);
} else {
let _ = result.send(true);
return Ok(Async::Ready(()));
return Poll::Ready(());
}
} else {
info!("Force shutdown worker, {} connections", num);
self.shutdown(true);
let _ = result.send(false);
return Ok(Async::Ready(()));
return Poll::Ready(());
}
}
let state = mem::replace(&mut self.state, WorkerState::None);
match state {
WorkerState::Unavailable(mut conns) => {
match self.check_readiness(true) {
Ok(true) => {
self.state = WorkerState::Available;
// process requests from wait queue
while let Some(msg) = conns.pop() {
match self.check_readiness(false) {
Ok(true) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.as_mut()
.expect("actix net bug")
.1
.call((Some(guard), ServerMessage::Connect(msg.io)));
}
Ok(false) => {
trace!("Worker is unavailable");
self.state = WorkerState::Unavailable(conns);
return self.poll();
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.state = WorkerState::Restarting(
idx,
token,
self.factories[idx].create(),
);
return self.poll();
}
}
match self.state {
WorkerState::Unavailable => match self.check_readiness(cx) {
Ok(true) => {
self.state = WorkerState::Available;
self.availability.set(true);
self.poll(cx)
}
Ok(false) => Poll::Pending,
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
self.poll(cx)
}
},
WorkerState::Restarting(idx, token, ref mut fut) => {
match fut.as_mut().poll(cx) {
Poll::Ready(Ok(item)) => {
// only interest in the first item?
if let Some((token, service)) = item.into_iter().next() {
trace!(
"Service {:?} has been restarted",
self.factories[idx].name(token)
);
self.services[token.0].created(service);
self.state = WorkerState::Unavailable;
return self.poll(cx);
}
self.availability.set(true);
return self.poll();
}
Poll::Ready(Err(_)) => {
panic!(
"Can not restart {:?} service",
self.factories[idx].name(token)
);
}
Poll::Pending => return Poll::Pending,
}
self.poll(cx)
}
WorkerState::Shutdown(ref mut t1, ref mut t2, ref mut tx) => {
let num = num_connections();
if num == 0 {
let _ = tx.take().unwrap().send(true);
Arbiter::current().stop();
return Poll::Ready(());
}
// check graceful timeout
if Pin::new(t2).poll(cx).is_ready() {
let _ = tx.take().unwrap().send(false);
self.shutdown(true);
Arbiter::current().stop();
return Poll::Ready(());
}
// sleep for 1 second and then check again
if t1.as_mut().poll(cx).is_ready() {
*t1 = Box::pin(sleep_until(Instant::now() + Duration::from_secs(1)));
let _ = t1.as_mut().poll(cx);
}
Poll::Pending
}
// actively poll stream and handle worker command
WorkerState::Available => loop {
match self.check_readiness(cx) {
Ok(true) => (),
Ok(false) => {
self.state = WorkerState::Unavailable(conns);
return Ok(Async::NotReady);
trace!("Worker is unavailable");
self.availability.set(false);
self.state = WorkerState::Unavailable;
return self.poll(cx);
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.availability.set(false);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
return self.poll();
}
}
}
WorkerState::Restarting(idx, token, mut fut) => {
match fut.poll() {
Ok(Async::Ready(item)) => {
for (token, service) in item {
trace!(
"Service {:?} has been restarted",
self.factories[idx].name(token)
);
self.services[token.0] = Some((idx, service));
self.state = WorkerState::Unavailable(Vec::new());
}
}
Ok(Async::NotReady) => {
self.state = WorkerState::Restarting(idx, token, fut);
return Ok(Async::NotReady);
}
Err(_) => {
panic!(
"Can not restart {:?} service",
self.factories[idx].name(token)
);
}
}
return self.poll();
}
WorkerState::Shutdown(mut t1, mut t2, tx) => {
let num = num_connections();
if num == 0 {
let _ = tx.send(true);
Arbiter::current().stop();
return Ok(Async::Ready(()));
}
// check graceful timeout
match t2.poll().unwrap() {
Async::NotReady => (),
Async::Ready(_) => {
self.shutdown(true);
let _ = tx.send(false);
Arbiter::current().stop();
return Ok(Async::Ready(()));
return self.poll(cx);
}
}
// sleep for 1 second and then check again
match t1.poll().unwrap() {
Async::NotReady => (),
Async::Ready(_) => {
t1 = sleep(time::Duration::from_secs(1));
let _ = t1.poll();
match Pin::new(&mut self.rx).poll_recv(cx) {
// handle incoming io stream
Poll::Ready(Some(WorkerCommand(msg))) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.service
.call((Some(guard), msg.io));
}
}
self.state = WorkerState::Shutdown(t1, t2, tx);
return Ok(Async::NotReady);
}
WorkerState::Available => {
loop {
match self.rx.poll() {
// handle incoming tcp stream
Ok(Async::Ready(Some(WorkerCommand(msg)))) => {
match self.check_readiness(false) {
Ok(true) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.as_mut()
.expect("actix-server bug")
.1
.call((Some(guard), ServerMessage::Connect(msg.io)));
continue;
}
Ok(false) => {
trace!("Worker is unavailable");
self.availability.set(false);
self.state = WorkerState::Unavailable(vec![msg]);
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.availability.set(false);
self.state = WorkerState::Restarting(
idx,
token,
self.factories[idx].create(),
);
}
}
return self.poll();
}
Ok(Async::NotReady) => {
self.state = WorkerState::Available;
return Ok(Async::NotReady);
}
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
}
}
}
WorkerState::None => panic!(),
};
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(()),
};
},
}
}
}
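
The key change in this file is that each `ServerWorker` now runs on its own `Arbiter` backed by a customized single-threaded Tokio runtime. Below is a standalone, hedged sketch of that pattern (editor-added; the values are illustrative, not the server's defaults, and `tokio` 1.x must be a direct dependency):

```rust
use actix_rt::{Arbiter, System};

fn main() {
    System::new().block_on(async {
        // Build an arbiter whose thread uses a current-thread Tokio runtime with a
        // capped blocking thread pool, mirroring what ServerWorker::start does above.
        let arbiter = Arbiter::with_tokio_rt(|| {
            tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .max_blocking_threads(4)
                .build()
                .unwrap()
        });

        arbiter.spawn(async {
            // the worker future would be spawned here
        });

        arbiter.stop();
        arbiter.join().unwrap();
    });
}
```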


@@ -1,117 +1,109 @@
use std::io::Read;
use std::sync::mpsc;
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::sync::{mpsc, Arc};
use std::{net, thread, time};
use actix_codec::{BytesCodec, Framed};
use actix_server::{Io, Server, ServerConfig};
use actix_service::{fn_cfg_factory, fn_service, IntoService};
use bytes::Bytes;
use futures::{Future, Sink};
use net2::TcpBuilder;
use tokio_tcp::TcpStream;
use actix_server::Server;
use actix_service::fn_service;
use futures_util::future::{lazy, ok};
fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = TcpBuilder::new_v4().unwrap();
socket.bind(&addr).unwrap();
socket.reuse_address(true).unwrap();
let tcp = socket.to_tcp_listener().unwrap();
let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let tcp = socket.listen(32).unwrap();
tcp.local_addr().unwrap()
}
#[test]
fn test_bind() {
let addr = unused_addr();
thread::spawn(move || {
Server::build()
.bind("test", addr, move || {
fn_cfg_factory(move |cfg: &ServerConfig| {
assert_eq!(cfg.local_addr(), addr);
Ok::<_, ()>((|_| Ok::<_, ()>(())).into_service())
})
})
.unwrap()
.run()
});
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
}
#[test]
fn test_bind_no_config() {
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let sys = actix_rt::System::new("test");
let srv = Server::build()
.bind("test", addr, move || fn_service(|_| Ok::<_, ()>(())))
.unwrap()
.start();
let h = thread::spawn(move || {
let sys = actix_rt::System::new();
let srv = sys.block_on(lazy(|_| {
Server::build()
.workers(1)
.disable_signals()
.bind("test", addr, move || fn_service(|_| ok::<_, ()>(())))
.unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
let _ = sys.stop();
sys.stop();
let _ = h.join();
}
#[test]
fn test_listen() {
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let h = thread::spawn(move || {
let sys = actix_rt::System::new();
let lst = net::TcpListener::bind(addr).unwrap();
Server::build()
.listen("test", lst, move || {
fn_cfg_factory(move |cfg: &ServerConfig| {
assert_eq!(cfg.local_addr(), addr);
Ok::<_, ()>((|_| Ok::<_, ()>(())).into_service())
})
})
.unwrap()
.run()
sys.block_on(async {
Server::build()
.disable_signals()
.workers(1)
.listen("test", lst, move || fn_service(|_| ok::<_, ()>(())))
.unwrap()
.run();
let _ = tx.send(actix_rt::System::current());
});
let _ = sys.run();
});
let sys = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
}
#[test]
#[cfg(unix)]
fn test_start() {
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use bytes::Bytes;
use futures_util::sink::SinkExt;
use std::io::Read;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let sys = actix_rt::System::new("test");
let srv = Server::build()
.backlog(100)
.bind("test", addr, move || {
fn_cfg_factory(move |cfg: &ServerConfig| {
assert_eq!(cfg.local_addr(), addr);
Ok::<_, ()>(
(|io: Io<TcpStream>| {
Framed::new(io.into_parts().0, BytesCodec)
.send(Bytes::from_static(b"test"))
.then(|_| Ok::<_, ()>(()))
})
.into_service(),
)
let h = thread::spawn(move || {
let sys = actix_rt::System::new();
let srv = sys.block_on(lazy(|_| {
Server::build()
.backlog(100)
.disable_signals()
.bind("test", addr, move || {
fn_service(|io: TcpStream| async move {
let mut f = Framed::new(io, BytesCodec);
f.send(Bytes::from_static(b"test")).await.unwrap();
Ok::<_, ()>(())
})
})
})
.unwrap()
.start();
.unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (srv, sys) = rx.recv().unwrap();
let mut buf = [0u8; 4];
let mut buf = [1u8; 4];
let mut conn = net::TcpStream::connect(addr).unwrap();
let _ = conn.read_exact(&mut buf);
assert_eq!(buf, b"test"[..]);
@@ -143,5 +135,56 @@ fn test_start() {
assert!(net::TcpStream::connect(addr).is_err());
thread::sleep(time::Duration::from_millis(100));
let _ = sys.stop();
sys.stop();
let _ = h.join();
}
#[test]
fn test_configure() {
let addr1 = unused_addr();
let addr2 = unused_addr();
let addr3 = unused_addr();
let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = num.clone();
let h = thread::spawn(move || {
let num = num2.clone();
let sys = actix_rt::System::new();
let srv = sys.block_on(lazy(|_| {
Server::build()
.disable_signals()
.configure(move |cfg| {
let num = num.clone();
let lst = net::TcpListener::bind(addr3).unwrap();
cfg.bind("addr1", addr1)
.unwrap()
.bind("addr2", addr2)
.unwrap()
.listen("addr3", lst)
.apply(move |rt| {
let num = num.clone();
rt.service("addr1", fn_service(|_| ok::<_, ()>(())));
rt.service("addr3", fn_service(|_| ok::<_, ()>(())));
rt.on_start(lazy(move |_| {
let _ = num.fetch_add(1, Relaxed);
}))
})
})
.unwrap()
.workers(1)
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr1).is_ok());
assert!(net::TcpStream::connect(addr2).is_ok());
assert!(net::TcpStream::connect(addr3).is_ok());
assert_eq!(num.load(Relaxed), 1);
sys.stop();
let _ = h.join();
}


@@ -1,5 +1,176 @@
# Changes
## Unreleased - 2021-xx-xx
## 2.0.0-beta.4 - 2021-02-04
* `Service::poll_ready` and `Service::call` receive `&self`. [#247]
* `apply_fn` and `apply_fn_factory` now receive `Fn(Req, &Service)` function type. [#247]
* `apply_cfg` and `apply_cfg_factory` now receive `Fn(Req, &Service)` function type. [#247]
* `fn_service` and friends now receive `Fn(Req)` function type. [#247]
[#247]: https://github.com/actix/actix-net/pull/247
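
As a quick illustration of the `&self` change, a service implementation now looks roughly like the editor sketch below (written against the beta.4 API; `Doubler` is a hypothetical service, not code from this repository). Closure-based services follow the same shape via `fn_service(|req| async move { ... })`, matching the `Fn(Req)` signature noted above.

```rust
use std::task::{Context, Poll};

use actix_service::Service;
use futures_util::future::{ok, Ready};

// Hypothetical service that doubles the number it receives.
struct Doubler;

impl Service<u32> for Doubler {
    type Response = u32;
    type Error = ();
    type Future = Ready<Result<u32, ()>>;

    // `&self` rather than `&mut self` as of 2.0.0-beta.4.
    fn poll_ready(&self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&self, req: u32) -> Self::Future {
        ok(req * 2)
    }
}
```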
## 2.0.0-beta.3 - 2021-01-09
* The `forward_ready!` macro converts errors. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 2.0.0-beta.2 - 2021-01-03
* Remove redundant type parameter from `map_config`.
## 2.0.0-beta.1 - 2020-12-28
* `Service`, other traits, and many type signatures now take the request type as a type
parameter instead of an associated type. [#232]
* Add `always_ready!` and `forward_ready!` macros. [#233]
* Crate is now `no_std`. [#233]
* Migrate pin projections to `pin-project-lite`. [#233]
* Remove `AndThenApplyFn` and Pipeline `and_then_apply_fn`. Use the
`.and_then(apply_fn(...))` construction. [#233]
* Move non-vital methods to `ServiceExt` and `ServiceFactoryExt` extension traits. [#235]
[#232]: https://github.com/actix/actix-net/pull/232
[#233]: https://github.com/actix/actix-net/pull/233
[#235]: https://github.com/actix/actix-net/pull/235
## 1.0.6 - 2020-08-09
### Fixed
* Removed unsound custom Cell implementation that allowed obtaining several mutable references to
the same data, which is undefined behavior in Rust and could lead to violations of memory safety. External code could obtain several mutable references to the same data through
service combinators. Attempts to acquire several mutable references to the same data will instead
result in a panic.
## [1.0.5] - 2020-01-16
### Fixed
* Fixed unsoundness in .and_then()/.then() service combinators
## [1.0.4] - 2020-01-15
### Fixed
* Revert 1.0.3 change
## [1.0.3] - 2020-01-15
### Fixed
* Fixed unsoundness in `AndThenService` impl
## [1.0.2] - 2020-01-08
### Added
* Add `into_service` helper function
## [1.0.1] - 2019-12-22
### Changed
* `map_config()` and `unit_config()` accepts `IntoServiceFactory` type
## [1.0.0] - 2019-12-11
### Added
* Add Clone impl for Apply service
## [1.0.0-alpha.4] - 2019-12-08
### Changed
* Renamed `service_fn` to `fn_service`
* Renamed `factory_fn` to `fn_factory`
* Renamed `factory_fn_cfg` to `fn_factory_with_config`
## [1.0.0-alpha.3] - 2019-12-06
### Changed
* Add missing Clone impls
* Restore `Transform::map_init_err()` combinator
* Restore `Service/Factory::apply_fn()` in form of `Pipeline/Factory::and_then_apply_fn()`
* Optimize service combinators and futures memory layout
## [1.0.0-alpha.2] - 2019-12-02
### Changed
* Use owned config value for service factory
* Renamed BoxedNewService/BoxedService to BoxServiceFactory/BoxService
## [1.0.0-alpha.1] - 2019-11-25
### Changed
* Migrated to `std::future`
* `NewService` renamed to `ServiceFactory`
* Added `pipeline` and `pipeline_factory` function
## [0.4.2] - 2019-08-27
### Fixed
* Check service readiness for `new_apply_cfg` combinator
## [0.4.1] - 2019-06-06
### Added
* Add `new_apply_cfg` function
## [0.4.0] - 2019-05-12
### Changed
* Use associated type for `NewService` config
* Change `apply_cfg` function
* Renamed helper functions
### Added
* Add `NewService::map_config` and `NewService::unit_config` combinators
## [0.3.6] - 2019-04-07
### Changed
* Poll boxed service call result immediately
## [0.3.5] - 2019-03-29
### Added
* Add `impl<S: Service> Service for Rc<RefCell<S>>`
## [0.3.4] - 2019-03-12
### Added


@@ -1,30 +1,29 @@
[package]
name = "actix-service"
version = "0.3.4"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Service"
keywords = ["network", "framework", "async", "futures"]
version = "2.0.0-beta.4"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
"fakeshadow <24548779@qq.com>",
]
description = "Service trait and combinators for representing asynchronous request/response operations."
keywords = ["network", "framework", "async", "futures", "service"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-service/"
documentation = "https://docs.rs/actix-service"
readme = "README.md"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
edition = "2018"
workspace = ".."
[badges]
travis-ci = { repository = "actix/actix-service", branch = "master" }
appveyor = { repository = "actix/actix-net" }
codecov = { repository = "actix/actix-service", branch = "master", service = "github" }
[lib]
name = "actix_service"
path = "src/lib.rs"
[dependencies]
futures = "0.1.24"
void = "1.0.2"
futures-core = { version = "0.3.7", default-features = false }
pin-project-lite = "0.2"
[dev-dependencies]
actix-rt = "0.2"
actix-rt = "2.0.0"
futures-util = { version = "0.3.7", default-features = false }

actix-service/LICENSE-APACHE Symbolic link

@@ -0,0 +1 @@
../LICENSE-APACHE

actix-service/LICENSE-MIT Symbolic link

@@ -0,0 +1 @@
../LICENSE-MIT

actix-service/README.md Normal file

@@ -0,0 +1,13 @@
# actix-service
> Service trait and combinators for representing asynchronous request/response operations.
[![crates.io](https://img.shields.io/crates/v/actix-service?label=latest)](https://crates.io/crates/actix-service)
[![Documentation](https://docs.rs/actix-service/badge.svg?version=2.0.0-beta.4)](https://docs.rs/actix-service/2.0.0-beta.4)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/08/27/Rust-1.46.0.html)
![License](https://img.shields.io/crates/l/actix-service.svg)
[![Dependency Status](https://deps.rs/crate/actix-service/2.0.0-beta.4/status.svg)](https://deps.rs/crate/actix-service/2.0.0-beta.4)
[![Download](https://img.shields.io/crates/d/actix-service.svg)](https://crates.io/crates/actix-service)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
See documentation for detailed explanations of these components: https://docs.rs/actix-service.

actix-service/src/and_then.rs

@@ -1,191 +1,227 @@
use std::marker::PhantomData;
use alloc::rc::Rc;
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures::{Async, Future, Poll};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{IntoNewService, NewService, Service};
use crate::cell::Cell;
use super::{Service, ServiceFactory};
/// Service for the `and_then` combinator, chaining a computation onto the end
/// of another service which completes successfully.
///
/// This is created by the `ServiceExt::and_then` method.
pub struct AndThen<A, B> {
a: A,
b: Cell<B>,
}
/// This is created by the `Pipeline::and_then` method.
pub(crate) struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
impl<A, B> AndThen<A, B> {
impl<A, B, Req> AndThenService<A, B, Req> {
/// Create new `AndThen` combinator
pub fn new(a: A, b: B) -> Self
pub(crate) fn new(a: A, b: B) -> Self
where
A: Service,
B: Service<Request = A::Response, Error = A::Error>,
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
Self { a, b: Cell::new(b) }
Self(Rc::new((a, b)), PhantomData)
}
}
impl<A, B> Clone for AndThen<A, B>
where
A: Clone,
{
impl<A, B, Req> Clone for AndThenService<A, B, Req> {
fn clone(&self) -> Self {
AndThen {
a: self.a.clone(),
b: self.b.clone(),
}
AndThenService(self.0.clone(), PhantomData)
}
}
impl<A, B> Service for AndThen<A, B>
impl<A, B, Req> Service<Req> for AndThenService<A, B, Req>
where
A: Service,
B: Service<Request = A::Response, Error = A::Error>,
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
type Request = A::Request;
type Response = B::Response;
type Error = A::Error;
type Future = AndThenFuture<A, B>;
type Future = AndThenServiceResponse<A, B, Req>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
let not_ready = self.a.poll_ready()?.is_not_ready();
if self.b.get_mut().poll_ready()?.is_not_ready() || not_ready {
Ok(Async::NotReady)
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let (a, b) = &*self.0;
let not_ready = !a.poll_ready(cx)?.is_ready();
if !b.poll_ready(cx)?.is_ready() || not_ready {
Poll::Pending
} else {
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
}
}
fn call(&mut self, req: A::Request) -> Self::Future {
AndThenFuture::new(self.a.call(req), self.b.clone())
}
}
pub struct AndThenFuture<A, B>
where
A: Service,
B: Service<Request = A::Response, Error = A::Error>,
{
b: Cell<B>,
fut_b: Option<B::Future>,
fut_a: Option<A::Future>,
}
impl<A, B> AndThenFuture<A, B>
where
A: Service,
B: Service<Request = A::Response, Error = A::Error>,
{
fn new(a: A::Future, b: Cell<B>) -> Self {
AndThenFuture {
b,
fut_a: Some(a),
fut_b: None,
fn call(&self, req: Req) -> Self::Future {
AndThenServiceResponse {
state: State::A {
fut: self.0 .0.call(req),
b: Some(self.0.clone()),
},
}
}
}
impl<A, B> Future for AndThenFuture<A, B>
pin_project! {
pub(crate) struct AndThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
#[pin]
state: State<A, B, Req>,
}
}
pin_project! {
#[project = StateProj]
enum State<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
A {
#[pin]
fut: A::Future,
b: Option<Rc<(A, B)>>,
},
B {
#[pin]
fut: B::Future,
},
}
}
impl<A, B, Req> Future for AndThenServiceResponse<A, B, Req>
where
A: Service,
B: Service<Request = A::Response, Error = A::Error>,
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
type Item = B::Response;
type Error = A::Error;
type Output = Result<B::Response, A::Error>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(ref mut fut) = self.fut_b {
return fut.poll();
}
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match self.fut_a.as_mut().expect("Bug in actix-service").poll() {
Ok(Async::Ready(resp)) => {
let _ = self.fut_a.take();
self.fut_b = Some(self.b.get_mut().call(resp));
self.poll()
match this.state.as_mut().project() {
StateProj::A { fut, b } => {
let res = ready!(fut.poll(cx))?;
let b = b.take().unwrap();
let fut = b.1.call(res);
this.state.set(State::B { fut });
self.poll(cx)
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(err) => Err(err),
StateProj::B { fut } => fut.poll(cx),
}
}
}
/// `AndThenNewService` new service combinator
pub struct AndThenNewService<A, B, C>
/// `.and_then()` service factory combinator
pub(crate) struct AndThenServiceFactory<A, B, Req>
where
A: NewService<C>,
B: NewService<C, Request = A::Response, Error = A::Error, InitError = A::InitError>,
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
a: A,
b: B,
_t: PhantomData<C>,
inner: Rc<(A, B)>,
_phantom: PhantomData<Req>,
}
impl<A, B, C> AndThenNewService<A, B, C>
impl<A, B, Req> AndThenServiceFactory<A, B, Req>
where
A: NewService<C>,
B: NewService<C, Request = A::Response, Error = A::Error, InitError = A::InitError>,
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
/// Create new `AndThen` combinator
pub fn new<F: IntoNewService<B, C>>(a: A, f: F) -> Self {
/// Create new `AndThenFactory` combinator
pub(crate) fn new(a: A, b: B) -> Self {
Self {
a,
b: f.into_new_service(),
_t: PhantomData,
inner: Rc::new((a, b)),
_phantom: PhantomData,
}
}
}
impl<A, B, C> NewService<C> for AndThenNewService<A, B, C>
impl<A, B, Req> ServiceFactory<Req> for AndThenServiceFactory<A, B, Req>
where
A: NewService<C>,
B: NewService<C, Request = A::Response, Error = A::Error, InitError = A::InitError>,
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
type Request = A::Request;
type Response = B::Response;
type Error = A::Error;
type Service = AndThen<A::Service, B::Service>;
type Config = A::Config;
type Service = AndThenService<A::Service, B::Service, Req>;
type InitError = A::InitError;
type Future = AndThenNewServiceFuture<A, B, C>;
type Future = AndThenServiceFactoryResponse<A, B, Req>;
fn new_service(&self, cfg: &C) -> Self::Future {
AndThenNewServiceFuture::new(self.a.new_service(cfg), self.b.new_service(cfg))
fn new_service(&self, cfg: A::Config) -> Self::Future {
let inner = &*self.inner;
AndThenServiceFactoryResponse::new(
inner.0.new_service(cfg.clone()),
inner.1.new_service(cfg),
)
}
}
impl<A, B, C> Clone for AndThenNewService<A, B, C>
impl<A, B, Req> Clone for AndThenServiceFactory<A, B, Req>
where
A: NewService<C> + Clone,
B: NewService<C, Request = A::Response, Error = A::Error, InitError = A::InitError> + Clone,
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
fn clone(&self) -> Self {
Self {
a: self.a.clone(),
b: self.b.clone(),
_t: PhantomData,
inner: self.inner.clone(),
_phantom: PhantomData,
}
}
}
pub struct AndThenNewServiceFuture<A, B, C>
where
A: NewService<C>,
B: NewService<C, Request = A::Response>,
{
fut_b: B::Future,
fut_a: A::Future,
a: Option<A::Service>,
b: Option<B::Service>,
pin_project! {
pub(crate) struct AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
{
#[pin]
fut_a: A::Future,
#[pin]
fut_b: B::Future,
a: Option<A::Service>,
b: Option<B::Service>,
}
}
impl<A, B, C> AndThenNewServiceFuture<A, B, C>
impl<A, B, Req> AndThenServiceFactoryResponse<A, B, Req>
where
A: NewService<C>,
B: NewService<C, Request = A::Response>,
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
{
fn new(fut_a: A::Future, fut_b: B::Future) -> Self {
AndThenNewServiceFuture {
AndThenServiceFactoryResponse {
fut_a,
fut_b,
a: None,
@@ -194,61 +230,64 @@ where
}
}
impl<A, B, C> Future for AndThenNewServiceFuture<A, B, C>
impl<A, B, Req> Future for AndThenServiceFactoryResponse<A, B, Req>
where
A: NewService<C>,
B: NewService<C, Request = A::Response, Error = A::Error, InitError = A::InitError>,
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response, Error = A::Error, InitError = A::InitError>,
{
type Item = AndThen<A::Service, B::Service>;
type Error = A::InitError;
type Output = Result<AndThenService<A::Service, B::Service, Req>, A::InitError>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.a.is_none() {
if let Async::Ready(service) = self.fut_a.poll()? {
self.a = Some(service);
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if this.a.is_none() {
if let Poll::Ready(service) = this.fut_a.poll(cx)? {
*this.a = Some(service);
}
}
if self.b.is_none() {
if let Async::Ready(service) = self.fut_b.poll()? {
self.b = Some(service);
if this.b.is_none() {
if let Poll::Ready(service) = this.fut_b.poll(cx)? {
*this.b = Some(service);
}
}
if self.a.is_some() && self.b.is_some() {
Ok(Async::Ready(AndThen::new(
self.a.take().unwrap(),
self.b.take().unwrap(),
if this.a.is_some() && this.b.is_some() {
Poll::Ready(Ok(AndThenService::new(
this.a.take().unwrap(),
this.b.take().unwrap(),
)))
} else {
Ok(Async::NotReady)
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use futures::future::{ok, FutureResult};
use futures::{Async, Poll};
use std::cell::Cell;
use std::rc::Rc;
use alloc::rc::Rc;
use core::{
cell::Cell,
task::{Context, Poll},
};
use super::*;
use crate::{NewService, Service, ServiceExt};
use futures_util::future::lazy;
use crate::{
fn_factory, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory,
};
struct Srv1(Rc<Cell<usize>>);
impl Service for Srv1 {
type Request = &'static str;
impl Service<&'static str> for Srv1 {
type Response = &'static str;
type Error = ();
type Future = FutureResult<Self::Response, ()>;
type Future = Ready<Result<Self::Response, ()>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
}
fn call(&mut self, req: &'static str) -> Self::Future {
fn call(&self, req: &'static str) -> Self::Future {
ok(req)
}
}
@@ -256,55 +295,50 @@ mod tests {
#[derive(Clone)]
struct Srv2(Rc<Cell<usize>>);
impl Service for Srv2 {
type Request = &'static str;
impl Service<&'static str> for Srv2 {
type Response = (&'static str, &'static str);
type Error = ();
type Future = FutureResult<Self::Response, ()>;
type Future = Ready<Result<Self::Response, ()>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
}
fn call(&mut self, req: &'static str) -> Self::Future {
fn call(&self, req: &'static str) -> Self::Future {
ok((req, "srv2"))
}
}
#[test]
fn test_poll_ready() {
#[actix_rt::test]
async fn test_poll_ready() {
let cnt = Rc::new(Cell::new(0));
let mut srv = Srv1(cnt.clone()).and_then(Srv2(cnt.clone()));
let res = srv.poll_ready();
assert!(res.is_ok());
assert_eq!(res.unwrap(), Async::Ready(()));
let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt.clone()));
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Ok(())));
assert_eq!(cnt.get(), 2);
}
#[test]
fn test_call() {
#[actix_rt::test]
async fn test_call() {
let cnt = Rc::new(Cell::new(0));
let mut srv = Srv1(cnt.clone()).and_then(Srv2(cnt));
let res = srv.call("srv1").poll();
let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt));
let res = srv.call("srv1").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), Async::Ready(("srv1", "srv2")));
assert_eq!(res.unwrap(), ("srv1", "srv2"));
}
#[test]
fn test_new_service() {
#[actix_rt::test]
async fn test_new_service() {
let cnt = Rc::new(Cell::new(0));
let cnt2 = cnt.clone();
let blank = move || Ok::<_, ()>(Srv1(cnt2.clone()));
let new_srv = blank
.into_new_service()
.and_then(move || Ok(Srv2(cnt.clone())));
if let Async::Ready(mut srv) = new_srv.new_service(&()).poll().unwrap() {
let res = srv.call("srv1").poll();
assert!(res.is_ok());
assert_eq!(res.unwrap(), Async::Ready(("srv1", "srv2")));
} else {
panic!()
}
let new_srv =
pipeline_factory(fn_factory(move || ready(Ok::<_, ()>(Srv1(cnt2.clone())))))
.and_then(move || ready(Ok(Srv2(cnt.clone()))));
let srv = new_srv.new_service(()).await.unwrap();
let res = srv.call("srv1").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv1", "srv2"));
}
}
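
Tying the migrated combinator back to user code: below is a hedged usage sketch of `and_then` chaining under the 2.0 API, built from `pipeline` and `fn_service`. The closure bodies and the `#[actix_rt::main]` entry point are illustrative assumptions that mirror the test code above rather than documented examples.

```rust
use actix_service::{fn_service, pipeline, Service};

#[actix_rt::main]
async fn main() -> Result<(), ()> {
    // First service: parse a string request into a number.
    let parse = fn_service(|s: &'static str| async move {
        s.parse::<u32>().map_err(|_| ())
    });

    // Second service: consumes the first service's response type.
    let double = fn_service(|n: u32| async move { Ok::<_, ()>(n * 2) });

    // `and_then` chains them; readiness and errors propagate from both services.
    let svc = pipeline(parse).and_then(double);

    assert_eq!(svc.call("21").await?, 42);
    Ok(())
}
```

This mirrors the `pipeline(Srv1(...)).and_then(Srv2(...))` pattern in the tests above, with closures standing in for the hand-written service structs.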

Some files were not shown because too many files have changed in this diff.