mirror of https://github.com/fafhrd91/actix-net synced 2025-08-12 23:27:05 +02:00

Compare commits


373 Commits

Author SHA1 Message Date
Rob Ede
f8f1ac94bc add Patterns::is_empty and impl IntoPatterns for Patterns 2021-07-20 08:18:50 +01:00
Rob Ede
82cd5b8290 prepare router release 0.5.0-beta.1 2021-07-20 07:43:50 +01:00
Rob Ede
c65e8524b2 rework resourcedef (#373) 2021-07-19 22:37:54 +01:00
Rob Ede
a83dfaa162 Update macros.rs
closes #234
2021-07-17 20:54:53 +01:00
Rob Ede
e4ec956001 fix examples on msrv 2021-07-17 03:11:25 +01:00
Rob Ede
95cba659ff add zero cost profiling to router 2021-07-17 01:09:29 +01:00
Rob Ede
5687e81d9f rework IntoPatterns trait and codegen (#372) 2021-07-17 01:06:23 +01:00
Rob Ede
a0fe2a9b2e clippy 2021-07-16 21:46:32 +01:00
Rob Ede
ad22a93466 allow path building when resource has tail (#371) 2021-07-16 21:41:57 +01:00
Rob Ede
c2d5b2398a Rename Path::{len => segment_count} (#370) 2021-07-16 19:43:48 +01:00
Ali MJ Al-Nasrawy
5b1ff30dd9 router: fix multi-pattern and path tail matches (#366) 2021-07-16 18:17:00 +01:00
Ali MJ Al-Nasrawy
e1317bb3a0 path.len() != path.path().len() (#368)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-07-15 15:34:49 +01:00
Ali MJ Al-Nasrawy
dcea009158 ResourceDef: cleanup (#365) 2021-07-15 15:09:01 +01:00
Rob Ede
13c18b8a51 Update CHANGES.md 2021-07-14 10:37:49 +01:00
Rob Ede
06b17d6a43 fix ci 2021-06-28 15:06:29 +01:00
Rob Ede
605ec25143 prepare macros release 0.2.1 2021-06-08 17:48:30 +01:00
Ibraheem Ahmed
3824493fd3 take custom system path in actix_rt::main macro (#363) 2021-06-08 17:33:05 +01:00
Rob Ede
3be3e11aa5 change actix-router version to 0.4.0 2021-06-06 18:48:27 +01:00
Rob Ede
6a5ea0342b prepare router release 0.3.0 (#362) 2021-06-06 18:43:22 +01:00
Ali MJ Al-Nasrawy
23b1f63345 router: handle newline char '\n' in url (#360) 2021-06-06 03:38:58 +01:00
Ali MJ Al-Nasrawy
3aa037d07d fix changelog (#361) 2021-06-05 19:24:30 +01:00
Ali MJ Al-Nasrawy
cf21df14f2 Path: fix unsafe malformed string (#359) 2021-06-05 18:29:00 +01:00
Ali MJ Al-Nasrawy
a1bf8662c9 router: don't decode %25 to '%' (#357) 2021-06-06 01:34:16 +09:00
Ibraheem Ahmed
6f4d2220fa store Cow in actix-router Path (#345) 2021-06-05 01:46:40 +01:00
Danilo Bargen
54b22f9fce Docs: Fix signature of Service::call (#358) 2021-06-02 21:10:36 +01:00
fakeshadow
983abec77d Fix interrupt handling. Fix double server pause/resume (#353) 2021-04-30 13:42:25 +01:00
fakeshadow
e4d4ae21ee refactor connection counter (#343)
* Remove restart_worker test

* Remove Slab

* Rework counter

* Make counter limit switch accurate

* Remove backpressure. Add pause state

* make changes for review

* fix doc comment for counter
2021-04-29 23:27:08 +08:00
fakeshadow
8ad5f58d38 Remove ServerBuilder::configure (#349) 2021-04-27 23:58:02 +01:00
fakeshadow
613b2be51f Fix Display impl of MioListener (#350) 2021-04-27 11:54:18 -07:00
Rob Ede
b2e9640952 prepare codec 0.4.0 release (#346) 2021-04-21 11:08:43 +01:00
Rob Ede
76338a5822 prepare server release 2.0.0-beta.5 2021-04-20 05:16:32 +01:00
Rob Ede
978e4f25fb prepare actix-utils release 3.0.0 (#342) 2021-04-17 02:00:36 +01:00
Rob Ede
1c4e965366 prepare service release 2.0.0 (#339) 2021-04-16 15:18:53 +01:00
fakeshadow
2435520e67 Remove/restart worker test (#341) 2021-04-16 14:40:21 +01:00
fakeshadow
19468feef8 Fix memory ordering of WorkerAvailability (#340) 2021-04-16 11:20:08 +01:00
fakeshadow
bd48908792 Return worker index in WakerInterest::WorkerAvailable (#337) 2021-04-16 05:59:10 +01:00
fakeshadow
20c2da17ed Fix worker_avail (#336)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-16 03:20:02 +01:00
Rob Ede
fdafc1dd65 amend licences 2021-04-16 02:08:44 +01:00
Rob Ede
7749dfe46a address msrv todo in router 2021-04-16 02:06:11 +01:00
fakeshadow
aeb81ad3fd Fix worker are notified to stop with non_graceful shutdown (#333) 2021-04-16 00:54:15 +01:00
Rob Ede
47fba25d67 remove pipeline from public api (#335) 2021-04-16 00:00:02 +01:00
Rob Ede
7a82288066 docs tweak 2021-04-15 21:58:18 +01:00
Rob Ede
4e6d88d143 improve boxed service docs 2021-04-15 20:43:02 +01:00
Rob Ede
ef206f40fb update ignored service docs to new traits 2021-04-15 20:13:27 +01:00
fakeshadow
8e98d9168c add test for restart worker thread (#328) 2021-04-15 18:49:43 +01:00
fakeshadow
3c1f57706a Make ServerWorker drop stop Arbiter it runs on (#334) 2021-04-15 13:31:03 +01:00
fakeshadow
d49ecf7203 Fix bug where backpressure happen too early (#332) 2021-04-14 14:48:05 +01:00
fakeshadow
e0fb67f646 Reduce ServerWorker size (#321) 2021-04-13 01:12:59 +01:00
fakeshadow
ddce2d6d12 Reduce cfg flags in actix_server::socket (#325) 2021-04-10 16:05:50 +01:00
fakeshadow
0a11cf5cba Separate WorkerHandle to two parts (#323) 2021-04-10 01:03:28 +01:00
Rob Ede
859f45868d Revert "do no drain backlog on backpressure" (#324)
This reverts commit d4829b046d.
2021-04-09 21:04:41 +01:00
fakeshadow
d4829b046d do no drain backlog on backpressure (#322) 2021-04-08 23:15:10 +01:00
fakeshadow
5961eb892e Fix bug where worker service restart could skip failing services and not being able to restart multiple services (#318) 2021-04-05 20:39:05 +01:00
fakeshadow
995efcf427 Fix bug where paused Accept would register timed out sockets (#312) 2021-04-05 13:38:41 +01:00
fakeshadow
f1573931dd Remove MAX_CONN (#316) 2021-04-04 23:00:12 +01:00
fakeshadow
3859e91799 Use named type for WorkerState::Restarting and Shutdown (#317) 2021-04-04 21:53:19 +01:00
fakeshadow
8aade720ed Refactor WorkerState::Shutdown (#310) 2021-04-04 20:34:52 +01:00
fakeshadow
8079c50ddb Add ServerWorker::restart_service method (#314)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-04 13:22:34 +01:00
fakeshadow
05689b86d9 Remove Option wrapper for CounterGuard (#313) 2021-04-04 10:53:06 +01:00
fakeshadow
fd3e5fba02 Refactor actix_server WorkerState::Restarting enum variant. (#306)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-03 19:40:12 +01:00
fakeshadow
39d1f282f7 add test for max concurrent connections (#311) 2021-04-03 19:01:00 +01:00
fakeshadow
d8889c63ef Do not do double check on connection num when entering graceful shutdown (#309) 2021-04-02 12:49:12 +01:00
fakeshadow
fdac52aa11 Refactor Worker::shutdown mehtod (#308) 2021-04-02 12:22:05 +01:00
Rob Ede
6d66cfb06a prepare utils release 3.0.0-beta.4 2021-04-01 13:57:08 +01:00
Rob Ede
fb27ffc525 add future::Either type to utils (#305) 2021-04-01 13:53:44 +01:00
Rob Ede
b068ea16f8 prepare server release 2.0.0-beta.4 2021-04-01 09:36:07 +01:00
Rob Ede
4eebdf4070 prepare actix-utils release 3.0.0-beta.3 2021-04-01 09:31:42 +01:00
Rob Ede
b09e7cd417 fix local waker metadata 2021-04-01 09:01:56 +01:00
fakeshadow
2c5c9167a5 Fix bug where timed out socket would register itself when server in b… (#302)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-01 08:25:24 +01:00
fakeshadow
ee3a548a85 Refactor Accept::accept_one (#303) 2021-04-01 07:45:49 +01:00
fakeshadow
f21eaa954f Reduce size of Conn by removing unused addr field (#304) 2021-04-01 06:55:33 +01:00
Rob Ede
8becb0db70 refactor crates for better api stability (#301) 2021-03-30 13:39:10 +01:00
fakeshadow
26a5af70cb reduce branch in Accept::accept method (#300) 2021-03-29 08:19:37 +01:00
Rob Ede
0ee8d032b6 prepare actix-tls release 3.0.0-beta.5 2021-03-29 06:57:47 +01:00
Rob Ede
3cf1c548fd prepare actix-rt release 2.2.0 2021-03-29 06:57:14 +01:00
fakeshadow
4544562e1b Remove unused TcpConnectService (#299) 2021-03-27 21:03:24 +00:00
fakeshadow
bb27bac216 Add native tls support for actix_tls::connect module (#295)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-27 00:20:17 +00:00
Rob Ede
f9262dbec0 prevent large shutdown timeout from panicking
closes #298
2021-03-26 23:37:01 +00:00
fakeshadow
12d3942b98 Remove unused types in actix-tls. Add ActixStream impl for Box<dyn Ac… (#297) 2021-03-26 13:03:03 +00:00
fakeshadow
a3c9ebc7fa fix rustls panic when generating dns name from ip (#296)
* fix rustls panic when generating dns name from ip

* Update rustls.rs

* update changelog

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-24 09:32:04 -07:00
fakeshadow
b7bfff2b32 add example of using multi-thread tokio runtime (#294)
* add example of using multi-thread tokio runtime

* Update multi_thread_system.rs

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-24 04:56:13 -07:00
fakeshadow
0c73f13c8b ActixStream readiness methods return Ready object (#293) 2021-03-23 05:50:48 +00:00
Rob Ede
945479e0c3 unvendor openssl (#292) 2021-03-17 00:26:04 +00:00
Rob Ede
746cc2ab89 prepare service release 2.0.0-beta.5 2021-03-15 23:09:34 +00:00
Rob Ede
91ea8c5dad remove service dev module and add transformext trait
also improve docs on transform and boxed mods
2021-03-10 03:18:09 +00:00
Rob Ede
0a705b1023 add docs for *_ready macros 2021-03-10 02:23:24 +00:00
fakeshadow
9e2bcec226 add RcService type and rc_service construct function (#290) 2021-02-28 23:01:05 +00:00
Rob Ede
382830a37e refactor dispatcher / add Receiver::recv (#286) 2021-02-28 21:11:16 +00:00
fakeshadow
493a1a32c0 rc service changelog (#289) 2021-02-28 19:54:57 +00:00
fakeshadow
50a195e9ce add impl Service for Rc<S: Service> (#288) 2021-02-28 19:42:11 +00:00
Rob Ede
06ddad0051 prepare rt and tls releases (#287) 2021-02-25 11:50:24 +00:00
Rob Ede
789e6a8a46 update ci (#284) 2021-02-24 09:48:41 +00:00
Rob Ede
6e590fd042 Merge pull request #285 from actix/dep/actix-server 2021-02-24 09:09:44 +00:00
fakeshadow
fa8ded3a34 bump tokio version for actix-server 2021-02-24 15:54:28 +08:00
Rob Ede
841c611233 doc nits 2021-02-24 01:39:02 +00:00
Rob Ede
81a2b6a425 add local_addr binding to connector service (#282) 2021-02-23 18:52:28 +00:00
fakeshadow
a6e79453d0 remove default reuse_addr 2021-02-24 02:26:11 +08:00
fakeshadow
17f711a9d6 update changelog 2021-02-24 01:20:01 +08:00
fakeshadow
c3be839a69 add local_addr binding to connector service 2021-02-24 01:13:17 +08:00
Rob Ede
8d74cf387d standardize openssl based stream name 2021-02-20 18:04:05 +00:00
Rob Ede
7e483cc356 tweak task and stream docs 2021-02-20 17:34:04 +00:00
fakeshadow
75d7ae3139 add actix stream trait (#276) 2021-02-20 17:25:22 +00:00
Juan Aguilar
2cfe1d88ad Refactor LocalWaker for use Cell and remove deprecated methods (#278) 2021-02-19 17:12:30 +00:00
Rob Ede
cb07ead392 prepare rt release 2.0.2 2021-02-06 22:52:53 +00:00
Rob Ede
32543809f9 add System::try_current (#275) 2021-02-06 22:45:03 +00:00
Rob Ede
eb4d29e15e add arbiter handle assoc fn (#274)
* add arbiter handle assoc fn
2021-02-06 22:27:56 +00:00
Rob Ede
7ee42b50b4 prepare router 0.2.7 release 2021-02-06 19:50:48 +00:00
Rob Ede
0da848e4ae fix server dev dep 2021-02-06 19:35:29 +00:00
Rob Ede
5f80d85010 fix server version 2021-02-06 19:34:58 +00:00
Rob Ede
16ba77c4c8 prepare next set of betas (#273) 2021-02-06 19:24:52 +00:00
Rob Ede
b4a3f51659 prepare rt release 2.0.1 2021-02-06 15:54:11 +00:00
Riley
9d0901e07f actix-rt: expose JoinError (#271) 2021-02-06 15:50:38 +00:00
fakeshadow
ebb9cd055f use static dispatch on signal handling. reduce allocation (#272) 2021-02-06 03:38:11 +00:00
Rob Ede
a77b70aed2 prepare service 2.0.0-beta.4 release (#269) 2021-02-04 20:44:13 +00:00
Rob Ede
c918da906b use reexported tls crates when possible 2021-02-04 15:23:06 +00:00
Rob Ede
b5399c5631 use reusable box future in tls connector 2021-02-04 15:23:06 +00:00
fakeshadow
7f0eddd794 add blocking thread customize (#265) 2021-02-04 15:01:51 +00:00
shuo
db3385e865 retry on EINTR in accept loop (#264)
Co-authored-by: lishuo <lishuo.03@bytedance.com>
2021-02-04 10:20:37 +00:00
Rob Ede
4a8693d000 readme grammar 2021-02-03 11:18:35 +00:00
Rob Ede
4ec358575e prepare actix-rt v2.0.0 release (#262) 2021-02-03 10:25:31 +00:00
Rob Ede
66bd5bf4a2 prepare macros v0.2.0 release (#261) 2021-02-02 02:07:58 +00:00
Rob Ede
057e7cd7c9 prepare rt v2.0.0-beta.3 2021-01-31 05:19:30 +00:00
Rob Ede
0b656f51e1 deprecate rt TLS item storage 2021-01-31 04:48:03 +00:00
Rob Ede
0eb68d1c7b Revert "remove arbiter TLS item storage"
This reverts commit 3e6f69885c.
2021-01-31 04:45:27 +00:00
Rob Ede
3e6f69885c remove arbiter TLS item storage 2021-01-31 04:43:35 +00:00
Rob Ede
2fa60b07ae prevent arbiter leaks by waiting for registration 2021-01-31 04:41:28 +00:00
Rob Ede
b75254403a remove builder and introduce worker handle (#257) 2021-01-31 03:34:07 +00:00
Rob Ede
1b35ff8ee6 express spawn fn as spawn fut (#256) 2021-01-29 15:16:30 +00:00
Rob Ede
2924419905 prevent spawn_fn panic bubbling (#255) 2021-01-29 14:16:10 +00:00
Rob Ede
6b86b5efc5 rename arbiter to worker (#254) 2021-01-29 04:08:14 +00:00
Rob Ede
ba39c8436d remove tokio runners (#253) 2021-01-29 02:21:06 +00:00
fakeshadow
feac376c17 fix actix-tls build (#252) 2021-01-28 10:31:57 +00:00
Rob Ede
a633d2353c fix addr iterator 2021-01-27 11:23:28 +00:00
Rob Ede
45edff625e add rt tests and doc tests 2021-01-26 09:46:14 +00:00
Rob Ede
cff9deb729 attribute nits 2021-01-26 09:45:43 +00:00
Rob Ede
eaefe21b98 add tests for custom resolver 2021-01-26 08:05:19 +00:00
fakeshadow
636cef8868 service trait takes shared self reference (#247) 2021-01-23 03:06:22 +00:00
fakeshadow
874e5f2e50 change default name resolver and allow custom resolvers (#248) 2021-01-23 01:33:50 +00:00
Rob Ede
6112a47529 update local deps 2021-01-09 15:19:16 +00:00
Rob Ede
a2e03700e7 update rt changelog 2021-01-09 15:16:31 +00:00
Rob Ede
6edf9b8278 prepare rt 2.0.0-beta.2 release 2021-01-09 15:12:59 +00:00
Rob Ede
f07d807707 remove actix-threadpool crate 2021-01-09 15:04:55 +00:00
Rob Ede
d4c46b7da9 fix macros code 2021-01-09 14:58:15 +00:00
Rob Ede
b0a8f8411b prepare macros 0.2.0-beta.1 release 2021-01-09 14:56:07 +00:00
Rob Ede
46bfe5de36 prepare service 2.0.0-beta.3 release 2021-01-09 14:28:33 +00:00
Rob Ede
a95afe2800 prepare router release 0.2.6 2021-01-09 14:18:20 +00:00
Rob Ede
f751cf5acb use convert err on forward_ready! (#246) 2021-01-09 14:13:16 +00:00
fakeshadow
a1982bdbad add actix-rt::task (#245) 2021-01-03 18:16:57 +00:00
Rob Ede
147c4f4f2c test bytestring with ahash 2021-01-03 04:42:08 +00:00
Rob Ede
5285656bdc prepare next beta releases 2021-01-03 04:39:37 +00:00
Rob Ede
296294061f update readme 2020-12-31 02:52:55 +00:00
Rob Ede
93865de848 move router to actix-router 2020-12-31 02:29:27 +00:00
Rob Ede
6bcf6d8160 use bytestring crate name as dir name 2020-12-31 02:21:50 +00:00
Rob Ede
14ff379150 prepare bytestring release 1.0.0 (#243) 2020-12-31 02:20:49 +00:00
fakeshadow
647817ef14 tokio 1.0 and mio 0.7 (#204) 2020-12-30 22:11:50 +00:00
fakeshadow
b5eefb4d42 merge actix-testing into actix-server (#242) 2020-12-29 21:20:24 +00:00
fakeshadow
03eb96d6d4 fix actix-tls tests (#241) 2020-12-29 11:36:17 +00:00
Rob Ede
0934078947 prepare tls beta release 2020-12-29 01:04:21 +00:00
Rob Ede
5759c9e144 merge -connect and -tls and upgrade to rt v2 (#238) 2020-12-29 00:38:41 +00:00
Rob Ede
3c6de3a81b use correct service version for tracing 2020-12-29 00:08:59 +00:00
Rob Ede
ef83647ac9 prepare testing beta release 2020-12-28 23:54:21 +00:00
Rob Ede
98a17081b8 prepare server beta release 2020-12-28 23:50:00 +00:00
fakeshadow
b7202db8fd update actix-server and actix-testing to tokio 1.0 (#239) 2020-12-28 23:44:53 +00:00
Rob Ede
a09f9abfcb prepare utils release 3.0.0-beta.1 2020-12-28 03:32:28 +00:00
Rob Ede
e4a44b77e6 prepare codec release 0.4.0-beta.1 2020-12-28 03:24:43 +00:00
fakeshadow
2ee8f45f5d update actix-codec and actix-utils to tokio 1.0 (#237) 2020-12-28 03:16:37 +00:00
Rob Ede
f48e3f4cb0 prepare release for rt and service 2020-12-28 01:58:31 +00:00
Rob Ede
3d3bd60368 fix rt override 2020-12-28 01:53:11 +00:00
Rob Ede
d684128831 fix rt override 2020-12-28 01:48:19 +00:00
fakeshadow
0c12930796 update to tokio 1.0 for actix-rt (#236) 2020-12-28 01:40:22 +00:00
Rob Ede
ba44ea7d0b remove futures-util from service deps (#235) 2020-12-27 18:24:57 +00:00
Rob Ede
8a58a341a4 service improvements (#233) 2020-12-27 14:15:42 +00:00
Rob Ede
33c9aa6988 bump msrv to 1.46 2020-12-27 04:36:08 +00:00
Rob Ede
3ab8c3eb69 service trait takes request type parameter (#232) 2020-12-27 04:28:00 +00:00
fakeshadow
518bf3f6a6 remove RUNNING Q PENDING thread locals from actix-rt (#207) 2020-12-26 23:26:02 +00:00
fakeshadow
43ce25cda1 Remove unused mods in actix-utils (#229) 2020-12-26 21:27:59 +00:00
Yuki Okushi
4e4122b702 Disable PR comment from codecov 2020-12-17 21:42:21 +09:00
Aravinth Manivannan
b296d0f254 Intradoc links conversion (#227)
* intra doc conversion

* rm trailing blank comment
2020-12-14 08:22:30 +00:00
Juan Aguilar
02a902068f Refactor LocalWaker (#224) 2020-12-13 19:26:57 +00:00
fakeshadow
049795662f remove ServerMessage type. remove one unused InternalServiceFactory impl (#225) 2020-12-13 00:46:32 +00:00
Rob Ede
4e43216b99 standardise compiler lints across all crates (#226) 2020-12-12 23:24:00 +00:00
Rob Ede
93889776c4 prevent double registration of sockets when backpressure is resolved (#223) 2020-12-12 17:19:20 +00:00
Yuki Okushi
ab496a71b5 Fix release date 2020-12-03 08:59:59 +09:00
Yuki Okushi
76d956e25c macros: Add actix-reexport feature (#218) 2020-12-03 08:59:13 +09:00
Ivan Babrou
89e56cf661 Notify about paused accept loop (#215) 2020-11-29 15:30:13 +00:00
Rob Ede
8aca8d4d07 fix clippy warnings (#214)
and make my spelling checker happy
2020-11-25 01:41:14 +00:00
fakeshadow
e0dd2a3d76 remove actix-threadpool re-export from actix-rt (#212) 2020-11-24 17:03:09 +00:00
Rob Ede
59e976aaca address clippy error (#213) 2020-11-24 16:35:47 +00:00
Zura Benashvili
4cc1c87724 docs(transform): remove extra generic parameter (#211) 2020-11-20 22:45:57 +00:00
Yuki Okushi
ca39917d2c Update CoC contact information 2020-10-31 12:08:06 +09:00
ghizzo01
704af672b9 Bump pin-project to 1.0 (#202) 2020-10-25 19:42:40 +09:00
Rob Ede
242bef269f delete ioframe removed package readme
closes #199
2020-09-22 12:29:07 +01:00
Rob Ede
6c65e2a79f prepare router 0.2.5 release (#198) 2020-09-21 22:46:59 +01:00
nujz
e5ca271764 actix-router: fix from_hex error (#196) 2020-09-20 18:04:18 +01:00
nujz
98a2197a09 fix doc error (#195) 2020-09-19 23:12:41 +09:00
Rob Ede
fb0aa02b3c move and update server+tls examples (#190) 2020-09-13 10:12:07 +01:00
Rob Ede
681eeb497d prepare server release 1.0.4 (#188) 2020-09-12 15:28:17 +01:00
Igor Aleksanov
3e04b87311 actix-service: Fix broken link in readme (#189) 2020-09-12 15:08:03 +01:00
Rob Ede
77b7826658 prepare tls v2 release (#186) 2020-09-08 18:00:07 +01:00
Igor Aleksanov
b7a9cb7bb4 actix-rt: Make the process of running System in existing Runtime more clear (#173) 2020-09-06 11:01:24 +01:00
Robert Gabriel Jakabosky
88d99ac89c Fix clippy errors. (#187) 2020-09-06 10:41:42 +01:00
Rob Ede
7632f51509 prepare connect v2 stable release (#185) 2020-09-02 22:14:07 +01:00
Rob Ede
d28687d0d7 promote codec/utils out of beta (#184) 2020-08-24 09:18:37 +01:00
Rob Ede
27c6be9881 remove unused type parameter from Framed::replace_codec (#183) 2020-08-20 00:30:26 +01:00
Rob Ede
119dc39f5b prepare codec and utils betas (#182) 2020-08-19 11:00:12 +01:00
Rob Ede
b3010c13e0 solve framed integration with actix-http (#179) 2020-08-18 23:27:37 +01:00
Adrian Wechner
fecdfcd8d4 assert workers greater than zero (#167) 2020-08-18 16:44:22 +01:00
Yuki Okushi
578a560853 connect,tls: Bump up to next alpha versions (#181) 2020-08-17 15:39:17 +01:00
Rob Ede
fb098536ee bump MSRV to 1.42 (#180) 2020-08-17 15:37:57 +01:00
Rob Ede
5d28be9ad6 fix actix-service readme reference (#176) 2020-08-11 12:20:09 +01:00
Rob Ede
a5a6b6704c prepare actix-service 1.0.6 release (#175) 2020-08-09 16:10:58 +01:00
Igor Aleksanov
afb0a3c9fc actix-service: Fix clippy warning in benches (#174) 2020-08-07 17:16:45 +09:00
Miloas
02aaa75591 fix actix-service doc error (#172) 2020-08-06 11:21:51 +01:00
Yuki Okushi
ed4b708c66 Fix CI on MSRV check (#171) 2020-08-05 09:02:41 +09:00
Yuki Okushi
235a76dcd4 GHA: Switch action to the official setup-msys2 (#169) 2020-07-29 08:47:32 +09:00
Matt Kantor
0c5f1da625 Remove garbled doc comment for actix_router::IntoPattern::is_single (#168) 2020-07-29 05:46:53 +09:00
Yuki Okushi
8ace9264b7 Check code style with rustfmt on CI (#164) 2020-07-22 12:32:13 +09:00
Yuki Okushi
0dca1a705a actix-utils: Remove unsound custom Cell as well (#161) 2020-07-22 01:14:32 +01:00
Juan Aguilar
5d6d309e66 Simplify bcodec decode (#162) 2020-07-20 23:09:24 +09:00
Juan Aguilar
8d0bd7ce1c Improve bcodec encode performance (#157) 2020-07-19 22:36:51 +01:00
Sergey "Shnatsel" Davidoff
a67e38b4a0 Remove unsound custom Cell (#158) 2020-07-20 06:05:36 +09:00
Rob Ede
334c98575a Upgrade tokio utils to 0.3 (#138) 2020-07-20 05:44:26 +09:00
Rob Ede
a9b5a7b070 Create PULL_REQUEST_TEMPLATE.md (#159) 2020-07-20 03:01:09 +09:00
Yuki Okushi
61176f6410 Update rustls-related dependencies (#154) 2020-07-14 11:14:06 +01:00
Yuki Okushi
10b4c30a06 Use OR instead of deprecated / in license field (#155) 2020-07-14 11:11:30 +01:00
Yuki Okushi
7f550bcf0f threadpool: Bump up to 0.3.3 (#156) 2020-07-14 11:10:15 +01:00
Yuki Okushi
887f11f787 Merge pull request #153 from actix/tweak-actions
Tweak actions trigger events
2020-07-08 09:04:05 +09:00
Yuki Okushi
e2a6d352b0 Tweak actions trigger events 2020-07-08 08:38:24 +09:00
Yuki Okushi
f6c697a2dd Merge pull request #152 from paolobarbolini/pl-011
Update parking_lot to 0.11
2020-07-04 03:20:08 +09:00
Paolo Barbolini
5ecdfd684a Update parking_lot to 0.11 2020-07-03 17:37:10 +02:00
Yuki Okushi
7140c04c44 Merge pull request #149 from taiki-e/pin-project
Remove uses of pin_project::project attribute
2020-06-07 02:01:08 +09:00
Taiki Endo
9528df4486 Remove uses of pin_project::project attribute
pin-project will deprecate the project attribute due to some unfixable
limitations.

Refs: https://github.com/taiki-e/pin-project/issues/225
2020-06-06 06:42:45 +09:00
Pen Tree
755a8bb9d1 fix codec doc links (#148) 2020-06-02 18:05:39 +01:00
Yuki Okushi
f3cb6efc30 Merge pull request #146 from actix/cache-v2
Update `actions/cache` to v2
2020-05-28 04:59:34 +09:00
Yuki Okushi
87b857705c Update actions/cache to v2 2020-05-28 03:14:01 +09:00
Yuki Okushi
c897c5d3eb Merge pull request #145 from JohnTitor/new-threalpool
threadpool: Bump up to 0.3.2
2020-05-20 15:24:39 +09:00
Yuki Okushi
134e76b8b4 threadpool: Bump up to 0.3.2 2020-05-20 14:19:16 +09:00
Yuki Okushi
f3a401c23b Merge pull request #144 from JohnTitor/codecov-config
Add codecov config
2020-05-20 11:03:31 +09:00
Yuki Okushi
f7e8a912b3 Add codecov config 2020-05-19 14:45:39 +09:00
Yuki Okushi
11a1e11858 Merge pull request #143 from JohnTitor/new-testing
testing: Bump up to 1.0.1
2020-05-19 14:37:54 +09:00
Yuki Okushi
d0b27ee7e6 testing: Bump up to 1.0.1 2020-05-19 14:08:08 +09:00
Yuki Okushi
2d2b0591a2 Merge pull request #142 from JohnTitor/new-server
server: Bump up to 1.0.3
2020-05-19 13:58:39 +09:00
Yuki Okushi
abbc5f715f server: Bump up to 1.0.3 2020-05-19 10:23:17 +09:00
Yuki Okushi
140a6c76e3 Merge pull request #141 from actix/fix-ci
Only check compilation on mingw CI
2020-05-19 09:39:03 +09:00
Yuki Okushi
2395b28c5e Only check compilation on mingw CI
Disabled to run tests since somehow linking with OpenSSL is broken.
2020-05-19 09:11:27 +09:00
Yuki Okushi
aad4812ba6 Merge pull request #140 from JohnTitor/replace-net2
Replace deprecated `net2` crate with `socket2`
2020-05-19 08:58:40 +09:00
Yuki Okushi
ac6c78c476 testing: Replace net2 crate with socket2 2020-05-19 08:21:40 +09:00
Yuki Okushi
8218a098e8 server: Replace net2 crate with socket2 2020-05-19 08:17:44 +09:00
Yuki Okushi
49a6f525be Merge pull request #139 from JohnTitor/next-macros
macros: Bump up to 0.1.2
2020-05-19 07:50:46 +09:00
Yuki Okushi
f59ff82395 macros: Bump up to 0.1.2 2020-05-18 15:36:23 +09:00
Yuki Okushi
f7cc62564d Merge pull request #136 from JohnTitor/connect-alpha-3
actix-connect: Bump up to 2.0.0-alpha.3
2020-05-08 01:36:16 +09:00
Yuki Okushi
b125e2bdce actix-connect: Bump up to 2.0.0-alpha.3 2020-05-08 01:07:57 +09:00
Yuki Okushi
a5c185e80e Merge pull request #135 from actix/fix/unresolverd
correct spelling of ConnectError::Unresolved
2020-05-06 14:45:30 +09:00
Rob Ede
523cee0351 correct spelling of ConnectError::Unresolved 2020-05-03 23:14:22 +01:00
Yuki Okushi
343b3c09fc Merge pull request #134 from JohnTitor/new-rt
Bump up `actix-rt` to 1.1.1
2020-04-30 14:34:17 +09:00
Yuki Okushi
8a10580663 Bump up actix-rt to 1.1.1 2020-04-30 03:07:12 +09:00
Yuki Okushi
1b4a117063 Merge pull request #128 from Jonathas-Conceicao/topic/fix_memory_leak
actix-rt: Spawn future to cleanup pending JoinHandles
2020-04-30 02:58:13 +09:00
Yuki Okushi
700997fe48 Merge pull request #133 from actix/macro-compile-testing
add macro compile tests
2020-04-29 15:33:00 +09:00
Rob Ede
4c5568ed70 add trybuild compile tests 2020-04-26 20:11:16 +01:00
Yuki Okushi
7d0cfe1b4d Merge pull request #131 from danpintara/pull-1
actix-macros: Simplify test macros by using original signature
2020-04-23 02:33:52 +09:00
Daniel Pintara
e35c261c9f actix-macros: test: Simplify by using #sig instead of #name(#inputs) #ret 2020-04-22 00:13:32 +07:00
Yuki Okushi
115ef3fcb3 Merge pull request #130 from JohnTitor/dont-clone
Remove unnecessary clone usage
2020-04-20 08:37:10 +09:00
Yuki Okushi
c0482e2532 Remove unnecessary clone usage 2020-04-20 08:02:08 +09:00
Jonathas-Conceicao
6906f25e01 actix-rt: Set threshold size for arbiter's pending futures list
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-16 03:12:05 -03:00
Jonathas-Conceicao
06bca19524 actix-rt: Spawn future to cleanup pending JoinHandles
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-09 20:36:44 -03:00
Yuki Okushi
e9e2185296 Merge pull request #127 from rubdos/test-fixture-integration
Forward actix_rt::test arguments to test function.
2020-04-09 17:45:17 +09:00
Ruben De Smet
aae52a80ab Forward actix_rt::test arguments to test function.
Previously,

```rust
async fn foo(_a: u32) {}
```

would compile to

```rust
fn foo() {/* something */}
```

This patches changes this behaviour to

```rust
fn foo(_a: u32) {/* something */}
```

by simply forwarding the input arguments.

This allows any test fixture library (e.g. `rstest`, cfr.
https://github.com/la10736/rstest/issues/85) to integrate with
actix::test.
2020-04-08 16:48:10 +02:00
Yuki Okushi
65e2e8052e Release actix-rt 1.1.0 (#126)
* Release actix-rt 1.1.0

* Update actix-rt/CHANGES.md
2020-04-08 16:34:07 +09:00
Jonathas-Conceicao
783880bb0a actix-rt: Add Arbiter::is_running helper and fix System::is_set doc
`Arbiter::is_running` can be used to check if the current even-loop is currently
running; which should also work after the system has stopped. `System::is_set`
was updated to reflect what it actually does, it tells if the event loop has
started, which alone can't tell if it has stopped.

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-05 21:00:54 -03:00
Jonathas-Conceicao
69e8df9d62 actix-rt: Run rustfmt
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-05 21:00:54 -03:00
Yuki Okushi
9addf1a36b Merge pull request #125 from actix/fix/noisy-check
fix noisy check warning
2020-04-05 13:20:25 +09:00
Rob Ede
187a58472d fix noisy check warning 2020-04-04 23:57:52 +01:00
Nikolay Kim
30aa0b7bb6 add serde support to bytestring 2020-03-30 11:54:40 +06:00
Yuki Okushi
e775d08d76 Merge pull request #122 from actix/JohnTitor-patch-1
Upload coverage on PRs
2020-03-18 05:31:59 +09:00
Yuki Okushi
d5f95b54b7 Upload coverage on PRs 2020-03-18 05:03:37 +09:00
Yuki Okushi
904f90abc2 Merge pull request #121 from actix/revert-115-JohnTitor-patch-2
Revert "Disable windows-mingw builder temporarily"
2020-03-16 18:06:42 +09:00
Yuki Okushi
950c73077c Revert "Disable windows-mingw builder temporarily" 2020-03-16 17:31:10 +09:00
Yuki Okushi
732731a9c8 Merge pull request #120 from kornelski/err
std Error for BlockingError
2020-03-14 00:14:42 +09:00
Kornel Lesiński
0dd5a7ce1d std Error for BlockingError
#93
2020-03-13 12:35:20 +00:00
Yuki Okushi
7105091e51 Merge pull request #119 from JohnTitor/futures
Minimize `futures-*` dependencies
2020-03-13 05:12:37 +09:00
Yuki Okushi
08959dfc21 actix-tracing: Minimize futures-util dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
2792433ad6 actix-codec: Minimize futures-* dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
437a7b05c6 actix-rt: Fix build 2020-03-12 07:13:32 +09:00
Yuki Okushi
3d125c5381 actix-testing: Remove unused deps 2020-03-12 07:13:32 +09:00
Yuki Okushi
fbf7d6ef33 Update examples 2020-03-12 07:13:32 +09:00
Yuki Okushi
e6b6f08369 actix-utils: Minimize futures-* dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
4e806b3e3f actix-tls: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
f5b07053fc actix-server: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
dd3bec83bf actix-ioframe: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
f955e49930 actix-connect: Minimize futures-* dependencies 2020-03-12 04:22:38 +09:00
Yuki Okushi
4be11b541b Merge pull request #117 from actix/new-connect
Release actix-http v2.0.0-alpha.2
2020-03-08 15:13:52 +09:00
Yuki Okushi
baba533407 Update actix-http dependency 2020-03-08 14:38:07 +09:00
Yuki Okushi
2bf50826b0 Bump up to 2.0.0-alpha.2 2020-03-08 14:37:33 +09:00
Yuki Okushi
41b2a3b2e2 Merge pull request #116 from Jonathas-Conceicao/topic/upgrade_trust_dns
actix-connect: Upgrade versions of trust-dns
2020-03-08 14:31:07 +09:00
Jonathas-Conceicao
7fdd4a1118 actix-connect: Upgrade versions of trust-dns
- `Address` trait is now required to have static lifetime;
- `start_resolver` and `start_default_resolver` are now `async` and may return
  a `ConnectError`;

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-03-07 14:52:41 -03:00
Jonathas-Conceicao
cb30f9e86a actix-connect: Run cargo fmt
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-03-07 14:37:39 -03:00
Yuki Okushi
873f69be51 Merge pull request #115 from actix/JohnTitor-patch-2
Disable windows-mingw builder temporarily
2020-03-06 14:11:50 +09:00
Yuki Okushi
0967061f30 Merge pull request #114 from actix/JohnTitor-patch-1
Unpin quote version
2020-03-06 14:11:28 +09:00
Yuki Okushi
59902cb3a3 Disable windows-mingw builder temporarily 2020-03-06 13:48:55 +09:00
Yuki Okushi
857e50120b Unpin quote version 2020-03-06 13:45:21 +09:00
Yuki Okushi
36a2edf1cd Merge pull request #111 from dunnock/master
Fix build with failing quote
2020-03-05 23:05:19 +09:00
Maksym Vorobiov
346bd072d3 fix build with failing quote 2020-03-05 14:58:44 +02:00
Yuki Okushi
8d3d58b3b7 Merge pull request #110 from Aaron1011/fix/better-pin
Replace calls to `Pin::new_unchecked` with `pin_project`.
2020-03-05 21:52:55 +09:00
Aaron Hill
c41b5d8dd4 Replace calls to Pin::new_unchecked with pin_project.
This is a breaking change, as it changes some public methods to take
`Pin<&mut Self>` rather than `&mut self`.

This brings these methods into line with `Stream::poll_next`, which also
takes a `Pin<&mut Self>`
2020-03-04 12:08:52 -05:00
Yuki Okushi
693d5132a9 Merge pull request #109 from JohnTitor/new-tls
actix-tls: Bump up to 2.0.0-alpha.1
2020-03-03 22:29:08 +09:00
Yuki Okushi
f7dac3feb4 Bump up to 2.0.0-alpha.1 2020-03-03 19:47:40 +09:00
Yuki Okushi
ebc11d03f2 Merge pull request #108 from JohnTitor/new-connect
Release `actix-connect` v2.0.0-alpha.1
2020-03-03 18:33:08 +09:00
Yuki Okushi
e3ad5de270 Update actix-connect dependency 2020-03-03 17:24:41 +09:00
Yuki Okushi
91118bb2ce Bump up to 2.0.0-alpha.1 2020-03-03 17:24:25 +09:00
Yuki Okushi
6628688bcf Merge pull request #107 from JohnTitor/rustls-017
Update `rustls` and `tokio-rustls`
2020-03-01 23:48:13 +09:00
Yuki Okushi
b9567359fd actix-tls: Update rustls and tokio-rustls 2020-03-01 12:08:14 +09:00
Yuki Okushi
7dbc0264b1 actix-connect: Update rustls and tokio-rustls 2020-03-01 12:08:14 +09:00
Erich Gubler
1b7c969f6a actix-rt: minimize futures dependencies to futures-{channel,util} with default features off (#104)
* build(deps): minimize `futures` deps by using `futures-channel` and `futures-util` directly

* style(actix-rt): enforce spaces around equals in `Cargo.toml`
2020-02-27 01:15:21 +09:00
Jonathas-Conceicao
f1685d8253 Add Arbiter::local_join associated function
Arbiter::local_join function can be used to await for futures spawned
on current arbiter.

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-02-26 12:59:46 -03:00
Jonathas-Conceicao
e3b6a33b97 Add integration tests
These initial tests validade basic usage with timed futures for:
- `System::block_on`;
- `Arbiter::new`;
- `Arbiter::stop`;
- `Arbiter::join`;

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-02-26 12:59:46 -03:00
Yuki Okushi
13b503435f Merge pull request #106 from JohnTitor/server-102
Release actix-server 1.0.2
2020-02-26 20:53:00 +09:00
Yuki Okushi
98f0290f65 actix-server: Bump up to 1.0.2 2020-02-26 19:48:52 +09:00
Yuki Okushi
b8f66f5e7f Update changelog 2020-02-26 19:48:41 +09:00
Yuki Okushi
dd59ee498e Add FIXME comment 2020-02-26 19:48:27 +09:00
Dany Laporte
83320efa31 Avoid error by register() on Windows (#103) 2020-02-26 18:40:31 +09:00
Yuki Okushi
c69bc11e3e Merge pull request #105 from actix/bench
Add action to check benchmark
2020-02-26 17:33:37 +09:00
Yuki Okushi
aad5c42ad7 Add action to check benchmark 2020-02-26 17:11:46 +09:00
Maxim Vorobjov
4d37858fc6 Benchmarks for actix-service: focused around UnsafeCell usage (#98)
* add benchmark comparing unsafecell vs refcell

* fix syntax

* add benches for and_then implementation options

* repeat benches to stabilize
2020-02-26 16:45:23 +09:00
Yuki Okushi
d402f08bb5 Merge pull request #102 from JohnTitor/single-import
Remove single import
2020-02-25 19:11:04 +09:00
Yuki Okushi
fa25e30427 Remove single import 2020-02-25 18:41:15 +09:00
Bo Yao
602db1779e Expose is_set (#99)
* Expose is_set

* Update doc and changes.md
2020-02-25 02:55:02 -03:00
Yuki Okushi
4f2910c6b3 Merge pull request #96 from actix/JohnTitor-patch-1
Disable coverage for PRs
2020-02-15 01:55:20 +09:00
Yuki Okushi
9f7d6bc068 Disable coverage for PRs 2020-02-14 07:30:21 +09:00
Yuki Okushi
6908b58943 Merge pull request #92 from actix/bye-travis
Move script from Travis to Actions
2020-02-02 06:28:42 +09:00
Yuki Okushi
043057ecbd Fix import scopes 2020-02-01 23:32:08 +09:00
Yuki Okushi
e12bf9200b Clean up metadata 2020-01-31 02:21:25 +09:00
Yuki Okushi
03d431e663 Add badges on README 2020-01-31 00:01:47 +09:00
Yuki Okushi
f0d352604e Remove travis config 2020-01-31 00:01:34 +09:00
Yuki Okushi
2f67e4f563 Use markdown format 2020-01-31 00:01:24 +09:00
Yuki Okushi
d1155d60ec Tweak Actions 2020-01-31 00:01:11 +09:00
Yuki Okushi
28d9c6a760 Merge pull request #90 from actix/fix-ci
Tweak GitHub Actions
2020-01-30 00:46:21 +09:00
Yuki Okushi
a970c2c997 Remove AppVeyor config 2020-01-29 12:05:55 +09:00
Yuki Okushi
d5a6c83207 Suppress/fix clippy warnings 2020-01-29 12:05:55 +09:00
Yuki Okushi
ee0db9a617 Tweak GitHub Actions 2020-01-29 12:05:55 +09:00
zero-systems
e5b5df1261 Optimize vector fill in builder. (#89)
* optimize vector fill
2020-01-22 06:35:22 +09:00
Nikolay Kim
dbfa13d6be Fixed unsoundness in .and_then()/.then() service combinators 2020-01-16 16:58:11 -08:00
Nikolay Kim
e7c2439543 prep release 2020-01-15 13:35:07 -08:00
Nikolay Kim
3116db5168 revert 1.0.3 changes 2020-01-15 13:24:38 -08:00
Nikolay Kim
5940731ef0 Fix actix-service 1.0.3 compatibility 2020-01-15 11:58:06 -08:00
Rajasekharan Vengalil
aed5fecc8a Add support for tokio tracing for actix Service. (#86)
* Add support for tokio tracing for actix Service.

* Address comments

* Change trace's return type to ApplyTransform

* Remove redundant type args

* Remove reference to MakeSpan from docs
2020-01-15 11:43:52 -08:00
Nikolay Kim
a751899aad Fixed unsoundness in AndThenService impl #83 2020-01-15 11:40:15 -08:00
Nikolay Kim
fa800aeba3 Fix AsRef<str> impl 2020-01-14 15:06:02 -08:00
Nikolay Kim
2f89483635 Merge branch 'master' of github.com:actix/actix-net 2020-01-14 00:42:29 -08:00
Nikolay Kim
3048073919 Add PartialEq<T: AsRef<str>>, AsRef<[u8]> impls 2020-01-13 11:58:31 +06:00
amosonn
4bbba803c1 Fix Service documentation (#85) 2020-01-12 07:44:01 +09:00
Sven-Hendrik Haase
4dcdeb6795 Merge pull request #84 from currency-engineering/master
Minor grammatical fix to docs.
2020-01-10 15:28:19 +01:00
Eric Findlay
3b4f222242 Minor grammatical fix to docs. 2020-01-10 20:52:49 +09:00
Nikolay Kim
7c5fa25b23 Add into_service helper function 2020-01-08 18:31:50 +06:00
Nikolay Kim
3551d6674d Add Clone impl for condition::Waiter 2020-01-08 11:18:56 +06:00
Nikolay Kim
9f00daea80 add Condition and Pool 2020-01-08 10:59:27 +06:00
Nikolay Kim
7dddeab2a8 Add ResourceDef::resource_path_named() path generation method 2019-12-31 18:02:43 +06:00
Nikolay Kim
dcbcc40da2 Revert "Support named parameters for ResourceDef::resource_path() in form of ((&k, &v), ...)"
This reverts commit b0d44198ba.
2019-12-31 15:14:53 +06:00
Nikolay Kim
b0d44198ba Support named parameters for ResourceDef::resource_path() in form of ((&k, &v), ...) 2019-12-31 14:53:30 +06:00
Nikolay Kim
974bd6b01e leak string instead of rc 2019-12-31 12:04:35 +06:00
Nikolay Kim
5779da0f49 refactor service and state manahement 2019-12-29 13:42:42 +06:00
Nikolay Kim
1918c8d4f8 rename .run to .start() 2019-12-29 10:07:46 +06:00
Nikolay Kim
e21c58930b Add impl IntoPattern for &String 2019-12-25 21:34:14 +04:00
Nikolay Kim
59c5e9be6a Use IntoPattern for RouterBuilder::path() 2019-12-25 21:01:07 +04:00
Nikolay Kim
a2a9d9764d introduce IntoPattern trait 2019-12-25 19:54:20 +04:00
Nikolay Kim
bf0a9d2f6e Add IntoPatterns trait 2019-12-25 15:34:21 +04:00
Nikolay Kim
119027f822 fmt 2019-12-25 15:10:13 +04:00
Nikolay Kim
0fe8038d23 allow specify set of resource patters 2019-12-25 15:10:01 +04:00
Nikolay Kim
b599bc4a0c map_config() and unit_config() accepts IntoServiceFactory type 2019-12-22 16:30:49 +04:00
Nikolay Kim
a80e1f8370 fix new() method and make from_static and from_bytes_unchecked methods const 2019-12-22 16:24:28 +04:00
Nikolay Kim
5fe759cc02 Merge branch 'master' of github.com:actix/actix-net 2019-12-20 09:15:19 +06:00
Nikolay Kim
05549f0b42 Add methods to check LocalWaker registration state 2019-12-20 09:13:11 +06:00
Yuki Okushi
b1430eaded Run tests for all features as possible (#78) 2019-12-19 16:31:32 +09:00
Nikolay Kim
0d3f9e74c5 Use .advance() intead of .split_to() 2019-12-19 09:50:31 +06:00
203 changed files with 12476 additions and 11843 deletions


@@ -1,41 +0,0 @@
environment:
global:
PROJECT_NAME: actix-net
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test

.cargo/config.toml (new file)

@@ -0,0 +1,3 @@
[alias]
chk = "check --workspace --all-features --tests --examples --bins"
lint = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"
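These are standard Cargo aliases: with this file in place, `cargo chk` expands to the workspace-wide check command above, and `cargo lint` runs the same Clippy invocation that the CI lint job uses (including `-Dclippy::todo`).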

.github/PULL_REQUEST_TEMPLATE.md (new file)

@@ -0,0 +1,24 @@
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
INSERT_PR_TYPE
## PR Checklist
Check your PR fulfills the following:
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt
## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->
<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->

.github/workflows/ci.yml (new file)

@@ -0,0 +1,131 @@
name: CI
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [master]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
- { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
- { name: Windows (32-bit), os: windows-latest, triple: i686-pc-windows-msvc }
version:
- 1.46.0 # MSRV
- stable
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
VCPKGRS_DYNAMIC: 1
steps:
- name: Setup Routing
if: matrix.target.os == 'macos-latest'
run: sudo ifconfig lo0 alias 127.0.0.3
- uses: actions/checkout@v2
# install OpenSSL on Windows
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc' || matrix.target.triple == 'i686-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install OpenSSL
if: matrix.target.triple == 'i686-pc-windows-msvc'
run: vcpkg install openssl:x86-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
# - name: Install MSYS2
# if: matrix.target.triple == 'x86_64-pc-windows-gnu'
# uses: msys2/setup-msys2@v2
# - name: Install MinGW Packages
# if: matrix.target.triple == 'x86_64-pc-windows-gnu'
# run: |
# msys2 -c 'pacman -Sy --noconfirm pacman'
# msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
# - name: Generate Cargo.lock
# uses: actions-rs/cargo@v1
# with:
# command: generate-lockfile
# - name: Cache Dependencies
# uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with:
command: hack
args: check --workspace --no-default-features
- name: check minimal + tests
uses: actions-rs/cargo@v1
with:
command: hack
args: check --workspace --no-default-features --tests --examples
- name: check default
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --tests --examples
- name: check full
# TODO: compile OpenSSL and run tests on MinGW
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --all-features --tests --examples
- name: tests
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --all-features --no-fail-fast -- --nocapture
- name: Generate coverage file
if: >
matrix.target.os == 'ubuntu-latest'
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml --verbose
- name: Upload to Codecov
if: >
matrix.target.os == 'ubuntu-latest'
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
uses: codecov/codecov-action@v1
with:
file: cobertura.xml
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.2 --no-default-features --features ci-autoclean
cargo-cache

.github/workflows/clippy-fmt.yml (new file)

@@ -0,0 +1,42 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: rustfmt
override: true
- name: Rustfmt Check
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: clippy
override: true
- name: Clippy Check
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --all-features --tests --examples --bins -- -Dclippy::todo


@@ -1,58 +0,0 @@
name: CI
on: [push, pull_request]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
toolchain:
- x86_64-pc-windows-msvc
- x86_64-pc-windows-gnu
- i686-pc-windows-msvc
- x86_64-apple-darwin
version:
- stable
- nightly
include:
- toolchain: x86_64-pc-windows-msvc
os: windows-latest
- toolchain: x86_64-pc-windows-gnu
os: windows-latest
- toolchain: i686-pc-windows-msvc
os: windows-latest
- toolchain: x86_64-apple-darwin
os: macOS-latest
name: ${{ matrix.version }} - ${{ matrix.toolchain }}
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@master
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.toolchain }}
default: true
- name: check nightly
if: matrix.version == 'nightly'
uses: actions-rs/cargo@v1
with:
command: check
args: --all --benches --bins --examples --tests
- name: check stable
if: matrix.version == 'stable'
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all -- --nocapture

.github/workflows/upload-doc.yml (new file)

@@ -0,0 +1,35 @@
name: Upload documentation
on:
push:
branches: [master]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Build Docs
uses: actions-rs/cargo@v1
with:
command: doc
args: --workspace --all-features --no-deps
- name: Tweak HTML
run: echo '<meta http-equiv="refresh" content="0;url=actix_server/index.html">' > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: target/doc


@@ -1,49 +0,0 @@
language: rust
sudo: required
dist: trusty
cache:
cargo: true
apt: true
matrix:
include:
- rust: stable
- rust: beta
- rust: nightly-2019-11-07
allow_failures:
- rust: nightly-2019-11-07
env:
global:
- RUSTFLAGS="-C link-dead-code"
- OPENSSL_VERSION=openssl-1.0.2
before_install:
- sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
- sudo apt-get update -qq
- sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
before_cache: |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-11-07" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install --version 0.6.11 cargo-tarpaulin
fi
# Add clippy
before_script:
- export PATH=$PATH:~/.cargo/bin
script:
- |
if [[ "$TRAVIS_RUST_VERSION" != "nightly-2019-11-07" ]]; then
cargo clean
cargo test --all --all-features -- --nocapture
fi
after_success:
- |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-11-07" ]]; then
taskset -c 0 cargo tarpaulin --all --all-features --out Xml
echo "Uploaded code coverage"
bash <(curl -s https://codecov.io/bash)
fi


@@ -34,10 +34,13 @@ This Code of Conduct applies both within project spaces and in public spaces whe
 ## Enforcement
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
 Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+[@robjtede]: https://github.com/robjtede
+[@JohnTitor]: https://github.com/JohnTitor
 ## Attribution
 This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]


@@ -1,31 +1,34 @@
 [workspace]
 members = [
   "actix-codec",
-  "actix-connect",
-  "actix-ioframe",
-  "actix-rt",
   "actix-macros",
-  "actix-service",
+  "actix-router",
+  "actix-rt",
   "actix-server",
-  "actix-testing",
+  "actix-service",
-  "actix-threadpool",
   "actix-tls",
-  "actix-tracing",
   "actix-utils",
-  "router",
+  "bytestring",
-  "string",
+  "local-channel",
+  "local-waker",
 ]
 [patch.crates-io]
 actix-codec = { path = "actix-codec" }
-actix-connect = { path = "actix-connect" }
-actix-ioframe = { path = "actix-ioframe" }
-actix-rt = { path = "actix-rt" }
 actix-macros = { path = "actix-macros" }
+actix-router = { path = "actix-router" }
+actix-rt = { path = "actix-rt" }
 actix-server = { path = "actix-server" }
 actix-service = { path = "actix-service" }
-actix-testing = { path = "actix-testing" }
-actix-threadpool = { path = "actix-threadpool" }
 actix-tls = { path = "actix-tls" }
-actix-tracing = { path = "actix-tracing" }
 actix-utils = { path = "actix-utils" }
-actix-router = { path = "router" }
+bytestring = { path = "bytestring" }
-bytestring = { path = "string" }
+local-channel = { path = "local-channel" }
+local-waker = { path = "local-waker" }
+[profile.release]
+lto = true
+opt-level = 3
+codegen-units = 1


@@ -186,7 +186,7 @@
 same "printed page" as the copyright notice for easier
 identification within third-party archives.
-Copyright 2017-NOW Nikolay Kim
+Copyright 2017-NOW Actix Team
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.


@@ -1,4 +1,4 @@
-Copyright (c) 2017 Nikolay Kim
+Copyright (c) 2017-NOW Actix Team
 Permission is hereby granted, free of charge, to any
 person obtaining a copy of this software and associated


@@ -1,60 +1,26 @@
-# Actix net [![Build Status](https://travis-ci.org/actix/actix-net.svg?branch=master)](https://travis-ci.org/actix/actix-net) [![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+# Actix Net
-Actix net - framework for composable network services
+> A collection of lower-level libraries for composable network services.
-## Documentation & community resources
+![Apache 2.0 or MIT licensed](https://img.shields.io/crates/l/actix-server)
+[![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net)
+[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
-* [Chat on gitter](https://gitter.im/actix/actix)
+## Build statuses
-* Minimum supported Rust version: 1.39 or later
+| Platform | Build Status |
+| ---------------- | ------------ |
+| Linux | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Linux)") |
+| macOS | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28macOS%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(macOS)") |
+| Windows | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Windows%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows)") |
+| Windows (MinGW) | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Windows-mingw%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows-mingw)") |
 ## Example
+See `actix-server/examples` and `actix-tls/examples` for some basic examples.
-```rust
+### MSRV
-fn main() -> io::Result<()> {
+This repo's Minimum Supported Rust Version (MSRV) is 1.46.0.
-    // load ssl keys
-    let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
-    builder.set_private_key_file("./examples/key.pem", SslFiletype::PEM).unwrap();
-    builder.set_certificate_chain_file("./examples/cert.pem").unwrap();
-    let acceptor = builder.build();
-    let num = Arc::new(AtomicUsize::new(0));
-    // bind socket address and start workers. By default server uses number of
-    // available logical cpu as threads count. actix net start separate
-    // instances of service pipeline in each worker.
-    Server::build()
-        .bind(
-            // configure service pipeline
-            "basic", "0.0.0.0:8443",
-            move || {
-                let num = num.clone();
-                let acceptor = acceptor.clone();
-                // construct transformation pipeline
-                pipeline(
-                    // service for converting incoming TcpStream to a SslStream<TcpStream>
-                    fn_service(move |stream: actix_rt::net::TcpStream| async move {
-                        SslAcceptorExt::accept_async(&acceptor, stream.into_parts().0).await
-                            .map_err(|e| println!("Openssl error: {}", e))
-                    }))
-                    // .and_then() combinator chains result of previos service call to argument
-                    /// for next service calll. in this case, on success we chain
-                    /// ssl stream to the `logger` service.
-                    .and_then(fn_service(logger))
-                    // Next service counts number of connections
-                    .and_then(move |_| {
-                        let num = num.fetch_add(1, Ordering::Relaxed);
-                        println!("got ssl connection {:?}", num);
-                        future::ok(())
-                    })
-            },
-        )?
-        .run()
-}
-```
 ## License
 This project is licensed under either of
 * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
@@ -64,6 +30,5 @@ at your option.
 ## Code of Conduct
-Contribution to the actix-net crate is organized under the terms of the
+Contribution to the actix-net repo is organized under the terms of the Contributor Covenant.
-Contributor Covenant, the maintainer of actix-net, @fafhrd91, promises to
+The Actix team promises to intervene to uphold that code of conduct.
-intervene to uphold that code of conduct.


@@ -1,31 +1,63 @@
# Changes

## Unreleased - 2021-xx-xx

## 0.4.0 - 2021-04-20
* No significant changes since v0.4.0-beta.1.

## 0.4.0-beta.1 - 2020-12-28
* Replace `pin-project` with `pin-project-lite`. [#237]
* Upgrade `tokio` dependency to `1`. [#237]
* Upgrade `tokio-util` dependency to `0.6`. [#237]
* Upgrade `bytes` dependency to `1`. [#237]

[#237]: https://github.com/actix/actix-net/pull/237

## 0.3.0 - 2020-08-23
* No changes from beta 2.

## 0.3.0-beta.2 - 2020-08-19
* Remove unused type parameter from `Framed::replace_codec`.

## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec::encode()` performance.
* Simplify `BytesCodec::decode()`.
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` to check emptiness of read buffer.

## 0.2.0 - 2019-12-10
* Use specific futures dependencies.

## 0.2.0-alpha.4
* Fix buffer remaining capacity calculation.

## 0.2.0-alpha.3
* Use tokio 0.2.
* Fix low/high watermark for write/read buffers.

## 0.2.0-alpha.2
* Migrated to `std::future`.

## 0.1.2 - 2019-03-27
* Added `Framed::map_io()` method.

## 0.1.1 - 2019-03-06
* Added `FramedParts::with_read_buffer()` method.

## 0.1.0 - 2018-12-09
* Move codec to separate crate.
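The 0.3.0-beta.1 renames above are easiest to see in a short sketch. This is not code from the repo; the transport type is left generic and the snippet is only meant to compile:

```rust
use actix_codec::{AsyncRead, AsyncWrite, BytesCodec, Framed};

// Minimal sketch: wrap a transport in `Framed` and use the renamed accessors
// (`codec_ref`/`io_ref` replaced `get_codec`/`get_ref`).
fn wrap<T: AsyncRead + AsyncWrite>(io: T) -> Framed<T, BytesCodec> {
    let framed = Framed::new(io, BytesCodec);
    let _codec: &BytesCodec = framed.codec_ref();
    let _io: &T = framed.io_ref();
    framed
}
```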

View File

@@ -1,16 +1,13 @@
 [package]
 name = "actix-codec"
-version = "0.2.0"
+version = "0.4.0"
 authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
-description = "Utilities for encoding and decoding frames"
+description = "Codec utilities for working with framed protocols"
 keywords = ["network", "framework", "async", "futures"]
-homepage = "https://actix.rs"
-repository = "https://github.com/actix/actix-net.git"
-documentation = "https://docs.rs/actix-codec/"
+repository = "https://github.com/actix/actix-net"
 categories = ["network-programming", "asynchronous"]
-license = "MIT/Apache-2.0"
+license = "MIT OR Apache-2.0"
 edition = "2018"
-workspace = ".."

 [lib]
 name = "actix_codec"
@@ -18,9 +15,10 @@ path = "src/lib.rs"
 [dependencies]
 bitflags = "1.2.1"
-bytes = "0.5.2"
-futures-core = "0.3.1"
-futures-sink = "0.3.1"
-tokio = { version = "0.2.4", default-features = false }
-tokio-util = { version = "0.2.0", default-features = false, features = ["codec"] }
-log = "0.4"
+bytes = "1"
+futures-core = { version = "0.3.7", default-features = false }
+futures-sink = { version = "0.3.7", default-features = false }
+log = "0.4"
+pin-project-lite = "0.2"
+tokio = "1"
+tokio-util = { version = "0.6", features = ["codec", "io"] }

View File

@@ -1,4 +1,4 @@
use bytes::{BufMut, Bytes, BytesMut}; use bytes::{Buf, Bytes, BytesMut};
use std::io; use std::io;
use super::{Decoder, Encoder}; use super::{Decoder, Encoder};
@@ -9,13 +9,12 @@ use super::{Decoder, Encoder};
#[derive(Debug, Copy, Clone)] #[derive(Debug, Copy, Clone)]
pub struct BytesCodec; pub struct BytesCodec;
impl Encoder for BytesCodec { impl Encoder<Bytes> for BytesCodec {
type Item = Bytes;
type Error = io::Error; type Error = io::Error;
#[inline]
fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> Result<(), Self::Error> { fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> Result<(), Self::Error> {
dst.reserve(item.len()); dst.extend_from_slice(item.chunk());
dst.put(item);
Ok(()) Ok(())
} }
} }
@@ -28,8 +27,7 @@ impl Decoder for BytesCodec {
if src.is_empty() { if src.is_empty() {
Ok(None) Ok(None)
} else { } else {
let len = src.len(); Ok(Some(src.split()))
Ok(Some(src.split_to(len)))
} }
} }
} }
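As a reference point, here is a small self-contained example of the updated codec's behaviour (an illustration, not code from the repo): `encode` appends the item's bytes to the destination buffer, and `decode` drains the whole buffer as a single frame.

```rust
use actix_codec::{BytesCodec, Decoder, Encoder};
use bytes::{Bytes, BytesMut};

fn main() -> std::io::Result<()> {
    let mut codec = BytesCodec;
    let mut buf = BytesMut::new();

    // encode appends the item's bytes to the destination buffer
    codec.encode(Bytes::from_static(b"hello"), &mut buf)?;
    assert_eq!(&buf[..], b"hello");

    // decode drains the whole buffer as a single frame, or returns None when empty
    let frame = codec.decode(&mut buf)?;
    assert_eq!(frame.as_deref(), Some(&b"hello"[..]));
    assert!(codec.decode(&mut buf)?.is_none());

    Ok(())
}
```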

View File

@@ -2,13 +2,15 @@ use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::{fmt, io}; use std::{fmt, io};
use bytes::BytesMut; use bytes::{Buf, BytesMut};
use futures_core::{ready, Stream}; use futures_core::{ready, Stream};
use futures_sink::Sink; use futures_sink::Sink;
use crate::{AsyncRead, AsyncWrite, Decoder, Encoder}; use crate::{AsyncRead, AsyncWrite, Decoder, Encoder};
/// Low-water mark
const LW: usize = 1024; const LW: usize = 1024;
/// High-water mark
const HW: usize = 8 * 1024; const HW: usize = 8 * 1024;
bitflags::bitflags! { bitflags::bitflags! {
@@ -18,32 +20,30 @@ bitflags::bitflags! {
} }
} }
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using pin_project_lite::pin_project! {
/// the `Encoder` and `Decoder` traits to encode and decode frames. /// A unified `Stream` and `Sink` interface to an underlying I/O object, using
pub struct Framed<T, U> { /// the `Encoder` and `Decoder` traits to encode and decode frames.
io: T, ///
codec: U, /// Raw I/O objects work with byte sequences, but higher-level code usually
flags: Flags, /// wants to batch these into meaningful chunks, called "frames". This
read_buf: BytesMut, /// method layers framing on top of an I/O object, by using the `Encoder`/`Decoder`
write_buf: BytesMut, /// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
pub struct Framed<T, U> {
#[pin]
io: T,
codec: U,
flags: Flags,
read_buf: BytesMut,
write_buf: BytesMut,
}
} }
impl<T, U> Unpin for Framed<T, U> {}
impl<T, U> Framed<T, U> impl<T, U> Framed<T, U>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite,
U: Decoder + Encoder, U: Decoder,
{ {
/// Provides a `Stream` and `Sink` interface for reading and writing to this
/// `Io` object, using `Decode` and `Encode` to read and write the raw data.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Codec`
/// traits to handle encoding and decoding of messages frames. Note that
/// the incoming and outgoing frame types may be distinct.
///
/// This function returns a *single* object that is both `Stream` and /// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering /// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the /// things like gzip or TLS, which require both read and write access to the
@@ -60,40 +60,13 @@ where
} }
impl<T, U> Framed<T, U> { impl<T, U> Framed<T, U> {
/// Provides a `Stream` and `Sink` interface for reading and writing to this
/// `Io` object, using `Decode` and `Encode` to read and write the raw data.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Codec`
/// traits to handle encoding and decoding of messages frames. Note that
/// the incoming and outgoing frame types may be distinct.
///
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// This object takes a stream, a read buffer, and a write buffer. These
/// fields can be obtained from an existing `Framed` with the
/// `into_parts` method.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
io: parts.io,
codec: parts.codec,
flags: parts.flags,
write_buf: parts.write_buf,
read_buf: parts.read_buf,
}
}
/// Returns a reference to the underlying codec. /// Returns a reference to the underlying codec.
pub fn get_codec(&self) -> &U { pub fn codec_ref(&self) -> &U {
&self.codec &self.codec
} }
/// Returns a mutable reference to the underlying codec. /// Returns a mutable reference to the underlying codec.
pub fn get_codec_mut(&mut self) -> &mut U { pub fn codec_mut(&mut self) -> &mut U {
&mut self.codec &mut self.codec
} }
@@ -103,20 +76,29 @@ impl<T, U> Framed<T, U> {
/// Note that care should be taken to not tamper with the underlying stream /// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise /// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with. /// being worked with.
pub fn get_ref(&self) -> &T { pub fn io_ref(&self) -> &T {
&self.io &self.io
} }
/// Returns a mutable reference to the underlying I/O stream wrapped by /// Returns a mutable reference to the underlying I/O stream.
/// `Frame`.
/// ///
/// Note that care should be taken to not tamper with the underlying stream /// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise /// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with. /// being worked with.
pub fn get_mut(&mut self) -> &mut T { pub fn io_mut(&mut self) -> &mut T {
&mut self.io &mut self.io
} }
/// Returns a `Pin` of a mutable reference to the underlying I/O stream.
pub fn io_pin(self: Pin<&mut Self>) -> Pin<&mut T> {
self.project().io
}
/// Check if read buffer is empty.
pub fn is_read_buf_empty(&self) -> bool {
self.read_buf.is_empty()
}
/// Check if write buffer is empty. /// Check if write buffer is empty.
pub fn is_write_buf_empty(&self) -> bool { pub fn is_write_buf_empty(&self) -> bool {
self.write_buf.is_empty() self.write_buf.is_empty()
@@ -127,8 +109,15 @@ impl<T, U> Framed<T, U> {
self.write_buf.len() >= HW self.write_buf.len() >= HW
} }
/// Check if framed is able to write more data.
///
/// The `Framed` object is considered ready if there is free space in the write buffer.
pub fn is_write_ready(&self) -> bool {
self.write_buf.len() < HW
}
/// Consume the `Frame`, returning `Frame` with different codec. /// Consume the `Frame`, returning `Frame` with different codec.
pub fn into_framed<U2>(self, codec: U2) -> Framed<T, U2> { pub fn replace_codec<U2>(self, codec: U2) -> Framed<T, U2> {
Framed { Framed {
codec, codec,
io: self.io, io: self.io,
@@ -139,7 +128,7 @@ impl<T, U> Framed<T, U> {
} }
/// Consume the `Frame`, returning `Frame` with different io. /// Consume the `Frame`, returning `Frame` with different io.
pub fn map_io<F, T2>(self, f: F) -> Framed<T2, U> pub fn into_map_io<F, T2>(self, f: F) -> Framed<T2, U>
where where
F: Fn(T) -> T2, F: Fn(T) -> T2,
{ {
@@ -153,7 +142,7 @@ impl<T, U> Framed<T, U> {
} }
/// Consume the `Frame`, returning `Frame` with different codec. /// Consume the `Frame`, returning `Frame` with different codec.
pub fn map_codec<F, U2>(self, f: F) -> Framed<T, U2> pub fn into_map_codec<F, U2>(self, f: F) -> Framed<T, U2>
where where
F: Fn(U) -> U2, F: Fn(U) -> U2,
{ {
@@ -165,6 +154,209 @@ impl<T, U> Framed<T, U> {
write_buf: self.write_buf, write_buf: self.write_buf,
} }
} }
}
impl<T, U> Framed<T, U> {
/// Serialize an item and write it to the inner buffer.
pub fn write<I>(mut self: Pin<&mut Self>, item: I) -> Result<(), <U as Encoder<I>>::Error>
where
T: AsyncWrite,
U: Encoder<I>,
{
let this = self.as_mut().project();
let remaining = this.write_buf.capacity() - this.write_buf.len();
if remaining < LW {
this.write_buf.reserve(HW - remaining);
}
this.codec.encode(item, this.write_buf)?;
Ok(())
}
/// Try to read underlying I/O stream and decode item.
pub fn next_item(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<<U as Decoder>::Item, U::Error>>>
where
T: AsyncRead,
U: Decoder,
{
loop {
let mut this = self.as_mut().project();
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if this.flags.contains(Flags::READABLE) {
if this.flags.contains(Flags::EOF) {
match this.codec.decode_eof(&mut this.read_buf) {
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
Ok(None) => return Poll::Ready(None),
Err(e) => return Poll::Ready(Some(Err(e))),
}
}
log::trace!("attempting to decode a frame");
match this.codec.decode(&mut this.read_buf) {
Ok(Some(frame)) => {
log::trace!("frame decoded from buffer");
return Poll::Ready(Some(Ok(frame)));
}
Err(e) => return Poll::Ready(Some(Err(e))),
_ => (), // Need more data
}
this.flags.remove(Flags::READABLE);
}
debug_assert!(!this.flags.contains(Flags::EOF));
// Otherwise, try to read more data and try again. Make sure we've got room
let remaining = this.read_buf.capacity() - this.read_buf.len();
if remaining < LW {
this.read_buf.reserve(HW - remaining)
}
let cnt = match tokio_util::io::poll_read_buf(this.io, cx, this.read_buf) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e.into()))),
Poll::Ready(Ok(cnt)) => cnt,
};
if cnt == 0 {
this.flags.insert(Flags::EOF);
}
this.flags.insert(Flags::READABLE);
}
}
/// Flush write buffer to underlying I/O stream.
pub fn flush<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
{
let mut this = self.as_mut().project();
log::trace!("flushing framed transport");
while !this.write_buf.is_empty() {
log::trace!("writing; remaining={}", this.write_buf.len());
let n = ready!(this.io.as_mut().poll_write(cx, this.write_buf))?;
if n == 0 {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)
.into()));
}
// remove written data
this.write_buf.advance(n);
}
// Try flushing the underlying IO
ready!(this.io.poll_flush(cx))?;
log::trace!("framed transport flushed");
Poll::Ready(Ok(()))
}
/// Flush write buffer and shutdown underlying I/O stream.
pub fn close<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
{
let mut this = self.as_mut().project();
ready!(this.io.as_mut().poll_flush(cx))?;
ready!(this.io.as_mut().poll_shutdown(cx))?;
Poll::Ready(Ok(()))
}
}
impl<T, U> Stream for Framed<T, U>
where
T: AsyncRead,
U: Decoder,
{
type Item = Result<U::Item, U::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.next_item(cx)
}
}
impl<T, U, I> Sink<I> for Framed<T, U>
where
T: AsyncWrite,
U: Encoder<I>,
U::Error: From<io::Error>,
{
type Error = U::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.is_write_ready() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> {
self.write(item)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.close(cx)
}
}
impl<T, U> fmt::Debug for Framed<T, U>
where
T: fmt::Debug,
U: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Framed")
.field("io", &self.io)
.field("codec", &self.codec)
.finish()
}
}
impl<T, U> Framed<T, U> {
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// These objects take a stream, a read buffer and a write buffer. These
/// fields can be obtained from an existing `Framed` with the `into_parts` method.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
io: parts.io,
codec: parts.codec,
flags: parts.flags,
write_buf: parts.write_buf,
read_buf: parts.read_buf,
}
}
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer /// Consumes the `Frame`, returning its underlying I/O stream, the buffer
/// with unprocessed data, and the codec. /// with unprocessed data, and the codec.
@@ -183,198 +375,6 @@ impl<T, U> Framed<T, U> {
} }
} }
impl<T, U> Framed<T, U> {
/// Serialize an item and write it to the inner buffer.
pub fn write(&mut self, item: <U as Encoder>::Item) -> Result<(), <U as Encoder>::Error>
where
T: AsyncWrite,
U: Encoder,
{
let remaining = self.write_buf.capacity() - self.write_buf.len();
if remaining < LW {
self.write_buf.reserve(HW - remaining);
}
self.codec.encode(item, &mut self.write_buf)?;
Ok(())
}
/// Check if framed is able to write more data.
///
/// The `Framed` object is considered ready if there is free space in the write buffer.
pub fn is_write_ready(&self) -> bool {
self.write_buf.len() < HW
}
/// Try to read underlying I/O stream and decode item.
pub fn next_item(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<U::Item, U::Error>>>
where
T: AsyncRead,
U: Decoder,
{
loop {
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if self.flags.contains(Flags::READABLE) {
if self.flags.contains(Flags::EOF) {
match self.codec.decode_eof(&mut self.read_buf) {
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
Ok(None) => return Poll::Ready(None),
Err(e) => return Poll::Ready(Some(Err(e))),
}
}
log::trace!("attempting to decode a frame");
match self.codec.decode(&mut self.read_buf) {
Ok(Some(frame)) => {
log::trace!("frame decoded from buffer");
return Poll::Ready(Some(Ok(frame)));
}
Err(e) => return Poll::Ready(Some(Err(e))),
_ => (), // Need more data
}
self.flags.remove(Flags::READABLE);
}
debug_assert!(!self.flags.contains(Flags::EOF));
// Otherwise, try to read more data and try again. Make sure we've got room
let remaining = self.read_buf.capacity() - self.read_buf.len();
if remaining < LW {
self.read_buf.reserve(HW - remaining)
}
let cnt = match unsafe {
Pin::new_unchecked(&mut self.io).poll_read_buf(cx, &mut self.read_buf)
} {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e.into()))),
Poll::Ready(Ok(cnt)) => cnt,
};
if cnt == 0 {
self.flags.insert(Flags::EOF);
}
self.flags.insert(Flags::READABLE);
}
}
/// Flush write buffer to underlying I/O stream.
pub fn flush(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder,
{
log::trace!("flushing framed transport");
while !self.write_buf.is_empty() {
log::trace!("writing; remaining={}", self.write_buf.len());
let n = ready!(unsafe {
Pin::new_unchecked(&mut self.io).poll_write(cx, &self.write_buf)
})?;
if n == 0 {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)
.into()));
}
// remove written data
let _ = self.write_buf.split_to(n);
}
// Try flushing the underlying IO
ready!(unsafe { Pin::new_unchecked(&mut self.io).poll_flush(cx) })?;
log::trace!("framed transport flushed");
Poll::Ready(Ok(()))
}
/// Flush write buffer and shutdown underlying I/O stream.
pub fn close(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder,
{
unsafe {
ready!(Pin::new_unchecked(&mut self.io).poll_flush(cx))?;
ready!(Pin::new_unchecked(&mut self.io).poll_shutdown(cx))?;
}
Poll::Ready(Ok(()))
}
}
impl<T, U> Stream for Framed<T, U>
where
T: AsyncRead,
U: Decoder,
{
type Item = Result<U::Item, U::Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.next_item(cx)
}
}
impl<T, U> Sink<U::Item> for Framed<T, U>
where
T: AsyncWrite,
U: Encoder,
U::Error: From<io::Error>,
{
type Error = U::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.is_write_ready() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn start_send(
mut self: Pin<&mut Self>,
item: <U as Encoder>::Item,
) -> Result<(), Self::Error> {
self.write(item)
}
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
self.flush(cx)
}
fn poll_close(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
self.close(cx)
}
}
impl<T, U> fmt::Debug for Framed<T, U>
where
T: fmt::Debug,
U: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Framed")
.field("io", &self.io)
.field("codec", &self.codec)
.finish()
}
}
/// `FramedParts` contains an export of the data of a Framed transport. /// `FramedParts` contains an export of the data of a Framed transport.
/// It can be used to construct a new `Framed` with a different codec. /// It can be used to construct a new `Framed` with a different codec.
/// It contains all current buffers and the inner transport. /// It contains all current buffers and the inner transport.
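Putting the pieces above together, here is a hedged usage sketch of `Framed` as a combined `Stream`/`Sink` over a `TcpStream`. It mirrors the crate's own tests; the connection setup and error handling are illustrative only:

```rust
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use bytes::Bytes;
use futures::{SinkExt, StreamExt};

// Minimal sketch: `Framed` turns a raw TcpStream into a Stream + Sink of frames.
async fn ping(stream: TcpStream) -> std::io::Result<()> {
    let mut framed = Framed::new(stream, BytesCodec);

    // Sink side: encode a frame into the write buffer, then flush it to the socket.
    framed.send(Bytes::from_static(b"ping")).await?;

    // Stream side: read and decode the next frame, if the peer sent one.
    if let Some(frame) = framed.next().await {
        println!("received {} bytes", frame?.len());
    }

    Ok(())
}
```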

View File

@@ -1,12 +1,16 @@
//! Utilities for encoding and decoding frames. //! Codec utilities for working with framed protocols.
//! //!
//! Contains adapters to go from streams of bytes, [`AsyncRead`] and //! Contains adapters to go from streams of bytes, [`AsyncRead`] and
//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`]. //! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`].
//! Framed streams are also known as [transports]. //! Framed streams are also known as `transports`.
//! //!
//! [`AsyncRead`]: # //! [`Sink`]: futures_sink::Sink
//! [`AsyncWrite`]: # //! [`Stream`]: futures_core::Stream
#![deny(rust_2018_idioms, warnings)]
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod bcodec; mod bcodec;
mod framed; mod framed;
@@ -14,5 +18,6 @@ mod framed;
pub use self::bcodec::BytesCodec; pub use self::bcodec::BytesCodec;
pub use self::framed::{Framed, FramedParts}; pub use self::framed::{Framed, FramedParts};
pub use tokio::io::{AsyncRead, AsyncWrite}; pub use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
pub use tokio_util::codec::{Decoder, Encoder}; pub use tokio_util::codec::{Decoder, Encoder};
pub use tokio_util::io::poll_read_buf;

View File

@@ -1,112 +0,0 @@
# Changes
## [1.0.1] - 2019-12-15
* Fix trust-dns-resolver compilation
## [1.0.0] - 2019-12-11
* Release
## [1.0.0-alpha.3] - 2019-12-07
### Changed
* Migrate to tokio 0.2
## [1.0.0-alpha.2] - 2019-12-02
### Changed
* Migrated to `std::future`
## [0.3.0] - 2019-10-03
### Changed
* Update `rustls` to 0.16
* Minimum required Rust version upped to 1.37.0
## [0.2.5] - 2019-09-05
* Add `TcpConnectService`
## [0.2.4] - 2019-09-02
* Use arbiter's storage for default async resolver
## [0.2.3] - 2019-08-05
* Add `ConnectService` and `OpensslConnectService`
## [0.2.2] - 2019-07-24
* Add `rustls` support
## [0.2.1] - 2019-07-17
### Added
* Expose Connect addrs #30
### Changed
* Update `derive_more` to 0.15
## [0.2.0] - 2019-05-12
### Changed
* Upgrade to actix-service 0.4
## [0.1.5] - 2019-04-19
### Added
* `Connect::set_addr()`
### Changed
* Use trust-dns-resolver 0.11.0
## [0.1.4] - 2019-04-12
### Changed
* Do not start default resolver immediately for default connector.
## [0.1.3] - 2019-04-11
### Changed
* Start trust-dns default resolver on first use
## [0.1.2] - 2019-04-04
### Added
* Log error if dns system config could not be loaded.
### Changed
* Rename connect Connector to TcpConnector #10
## [0.1.1] - 2019-03-15
### Fixed
* Fix error handling for single address
## [0.1.0] - 2019-03-14
* Refactor resolver and connector services
* Rename crate

View File

@@ -1,58 +0,0 @@
[package]
name = "actix-connect"
version = "1.0.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix connect - tcp connector service"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-connect/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[package.metadata.docs.rs]
features = ["openssl", "rustls", "uri"]
[lib]
name = "actix_connect"
path = "src/lib.rs"
[features]
default = ["uri"]
# openssl
openssl = ["open-ssl", "tokio-openssl"]
# rustls
rustls = ["rust-tls", "tokio-rustls", "webpki"]
# support http::Uri as connect address
uri = ["http"]
[dependencies]
actix-service = "1.0.0"
actix-codec = "0.2.0"
actix-utils = "1.0.3"
actix-rt = "1.0.0"
derive_more = "0.99.2"
either = "1.5.2"
futures = "0.3.1"
http = { version = "0.2.0", optional = true }
log = "0.4"
trust-dns-proto = "=0.18.0-alpha.2"
trust-dns-resolver = "=0.18.0-alpha.2"
# openssl
open-ssl = { version="0.10", package = "openssl", optional = true }
tokio-openssl = { version = "0.4.0", optional = true }
# rustls
rust-tls = { version = "0.16.0", package = "rustls", optional = true }
tokio-rustls = { version = "0.12.0", optional = true }
webpki = { version = "0.21", optional = true }
[dev-dependencies]
bytes = "0.5.3"
actix-testing = { version="1.0.0" }

View File

@@ -1,281 +0,0 @@
use std::collections::{vec_deque, VecDeque};
use std::fmt;
use std::iter::{FromIterator, FusedIterator};
use std::net::SocketAddr;
use either::Either;
/// Connect request
pub trait Address: Unpin {
/// Host name of the request
fn host(&self) -> &str;
/// Port of the request
fn port(&self) -> Option<u16>;
}
impl Address for String {
fn host(&self) -> &str {
&self
}
fn port(&self) -> Option<u16> {
None
}
}
impl Address for &'static str {
fn host(&self) -> &str {
self
}
fn port(&self) -> Option<u16> {
None
}
}
/// Connect request
#[derive(Eq, PartialEq, Debug, Hash)]
pub struct Connect<T> {
pub(crate) req: T,
pub(crate) port: u16,
pub(crate) addr: Option<Either<SocketAddr, VecDeque<SocketAddr>>>,
}
impl<T: Address> Connect<T> {
/// Create a `Connect` instance by splitting the string on ':' and converting the second part to a u16.
pub fn new(req: T) -> Connect<T> {
let (_, port) = parse(req.host());
Connect {
req,
port: port.unwrap_or(0),
addr: None,
}
}
/// Create a new `Connect` instance from a host and an address. The connector skips the name-resolution stage for such connect messages.
pub fn with(req: T, addr: SocketAddr) -> Connect<T> {
Connect {
req,
port: 0,
addr: Some(Either::Left(addr)),
}
}
/// Use port if address does not provide one.
///
/// By default it is set to 0.
pub fn set_port(mut self, port: u16) -> Self {
self.port = port;
self
}
/// Use address.
pub fn set_addr(mut self, addr: Option<SocketAddr>) -> Self {
if let Some(addr) = addr {
self.addr = Some(Either::Left(addr));
}
self
}
/// Use addresses.
pub fn set_addrs<I>(mut self, addrs: I) -> Self
where
I: IntoIterator<Item = SocketAddr>,
{
let mut addrs = VecDeque::from_iter(addrs);
self.addr = if addrs.len() < 2 {
addrs.pop_front().map(Either::Left)
} else {
Some(Either::Right(addrs))
};
self
}
/// Host name
pub fn host(&self) -> &str {
self.req.host()
}
/// Port of the request
pub fn port(&self) -> u16 {
self.req.port().unwrap_or(self.port)
}
/// Preresolved addresses of the request.
pub fn addrs(&self) -> ConnectAddrsIter<'_> {
let inner = match self.addr {
None => Either::Left(None),
Some(Either::Left(addr)) => Either::Left(Some(addr)),
Some(Either::Right(ref addrs)) => Either::Right(addrs.iter()),
};
ConnectAddrsIter { inner }
}
/// Takes preresolved addresses of the request.
pub fn take_addrs(&mut self) -> ConnectTakeAddrsIter {
let inner = match self.addr.take() {
None => Either::Left(None),
Some(Either::Left(addr)) => Either::Left(Some(addr)),
Some(Either::Right(addrs)) => Either::Right(addrs.into_iter()),
};
ConnectTakeAddrsIter { inner }
}
}
impl<T: Address> From<T> for Connect<T> {
fn from(addr: T) -> Self {
Connect::new(addr)
}
}
impl<T: Address> fmt::Display for Connect<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}:{}", self.host(), self.port())
}
}
/// Iterator over addresses in a [`Connect`](struct.Connect.html) request.
#[derive(Clone)]
pub struct ConnectAddrsIter<'a> {
inner: Either<Option<SocketAddr>, vec_deque::Iter<'a, SocketAddr>>,
}
impl Iterator for ConnectAddrsIter<'_> {
type Item = SocketAddr;
fn next(&mut self) -> Option<Self::Item> {
match self.inner {
Either::Left(ref mut opt) => opt.take(),
Either::Right(ref mut iter) => iter.next().copied(),
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self.inner {
Either::Left(Some(_)) => (1, Some(1)),
Either::Left(None) => (0, Some(0)),
Either::Right(ref iter) => iter.size_hint(),
}
}
}
impl fmt::Debug for ConnectAddrsIter<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.clone()).finish()
}
}
impl ExactSizeIterator for ConnectAddrsIter<'_> {}
impl FusedIterator for ConnectAddrsIter<'_> {}
/// Owned iterator over addresses in a [`Connect`](struct.Connect.html) request.
#[derive(Debug)]
pub struct ConnectTakeAddrsIter {
inner: Either<Option<SocketAddr>, vec_deque::IntoIter<SocketAddr>>,
}
impl Iterator for ConnectTakeAddrsIter {
type Item = SocketAddr;
fn next(&mut self) -> Option<Self::Item> {
match self.inner {
Either::Left(ref mut opt) => opt.take(),
Either::Right(ref mut iter) => iter.next(),
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self.inner {
Either::Left(Some(_)) => (1, Some(1)),
Either::Left(None) => (0, Some(0)),
Either::Right(ref iter) => iter.size_hint(),
}
}
}
impl ExactSizeIterator for ConnectTakeAddrsIter {}
impl FusedIterator for ConnectTakeAddrsIter {}
fn parse(host: &str) -> (&str, Option<u16>) {
let mut parts_iter = host.splitn(2, ':');
if let Some(host) = parts_iter.next() {
let port_str = parts_iter.next().unwrap_or("");
if let Ok(port) = port_str.parse::<u16>() {
(host, Some(port))
} else {
(host, None)
}
} else {
(host, None)
}
}
pub struct Connection<T, U> {
io: U,
req: T,
}
impl<T, U> Connection<T, U> {
pub fn new(io: U, req: T) -> Self {
Self { io, req }
}
}
impl<T, U> Connection<T, U> {
/// Reconstruct from parts.
pub fn from_parts(io: U, req: T) -> Self {
Self { io, req }
}
/// Deconstruct into parts.
pub fn into_parts(self) -> (U, T) {
(self.io, self.req)
}
/// Replace the enclosed I/O object, returning the old object and the new `Connection`.
pub fn replace<Y>(self, io: Y) -> (U, Connection<T, Y>) {
(self.io, Connection { io, req: self.req })
}
/// Returns a shared reference to the underlying stream.
pub fn get_ref(&self) -> &U {
&self.io
}
/// Returns a mutable reference to the underlying stream.
pub fn get_mut(&mut self) -> &mut U {
&mut self.io
}
}
impl<T: Address, U> Connection<T, U> {
/// Get request
pub fn host(&self) -> &str {
&self.req.host()
}
}
impl<T, U> std::ops::Deref for Connection<T, U> {
type Target = U;
fn deref(&self) -> &U {
&self.io
}
}
impl<T, U> std::ops::DerefMut for Connection<T, U> {
fn deref_mut(&mut self) -> &mut U {
&mut self.io
}
}
impl<T, U: fmt::Debug> fmt::Debug for Connection<T, U> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Stream {{{:?}}}", self.io)
}
}
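For context, a small sketch of how this (now removed) request type resolved its port in the actix-connect 1.x API; the host values here are hypothetical:

```rust
use actix_connect::Connect;

fn main() {
    // `Connect::new` splits the "host:port" string itself and records the port.
    let req = Connect::new("example.com:8443");
    assert_eq!(req.port(), 8443);

    // With no port in the string, the `set_port` fallback is used.
    let req = Connect::new("example.com").set_port(80);
    assert_eq!(req.port(), 80);
}
```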

View File

@@ -1,171 +0,0 @@
use std::collections::VecDeque;
use std::future::Future;
use std::io;
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_rt::net::TcpStream;
use actix_service::{Service, ServiceFactory};
use futures::future::{err, ok, BoxFuture, Either, FutureExt, Ready};
use super::connect::{Address, Connect, Connection};
use super::error::ConnectError;
/// Tcp connector service factory
#[derive(Debug)]
pub struct TcpConnectorFactory<T>(PhantomData<T>);
impl<T> TcpConnectorFactory<T> {
pub fn new() -> Self {
TcpConnectorFactory(PhantomData)
}
/// Create tcp connector service
pub fn service(&self) -> TcpConnector<T> {
TcpConnector(PhantomData)
}
}
impl<T> Default for TcpConnectorFactory<T> {
fn default() -> Self {
TcpConnectorFactory(PhantomData)
}
}
impl<T> Clone for TcpConnectorFactory<T> {
fn clone(&self) -> Self {
TcpConnectorFactory(PhantomData)
}
}
impl<T: Address> ServiceFactory for TcpConnectorFactory<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = TcpConnector<T>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(self.service())
}
}
/// Tcp connector service
#[derive(Default, Debug)]
pub struct TcpConnector<T>(PhantomData<T>);
impl<T> TcpConnector<T> {
pub fn new() -> Self {
TcpConnector(PhantomData)
}
}
impl<T> Clone for TcpConnector<T> {
fn clone(&self) -> Self {
TcpConnector(PhantomData)
}
}
impl<T: Address> Service for TcpConnector<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Future = Either<TcpConnectorResponse<T>, Ready<Result<Self::Response, Self::Error>>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
let port = req.port();
let Connect { req, addr, .. } = req;
if let Some(addr) = addr {
Either::Left(TcpConnectorResponse::new(req, port, addr))
} else {
error!("TCP connector: got unresolved address");
Either::Right(err(ConnectError::Unresolverd))
}
}
}
#[doc(hidden)]
/// Tcp stream connector response future
pub struct TcpConnectorResponse<T> {
req: Option<T>,
port: u16,
addrs: Option<VecDeque<SocketAddr>>,
stream: Option<BoxFuture<'static, Result<TcpStream, io::Error>>>,
}
impl<T: Address> TcpConnectorResponse<T> {
pub fn new(
req: T,
port: u16,
addr: either::Either<SocketAddr, VecDeque<SocketAddr>>,
) -> TcpConnectorResponse<T> {
trace!(
"TCP connector - connecting to {:?} port:{}",
req.host(),
port
);
match addr {
either::Either::Left(addr) => TcpConnectorResponse {
req: Some(req),
port,
addrs: None,
stream: Some(TcpStream::connect(addr).boxed()),
},
either::Either::Right(addrs) => TcpConnectorResponse {
req: Some(req),
port,
addrs: Some(addrs),
stream: None,
},
}
}
}
impl<T: Address> Future for TcpConnectorResponse<T> {
type Output = Result<Connection<T, TcpStream>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
// connect
loop {
if let Some(new) = this.stream.as_mut() {
match new.as_mut().poll(cx) {
Poll::Ready(Ok(sock)) => {
let req = this.req.take().unwrap();
trace!(
"TCP connector - successfully connected to connecting to {:?} - {:?}",
req.host(), sock.peer_addr()
);
return Poll::Ready(Ok(Connection::new(sock, req)));
}
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(err)) => {
trace!(
"TCP connector - failed to connect to connecting to {:?} port: {}",
this.req.as_ref().unwrap().host(),
this.port,
);
if this.addrs.is_none() || this.addrs.as_ref().unwrap().is_empty() {
return Poll::Ready(Err(err.into()));
}
}
}
}
// try to connect
let addr = this.addrs.as_mut().unwrap().pop_front().unwrap();
this.stream = Some(TcpStream::connect(addr).boxed());
}
}
}

View File

@@ -1,111 +0,0 @@
//! Actix connect - tcp connector service
//!
//! ## Package features
//!
//! * `openssl` - enables ssl support via `openssl` crate
//! * `rustls` - enables ssl support via `rustls` crate
#![deny(rust_2018_idioms, warnings)]
#![allow(clippy::type_complexity)]
#![recursion_limit = "128"]
#[macro_use]
extern crate log;
mod connect;
mod connector;
mod error;
mod resolve;
mod service;
pub mod ssl;
#[cfg(feature = "uri")]
mod uri;
use actix_rt::{net::TcpStream, Arbiter};
use actix_service::{pipeline, pipeline_factory, Service, ServiceFactory};
use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
use trust_dns_resolver::system_conf::read_system_conf;
use trust_dns_resolver::AsyncResolver;
pub mod resolver {
pub use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
pub use trust_dns_resolver::system_conf::read_system_conf;
pub use trust_dns_resolver::{error::ResolveError, AsyncResolver};
}
pub use self::connect::{Address, Connect, Connection};
pub use self::connector::{TcpConnector, TcpConnectorFactory};
pub use self::error::ConnectError;
pub use self::resolve::{Resolver, ResolverFactory};
pub use self::service::{ConnectService, ConnectServiceFactory, TcpConnectService};
pub fn start_resolver(cfg: ResolverConfig, opts: ResolverOpts) -> AsyncResolver {
let (resolver, bg) = AsyncResolver::new(cfg, opts);
actix_rt::spawn(bg);
resolver
}
struct DefaultResolver(AsyncResolver);
pub(crate) fn get_default_resolver() -> AsyncResolver {
if Arbiter::contains_item::<DefaultResolver>() {
Arbiter::get_item(|item: &DefaultResolver| item.0.clone())
} else {
let (cfg, opts) = match read_system_conf() {
Ok((cfg, opts)) => (cfg, opts),
Err(e) => {
log::error!("TRust-DNS can not load system config: {}", e);
(ResolverConfig::default(), ResolverOpts::default())
}
};
let (resolver, bg) = AsyncResolver::new(cfg, opts);
actix_rt::spawn(bg);
Arbiter::set_item(DefaultResolver(resolver.clone()));
resolver
}
}
pub fn start_default_resolver() -> AsyncResolver {
get_default_resolver()
}
/// Create tcp connector service
pub fn new_connector<T: Address>(
resolver: AsyncResolver,
) -> impl Service<Request = Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError>
+ Clone {
pipeline(Resolver::new(resolver)).and_then(TcpConnector::new())
}
/// Create tcp connector service
pub fn new_connector_factory<T: Address>(
resolver: AsyncResolver,
) -> impl ServiceFactory<
Config = (),
Request = Connect<T>,
Response = Connection<T, TcpStream>,
Error = ConnectError,
InitError = (),
> + Clone {
pipeline_factory(ResolverFactory::new(resolver)).and_then(TcpConnectorFactory::new())
}
/// Create connector service with default parameters
pub fn default_connector<T: Address>(
) -> impl Service<Request = Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError>
+ Clone {
pipeline(Resolver::default()).and_then(TcpConnector::new())
}
/// Create connector service factory with default parameters
pub fn default_connector_factory<T: Address>() -> impl ServiceFactory<
Config = (),
Request = Connect<T>,
Response = Connection<T, TcpStream>,
Error = ConnectError,
InitError = (),
> + Clone {
pipeline_factory(ResolverFactory::default()).and_then(TcpConnectorFactory::new())
}

View File

@@ -1,188 +0,0 @@
use std::future::Future;
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory};
use futures::future::{ok, Either, Ready};
use trust_dns_resolver::lookup_ip::LookupIpFuture;
use trust_dns_resolver::{AsyncResolver, Background};
use crate::connect::{Address, Connect};
use crate::error::ConnectError;
use crate::get_default_resolver;
/// DNS Resolver Service factory
pub struct ResolverFactory<T> {
resolver: Option<AsyncResolver>,
_t: PhantomData<T>,
}
impl<T> ResolverFactory<T> {
/// Create new resolver instance with custom configuration and options.
pub fn new(resolver: AsyncResolver) -> Self {
ResolverFactory {
resolver: Some(resolver),
_t: PhantomData,
}
}
pub fn service(&self) -> Resolver<T> {
Resolver {
resolver: self.resolver.clone(),
_t: PhantomData,
}
}
}
impl<T> Default for ResolverFactory<T> {
fn default() -> Self {
ResolverFactory {
resolver: None,
_t: PhantomData,
}
}
}
impl<T> Clone for ResolverFactory<T> {
fn clone(&self) -> Self {
ResolverFactory {
resolver: self.resolver.clone(),
_t: PhantomData,
}
}
}
impl<T: Address> ServiceFactory for ResolverFactory<T> {
type Request = Connect<T>;
type Response = Connect<T>;
type Error = ConnectError;
type Config = ();
type Service = Resolver<T>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(self.service())
}
}
/// DNS Resolver Service
pub struct Resolver<T> {
resolver: Option<AsyncResolver>,
_t: PhantomData<T>,
}
impl<T> Resolver<T> {
/// Create new resolver instance with custom configuration and options.
pub fn new(resolver: AsyncResolver) -> Self {
Resolver {
resolver: Some(resolver),
_t: PhantomData,
}
}
}
impl<T> Default for Resolver<T> {
fn default() -> Self {
Resolver {
resolver: None,
_t: PhantomData,
}
}
}
impl<T> Clone for Resolver<T> {
fn clone(&self) -> Self {
Resolver {
resolver: self.resolver.clone(),
_t: PhantomData,
}
}
}
impl<T: Address> Service for Resolver<T> {
type Request = Connect<T>;
type Response = Connect<T>;
type Error = ConnectError;
type Future = Either<ResolverFuture<T>, Ready<Result<Connect<T>, Self::Error>>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, mut req: Connect<T>) -> Self::Future {
if req.addr.is_some() {
Either::Right(ok(req))
} else if let Ok(ip) = req.host().parse() {
req.addr = Some(either::Either::Left(SocketAddr::new(ip, req.port())));
Either::Right(ok(req))
} else {
trace!("DNS resolver: resolving host {:?}", req.host());
if self.resolver.is_none() {
self.resolver = Some(get_default_resolver());
}
Either::Left(ResolverFuture::new(req, self.resolver.as_ref().unwrap()))
}
}
}
#[doc(hidden)]
/// Resolver future
pub struct ResolverFuture<T: Address> {
req: Option<Connect<T>>,
lookup: Background<LookupIpFuture>,
}
impl<T: Address> ResolverFuture<T> {
pub fn new(req: Connect<T>, resolver: &AsyncResolver) -> Self {
let lookup = if let Some(host) = req.host().splitn(2, ':').next() {
resolver.lookup_ip(host)
} else {
resolver.lookup_ip(req.host())
};
ResolverFuture {
lookup,
req: Some(req),
}
}
}
impl<T: Address> Future for ResolverFuture<T> {
type Output = Result<Connect<T>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
match Pin::new(&mut this.lookup).poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(Ok(ips)) => {
let req = this.req.take().unwrap();
let port = req.port();
let req = req.set_addrs(ips.iter().map(|ip| SocketAddr::new(ip, port)));
trace!(
"DNS resolver: host {:?} resolved to {:?}",
req.host(),
req.addrs()
);
if req.addr.is_none() {
Poll::Ready(Err(ConnectError::NoRecords))
} else {
Poll::Ready(Ok(req))
}
}
Poll::Ready(Err(e)) => {
trace!(
"DNS resolver: failed to resolve host {:?} err: {}",
this.req.as_ref().unwrap().host(),
e
);
Poll::Ready(Err(e.into()))
}
}
}
}

View File

@@ -1,231 +0,0 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_rt::net::TcpStream;
use actix_service::{Service, ServiceFactory};
use either::Either;
use futures::future::{ok, Ready};
use trust_dns_resolver::AsyncResolver;
use crate::connect::{Address, Connect, Connection};
use crate::connector::{TcpConnector, TcpConnectorFactory};
use crate::error::ConnectError;
use crate::resolve::{Resolver, ResolverFactory};
pub struct ConnectServiceFactory<T> {
tcp: TcpConnectorFactory<T>,
resolver: ResolverFactory<T>,
}
impl<T> ConnectServiceFactory<T> {
/// Construct new ConnectService factory
pub fn new() -> Self {
ConnectServiceFactory {
tcp: TcpConnectorFactory::default(),
resolver: ResolverFactory::default(),
}
}
/// Construct new connect service with custom dns resolver
pub fn with_resolver(resolver: AsyncResolver) -> Self {
ConnectServiceFactory {
tcp: TcpConnectorFactory::default(),
resolver: ResolverFactory::new(resolver),
}
}
/// Construct new service
pub fn service(&self) -> ConnectService<T> {
ConnectService {
tcp: self.tcp.service(),
resolver: self.resolver.service(),
}
}
/// Construct new tcp stream service
pub fn tcp_service(&self) -> TcpConnectService<T> {
TcpConnectService {
tcp: self.tcp.service(),
resolver: self.resolver.service(),
}
}
}
impl<T> Default for ConnectServiceFactory<T> {
fn default() -> Self {
ConnectServiceFactory {
tcp: TcpConnectorFactory::default(),
resolver: ResolverFactory::default(),
}
}
}
impl<T> Clone for ConnectServiceFactory<T> {
fn clone(&self) -> Self {
ConnectServiceFactory {
tcp: self.tcp.clone(),
resolver: self.resolver.clone(),
}
}
}
impl<T: Address> ServiceFactory for ConnectServiceFactory<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = ConnectService<T>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(self.service())
}
}
#[derive(Clone)]
pub struct ConnectService<T> {
tcp: TcpConnector<T>,
resolver: Resolver<T>,
}
impl<T: Address> Service for ConnectService<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Future = ConnectServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
ConnectServiceResponse {
state: ConnectState::Resolve(self.resolver.call(req)),
tcp: self.tcp.clone(),
}
}
}
enum ConnectState<T: Address> {
Resolve(<Resolver<T> as Service>::Future),
Connect(<TcpConnector<T> as Service>::Future),
}
impl<T: Address> ConnectState<T> {
fn poll(
&mut self,
cx: &mut Context<'_>,
) -> Either<Poll<Result<Connection<T, TcpStream>, ConnectError>>, Connect<T>> {
match self {
ConnectState::Resolve(ref mut fut) => match Pin::new(fut).poll(cx) {
Poll::Pending => Either::Left(Poll::Pending),
Poll::Ready(Ok(res)) => Either::Right(res),
Poll::Ready(Err(err)) => Either::Left(Poll::Ready(Err(err))),
},
ConnectState::Connect(ref mut fut) => Either::Left(Pin::new(fut).poll(cx)),
}
}
}
pub struct ConnectServiceResponse<T: Address> {
state: ConnectState<T>,
tcp: TcpConnector<T>,
}
impl<T: Address> Future for ConnectServiceResponse<T> {
type Output = Result<Connection<T, TcpStream>, ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let res = match self.state.poll(cx) {
Either::Right(res) => {
self.state = ConnectState::Connect(self.tcp.call(res));
self.state.poll(cx)
}
Either::Left(res) => return res,
};
match res {
Either::Left(res) => res,
Either::Right(_) => panic!(),
}
}
}
#[derive(Clone)]
pub struct TcpConnectService<T> {
tcp: TcpConnector<T>,
resolver: Resolver<T>,
}
impl<T: Address + 'static> Service for TcpConnectService<T> {
type Request = Connect<T>;
type Response = TcpStream;
type Error = ConnectError;
type Future = TcpConnectServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
TcpConnectServiceResponse {
state: TcpConnectState::Resolve(self.resolver.call(req)),
tcp: self.tcp.clone(),
}
}
}
enum TcpConnectState<T: Address> {
Resolve(<Resolver<T> as Service>::Future),
Connect(<TcpConnector<T> as Service>::Future),
}
impl<T: Address> TcpConnectState<T> {
fn poll(
&mut self,
cx: &mut Context<'_>,
) -> Either<Poll<Result<TcpStream, ConnectError>>, Connect<T>> {
match self {
TcpConnectState::Resolve(ref mut fut) => match Pin::new(fut).poll(cx) {
Poll::Pending => (),
Poll::Ready(Ok(res)) => return Either::Right(res),
Poll::Ready(Err(err)) => return Either::Left(Poll::Ready(Err(err))),
},
TcpConnectState::Connect(ref mut fut) => {
if let Poll::Ready(res) = Pin::new(fut).poll(cx) {
return match res {
Ok(conn) => Either::Left(Poll::Ready(Ok(conn.into_parts().0))),
Err(err) => Either::Left(Poll::Ready(Err(err))),
};
}
}
}
Either::Left(Poll::Pending)
}
}
pub struct TcpConnectServiceResponse<T: Address> {
state: TcpConnectState<T>,
tcp: TcpConnector<T>,
}
impl<T: Address> Future for TcpConnectServiceResponse<T> {
type Output = Result<TcpStream, ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let res = match self.state.poll(cx) {
Either::Right(res) => {
self.state = TcpConnectState::Connect(self.tcp.call(res));
self.state.poll(cx)
}
Either::Left(res) => return res,
};
match res {
Either::Left(res) => res,
Either::Right(_) => panic!(),
}
}
}

View File

@@ -1,267 +0,0 @@
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io};
pub use open_ssl::ssl::{Error as SslError, SslConnector, SslMethod};
pub use tokio_openssl::{HandshakeError, SslStream};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::net::TcpStream;
use actix_service::{Service, ServiceFactory};
use futures::future::{err, ok, Either, FutureExt, LocalBoxFuture, Ready};
use trust_dns_resolver::AsyncResolver;
use crate::{
Address, Connect, ConnectError, ConnectService, ConnectServiceFactory, Connection,
};
/// Openssl connector factory
pub struct OpensslConnector<T, U> {
connector: SslConnector,
_t: PhantomData<(T, U)>,
}
impl<T, U> OpensslConnector<T, U> {
pub fn new(connector: SslConnector) -> Self {
OpensslConnector {
connector,
_t: PhantomData,
}
}
}
impl<T, U> OpensslConnector<T, U>
where
T: Address + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
pub fn service(connector: SslConnector) -> OpensslConnectorService<T, U> {
OpensslConnectorService {
connector,
_t: PhantomData,
}
}
}
impl<T, U> Clone for OpensslConnector<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T, U> ServiceFactory for OpensslConnector<T, U>
where
T: Address + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
type Request = Connection<T, U>;
type Response = Connection<T, SslStream<U>>;
type Error = io::Error;
type Config = ();
type Service = OpensslConnectorService<T, U>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(OpensslConnectorService {
connector: self.connector.clone(),
_t: PhantomData,
})
}
}
pub struct OpensslConnectorService<T, U> {
connector: SslConnector,
_t: PhantomData<(T, U)>,
}
impl<T, U> Clone for OpensslConnectorService<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T, U> Service for OpensslConnectorService<T, U>
where
T: Address + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
type Request = Connection<T, U>;
type Response = Connection<T, SslStream<U>>;
type Error = io::Error;
type Future = Either<ConnectAsyncExt<T, U>, Ready<Result<Self::Response, Self::Error>>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, stream: Connection<T, U>) -> Self::Future {
trace!("SSL Handshake start for: {:?}", stream.host());
let (io, stream) = stream.replace(());
let host = stream.host().to_string();
match self.connector.configure() {
Err(e) => Either::Right(err(io::Error::new(io::ErrorKind::Other, e))),
Ok(config) => Either::Left(ConnectAsyncExt {
fut: async move { tokio_openssl::connect(config, &host, io).await }
.boxed_local(),
stream: Some(stream),
_t: PhantomData,
}),
}
}
}
pub struct ConnectAsyncExt<T, U> {
fut: LocalBoxFuture<'static, Result<SslStream<U>, HandshakeError<U>>>,
stream: Option<Connection<T, ()>>,
_t: PhantomData<U>,
}
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
{
type Output = Result<Connection<T, SslStream<U>>, io::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
match Pin::new(&mut this.fut).poll(cx) {
Poll::Ready(Ok(stream)) => {
let s = this.stream.take().unwrap();
trace!("SSL Handshake success: {:?}", s.host());
Poll::Ready(Ok(s.replace(stream).1))
}
Poll::Ready(Err(e)) => {
trace!("SSL Handshake error: {:?}", e);
Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, format!("{}", e))))
}
Poll::Pending => Poll::Pending,
}
}
}
pub struct OpensslConnectServiceFactory<T> {
tcp: ConnectServiceFactory<T>,
openssl: OpensslConnector<T, TcpStream>,
}
impl<T> OpensslConnectServiceFactory<T> {
/// Construct new OpensslConnectService factory
pub fn new(connector: SslConnector) -> Self {
OpensslConnectServiceFactory {
tcp: ConnectServiceFactory::default(),
openssl: OpensslConnector::new(connector),
}
}
/// Construct new connect service with custom dns resolver
pub fn with_resolver(connector: SslConnector, resolver: AsyncResolver) -> Self {
OpensslConnectServiceFactory {
tcp: ConnectServiceFactory::with_resolver(resolver),
openssl: OpensslConnector::new(connector),
}
}
/// Construct openssl connect service
pub fn service(&self) -> OpensslConnectService<T> {
OpensslConnectService {
tcp: self.tcp.service(),
openssl: OpensslConnectorService {
connector: self.openssl.connector.clone(),
_t: PhantomData,
},
}
}
}
impl<T> Clone for OpensslConnectServiceFactory<T> {
fn clone(&self) -> Self {
OpensslConnectServiceFactory {
tcp: self.tcp.clone(),
openssl: self.openssl.clone(),
}
}
}
impl<T: Address + 'static> ServiceFactory for OpensslConnectServiceFactory<T> {
type Request = Connect<T>;
type Response = SslStream<TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = OpensslConnectService<T>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(self.service())
}
}
#[derive(Clone)]
pub struct OpensslConnectService<T> {
tcp: ConnectService<T>,
openssl: OpensslConnectorService<T, TcpStream>,
}
impl<T: Address + 'static> Service for OpensslConnectService<T> {
type Request = Connect<T>;
type Response = SslStream<TcpStream>;
type Error = ConnectError;
type Future = OpensslConnectServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
OpensslConnectServiceResponse {
fut1: Some(self.tcp.call(req)),
fut2: None,
openssl: self.openssl.clone(),
}
}
}
pub struct OpensslConnectServiceResponse<T: Address + 'static> {
fut1: Option<<ConnectService<T> as Service>::Future>,
fut2: Option<<OpensslConnectorService<T, TcpStream> as Service>::Future>,
openssl: OpensslConnectorService<T, TcpStream>,
}
impl<T: Address> Future for OpensslConnectServiceResponse<T> {
type Output = Result<SslStream<TcpStream>, ConnectError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if let Some(ref mut fut) = self.fut1 {
match futures::ready!(Pin::new(fut).poll(cx)) {
Ok(res) => {
let _ = self.fut1.take();
self.fut2 = Some(self.openssl.call(res));
}
Err(e) => return Poll::Ready(Err(e)),
}
}
if let Some(ref mut fut) = self.fut2 {
match futures::ready!(Pin::new(fut).poll(cx)) {
Ok(connect) => Poll::Ready(Ok(connect.into_parts().0)),
Err(e) => Poll::Ready(Err(ConnectError::Io(io::Error::new(
io::ErrorKind::Other,
e,
)))),
}
} else {
Poll::Pending
}
}
}

View File

@@ -1,136 +0,0 @@
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
pub use rust_tls::Session;
pub use tokio_rustls::{client::TlsStream, rustls::ClientConfig};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_service::{Service, ServiceFactory};
use futures::future::{ok, Ready};
use tokio_rustls::{Connect, TlsConnector};
use webpki::DNSNameRef;
use crate::{Address, Connection};
/// Rustls connector factory
pub struct RustlsConnector<T, U> {
connector: Arc<ClientConfig>,
_t: PhantomData<(T, U)>,
}
impl<T, U> RustlsConnector<T, U> {
pub fn new(connector: Arc<ClientConfig>) -> Self {
RustlsConnector {
connector,
_t: PhantomData,
}
}
}
impl<T, U> RustlsConnector<T, U>
where
T: Address,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
pub fn service(connector: Arc<ClientConfig>) -> RustlsConnectorService<T, U> {
RustlsConnectorService {
connector: connector,
_t: PhantomData,
}
}
}
impl<T, U> Clone for RustlsConnector<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T: Address, U> ServiceFactory for RustlsConnector<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
type Request = Connection<T, U>;
type Response = Connection<T, TlsStream<U>>;
type Error = std::io::Error;
type Config = ();
type Service = RustlsConnectorService<T, U>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(RustlsConnectorService {
connector: self.connector.clone(),
_t: PhantomData,
})
}
}
pub struct RustlsConnectorService<T, U> {
connector: Arc<ClientConfig>,
_t: PhantomData<(T, U)>,
}
impl<T, U> Clone for RustlsConnectorService<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T: Address, U> Service for RustlsConnectorService<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
type Request = Connection<T, U>;
type Response = Connection<T, TlsStream<U>>;
type Error = std::io::Error;
type Future = ConnectAsyncExt<T, U>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, stream: Connection<T, U>) -> Self::Future {
trace!("SSL Handshake start for: {:?}", stream.host());
let (io, stream) = stream.replace(());
let host = DNSNameRef::try_from_ascii_str(stream.host())
.expect("rustls currently only handles hostname-based connections. See https://github.com/briansmith/webpki/issues/54");
ConnectAsyncExt {
fut: TlsConnector::from(self.connector.clone()).connect(host, io),
stream: Some(stream),
}
}
}
pub struct ConnectAsyncExt<T, U> {
fut: Connect<U>,
stream: Option<Connection<T, ()>>,
}
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
{
type Output = Result<Connection<T, TlsStream<U>>, std::io::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
Poll::Ready(
futures::ready!(Pin::new(&mut this.fut).poll(cx)).map(|stream| {
let s = this.stream.take().unwrap();
trace!("SSL Handshake success: {:?}", s.host());
s.replace(stream).1
}),
)
}
}


@@ -1,37 +0,0 @@
use http::Uri;
use crate::Address;
impl Address for Uri {
fn host(&self) -> &str {
self.host().unwrap_or("")
}
fn port(&self) -> Option<u16> {
if let Some(port) = self.port_u16() {
Some(port)
} else {
port(self.scheme_str())
}
}
}
// TODO: load data from file
fn port(scheme: Option<&str>) -> Option<u16> {
if let Some(scheme) = scheme {
match scheme {
"http" => Some(80),
"https" => Some(443),
"ws" => Some(80),
"wss" => Some(443),
"amqp" => Some(5672),
"amqps" => Some(5671),
"sb" => Some(5671),
"mqtt" => Some(1883),
"mqtts" => Some(8883),
_ => None,
}
} else {
None
}
}


@@ -1,137 +0,0 @@
use std::io;
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use actix_service::{fn_service, Service, ServiceFactory};
use actix_testing::TestServer;
use bytes::Bytes;
use futures::SinkExt;
use actix_connect::resolver::{ResolverConfig, ResolverOpts};
use actix_connect::Connect;
#[cfg(feature = "openssl")]
#[actix_rt::test]
async fn test_string() {
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let mut conn = actix_connect::default_connector();
let addr = format!("localhost:{}", srv.port());
let con = conn.call(addr.into()).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[cfg(feature = "rustls")]
#[actix_rt::test]
async fn test_rustls_string() {
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let mut conn = actix_connect::default_connector();
let addr = format!("localhost:{}", srv.port());
let con = conn.call(addr.into()).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[actix_rt::test]
async fn test_static_str() {
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let resolver = actix_connect::start_default_resolver();
let mut conn = actix_connect::new_connector(resolver.clone());
let con = conn.call(Connect::with("10", srv.addr())).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
let connect = Connect::new(srv.host().to_owned());
let mut conn = actix_connect::new_connector(resolver);
let con = conn.call(connect).await;
assert!(con.is_err());
}
#[actix_rt::test]
async fn test_new_service() {
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let resolver =
actix_connect::start_resolver(ResolverConfig::default(), ResolverOpts::default());
let factory = actix_connect::new_connector_factory(resolver);
let mut conn = factory.new_service(()).await.unwrap();
let con = conn.call(Connect::with("10", srv.addr())).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[cfg(feature = "openssl")]
#[actix_rt::test]
async fn test_uri() {
use std::convert::TryFrom;
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let mut conn = actix_connect::default_connector();
let addr = http::Uri::try_from(format!("https://localhost:{}", srv.port())).unwrap();
let con = conn.call(addr.into()).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[cfg(feature = "rustls")]
#[actix_rt::test]
async fn test_rustls_uri() {
use std::convert::TryFrom;
let srv = TestServer::with(|| {
fn_service(|io: TcpStream| {
async {
let mut framed = Framed::new(io, BytesCodec);
framed.send(Bytes::from_static(b"test")).await?;
Ok::<_, io::Error>(())
}
})
});
let mut conn = actix_connect::default_connector();
let addr = http::Uri::try_from(format!("https://localhost:{}", srv.port())).unwrap();
let con = conn.call(addr.into()).await.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}


@@ -1,25 +0,0 @@
# Changes
## [0.4.1] - 2019-12-11
* Disconnect callback accepts owned state
## [0.4.0] - 2019-12-11
* Remove `E` param
## [0.3.0-alpha.3] - 2019-12-07
* Migrate to tokio 0.2
## [0.3.0-alpha.2] - 2019-12-02
* Migrate to `std::future`
## [0.1.1] - 2019-10-14
* Re-register task on every dispatcher poll.
## [0.1.0] - 2019-09-25
* Initial release


@@ -1,32 +0,0 @@
[package]
name = "actix-ioframe"
version = "0.4.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix framed service"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-ioframe/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_ioframe"
path = "src/lib.rs"
[dependencies]
actix-service = "1.0.0"
actix-codec = "0.2.0"
actix-utils = "1.0.1"
actix-rt = "1.0.0"
bytes = "0.5"
either = "1.5.2"
futures = "0.3.1"
pin-project = "0.4.6"
log = "0.4"
[dev-dependencies]
actix-connect = "1.0.0"
actix-testing = "1.0.0"


@@ -1,115 +0,0 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use futures::Stream;
use crate::sink::Sink;
pub struct Connect<Io, Codec, St = ()>
where
Codec: Encoder + Decoder,
{
io: Io,
sink: Sink<<Codec as Encoder>::Item>,
_t: PhantomData<(St, Codec)>,
}
impl<Io, Codec> Connect<Io, Codec>
where
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
{
pub(crate) fn new(io: Io, sink: Sink<<Codec as Encoder>::Item>) -> Self {
Self {
io,
sink,
_t: PhantomData,
}
}
pub fn codec(self, codec: Codec) -> ConnectResult<Io, (), Codec> {
ConnectResult {
state: (),
sink: self.sink,
framed: Framed::new(self.io, codec),
}
}
}
#[pin_project::pin_project]
pub struct ConnectResult<Io, St, Codec: Encoder + Decoder> {
pub(crate) state: St,
pub(crate) framed: Framed<Io, Codec>,
pub(crate) sink: Sink<<Codec as Encoder>::Item>,
}
impl<Io, St, Codec: Encoder + Decoder> ConnectResult<Io, St, Codec> {
#[inline]
pub fn sink(&self) -> &Sink<<Codec as Encoder>::Item> {
&self.sink
}
#[inline]
pub fn get_ref(&self) -> &Io {
self.framed.get_ref()
}
#[inline]
pub fn get_mut(&mut self) -> &mut Io {
self.framed.get_mut()
}
#[inline]
pub fn state<S>(self, state: S) -> ConnectResult<Io, S, Codec> {
ConnectResult {
state,
framed: self.framed,
sink: self.sink,
}
}
}
impl<Io, St, Codec> Stream for ConnectResult<Io, St, Codec>
where
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
{
type Item = Result<<Codec as Decoder>::Item, <Codec as Decoder>::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.project().framed.next_item(cx)
}
}
impl<Io, St, Codec> futures::Sink<<Codec as Encoder>::Item> for ConnectResult<Io, St, Codec>
where
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
{
type Error = <Codec as Encoder>::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.framed.is_write_ready() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn start_send(
self: Pin<&mut Self>,
item: <Codec as Encoder>::Item,
) -> Result<(), Self::Error> {
self.project().framed.write(item)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.get_mut().framed.flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.get_mut().framed.close(cx)
}
}


@@ -1,286 +0,0 @@
//! Framed dispatcher service and related utilities
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use actix_service::{IntoService, Service};
use actix_utils::{mpsc, oneshot};
use futures::{FutureExt, Stream};
use log::debug;
use crate::error::ServiceError;
use crate::item::Item;
use crate::sink::Sink;
type Request<S, U> = Item<S, U>;
type Response<U> = <U as Encoder>::Item;
pub(crate) enum Message<T> {
Item(T),
WaitClose(oneshot::Sender<()>),
Close,
}
/// Dispatcher is a future that reads frames from the Framed object
/// and passes them to the service.
#[pin_project::pin_project]
pub(crate) struct Dispatcher<St, S, T, U>
where
St: Clone,
S: Service<Request = Request<St, U>, Response = Option<Response<U>>>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Encoder + Decoder,
<U as Encoder>::Item: 'static,
<U as Encoder>::Error: std::fmt::Debug,
{
service: S,
sink: Sink<<U as Encoder>::Item>,
state: St,
dispatch_state: FramedState<S, U>,
framed: Framed<T, U>,
rx: mpsc::Receiver<Result<Message<<U as Encoder>::Item>, S::Error>>,
tx: mpsc::Sender<Result<Message<<U as Encoder>::Item>, S::Error>>,
disconnect: Option<Rc<dyn Fn(St, bool)>>,
}
impl<St, S, T, U> Dispatcher<St, S, T, U>
where
St: Clone,
S: Service<Request = Request<St, U>, Response = Option<Response<U>>>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder,
<U as Encoder>::Item: 'static,
<U as Encoder>::Error: std::fmt::Debug,
{
pub(crate) fn new<F: IntoService<S>>(
framed: Framed<T, U>,
state: St,
service: F,
sink: Sink<<U as Encoder>::Item>,
rx: mpsc::Receiver<Result<Message<<U as Encoder>::Item>, S::Error>>,
disconnect: Option<Rc<dyn Fn(St, bool)>>,
) -> Self {
let tx = rx.sender();
Dispatcher {
framed,
state,
sink,
disconnect,
rx,
tx,
service: service.into_service(),
dispatch_state: FramedState::Processing,
}
}
}
enum FramedState<S: Service, U: Encoder + Decoder> {
Processing,
Error(ServiceError<S::Error, U>),
FramedError(ServiceError<S::Error, U>),
FlushAndStop(Vec<oneshot::Sender<()>>),
Stopping,
}
impl<S: Service, U: Encoder + Decoder> FramedState<S, U> {
fn stop(&mut self, tx: Option<oneshot::Sender<()>>) {
match self {
FramedState::FlushAndStop(ref mut vec) => {
if let Some(tx) = tx {
vec.push(tx)
}
}
FramedState::Processing => {
*self = FramedState::FlushAndStop(if let Some(tx) = tx {
vec![tx]
} else {
Vec::new()
})
}
FramedState::Error(_) | FramedState::FramedError(_) | FramedState::Stopping => {
if let Some(tx) = tx {
let _ = tx.send(());
}
}
}
}
fn take_error(&mut self) -> ServiceError<S::Error, U> {
match std::mem::replace(self, FramedState::Processing) {
FramedState::Error(err) => err,
_ => panic!(),
}
}
fn take_framed_error(&mut self) -> ServiceError<S::Error, U> {
match std::mem::replace(self, FramedState::Processing) {
FramedState::FramedError(err) => err,
_ => panic!(),
}
}
}
impl<St, S, T, U> Dispatcher<St, S, T, U>
where
St: Clone,
S: Service<Request = Request<St, U>, Response = Option<Response<U>>>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder,
<U as Encoder>::Item: 'static,
<U as Encoder>::Error: std::fmt::Debug,
{
fn poll_read(&mut self, cx: &mut Context<'_>) -> bool {
loop {
match self.service.poll_ready(cx) {
Poll::Ready(Ok(_)) => {
let item = match self.framed.next_item(cx) {
Poll::Ready(Some(Ok(el))) => el,
Poll::Ready(Some(Err(err))) => {
self.dispatch_state =
FramedState::FramedError(ServiceError::Decoder(err));
return true;
}
Poll::Pending => return false,
Poll::Ready(None) => {
log::trace!("Client disconnected");
self.dispatch_state = FramedState::Stopping;
return true;
}
};
let tx = self.tx.clone();
actix_rt::spawn(
self.service
.call(Item::new(self.state.clone(), self.sink.clone(), item))
.map(move |item| {
let item = match item {
Ok(Some(item)) => Ok(Message::Item(item)),
Ok(None) => return,
Err(err) => Err(err),
};
let _ = tx.send(item);
}),
);
}
Poll::Pending => return false,
Poll::Ready(Err(err)) => {
self.dispatch_state = FramedState::Error(ServiceError::Service(err));
return true;
}
}
}
}
/// write to framed object
fn poll_write(&mut self, cx: &mut Context<'_>) -> bool {
loop {
while !self.framed.is_write_buf_full() {
match Pin::new(&mut self.rx).poll_next(cx) {
Poll::Ready(Some(Ok(Message::Item(msg)))) => {
if let Err(err) = self.framed.write(msg) {
self.dispatch_state =
FramedState::FramedError(ServiceError::Encoder(err));
return true;
}
}
Poll::Ready(Some(Ok(Message::Close))) => {
self.dispatch_state.stop(None);
return true;
}
Poll::Ready(Some(Ok(Message::WaitClose(tx)))) => {
self.dispatch_state.stop(Some(tx));
return true;
}
Poll::Ready(Some(Err(err))) => {
self.dispatch_state = FramedState::Error(ServiceError::Service(err));
return true;
}
Poll::Ready(None) | Poll::Pending => break,
}
}
if !self.framed.is_write_buf_empty() {
match self.framed.flush(cx) {
Poll::Pending => break,
Poll::Ready(Ok(_)) => (),
Poll::Ready(Err(err)) => {
debug!("Error sending data: {:?}", err);
self.dispatch_state =
FramedState::FramedError(ServiceError::Encoder(err));
return true;
}
}
} else {
break;
}
}
false
}
pub(crate) fn poll(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Result<(), ServiceError<S::Error, U>>> {
match self.dispatch_state {
FramedState::Processing => {
if self.poll_read(cx) || self.poll_write(cx) {
self.poll(cx)
} else {
Poll::Pending
}
}
FramedState::Error(_) => {
// flush write buffer
if !self.framed.is_write_buf_empty() {
if let Poll::Pending = self.framed.flush(cx) {
return Poll::Pending;
}
}
if let Some(ref disconnect) = self.disconnect {
(&*disconnect)(self.state.clone(), true);
}
Poll::Ready(Err(self.dispatch_state.take_error()))
}
FramedState::FlushAndStop(ref mut vec) => {
if !self.framed.is_write_buf_empty() {
match self.framed.flush(cx) {
Poll::Ready(Err(err)) => {
debug!("Error sending data: {:?}", err);
}
Poll::Pending => {
return Poll::Pending;
}
Poll::Ready(_) => (),
}
};
for tx in vec.drain(..) {
let _ = tx.send(());
}
if let Some(ref disconnect) = self.disconnect {
(&*disconnect)(self.state.clone(), false);
}
Poll::Ready(Ok(()))
}
FramedState::FramedError(_) => {
if let Some(ref disconnect) = self.disconnect {
(&*disconnect)(self.state.clone(), true);
}
Poll::Ready(Err(self.dispatch_state.take_framed_error()))
}
FramedState::Stopping => {
if let Some(ref disconnect) = self.disconnect {
(&*disconnect)(self.state.clone(), false);
}
Poll::Ready(Ok(()))
}
}
}
}


@@ -1,49 +0,0 @@
use std::fmt;
use actix_codec::{Decoder, Encoder};
/// Framed service errors
pub enum ServiceError<E, U: Encoder + Decoder> {
/// Inner service error
Service(E),
/// Encoder parse error
Encoder(<U as Encoder>::Error),
/// Decoder parse error
Decoder(<U as Decoder>::Error),
}
impl<E, U: Encoder + Decoder> From<E> for ServiceError<E, U> {
fn from(err: E) -> Self {
ServiceError::Service(err)
}
}
impl<E, U: Encoder + Decoder> fmt::Debug for ServiceError<E, U>
where
E: fmt::Debug,
<U as Encoder>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
ServiceError::Service(ref e) => write!(fmt, "ServiceError::Service({:?})", e),
ServiceError::Encoder(ref e) => write!(fmt, "ServiceError::Encoder({:?})", e),
ServiceError::Decoder(ref e) => write!(fmt, "ServiceError::Decoder({:?})", e),
}
}
}
impl<E, U: Encoder + Decoder> fmt::Display for ServiceError<E, U>
where
E: fmt::Display,
<U as Encoder>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
ServiceError::Service(ref e) => write!(fmt, "{}", e),
ServiceError::Encoder(ref e) => write!(fmt, "{:?}", e),
ServiceError::Decoder(ref e) => write!(fmt, "{:?}", e),
}
}
}


@@ -1,82 +0,0 @@
use std::fmt;
use std::ops::{Deref, DerefMut};
use actix_codec::{Decoder, Encoder};
use crate::sink::Sink;
pub struct Item<St, Codec: Encoder + Decoder> {
state: St,
sink: Sink<<Codec as Encoder>::Item>,
item: <Codec as Decoder>::Item,
}
impl<St, Codec> Item<St, Codec>
where
Codec: Encoder + Decoder,
{
pub(crate) fn new(
state: St,
sink: Sink<<Codec as Encoder>::Item>,
item: <Codec as Decoder>::Item,
) -> Self {
Item { state, sink, item }
}
#[inline]
pub fn state(&self) -> &St {
&self.state
}
#[inline]
pub fn state_mut(&mut self) -> &mut St {
&mut self.state
}
#[inline]
pub fn sink(&self) -> &Sink<<Codec as Encoder>::Item> {
&self.sink
}
#[inline]
pub fn into_inner(self) -> <Codec as Decoder>::Item {
self.item
}
#[inline]
pub fn into_parts(self) -> (St, Sink<<Codec as Encoder>::Item>, <Codec as Decoder>::Item) {
(self.state, self.sink, self.item)
}
}
impl<St, Codec> Deref for Item<St, Codec>
where
Codec: Encoder + Decoder,
{
type Target = <Codec as Decoder>::Item;
#[inline]
fn deref(&self) -> &<Codec as Decoder>::Item {
&self.item
}
}
impl<St, Codec> DerefMut for Item<St, Codec>
where
Codec: Encoder + Decoder,
{
#[inline]
fn deref_mut(&mut self) -> &mut <Codec as Decoder>::Item {
&mut self.item
}
}
impl<St, Codec> fmt::Debug for Item<St, Codec>
where
Codec: Encoder + Decoder,
<Codec as Decoder>::Item: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Item").field(&self.item).finish()
}
}


@@ -1,15 +0,0 @@
#![deny(rust_2018_idioms, warnings)]
#![allow(clippy::type_complexity, clippy::too_many_arguments)]
mod connect;
mod dispatcher;
mod error;
mod item;
mod service;
mod sink;
pub use self::connect::{Connect, ConnectResult};
pub use self::error::ServiceError;
pub use self::item::Item;
pub use self::service::{Builder, NewServiceBuilder, ServiceBuilder};
pub use self::sink::Sink;


@@ -1,440 +0,0 @@
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder};
use actix_service::{IntoService, IntoServiceFactory, Service, ServiceFactory};
use actix_utils::mpsc;
use either::Either;
use futures::future::{FutureExt, LocalBoxFuture};
use pin_project::project;
use crate::connect::{Connect, ConnectResult};
use crate::dispatcher::{Dispatcher, Message};
use crate::error::ServiceError;
use crate::item::Item;
use crate::sink::Sink;
type RequestItem<S, U> = Item<S, U>;
type ResponseItem<U> = Option<<U as Encoder>::Item>;
type ServiceResult<U, E> = Result<Message<<U as Encoder>::Item>, E>;
/// Service builder - structure that follows the builder pattern
/// for building instances for framed services.
pub struct Builder<St, Codec>(PhantomData<(St, Codec)>);
impl<St: Clone, Codec> Default for Builder<St, Codec> {
fn default() -> Builder<St, Codec> {
Builder::new()
}
}
impl<St: Clone, Codec> Builder<St, Codec> {
pub fn new() -> Builder<St, Codec> {
Builder(PhantomData)
}
/// Construct framed handler service with specified connect service
pub fn service<Io, C, F>(self, connect: F) -> ServiceBuilder<St, C, Io, Codec>
where
F: IntoService<C>,
Io: AsyncRead + AsyncWrite,
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec>>,
Codec: Decoder + Encoder,
{
ServiceBuilder {
connect: connect.into_service(),
disconnect: None,
_t: PhantomData,
}
}
/// Construct framed handler new service with specified connect service
pub fn factory<Io, C, F>(self, connect: F) -> NewServiceBuilder<St, C, Io, Codec>
where
F: IntoServiceFactory<C>,
Io: AsyncRead + AsyncWrite,
C: ServiceFactory<
Config = (),
Request = Connect<Io, Codec>,
Response = ConnectResult<Io, St, Codec>,
>,
C::Error: 'static,
C::Future: 'static,
Codec: Decoder + Encoder,
{
NewServiceBuilder {
connect: connect.into_factory(),
disconnect: None,
_t: PhantomData,
}
}
}
pub struct ServiceBuilder<St, C, Io, Codec> {
connect: C,
disconnect: Option<Rc<dyn Fn(St, bool)>>,
_t: PhantomData<(St, Io, Codec)>,
}
impl<St, C, Io, Codec> ServiceBuilder<St, C, Io, Codec>
where
St: Clone,
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec>>,
Io: AsyncRead + AsyncWrite,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
/// Callback to execute on disconnect
///
/// Second parameter indicates whether an error occurred during disconnect.
pub fn disconnect<F, Out>(mut self, disconnect: F) -> Self
where
F: Fn(St, bool) + 'static,
{
self.disconnect = Some(Rc::new(disconnect));
self
}
/// Provide stream items handler service and construct service factory.
pub fn finish<F, T>(self, service: F) -> FramedServiceImpl<St, C, T, Io, Codec>
where
F: IntoServiceFactory<T>,
T: ServiceFactory<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
{
FramedServiceImpl {
connect: self.connect,
handler: Rc::new(service.into_factory()),
disconnect: self.disconnect.clone(),
_t: PhantomData,
}
}
}
pub struct NewServiceBuilder<St, C, Io, Codec> {
connect: C,
disconnect: Option<Rc<dyn Fn(St, bool)>>,
_t: PhantomData<(St, Io, Codec)>,
}
impl<St, C, Io, Codec> NewServiceBuilder<St, C, Io, Codec>
where
St: Clone,
Io: AsyncRead + AsyncWrite,
C: ServiceFactory<
Config = (),
Request = Connect<Io, Codec>,
Response = ConnectResult<Io, St, Codec>,
>,
C::Error: 'static,
C::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
/// Callback to execute on disconnect
///
/// Second parameter indicates whether an error occurred during disconnect.
pub fn disconnect<F>(mut self, disconnect: F) -> Self
where
F: Fn(St, bool) + 'static,
{
self.disconnect = Some(Rc::new(disconnect));
self
}
pub fn finish<F, T, Cfg>(self, service: F) -> FramedService<St, C, T, Io, Codec, Cfg>
where
F: IntoServiceFactory<T>,
T: ServiceFactory<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
> + 'static,
{
FramedService {
connect: self.connect,
handler: Rc::new(service.into_factory()),
disconnect: self.disconnect,
_t: PhantomData,
}
}
}
pub struct FramedService<St, C, T, Io, Codec, Cfg> {
connect: C,
handler: Rc<T>,
disconnect: Option<Rc<dyn Fn(St, bool)>>,
_t: PhantomData<(St, Io, Codec, Cfg)>,
}
impl<St, C, T, Io, Codec, Cfg> ServiceFactory for FramedService<St, C, T, Io, Codec, Cfg>
where
St: Clone + 'static,
Io: AsyncRead + AsyncWrite,
C: ServiceFactory<
Config = (),
Request = Connect<Io, Codec>,
Response = ConnectResult<Io, St, Codec>,
>,
C::Error: 'static,
C::Future: 'static,
T: ServiceFactory<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
> + 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
type Config = Cfg;
type Request = Io;
type Response = ();
type Error = ServiceError<C::Error, Codec>;
type InitError = C::InitError;
type Service = FramedServiceImpl<St, C::Service, T, Io, Codec>;
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: Cfg) -> Self::Future {
let handler = self.handler.clone();
let disconnect = self.disconnect.clone();
// create connect service and then create service impl
self.connect
.new_service(())
.map(move |result| {
result.map(move |connect| FramedServiceImpl {
connect,
handler,
disconnect,
_t: PhantomData,
})
})
.boxed_local()
}
}
pub struct FramedServiceImpl<St, C, T, Io, Codec> {
connect: C,
handler: Rc<T>,
disconnect: Option<Rc<dyn Fn(St, bool)>>,
_t: PhantomData<(St, Io, Codec)>,
}
impl<St, C, T, Io, Codec> Service for FramedServiceImpl<St, C, T, Io, Codec>
where
St: Clone,
Io: AsyncRead + AsyncWrite,
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
T: ServiceFactory<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
type Request = Io;
type Response = ();
type Error = ServiceError<C::Error, Codec>;
type Future = FramedServiceImplResponse<St, Io, Codec, C, T>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.connect.poll_ready(cx).map_err(|e| e.into())
}
fn call(&mut self, req: Io) -> Self::Future {
let (tx, rx) = mpsc::channel();
let sink = Sink::new(Rc::new(move |msg| {
let _ = tx.send(Ok(msg));
}));
FramedServiceImplResponse {
inner: FramedServiceImplResponseInner::Connect(
self.connect.call(Connect::new(req, sink.clone())),
self.handler.clone(),
self.disconnect.clone(),
Some(rx),
),
}
}
}
#[pin_project::pin_project]
pub struct FramedServiceImplResponse<St, Io, Codec, C, T>
where
St: Clone,
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
T: ServiceFactory<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
#[pin]
inner: FramedServiceImplResponseInner<St, Io, Codec, C, T>,
}
impl<St, Io, Codec, C, T> Future for FramedServiceImplResponse<St, Io, Codec, C, T>
where
St: Clone,
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
T: ServiceFactory<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
type Output = Result<(), ServiceError<C::Error, Codec>>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
loop {
match this.inner.poll(cx) {
Either::Left(new) => {
this = self.as_mut().project();
this.inner.set(new)
}
Either::Right(poll) => return poll,
};
}
}
}
#[pin_project::pin_project]
enum FramedServiceImplResponseInner<St, Io, Codec, C, T>
where
St: Clone,
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
T: ServiceFactory<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
Connect(
#[pin] C::Future,
Rc<T>,
Option<Rc<dyn Fn(St, bool)>>,
Option<mpsc::Receiver<ServiceResult<Codec, C::Error>>>,
),
Handler(
#[pin] T::Future,
Option<ConnectResult<Io, St, Codec>>,
Option<Rc<dyn Fn(St, bool)>>,
Option<mpsc::Receiver<ServiceResult<Codec, C::Error>>>,
),
Dispatcher(Dispatcher<St, T::Service, Io, Codec>),
}
impl<St, Io, Codec, C, T> FramedServiceImplResponseInner<St, Io, Codec, C, T>
where
St: Clone,
C: Service<Request = Connect<Io, Codec>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
T: ServiceFactory<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
#[project]
fn poll(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Either<
FramedServiceImplResponseInner<St, Io, Codec, C, T>,
Poll<Result<(), ServiceError<C::Error, Codec>>>,
> {
#[project]
match self.project() {
FramedServiceImplResponseInner::Connect(fut, handler, disconnect, rx) => {
match fut.poll(cx) {
Poll::Ready(Ok(res)) => {
Either::Left(FramedServiceImplResponseInner::Handler(
handler.new_service(res.state.clone()),
Some(res),
disconnect.take(),
rx.take(),
))
}
Poll::Pending => Either::Right(Poll::Pending),
Poll::Ready(Err(e)) => Either::Right(Poll::Ready(Err(e.into()))),
}
}
FramedServiceImplResponseInner::Handler(fut, res, disconnect, rx) => {
match fut.poll(cx) {
Poll::Ready(Ok(handler)) => {
let res = res.take().unwrap();
Either::Left(FramedServiceImplResponseInner::Dispatcher(
Dispatcher::new(
res.framed,
res.state,
handler,
res.sink,
rx.take().unwrap(),
disconnect.take(),
),
))
}
Poll::Pending => Either::Right(Poll::Pending),
Poll::Ready(Err(e)) => Either::Right(Poll::Ready(Err(e.into()))),
}
}
FramedServiceImplResponseInner::Dispatcher(ref mut fut) => {
Either::Right(fut.poll(cx))
}
}
}
}


@@ -1,45 +0,0 @@
use std::fmt;
use std::rc::Rc;
use actix_utils::oneshot;
use futures::future::{Future, FutureExt};
use crate::dispatcher::Message;
pub struct Sink<T>(Rc<dyn Fn(Message<T>)>);
impl<T> Clone for Sink<T> {
fn clone(&self) -> Self {
Sink(self.0.clone())
}
}
impl<T> Sink<T> {
pub(crate) fn new(tx: Rc<dyn Fn(Message<T>)>) -> Self {
Sink(tx)
}
/// Close connection
pub fn close(&self) {
(self.0)(Message::Close);
}
/// Close connection
pub fn wait_close(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
(self.0)(Message::WaitClose(tx));
rx.map(|_| ())
}
/// Send item
pub fn send(&self, item: T) {
(self.0)(Message::Item(item));
}
}
impl<T> fmt::Debug for Sink<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Sink").finish()
}
}


@@ -1,52 +0,0 @@
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use actix_codec::BytesCodec;
use actix_rt::time::delay_for;
use actix_service::{fn_service, Service};
use actix_testing::TestServer;
use futures::future::ok;
use actix_ioframe::{Builder, Connect};
#[derive(Clone)]
struct State;
#[actix_rt::test]
async fn test_disconnect() -> std::io::Result<()> {
let disconnect = Arc::new(AtomicBool::new(false));
let disconnect1 = disconnect.clone();
let srv = TestServer::with(move || {
let disconnect1 = disconnect1.clone();
Builder::new()
.factory(fn_service(|conn: Connect<_, _>| {
ok(conn.codec(BytesCodec).state(State))
}))
.disconnect(move |_, _| {
disconnect1.store(true, Ordering::Relaxed);
})
.finish(fn_service(|_t| ok(None)))
});
let mut client = Builder::new()
.service(|conn: Connect<_, _>| {
let conn = conn.codec(BytesCodec).state(State);
conn.sink().close();
ok(conn)
})
.finish(fn_service(|_t| ok(None)));
let conn = actix_connect::default_connector()
.call(actix_connect::Connect::with(String::new(), srv.addr()))
.await
.unwrap();
client.call(conn.into_parts().0).await.unwrap();
let _ = delay_for(Duration::from_millis(100)).await;
assert!(disconnect.load(Ordering::Relaxed));
Ok(())
}

actix-macros/.gitignore

@@ -0,0 +1 @@
/wip

actix-macros/CHANGES.md

@@ -0,0 +1,33 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.2.1 - 2021-02-02
* Add optional argument `system` to `main` macro which can be used to specify the path to `actix_rt::System` (useful for re-exports). [#363]
[#363]: https://github.com/actix/actix-net/pull/363
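For reference, a minimal sketch of the `system` argument in use (the `my_system` module is a made-up re-export for illustration; it mirrors the `main-04-system-path.rs` trybuild case further below):

```rust
// Hypothetical facade module that re-exports the Actix system type.
mod my_system {
    pub use actix_rt::System;
}

// Point the macro at the re-exported path instead of the default `::actix_rt::System`.
#[actix_rt::main(system = "my_system::System")]
async fn main() {
    println!("running on a System referenced through a re-exported path");
}
```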
## 0.2.0 - 2021-02-02
* Update to latest `actix_rt::System::new` signature. [#261]
[#261]: https://github.com/actix/actix-net/pull/261
## 0.2.0-beta.1 - 2021-01-09
* Remove `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.3 - 2020-12-03
* Add `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.2 - 2020-05-18
* Forward actix_rt::test arguments to test function [#127]
[#127]: https://github.com/actix/actix-net/pull/127


@@ -1,21 +1,25 @@
[package] [package]
name = "actix-macros" name = "actix-macros"
version = "0.1.1" version = "0.2.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = [
description = "Actix runtime macros" "Nikolay Kim <fafhrd91@gmail.com>",
"Ibraheem Ahmed <ibrah1440@gmail.com>",
]
description = "Macros for Actix system and runtime"
repository = "https://github.com/actix/actix-net" repository = "https://github.com/actix/actix-net"
documentation = "https://docs.rs/actix-macros/"
categories = ["network-programming", "asynchronous"] categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0" license = "MIT OR Apache-2.0"
edition = "2018" edition = "2018"
workspace = ".."
[lib] [lib]
proc-macro = true proc-macro = true
[dependencies] [dependencies]
quote = "^1" quote = "1.0.3"
syn = { version = "^1", features = ["full"] } syn = { version = "^1", features = ["full"] }
[dev-dependencies] [dev-dependencies]
actix-rt = { version = "1.0.0" } actix-rt = "2.0.0"
futures-util = { version = "0.3.7", default-features = false }
trybuild = "1"


@@ -1,33 +1,83 @@
//! Macros for use with Tokio //! Macros for Actix system and runtime.
extern crate proc_macro; //!
//! The [`actix-rt`](https://docs.rs/actix-rt) crate must be available for macro output to compile.
//!
//! # Entry-point
//! See docs for the [`#[main]`](macro@main) macro.
//!
//! # Tests
//! See docs for the [`#[test]`](macro@test) macro.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use proc_macro::TokenStream; use proc_macro::TokenStream;
use quote::quote; use quote::quote;
/// Marks async function to be executed by actix system. /// Marks async entry-point function to be executed by Actix system.
/// ///
/// ## Usage /// # Examples
/// /// ```
/// ```rust
/// #[actix_rt::main] /// #[actix_rt::main]
/// async fn main() { /// async fn main() {
/// println!("Hello world"); /// println!("Hello world");
/// } /// }
/// ``` /// ```
#[allow(clippy::needless_doctest_main)]
#[proc_macro_attribute] #[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127 #[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream { pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn); let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let args = syn::parse_macro_input!(args as syn::AttributeArgs);
let attrs = &input.attrs; let attrs = &input.attrs;
let vis = &input.vis; let vis = &input.vis;
let sig = &mut input.sig; let sig = &mut input.sig;
let body = &input.block; let body = &input.block;
let name = &sig.ident;
if sig.asyncness.is_none() { if sig.asyncness.is_none() {
return syn::Error::new_spanned(sig.fn_token, "only async fn is supported") return syn::Error::new_spanned(
.to_compile_error() sig.fn_token,
.into(); "the async keyword is missing from the function declaration",
)
.to_compile_error()
.into();
}
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
for arg in &args {
match arg {
syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue {
lit: syn::Lit::Str(lit),
path,
..
})) => match path
.get_ident()
.map(|i| i.to_string().to_lowercase())
.as_deref()
{
Some("system") => match lit.parse() {
Ok(path) => system = path,
Err(_) => {
return syn::Error::new_spanned(lit, "Expected path")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
}
} }
sig.asyncness = None; sig.asyncness = None;
@@ -35,18 +85,16 @@ pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
(quote! { (quote! {
#(#attrs)* #(#attrs)*
#vis #sig { #vis #sig {
actix_rt::System::new(stringify!(#name)) <#system>::new().block_on(async move { #body })
.block_on(async move { #body })
} }
}) })
.into() .into()
} }
/// Marks async test function to be executed by actix runtime. /// Marks async test function to be executed in an Actix system.
/// ///
/// ## Usage /// # Examples
/// /// ```
/// ```no_run
/// #[actix_rt::test] /// #[actix_rt::test]
/// async fn my_test() { /// async fn my_test() {
/// assert!(true); /// assert!(true);
@@ -54,12 +102,11 @@ pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
/// ``` /// ```
#[proc_macro_attribute] #[proc_macro_attribute]
pub fn test(_: TokenStream, item: TokenStream) -> TokenStream { pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
let input = syn::parse_macro_input!(item as syn::ItemFn); let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let ret = &input.sig.output;
let name = &input.sig.ident;
let body = &input.block;
let attrs = &input.attrs; let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
let mut has_test_attr = false; let mut has_test_attr = false;
for attr in attrs { for attr in attrs {
@@ -68,33 +115,30 @@ pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
} }
} }
if input.sig.asyncness.is_none() { if sig.asyncness.is_none() {
return syn::Error::new_spanned( return syn::Error::new_spanned(
input.sig.fn_token, input.sig.fn_token,
format!("only async fn is supported, {}", input.sig.ident), "the async keyword is missing from the function declaration",
) )
.to_compile_error() .to_compile_error()
.into(); .into();
} }
let result = if has_test_attr { sig.asyncness = None;
quote! {
#(#attrs)* let missing_test_attr = if has_test_attr {
fn #name() #ret { quote!()
actix_rt::System::new("test")
.block_on(async { #body })
}
}
} else { } else {
quote! { quote!(#[test])
#[test]
#(#attrs)*
fn #name() #ret {
actix_rt::System::new("test")
.block_on(async { #body })
}
}
}; };
result.into() (quote! {
#missing_test_attr
#(#attrs)*
#vis #sig {
actix_rt::System::new()
.block_on(async { #body })
}
})
.into()
} }


@@ -0,0 +1,14 @@
#[test]
fn compile_macros() {
let t = trybuild::TestCases::new();
t.pass("tests/trybuild/main-01-basic.rs");
t.compile_fail("tests/trybuild/main-02-only-async.rs");
t.pass("tests/trybuild/main-03-fn-params.rs");
t.pass("tests/trybuild/main-04-system-path.rs");
t.compile_fail("tests/trybuild/main-05-system-expect-path.rs");
t.compile_fail("tests/trybuild/main-06-unknown-attr.rs");
t.pass("tests/trybuild/test-01-basic.rs");
t.pass("tests/trybuild/test-02-keep-attrs.rs");
t.compile_fail("tests/trybuild/test-03-only-async.rs");
}


@@ -0,0 +1,4 @@
#[actix_rt::main]
async fn main() {
println!("Hello world");
}


@@ -0,0 +1,4 @@
#[actix_rt::main]
fn main() {
futures_util::future::ready(()).await
}


@@ -0,0 +1,14 @@
error: the async keyword is missing from the function declaration
--> $DIR/main-02-only-async.rs:2:1
|
2 | fn main() {
| ^^
error[E0601]: `main` function not found in crate `$CRATE`
--> $DIR/main-02-only-async.rs:1:1
|
1 | / #[actix_rt::main]
2 | | fn main() {
3 | | futures_util::future::ready(()).await
4 | | }
| |_^ consider adding a `main` function to `$DIR/tests/trybuild/main-02-only-async.rs`


@@ -0,0 +1,6 @@
#[actix_rt::main]
async fn main2(_param: bool) {
futures_util::future::ready(()).await
}
fn main() {}


@@ -0,0 +1,8 @@
mod system {
pub use actix_rt::System as MySystem;
}
#[actix_rt::main(system = "system::MySystem")]
async fn main() {
futures_util::future::ready(()).await
}


@@ -0,0 +1,4 @@
#[actix_rt::main(system = "!@#*&")]
async fn main2() {}
fn main() {}


@@ -0,0 +1,5 @@
error: Expected path
--> $DIR/main-05-system-expect-path.rs:1:27
|
1 | #[actix_rt::main(system = "!@#*&")]
| ^^^^^^^


@@ -0,0 +1,7 @@
#[actix_rt::main(foo = "bar")]
async fn async_main() {}
#[actix_rt::main(bar::baz)]
async fn async_main2() {}
fn main() {}


@@ -0,0 +1,11 @@
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:1:18
|
1 | #[actix_rt::main(foo = "bar")]
| ^^^^^^^^^^^
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:4:18
|
4 | #[actix_rt::main(bar::baz)]
| ^^^^^^^^


@@ -0,0 +1,6 @@
#[actix_rt::test]
async fn my_test() {
assert!(true);
}
fn main() {}


@@ -0,0 +1,7 @@
#[actix_rt::test]
#[should_panic]
async fn my_test() {
todo!()
}
fn main() {}


@@ -0,0 +1,6 @@
#[actix_rt::test]
fn my_test() {
futures_util::future::ready(()).await
}
fn main() {}


@@ -0,0 +1,5 @@
error: the async keyword is missing from the function declaration
--> $DIR/test-03-only-async.rs:2:1
|
2 | fn my_test() {
| ^^

actix-router/CHANGES.md

@@ -0,0 +1,111 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.5.0-beta.1 - 2021-07-20
* Fix a bug in multi-patterns where static patterns are interpreted as regex. [#366]
* Introduce `ResourceDef::pattern_iter` to get an iterator over all patterns in a multi-pattern resource. [#373]
* Fix segment interpolation leaving `Path` in unintended state after matching. [#368]
* Fix `ResourceDef` `PartialEq` implementation. [#373]
* Re-work `IntoPatterns` trait, adding a `Patterns` enum. [#372]
* Implement `IntoPatterns` for `bytestring::ByteString`. [#372]
* Rename `Path::{len => segment_count}` to be more descriptive of its purpose. [#370]
* Rename `ResourceDef::{resource_path => resource_path_from_iter}`. [#371]
* `ResourceDef::resource_path_from_iter` now takes an `IntoIterator`. [#373]
* Rename `ResourceDef::{resource_path_named => resource_path_from_map}`. [#371]
* Rename `ResourceDef::{is_prefix_match => find_match}`. [#373]
* Rename `ResourceDef::{match_path => capture_match_info}`. [#373]
* Rename `ResourceDef::{match_path_checked => capture_match_info_fn}`. [#373]
* Remove `ResourceDef::name_mut` and introduce `ResourceDef::set_name`. [#373]
* Rename `Router::{*_checked => *_fn}`. [#373]
* Return type of `ResourceDef::name` is now `Option<&str>`. [#373]
* Return type of `ResourceDef::pattern` is now `Option<&str>`. [#373]
[#368]: https://github.com/actix/actix-net/pull/368
[#366]: https://github.com/actix/actix-net/pull/366
[#370]: https://github.com/actix/actix-net/pull/370
[#371]: https://github.com/actix/actix-net/pull/371
[#372]: https://github.com/actix/actix-net/pull/372
[#373]: https://github.com/actix/actix-net/pull/373
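A rough before/after sketch of the renames listed above (illustrative only; it assumes argument and return types are otherwise unchanged from 0.4.x):

```rust
use actix_router::{Path, ResourceDef};

fn sketch() {
    let resource = ResourceDef::new("/user/{id}");
    let mut path = Path::new("/user/123");

    // 0.4.x: resource.match_path(&mut path)
    assert!(resource.capture_match_info(&mut path));

    // 0.4.x: path.len()
    assert_eq!(path.segment_count(), 1);
}
```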
## 0.4.0 - 2021-06-06
* When matching path parameters, `%25` is now kept in the percent-encoded form; no longer decoded to `%`. [#357]
* Path tail patterns now match new lines (`\n`) in request URL. [#360]
* Fixed a safety bug where `Path` could return a malformed string after percent decoding. [#359]
* Methods `Path::{add, add_static}` now take `impl Into<Cow<'static, str>>`. [#345]
[#345]: https://github.com/actix/actix-net/pull/345
[#357]: https://github.com/actix/actix-net/pull/357
[#359]: https://github.com/actix/actix-net/pull/359
[#360]: https://github.com/actix/actix-net/pull/360
## 0.3.0 - 2019-12-31
* Version was yanked previously. See https://crates.io/crates/actix-router/0.3.0
## 0.2.7 - 2021-02-06
* Add `Router::recognize_checked` [#247]
[#247]: https://github.com/actix/actix-net/pull/247
## 0.2.6 - 2021-01-09
* Use `bytestring` version range compatible with Bytes v1.0. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 0.2.5 - 2020-09-20
* Fix `from_hex()` method
## 0.2.4 - 2019-12-31
* Add `ResourceDef::resource_path_named()` path generation method
## 0.2.3 - 2019-12-25
* Add impl `IntoPattern` for `&String`
## 0.2.2 - 2019-12-25
* Use `IntoPattern` for `RouterBuilder::path()`
## 0.2.1 - 2019-12-25
* Add `IntoPattern` trait
* Add multi-pattern resources
## 0.2.0 - 2019-12-07
* Update http to 0.2
* Update regex to 1.3
* Use bytestring instead of string
## 0.1.5 - 2019-05-15
* Remove debug prints
## 0.1.4 - 2019-05-15
* Fix checked resource match
## 0.1.3 - 2019-04-22
* Added support for `remainder match` (i.e. "/path/{tail}*")
## 0.1.2 - 2019-04-07
* Export `Quoter` type
* Allow to reset `Path` instance
## 0.1.1 - 2019-04-03
* Get dynamic segment by name instead of iterator.
## 0.1.0 - 2019-03-09
* Initial release

actix-router/Cargo.toml

@@ -0,0 +1,38 @@
[package]
name = "actix-router"
version = "0.5.0-beta.1"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Ali MJ Al-Nasrawy <alimjalnasrawy@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Resource path matching and router"
keywords = ["actix", "router", "routing"]
repository = "https://github.com/actix/actix-net.git"
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_router"
path = "src/lib.rs"
[features]
default = ["http"]
[dependencies]
bytestring = ">=0.1.5, <2"
firestorm = "0.4"
http = { version = "0.2.3", optional = true }
log = "0.4"
regex = "1.5"
serde = "1"
[dev-dependencies]
criterion = { version = "0.3", features = ["html_reports"] }
firestorm = { version = "0.4", features = ["enable_system_time"] }
http = "0.2.3"
serde = { version = "1", features = ["derive"] }
[[bench]]
name = "router"
harness = false


@@ -0,0 +1,194 @@
//! Based on https://github.com/ibraheemdev/matchit/blob/master/benches/bench.rs
use criterion::{black_box, criterion_group, criterion_main, Criterion};
macro_rules! register {
(colon) => {{
register!(finish => ":p1", ":p2", ":p3", ":p4")
}};
(brackets) => {{
register!(finish => "{p1}", "{p2}", "{p3}", "{p4}")
}};
(regex) => {{
register!(finish => "(.*)", "(.*)", "(.*)", "(.*)")
}};
(finish => $p1:literal, $p2:literal, $p3:literal, $p4:literal) => {{
let arr = [
concat!("/authorizations"),
concat!("/authorizations/", $p1),
concat!("/applications/", $p1, "/tokens/", $p2),
concat!("/events"),
concat!("/repos/", $p1, "/", $p2, "/events"),
concat!("/networks/", $p1, "/", $p2, "/events"),
concat!("/orgs/", $p1, "/events"),
concat!("/users/", $p1, "/received_events"),
concat!("/users/", $p1, "/received_events/public"),
concat!("/users/", $p1, "/events"),
concat!("/users/", $p1, "/events/public"),
concat!("/users/", $p1, "/events/orgs/", $p2),
concat!("/feeds"),
concat!("/notifications"),
concat!("/repos/", $p1, "/", $p2, "/notifications"),
concat!("/notifications/threads/", $p1),
concat!("/notifications/threads/", $p1, "/subscription"),
concat!("/repos/", $p1, "/", $p2, "/stargazers"),
concat!("/users/", $p1, "/starred"),
concat!("/user/starred"),
concat!("/user/starred/", $p1, "/", $p2),
concat!("/repos/", $p1, "/", $p2, "/subscribers"),
concat!("/users/", $p1, "/subscriptions"),
concat!("/user/subscriptions"),
concat!("/repos/", $p1, "/", $p2, "/subscription"),
concat!("/user/subscriptions/", $p1, "/", $p2),
concat!("/users/", $p1, "/gists"),
concat!("/gists"),
concat!("/gists/", $p1),
concat!("/gists/", $p1, "/star"),
concat!("/repos/", $p1, "/", $p2, "/git/blobs/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/commits/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/refs"),
concat!("/repos/", $p1, "/", $p2, "/git/tags/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/trees/", $p3),
concat!("/issues"),
concat!("/user/issues"),
concat!("/orgs/", $p1, "/issues"),
concat!("/repos/", $p1, "/", $p2, "/issues"),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3),
concat!("/repos/", $p1, "/", $p2, "/assignees"),
concat!("/repos/", $p1, "/", $p2, "/assignees/", $p3),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/events"),
concat!("/repos/", $p1, "/", $p2, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/labels/", $p3),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/milestones/", $p3, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/milestones/"),
concat!("/repos/", $p1, "/", $p2, "/milestones/", $p3),
concat!("/emojis"),
concat!("/gitignore/templates"),
concat!("/gitignore/templates/", $p1),
concat!("/meta"),
concat!("/rate_limit"),
concat!("/users/", $p1, "/orgs"),
concat!("/user/orgs"),
concat!("/orgs/", $p1),
concat!("/orgs/", $p1, "/members"),
concat!("/orgs/", $p1, "/members", $p2),
concat!("/orgs/", $p1, "/public_members"),
concat!("/orgs/", $p1, "/public_members/", $p2),
concat!("/orgs/", $p1, "/teams"),
concat!("/teams/", $p1),
concat!("/teams/", $p1, "/members"),
concat!("/teams/", $p1, "/members", $p2),
concat!("/teams/", $p1, "/repos"),
concat!("/teams/", $p1, "/repos/", $p2, "/", $p3),
concat!("/user/teams"),
concat!("/repos/", $p1, "/", $p2, "/pulls"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/commits"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/files"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/merge"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/comments"),
concat!("/user/repos"),
concat!("/users/", $p1, "/repos"),
concat!("/orgs/", $p1, "/repos"),
concat!("/repositories"),
concat!("/repos/", $p1, "/", $p2),
concat!("/repos/", $p1, "/", $p2, "/contributors"),
concat!("/repos/", $p1, "/", $p2, "/languages"),
concat!("/repos/", $p1, "/", $p2, "/teams"),
concat!("/repos/", $p1, "/", $p2, "/tags"),
concat!("/repos/", $p1, "/", $p2, "/branches"),
concat!("/repos/", $p1, "/", $p2, "/branches/", $p3),
concat!("/repos/", $p1, "/", $p2, "/collaborators"),
concat!("/repos/", $p1, "/", $p2, "/collaborators/", $p3),
concat!("/repos/", $p1, "/", $p2, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/commits/", $p3, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/commits"),
concat!("/repos/", $p1, "/", $p2, "/commits/", $p3),
concat!("/repos/", $p1, "/", $p2, "/readme"),
concat!("/repos/", $p1, "/", $p2, "/keys"),
concat!("/repos/", $p1, "/", $p2, "/keys", $p3),
concat!("/repos/", $p1, "/", $p2, "/downloads"),
concat!("/repos/", $p1, "/", $p2, "/downloads", $p3),
concat!("/repos/", $p1, "/", $p2, "/forks"),
concat!("/repos/", $p1, "/", $p2, "/hooks"),
concat!("/repos/", $p1, "/", $p2, "/hooks", $p3),
concat!("/repos/", $p1, "/", $p2, "/releases"),
concat!("/repos/", $p1, "/", $p2, "/releases/", $p3),
concat!("/repos/", $p1, "/", $p2, "/releases/", $p3, "/assets"),
concat!("/repos/", $p1, "/", $p2, "/stats/contributors"),
concat!("/repos/", $p1, "/", $p2, "/stats/commit_activity"),
concat!("/repos/", $p1, "/", $p2, "/stats/code_frequency"),
concat!("/repos/", $p1, "/", $p2, "/stats/participation"),
concat!("/repos/", $p1, "/", $p2, "/stats/punch_card"),
concat!("/repos/", $p1, "/", $p2, "/statuses/", $p3),
concat!("/search/repositories"),
concat!("/search/code"),
concat!("/search/issues"),
concat!("/search/users"),
concat!("/legacy/issues/search/", $p1, "/", $p2, "/", $p3, "/", $p4),
concat!("/legacy/repos/search/", $p1),
concat!("/legacy/user/search/", $p1),
concat!("/legacy/user/email/", $p1),
concat!("/users/", $p1),
concat!("/user"),
concat!("/users"),
concat!("/user/emails"),
concat!("/users/", $p1, "/followers"),
concat!("/user/followers"),
concat!("/users/", $p1, "/following"),
concat!("/user/following"),
concat!("/user/following/", $p1),
concat!("/users/", $p1, "/following", $p2),
concat!("/users/", $p1, "/keys"),
concat!("/user/keys"),
concat!("/user/keys/", $p1),
];
std::array::IntoIter::new(arr)
}};
}
fn call() -> impl Iterator<Item = &'static str> {
let arr = [
"/authorizations",
"/user/repos",
"/repos/rust-lang/rust/stargazers",
"/orgs/rust-lang/public_members/nikomatsakis",
"/repos/rust-lang/rust/releases/1.51.0",
];
std::array::IntoIter::new(arr)
}
fn compare_routers(c: &mut Criterion) {
let mut group = c.benchmark_group("Compare Routers");
let mut actix = actix_router::Router::<bool>::build();
for route in register!(brackets) {
actix.path(route, true);
}
let actix = actix.finish();
group.bench_function("actix", |b| {
b.iter(|| {
for route in call() {
let mut path = actix_router::Path::new(route);
black_box(actix.recognize(&mut path).unwrap());
}
});
});
let regex_set = regex::RegexSet::new(register!(regex)).unwrap();
group.bench_function("regex", |b| {
b.iter(|| {
for route in call() {
black_box(regex_set.matches(route));
}
});
});
group.finish();
}
criterion_group!(benches, compare_routers);
criterion_main!(benches);


@@ -0,0 +1,169 @@
macro_rules! register {
(brackets) => {{
register!(finish => "{p1}", "{p2}", "{p3}", "{p4}")
}};
(finish => $p1:literal, $p2:literal, $p3:literal, $p4:literal) => {{
let arr = [
concat!("/authorizations"),
concat!("/authorizations/", $p1),
concat!("/applications/", $p1, "/tokens/", $p2),
concat!("/events"),
concat!("/repos/", $p1, "/", $p2, "/events"),
concat!("/networks/", $p1, "/", $p2, "/events"),
concat!("/orgs/", $p1, "/events"),
concat!("/users/", $p1, "/received_events"),
concat!("/users/", $p1, "/received_events/public"),
concat!("/users/", $p1, "/events"),
concat!("/users/", $p1, "/events/public"),
concat!("/users/", $p1, "/events/orgs/", $p2),
concat!("/feeds"),
concat!("/notifications"),
concat!("/repos/", $p1, "/", $p2, "/notifications"),
concat!("/notifications/threads/", $p1),
concat!("/notifications/threads/", $p1, "/subscription"),
concat!("/repos/", $p1, "/", $p2, "/stargazers"),
concat!("/users/", $p1, "/starred"),
concat!("/user/starred"),
concat!("/user/starred/", $p1, "/", $p2),
concat!("/repos/", $p1, "/", $p2, "/subscribers"),
concat!("/users/", $p1, "/subscriptions"),
concat!("/user/subscriptions"),
concat!("/repos/", $p1, "/", $p2, "/subscription"),
concat!("/user/subscriptions/", $p1, "/", $p2),
concat!("/users/", $p1, "/gists"),
concat!("/gists"),
concat!("/gists/", $p1),
concat!("/gists/", $p1, "/star"),
concat!("/repos/", $p1, "/", $p2, "/git/blobs/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/commits/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/refs"),
concat!("/repos/", $p1, "/", $p2, "/git/tags/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/trees/", $p3),
concat!("/issues"),
concat!("/user/issues"),
concat!("/orgs/", $p1, "/issues"),
concat!("/repos/", $p1, "/", $p2, "/issues"),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3),
concat!("/repos/", $p1, "/", $p2, "/assignees"),
concat!("/repos/", $p1, "/", $p2, "/assignees/", $p3),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/events"),
concat!("/repos/", $p1, "/", $p2, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/labels/", $p3),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/milestones/", $p3, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/milestones/"),
concat!("/repos/", $p1, "/", $p2, "/milestones/", $p3),
concat!("/emojis"),
concat!("/gitignore/templates"),
concat!("/gitignore/templates/", $p1),
concat!("/meta"),
concat!("/rate_limit"),
concat!("/users/", $p1, "/orgs"),
concat!("/user/orgs"),
concat!("/orgs/", $p1),
concat!("/orgs/", $p1, "/members"),
concat!("/orgs/", $p1, "/members", $p2),
concat!("/orgs/", $p1, "/public_members"),
concat!("/orgs/", $p1, "/public_members/", $p2),
concat!("/orgs/", $p1, "/teams"),
concat!("/teams/", $p1),
concat!("/teams/", $p1, "/members"),
concat!("/teams/", $p1, "/members", $p2),
concat!("/teams/", $p1, "/repos"),
concat!("/teams/", $p1, "/repos/", $p2, "/", $p3),
concat!("/user/teams"),
concat!("/repos/", $p1, "/", $p2, "/pulls"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/commits"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/files"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/merge"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/comments"),
concat!("/user/repos"),
concat!("/users/", $p1, "/repos"),
concat!("/orgs/", $p1, "/repos"),
concat!("/repositories"),
concat!("/repos/", $p1, "/", $p2),
concat!("/repos/", $p1, "/", $p2, "/contributors"),
concat!("/repos/", $p1, "/", $p2, "/languages"),
concat!("/repos/", $p1, "/", $p2, "/teams"),
concat!("/repos/", $p1, "/", $p2, "/tags"),
concat!("/repos/", $p1, "/", $p2, "/branches"),
concat!("/repos/", $p1, "/", $p2, "/branches/", $p3),
concat!("/repos/", $p1, "/", $p2, "/collaborators"),
concat!("/repos/", $p1, "/", $p2, "/collaborators/", $p3),
concat!("/repos/", $p1, "/", $p2, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/commits/", $p3, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/commits"),
concat!("/repos/", $p1, "/", $p2, "/commits/", $p3),
concat!("/repos/", $p1, "/", $p2, "/readme"),
concat!("/repos/", $p1, "/", $p2, "/keys"),
concat!("/repos/", $p1, "/", $p2, "/keys", $p3),
concat!("/repos/", $p1, "/", $p2, "/downloads"),
concat!("/repos/", $p1, "/", $p2, "/downloads", $p3),
concat!("/repos/", $p1, "/", $p2, "/forks"),
concat!("/repos/", $p1, "/", $p2, "/hooks"),
concat!("/repos/", $p1, "/", $p2, "/hooks", $p3),
concat!("/repos/", $p1, "/", $p2, "/releases"),
concat!("/repos/", $p1, "/", $p2, "/releases/", $p3),
concat!("/repos/", $p1, "/", $p2, "/releases/", $p3, "/assets"),
concat!("/repos/", $p1, "/", $p2, "/stats/contributors"),
concat!("/repos/", $p1, "/", $p2, "/stats/commit_activity"),
concat!("/repos/", $p1, "/", $p2, "/stats/code_frequency"),
concat!("/repos/", $p1, "/", $p2, "/stats/participation"),
concat!("/repos/", $p1, "/", $p2, "/stats/punch_card"),
concat!("/repos/", $p1, "/", $p2, "/statuses/", $p3),
concat!("/search/repositories"),
concat!("/search/code"),
concat!("/search/issues"),
concat!("/search/users"),
concat!("/legacy/issues/search/", $p1, "/", $p2, "/", $p3, "/", $p4),
concat!("/legacy/repos/search/", $p1),
concat!("/legacy/user/search/", $p1),
concat!("/legacy/user/email/", $p1),
concat!("/users/", $p1),
concat!("/user"),
concat!("/users"),
concat!("/user/emails"),
concat!("/users/", $p1, "/followers"),
concat!("/user/followers"),
concat!("/users/", $p1, "/following"),
concat!("/user/following"),
concat!("/user/following/", $p1),
concat!("/users/", $p1, "/following", $p2),
concat!("/users/", $p1, "/keys"),
concat!("/user/keys"),
concat!("/user/keys/", $p1),
];
arr.to_vec()
}};
}
static PATHS: [&str; 5] = [
"/authorizations",
"/user/repos",
"/repos/rust-lang/rust/stargazers",
"/orgs/rust-lang/public_members/nikomatsakis",
"/repos/rust-lang/rust/releases/1.51.0",
];
fn main() {
let mut router = actix_router::Router::<bool>::build();
for route in register!(brackets) {
router.path(route, true);
}
let actix = router.finish();
if firestorm::enabled() {
firestorm::bench("target", || {
for &route in &PATHS {
let mut path = actix_router::Path::new(route);
actix.recognize(&mut path).unwrap();
}
})
.unwrap();
}
}
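
Not part of the diff: a minimal sketch of the same router API outside the profiling harness, showing how a matched route also captures named segments. It assumes the builder/recognize/get surface visible in this compare (RouterBuilder::path, Router::recognize, Path::get).

// Hypothetical helper, not in the benchmark: register one templated route and
// read the captured segments after a successful match.
fn recognize_example() {
    let mut builder = actix_router::Router::<&'static str>::build();
    builder.path("/repos/{org}/{repo}/releases/{tag}", "releases");
    let router = builder.finish();

    let mut path = actix_router::Path::new("/repos/rust-lang/rust/releases/1.51.0");
    let (value, _id) = router.recognize(&mut path).unwrap();
    assert_eq!(*value, "releases");
    assert_eq!(path.get("org"), Some("rust-lang"));
    assert_eq!(path.get("tag"), Some("1.51.0"));
}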


@@ -7,9 +7,13 @@ use crate::ResourcePath;
macro_rules! unsupported_type { macro_rules! unsupported_type {
($trait_fn:ident, $name:expr) => { ($trait_fn:ident, $name:expr) => {
fn $trait_fn<V>(self, _: V) -> Result<V::Value, Self::Error> fn $trait_fn<V>(self, _: V) -> Result<V::Value, Self::Error>
where V: Visitor<'de> where
V: Visitor<'de>,
{ {
Err(de::value::Error::custom(concat!("unsupported type: ", $name))) Err(de::value::Error::custom(concat!(
"unsupported type: ",
$name
)))
} }
}; };
} }
@@ -17,23 +21,31 @@ macro_rules! unsupported_type {
macro_rules! parse_single_value { macro_rules! parse_single_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => { ($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error> fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: Visitor<'de> where
V: Visitor<'de>,
{ {
if self.path.len() != 1 { if self.path.segment_count() != 1 {
Err(de::value::Error::custom( Err(de::value::Error::custom(
format!("wrong number of parameters: {} expected 1", format!(
self.path.len()).as_str())) "wrong number of parameters: {} expected 1",
self.path.segment_count()
)
.as_str(),
))
} else { } else {
let v = self.path[0].parse().map_err( let v = self.path[0].parse().map_err(|_| {
|_| de::value::Error::custom( de::value::Error::custom(format!(
format!("can not parse {:?} to a {}", &self.path[0], $tp)))?; "can not parse {:?} to a {}",
&self.path[0], $tp
))
})?;
visitor.$visit_fn(v) visitor.$visit_fn(v)
} }
} }
} };
} }
pub struct PathDeserializer<'de, T: ResourcePath + 'de> { pub struct PathDeserializer<'de, T: ResourcePath> {
path: &'de Path<T>, path: &'de Path<T>,
} }
@@ -101,11 +113,11 @@ impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T>
where where
V: Visitor<'de>, V: Visitor<'de>,
{ {
if self.path.len() < len { if self.path.segment_count() < len {
Err(de::value::Error::custom( Err(de::value::Error::custom(
format!( format!(
"wrong number of parameters: {} expected {}", "wrong number of parameters: {} expected {}",
self.path.len(), self.path.segment_count(),
len len
) )
.as_str(), .as_str(),
@@ -126,11 +138,11 @@ impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T>
where where
V: Visitor<'de>, V: Visitor<'de>,
{ {
if self.path.len() < len { if self.path.segment_count() < len {
Err(de::value::Error::custom( Err(de::value::Error::custom(
format!( format!(
"wrong number of parameters: {} expected {}", "wrong number of parameters: {} expected {}",
self.path.len(), self.path.segment_count(),
len len
) )
.as_str(), .as_str(),
@@ -152,9 +164,7 @@ impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T>
V: Visitor<'de>, V: Visitor<'de>,
{ {
if self.path.is_empty() { if self.path.is_empty() {
Err(de::value::Error::custom( Err(de::value::Error::custom("expected at least one parameters"))
"expeceted at least one parameters",
))
} else { } else {
visitor.visit_enum(ValueEnum { visitor.visit_enum(ValueEnum {
value: &self.path[0], value: &self.path[0],
@@ -166,9 +176,13 @@ impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T>
where where
V: Visitor<'de>, V: Visitor<'de>,
{ {
if self.path.len() != 1 { if self.path.segment_count() != 1 {
Err(de::value::Error::custom( Err(de::value::Error::custom(
format!("wrong number of parameters: {} expected 1", self.path.len()).as_str(), format!(
"wrong number of parameters: {} expected 1",
self.path.segment_count()
)
.as_str(),
)) ))
} else { } else {
visitor.visit_str(&self.path[0]) visitor.visit_str(&self.path[0])
@@ -268,14 +282,15 @@ impl<'de> Deserializer<'de> for Key<'de> {
macro_rules! parse_value { macro_rules! parse_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => { ($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error> fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: Visitor<'de> where
V: Visitor<'de>,
{ {
let v = self.value.parse().map_err( let v = self.value.parse().map_err(|_| {
|_| de::value::Error::custom( de::value::Error::custom(format!("can not parse {:?} to a {}", self.value, $tp))
format!("can not parse {:?} to a {}", self.value, $tp)))?; })?;
visitor.$visit_fn(v) visitor.$visit_fn(v)
} }
} };
} }
struct Value<'de> { struct Value<'de> {
@@ -477,8 +492,7 @@ impl<'de> de::VariantAccess<'de> for UnitVariant {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use serde::de; use serde::{de, Deserialize};
use serde_derive::Deserialize;
use super::*; use super::*;
use crate::path::Path; use crate::path::Path;
@@ -492,7 +506,7 @@ mod tests {
#[derive(Deserialize)] #[derive(Deserialize)]
struct Id { struct Id {
id: String, _id: String,
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]

actix-router/src/lib.rs (new file, 149 lines)

@@ -0,0 +1,149 @@
//! Resource path matching and router.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod de;
mod path;
mod resource;
mod router;
pub use self::de::PathDeserializer;
pub use self::path::Path;
pub use self::resource::ResourceDef;
pub use self::router::{ResourceInfo, Router, RouterBuilder};
// TODO: this trait is necessary, document it
// see impl Resource for ServiceRequest
pub trait Resource<T: ResourcePath> {
fn resource_path(&mut self) -> &mut Path<T>;
}
pub trait ResourcePath {
fn path(&self) -> &str;
}
impl ResourcePath for String {
fn path(&self) -> &str {
self.as_str()
}
}
impl<'a> ResourcePath for &'a str {
fn path(&self) -> &str {
self
}
}
impl ResourcePath for bytestring::ByteString {
fn path(&self) -> &str {
&*self
}
}
/// One or many patterns.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Patterns {
Single(String),
List(Vec<String>),
}
impl Patterns {
pub fn is_empty(&self) -> bool {
match self {
Patterns::Single(_) => false,
Patterns::List(pats) => pats.is_empty(),
}
}
}
/// Helper trait for types that can be converted to one or more path patterns.
pub trait IntoPatterns {
fn patterns(&self) -> Patterns;
}
impl IntoPatterns for String {
fn patterns(&self) -> Patterns {
Patterns::Single(self.clone())
}
}
impl<'a> IntoPatterns for &'a String {
fn patterns(&self) -> Patterns {
Patterns::Single((*self).clone())
}
}
impl<'a> IntoPatterns for &'a str {
fn patterns(&self) -> Patterns {
Patterns::Single((*self).to_owned())
}
}
impl IntoPatterns for bytestring::ByteString {
fn patterns(&self) -> Patterns {
Patterns::Single(self.to_string())
}
}
impl IntoPatterns for Patterns {
fn patterns(&self) -> Patterns {
self.clone()
}
}
impl<T: AsRef<str>> IntoPatterns for Vec<T> {
fn patterns(&self) -> Patterns {
let mut patterns = self.iter().map(|v| v.as_ref().to_owned());
match patterns.size_hint() {
(1, _) => Patterns::Single(patterns.next().unwrap()),
_ => Patterns::List(patterns.collect()),
}
}
}
macro_rules! array_patterns_single (($tp:ty) => {
impl IntoPatterns for [$tp; 1] {
fn patterns(&self) -> Patterns {
Patterns::Single(self[0].to_owned())
}
}
});
macro_rules! array_patterns_multiple (($tp:ty, $str_fn:expr, $($num:tt) +) => {
// for each array length specified in $num
$(
impl IntoPatterns for [$tp; $num] {
fn patterns(&self) -> Patterns {
Patterns::List(self.iter().map($str_fn).collect())
}
}
)+
});
array_patterns_single!(&str);
array_patterns_multiple!(&str, |&v| v.to_owned(), 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16);
array_patterns_single!(String);
array_patterns_multiple!(String, |v| v.clone(), 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16);
#[cfg(feature = "http")]
mod url;
#[cfg(feature = "http")]
pub use self::url::{Quoter, Url};
#[cfg(feature = "http")]
mod http_impls {
use http::Uri;
use super::ResourcePath;
impl ResourcePath for Uri {
fn path(&self) -> &str {
self.path()
}
}
}
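
Not part of the file above: a short sketch of how these conversions behave, assuming the impls shown (a single string yields Patterns::Single; arrays and Vecs of two or more strings yield Patterns::List).

use actix_router::{IntoPatterns, Patterns};

// Single pattern vs. list of patterns, per the impls in lib.rs above.
fn patterns_example() {
    assert_eq!(
        "/user/{id}".patterns(),
        Patterns::Single("/user/{id}".to_owned())
    );
    assert_eq!(
        ["/v1/ping", "/v2/ping"].patterns(),
        Patterns::List(vec!["/v1/ping".to_owned(), "/v2/ping".to_owned()])
    );
}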


@@ -1,45 +1,31 @@
use std::borrow::Cow;
use std::ops::Index; use std::ops::Index;
use std::rc::Rc;
use firestorm::profile_method;
use serde::de; use serde::de;
use crate::de::PathDeserializer; use crate::{de::PathDeserializer, Resource, ResourcePath};
use crate::{Resource, ResourcePath};
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone)]
pub(crate) enum PathItem { pub(crate) enum PathItem {
Static(&'static str), Static(Cow<'static, str>),
Segment(u16, u16), Segment(u16, u16),
} }
/// Resource path match information impl Default for PathItem {
fn default() -> Self {
Self::Static(Cow::Borrowed(""))
}
}
/// Resource path match information.
/// ///
/// If resource path contains variable patterns, `Path` stores them. /// If resource path contains variable patterns, `Path` stores them.
#[derive(Debug)] #[derive(Debug, Clone, Default)]
pub struct Path<T> { pub struct Path<T> {
path: T, path: T,
pub(crate) skip: u16, pub(crate) skip: u16,
pub(crate) segments: Vec<(Rc<String>, PathItem)>, pub(crate) segments: Vec<(Cow<'static, str>, PathItem)>,
}
impl<T: Default> Default for Path<T> {
fn default() -> Self {
Path {
path: T::default(),
skip: 0,
segments: Vec::new(),
}
}
}
impl<T: Clone> Clone for Path<T> {
fn clone(&self) -> Self {
Path {
path: self.path.clone(),
skip: self.skip,
segments: self.segments.clone(),
}
}
} }
impl<T: ResourcePath> Path<T> { impl<T: ResourcePath> Path<T> {
@@ -51,21 +37,23 @@ impl<T: ResourcePath> Path<T> {
} }
} }
/// Get reference to inner path instance.
#[inline] #[inline]
/// Get reference to inner path instance
pub fn get_ref(&self) -> &T { pub fn get_ref(&self) -> &T {
&self.path &self.path
} }
/// Get mutable reference to inner path instance.
#[inline] #[inline]
/// Get mutable reference to inner path instance
pub fn get_mut(&mut self) -> &mut T { pub fn get_mut(&mut self) -> &mut T {
&mut self.path &mut self.path
} }
/// Path.
#[inline] #[inline]
/// Path
pub fn path(&self) -> &str { pub fn path(&self) -> &str {
profile_method!(path);
let skip = self.skip as usize; let skip = self.skip as usize;
let path = self.path.path(); let path = self.path.path();
if skip <= path.len() { if skip <= path.len() {
@@ -75,71 +63,77 @@ impl<T: ResourcePath> Path<T> {
} }
} }
/// Set new path.
#[inline] #[inline]
/// Set new path
pub fn set(&mut self, path: T) { pub fn set(&mut self, path: T) {
self.skip = 0; self.skip = 0;
self.path = path; self.path = path;
self.segments.clear(); self.segments.clear();
} }
/// Reset state.
#[inline] #[inline]
/// Reset state
pub fn reset(&mut self) { pub fn reset(&mut self) {
self.skip = 0; self.skip = 0;
self.segments.clear(); self.segments.clear();
} }
/// Skip first `n` chars in path.
#[inline] #[inline]
/// Skip first `n` chars in path
pub fn skip(&mut self, n: u16) { pub fn skip(&mut self, n: u16) {
self.skip += n; self.skip += n;
} }
pub(crate) fn add(&mut self, name: Rc<String>, value: PathItem) { pub(crate) fn add(&mut self, name: impl Into<Cow<'static, str>>, value: PathItem) {
profile_method!(add);
match value { match value {
PathItem::Static(s) => self.segments.push((name, PathItem::Static(s))), PathItem::Static(s) => self.segments.push((name.into(), PathItem::Static(s))),
PathItem::Segment(begin, end) => self PathItem::Segment(begin, end) => self.segments.push((
.segments name.into(),
.push((name, PathItem::Segment(self.skip + begin, self.skip + end))), PathItem::Segment(self.skip + begin, self.skip + end),
)),
} }
} }
#[doc(hidden)] #[doc(hidden)]
pub fn add_static(&mut self, name: &str, value: &'static str) { pub fn add_static(
&mut self,
name: impl Into<Cow<'static, str>>,
value: impl Into<Cow<'static, str>>,
) {
self.segments self.segments
.push((Rc::new(name.to_string()), PathItem::Static(value))); .push((name.into(), PathItem::Static(value.into())));
} }
/// Check if there are any matched patterns.
#[inline] #[inline]
/// Check if there are any matched patterns
pub fn is_empty(&self) -> bool { pub fn is_empty(&self) -> bool {
self.segments.is_empty() self.segments.is_empty()
} }
/// Returns number of interpolated segments.
#[inline] #[inline]
/// Check number of extracted parameters pub fn segment_count(&self) -> usize {
pub fn len(&self) -> usize {
self.segments.len() self.segments.len()
} }
/// Get matched parameter by name without type conversion /// Get matched parameter by name without type conversion
pub fn get(&self, key: &str) -> Option<&str> { pub fn get(&self, name: &str) -> Option<&str> {
for item in self.segments.iter() { profile_method!(get);
if key == item.0.as_str() {
return match item.1 { for (seg_name, val) in self.segments.iter() {
if name == seg_name {
return match val {
PathItem::Static(ref s) => Some(&s), PathItem::Static(ref s) => Some(&s),
PathItem::Segment(s, e) => { PathItem::Segment(s, e) => {
Some(&self.path.path()[(s as usize)..(e as usize)]) Some(&self.path.path()[(*s as usize)..(*e as usize)])
} }
}; };
} }
} }
if key == "tail" {
Some(&self.path.path()[(self.skip as usize)..]) None
} else {
None
}
} }
/// Get unprocessed part of the path /// Get unprocessed part of the path
@@ -149,9 +143,10 @@ impl<T: ResourcePath> Path<T> {
/// Get matched parameter by name. /// Get matched parameter by name.
/// ///
/// If keyed parameter is not available empty string is used as default /// If keyed parameter is not available empty string is used as default value.
/// value.
pub fn query(&self, key: &str) -> &str { pub fn query(&self, key: &str) -> &str {
profile_method!(query);
if let Some(s) = self.get(key) { if let Some(s) = self.get(key) {
s s
} else { } else {
@@ -159,8 +154,8 @@ impl<T: ResourcePath> Path<T> {
} }
} }
/// Return iterator to items in parameter container /// Return iterator to items in parameter container.
pub fn iter(&self) -> PathIter<T> { pub fn iter(&self) -> PathIter<'_, T> {
PathIter { PathIter {
idx: 0, idx: 0,
params: self, params: self,
@@ -169,6 +164,7 @@ impl<T: ResourcePath> Path<T> {
/// Try to deserialize matching parameters to a specified type `U` /// Try to deserialize matching parameters to a specified type `U`
pub fn load<'de, U: serde::Deserialize<'de>>(&'de self) -> Result<U, de::value::Error> { pub fn load<'de, U: serde::Deserialize<'de>>(&'de self) -> Result<U, de::value::Error> {
profile_method!(load);
de::Deserialize::deserialize(PathDeserializer::new(self)) de::Deserialize::deserialize(PathDeserializer::new(self))
} }
} }
@@ -184,7 +180,7 @@ impl<'a, T: ResourcePath> Iterator for PathIter<'a, T> {
#[inline] #[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> { fn next(&mut self) -> Option<(&'a str, &'a str)> {
if self.idx < self.params.len() { if self.idx < self.params.segment_count() {
let idx = self.idx; let idx = self.idx;
let res = match self.params.segments[idx].1 { let res = match self.params.segments[idx].1 {
PathItem::Static(ref s) => &s, PathItem::Static(ref s) => &s,
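
Not part of the diff: a sketch of typical Path usage with the renamed segment_count and the serde-based load, assuming serde's derive feature and the hypothetical ItemParams struct below.

use serde::Deserialize;

// Hypothetical parameter struct; field names must match the segment names.
#[derive(Debug, Deserialize)]
struct ItemParams {
    id: u32,
    name: String,
}

fn path_example() -> Result<(), serde::de::value::Error> {
    let resource = actix_router::ResourceDef::new("/items/{id}/{name}");
    let mut path = actix_router::Path::new("/items/42/widget");
    assert!(resource.capture_match_info(&mut path));

    assert_eq!(path.segment_count(), 2); // renamed from len()
    assert_eq!(path.query("name"), "widget"); // "" if the key is missing

    let params: ItemParams = path.load()?;
    assert_eq!(params.id, 42);
    Ok(())
}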

actix-router/src/resource.rs (new file, 1681 lines; diff suppressed because it is too large)


@@ -1,4 +1,6 @@
use crate::{Resource, ResourceDef, ResourcePath}; use firestorm::profile_method;
use crate::{IntoPatterns, Resource, ResourceDef, ResourcePath};
#[derive(Debug, Copy, Clone, PartialEq)] #[derive(Debug, Copy, Clone, PartialEq)]
pub struct ResourceId(pub u16); pub struct ResourceId(pub u16);
@@ -10,7 +12,11 @@ pub struct ResourceInfo {
} }
/// Resource router. /// Resource router.
pub struct Router<T, U = ()>(Vec<(ResourceDef, T, Option<U>)>); // T is the resource itself
// U is any other data needed for routing like method guards
pub struct Router<T, U = ()> {
routes: Vec<(ResourceDef, T, Option<U>)>,
}
impl<T, U> Router<T, U> { impl<T, U> Router<T, U> {
pub fn build() -> RouterBuilder<T, U> { pub fn build() -> RouterBuilder<T, U> {
@@ -24,11 +30,14 @@ impl<T, U> Router<T, U> {
R: Resource<P>, R: Resource<P>,
P: ResourcePath, P: ResourcePath,
{ {
for item in self.0.iter() { profile_method!(recognize);
if item.0.match_path(resource.resource_path()) {
for item in self.routes.iter() {
if item.0.capture_match_info(resource.resource_path()) {
return Some((&item.1, ResourceId(item.0.id()))); return Some((&item.1, ResourceId(item.0.id())));
} }
} }
None None
} }
@@ -37,15 +46,35 @@ impl<T, U> Router<T, U> {
R: Resource<P>, R: Resource<P>,
P: ResourcePath, P: ResourcePath,
{ {
for item in self.0.iter_mut() { profile_method!(recognize_mut);
if item.0.match_path(resource.resource_path()) {
for item in self.routes.iter_mut() {
if item.0.capture_match_info(resource.resource_path()) {
return Some((&mut item.1, ResourceId(item.0.id()))); return Some((&mut item.1, ResourceId(item.0.id())));
} }
} }
None None
} }
pub fn recognize_mut_checked<R, P, F>( pub fn recognize_fn<R, P, F>(&self, resource: &mut R, check: F) -> Option<(&T, ResourceId)>
where
F: Fn(&R, &Option<U>) -> bool,
R: Resource<P>,
P: ResourcePath,
{
profile_method!(recognize_checked);
for item in self.routes.iter() {
if item.0.capture_match_info_fn(resource, &check, &item.2) {
return Some((&item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_mut_fn<R, P, F>(
&mut self, &mut self,
resource: &mut R, resource: &mut R,
check: F, check: F,
@@ -55,11 +84,14 @@ impl<T, U> Router<T, U> {
R: Resource<P>, R: Resource<P>,
P: ResourcePath, P: ResourcePath,
{ {
for item in self.0.iter_mut() { profile_method!(recognize_mut_checked);
if item.0.match_path_checked(resource, &check, &item.2) {
for item in self.routes.iter_mut() {
if item.0.capture_match_info_fn(resource, &check, &item.2) {
return Some((&mut item.1, ResourceId(item.0.id()))); return Some((&mut item.1, ResourceId(item.0.id())));
} }
} }
None None
} }
} }
@@ -70,7 +102,13 @@ pub struct RouterBuilder<T, U = ()> {
impl<T, U> RouterBuilder<T, U> { impl<T, U> RouterBuilder<T, U> {
/// Register resource for specified path. /// Register resource for specified path.
pub fn path(&mut self, path: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) { pub fn path<P: IntoPatterns>(
&mut self,
path: P,
resource: T,
) -> &mut (ResourceDef, T, Option<U>) {
profile_method!(path);
self.resources self.resources
.push((ResourceDef::new(path), resource, None)); .push((ResourceDef::new(path), resource, None));
self.resources.last_mut().unwrap() self.resources.last_mut().unwrap()
@@ -78,6 +116,8 @@ impl<T, U> RouterBuilder<T, U> {
/// Register resource for specified path prefix. /// Register resource for specified path prefix.
pub fn prefix(&mut self, prefix: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) { pub fn prefix(&mut self, prefix: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) {
profile_method!(prefix);
self.resources self.resources
.push((ResourceDef::prefix(prefix), resource, None)); .push((ResourceDef::prefix(prefix), resource, None));
self.resources.last_mut().unwrap() self.resources.last_mut().unwrap()
@@ -85,13 +125,17 @@ impl<T, U> RouterBuilder<T, U> {
/// Register resource for ResourceDef /// Register resource for ResourceDef
pub fn rdef(&mut self, rdef: ResourceDef, resource: T) -> &mut (ResourceDef, T, Option<U>) { pub fn rdef(&mut self, rdef: ResourceDef, resource: T) -> &mut (ResourceDef, T, Option<U>) {
profile_method!(rdef);
self.resources.push((rdef, resource, None)); self.resources.push((rdef, resource, None));
self.resources.last_mut().unwrap() self.resources.last_mut().unwrap()
} }
/// Finish configuration and create router instance. /// Finish configuration and create router instance.
pub fn finish(self) -> Router<T, U> { pub fn finish(self) -> Router<T, U> {
Router(self.resources) Router {
routes: self.resources,
}
} }
} }
@@ -100,6 +144,7 @@ mod tests {
use crate::path::Path; use crate::path::Path;
use crate::router::{ResourceId, Router}; use crate::router::{ResourceId, Router};
#[allow(clippy::cognitive_complexity)]
#[test] #[test]
fn test_recognizer_1() { fn test_recognizer_1() {
let mut router = Router::<usize>::build(); let mut router = Router::<usize>::build();


@@ -31,7 +31,7 @@ fn set_bit(array: &mut [u8], ch: u8) {
} }
thread_local! { thread_local! {
static DEFAULT_QUOTER: Quoter = { Quoter::new(b"@:", b"/+") }; static DEFAULT_QUOTER: Quoter = Quoter::new(b"@:", b"%/+");
} }
#[derive(Default, Clone, Debug)] #[derive(Default, Clone, Debug)]
@@ -170,23 +170,17 @@ impl Quoter {
idx += 1; idx += 1;
} }
if let Some(data) = cloned { cloned.map(|data| String::from_utf8_lossy(&data).into_owned())
// Unsafe: we get data from http::Uri, which does utf-8 checks already
// this code only decodes valid pct encoded values
Some(unsafe { String::from_utf8_unchecked(data) })
} else {
None
}
} }
} }
#[inline] #[inline]
fn from_hex(v: u8) -> Option<u8> { fn from_hex(v: u8) -> Option<u8> {
if v >= b'0' && v <= b'9' { if (b'0'..=b'9').contains(&v) {
Some(v - 0x30) // ord('0') == 0x30 Some(v - 0x30) // ord('0') == 0x30
} else if v >= b'A' && v <= b'F' { } else if (b'A'..=b'F').contains(&v) {
Some(v - 0x41 + 10) // ord('A') == 0x41 Some(v - 0x41 + 10) // ord('A') == 0x41
} else if v > b'a' && v <= b'f' { } else if (b'a'..=b'f').contains(&v) {
Some(v - 0x61 + 10) // ord('a') == 0x61 Some(v - 0x61 + 10) // ord('a') == 0x61
} else { } else {
None None
@@ -206,23 +200,89 @@ mod tests {
use super::*; use super::*;
use crate::{Path, ResourceDef}; use crate::{Path, ResourceDef};
const PROTECTED: &[u8] = b"%/+";
fn match_url(pattern: &'static str, url: impl AsRef<str>) -> Path<Url> {
let re = ResourceDef::new(pattern);
let uri = Uri::try_from(url.as_ref()).unwrap();
let mut path = Path::new(Url::new(uri));
assert!(re.capture_match_info(&mut path));
path
}
fn percent_encode(data: &[u8]) -> String {
data.iter().map(|c| format!("%{:02X}", c)).collect()
}
#[test] #[test]
fn test_parse_url() { fn test_parse_url() {
let re = ResourceDef::new("/user/{id}/test"); let re = "/user/{id}/test";
let url = Uri::try_from("/user/2345/test").unwrap(); let path = match_url(re, "/user/2345/test");
let mut path = Path::new(Url::new(url));
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345"); assert_eq!(path.get("id").unwrap(), "2345");
let url = Uri::try_from("/user/qwe%25/test").unwrap(); // "%25" should never be decoded into '%' to guarantee the output is a valid
let mut path = Path::new(Url::new(url)); // percent-encoded format
assert!(re.match_path(&mut path)); let path = match_url(re, "/user/qwe%25/test");
assert_eq!(path.get("id").unwrap(), "qwe%"); assert_eq!(path.get("id").unwrap(), "qwe%25");
let url = Uri::try_from("/user/qwe%25rty/test").unwrap(); let path = match_url(re, "/user/qwe%25rty/test");
let mut path = Path::new(Url::new(url)); assert_eq!(path.get("id").unwrap(), "qwe%25rty");
assert!(re.match_path(&mut path)); }
assert_eq!(path.get("id").unwrap(), "qwe%rty");
#[test]
fn test_protected_chars() {
let encoded = percent_encode(PROTECTED);
let path = match_url("/user/{id}/test", format!("/user/{}/test", encoded));
assert_eq!(path.get("id").unwrap(), &encoded);
}
#[test]
fn test_non_protected_ascii() {
let nonprotected_ascii = ('\u{0}'..='\u{7F}')
.filter(|&c| c.is_ascii() && !PROTECTED.contains(&(c as u8)))
.collect::<String>();
let encoded = percent_encode(nonprotected_ascii.as_bytes());
let path = match_url("/user/{id}/test", format!("/user/{}/test", encoded));
assert_eq!(path.get("id").unwrap(), &nonprotected_ascii);
}
#[test]
fn test_valid_utf8_multibyte() {
let test = ('\u{FF00}'..='\u{FFFF}').collect::<String>();
let encoded = percent_encode(test.as_bytes());
let path = match_url("/a/{id}/b", format!("/a/{}/b", &encoded));
assert_eq!(path.get("id").unwrap(), &test);
}
#[test]
fn test_invalid_utf8() {
let invalid_utf8 = percent_encode((0x80..=0xff).collect::<Vec<_>>().as_slice());
let uri = Uri::try_from(format!("/{}", invalid_utf8)).unwrap();
let path = Path::new(Url::new(uri));
// We should always get a valid utf8 string
assert!(String::from_utf8(path.path().as_bytes().to_owned()).is_ok());
}
#[test]
fn test_from_hex() {
let hex = b"0123456789abcdefABCDEF";
for i in 0..256 {
let c = i as u8;
if hex.contains(&c) {
assert!(from_hex(c).is_some())
} else {
assert!(from_hex(c).is_none())
}
}
let expected = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15,
];
for i in 0..hex.len() {
assert_eq!(from_hex(hex[i]).unwrap(), expected[i]);
}
} }
} }
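
Not part of the diff: a sketch of the new "%25 stays encoded" behavior from the tests above, assuming actix-router is built with its http feature and the http crate is available.

use std::convert::TryFrom;

use actix_router::{Path, ResourceDef, Url};
use http::Uri;

fn percent_example() {
    let resource = ResourceDef::new("/user/{id}/test");
    let uri = Uri::try_from("/user/qwe%25rty/test").unwrap();
    let mut path = Path::new(Url::new(uri));

    assert!(resource.capture_match_info(&mut path));
    // "%25" is preserved, so the captured value stays valid percent-encoded data.
    assert_eq!(path.get("id"), Some("qwe%25rty"));
}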


@@ -1,95 +1,158 @@
# Changes # Changes
## [1.0.0] - 2019-12-11 ## Unreleased - 2021-xx-xx
## 2.2.0 - 2021-03-29
* **BREAKING** `ActixStream::{poll_read_ready, poll_write_ready}` methods now return
`Ready` object in ok variant. [#293]
* Breakage is acceptable since `ActixStream` was not intended to be public.
[#293]: https://github.com/actix/actix-net/pull/293
## 2.1.0 - 2021-02-24
* Add `ActixStream` extension trait to include readiness methods. [#276]
* Re-export `tokio::net::TcpSocket` in `net` module [#282]
[#276]: https://github.com/actix/actix-net/pull/276
[#282]: https://github.com/actix/actix-net/pull/282
## 2.0.2 - 2021-02-06
* Add `Arbiter::handle` to get a handle of an owned Arbiter. [#274]
* Add `System::try_current` for situations where actix may or may not be running a System. [#275]
[#274]: https://github.com/actix/actix-net/pull/274
[#275]: https://github.com/actix/actix-net/pull/275
## 2.0.1 - 2021-02-06
* Expose `JoinError` from Tokio. [#271]
[#271]: https://github.com/actix/actix-net/pull/271
## 2.0.0 - 2021-02-02
* Remove all Arbiter-local storage methods. [#262]
* Re-export `tokio::pin`. [#262]
[#262]: https://github.com/actix/actix-net/pull/262
## 2.0.0-beta.3 - 2021-01-31
* Remove `run_in_tokio`, `attach_to_tokio` and `AsyncSystemRunner`. [#253]
* Return `JoinHandle` from `actix_rt::spawn`. [#253]
* Remove old `Arbiter::spawn`. Implementation is now inlined into `actix_rt::spawn`. [#253]
* Rename `Arbiter::{send => spawn}` and `Arbiter::{exec_fn => spawn_fn}`. [#253]
* Remove `Arbiter::exec`. [#253]
* Remove deprecated `Arbiter::local_join` and `Arbiter::is_running`. [#253]
* `Arbiter::spawn` now accepts !Unpin futures. [#256]
* `System::new` no longer takes arguments. [#257]
* Remove `System::with_current`. [#257]
* Remove `Builder`. [#257]
* Add `System::with_init` as replacement for `Builder::run`. [#257]
* Rename `System::{is_set => is_registered}`. [#257]
* Add `ArbiterHandle` for sending messages to non-current-thread arbiters. [#257].
* `System::arbiter` now returns an `&ArbiterHandle`. [#257]
* `Arbiter::current` now returns an `ArbiterHandle` instead. [#257]
* `Arbiter::join` now takes self by value. [#257]
[#253]: https://github.com/actix/actix-net/pull/253
[#254]: https://github.com/actix/actix-net/pull/254
[#256]: https://github.com/actix/actix-net/pull/256
[#257]: https://github.com/actix/actix-net/pull/257
## 2.0.0-beta.2 - 2021-01-09
* Add `task` mod with re-export of `tokio::task::{spawn_blocking, yield_now, JoinHandle}` [#245]
* Add default "macros" feature to allow faster compile times when using `default-features=false`.
[#245]: https://github.com/actix/actix-net/pull/245
## 2.0.0-beta.1 - 2020-12-28
* Add `System::attach_to_tokio` method. [#173]
* Update `tokio` dependency to `1.0`. [#236]
* Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep`
to stay aligned with Tokio's naming. [#236]
* Remove `'static` lifetime requirement for `Runtime::block_on` and `SystemRunner::block_on`.
* These methods now accept `&self` when calling. [#236]
* Remove `'static` lifetime requirement for `System::run` and `Builder::run`. [#236]
* `Arbiter::spawn` now panics when `System` is not in scope. [#207]
* Fix work load issue by removing `PENDING` thread local. [#207]
[#207]: https://github.com/actix/actix-net/pull/207
[#236]: https://github.com/actix/actix-net/pull/236
## 1.1.1 - 2020-04-30
* Fix memory leak due to [#94] (see [#129] for more detail)
[#129]: https://github.com/actix/actix-net/issues/129
## 1.1.0 - 2020-04-08 (YANKED)
* Expose `System::is_set` to check if current system has been started [#99]
* Add `Arbiter::is_running` to check if event loop is running [#124]
* Add `Arbiter::local_join` associated function
to get be able to `await` for spawned futures [#94]
[#94]: https://github.com/actix/actix-net/pull/94
[#99]: https://github.com/actix/actix-net/pull/99
[#124]: https://github.com/actix/actix-net/pull/124
## 1.0.0 - 2019-12-11
* Update dependencies * Update dependencies
## [1.0.0-alpha.3] - 2019-12-07
### Fixed
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to tokio 0.2
* Fix compilation on non-unix platforms * Fix compilation on non-unix platforms
### Changed
* Migrate to tokio 0.2
## [1.0.0-alpha.2] - 2019-12-02
Added
## 1.0.0-alpha.2 - 2019-12-02
* Export `main` and `test` attribute macros * Export `main` and `test` attribute macros
* Export `time` module (re-export of tokio-timer) * Export `time` module (re-export of tokio-timer)
* Export `net` module (re-export of tokio-net) * Export `net` module (re-export of tokio-net)
## [1.0.0-alpha.1] - 2019-11-22 ## 1.0.0-alpha.1 - 2019-11-22
### Changed
* Migrate to std::future and tokio 0.2 * Migrate to std::future and tokio 0.2
## [0.2.6] - 2019-11-14 ## 0.2.6 - 2019-11-14
* Allow to join arbiter's thread. #60
### Fixed
* Fix arbiter's thread panic message. * Fix arbiter's thread panic message.
### Added
* Allow to join arbiter's thread. #60
## [0.2.5] - 2019-09-02
### Added
## 0.2.5 - 2019-09-02
* Add arbiter specific storage * Add arbiter specific storage
## [0.2.4] - 2019-07-17 ## 0.2.4 - 2019-07-17
### Changed
* Avoid a copy of the Future when initializing the Box. #29 * Avoid a copy of the Future when initializing the Box. #29
## [0.2.3] - 2019-06-22 ## 0.2.3 - 2019-06-22
* Allow to start System using existing CurrentThread Handle #22
### Added
* Allow to start System using exsiting CurrentThread Handle #22
## [0.2.2] - 2019-03-28 ## 0.2.2 - 2019-03-28
### Changed
* Moved `blocking` module to `actix-threadpool` crate * Moved `blocking` module to `actix-threadpool` crate
## [0.2.1] - 2019-03-11 ## 0.2.1 - 2019-03-11
### Added
* Added `blocking` module * Added `blocking` module
* Added `Arbiter::exec_fn` - execute fn on the arbiter's thread
* Arbiter::exec_fn - execute fn on the arbiter's thread * Added `Arbiter::exec` - execute fn on the arbiter's thread and wait result
* Arbiter::exec - execute fn on the arbiter's thread and wait result
## [0.2.0] - 2019-03-06 ## 0.2.0 - 2019-03-06
* `run` method returns `io::Result<()>` * `run` method returns `io::Result<()>`
* Removed `Handle` * Removed `Handle`
## [0.1.0] - 2018-12-09 ## 0.1.0 - 2018-12-09
* Initial release * Initial release


@@ -1,24 +1,33 @@
[package] [package]
name = "actix-rt" name = "actix-rt"
version = "1.0.0" version = "2.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = [
description = "Actix runtime" "Nikolay Kim <fafhrd91@gmail.com>",
keywords = ["network", "framework", "async", "futures"] "Rob Ede <robjtede@icloud.com>",
]
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
keywords = ["async", "futures", "io", "runtime"]
homepage = "https://actix.rs" homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git" repository = "https://github.com/actix/actix-net"
documentation = "https://docs.rs/actix-rt/" documentation = "https://docs.rs/actix-rt"
categories = ["network-programming", "asynchronous"] categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0" license = "MIT OR Apache-2.0"
edition = "2018" edition = "2018"
workspace = ".."
[lib] [lib]
name = "actix_rt" name = "actix_rt"
path = "src/lib.rs" path = "src/lib.rs"
[features]
default = ["macros"]
macros = ["actix-macros"]
[dependencies] [dependencies]
actix-macros = "0.1.0" actix-macros = { version = "0.2.0", optional = true }
actix-threadpool = "0.3"
futures = "0.3.1" futures-core = { version = "0.3", default-features = false }
copyless = "0.1.4" tokio = { version = "1.3", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
tokio = { version = "0.2.4", default-features=false, features = ["rt-core", "rt-util", "io-driver", "tcp", "uds", "udp", "time", "signal", "stream"] }
[dev-dependencies]
tokio = { version = "1.2", features = ["full"] }
hyper = { version = "0.14", default-features = false, features = ["server", "tcp", "http1"] }
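
Not part of the manifest above: the default "macros" feature re-exports the attribute macros from actix-macros; a minimal sketch of what that enables in a downstream crate.

// Requires actix-rt's default "macros" feature; sets up a System and blocks on
// the async body, similar to #[tokio::main] but single-threaded.
#[actix_rt::main]
async fn main() {
    println!("running inside an actix-rt System");
}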

actix-rt/README.md (new file, 14 lines)

@@ -0,0 +1,14 @@
# actix-rt
> Tokio-based single-threaded async runtime for the Actix ecosystem.
[![crates.io](https://img.shields.io/crates/v/actix-rt?label=latest)](https://crates.io/crates/actix-rt)
[![Documentation](https://docs.rs/actix-rt/badge.svg?version=2.2.0)](https://docs.rs/actix-rt/2.2.0)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-rt.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-rt/2.2.0/status.svg)](https://deps.rs/crate/actix-rt/2.2.0)
![Download](https://img.shields.io/crates/d/actix-rt.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/WghFtEH6Hb)
See crate documentation for more: https://docs.rs/actix-rt.


@@ -0,0 +1,28 @@
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};
use std::convert::Infallible;
use std::net::SocketAddr;
async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
Ok(Response::new(Body::from("Hello World")))
}
fn main() {
actix_rt::System::with_tokio_rt(|| {
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
})
.block_on(async {
let make_service =
make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) });
let server =
Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))).serve(make_service);
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
})
}


@@ -0,0 +1,60 @@
//! An example on how to build a multi-thread tokio runtime for Actix System.
//! Then spawn async task that can make use of work stealing of tokio runtime.
use actix_rt::System;
fn main() {
System::with_tokio_rt(|| {
// build system with a multi-thread tokio runtime.
tokio::runtime::Builder::new_multi_thread()
.worker_threads(2)
.enable_all()
.build()
.unwrap()
})
.block_on(async_main());
}
// async main function that acts like #[actix_web::main] or #[tokio::main]
async fn async_main() {
let (tx, rx) = tokio::sync::oneshot::channel();
// get a handle to system arbiter and spawn async task on it
System::current().arbiter().spawn(async {
// use tokio::spawn to get inside the context of multi thread tokio runtime
let h1 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
});
// work stealing occurs for this task spawn
let h2 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
});
h1.await.unwrap();
h2.await.unwrap();
let _ = tx.send(());
});
rx.await.unwrap();
let (tx, rx) = tokio::sync::oneshot::channel();
let now = std::time::Instant::now();
// without additional tokio::spawn, all spawned tasks run on single thread
System::current().arbiter().spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
let _ = tx.send(());
});
// previous spawn task has blocked the system arbiter thread
// so this task will wait for 2 seconds until it can be run
System::current().arbiter().spawn(async move {
println!("thread id is {:?}", std::thread::current().id());
assert!(now.elapsed() > std::time::Duration::from_secs(2));
});
rx.await.unwrap();
}


@@ -1,33 +1,30 @@
use std::any::{Any, TypeId}; use std::{
use std::cell::{Cell, RefCell}; cell::RefCell,
use std::collections::HashMap; fmt,
use std::pin::Pin; future::Future,
use std::sync::atomic::{AtomicUsize, Ordering}; pin::Pin,
use std::task::{Context, Poll}; sync::atomic::{AtomicUsize, Ordering},
use std::{fmt, thread}; task::{Context, Poll},
thread,
};
use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; use futures_core::ready;
use futures::channel::oneshot::{channel, Canceled, Sender}; use tokio::{sync::mpsc, task::LocalSet};
use futures::{future, Future, FutureExt, Stream};
use crate::runtime::Runtime; use crate::{
use crate::system::System; runtime::{default_tokio_runtime, Runtime},
system::{System, SystemCommand},
use copyless::BoxHelper; };
thread_local!(
static ADDR: RefCell<Option<Arbiter>> = RefCell::new(None);
static RUNNING: Cell<bool> = Cell::new(false);
static Q: RefCell<Vec<Pin<Box<dyn Future<Output = ()>>>>> = RefCell::new(Vec::new());
static STORAGE: RefCell<HashMap<TypeId, Box<dyn Any>>> = RefCell::new(HashMap::new());
);
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0); pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static HANDLE: RefCell<Option<ArbiterHandle>> = RefCell::new(None);
);
pub(crate) enum ArbiterCommand { pub(crate) enum ArbiterCommand {
Stop, Stop,
Execute(Box<dyn Future<Output = ()> + Unpin + Send>), Execute(Pin<Box<dyn Future<Output = ()> + Send>>),
ExecuteFn(Box<dyn FnExec>),
} }
impl fmt::Debug for ArbiterCommand { impl fmt::Debug for ArbiterCommand {
@@ -35,377 +32,229 @@ impl fmt::Debug for ArbiterCommand {
match self { match self {
ArbiterCommand::Stop => write!(f, "ArbiterCommand::Stop"), ArbiterCommand::Stop => write!(f, "ArbiterCommand::Stop"),
ArbiterCommand::Execute(_) => write!(f, "ArbiterCommand::Execute"), ArbiterCommand::Execute(_) => write!(f, "ArbiterCommand::Execute"),
ArbiterCommand::ExecuteFn(_) => write!(f, "ArbiterCommand::ExecuteFn"),
} }
} }
} }
#[derive(Debug)] /// A handle for sending spawn and stop messages to an [Arbiter].
/// Arbiters provide an asynchronous execution environment for actors, functions #[derive(Debug, Clone)]
/// and futures. When an Arbiter is created, they spawn a new OS thread, and pub struct ArbiterHandle {
/// host an event loop. Some Arbiter functions execute on the current thread. tx: mpsc::UnboundedSender<ArbiterCommand>,
pub struct Arbiter {
sender: UnboundedSender<ArbiterCommand>,
thread_handle: Option<thread::JoinHandle<()>>,
} }
impl Clone for Arbiter { impl ArbiterHandle {
fn clone(&self) -> Self { pub(crate) fn new(tx: mpsc::UnboundedSender<ArbiterCommand>) -> Self {
Self::with_sender(self.sender.clone()) Self { tx }
}
}
impl Default for Arbiter {
fn default() -> Self {
Self::new()
}
}
impl Arbiter {
pub(crate) fn new_system() -> Self {
let (tx, rx) = unbounded();
let arb = Arbiter::with_sender(tx);
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
RUNNING.with(|cell| cell.set(false));
STORAGE.with(|cell| cell.borrow_mut().clear());
Arbiter::spawn(ArbiterController { stop: None, rx });
arb
} }
/// Returns the current thread's arbiter's address. If no Arbiter is present, then this /// Send a future to the [Arbiter]'s thread and spawn it.
/// function will panic! ///
pub fn current() -> Arbiter { /// If you require a result, include a response channel in the future.
ADDR.with(|cell| match *cell.borrow() { ///
Some(ref addr) => addr.clone(), /// Returns true if future was sent successfully and false if the [Arbiter] has died.
None => panic!("Arbiter is not running"), pub fn spawn<Fut>(&self, future: Fut) -> bool
})
}
/// Stop arbiter from continuing it's event loop.
pub fn stop(&self) {
let _ = self.sender.unbounded_send(ArbiterCommand::Stop);
}
/// Spawn new thread and run event loop in spawned thread.
/// Returns address of newly created arbiter.
pub fn new() -> Arbiter {
let id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt:worker:{}", id);
let sys = System::current();
let (arb_tx, arb_rx) = unbounded();
let arb_tx2 = arb_tx.clone();
let handle = thread::Builder::new()
.name(name.clone())
.spawn(move || {
let mut rt = Runtime::new().expect("Can not create Runtime");
let arb = Arbiter::with_sender(arb_tx);
let (stop, stop_rx) = channel();
RUNNING.with(|cell| cell.set(true));
STORAGE.with(|cell| cell.borrow_mut().clear());
System::set_current(sys);
// start arbiter controller
rt.spawn(ArbiterController {
stop: Some(stop),
rx: arb_rx,
});
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
// register arbiter
let _ = System::current()
.sys()
.unbounded_send(SystemCommand::RegisterArbiter(id, arb));
// run loop
let _ = match rt.block_on(stop_rx) {
Ok(code) => code,
Err(_) => 1,
};
// unregister arbiter
let _ = System::current()
.sys()
.unbounded_send(SystemCommand::UnregisterArbiter(id));
})
.unwrap_or_else(|err| {
panic!("Cannot spawn an arbiter's thread {:?}: {:?}", &name, err)
});
Arbiter {
sender: arb_tx2,
thread_handle: Some(handle),
}
}
pub(crate) fn run_system(rt: Option<&Runtime>) {
RUNNING.with(|cell| cell.set(true));
Q.with(|cell| {
let mut v = cell.borrow_mut();
for fut in v.drain(..) {
if let Some(rt) = rt {
rt.spawn(fut);
} else {
tokio::task::spawn_local(fut);
}
}
});
}
pub(crate) fn stop_system() {
RUNNING.with(|cell| cell.set(false));
}
/// Spawn a future on the current thread. This does not create a new Arbiter
/// or Arbiter address, it is simply a helper for spawning futures on the current
/// thread.
pub fn spawn<F>(future: F)
where where
F: Future<Output = ()> + 'static, Fut: Future<Output = ()> + Send + 'static,
{ {
RUNNING.with(move |cell| { self.tx
if cell.get() { .send(ArbiterCommand::Execute(Box::pin(future)))
// Spawn the future on running executor .is_ok()
tokio::task::spawn_local(future);
} else {
// Box the future and push it to the queue, this results in double boxing
// because the executor boxes the future again, but works for now
Q.with(move |cell| {
cell.borrow_mut()
.push(unsafe { Pin::new_unchecked(Box::alloc().init(future)) })
});
}
});
} }
/// Executes a future on the current thread. This does not create a new Arbiter /// Send a function to the [Arbiter]'s thread and execute it.
/// or Arbiter address, it is simply a helper for executing futures on the current ///
/// thread. /// Any result from the function is discarded. If you require a result, include a response
pub fn spawn_fn<F, R>(f: F) /// channel in the function.
where ///
F: FnOnce() -> R + 'static, /// Returns true if function was sent successfully and false if the [Arbiter] has died.
R: Future<Output = ()> + 'static, pub fn spawn_fn<F>(&self, f: F) -> bool
{
Arbiter::spawn(future::lazy(|_| f()).flatten())
}
/// Send a future to the Arbiter's thread, and spawn it.
pub fn send<F>(&self, future: F)
where
F: Future<Output = ()> + Send + Unpin + 'static,
{
let _ = self
.sender
.unbounded_send(ArbiterCommand::Execute(Box::new(future)));
}
/// Send a function to the Arbiter's thread, and execute it. Any result from the function
/// is discarded.
pub fn exec_fn<F>(&self, f: F)
where where
F: FnOnce() + Send + 'static, F: FnOnce() + Send + 'static,
{ {
let _ = self self.spawn(async { f() })
.sender
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
f();
})));
} }
/// Send a function to the Arbiter's thread. This function will be executed asynchronously. /// Instruct [Arbiter] to stop processing its event loop.
/// A future is created, and when resolved will contain the result of the function sent ///
/// to the Arbiters thread. /// Returns true if stop message was sent successfully and false if the [Arbiter] has
pub fn exec<F, R>(&self, f: F) -> impl Future<Output = Result<R, Canceled>> /// been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
}
/// An Arbiter represents a thread that provides an asynchronous execution environment for futures
/// and functions.
///
/// When an arbiter is created, it spawns a new [OS thread](thread), and hosts an event loop.
#[derive(Debug)]
pub struct Arbiter {
tx: mpsc::UnboundedSender<ArbiterCommand>,
thread_handle: thread::JoinHandle<()>,
}
impl Arbiter {
/// Spawn a new Arbiter thread and start its event loop.
///
/// # Panics
/// Panics if a [System] is not registered on the current thread.
#[allow(clippy::new_without_default)]
pub fn new() -> Arbiter {
Self::with_tokio_rt(|| {
default_tokio_runtime().expect("Cannot create new Arbiter's Runtime.")
})
}
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
where where
F: FnOnce() -> R + Send + 'static, F: Fn() -> tokio::runtime::Runtime + Send + 'static,
R: Send + 'static,
{ {
let (tx, rx) = channel(); let sys = System::current();
let _ = self let system_id = sys.id();
.sender let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
if !tx.is_canceled() { let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
let _ = tx.send(f()); let (tx, rx) = mpsc::unbounded_channel();
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
let thread_handle = thread::Builder::new()
.name(name.clone())
.spawn({
let tx = tx.clone();
move || {
let rt = Runtime::from(runtime_factory());
let hnd = ArbiterHandle::new(tx);
System::set_current(sys);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
// register arbiter
let _ = System::current()
.tx()
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
ready_tx.send(()).unwrap();
// run arbiter event processing loop
rt.block_on(ArbiterRunner { rx });
// deregister arbiter
let _ = System::current()
.tx()
.send(SystemCommand::DeregisterArbiter(arb_id));
} }
}))); })
rx .unwrap_or_else(|err| {
panic!("Cannot spawn Arbiter's thread: {:?}. {:?}", &name, err)
});
ready_rx.recv().unwrap();
Arbiter { tx, thread_handle }
} }
/// Set item to arbiter storage /// Sets up an Arbiter runner in a new System using the provided runtime local task set.
pub fn set_item<T: 'static>(item: T) { pub(crate) fn in_new_system(local: &LocalSet) -> ArbiterHandle {
STORAGE.with(move |cell| cell.borrow_mut().insert(TypeId::of::<T>(), Box::new(item))); let (tx, rx) = mpsc::unbounded_channel();
let hnd = ArbiterHandle::new(tx);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
local.spawn_local(ArbiterRunner { rx });
hnd
} }
/// Check if arbiter storage contains item /// Return a handle to this Arbiter's message sender.
pub fn contains_item<T: 'static>() -> bool { pub fn handle(&self) -> ArbiterHandle {
STORAGE.with(move |cell| cell.borrow().get(&TypeId::of::<T>()).is_some()) ArbiterHandle::new(self.tx.clone())
} }
/// Get a reference to a type previously inserted on this arbiter's storage. /// Return a handle to the current thread's Arbiter's message sender.
/// ///
/// Panics is item is not inserted /// # Panics
pub fn get_item<T: 'static, F, R>(mut f: F) -> R /// Panics if no Arbiter is running on the current thread.
where pub fn current() -> ArbiterHandle {
F: FnMut(&T) -> R, HANDLE.with(|cell| match *cell.borrow() {
{ Some(ref hnd) => hnd.clone(),
STORAGE.with(move |cell| { None => panic!("Arbiter is not running."),
let st = cell.borrow();
let item = st
.get(&TypeId::of::<T>())
.and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref())
.unwrap();
f(item)
}) })
} }
/// Get a mutable reference to a type previously inserted on this arbiter's storage. /// Stop Arbiter from continuing its event loop.
/// ///
/// Panics is item is not inserted /// Returns true if stop message was sent successfully and false if the Arbiter has been dropped.
pub fn get_mut_item<T: 'static, F, R>(mut f: F) -> R pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
/// Send a future to the Arbiter's thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the Arbiter has died.
pub fn spawn<Fut>(&self, future: Fut) -> bool
where where
F: FnMut(&mut T) -> R, Fut: Future<Output = ()> + Send + 'static,
{ {
STORAGE.with(move |cell| { self.tx
let mut st = cell.borrow_mut(); .send(ArbiterCommand::Execute(Box::pin(future)))
let item = st .is_ok()
.get_mut(&TypeId::of::<T>())
.and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut())
.unwrap();
f(item)
})
} }
fn with_sender(sender: UnboundedSender<ArbiterCommand>) -> Self { /// Send a function to the Arbiter's thread and execute it.
Self { ///
sender, /// Any result from the function is discarded. If you require a result, include a response
thread_handle: None, /// channel in the function.
} ///
/// Returns true if function was sent successfully and false if the Arbiter has died.
pub fn spawn_fn<F>(&self, f: F) -> bool
where
F: FnOnce() + Send + 'static,
{
self.spawn(async { f() })
} }
/// Wait for the event loop to stop by joining the underlying thread (if have Some). /// Wait for Arbiter's event loop to complete.
pub fn join(&mut self) -> thread::Result<()> { ///
if let Some(thread_handle) = self.thread_handle.take() { /// Joins the underlying OS thread handle. See [`JoinHandle::join`](thread::JoinHandle::join).
thread_handle.join() pub fn join(self) -> thread::Result<()> {
} else { self.thread_handle.join()
Ok(())
}
} }
} }
struct ArbiterController { /// A persistent future that processes [Arbiter] commands.
stop: Option<Sender<i32>>, struct ArbiterRunner {
rx: UnboundedReceiver<ArbiterCommand>, rx: mpsc::UnboundedReceiver<ArbiterCommand>,
} }
impl Drop for ArbiterController { impl Future for ArbiterRunner {
fn drop(&mut self) {
if thread::panicking() {
if System::current().stop_on_panic() {
eprintln!("Panic in Arbiter thread, shutting down system.");
System::current().stop_with_code(1)
} else {
eprintln!("Panic in Arbiter thread.");
}
}
}
}
impl Future for ArbiterController {
type Output = (); type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop { loop {
match Pin::new(&mut self.rx).poll_next(cx) { match ready!(Pin::new(&mut self.rx).poll_recv(cx)) {
Poll::Ready(None) => return Poll::Ready(()), // channel closed; no more messages can be received
Poll::Ready(Some(item)) => match item { None => return Poll::Ready(()),
// process arbiter command
Some(item) => match item {
ArbiterCommand::Stop => { ArbiterCommand::Stop => {
if let Some(stop) = self.stop.take() {
let _ = stop.send(0);
};
return Poll::Ready(()); return Poll::Ready(());
} }
ArbiterCommand::Execute(fut) => { ArbiterCommand::Execute(task_fut) => {
tokio::task::spawn_local(fut); tokio::task::spawn_local(task_fut);
}
ArbiterCommand::ExecuteFn(f) => {
f.call_box();
} }
}, },
Poll::Pending => return Poll::Pending,
} }
} }
} }
} }
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, Arbiter),
UnregisterArbiter(usize),
}
#[derive(Debug)]
pub(crate) struct SystemArbiter {
stop: Option<Sender<i32>>,
commands: UnboundedReceiver<SystemCommand>,
arbiters: HashMap<usize, Arbiter>,
}
impl SystemArbiter {
pub(crate) fn new(stop: Sender<i32>, commands: UnboundedReceiver<SystemCommand>) -> Self {
SystemArbiter {
commands,
stop: Some(stop),
arbiters: HashMap::new(),
}
}
}
impl Future for SystemArbiter {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match Pin::new(&mut self.commands).poll_next(cx) {
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(cmd)) => match cmd {
SystemCommand::Exit(code) => {
// stop arbiters
for arb in self.arbiters.values() {
arb.stop();
}
// stop event loop
if let Some(stop) = self.stop.take() {
let _ = stop.send(code);
}
}
SystemCommand::RegisterArbiter(name, hnd) => {
self.arbiters.insert(name, hnd);
}
SystemCommand::UnregisterArbiter(name) => {
self.arbiters.remove(&name);
}
},
Poll::Pending => return Poll::Pending,
}
}
}
}
pub trait FnExec: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnExec for F
where
F: FnOnce() + Send + 'static,
{
#[allow(clippy::boxed_local)]
fn call_box(self: Box<Self>) {
(*self)()
}
}
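
Not part of the diff: a sketch of the "include a response channel" pattern mentioned in the ArbiterHandle docs above, sending work to an Arbiter thread and receiving a value back over a std channel.

use std::sync::mpsc;

use actix_rt::{Arbiter, System};

fn handle_example() {
    let _system = System::new();
    let arbiter = Arbiter::new();
    let handle = arbiter.handle();

    let (tx, rx) = mpsc::channel::<u32>();
    // spawn() returns false if the Arbiter has already died.
    let sent = handle.spawn(async move {
        tx.send(6 * 7).unwrap();
    });
    assert!(sent);
    assert_eq!(rx.recv().unwrap(), 42);

    handle.stop();
    arbiter.join().unwrap();
}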


@@ -1,191 +0,0 @@
use std::borrow::Cow;
use std::io;
use futures::channel::mpsc::unbounded;
use futures::channel::oneshot::{channel, Receiver};
use futures::future::{lazy, Future, FutureExt};
use tokio::task::LocalSet;
use crate::arbiter::{Arbiter, SystemArbiter};
use crate::runtime::Runtime;
use crate::system::System;
/// Builder struct for a actix runtime.
///
/// Either use `Builder::build` to create a system and start actors.
/// Alternatively, use `Builder::run` to start the tokio runtime and
/// run a function in its context.
pub struct Builder {
/// Name of the System. Defaults to "actix" if unset.
name: Cow<'static, str>,
/// Whether the Arbiter will stop the whole System on uncaught panic. Defaults to false.
stop_on_panic: bool,
}
impl Builder {
pub(crate) fn new() -> Self {
Builder {
name: Cow::Borrowed("actix"),
stop_on_panic: false,
}
}
/// Sets the name of the System.
pub fn name<T: Into<String>>(mut self, name: T) -> Self {
self.name = Cow::Owned(name.into());
self
}
/// Sets the option 'stop_on_panic' which controls whether the System is stopped when an
/// uncaught panic is thrown from a worker thread.
///
/// Defaults to false.
pub fn stop_on_panic(mut self, stop_on_panic: bool) -> Self {
self.stop_on_panic = stop_on_panic;
self
}
/// Create new System.
///
/// This method panics if it can not create tokio runtime
pub fn build(self) -> SystemRunner {
self.create_runtime(|| {})
}
/// Create new System that can run asynchronously.
///
/// This method panics if it cannot start the system arbiter
pub(crate) fn build_async(self, local: &LocalSet) -> AsyncSystemRunner {
self.create_async_runtime(local)
}
/// This function will start tokio runtime and will finish once the
/// `System::stop()` message get called.
/// Function `f` get called within tokio runtime context.
pub fn run<F>(self, f: F) -> io::Result<()>
where
F: FnOnce() + 'static,
{
self.create_runtime(f).run()
}
fn create_async_runtime(self, local: &LocalSet) -> AsyncSystemRunner {
let (stop_tx, stop) = channel();
let (sys_sender, sys_receiver) = unbounded();
let system = System::construct(sys_sender, Arbiter::new_system(), self.stop_on_panic);
// system arbiter
let arb = SystemArbiter::new(stop_tx, sys_receiver);
// start the system arbiter
let _ = local.spawn_local(arb);
AsyncSystemRunner { stop, system }
}
fn create_runtime<F>(self, f: F) -> SystemRunner
where
F: FnOnce() + 'static,
{
let (stop_tx, stop) = channel();
let (sys_sender, sys_receiver) = unbounded();
let system = System::construct(sys_sender, Arbiter::new_system(), self.stop_on_panic);
// system arbiter
let arb = SystemArbiter::new(stop_tx, sys_receiver);
let mut rt = Runtime::new().unwrap();
rt.spawn(arb);
// init system arbiter and run configuration method
rt.block_on(lazy(move |_| f()));
SystemRunner { rt, stop, system }
}
}
#[derive(Debug)]
pub(crate) struct AsyncSystemRunner {
stop: Receiver<i32>,
system: System,
}
impl AsyncSystemRunner {
/// This function will start event loop and returns a future that
/// resolves once the `System::stop()` function is called.
pub(crate) fn run_nonblocking(self) -> impl Future<Output = Result<(), io::Error>> + Send {
let AsyncSystemRunner { stop, .. } = self;
// run loop
lazy(|_| {
Arbiter::run_system(None);
async {
let res = match stop.await {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
};
Arbiter::stop_system();
return res;
}
})
.flatten()
}
}
/// Helper object that runs System's event loop
#[must_use = "SystemRunner must be run"]
#[derive(Debug)]
pub struct SystemRunner {
rt: Runtime,
stop: Receiver<i32>,
system: System,
}
impl SystemRunner {
/// This function will start the event loop and will finish once
/// `System::stop()` is called.
pub fn run(self) -> io::Result<()> {
let SystemRunner { mut rt, stop, .. } = self;
// run loop
Arbiter::run_system(Some(&rt));
let result = match rt.block_on(stop) {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
};
Arbiter::stop_system();
result
}
/// Execute a future and wait for result.
pub fn block_on<F, O>(&mut self, fut: F) -> O
where
F: Future<Output = O> + 'static,
{
Arbiter::run_system(Some(&self.rt));
let res = self.rt.block_on(fut);
Arbiter::stop_system();
res
}
}
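For orientation, here is a minimal usage sketch of the legacy Builder/SystemRunner API shown above (a hypothetical example, not taken from the repository; it assumes the actix-rt 1.x API where `System::builder()` returns this `Builder`):

use actix_rt::System;

fn main() -> std::io::Result<()> {
    // `run` builds the runtime, executes the closure inside it, and then blocks
    // until `System::current().stop()` is called somewhere in the program.
    System::builder()
        .name("example")
        .stop_on_panic(false)
        .run(|| {
            actix_rt::spawn(async {
                println!("running inside the actix system");
                System::current().stop();
            });
        })
}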


@@ -1,66 +1,168 @@
//! A runtime implementation that runs everything on the current thread. //! Tokio-based single-threaded async runtime for the Actix ecosystem.
#![deny(rust_2018_idioms, warnings)] //!
#![allow(clippy::type_complexity)] //! In most parts of the Actix ecosystem, it has been chosen to use !Send futures. For this
//! reason, a single-threaded runtime is appropriate since it is guaranteed that futures will not
//! be moved between threads. This can result in small performance improvements over cases where
//! atomics would otherwise be needed.
//!
//! To achieve similar performance to multi-threaded, work-stealing runtimes, applications
//! using `actix-rt` will create multiple, mostly disconnected, single-threaded runtimes.
//! This approach has good performance characteristics for workloads where the majority of tasks
//! have similar runtime expense.
//!
//! The disadvantage is that idle threads will not steal work from very busy, stuck or otherwise
//! backlogged threads. Tasks that are disproportionately expensive should be offloaded to the
//! blocking task thread-pool using [`task::spawn_blocking`].
//!
//! # Examples
//! ```
//! use std::sync::mpsc;
//! use actix_rt::{Arbiter, System};
//!
//! let _ = System::new();
//!
//! let (tx, rx) = mpsc::channel::<u32>();
//!
//! let arbiter = Arbiter::new();
//! arbiter.spawn_fn(move || tx.send(42).unwrap());
//!
//! let num = rx.recv().unwrap();
//! assert_eq!(num, 42);
//!
//! arbiter.stop();
//! arbiter.join().unwrap();
//! ```
#[cfg(not(test))] // Work around for rust-lang/rust#62127 #![deny(rust_2018_idioms, nonstandard_style)]
#![allow(clippy::type_complexity)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use std::future::Future;
use tokio::task::JoinHandle;
// Cannot define a main macro when compiled into test harness.
// Workaround for https://github.com/rust-lang/rust/issues/62127.
#[cfg(all(feature = "macros", not(test)))]
pub use actix_macros::{main, test}; pub use actix_macros::{main, test};
mod arbiter; mod arbiter;
mod builder;
mod runtime; mod runtime;
mod system; mod system;
pub use self::arbiter::Arbiter; pub use self::arbiter::{Arbiter, ArbiterHandle};
pub use self::builder::{Builder, SystemRunner};
pub use self::runtime::Runtime; pub use self::runtime::Runtime;
pub use self::system::System; pub use self::system::{System, SystemRunner};
#[doc(hidden)] pub use tokio::pin;
pub use actix_threadpool as blocking;
/// Spawns a future on the current arbiter.
///
/// # Panics
///
/// This function panics if actix system is not running.
pub fn spawn<F>(f: F)
where
F: futures::Future<Output = ()> + 'static,
{
if !System::is_set() {
panic!("System is not running");
}
Arbiter::spawn(f);
}
/// Asynchronous signal handling
pub mod signal { pub mod signal {
//! Asynchronous signal handling (Tokio re-exports).
#[cfg(unix)] #[cfg(unix)]
pub mod unix { pub mod unix {
//! Unix specific signals (Tokio re-exports).
pub use tokio::signal::unix::*; pub use tokio::signal::unix::*;
} }
pub use tokio::signal::ctrl_c; pub use tokio::signal::ctrl_c;
} }
/// TCP/UDP/Unix bindings
pub mod net { pub mod net {
//! TCP/UDP/Unix bindings (mostly Tokio re-exports).
use std::{
future::Future,
io,
task::{Context, Poll},
};
pub use tokio::io::Ready;
use tokio::io::{AsyncRead, AsyncWrite, Interest};
pub use tokio::net::UdpSocket; pub use tokio::net::UdpSocket;
pub use tokio::net::{TcpListener, TcpStream}; pub use tokio::net::{TcpListener, TcpSocket, TcpStream};
#[cfg(unix)] #[cfg(unix)]
mod unix { pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
/// Extension trait over async read+write types that can also signal readiness.
#[doc(hidden)]
pub trait ActixStream: AsyncRead + AsyncWrite + Unpin {
/// Poll stream and check read readiness of Self.
///
/// See [tokio::net::TcpStream::poll_read_ready] for detail on intended use.
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
/// Poll stream and check write readiness of Self.
///
/// See [tokio::net::TcpStream::poll_write_ready] for detail on intended use.
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
}
impl ActixStream for TcpStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
} }
#[cfg(unix)] #[cfg(unix)]
pub use self::unix::*; impl ActixStream for UnixStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
}
impl<Io: ActixStream + ?Sized> ActixStream for Box<Io> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_read_ready(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_write_ready(cx)
}
}
} }
/// Utilities for tracking time.
pub mod time { pub mod time {
//! Utilities for tracking time (Tokio re-exports).
pub use tokio::time::Instant; pub use tokio::time::Instant;
pub use tokio::time::{delay_for, delay_until, Delay};
pub use tokio::time::{interval, interval_at, Interval}; pub use tokio::time::{interval, interval_at, Interval};
pub use tokio::time::{sleep, sleep_until, Sleep};
pub use tokio::time::{timeout, Timeout}; pub use tokio::time::{timeout, Timeout};
} }
pub mod task {
//! Task management (Tokio re-exports).
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
}
/// Spawns a future on the current thread.
///
/// # Panics
/// Panics if Actix system is not running.
#[inline]
pub fn spawn<Fut>(f: Fut) -> JoinHandle<()>
where
Fut: Future<Output = ()> + 'static,
{
tokio::task::spawn_local(f)
}
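To illustrate the offloading advice from the module docs above, here is a minimal sketch (hypothetical usage of the re-exports shown in this file) that keeps disproportionately expensive work off the single-threaded runtime:

use actix_rt::System;

fn main() {
    System::new().block_on(async {
        // Cheap async work stays on the single-threaded runtime.
        actix_rt::spawn(async {
            println!("lightweight task");
        });

        // Expensive, CPU-bound work is offloaded to the blocking thread pool.
        let sum = actix_rt::task::spawn_blocking(|| (0u64..1_000_000).sum::<u64>())
            .await
            .expect("blocking task panicked");

        println!("blocking result: {}", sum);
    });
}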


@@ -1,28 +1,29 @@
use std::future::Future; use std::{future::Future, io};
use std::io;
use tokio::{runtime, task::LocalSet};
/// Single-threaded runtime provides a way to start reactor use tokio::task::{JoinHandle, LocalSet};
/// and runtime on the current thread.
/// A Tokio-based runtime proxy.
/// ///
/// See [module level][mod] documentation for more details. /// All spawned futures will be executed on the current thread. Therefore, there is no `Send` bound
/// /// on submitted futures.
/// [mod]: index.html
#[derive(Debug)] #[derive(Debug)]
pub struct Runtime { pub struct Runtime {
local: LocalSet, local: LocalSet,
rt: runtime::Runtime, rt: tokio::runtime::Runtime,
}
pub(crate) fn default_tokio_runtime() -> io::Result<tokio::runtime::Runtime> {
tokio::runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()
} }
impl Runtime { impl Runtime {
#[allow(clippy::new_ret_no_self)]
/// Returns a new runtime initialized with default configuration values. /// Returns a new runtime initialized with default configuration values.
pub fn new() -> io::Result<Runtime> { #[allow(clippy::new_ret_no_self)]
let rt = runtime::Builder::new() pub fn new() -> io::Result<Self> {
.enable_io() let rt = default_tokio_runtime()?;
.enable_time()
.basic_scheduler()
.build()?;
Ok(Runtime { Ok(Runtime {
rt, rt,
@@ -30,63 +31,66 @@ impl Runtime {
}) })
} }
/// Spawn a future onto the single-threaded runtime. /// Reference to local task set.
pub(crate) fn local_set(&self) -> &LocalSet {
&self.local
}
/// Offload a future onto the single-threaded runtime.
/// ///
/// See [module level][mod] documentation for more details. /// The returned join handle can be used to await the future's result.
/// ///
/// [mod]: index.html /// See [crate root][crate] documentation for more details.
/// ///
/// # Examples /// # Examples
/// /// ```
/// ```rust,ignore /// let rt = actix_rt::Runtime::new().unwrap();
/// # use futures::{future, Future, Stream};
/// use actix_rt::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
/// ///
/// // Spawn a future onto the runtime /// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|_| { /// let handle = rt.spawn(async {
/// println!("running on the runtime"); /// println!("running on the runtime");
/// })); /// 42
/// # } /// });
/// # pub fn main() {} ///
/// assert_eq!(rt.block_on(handle).unwrap(), 42);
/// ``` /// ```
/// ///
/// # Panics /// # Panics
/// /// This function panics if the spawn fails. Failure occurs if the executor is currently at
/// This function panics if the spawn fails. Failure occurs if the executor /// capacity and is unable to spawn a new future.
/// is currently at capacity and is unable to spawn a new future. pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
pub fn spawn<F>(&self, future: F) -> &Self
where
F: Future<Output = ()> + 'static,
{
self.local.spawn_local(future);
self
}
/// Runs the provided future, blocking the current thread until the future
/// completes.
///
/// This function can be used to synchronously block the current thread
/// until the provided `future` has resolved either successfully or with an
/// error. The result of the future is then returned from this function
/// call.
///
/// Note that this function will **also** execute any spawned futures on the
/// current thread, but will **not** block until these other spawned futures
/// have completed. Once the function returns, any uncompleted futures
/// remain pending in the `Runtime` instance. These futures will not run
/// until `block_on` or `run` is called again.
///
/// The caller is responsible for ensuring that other spawned futures
/// complete execution by calling `block_on` or `run`.
pub fn block_on<F>(&mut self, f: F) -> F::Output
where where
F: Future + 'static, F: Future + 'static,
{ {
let res = self.local.block_on(&mut self.rt, f); self.local.spawn_local(future)
res }
/// Runs the provided future, blocking the current thread until the future completes.
///
/// This function can be used to synchronously block the current thread until the provided
/// `future` has resolved either successfully or with an error. The result of the future is
/// then returned from this function call.
///
/// Note that this function will also execute any spawned futures on the current thread, but
/// will not block until these other spawned futures have completed. Once the function returns,
/// any uncompleted futures remain pending in the `Runtime` instance. These futures will not run
/// until `block_on` or `run` is called again.
///
/// The caller is responsible for ensuring that other spawned futures complete execution by
/// calling `block_on` or `run`.
pub fn block_on<F>(&self, f: F) -> F::Output
where
F: Future,
{
self.local.block_on(&self.rt, f)
}
}
impl From<tokio::runtime::Runtime> for Runtime {
fn from(rt: tokio::runtime::Runtime) -> Self {
Self {
local: LocalSet::new(),
rt,
}
} }
} }
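The `From<tokio::runtime::Runtime>` impl above allows wrapping a caller-built current-thread runtime; a small sketch of how that might be used (hypothetical example):

use actix_rt::Runtime;

fn main() -> std::io::Result<()> {
    // Build a plain current-thread Tokio runtime, then wrap it so that futures
    // spawned through `Runtime::spawn` run on a LocalSet (no `Send` bound).
    let tokio_rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()?;
    let rt = Runtime::from(tokio_rt);

    let handle = rt.spawn(async { 40 + 2 });
    assert_eq!(rt.block_on(handle).unwrap(), 42);

    Ok(())
}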


@@ -1,77 +1,98 @@
use std::cell::RefCell; use std::{
use std::future::Future; cell::RefCell,
use std::io; collections::HashMap,
use std::sync::atomic::{AtomicUsize, Ordering}; future::Future,
io,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
};
use futures::channel::mpsc::UnboundedSender; use futures_core::ready;
use tokio::task::LocalSet; use tokio::sync::{mpsc, oneshot};
use crate::arbiter::{Arbiter, SystemCommand}; use crate::{arbiter::ArbiterHandle, runtime::default_tokio_runtime, Arbiter, Runtime};
use crate::builder::{Builder, SystemRunner};
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0); static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
/// System is a runtime manager.
#[derive(Clone, Debug)]
pub struct System {
id: usize,
sys: UnboundedSender<SystemCommand>,
arbiter: Arbiter,
stop_on_panic: bool,
}
thread_local!( thread_local!(
static CURRENT: RefCell<Option<System>> = RefCell::new(None); static CURRENT: RefCell<Option<System>> = RefCell::new(None);
); );
/// A manager for a per-thread distributed async runtime.
#[derive(Clone, Debug)]
pub struct System {
id: usize,
sys_tx: mpsc::UnboundedSender<SystemCommand>,
/// Handle to the first [Arbiter] that is created with the System.
arbiter_handle: ArbiterHandle,
}
impl System { impl System {
/// Constructs new system and sets it as current /// Create a new system.
///
/// # Panics
/// Panics if underlying Tokio runtime can not be created.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> SystemRunner {
Self::with_tokio_rt(|| {
default_tokio_runtime()
.expect("Default Actix (Tokio) runtime could not be created.")
})
}
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> SystemRunner
where
F: Fn() -> tokio::runtime::Runtime,
{
let (stop_tx, stop_rx) = oneshot::channel();
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
let rt = Runtime::from(runtime_factory());
let sys_arbiter = Arbiter::in_new_system(rt.local_set());
let system = System::construct(sys_tx, sys_arbiter.clone());
system
.tx()
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
.unwrap();
// init background system arbiter
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
rt.spawn(sys_ctrl);
SystemRunner {
rt,
stop_rx,
system,
}
}
/// Constructs new system and registers it on the current thread.
pub(crate) fn construct( pub(crate) fn construct(
sys: UnboundedSender<SystemCommand>, sys_tx: mpsc::UnboundedSender<SystemCommand>,
arbiter: Arbiter, arbiter_handle: ArbiterHandle,
stop_on_panic: bool,
) -> Self { ) -> Self {
let sys = System { let sys = System {
sys, sys_tx,
arbiter, arbiter_handle,
stop_on_panic,
id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst), id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst),
}; };
System::set_current(sys.clone()); System::set_current(sys.clone());
sys sys
} }
/// Build a new system with a customized tokio runtime.
///
/// This allows to customize the runtime. See struct level docs on
/// `Builder` for more information.
pub fn builder() -> Builder {
Builder::new()
}
#[allow(clippy::new_ret_no_self)]
/// Create new system.
///
/// This method panics if it can not create tokio runtime
pub fn new<T: Into<String>>(name: T) -> SystemRunner {
Self::builder().name(name).build()
}
#[allow(clippy::new_ret_no_self)]
/// Create new system using provided tokio Handle.
///
/// This method panics if it can not spawn system arbiter
pub fn run_in_tokio<T: Into<String>>(
name: T,
local: &LocalSet,
) -> impl Future<Output = io::Result<()>> {
Self::builder()
.name(name)
.build_async(local)
.run_nonblocking()
}
/// Get current running system. /// Get current running system.
///
/// # Panics
/// Panics if no system is registered on the current thread.
pub fn current() -> System { pub fn current() -> System {
CURRENT.with(|cell| match *cell.borrow() { CURRENT.with(|cell| match *cell.borrow() {
Some(ref sys) => sys.clone(), Some(ref sys) => sys.clone(),
@@ -79,67 +100,156 @@ impl System {
}) })
} }
/// Set current running system. /// Try to get current running system.
pub(crate) fn is_set() -> bool { ///
CURRENT.with(|cell| cell.borrow().is_some()) /// Returns `None` if no System has been started.
///
/// Contrary to `current`, this never panics.
pub fn try_current() -> Option<System> {
CURRENT.with(|cell| cell.borrow().clone())
} }
/// Set current running system. /// Get handle to the System's initial [Arbiter].
pub fn arbiter(&self) -> &ArbiterHandle {
&self.arbiter_handle
}
/// Check if there is a System registered on the current thread.
pub fn is_registered() -> bool {
CURRENT.with(|sys| sys.borrow().is_some())
}
/// Register given system on current thread.
#[doc(hidden)] #[doc(hidden)]
pub fn set_current(sys: System) { pub fn set_current(sys: System) {
CURRENT.with(|s| { CURRENT.with(|cell| {
*s.borrow_mut() = Some(sys); *cell.borrow_mut() = Some(sys);
}) })
} }
/// Execute function with system reference. /// Numeric system identifier.
pub fn with_current<F, R>(f: F) -> R ///
where /// Useful when using multiple Systems.
F: FnOnce(&System) -> R,
{
CURRENT.with(|cell| match *cell.borrow() {
Some(ref sys) => f(sys),
None => panic!("System is not running"),
})
}
/// System id
pub fn id(&self) -> usize { pub fn id(&self) -> usize {
self.id self.id
} }
/// Stop the system /// Stop the system (with code 0).
pub fn stop(&self) { pub fn stop(&self) {
self.stop_with_code(0) self.stop_with_code(0)
} }
/// Stop the system with a particular exit code. /// Stop the system with a given exit code.
pub fn stop_with_code(&self, code: i32) { pub fn stop_with_code(&self, code: i32) {
let _ = self.sys.unbounded_send(SystemCommand::Exit(code)); let _ = self.sys_tx.send(SystemCommand::Exit(code));
} }
pub(crate) fn sys(&self) -> &UnboundedSender<SystemCommand> { pub(crate) fn tx(&self) -> &mpsc::UnboundedSender<SystemCommand> {
&self.sys &self.sys_tx
} }
}
/// Return status of 'stop_on_panic' option which controls whether the System is stopped when an
/// uncaught panic is thrown from a worker thread. /// Runner that keeps a [System]'s event loop alive until stop message is received.
pub fn stop_on_panic(&self) -> bool { #[must_use = "A SystemRunner does nothing unless `run` is called."]
self.stop_on_panic #[derive(Debug)]
} pub struct SystemRunner {
rt: Runtime,
/// System arbiter stop_rx: oneshot::Receiver<i32>,
pub fn arbiter(&self) -> &Arbiter { system: System,
&self.arbiter }
}
impl SystemRunner {
/// This function will start tokio runtime and will finish once the /// Starts event loop and will return once [System] is [stopped](System::stop).
/// `System::stop()` message get called. pub fn run(self) -> io::Result<()> {
/// Function `f` get called within tokio runtime context. let SystemRunner { rt, stop_rx, .. } = self;
pub fn run<F>(f: F) -> io::Result<()>
where // run loop
F: FnOnce() + 'static, match rt.block_on(stop_rx) {
{ Ok(code) => {
Self::builder().run(f) if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
}
}
/// Runs the provided future, blocking the current thread until the future completes.
#[inline]
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
self.rt.block_on(fut)
}
}
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, ArbiterHandle),
DeregisterArbiter(usize),
}
/// There is one `SystemController` per [System]. It runs in the background, keeping track of
/// [Arbiter]s and is able to distribute a system-wide stop command.
#[derive(Debug)]
pub(crate) struct SystemController {
stop_tx: Option<oneshot::Sender<i32>>,
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
arbiters: HashMap<usize, ArbiterHandle>,
}
impl SystemController {
pub(crate) fn new(
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
stop_tx: oneshot::Sender<i32>,
) -> Self {
SystemController {
cmd_rx,
stop_tx: Some(stop_tx),
arbiters: HashMap::with_capacity(4),
}
}
}
impl Future for SystemController {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match ready!(Pin::new(&mut self.cmd_rx).poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process system command
Some(cmd) => match cmd {
SystemCommand::Exit(code) => {
// stop all arbiters
for arb in self.arbiters.values() {
arb.stop();
}
// stop event loop
// will only fire once
if let Some(stop_tx) = self.stop_tx.take() {
let _ = stop_tx.send(code);
}
}
SystemCommand::RegisterArbiter(id, arb) => {
self.arbiters.insert(id, arb);
}
SystemCommand::DeregisterArbiter(id) => {
self.arbiters.remove(&id);
}
},
}
}
} }
} }
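The exit-code plumbing above (`SystemCommand::Exit` flowing through `SystemController` into the stop receiver awaited by `SystemRunner::run`) can be exercised with a sketch like this (hypothetical example, assuming the post-rework API in this diff):

use actix_rt::System;

fn main() {
    let runner = System::new();

    // Ask the system to shut down with a non-zero code from the system arbiter.
    System::current().arbiter().spawn(async {
        System::current().stop_with_code(1);
    });

    // `run` blocks on the stop receiver; a non-zero exit code surfaces as io::Error.
    assert!(runner.run().is_err());
}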

actix-rt/tests/tests.rs Normal file

@@ -0,0 +1,300 @@
use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::channel,
Arc,
},
thread,
time::{Duration, Instant},
};
use actix_rt::{Arbiter, System};
use tokio::sync::oneshot;
#[test]
fn await_for_timer() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
tokio::time::sleep(time).await;
});
assert!(
instant.elapsed() >= time,
"Block on should poll awaited future to completion"
);
}
#[test]
fn join_another_arbiter() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on another arbiter should complete only when it calls stop"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || {
actix_rt::spawn(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
});
});
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on an arbiter that has used actix_rt::spawn should wait for said future"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.stop();
arbiter.join().unwrap();
});
assert!(
instant.elapsed() < time,
"Premature stop of arbiter should conclude regardless of it's current state"
);
}
#[test]
fn non_static_block_on() {
let string = String::from("test_str");
let string = string.as_str();
let sys = System::new();
sys.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
let rt = actix_rt::Runtime::new().unwrap();
rt.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
}
#[test]
fn wait_for_spawns() {
let rt = actix_rt::Runtime::new().unwrap();
let handle = rt.spawn(async {
println!("running on the runtime");
// assertion panic is caught at task boundary
assert_eq!(1, 2);
});
assert!(rt.block_on(handle).is_err());
}
#[test]
fn arbiter_spawn_fn_runs() {
let _ = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || tx.send(42).unwrap());
let num = rx.recv().unwrap();
assert_eq!(num, 42);
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
fn arbiter_handle_spawn_fn_runs() {
let sys = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
let handle = arbiter.handle();
drop(arbiter);
handle.spawn_fn(move || {
tx.send(42).unwrap();
System::current().stop()
});
let num = rx.recv_timeout(Duration::from_secs(2)).unwrap();
assert_eq!(num, 42);
handle.stop();
sys.run().unwrap();
}
#[test]
fn arbiter_drop_no_panic_fn() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn_fn(|| panic!("test"));
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
fn arbiter_drop_no_panic_fut() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn(async { panic!("test") });
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
#[should_panic]
fn no_system_current_panic() {
System::current();
}
#[test]
#[should_panic]
fn no_system_arbiter_new_panic() {
Arbiter::new();
}
#[test]
fn system_arbiter_spawn() {
let runner = System::new();
let (tx, rx) = oneshot::channel();
let sys = System::current();
thread::spawn(|| {
// this thread will have no arbiter in its thread local, so the call will panic
Arbiter::current();
})
.join()
.unwrap_err();
let thread = thread::spawn(|| {
// this thread will have no arbiter in its thread local, so use the system handle instead
System::set_current(sys);
let sys = System::current();
let arb = sys.arbiter();
arb.spawn(async move {
tx.send(42u32).unwrap();
System::current().stop();
});
});
assert_eq!(runner.block_on(rx).unwrap(), 42);
thread.join().unwrap();
}
#[test]
fn system_stop_stops_arbiters() {
let sys = System::new();
let arb = Arbiter::new();
// arbiter should be alive to receive spawn msg
assert!(Arbiter::current().spawn_fn(|| {}));
assert!(arb.spawn_fn(|| {}));
System::current().stop();
sys.run().unwrap();
// account for slightly slow thread de-spawns (only observed on windows)
thread::sleep(Duration::from_millis(100));
// arbiter should be dead and return false
assert!(!Arbiter::current().spawn_fn(|| {}));
assert!(!arb.spawn_fn(|| {}));
arb.join().unwrap();
}
#[test]
fn new_system_with_tokio() {
let (tx, rx) = channel();
let res = System::with_tokio_rt(move || {
tokio::runtime::Builder::new_multi_thread()
.enable_io()
.enable_time()
.thread_keep_alive(Duration::from_millis(1000))
.worker_threads(2)
.max_blocking_threads(2)
.on_thread_start(|| {})
.on_thread_stop(|| {})
.build()
.unwrap()
})
.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
tokio::task::spawn(async move {
tx.send(42).unwrap();
})
.await
.unwrap();
123usize
});
assert_eq!(res, 123);
assert_eq!(rx.recv().unwrap(), 42);
}
#[test]
fn new_arbiter_with_tokio() {
let _ = System::new();
let arb = Arbiter::with_tokio_rt(|| {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
});
let counter = Arc::new(AtomicBool::new(true));
let counter1 = counter.clone();
let did_spawn = arb.spawn(async move {
actix_rt::time::sleep(Duration::from_millis(1)).await;
counter1.store(false, Ordering::SeqCst);
Arbiter::current().stop();
});
assert!(did_spawn);
arb.join().unwrap();
assert!(!counter.load(Ordering::SeqCst));
}
#[test]
fn try_current_no_system() {
assert!(System::try_current().is_none())
}
#[test]
fn try_current_with_system() {
System::new().block_on(async { assert!(System::try_current().is_some()) });
}


@@ -1,188 +1,179 @@
# Changes # Changes
## [1.0.0] - 2019-12-11 ## Unreleased - 2021-xx-xx
* Remove `config` module. `ServiceConfig`, `ServiceRuntime` public types are removed due to this change. [#349]
* Remove `ServerBuilder::configure` [#349]
### Changed [#349]: https://github.com/actix/actix-net/pull/349
## 2.0.0-beta.5 - 2021-04-20
* Server shutdown would notify all workers to exit regardless if shutdown is graceful.
This would make all worker shutdown immediately in force shutdown case. [#333]
[#333]: https://github.com/actix/actix-net/pull/333
## 2.0.0-beta.4 - 2021-04-01
* Prevent panic when `shutdown_timeout` is very large. [f9262db]
[f9262db]: https://github.com/actix/actix-net/commit/f9262db
## 2.0.0-beta.3 - 2021-02-06
* Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`. [#246]
* Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop. [#264]
* Add `ServerBuilder::worker_max_blocking_threads` to customize blocking thread pool size. [#265]
* Update `actix-rt` to `2.0.0`. [#273]
[#246]: https://github.com/actix/actix-net/pull/246
[#264]: https://github.com/actix/actix-net/pull/264
[#265]: https://github.com/actix/actix-net/pull/265
[#273]: https://github.com/actix/actix-net/pull/273
## 2.0.0-beta.2 - 2021-01-03
* Merge `actix-testing` to `actix-server` as `test_server` mod. [#242]
[#242]: https://github.com/actix/actix-net/pull/242
## 2.0.0-beta.1 - 2020-12-28
* Added explicit info log message on accept queue pause. [#215]
* Prevent double registration of sockets when back-pressure is resolved. [#223]
* Update `mio` dependency to `0.7.3`. [#239]
* Remove `socket2` dependency. [#239]
* `ServerBuilder::backlog` now accepts `u32` instead of `i32`. [#239]
* Remove `AcceptNotify` type and pass `WakerQueue` to `Worker` to wake up `Accept`'s `Poll`. [#239]
* Convert `mio::net::TcpStream` to `actix_rt::net::TcpStream`(`UnixStream` for uds) using
`FromRawFd` and `IntoRawFd`(`FromRawSocket` and `IntoRawSocket` on windows). [#239]
* Remove `AsyncRead` and `AsyncWrite` trait bound for `socket::FromStream` trait. [#239]
[#215]: https://github.com/actix/actix-net/pull/215
[#223]: https://github.com/actix/actix-net/pull/223
[#239]: https://github.com/actix/actix-net/pull/239
## 1.0.4 - 2020-09-12
* Update actix-codec to 0.3.0.
* Workers must be greater than 0. [#167]
[#167]: https://github.com/actix/actix-net/pull/167
## 1.0.3 - 2020-05-19
* Replace deprecated `net2` crate with `socket2` [#140]
[#140]: https://github.com/actix/actix-net/pull/140
## 1.0.2 - 2020-02-26
* Avoid error by calling `reregister()` on Windows [#103]
[#103]: https://github.com/actix/actix-net/pull/103
## 1.0.1 - 2019-12-29
* Rename `.start()` method to `.run()`
## 1.0.0 - 2019-12-11
* Use actix-net releases * Use actix-net releases
## [1.0.0-alpha.4] - 2019-12-08 ## 1.0.0-alpha.4 - 2019-12-08
### Changed
* Use actix-service 1.0.0-alpha.4 * Use actix-service 1.0.0-alpha.4
## [1.0.0-alpha.3] - 2019-12-07
### Changed
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to tokio 0.2 * Migrate to tokio 0.2
### Fixed
* Fix compilation on non-unix platforms * Fix compilation on non-unix platforms
* Better handling server configuration * Better handling server configuration
## [1.0.0-alpha.2] - 2019-12-02 ## 1.0.0-alpha.2 - 2019-12-02
### Changed
* Simplify server service (remove actix-server-config) * Simplify server service (remove actix-server-config)
* Allow to wait on `Server` until server stops * Allow to wait on `Server` until server stops
## [0.8.0-alpha.1] - 2019-11-22 ## 0.8.0-alpha.1 - 2019-11-22
### Changed
* Migrate to `std::future` * Migrate to `std::future`
## [0.7.0] - 2019-10-04 ## 0.7.0 - 2019-10-04
### Changed
* Update `rustls` to 0.16 * Update `rustls` to 0.16
* Minimum required Rust version upped to 1.37.0 * Minimum required Rust version upped to 1.37.0
## [0.6.1] - 2019-09-25 ## 0.6.1 - 2019-09-25
### Added
* Add UDS listening support to `ServerBuilder` * Add UDS listening support to `ServerBuilder`
## [0.6.0] - 2019-07-18 ## 0.6.0 - 2019-07-18
### Added
* Support Unix domain sockets #3 * Support Unix domain sockets #3
## [0.5.1] - 2019-05-18 ## 0.5.1 - 2019-05-18
### Changed
* ServerBuilder::shutdown_timeout() accepts u64 * ServerBuilder::shutdown_timeout() accepts u64
## [0.5.0] - 2019-05-12 ## 0.5.0 - 2019-05-12
### Added
* Add `Debug` impl for `SslError` * Add `Debug` impl for `SslError`
* Derive debug for `Server` and `ServerCommand` * Derive debug for `Server` and `ServerCommand`
### Changed
* Upgrade to actix-service 0.4 * Upgrade to actix-service 0.4
## [0.4.3] - 2019-04-16 ## 0.4.3 - 2019-04-16
### Added
* Re-export `IoStream` trait * Re-export `IoStream` trait
* Depend on `ssl` and `rust-tls` features from actix-server-config
### Changed
* Deppend on `ssl` and `rust-tls` features from actix-server-config
## [0.4.2] - 2019-03-30 ## 0.4.2 - 2019-03-30
### Fixed
* Fix SIGINT force shutdown * Fix SIGINT force shutdown
## [0.4.1] - 2019-03-14 ## 0.4.1 - 2019-03-14
### Added
* `SystemRuntime::on_start()` - allow to run future before server service initialization * `SystemRuntime::on_start()` - allow to run future before server service initialization
## [0.4.0] - 2019-03-12 ## 0.4.0 - 2019-03-12
### Changed
* Use `ServerConfig` for service factory * Use `ServerConfig` for service factory
* Wrap tcp socket to `Io` type * Wrap tcp socket to `Io` type
* Upgrade actix-service * Upgrade actix-service
## [0.3.1] - 2019-03-04 ## 0.3.1 - 2019-03-04
### Added
* Add `ServerBuilder::maxconnrate` sets the maximum per-worker number of concurrent connections * Add `ServerBuilder::maxconnrate` sets the maximum per-worker number of concurrent connections
* Add helper ssl error `SslError` * Add helper ssl error `SslError`
### Changed
* Rename `StreamServiceFactory` to `ServiceFactory` * Rename `StreamServiceFactory` to `ServiceFactory`
* Deprecate `StreamServiceFactory` * Deprecate `StreamServiceFactory`
## [0.3.0] - 2019-03-02 ## 0.3.0 - 2019-03-02
### Changed
* Use new `NewService` trait * Use new `NewService` trait
## [0.2.1] - 2019-02-09 ## 0.2.1 - 2019-02-09
### Changed
* Drop service response * Drop service response
## [0.2.0] - 2019-02-01 ## 0.2.0 - 2019-02-01
### Changed
* Migrate to actix-service 0.2 * Migrate to actix-service 0.2
* Updated rustls dependency * Updated rustls dependency
## [0.1.3] - 2018-12-21 ## 0.1.3 - 2018-12-21
### Fixed
* Fix max concurrent connections handling * Fix max concurrent connections handling
## [0.1.2] - 2018-12-12 ## 0.1.2 - 2018-12-12
### Changed
* rename ServiceConfig::rt() to ServiceConfig::apply() * rename ServiceConfig::rt() to ServiceConfig::apply()
### Fixed
* Fix back-pressure for concurrent ssl handshakes * Fix back-pressure for concurrent ssl handshakes
## [0.1.1] - 2018-12-11 ## 0.1.1 - 2018-12-11
* Fix signal handling on windows * Fix signal handling on windows
## [0.1.0] - 2018-12-09 ## 0.1.0 - 2018-12-09
* Move server to separate crate * Move server to separate crate

actix-server/Cargo.toml Normal file → Executable file

@@ -1,17 +1,16 @@
[package] [package]
name = "actix-server" name = "actix-server"
version = "1.0.0" version = "2.0.0-beta.5"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = [
description = "Actix server - General purpose tcp server" "Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
]
description = "General purpose TCP server built for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"] keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs" repository = "https://github.com/actix/actix-net"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-server/"
categories = ["network-programming", "asynchronous"] categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0" license = "MIT OR Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018" edition = "2018"
workspace = ".."
[lib] [lib]
name = "actix_server" name = "actix_server"
@@ -21,22 +20,21 @@ path = "src/lib.rs"
default = [] default = []
[dependencies] [dependencies]
actix-service = "1.0.0" actix-rt = { version = "2.0.0", default-features = false }
actix-rt = "1.0.0" actix-service = "2.0.0"
actix-codec = "0.2.0" actix-utils = "3.0.0"
actix-utils = "1.0.0"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
log = "0.4" log = "0.4"
num_cpus = "1.11" mio = { version = "0.7.6", features = ["os-poll", "net"] }
mio = "0.6.19" num_cpus = "1.13"
net2 = "0.2" tokio = { version = "1.2", features = ["sync"] }
futures = "0.3.1"
slab = "0.4"
# unix domain sockets
mio-uds = { version = "0.6.7" }
[dev-dependencies] [dev-dependencies]
bytes = "0.5" actix-codec = "0.4.0-beta.1"
env_logger = "0.6" actix-rt = "2.0.0"
actix-testing = "1.0.0"
bytes = "1"
env_logger = "0.8"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
tokio = { version = "1", features = ["io-util"] }


@@ -0,0 +1,90 @@
//! Simple composite-service TCP echo server.
//!
//! Using the following command:
//!
//! ```sh
//! nc 127.0.0.1 8080
//! ```
//!
//! Start typing. When you press enter the typed line will be echoed back. The server will log
//! the length of each line it echoes and the total size of data sent when the connection is closed.
use std::{
env, io,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::{fn_service, ServiceFactoryExt as _};
use bytes::BytesMut;
use futures_util::future::ok;
use log::{error, info};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "info");
env_logger::init();
let count = Arc::new(AtomicUsize::new(0));
let addr = ("127.0.0.1", 8080);
info!("starting server on port: {}", &addr.0);
// Bind socket address and start worker(s). By default, the server uses the number of available
// logical CPU cores as the worker count. For this reason, the closure passed to bind needs
// to return a service *factory*, so that a new service can be created once per worker.
Server::build()
.bind("echo", addr, move || {
let count = Arc::clone(&count);
let num2 = Arc::clone(&count);
fn_service(move |mut stream: TcpStream| {
let count = Arc::clone(&count);
async move {
let num = count.fetch_add(1, Ordering::SeqCst);
let num = num + 1;
let mut size = 0;
let mut buf = BytesMut::new();
loop {
match stream.read_buf(&mut buf).await {
// end of stream; bail from loop
Ok(0) => break,
// more bytes to process
Ok(bytes_read) => {
info!("[{}] read {} bytes", num, bytes_read);
stream.write_all(&buf[size..]).await.unwrap();
size += bytes_read;
}
// stream error; bail from loop with error
Err(err) => {
error!("Stream Error: {:?}", err);
return Err(());
}
}
}
// send data down service pipeline
Ok((buf.freeze(), size))
}
})
.map_err(|err| error!("Service Error: {:?}", err))
.and_then(move |(_, size)| {
let num = num2.load(Ordering::SeqCst);
info!("[{}] total bytes read: {}", num, size);
ok(size)
})
})?
.workers(1)
.run()
.await
}
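Besides `nc`, the server above can be exercised with a small Tokio client sketch like the following (hypothetical companion example, not part of the repository):

use std::io;

use tokio::{
    io::{AsyncReadExt, AsyncWriteExt},
    net::TcpStream,
};

#[actix_rt::main]
async fn main() -> io::Result<()> {
    // Connect to the echo server started above and send one line.
    let mut stream = TcpStream::connect(("127.0.0.1", 8080)).await?;
    stream.write_all(b"hello\n").await?;

    // Read back the echoed bytes.
    let mut buf = vec![0u8; 64];
    let n = stream.read(&mut buf).await?;
    println!("echoed: {}", String::from_utf8_lossy(&buf[..n]));

    Ok(())
}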


@@ -1,119 +1,144 @@
use std::sync::mpsc as sync_mpsc;
use std::time::Duration; use std::time::Duration;
use std::{io, thread}; use std::{io, thread};
use actix_rt::time::{delay_until, Instant}; use actix_rt::{
use actix_rt::System; time::{sleep, Instant},
System,
};
use log::{error, info}; use log::{error, info};
use slab::Slab; use mio::{Interest, Poll, Token as MioToken};
use crate::server::Server; use crate::server::Server;
use crate::socket::{SocketAddr, SocketListener, StdListener}; use crate::socket::MioListener;
use crate::worker::{Conn, WorkerClient}; use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
use crate::Token; use crate::worker::{Conn, WorkerHandleAccept};
pub(crate) enum Command {
Pause,
Resume,
Stop,
Worker(WorkerClient),
}
struct ServerSocketInfo { struct ServerSocketInfo {
addr: SocketAddr, token: usize,
token: Token,
sock: SocketListener, lst: MioListener,
/// Timeout is used to mark the deadline when this socket's listener should be registered again
/// after an error.
timeout: Option<Instant>, timeout: Option<Instant>,
} }
#[derive(Clone)] /// Accept loop would live with `ServerBuilder`.
pub(crate) struct AcceptNotify(mio::SetReadiness); ///
/// It's tasked with constructing the `Poll` instance and `WakerQueue`, which are distributed to
impl AcceptNotify { /// `Accept` and `Worker`.
pub(crate) fn new(ready: mio::SetReadiness) -> Self { ///
AcceptNotify(ready) /// It would also listen to `ServerCommand` and push interests to `WakerQueue`.
}
pub(crate) fn notify(&self) {
let _ = self.0.set_readiness(mio::Ready::readable());
}
}
impl Default for AcceptNotify {
fn default() -> Self {
AcceptNotify::new(mio::Registration::new2().1)
}
}
pub(crate) struct AcceptLoop { pub(crate) struct AcceptLoop {
cmd_reg: Option<mio::Registration>,
cmd_ready: mio::SetReadiness,
notify_reg: Option<mio::Registration>,
notify_ready: mio::SetReadiness,
tx: sync_mpsc::Sender<Command>,
rx: Option<sync_mpsc::Receiver<Command>>,
srv: Option<Server>, srv: Option<Server>,
poll: Option<Poll>,
waker: WakerQueue,
} }
impl AcceptLoop { impl AcceptLoop {
pub fn new(srv: Server) -> AcceptLoop { pub fn new(srv: Server) -> Self {
let (tx, rx) = sync_mpsc::channel(); let poll = Poll::new().unwrap_or_else(|e| panic!("Can not create `mio::Poll`: {}", e));
let (cmd_reg, cmd_ready) = mio::Registration::new2(); let waker = WakerQueue::new(poll.registry())
let (notify_reg, notify_ready) = mio::Registration::new2(); .unwrap_or_else(|e| panic!("Can not create `mio::Waker`: {}", e));
AcceptLoop { Self {
tx,
cmd_ready,
cmd_reg: Some(cmd_reg),
notify_ready,
notify_reg: Some(notify_reg),
rx: Some(rx),
srv: Some(srv), srv: Some(srv),
poll: Some(poll),
waker,
} }
} }
pub fn send(&self, msg: Command) { pub(crate) fn waker_owned(&self) -> WakerQueue {
let _ = self.tx.send(msg); self.waker.clone()
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
} }
pub fn get_notify(&self) -> AcceptNotify { pub fn wake(&self, i: WakerInterest) {
AcceptNotify::new(self.notify_ready.clone()) self.waker.wake(i);
} }
pub(crate) fn start( pub(crate) fn start(
&mut self, &mut self,
socks: Vec<(Token, StdListener)>, socks: Vec<(usize, MioListener)>,
workers: Vec<WorkerClient>, handles: Vec<WorkerHandleAccept>,
) { ) {
let srv = self.srv.take().expect("Can not re-use AcceptInfo"); let srv = self.srv.take().expect("Can not re-use AcceptInfo");
let poll = self.poll.take().unwrap();
let waker = self.waker.clone();
Accept::start( Accept::start(poll, waker, socks, srv, handles);
self.rx.take().expect("Can not re-use AcceptInfo"),
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
socks,
srv,
workers,
);
} }
} }
/// poll instance of the server.
struct Accept { struct Accept {
poll: mio::Poll, poll: Poll,
rx: sync_mpsc::Receiver<Command>, waker: WakerQueue,
sockets: Slab<ServerSocketInfo>, handles: Vec<WorkerHandleAccept>,
workers: Vec<WorkerClient>,
srv: Server, srv: Server,
timer: (mio::Registration, mio::SetReadiness),
next: usize, next: usize,
backpressure: bool, avail: Availability,
paused: bool,
} }
const DELTA: usize = 100; /// Array of u128 with every bit as marker for a worker handle's availability.
const CMD: mio::Token = mio::Token(0); struct Availability([u128; 4]);
const TIMER: mio::Token = mio::Token(1);
const NOTIFY: mio::Token = mio::Token(2); impl Default for Availability {
fn default() -> Self {
Self([0; 4])
}
}
impl Availability {
/// Check if any worker handle is available
#[inline(always)]
fn available(&self) -> bool {
self.0.iter().any(|a| *a != 0)
}
/// Check if worker handle is available by index
#[inline(always)]
fn get_available(&self, idx: usize) -> bool {
let (offset, idx) = Self::offset(idx);
self.0[offset] & (1 << idx as u128) != 0
}
/// Set worker handle available state by index.
fn set_available(&mut self, idx: usize, avail: bool) {
let (offset, idx) = Self::offset(idx);
let off = 1 << idx as u128;
if avail {
self.0[offset] |= off;
} else {
self.0[offset] &= !off
}
}
/// Set all worker handle to available state.
/// This would result in a re-check on all workers' availability.
fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
handles.iter().for_each(|handle| {
self.set_available(handle.idx(), true);
})
}
/// Get offset and adjusted index of given worker handle index.
fn offset(idx: usize) -> (usize, usize) {
if idx < 128 {
(0, idx)
} else if idx < 128 * 2 {
(1, idx - 128)
} else if idx < 128 * 3 {
(2, idx - 128 * 2)
} else if idx < 128 * 4 {
(3, idx - 128 * 3)
} else {
panic!("Max WorkerHandle count is 512")
}
}
}
/// This function defines errors that are per-connection. Which basically /// This function defines errors that are per-connection. Which basically
/// means that if we get this error from `accept()` system call it means /// means that if we get this error from `accept()` system call it means
@@ -129,328 +154,439 @@ fn connection_error(e: &io::Error) -> bool {
} }
impl Accept { impl Accept {
#![allow(clippy::too_many_arguments)]
pub(crate) fn start( pub(crate) fn start(
rx: sync_mpsc::Receiver<Command>, poll: Poll,
cmd_reg: mio::Registration, waker: WakerQueue,
notify_reg: mio::Registration, socks: Vec<(usize, MioListener)>,
socks: Vec<(Token, StdListener)>,
srv: Server, srv: Server,
workers: Vec<WorkerClient>, handles: Vec<WorkerHandleAccept>,
) { ) {
// Accept runs in its own thread and would want to spawn additional futures to current
// actix system.
let sys = System::current(); let sys = System::current();
thread::Builder::new()
// start accept thread
let _ = thread::Builder::new()
.name("actix-server accept loop".to_owned()) .name("actix-server accept loop".to_owned())
.spawn(move || { .spawn(move || {
System::set_current(sys); System::set_current(sys);
let mut accept = Accept::new(rx, socks, workers, srv); let (mut accept, mut sockets) =
Accept::new_with_sockets(poll, waker, socks, handles, srv);
// Start listening for incoming commands accept.poll_with(&mut sockets);
if let Err(err) = accept.poll.register( })
&cmd_reg, .unwrap();
CMD,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
// Start listening for notify updates
if let Err(err) = accept.poll.register(
&notify_reg,
NOTIFY,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
accept.poll();
});
} }
fn new( fn new_with_sockets(
rx: sync_mpsc::Receiver<Command>, poll: Poll,
socks: Vec<(Token, StdListener)>, waker: WakerQueue,
workers: Vec<WorkerClient>, socks: Vec<(usize, MioListener)>,
handles: Vec<WorkerHandleAccept>,
srv: Server, srv: Server,
) -> Accept { ) -> (Accept, Vec<ServerSocketInfo>) {
// Create a poll instance let sockets = socks
let poll = match mio::Poll::new() { .into_iter()
Ok(poll) => poll, .map(|(token, mut lst)| {
Err(err) => panic!("Can not create mio::Poll: {}", err), // Start listening for incoming connections
}; poll.registry()
.register(&mut lst, MioToken(token), Interest::READABLE)
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
// Start accept ServerSocketInfo {
let mut sockets = Slab::new(); token,
for (hnd_token, lst) in socks.into_iter() { lst,
let addr = lst.local_addr(); timeout: None,
}
})
.collect();
let server = lst.into_listener(); let mut avail = Availability::default();
let entry = sockets.vacant_entry();
let token = entry.key();
// Start listening for incoming connections // Assume all handles are avail at construct time.
if let Err(err) = poll.register( avail.set_available_all(&handles);
&server,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register io: {}", err);
}
entry.insert(ServerSocketInfo { let accept = Accept {
addr,
token: hnd_token,
sock: server,
timeout: None,
});
}
// Timer
let (tm, tmr) = mio::Registration::new2();
if let Err(err) =
poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
{
panic!("Can not register Registration: {}", err);
}
Accept {
poll, poll,
rx, waker,
sockets, handles,
workers,
srv, srv,
next: 0, next: 0,
timer: (tm, tmr), avail,
backpressure: false, paused: false,
} };
(accept, sockets)
} }
fn poll(&mut self) { fn poll_with(&mut self, sockets: &mut [ServerSocketInfo]) {
// Create storage for events
let mut events = mio::Events::with_capacity(128); let mut events = mio::Events::with_capacity(128);
loop { loop {
if let Err(err) = self.poll.poll(&mut events, None) { if let Err(e) = self.poll.poll(&mut events, None) {
panic!("Poll error: {}", err); match e.kind() {
io::ErrorKind::Interrupted => {}
_ => panic!("Poll error: {}", e),
}
} }
for event in events.iter() { for event in events.iter() {
let token = event.token(); let token = event.token();
match token { match token {
CMD => { WAKER_TOKEN => {
if !self.process_cmd() { let exit = self.handle_waker(sockets);
if exit {
info!("Accept is stopped.");
return; return;
} }
} }
TIMER => self.process_timer(),
NOTIFY => self.backpressure(false),
_ => { _ => {
let token = usize::from(token); let token = usize::from(token);
if token < DELTA { self.accept(sockets, token);
continue;
}
self.accept(token - DELTA);
} }
} }
} }
} }
} }
fn process_timer(&mut self) { fn handle_waker(&mut self, sockets: &mut [ServerSocketInfo]) -> bool {
// This is a loop because the command interest handling in the previous version was
// a loop that tried to drain the command channel. It is not yet known whether it's
// necessary/good practice to actively drain the waker queue.
loop {
// take guard with every iteration so no new interest can be added
// until the current task is done.
let mut guard = self.waker.guard();
match guard.pop_front() {
// worker notifies that it has become available.
Some(WakerInterest::WorkerAvailable(idx)) => {
drop(guard);
self.avail.set_available(idx, true);
if !self.paused {
self.accept_all(sockets);
}
}
// a new worker thread is made and its handle will be added to Accept
Some(WakerInterest::Worker(handle)) => {
drop(guard);
self.avail.set_available(handle.idx(), true);
self.handles.push(handle);
if !self.paused {
self.accept_all(sockets);
}
}
// got timer interest and it's time to try register socket(s) again
Some(WakerInterest::Timer) => {
drop(guard);
self.process_timer(sockets)
}
Some(WakerInterest::Pause) => {
drop(guard);
if !self.paused {
self.paused = true;
self.deregister_all(sockets);
}
}
Some(WakerInterest::Resume) => {
drop(guard);
if self.paused {
self.paused = false;
sockets.iter_mut().for_each(|info| {
self.register_logged(info);
});
self.accept_all(sockets);
}
}
Some(WakerInterest::Stop) => {
if !self.paused {
self.deregister_all(sockets);
}
return true;
}
// waker queue is drained
None => {
// Reset the WakerQueue before break so it does not grow infinitely
WakerQueue::reset(&mut guard);
return false;
}
}
}
}
fn process_timer(&self, sockets: &mut [ServerSocketInfo]) {
let now = Instant::now(); let now = Instant::now();
for (token, info) in self.sockets.iter_mut() { sockets
if let Some(inst) = info.timeout.take() { .iter_mut()
if now > inst { // Only sockets that had an associated timeout were deregistered.
if let Err(err) = self.poll.register( .filter(|info| info.timeout.is_some())
&info.sock, .for_each(|info| {
mio::Token(token + DELTA), let inst = info.timeout.take().unwrap();
mio::Ready::readable(),
mio::PollOpt::edge(), if now < inst {
) {
error!("Can not register server socket {}", err);
} else {
info!("Resume accepting connections on {}", info.addr);
}
} else {
info.timeout = Some(inst); info.timeout = Some(inst);
} else if !self.paused {
self.register_logged(info);
} }
// Drop the timeout if the server is paused and the socket's timeout has expired.
// When the server recovers from pause it will register all sockets without
// a timeout value, so this socket's registration will be delayed until then.
});
}
#[cfg(not(target_os = "windows"))]
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
let token = MioToken(info.token);
self.poll
.registry()
.register(&mut info.lst, token, Interest::READABLE)
}
#[cfg(target_os = "windows")]
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
// On Windows, calling register without deregister causes an error.
// See https://github.com/actix/actix-web/issues/905
// Calling reregister seems to fix the issue.
let token = MioToken(info.token);
self.poll
.registry()
.register(&mut info.lst, token, Interest::READABLE)
.or_else(|_| {
self.poll
.registry()
.reregister(&mut info.lst, token, Interest::READABLE)
})
}
fn register_logged(&self, info: &mut ServerSocketInfo) {
match self.register(info) {
Ok(_) => info!("Resume accepting connections on {}", info.lst.local_addr()),
Err(e) => error!("Can not register server socket {}", e),
}
}
fn deregister_logged(&self, info: &mut ServerSocketInfo) {
match self.poll.registry().deregister(&mut info.lst) {
Ok(_) => info!("Paused accepting connections on {}", info.lst.local_addr()),
Err(e) => {
error!("Can not deregister server socket {}", e)
} }
} }
} }
fn process_cmd(&mut self) -> bool { fn deregister_all(&self, sockets: &mut [ServerSocketInfo]) {
// This is a best-effort implementation with the following limitation:
//
// Every ServerSocketInfo with an associated timeout will be skipped and its timeout
// is removed in the process.
//
// Therefore a WakerInterest::Pause followed by a WakerInterest::Resume within a very short
// gap (less than 500ms) would cause all timing-out ServerSocketInfos to be re-registered
// before the expected time.
sockets
.iter_mut()
// Take all timeouts.
// This is to prevent Accept::process_timer from re-registering a socket afterwards.
.map(|info| (info.timeout.take(), info))
// Socket infos with a timeout are already deregistered, so skip them.
.filter(|(timeout, _)| timeout.is_none())
.for_each(|(_, info)| self.deregister_logged(info));
}
// Send connection to worker and handle error.
fn send_connection(&mut self, conn: Conn) -> Result<(), Conn> {
let next = self.next();
match next.send(conn) {
Ok(_) => {
// Increment counter of WorkerHandle.
// Set the worker to unavailable when it hits max (returns false).
if !next.inc_counter() {
let idx = next.idx();
self.avail.set_available(idx, false);
}
self.set_next();
Ok(())
}
Err(conn) => {
// Worker thread errored and could be gone.
// Remove worker handle and notify `ServerBuilder`.
self.remove_next();
if self.handles.is_empty() {
error!("No workers");
// All workers are gone and Conn is nowhere to be sent.
// Treat this situation as Ok and drop Conn.
return Ok(());
} else if self.handles.len() <= self.next {
self.next = 0;
}
Err(conn)
}
}
}
fn accept_one(&mut self, mut conn: Conn) {
loop { loop {
match self.rx.try_recv() { let next = self.next();
Ok(cmd) => match cmd { let idx = next.idx();
Command::Pause => {
for (_, info) in self.sockets.iter_mut() {
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
} else {
info!("Paused accepting connections on {}", info.addr);
}
}
}
Command::Resume => {
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!(
"Accepting connections on {} has been resumed",
info.addr
);
}
}
}
Command::Stop => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
Command::Worker(worker) => {
self.backpressure(false);
self.workers.push(worker);
}
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => break,
sync_mpsc::TryRecvError::Disconnected => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
},
}
}
true
}
fn backpressure(&mut self, on: bool) { if self.avail.get_available(idx) {
if self.backpressure { match self.send_connection(conn) {
if !on { Ok(_) => return,
self.backpressure = false; Err(c) => conn = c,
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed", info.addr);
}
}
}
} else if on {
self.backpressure = true;
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
}
}
fn accept_one(&mut self, mut msg: Conn) {
if self.backpressure {
while !self.workers.is_empty() {
match self.workers[self.next].send(msg) {
Ok(_) => (),
Err(tmp) => {
self.srv.worker_faulted(self.workers[self.next].idx);
msg = tmp;
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
self.next = (self.next + 1) % self.workers.len();
break;
}
} else {
let mut idx = 0;
while idx < self.workers.len() {
idx += 1;
if self.workers[self.next].available() {
match self.workers[self.next].send(msg) {
Ok(_) => {
self.next = (self.next + 1) % self.workers.len();
return;
}
Err(tmp) => {
self.srv.worker_faulted(self.workers[self.next].idx);
msg = tmp;
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
self.backpressure(true);
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.next = (self.next + 1) % self.workers.len();
}
// enable backpressure
self.backpressure(true);
self.accept_one(msg);
}
}
fn accept(&mut self, token: usize) {
loop {
let msg = if let Some(info) = self.sockets.get_mut(token) {
match info.sock.accept() {
Ok(Some((io, addr))) => Conn {
io,
token: info.token,
peer: Some(addr),
},
Ok(None) => return,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
}
// sleep after error
info.timeout = Some(Instant::now() + Duration::from_millis(500));
let r = self.timer.1.clone();
System::current().arbiter().send(Box::pin(async move {
delay_until(Instant::now() + Duration::from_millis(510)).await;
let _ = r.set_readiness(mio::Ready::readable());
}));
return;
}
}
} else {
return;
};
self.accept_one(msg);
}
}
let idx = next.idx();
if self.avail.get_available(idx) {
match self.send_connection(conn) {
Ok(_) => return,
Err(c) => conn = c,
}
} else {
self.avail.set_available(idx, false);
self.set_next();
if !self.avail.available() {
while let Err(c) = self.send_connection(conn) {
conn = c;
}
return;
}
}
}
}
fn accept(&mut self, sockets: &mut [ServerSocketInfo], token: usize) {
while self.avail.available() {
let info = &mut sockets[token];
match info.lst.accept() {
Ok(io) => {
let conn = Conn { io, token };
self.accept_one(conn);
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
// temporarily deregister the listener
self.deregister_logged(info);
// sleep after error. write the timeout to the socket info so that the
// poll loop later knows which socket's listener should be re-registered
// and when
info.timeout = Some(Instant::now() + Duration::from_millis(500));
// after the sleep a Timer interest is sent to Accept Poll
let waker = self.waker.clone();
System::current().arbiter().spawn(async move {
sleep(Duration::from_millis(510)).await;
waker.wake(WakerInterest::Timer);
});
return;
}
};
}
}
fn accept_all(&mut self, sockets: &mut [ServerSocketInfo]) {
sockets
.iter_mut()
.map(|info| info.token)
.collect::<Vec<_>>()
.into_iter()
.for_each(|idx| self.accept(sockets, idx))
}
#[inline(always)]
fn next(&self) -> &WorkerHandleAccept {
&self.handles[self.next]
}
/// Advance to the next worker handle that should accept a connection.
#[inline(always)]
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
}
/// Remove the next worker handle after it has failed to accept a connection.
fn remove_next(&mut self) {
let handle = self.handles.swap_remove(self.next);
let idx = handle.idx();
// A message is sent to the `ServerBuilder` future to notify it that a new
// worker should be made.
self.srv.worker_faulted(idx);
self.avail.set_available(idx, false);
}
}
#[cfg(test)]
mod test {
use super::Availability;
fn single(aval: &mut Availability, idx: usize) {
aval.set_available(idx, true);
assert!(aval.available());
aval.set_available(idx, true);
aval.set_available(idx, false);
assert!(!aval.available());
aval.set_available(idx, false);
assert!(!aval.available());
}
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
idx.iter().for_each(|idx| aval.set_available(*idx, true));
assert!(aval.available());
while let Some(idx) = idx.pop() {
assert!(aval.available());
aval.set_available(idx, false);
}
assert!(!aval.available());
}
#[test]
fn availability() {
let mut aval = Availability::default();
single(&mut aval, 1);
single(&mut aval, 128);
single(&mut aval, 256);
single(&mut aval, 511);
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
multi(&mut aval, idx);
multi(&mut aval, (0..511).collect())
}
#[test]
#[should_panic]
fn overflow() {
let mut aval = Availability::default();
single(&mut aval, 512);
}
#[test]
fn pin_point() {
let mut aval = Availability::default();
aval.set_available(438, true);
aval.set_available(479, true);
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
}
}
}
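For reference, a minimal self-contained sketch of an `Availability` bitmask consistent with the tests above, assuming the layout implied by the `pin_point` and `overflow` tests (four 128-bit words, i.e. 512 worker slots); this is an illustration, not necessarily the crate's exact definition:

/// Sketch: one bit per worker slot, packed into four 128-bit words.
#[derive(Clone, Copy, Default)]
struct Availability([u128; 4]);

impl Availability {
    /// Returns `true` if any worker slot is marked available.
    fn available(&self) -> bool {
        self.0.iter().any(|w| *w != 0)
    }

    /// Returns `true` if the worker at `idx` is marked available.
    fn get_available(&self, idx: usize) -> bool {
        self.0[idx / 128] & (1 << (idx % 128)) != 0
    }

    /// Marks the worker at `idx` as available (`true`) or busy (`false`).
    /// Panics via index out of bounds for `idx >= 512`, as in the `overflow` test.
    fn set_available(&mut self, idx: usize, avail: bool) {
        let (word, bit) = (idx / 128, 1u128 << (idx % 128));
        if avail {
            self.0[word] |= bit;
        } else {
            self.0[word] &= !bit;
        }
    }
}

With this layout, `set_available(438, true)` flips bit `438 - 384 = 54` of word 3, which is exactly what the `pin_point` test asserts.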


@@ -1,44 +1,43 @@
use std::pin::Pin; use std::{
use std::task::{Context, Poll}; future::Future,
use std::time::Duration; io, mem,
use std::{io, mem, net}; pin::Pin,
task::{Context, Poll},
time::Duration,
};
use actix_rt::net::TcpStream; use actix_rt::{self as rt, net::TcpStream, time::sleep, System};
use actix_rt::time::{delay_until, Instant};
use actix_rt::{spawn, System};
use futures::channel::mpsc::{unbounded, UnboundedReceiver};
use futures::channel::oneshot;
use futures::future::ready;
use futures::stream::FuturesUnordered;
use futures::{ready, Future, FutureExt, Stream, StreamExt};
use log::{error, info}; use log::{error, info};
use net2::TcpBuilder; use tokio::sync::{
use num_cpus; mpsc::{unbounded_channel, UnboundedReceiver},
oneshot,
};
use crate::accept::{AcceptLoop, AcceptNotify, Command}; use crate::accept::AcceptLoop;
use crate::config::{ConfiguredService, ServiceConfig}; use crate::join_all;
use crate::server::{Server, ServerCommand}; use crate::server::{Server, ServerCommand};
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService}; use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::signals::{Signal, Signals}; use crate::signals::{Signal, Signals};
use crate::socket::StdListener; use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::worker::{self, Worker, WorkerAvailability, WorkerClient}; use crate::socket::{MioTcpListener, MioTcpSocket};
use crate::Token; use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::worker::{ServerWorker, ServerWorkerConfig, WorkerHandleAccept, WorkerHandleServer};
/// Server builder /// Server builder
pub struct ServerBuilder { pub struct ServerBuilder {
threads: usize, threads: usize,
token: Token, token: usize,
backlog: i32, backlog: u32,
workers: Vec<(usize, WorkerClient)>, handles: Vec<(usize, WorkerHandleServer)>,
services: Vec<Box<dyn InternalServiceFactory>>, services: Vec<Box<dyn InternalServiceFactory>>,
sockets: Vec<(Token, StdListener)>, sockets: Vec<(usize, String, MioListener)>,
accept: AcceptLoop, accept: AcceptLoop,
exit: bool, exit: bool,
shutdown_timeout: Duration,
no_signals: bool, no_signals: bool,
cmd: UnboundedReceiver<ServerCommand>, cmd: UnboundedReceiver<ServerCommand>,
server: Server, server: Server,
notify: Vec<oneshot::Sender<()>>, notify: Vec<oneshot::Sender<()>>,
worker_config: ServerWorkerConfig,
} }
impl Default for ServerBuilder { impl Default for ServerBuilder {
@@ -50,35 +49,54 @@ impl Default for ServerBuilder {
impl ServerBuilder { impl ServerBuilder {
/// Create new Server builder instance /// Create new Server builder instance
pub fn new() -> ServerBuilder { pub fn new() -> ServerBuilder {
let (tx, rx) = unbounded(); let (tx, rx) = unbounded_channel();
let server = Server::new(tx); let server = Server::new(tx);
ServerBuilder { ServerBuilder {
threads: num_cpus::get(), threads: num_cpus::get(),
token: Token(0), token: 0,
workers: Vec::new(), handles: Vec::new(),
services: Vec::new(), services: Vec::new(),
sockets: Vec::new(), sockets: Vec::new(),
accept: AcceptLoop::new(server.clone()), accept: AcceptLoop::new(server.clone()),
backlog: 2048, backlog: 2048,
exit: false, exit: false,
shutdown_timeout: Duration::from_secs(30),
no_signals: false, no_signals: false,
cmd: rx, cmd: rx,
notify: Vec::new(), notify: Vec::new(),
server, server,
worker_config: ServerWorkerConfig::default(),
} }
} }
/// Set number of workers to start. /// Set number of workers to start.
/// ///
/// By default server uses number of available logical cpu as workers /// By default server uses number of available logical cpu as workers
/// count. /// count. Workers must be greater than 0.
pub fn workers(mut self, num: usize) -> Self { pub fn workers(mut self, num: usize) -> Self {
assert_ne!(num, 0, "workers must be greater than 0");
self.threads = num; self.threads = num;
self self
} }
/// Set max number of threads for each worker's blocking task thread pool.
///
/// One thread pool is set up **per worker**; not shared across workers.
///
/// # Examples:
/// ```
/// # use actix_server::ServerBuilder;
/// let builder = ServerBuilder::new()
/// .workers(4) // server has 4 worker thread.
/// .worker_max_blocking_threads(4); // every worker has 4 max blocking threads.
/// ```
///
/// See [tokio::runtime::Builder::max_blocking_threads] for behavior reference.
pub fn worker_max_blocking_threads(mut self, num: usize) -> Self {
self.worker_config.max_blocking_threads(num);
self
}
/// Set the maximum number of pending connections. /// Set the maximum number of pending connections.
/// ///
/// This refers to the number of clients that can be waiting to be served. /// This refers to the number of clients that can be waiting to be served.
@@ -89,7 +107,7 @@ impl ServerBuilder {
/// Generally set in the 64-2048 range. Default value is 2048. /// Generally set in the 64-2048 range. Default value is 2048.
/// ///
/// This method should be called before `bind()` method call. /// This method should be called before `bind()` method call.
pub fn backlog(mut self, num: i32) -> Self { pub fn backlog(mut self, num: u32) -> Self {
self.backlog = num; self.backlog = num;
self self
} }
@@ -100,18 +118,18 @@ impl ServerBuilder {
/// reached for each worker. /// reached for each worker.
/// ///
/// By default max connections is set to a 25k per worker. /// By default max connections is set to a 25k per worker.
pub fn maxconn(self, num: usize) -> Self { pub fn maxconn(mut self, num: usize) -> Self {
worker::max_concurrent_connections(num); self.worker_config.max_concurrent_connections(num);
self self
} }
/// Stop actix system. /// Stop Actix system.
pub fn system_exit(mut self) -> Self { pub fn system_exit(mut self) -> Self {
self.exit = true; self.exit = true;
self self
} }
/// Disable signal handling /// Disable signal handling.
pub fn disable_signals(mut self) -> Self { pub fn disable_signals(mut self) -> Self {
self.no_signals = true; self.no_signals = true;
self self
@@ -119,74 +137,46 @@ impl ServerBuilder {
/// Timeout for graceful workers shutdown in seconds. /// Timeout for graceful workers shutdown in seconds.
/// ///
/// After receiving a stop signal, workers have this much time to finish /// After receiving a stop signal, workers have this much time to finish serving requests.
/// serving requests. Workers still alive after the timeout are force /// Workers still alive after the timeout are force dropped.
/// dropped.
/// ///
/// By default shutdown timeout sets to 30 seconds. /// By default shutdown timeout sets to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u64) -> Self { pub fn shutdown_timeout(mut self, sec: u64) -> Self {
self.shutdown_timeout = Duration::from_secs(sec); self.worker_config
.shutdown_timeout(Duration::from_secs(sec));
self self
} }
/// Execute external configuration as part of the server building
/// process.
///
/// This function is useful for moving parts of configuration to a
/// different module or even library.
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
where
F: Fn(&mut ServiceConfig) -> io::Result<()>,
{
let mut cfg = ServiceConfig::new(self.threads, self.backlog);
f(&mut cfg)?;
if let Some(apply) = cfg.apply {
let mut srv = ConfiguredService::new(apply);
for (name, lst) in cfg.services {
let token = self.token.next();
srv.stream(token, name, lst.local_addr()?);
self.sockets.push((token, StdListener::Tcp(lst)));
}
self.services.push(Box::new(srv));
}
self.threads = cfg.threads;
Ok(self)
}
/// Add new service to the server. /// Add new service to the server.
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self> pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
where where
F: ServiceFactory<TcpStream>, F: ServiceFactory<TcpStream>,
U: net::ToSocketAddrs, U: ToSocketAddrs,
{ {
let sockets = bind_addr(addr, self.backlog)?; let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets { for lst in sockets {
let token = self.token.next(); let token = self.next_token();
self.services.push(StreamNewService::create( self.services.push(StreamNewService::create(
name.as_ref().to_string(), name.as_ref().to_string(),
token, token,
factory.clone(), factory.clone(),
lst.local_addr()?, lst.local_addr()?,
)); ));
self.sockets.push((token, StdListener::Tcp(lst))); self.sockets
.push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
} }
Ok(self) Ok(self)
} }
#[cfg(all(unix))]
/// Add new unix domain service to the server. /// Add new unix domain service to the server.
#[cfg(unix)]
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self> pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
where where
F: ServiceFactory<actix_rt::net::UnixStream>, F: ServiceFactory<actix_rt::net::UnixStream>,
N: AsRef<str>, N: AsRef<str>,
U: AsRef<std::path::Path>, U: AsRef<std::path::Path>,
{ {
use std::os::unix::net::UnixListener;
// The path must not exist when we try to bind. // The path must not exist when we try to bind.
// Try to remove it to avoid bind error. // Try to remove it to avoid bind error.
if let Err(e) = std::fs::remove_file(addr.as_ref()) { if let Err(e) = std::fs::remove_file(addr.as_ref()) {
@@ -196,33 +186,35 @@ impl ServerBuilder {
} }
} }
let lst = UnixListener::bind(addr)?; let lst = crate::socket::StdUnixListener::bind(addr)?;
self.listen_uds(name, lst, factory) self.listen_uds(name, lst, factory)
} }
#[cfg(all(unix))]
/// Add new unix domain service to the server. /// Add new unix domain service to the server.
/// Useful when running as a systemd service and /// Useful when running as a systemd service and
/// a socket FD can be acquired using the systemd crate. /// a socket FD can be acquired using the systemd crate.
#[cfg(unix)]
pub fn listen_uds<F, N: AsRef<str>>( pub fn listen_uds<F, N: AsRef<str>>(
mut self, mut self,
name: N, name: N,
lst: std::os::unix::net::UnixListener, lst: crate::socket::StdUnixListener,
factory: F, factory: F,
) -> io::Result<Self> ) -> io::Result<Self>
where where
F: ServiceFactory<actix_rt::net::UnixStream>, F: ServiceFactory<actix_rt::net::UnixStream>,
{ {
use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::net::{IpAddr, Ipv4Addr};
let token = self.token.next(); lst.set_nonblocking(true)?;
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let token = self.next_token();
let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
self.services.push(StreamNewService::create( self.services.push(StreamNewService::create(
name.as_ref().to_string(), name.as_ref().to_string(),
token, token,
factory.clone(), factory,
addr, addr,
)); ));
self.sockets.push((token, StdListener::Uds(lst))); self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self) Ok(self)
} }
@@ -230,97 +222,89 @@ impl ServerBuilder {
pub fn listen<F, N: AsRef<str>>( pub fn listen<F, N: AsRef<str>>(
mut self, mut self,
name: N, name: N,
lst: net::TcpListener, lst: StdTcpListener,
factory: F, factory: F,
) -> io::Result<Self> ) -> io::Result<Self>
where where
F: ServiceFactory<TcpStream>, F: ServiceFactory<TcpStream>,
{ {
let token = self.token.next(); lst.set_nonblocking(true)?;
let addr = lst.local_addr()?;
let token = self.next_token();
self.services.push(StreamNewService::create( self.services.push(StreamNewService::create(
name.as_ref().to_string(), name.as_ref().to_string(),
token, token,
factory, factory,
lst.local_addr()?, addr,
)); ));
self.sockets.push((token, StdListener::Tcp(lst)));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self) Ok(self)
} }
/// Spawn new thread and start listening for incoming connections.
///
/// This method spawns new thread and starts new actix system. Other than
/// that it is similar to `start()` method. This method blocks.
///
/// This methods panics if no socket addresses get bound.
///
/// ```rust,ignore
/// use actix_web::*;
///
/// fn main() -> std::io::Result<()> {
/// Server::new().
/// .service(
/// HttpServer::new(|| App::new().service(web::service("/").to(|| HttpResponse::Ok())))
/// .bind("127.0.0.1:0")
/// .run()
/// }
/// ```
pub fn run(self) -> io::Result<()> {
let sys = System::new("http-server");
self.start();
sys.run()
}
/// Starts processing incoming connections and return server controller. /// Starts processing incoming connections and return server controller.
pub fn start(mut self) -> Server { pub fn run(mut self) -> Server {
if self.sockets.is_empty() { if self.sockets.is_empty() {
panic!("Server should have at least one bound socket"); panic!("Server should have at least one bound socket");
} else { } else {
info!("Starting {} workers", self.threads); info!("Starting {} workers", self.threads);
// start workers // start workers
let mut workers = Vec::new(); let handles = (0..self.threads)
for idx in 0..self.threads { .map(|idx| {
let worker = self.start_worker(idx, self.accept.get_notify()); let (handle_accept, handle_server) =
workers.push(worker.clone()); self.start_worker(idx, self.accept.waker_owned());
self.workers.push((idx, worker)); self.handles.push((idx, handle_server));
}
handle_accept
})
.collect();
// start accept thread // start accept thread
for sock in &self.sockets { for sock in &self.sockets {
info!("Starting server on {}", sock.1); info!("Starting \"{}\" service on {}", sock.1, sock.2);
} }
self.accept self.accept.start(
.start(mem::replace(&mut self.sockets, Vec::new()), workers); mem::take(&mut self.sockets)
.into_iter()
.map(|t| (t.0, t.2))
.collect(),
handles,
);
// handle signals // handle signals
if !self.no_signals { if !self.no_signals {
Signals::start(self.server.clone()).unwrap(); Signals::start(self.server.clone());
} }
// start http server actor // start http server actor
let server = self.server.clone(); let server = self.server.clone();
spawn(self); rt::spawn(self);
server server
} }
} }
fn start_worker(&self, idx: usize, notify: AcceptNotify) -> WorkerClient { fn start_worker(
let avail = WorkerAvailability::new(notify); &self,
let services: Vec<Box<dyn InternalServiceFactory>> = idx: usize,
self.services.iter().map(|v| v.clone_factory()).collect(); waker_queue: WakerQueue,
) -> (WorkerHandleAccept, WorkerHandleServer) {
let services = self.services.iter().map(|v| v.clone_factory()).collect();
Worker::start(idx, services, avail, self.shutdown_timeout) ServerWorker::start(idx, services, waker_queue, self.worker_config)
} }
fn handle_cmd(&mut self, item: ServerCommand) { fn handle_cmd(&mut self, item: ServerCommand) {
match item { match item {
ServerCommand::Pause(tx) => { ServerCommand::Pause(tx) => {
self.accept.send(Command::Pause); self.accept.wake(WakerInterest::Pause);
let _ = tx.send(()); let _ = tx.send(());
} }
ServerCommand::Resume(tx) => { ServerCommand::Resume(tx) => {
self.accept.send(Command::Resume); self.accept.wake(WakerInterest::Resume);
let _ = tx.send(()); let _ = tx.send(());
} }
ServerCommand::Signal(sig) => { ServerCommand::Signal(sig) => {
@@ -364,64 +348,39 @@ impl ServerBuilder {
let exit = self.exit; let exit = self.exit;
// stop accept thread // stop accept thread
self.accept.send(Command::Stop); self.accept.wake(WakerInterest::Stop);
let notify = std::mem::replace(&mut self.notify, Vec::new()); let notify = std::mem::take(&mut self.notify);
// stop workers // stop workers
if !self.workers.is_empty() && graceful { let stop = self
spawn( .handles
self.workers .iter()
.iter() .map(move |worker| worker.1.stop(graceful))
.map(move |worker| worker.1.stop(graceful)) .collect();
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>() rt::spawn(async move {
.then(move |_| { if graceful {
if let Some(tx) = completion { let _ = join_all(stop).await;
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
if exit {
spawn(
async {
delay_until(
Instant::now() + Duration::from_millis(300),
)
.await;
System::current().stop();
}
.boxed(),
);
}
ready(())
}),
)
} else {
// we need to stop system if server was spawned
if self.exit {
spawn(
delay_until(Instant::now() + Duration::from_millis(300)).then(
|_| {
System::current().stop();
ready(())
},
),
);
} }
if let Some(tx) = completion { if let Some(tx) = completion {
let _ = tx.send(()); let _ = tx.send(());
} }
for tx in notify { for tx in notify {
let _ = tx.send(()); let _ = tx.send(());
} }
}
if exit {
sleep(Duration::from_millis(300)).await;
System::current().stop();
}
});
} }
ServerCommand::WorkerFaulted(idx) => { ServerCommand::WorkerFaulted(idx) => {
let mut found = false; let mut found = false;
for i in 0..self.workers.len() { for i in 0..self.handles.len() {
if self.workers[i].0 == idx { if self.handles[i].0 == idx {
self.workers.swap_remove(i); self.handles.swap_remove(i);
found = true; found = true;
break; break;
} }
@@ -430,10 +389,10 @@ impl ServerBuilder {
if found { if found {
error!("Worker has died {:?}, restarting", idx); error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.workers.len(); let mut new_idx = self.handles.len();
'found: loop { 'found: loop {
for i in 0..self.workers.len() { for i in 0..self.handles.len() {
if self.workers[i].0 == new_idx { if self.handles[i].0 == new_idx {
new_idx += 1; new_idx += 1;
continue 'found; continue 'found;
} }
@@ -441,13 +400,20 @@ impl ServerBuilder {
break; break;
} }
let worker = self.start_worker(new_idx, self.accept.get_notify()); let (handle_accept, handle_server) =
self.workers.push((new_idx, worker.clone())); self.start_worker(new_idx, self.accept.waker_owned());
self.accept.send(Command::Worker(worker)); self.handles.push((new_idx, handle_server));
self.accept.wake(WakerInterest::Worker(handle_accept));
} }
} }
} }
} }
fn next_token(&mut self) -> usize {
let token = self.token;
self.token += 1;
token
}
} }
impl Future for ServerBuilder { impl Future for ServerBuilder {
@@ -455,20 +421,18 @@ impl Future for ServerBuilder {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop { loop {
match ready!(Pin::new(&mut self.cmd).poll_next(cx)) { match Pin::new(&mut self.cmd).poll_recv(cx) {
Some(it) => self.as_mut().get_mut().handle_cmd(it), Poll::Ready(Some(it)) => self.as_mut().get_mut().handle_cmd(it),
None => { _ => return Poll::Pending,
return Poll::Pending;
}
} }
} }
} }
} }
pub(super) fn bind_addr<S: net::ToSocketAddrs>( pub(super) fn bind_addr<S: ToSocketAddrs>(
addr: S, addr: S,
backlog: i32, backlog: u32,
) -> io::Result<Vec<net::TcpListener>> { ) -> io::Result<Vec<MioTcpListener>> {
let mut err = None; let mut err = None;
let mut succ = false; let mut succ = false;
let mut sockets = Vec::new(); let mut sockets = Vec::new();
@@ -496,12 +460,13 @@ pub(super) fn bind_addr<S: net::ToSocketAddrs>(
} }
} }
fn create_tcp_listener(addr: net::SocketAddr, backlog: i32) -> io::Result<net::TcpListener> { fn create_tcp_listener(addr: StdSocketAddr, backlog: u32) -> io::Result<MioTcpListener> {
let builder = match addr { let socket = match addr {
net::SocketAddr::V4(_) => TcpBuilder::new_v4()?, StdSocketAddr::V4(_) => MioTcpSocket::new_v4()?,
net::SocketAddr::V6(_) => TcpBuilder::new_v6()?, StdSocketAddr::V6(_) => MioTcpSocket::new_v6()?,
}; };
builder.reuse_address(true)?;
builder.bind(addr)?; socket.set_reuseaddr(true)?;
Ok(builder.listen(backlog)?) socket.bind(addr)?;
socket.listen(backlog)
} }
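Putting the reworked builder API together, a minimal usage sketch; the "echo" name, address, and handler body are illustrative, and it assumes actix-service's `fn_service` together with the `backlog`/`workers`/`bind`/`run` methods shown above:

use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .backlog(1024) // pending-connection queue per listener
        .workers(4)    // must be greater than 0 after this change
        .bind("echo", "127.0.0.1:8080", || {
            fn_service(|stream: TcpStream| async move {
                // hypothetical handler: accept the connection and drop it
                drop(stream);
                Ok::<_, ()>(())
            })
        })?
        .run() // replaces the old `start()`
        .await
}

Note that the `Server` returned by `run()` is itself a future that resolves when the server shuts down.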


@@ -1,285 +0,0 @@
use std::collections::HashMap;
use std::{fmt, io, net};
use actix_rt::net::TcpStream;
use actix_service as actix;
use actix_utils::counter::CounterGuard;
use futures::future::{ok, Future, FutureExt, LocalBoxFuture};
use log::error;
use super::builder::bind_addr;
use super::service::{
BoxedServerService, InternalServiceFactory, ServerMessage, StreamService,
};
use super::Token;
pub struct ServiceConfig {
pub(crate) services: Vec<(String, net::TcpListener)>,
pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
pub(crate) threads: usize,
pub(crate) backlog: i32,
}
impl ServiceConfig {
pub(super) fn new(threads: usize, backlog: i32) -> ServiceConfig {
ServiceConfig {
threads,
backlog,
services: Vec::new(),
apply: None,
}
}
/// Set number of workers to start.
///
/// By default server uses number of available logical cpu as workers
/// count.
pub fn workers(&mut self, num: usize) {
self.threads = num;
}
/// Add new service to server
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
where
U: net::ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
self.listen(name.as_ref(), lst);
}
Ok(self)
}
/// Add new service to server
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: net::TcpListener) -> &mut Self {
if self.apply.is_none() {
self.apply = Some(Box::new(not_configured));
}
self.services.push((name.as_ref().to_string(), lst));
self
}
/// Register service configuration function. This function get called
/// during worker runtime configuration. It get executed in worker thread.
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
self.apply = Some(Box::new(f));
Ok(())
}
}
pub(super) struct ConfiguredService {
rt: Box<dyn ServiceRuntimeConfiguration>,
names: HashMap<Token, (String, net::SocketAddr)>,
topics: HashMap<String, Token>,
services: Vec<Token>,
}
impl ConfiguredService {
pub(super) fn new(rt: Box<dyn ServiceRuntimeConfiguration>) -> Self {
ConfiguredService {
rt,
names: HashMap::new(),
topics: HashMap::new(),
services: Vec::new(),
}
}
pub(super) fn stream(&mut self, token: Token, name: String, addr: net::SocketAddr) {
self.names.insert(token, (name.clone(), addr));
self.topics.insert(name.clone(), token);
self.services.push(token);
}
}
impl InternalServiceFactory for ConfiguredService {
fn name(&self, token: Token) -> &str {
&self.names[&token].0
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
rt: self.rt.clone(),
names: self.names.clone(),
topics: self.topics.clone(),
services: self.services.clone(),
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
// configure services
let mut rt = ServiceRuntime::new(self.topics.clone());
self.rt.configure(&mut rt);
rt.validate();
let mut names = self.names.clone();
let tokens = self.services.clone();
// construct services
async move {
let mut services = rt.services;
// TODO: Proper error handling here
for f in rt.onstart.into_iter() {
f.await;
}
let mut res = vec![];
for token in tokens {
if let Some(srv) = services.remove(&token) {
let newserv = srv.new_service(());
match newserv.await {
Ok(serv) => {
res.push((token, serv));
}
Err(_) => {
error!("Can not construct service");
return Err(());
}
}
} else {
let name = names.remove(&token).unwrap().0;
res.push((
token,
Box::new(StreamService::new(actix::fn_service(
move |_: TcpStream| {
error!("Service {:?} is not configured", name);
ok::<_, ()>(())
},
))),
));
};
}
return Ok(res);
}
.boxed_local()
}
}
pub(super) trait ServiceRuntimeConfiguration: Send {
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration>;
fn configure(&self, rt: &mut ServiceRuntime);
}
impl<F> ServiceRuntimeConfiguration for F
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration> {
Box::new(self.clone())
}
fn configure(&self, rt: &mut ServiceRuntime) {
(self)(rt)
}
}
fn not_configured(_: &mut ServiceRuntime) {
error!("Service is not configured");
}
pub struct ServiceRuntime {
names: HashMap<String, Token>,
services: HashMap<Token, BoxedNewService>,
onstart: Vec<LocalBoxFuture<'static, ()>>,
}
impl ServiceRuntime {
fn new(names: HashMap<String, Token>) -> Self {
ServiceRuntime {
names,
services: HashMap::new(),
onstart: Vec::new(),
}
}
fn validate(&self) {
for (name, token) in &self.names {
if !self.services.contains_key(&token) {
error!("Service {:?} is not configured", name);
}
}
}
/// Register service.
///
/// Name of the service must be registered during configuration stage with
/// *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
pub fn service<T, F>(&mut self, name: &str, service: F)
where
F: actix::IntoServiceFactory<T>,
T: actix::ServiceFactory<Config = (), Request = TcpStream> + 'static,
T::Future: 'static,
T::Service: 'static,
T::InitError: fmt::Debug,
{
// let name = name.to_owned();
if let Some(token) = self.names.get(name) {
self.services.insert(
token.clone(),
Box::new(ServiceFactory {
inner: service.into_factory(),
}),
);
} else {
panic!("Unknown service: {:?}", name);
}
}
/// Execute future before services initialization.
pub fn on_start<F>(&mut self, fut: F)
where
F: Future<Output = ()> + 'static,
{
self.onstart.push(fut.boxed_local())
}
}
type BoxedNewService = Box<
dyn actix::ServiceFactory<
Request = (Option<CounterGuard>, ServerMessage),
Response = (),
Error = (),
InitError = (),
Config = (),
Service = BoxedServerService,
Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>,
>,
>;
struct ServiceFactory<T> {
inner: T,
}
impl<T> actix::ServiceFactory for ServiceFactory<T>
where
T: actix::ServiceFactory<Config = (), Request = TcpStream>,
T::Future: 'static,
T::Service: 'static,
T::Error: 'static,
T::InitError: fmt::Debug + 'static,
{
type Request = (Option<CounterGuard>, ServerMessage);
type Response = ();
type Error = ();
type InitError = ();
type Config = ();
type Service = BoxedServerService;
type Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>;
fn new_service(&self, _: ()) -> Self::Future {
let fut = self.inner.new_service(());
async move {
return match fut.await {
Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
Err(e) => {
error!("Can not construct service: {:?}", e);
Err(())
}
};
}
.boxed_local()
}
}


@@ -1,37 +1,103 @@
//! General purpose tcp server //! General purpose TCP server.
#![deny(rust_2018_idioms, warnings)]
#![allow(clippy::type_complexity)] #![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod accept; mod accept;
mod builder; mod builder;
mod config;
mod server; mod server;
mod service; mod service;
mod signals; mod signals;
mod socket; mod socket;
mod test_server;
mod waker_queue;
mod worker; mod worker;
pub use self::builder::ServerBuilder; pub use self::builder::ServerBuilder;
pub use self::config::{ServiceConfig, ServiceRuntime};
pub use self::server::Server; pub use self::server::Server;
pub use self::service::ServiceFactory; pub use self::service::ServiceFactory;
pub use self::test_server::TestServer;
#[doc(hidden)] #[doc(hidden)]
pub use self::socket::FromStream; pub use self::socket::FromStream;
/// Socket id token use std::future::Future;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] use std::pin::Pin;
pub(crate) struct Token(usize); use std::task::{Context, Poll};
impl Token {
pub(crate) fn next(&mut self) -> Token {
let token = Token(self.0);
self.0 += 1;
token
}
}
/// Start server building process /// Start server building process
pub fn new() -> ServerBuilder { pub fn new() -> ServerBuilder {
ServerBuilder::default() ServerBuilder::default()
} }
// a poor man's join future. joined future is only used when starting/stopping the server.
// pin_project and pinned futures are overkill for this task.
pub(crate) struct JoinAll<T> {
fut: Vec<JoinFuture<T>>,
}
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + 'static>) -> JoinAll<T> {
let fut = fut
.into_iter()
.map(|f| JoinFuture::Future(Box::pin(f)))
.collect();
JoinAll { fut }
}
enum JoinFuture<T> {
Future(Pin<Box<dyn Future<Output = T>>>),
Result(Option<T>),
}
impl<T> Unpin for JoinAll<T> {}
impl<T> Future for JoinAll<T> {
type Output = Vec<T>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut ready = true;
let this = self.get_mut();
for fut in this.fut.iter_mut() {
if let JoinFuture::Future(f) = fut {
match f.as_mut().poll(cx) {
Poll::Ready(t) => {
*fut = JoinFuture::Result(Some(t));
}
Poll::Pending => ready = false,
}
}
}
if ready {
let mut res = Vec::new();
for fut in this.fut.iter_mut() {
if let JoinFuture::Result(f) = fut {
res.push(f.take().unwrap());
}
}
Poll::Ready(res)
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod test {
use super::*;
use actix_utils::future::ready;
#[actix_rt::test]
async fn test_join_all() {
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
let mut res = join_all(futs).await.into_iter();
assert_eq!(Ok(1), res.next().unwrap());
assert_eq!(Err(3), res.next().unwrap());
assert_eq!(Ok(9), res.next().unwrap());
}
}


@@ -3,9 +3,8 @@ use std::io;
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use futures::channel::mpsc::UnboundedSender; use tokio::sync::mpsc::UnboundedSender;
use futures::channel::oneshot; use tokio::sync::oneshot;
use futures::FutureExt;
use crate::builder::ServerBuilder; use crate::builder::ServerBuilder;
use crate::signals::Signal; use crate::signals::Signal;
@@ -42,11 +41,11 @@ impl Server {
} }
pub(crate) fn signal(&self, sig: Signal) { pub(crate) fn signal(&self, sig: Signal) {
let _ = self.0.unbounded_send(ServerCommand::Signal(sig)); let _ = self.0.send(ServerCommand::Signal(sig));
} }
pub(crate) fn worker_faulted(&self, idx: usize) { pub(crate) fn worker_faulted(&self, idx: usize) {
let _ = self.0.unbounded_send(ServerCommand::WorkerFaulted(idx)); let _ = self.0.send(ServerCommand::WorkerFaulted(idx));
} }
/// Pause accepting incoming connections /// Pause accepting incoming connections
@@ -55,15 +54,19 @@ impl Server {
/// All opened connection remains active. /// All opened connection remains active.
pub fn pause(&self) -> impl Future<Output = ()> { pub fn pause(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Pause(tx)); let _ = self.0.send(ServerCommand::Pause(tx));
rx.map(|_| ()) async {
let _ = rx.await;
}
} }
/// Resume accepting incoming connections /// Resume accepting incoming connections
pub fn resume(&self) -> impl Future<Output = ()> { pub fn resume(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Resume(tx)); let _ = self.0.send(ServerCommand::Resume(tx));
rx.map(|_| ()) async {
let _ = rx.await;
}
} }
/// Stop incoming connection processing, stop all workers and exit. /// Stop incoming connection processing, stop all workers and exit.
@@ -71,11 +74,13 @@ impl Server {
/// If server starts with `spawn()` method, then spawned thread get terminated. /// If server starts with `spawn()` method, then spawned thread get terminated.
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> { pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Stop { let _ = self.0.send(ServerCommand::Stop {
graceful, graceful,
completion: Some(tx), completion: Some(tx),
}); });
rx.map(|_| ()) async {
let _ = rx.await;
}
} }
} }
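The `Server` handle returned by `ServerBuilder::run()` can then be driven from another task; a minimal sketch using the `pause`/`resume`/`stop` methods above (the `control` function name is illustrative):

use actix_server::Server;

async fn control(srv: Server) {
    // Temporarily stop accepting new connections; already-open connections stay active.
    srv.pause().await;
    // Start accepting connections again.
    srv.resume().await;
    // Stop all workers; `true` requests a graceful shutdown within the configured timeout.
    srv.stop(true).await;
}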
@@ -93,7 +98,7 @@ impl Future for Server {
if this.1.is_none() { if this.1.is_none() {
let (tx, rx) = oneshot::channel(); let (tx, rx) = oneshot::channel();
if this.0.unbounded_send(ServerCommand::Notify(tx)).is_err() { if this.0.send(ServerCommand::Notify(tx)).is_err() {
return Poll::Ready(Ok(())); return Poll::Ready(Ok(()));
} }
this.1 = Some(rx); this.1 = Some(rx);
@@ -101,8 +106,7 @@ impl Future for Server {
match Pin::new(this.1.as_mut().unwrap()).poll(cx) { match Pin::new(this.1.as_mut().unwrap()).poll(cx) {
Poll::Pending => Poll::Pending, Poll::Pending => Poll::Pending,
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())), Poll::Ready(_) => Poll::Ready(Ok(())),
Poll::Ready(Err(_)) => Poll::Ready(Ok(())),
} }
} }
} }


@@ -1,104 +1,89 @@
use std::marker::PhantomData; use std::marker::PhantomData;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::time::Duration;
use actix_rt::spawn; use actix_service::{Service, ServiceFactory as BaseServiceFactory};
use actix_service::{self as actix, Service, ServiceFactory as ActixServiceFactory}; use actix_utils::future::{ready, Ready};
use actix_utils::counter::CounterGuard; use futures_core::future::LocalBoxFuture;
use futures::future::{err, ok, LocalBoxFuture, Ready};
use futures::{FutureExt, TryFutureExt};
use log::error; use log::error;
use super::Token; use crate::socket::{FromStream, MioStream};
use crate::socket::{FromStream, StdStream}; use crate::worker::WorkerCounterGuard;
/// Server message
pub(crate) enum ServerMessage {
/// New stream
Connect(StdStream),
/// Gracefull shutdown
Shutdown(Duration),
/// Force shutdown
ForceShutdown,
}
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static { pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
type Factory: actix::ServiceFactory<Config = (), Request = Stream>; type Factory: BaseServiceFactory<Stream, Config = ()>;
fn create(&self) -> Self::Factory; fn create(&self) -> Self::Factory;
} }
pub(crate) trait InternalServiceFactory: Send { pub(crate) trait InternalServiceFactory: Send {
fn name(&self, token: Token) -> &str; fn name(&self, token: usize) -> &str;
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>; fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>; fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>;
} }
pub(crate) type BoxedServerService = Box< pub(crate) type BoxedServerService = Box<
dyn Service< dyn Service<
Request = (Option<CounterGuard>, ServerMessage), (WorkerCounterGuard, MioStream),
Response = (), Response = (),
Error = (), Error = (),
Future = Ready<Result<(), ()>>, Future = Ready<Result<(), ()>>,
>, >,
>; >;
pub(crate) struct StreamService<T> { pub(crate) struct StreamService<S, I> {
service: T, service: S,
_phantom: PhantomData<I>,
} }
impl<T> StreamService<T> { impl<S, I> StreamService<S, I> {
pub(crate) fn new(service: T) -> Self { pub(crate) fn new(service: S) -> Self {
StreamService { service } StreamService {
service,
_phantom: PhantomData,
}
} }
} }
impl<T, I> Service for StreamService<T> impl<S, I> Service<(WorkerCounterGuard, MioStream)> for StreamService<S, I>
where where
T: Service<Request = I>, S: Service<I>,
T::Future: 'static, S::Future: 'static,
T::Error: 'static, S::Error: 'static,
I: FromStream, I: FromStream,
{ {
type Request = (Option<CounterGuard>, ServerMessage);
type Response = (); type Response = ();
type Error = (); type Error = ();
type Future = Ready<Result<(), ()>>; type Future = Ready<Result<(), ()>>;
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(|_| ()) self.service.poll_ready(ctx).map_err(|_| ())
} }
fn call(&mut self, (guard, req): (Option<CounterGuard>, ServerMessage)) -> Self::Future { fn call(&self, (guard, req): (WorkerCounterGuard, MioStream)) -> Self::Future {
match req { ready(match FromStream::from_mio(req) {
ServerMessage::Connect(stream) => { Ok(stream) => {
let stream = FromStream::from_stdstream(stream).map_err(|e| { let f = self.service.call(stream);
error!("Can not convert to an async tcp stream: {}", e); actix_rt::spawn(async move {
let _ = f.await;
drop(guard);
}); });
Ok(())
if let Ok(stream) = stream {
let f = self.service.call(stream);
spawn(async move {
let _ = f.await;
drop(guard);
});
ok(())
} else {
err(())
}
} }
_ => ok(()), Err(e) => {
} error!("Can not convert to an async tcp stream: {}", e);
Err(())
}
})
} }
} }
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> { pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
name: String, name: String,
inner: F, inner: F,
token: Token, token: usize,
addr: SocketAddr, addr: SocketAddr,
_t: PhantomData<Io>, _t: PhantomData<Io>,
} }
@@ -110,7 +95,7 @@ where
{ {
pub(crate) fn create( pub(crate) fn create(
name: String, name: String,
token: Token, token: usize,
inner: F, inner: F,
addr: SocketAddr, addr: SocketAddr,
) -> Box<dyn InternalServiceFactory> { ) -> Box<dyn InternalServiceFactory> {
@@ -129,7 +114,7 @@ where
F: ServiceFactory<Io>, F: ServiceFactory<Io>,
Io: FromStream + Send + 'static, Io: FromStream + Send + 'static,
{ {
fn name(&self, _: Token) -> &str { fn name(&self, _: usize) -> &str {
&self.name &self.name
} }
@@ -143,38 +128,25 @@ where
}) })
} }
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> { fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>> {
let token = self.token; let token = self.token;
self.inner let fut = self.inner.create().new_service(());
.create() Box::pin(async move {
.new_service(()) match fut.await {
.map_err(|_| ()) Ok(inner) => {
.map_ok(move |inner| { let service = Box::new(StreamService::new(inner)) as _;
let service: BoxedServerService = Box::new(StreamService::new(inner)); Ok((token, service))
vec![(token, service)] }
}) Err(_) => Err(()),
.boxed_local() }
} })
}
impl InternalServiceFactory for Box<dyn InternalServiceFactory> {
fn name(&self, token: Token) -> &str {
self.as_ref().name(token)
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
self.as_ref().clone_factory()
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
self.as_ref().create()
} }
} }
impl<F, T, I> ServiceFactory<I> for F impl<F, T, I> ServiceFactory<I> for F
where where
F: Fn() -> T + Send + Clone + 'static, F: Fn() -> T + Send + Clone + 'static,
T: actix::ServiceFactory<Config = (), Request = I>, T: BaseServiceFactory<I, Config = ()>,
I: FromStream, I: FromStream,
{ {
type Factory = T; type Factory = T;


@@ -1,10 +1,7 @@
use std::future::Future; use std::future::Future;
use std::io;
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use futures::future::lazy;
use crate::server::Server; use crate::server::Server;
/// Different types of process signals /// Different types of process signals
@@ -24,50 +21,49 @@ pub(crate) enum Signal {
pub(crate) struct Signals { pub(crate) struct Signals {
srv: Server, srv: Server,
#[cfg(not(unix))] #[cfg(not(unix))]
stream: Pin<Box<dyn Future<Output = io::Result<()>>>>, signals: futures_core::future::LocalBoxFuture<'static, std::io::Result<()>>,
#[cfg(unix)] #[cfg(unix)]
streams: Vec<(Signal, actix_rt::signal::unix::Signal)>, signals: Vec<(Signal, actix_rt::signal::unix::Signal)>,
} }
impl Signals { impl Signals {
pub(crate) fn start(srv: Server) -> io::Result<()> { pub(crate) fn start(srv: Server) {
actix_rt::spawn(lazy(|_| { #[cfg(not(unix))]
#[cfg(not(unix))] {
{ actix_rt::spawn(Signals {
actix_rt::spawn(Signals { srv,
srv, signals: Box::pin(actix_rt::signal::ctrl_c()),
stream: Box::pin(actix_rt::signal::ctrl_c()), });
}); }
} #[cfg(unix)]
#[cfg(unix)] {
{ use actix_rt::signal::unix;
use actix_rt::signal::unix;
let mut streams = Vec::new(); let sig_map = [
(unix::SignalKind::interrupt(), Signal::Int),
(unix::SignalKind::hangup(), Signal::Hup),
(unix::SignalKind::terminate(), Signal::Term),
(unix::SignalKind::quit(), Signal::Quit),
];
let sig_map = [ let signals = sig_map
(unix::SignalKind::interrupt(), Signal::Int), .iter()
(unix::SignalKind::hangup(), Signal::Hup), .filter_map(|(kind, sig)| {
(unix::SignalKind::terminate(), Signal::Term), unix::signal(*kind)
(unix::SignalKind::quit(), Signal::Quit), .map(|tokio_sig| (*sig, tokio_sig))
]; .map_err(|e| {
log::error!(
"Can not initialize stream handler for {:?} err: {}",
sig,
e
)
})
.ok()
})
.collect::<Vec<_>>();
for (kind, sig) in sig_map.iter() { actix_rt::spawn(Signals { srv, signals });
match unix::signal(*kind) { }
Ok(stream) => streams.push((*sig, stream)),
Err(e) => log::error!(
"Can not initialize stream handler for {:?} err: {}",
sig,
e
),
}
}
actix_rt::spawn(Signals { srv, streams })
}
}));
Ok(())
} }
} }
@@ -76,25 +72,20 @@ impl Future for Signals {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
#[cfg(not(unix))] #[cfg(not(unix))]
match Pin::new(&mut self.stream).poll(cx) { match self.signals.as_mut().poll(cx) {
Poll::Ready(_) => { Poll::Ready(_) => {
self.srv.signal(Signal::Int); self.srv.signal(Signal::Int);
Poll::Ready(()) Poll::Ready(())
} }
Poll::Pending => return Poll::Pending, Poll::Pending => Poll::Pending,
} }
#[cfg(unix)] #[cfg(unix)]
{ {
for idx in 0..self.streams.len() { for (sig, fut) in self.signals.iter_mut() {
loop { if Pin::new(fut).poll_recv(cx).is_ready() {
match self.streams[idx].1.poll_recv(cx) { let sig = *sig;
Poll::Ready(None) => return Poll::Ready(()), self.srv.signal(sig);
Poll::Pending => break, return Poll::Ready(());
Poll::Ready(Some(_)) => {
let sig = self.streams[idx].0;
self.srv.signal(sig);
}
}
} }
} }
Poll::Pending Poll::Pending


@@ -1,135 +1,82 @@
use std::{fmt, io, net}; pub(crate) use std::net::{
SocketAddr as StdSocketAddr, TcpListener as StdTcpListener, ToSocketAddrs,
};
pub(crate) use mio::net::{TcpListener as MioTcpListener, TcpSocket as MioTcpSocket};
#[cfg(unix)]
pub(crate) use {
mio::net::UnixListener as MioUnixListener,
std::os::unix::net::UnixListener as StdUnixListener,
};
use std::{fmt, io};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::net::TcpStream; use actix_rt::net::TcpStream;
use mio::{event::Source, Interest, Registry, Token};
pub(crate) enum StdListener { pub(crate) enum MioListener {
Tcp(net::TcpListener), Tcp(MioTcpListener),
#[cfg(all(unix))] #[cfg(unix)]
Uds(std::os::unix::net::UnixListener), Uds(MioUnixListener),
} }
pub(crate) enum SocketAddr { impl MioListener {
Tcp(net::SocketAddr),
#[cfg(all(unix))]
Uds(std::os::unix::net::SocketAddr),
}
impl fmt::Display for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
#[cfg(all(unix))]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
impl fmt::Debug for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
#[cfg(all(unix))]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
impl fmt::Display for StdListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
StdListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
#[cfg(all(unix))]
StdListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
}
}
}
impl StdListener {
pub(crate) fn local_addr(&self) -> SocketAddr { pub(crate) fn local_addr(&self) -> SocketAddr {
match self {
StdListener::Tcp(lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
#[cfg(all(unix))]
StdListener::Uds(lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
}
}
pub(crate) fn into_listener(self) -> SocketListener {
match self {
StdListener::Tcp(lst) => SocketListener::Tcp(
mio::net::TcpListener::from_std(lst)
.expect("Can not create mio::net::TcpListener"),
),
#[cfg(all(unix))]
StdListener::Uds(lst) => SocketListener::Uds(
mio_uds::UnixListener::from_listener(lst)
.expect("Can not create mio_uds::UnixListener"),
),
}
}
}
#[derive(Debug)]
pub enum StdStream {
Tcp(std::net::TcpStream),
#[cfg(all(unix))]
Uds(std::os::unix::net::UnixStream),
}
pub(crate) enum SocketListener {
Tcp(mio::net::TcpListener),
#[cfg(all(unix))]
Uds(mio_uds::UnixListener),
}
impl SocketListener {
pub(crate) fn accept(&self) -> io::Result<Option<(StdStream, SocketAddr)>> {
match *self { match *self {
SocketListener::Tcp(ref lst) => lst MioListener::Tcp(ref lst) => lst
.accept_std() .local_addr()
.map(|(stream, addr)| Some((StdStream::Tcp(stream), SocketAddr::Tcp(addr)))), .map(SocketAddr::Tcp)
#[cfg(all(unix))] .unwrap_or(SocketAddr::Unknown),
SocketListener::Uds(ref lst) => lst.accept_std().map(|res| { #[cfg(unix)]
res.map(|(stream, addr)| (StdStream::Uds(stream), SocketAddr::Uds(addr))) MioListener::Uds(ref lst) => lst
}), .local_addr()
.map(SocketAddr::Uds)
.unwrap_or(SocketAddr::Unknown),
}
}
pub(crate) fn accept(&self) -> io::Result<MioStream> {
match *self {
MioListener::Tcp(ref lst) => lst.accept().map(|(stream, _)| MioStream::Tcp(stream)),
#[cfg(unix)]
MioListener::Uds(ref lst) => lst.accept().map(|(stream, _)| MioStream::Uds(stream)),
} }
} }
} }
impl mio::Evented for SocketListener { impl Source for MioListener {
fn register( fn register(
&self, &mut self,
poll: &mio::Poll, registry: &Registry,
token: mio::Token, token: Token,
interest: mio::Ready, interests: Interest,
opts: mio::PollOpt,
) -> io::Result<()> { ) -> io::Result<()> {
match *self { match *self {
SocketListener::Tcp(ref lst) => lst.register(poll, token, interest, opts), MioListener::Tcp(ref mut lst) => lst.register(registry, token, interests),
#[cfg(all(unix))] #[cfg(unix)]
SocketListener::Uds(ref lst) => lst.register(poll, token, interest, opts), MioListener::Uds(ref mut lst) => lst.register(registry, token, interests),
} }
} }
fn reregister( fn reregister(
&self, &mut self,
poll: &mio::Poll, registry: &Registry,
token: mio::Token, token: Token,
interest: mio::Ready, interests: Interest,
opts: mio::PollOpt,
) -> io::Result<()> { ) -> io::Result<()> {
match *self { match *self {
SocketListener::Tcp(ref lst) => lst.reregister(poll, token, interest, opts), MioListener::Tcp(ref mut lst) => lst.reregister(registry, token, interests),
#[cfg(all(unix))] #[cfg(unix)]
SocketListener::Uds(ref lst) => lst.reregister(poll, token, interest, opts), MioListener::Uds(ref mut lst) => lst.reregister(registry, token, interests),
} }
} }
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
match *self { match *self {
SocketListener::Tcp(ref lst) => lst.deregister(poll), MioListener::Tcp(ref mut lst) => lst.deregister(registry),
#[cfg(all(unix))] #[cfg(unix)]
SocketListener::Uds(ref lst) => { MioListener::Uds(ref mut lst) => {
let res = lst.deregister(poll); let res = lst.deregister(registry);
// cleanup file path // cleanup file path
if let Ok(addr) = lst.local_addr() { if let Ok(addr) = lst.local_addr() {
@@ -143,28 +90,172 @@ impl mio::Evented for SocketListener {
} }
} }
pub trait FromStream: AsyncRead + AsyncWrite + Sized { impl From<StdTcpListener> for MioListener {
fn from_stdstream(sock: StdStream) -> io::Result<Self>; fn from(lst: StdTcpListener) -> Self {
MioListener::Tcp(MioTcpListener::from_std(lst))
}
} }
impl FromStream for TcpStream { #[cfg(unix)]
fn from_stdstream(sock: StdStream) -> io::Result<Self> { impl From<StdUnixListener> for MioListener {
match sock { fn from(lst: StdUnixListener) -> Self {
StdStream::Tcp(stream) => TcpStream::from_std(stream), MioListener::Uds(MioUnixListener::from_std(lst))
}
}
impl fmt::Debug for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
#[cfg(all(unix))] #[cfg(all(unix))]
StdStream::Uds(_) => { MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
panic!("Should not happen, bug in server impl"); }
}
}
impl fmt::Display for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
#[cfg(unix)]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
}
}
}
pub(crate) enum SocketAddr {
Unknown,
Tcp(StdSocketAddr),
#[cfg(unix)]
Uds(mio::net::SocketAddr),
}
impl fmt::Display for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Self::Unknown => write!(f, "Unknown SocketAddr"),
Self::Tcp(ref addr) => write!(f, "{}", addr),
#[cfg(unix)]
Self::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
impl fmt::Debug for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Self::Unknown => write!(f, "Unknown SocketAddr"),
Self::Tcp(ref addr) => write!(f, "{:?}", addr),
#[cfg(unix)]
Self::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
#[derive(Debug)]
pub enum MioStream {
Tcp(mio::net::TcpStream),
#[cfg(unix)]
Uds(mio::net::UnixStream),
}
/// Helper trait for converting a mio stream into a tokio stream.
pub trait FromStream: Sized {
fn from_mio(sock: MioStream) -> io::Result<Self>;
}
#[cfg(windows)]
mod win_impl {
use super::*;
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
}
} }
} }
} }
} }
#[cfg(all(unix))] #[cfg(unix)]
impl FromStream for actix_rt::net::UnixStream { mod unix_impl {
fn from_stdstream(sock: StdStream) -> io::Result<Self> { use super::*;
match sock {
StdStream::Tcp(_) => panic!("Should not happen, bug in server impl"), use std::os::unix::io::{FromRawFd, IntoRawFd};
StdStream::Uds(stream) => actix_rt::net::UnixStream::from_std(stream),
use actix_rt::net::UnixStream;
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn socket_addr() {
let addr = SocketAddr::Tcp("127.0.0.1:8080".parse().unwrap());
assert!(format!("{:?}", addr).contains("127.0.0.1:8080"));
assert_eq!(format!("{}", addr), "127.0.0.1:8080");
let addr: StdSocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = MioTcpSocket::new_v4().unwrap();
socket.set_reuseaddr(true).unwrap();
socket.bind(addr).unwrap();
let tcp = socket.listen(128).unwrap();
let lst = MioListener::Tcp(tcp);
assert!(format!("{:?}", lst).contains("TcpListener"));
assert!(format!("{}", lst).contains("127.0.0.1"));
}
#[test]
#[cfg(unix)]
fn uds() {
let _ = std::fs::remove_file("/tmp/sock.xxxxx");
if let Ok(socket) = MioUnixListener::bind("/tmp/sock.xxxxx") {
let addr = socket.local_addr().expect("Couldn't get local address");
let a = SocketAddr::Uds(addr);
assert!(format!("{:?}", a).contains("/tmp/sock.xxxxx"));
assert!(format!("{}", a).contains("/tmp/sock.xxxxx"));
let lst = MioListener::Uds(socket);
assert!(format!("{:?}", lst).contains("/tmp/sock.xxxxx"));
assert!(format!("{}", lst).contains("/tmp/sock.xxxxx"));
} }
} }
} }
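As a small sketch of the conversion path used by `ServerBuilder::listen()` with these types (the helper name is hypothetical; `MioListener` is crate-internal):

use std::net::TcpListener;

// mio requires the socket to be non-blocking before the listener is
// registered with the poll, so the builder sets that first.
fn into_mio_listener(lst: TcpListener) -> std::io::Result<MioListener> {
    lst.set_nonblocking(true)?;
    Ok(MioListener::from(lst))
}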


@@ -1,16 +1,9 @@
//! Various helpers for Actix applications to use during testing.
#![deny(rust_2018_idioms, warnings)]
#![allow(clippy::type_complexity)]
use std::sync::mpsc; use std::sync::mpsc;
use std::{net, thread}; use std::{net, thread};
use actix_rt::{net::TcpStream, System}; use actix_rt::{net::TcpStream, System};
use actix_server::{Server, ServerBuilder, ServiceFactory};
use net2::TcpBuilder;
#[cfg(not(test))] // Work around for rust-lang/rust#62127 use crate::{Server, ServerBuilder, ServiceFactory};
pub use actix_macros::test;
/// The `TestServer` type. /// The `TestServer` type.
/// ///
@@ -19,9 +12,9 @@ pub use actix_macros::test;
/// ///
/// # Examples /// # Examples
/// ///
/// ```rust /// ```
/// use actix_service::fn_service; /// use actix_service::fn_service;
/// use actix_testing::TestServer; /// use actix_server::TestServer;
/// ///
/// #[actix_rt::main] /// #[actix_rt::main]
/// async fn main() { /// async fn main() {
@@ -37,7 +30,7 @@ pub use actix_macros::test;
/// ``` /// ```
pub struct TestServer; pub struct TestServer;
/// Test server runstime /// Test server runtime
pub struct TestServerRuntime { pub struct TestServerRuntime {
addr: net::SocketAddr, addr: net::SocketAddr,
host: String, host: String,
@@ -55,11 +48,8 @@ impl TestServer {
// run server in separate thread // run server in separate thread
thread::spawn(move || { thread::spawn(move || {
let sys = System::new("actix-test-server"); let sys = System::new();
factory(Server::build()) factory(Server::build()).workers(1).disable_signals().run();
.workers(1)
.disable_signals()
.start();
tx.send(System::current()).unwrap(); tx.send(System::current()).unwrap();
sys.run() sys.run()
@@ -80,17 +70,19 @@ impl TestServer {
// run server in separate thread // run server in separate thread
thread::spawn(move || { thread::spawn(move || {
let sys = System::new("actix-test-server"); let sys = System::new();
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap(); let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap(); let local_addr = tcp.local_addr().unwrap();
Server::build() sys.block_on(async {
.listen("test", tcp, factory)? Server::build()
.workers(1) .listen("test", tcp, factory)
.disable_signals() .unwrap()
.start(); .workers(1)
.disable_signals()
tx.send((System::current(), local_addr)).unwrap(); .run();
tx.send((System::current(), local_addr)).unwrap();
});
sys.run() sys.run()
}); });
@@ -100,20 +92,20 @@ impl TestServer {
let port = addr.port(); let port = addr.port();
TestServerRuntime { TestServerRuntime {
system,
addr, addr,
host, host,
port, port,
system,
} }
} }
/// Get firat available unused local address /// Get first available unused local address
pub fn unused_addr() -> net::SocketAddr { pub fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap(); let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = TcpBuilder::new_v4().unwrap(); let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(&addr).unwrap(); socket.bind(addr).unwrap();
socket.reuse_address(true).unwrap(); socket.set_reuseaddr(true).unwrap();
let tcp = socket.to_tcp_listener().unwrap(); let tcp = socket.listen(1024).unwrap();
tcp.local_addr().unwrap() tcp.local_addr().unwrap()
} }
} }

View File

@@ -0,0 +1,89 @@
use std::{
collections::VecDeque,
ops::Deref,
sync::{Arc, Mutex, MutexGuard},
};
use mio::{Registry, Token as MioToken, Waker};
use crate::worker::WorkerHandleAccept;
/// Waker token for `mio::Poll` instance.
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);
/// A `mio::Waker` for waking up the `Accept`'s `Poll`, paired with a queue of the `WakerInterest`s
/// that the `Poll` should look into when it is woken up.
pub(crate) struct WakerQueue(Arc<(Waker, Mutex<VecDeque<WakerInterest>>)>);
impl Clone for WakerQueue {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl Deref for WakerQueue {
type Target = (Waker, Mutex<VecDeque<WakerInterest>>);
fn deref(&self) -> &Self::Target {
self.0.deref()
}
}
impl WakerQueue {
/// Construct a waker queue with given `Poll`'s `Registry` and capacity.
///
/// A fixed `WAKER_TOKEN` is used to identify wake-up events; the `Poll` must match an event's
/// token against it in order to handle the queued `WakerInterest`s properly.
pub(crate) fn new(registry: &Registry) -> std::io::Result<Self> {
let waker = Waker::new(registry, WAKER_TOKEN)?;
let queue = Mutex::new(VecDeque::with_capacity(16));
Ok(Self(Arc::new((waker, queue))))
}
/// Push a new interest to the queue and wake up the accept poll afterwards.
pub(crate) fn wake(&self, interest: WakerInterest) {
let (waker, queue) = self.deref();
queue
.lock()
.expect("Failed to lock WakerQueue")
.push_back(interest);
waker
.wake()
.unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e));
}
/// Get a MutexGuard of the waker queue.
pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
self.deref().1.lock().expect("Failed to lock WakerQueue")
}
/// Reset the waker queue so it does not grow infinitely.
pub(crate) fn reset(queue: &mut VecDeque<WakerInterest>) {
std::mem::swap(&mut VecDeque::<WakerInterest>::with_capacity(16), queue);
}
}
/// Types of interest that `Accept`'s `Poll` looks into when it is woken up by the waker.
///
/// These interests should not be confused with `mio::Interest`; they are mostly not I/O related.
pub(crate) enum WakerInterest {
/// `WorkerAvailable` is an interest from a `Worker` notifying `Accept` that the worker is
/// available again and can accept new tasks.
WorkerAvailable(usize),
/// `Pause`, `Resume`, and `Stop` interests come from the `ServerBuilder` future. It listens
/// for `ServerCommand` and notifies `Accept` to carry out exactly these tasks.
Pause,
Resume,
Stop,
/// `Timer` is an interest sent as a delayed future. When an error happens while accepting a
/// connection, `Accept` deregisters the socket listeners temporarily; this interest wakes up
/// the poll so they can be registered again once the delayed future resolves.
Timer,
/// `Worker` is an interest raised after a worker runs into a faulted state (determined by
/// whether work can still be sent to it successfully). `Accept` is woken up to add the new
/// `WorkerHandleAccept`.
Worker(WorkerHandleAccept),
}
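The wake/drain cycle implied by this API can be sketched as follows. This is only an illustration under assumptions, not the crate's actual `Accept` implementation; the handler closure stands in for whatever the poll loop really does with each interest.
// Producer side (e.g. a worker or the server builder): queue an interest, then wake the poll.
fn notify_worker_available(queue: &WakerQueue, idx: usize) {
    queue.wake(WakerInterest::WorkerAvailable(idx));
}
// Accept side: when mio yields an event whose token is WAKER_TOKEN, drain the queue.
fn drain_waker_queue(queue: &WakerQueue, mut handle: impl FnMut(WakerInterest)) {
    let mut guard = queue.guard();
    while let Some(interest) = guard.pop_front() {
        handle(interest);
    }
    // Swap the (possibly grown) buffer for a fresh one with the initial capacity.
    WakerQueue::reset(&mut guard);
}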

View File

@@ -1,139 +1,211 @@
use std::pin::Pin; use std::{
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; future::Future,
use std::sync::Arc; mem,
use std::task::{Context, Poll}; pin::Pin,
use std::time; rc::Rc,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
task::{Context, Poll},
time::Duration,
};
use actix_rt::time::{delay_until, Delay, Instant}; use actix_rt::{
use actix_rt::{spawn, Arbiter}; spawn,
use actix_utils::counter::Counter; time::{sleep, Instant, Sleep},
use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; Arbiter,
use futures::channel::oneshot; };
use futures::future::{join_all, LocalBoxFuture, MapOk}; use futures_core::{future::LocalBoxFuture, ready};
use futures::{Future, FutureExt, Stream, TryFutureExt};
use log::{error, info, trace}; use log::{error, info, trace};
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
oneshot,
};
use crate::accept::AcceptNotify; use crate::join_all;
use crate::service::{BoxedServerService, InternalServiceFactory, ServerMessage}; use crate::service::{BoxedServerService, InternalServiceFactory};
use crate::socket::{SocketAddr, StdStream}; use crate::socket::MioStream;
use crate::Token; use crate::waker_queue::{WakerInterest, WakerQueue};
pub(crate) struct WorkerCommand(Conn); /// Stop worker message. Returns `true` on successful graceful shutdown.
/// and `false` if some connections still alive when shutdown execute.
/// Stop worker message. Returns `true` on successful shutdown pub(crate) struct Stop {
/// and `false` if some connections still alive.
pub(crate) struct StopCommand {
graceful: bool, graceful: bool,
result: oneshot::Sender<bool>, tx: oneshot::Sender<bool>,
} }
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct Conn { pub(crate) struct Conn {
pub io: StdStream, pub io: MioStream,
pub token: Token, pub token: usize,
pub peer: Option<SocketAddr>,
} }
static MAX_CONNS: AtomicUsize = AtomicUsize::new(25600); fn handle_pair(
idx: usize,
tx1: UnboundedSender<Conn>,
tx2: UnboundedSender<Stop>,
counter: Counter,
) -> (WorkerHandleAccept, WorkerHandleServer) {
let accept = WorkerHandleAccept {
idx,
tx: tx1,
counter,
};
/// Sets the maximum per-worker number of concurrent connections. let server = WorkerHandleServer { idx, tx: tx2 };
(accept, server)
}
/// counter: Arc<AtomicUsize> field is owned by `Accept` thread and `ServerWorker` thread.
/// ///
/// All socket listeners will stop accepting connections when this limit is /// `Accept` would increment the counter and `ServerWorker` would decrement it.
/// reached for each worker.
/// ///
/// By default max connections is set to a 25k per worker. /// # Atomic Ordering:
pub fn max_concurrent_connections(num: usize) { ///
MAX_CONNS.store(num, Ordering::Relaxed); /// `Accept` always look into it's cached `Availability` field for `ServerWorker` state.
} /// It lazily increment counter after successful dispatching new work to `ServerWorker`.
/// On reaching counter limit `Accept` update it's cached `Availability` and mark worker as
pub(crate) fn num_connections() -> usize { /// unable to accept any work.
MAX_CONNS_COUNTER.with(|conns| conns.total()) ///
} /// `ServerWorker` always decrement the counter when every work received from `Accept` is done.
/// On reaching counter limit worker would use `mio::Waker` and `WakerQueue` to wake up `Accept`
thread_local! { /// and notify it to update cached `Availability` again to mark worker as able to accept work again.
static MAX_CONNS_COUNTER: Counter = ///
Counter::new(MAX_CONNS.load(Ordering::Relaxed)); /// Hence, a wake up would only happen after `Accept` increment it to limit.
} /// And a decrement to limit always wake up `Accept`.
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct WorkerClient { pub(crate) struct Counter {
pub idx: usize, counter: Arc<AtomicUsize>,
tx1: UnboundedSender<WorkerCommand>, limit: usize,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
} }
impl WorkerClient { impl Counter {
pub fn new( pub(crate) fn new(limit: usize) -> Self {
idx: usize, Self {
tx1: UnboundedSender<WorkerCommand>, counter: Arc::new(AtomicUsize::new(1)),
tx2: UnboundedSender<StopCommand>, limit,
avail: WorkerAvailability,
) -> Self {
WorkerClient {
idx,
tx1,
tx2,
avail,
} }
} }
pub fn send(&self, msg: Conn) -> Result<(), Conn> { /// Increment counter by 1; returns false when this increment hits the limit.
self.tx1 #[inline(always)]
.unbounded_send(WorkerCommand(msg)) pub(crate) fn inc(&self) -> bool {
.map_err(|msg| msg.into_inner().0) self.counter.fetch_add(1, Ordering::Relaxed) != self.limit
} }
pub fn available(&self) -> bool { /// Decrement counter by 1 and return true if crossing limit.
self.avail.available() #[inline(always)]
pub(crate) fn dec(&self) -> bool {
self.counter.fetch_sub(1, Ordering::Relaxed) == self.limit
} }
pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> { pub(crate) fn total(&self) -> usize {
let (result, rx) = oneshot::channel(); self.counter.load(Ordering::SeqCst) - 1
let _ = self.tx2.unbounded_send(StopCommand { graceful, result }); }
}
pub(crate) struct WorkerCounter {
idx: usize,
inner: Rc<(WakerQueue, Counter)>,
}
impl Clone for WorkerCounter {
fn clone(&self) -> Self {
Self {
idx: self.idx,
inner: self.inner.clone(),
}
}
}
impl WorkerCounter {
pub(crate) fn new(idx: usize, waker_queue: WakerQueue, counter: Counter) -> Self {
Self {
idx,
inner: Rc::new((waker_queue, counter)),
}
}
#[inline(always)]
pub(crate) fn guard(&self) -> WorkerCounterGuard {
WorkerCounterGuard(self.clone())
}
fn total(&self) -> usize {
self.inner.1.total()
}
}
pub(crate) struct WorkerCounterGuard(WorkerCounter);
impl Drop for WorkerCounterGuard {
fn drop(&mut self) {
let (waker_queue, counter) = &*self.0.inner;
if counter.dec() {
waker_queue.wake(WakerInterest::WorkerAvailable(self.0.idx));
}
}
}
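To make the counter handshake concrete, here is a minimal sketch; the `Accept`-side loop and its `available` flag are illustrative assumptions (the real logic lives in the accept module), while the worker side mirrors the guard shown above.
// Accept side (illustrative): dispatch work, then lazily bump the counter; when the
// increment reports that the limit was hit, stop sending to this worker until a
// WakerInterest::WorkerAvailable(idx) wake-up arrives.
fn try_dispatch(handle: &WorkerHandleAccept, conn: Conn, available: &mut bool) {
    let _ = handle.send(conn);
    if !handle.inc_counter() {
        *available = false;
    }
}
// Worker side: each connection holds a WorkerCounterGuard for its lifetime. Dropping the
// guard decrements the shared counter and, if that decrement crosses the limit, wakes
// Accept so it can mark this worker as available again.
async fn handle_conn(counter: &WorkerCounter, _conn: Conn) {
    let _guard = counter.guard();
    // ... serve the connection; the decrement (and possible wake-up) happens on drop ...
}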
/// Handle to a worker that can send connection messages to the worker and share the
/// worker's availability with other threads.
///
/// Held by [Accept](crate::accept::Accept).
pub(crate) struct WorkerHandleAccept {
idx: usize,
tx: UnboundedSender<Conn>,
counter: Counter,
}
impl WorkerHandleAccept {
#[inline(always)]
pub(crate) fn idx(&self) -> usize {
self.idx
}
#[inline(always)]
pub(crate) fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx.send(msg).map_err(|msg| msg.0)
}
#[inline(always)]
pub(crate) fn inc_counter(&self) -> bool {
self.counter.inc()
}
}
/// Handle to a worker that can send a stop message to the worker.
///
/// Held by [ServerBuilder](crate::builder::ServerBuilder).
#[derive(Debug)]
pub(crate) struct WorkerHandleServer {
idx: usize,
tx: UnboundedSender<Stop>,
}
impl WorkerHandleServer {
pub(crate) fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (tx, rx) = oneshot::channel();
let _ = self.tx.send(Stop { graceful, tx });
rx rx
} }
} }
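For illustration only (the doc comment above says this handle is held by `ServerBuilder`, where the real call site lives), the oneshot handshake looks roughly like this:
// Ask a worker to shut down gracefully and wait for the outcome: `true` means all
// connections drained within the shutdown timeout, `false` means shutdown was forced.
async fn stop_worker(handle: &WorkerHandleServer) -> bool {
    handle.stop(true).await.unwrap_or(false)
}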
#[derive(Clone)] /// Service worker.
pub(crate) struct WorkerAvailability {
notify: AcceptNotify,
available: Arc<AtomicBool>,
}
impl WorkerAvailability {
pub fn new(notify: AcceptNotify) -> Self {
WorkerAvailability {
notify,
available: Arc::new(AtomicBool::new(false)),
}
}
pub fn available(&self) -> bool {
self.available.load(Ordering::Acquire)
}
pub fn set(&self, val: bool) {
let old = self.available.swap(val, Ordering::Release);
if !old && val {
self.notify.notify()
}
}
}
/// Service worker
/// ///
/// Worker accepts Socket objects via unbounded channel and starts stream /// Worker accepts Socket objects via unbounded channel and starts stream processing.
/// processing. pub(crate) struct ServerWorker {
pub(crate) struct Worker { // UnboundedReceiver<Conn> should always be the first field.
rx: UnboundedReceiver<WorkerCommand>, // It must be dropped as soon as ServerWorker is dropped.
rx2: UnboundedReceiver<StopCommand>, rx: UnboundedReceiver<Conn>,
services: Vec<WorkerService>, rx2: UnboundedReceiver<Stop>,
availability: WorkerAvailability, counter: WorkerCounter,
conns: Counter, services: Box<[WorkerService]>,
factories: Vec<Box<dyn InternalServiceFactory>>, factories: Box<[Box<dyn InternalServiceFactory>]>,
state: WorkerState, state: WorkerState,
shutdown_timeout: time::Duration, shutdown_timeout: Duration,
} }
struct WorkerService { struct WorkerService {
@@ -159,101 +231,141 @@ enum WorkerServiceStatus {
Stopped, Stopped,
} }
impl Worker { /// Config for worker behavior passed down from server builder.
#[derive(Copy, Clone)]
pub(crate) struct ServerWorkerConfig {
shutdown_timeout: Duration,
max_blocking_threads: usize,
max_concurrent_connections: usize,
}
impl Default for ServerWorkerConfig {
fn default() -> Self {
// 512 is the default max blocking thread count of tokio runtime.
let max_blocking_threads = std::cmp::max(512 / num_cpus::get(), 1);
Self {
shutdown_timeout: Duration::from_secs(30),
max_blocking_threads,
max_concurrent_connections: 25600,
}
}
}
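As a worked example of the default above (assuming the default of one worker per logical CPU): on a machine reporting 8 logical CPUs, each worker's single-threaded runtime is capped at max(512 / 8, 1) = 64 blocking threads, so all workers together stay near Tokio's overall default budget of 512; the max(.., 1) guard ensures at least one blocking thread per worker even on very high core counts.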
impl ServerWorkerConfig {
pub(crate) fn max_blocking_threads(&mut self, num: usize) {
self.max_blocking_threads = num;
}
pub(crate) fn max_concurrent_connections(&mut self, num: usize) {
self.max_concurrent_connections = num;
}
pub(crate) fn shutdown_timeout(&mut self, dur: Duration) {
self.shutdown_timeout = dur;
}
}
impl ServerWorker {
pub(crate) fn start( pub(crate) fn start(
idx: usize, idx: usize,
factories: Vec<Box<dyn InternalServiceFactory>>, factories: Vec<Box<dyn InternalServiceFactory>>,
availability: WorkerAvailability, waker_queue: WakerQueue,
shutdown_timeout: time::Duration, config: ServerWorkerConfig,
) -> WorkerClient { ) -> (WorkerHandleAccept, WorkerHandleServer) {
let (tx1, rx) = unbounded(); let (tx1, rx) = unbounded_channel();
let (tx2, rx2) = unbounded(); let (tx2, rx2) = unbounded_channel();
let avail = availability.clone();
Arbiter::new().send( let counter = Counter::new(config.max_concurrent_connections);
async move {
availability.set(false); let counter_clone = counter.clone();
let mut wrk = MAX_CONNS_COUNTER.with(move |conns| Worker { // every worker runs in its own arbiter.
// use a custom tokio runtime builder to change the runtime's settings.
Arbiter::with_tokio_rt(move || {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.max_blocking_threads(config.max_blocking_threads)
.build()
.unwrap()
})
.spawn(async move {
let fut = factories
.iter()
.enumerate()
.map(|(idx, factory)| {
let fut = factory.create();
async move { fut.await.map(|(t, s)| (idx, t, s)) }
})
.collect::<Vec<_>>();
// a second spawn to run !Send future tasks.
spawn(async move {
let res = join_all(fut)
.await
.into_iter()
.collect::<Result<Vec<_>, _>>();
let services = match res {
Ok(res) => res
.into_iter()
.fold(Vec::new(), |mut services, (factory, token, service)| {
assert_eq!(token, services.len());
services.push(WorkerService {
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
services
})
.into_boxed_slice(),
Err(e) => {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
return;
}
};
// a third spawn to make sure ServerWorker runs as a non-boxed future.
spawn(ServerWorker {
rx, rx,
rx2, rx2,
availability, services,
factories, counter: WorkerCounter::new(idx, waker_queue, counter_clone),
shutdown_timeout, factories: factories.into_boxed_slice(),
services: Vec::new(), state: Default::default(),
conns: conns.clone(), shutdown_timeout: config.shutdown_timeout,
state: WorkerState::Unavailable(Vec::new()),
}); });
});
});
let mut fut: Vec<MapOk<LocalBoxFuture<'static, _>, _>> = Vec::new(); handle_pair(idx, tx1, tx2, counter)
for (idx, factory) in wrk.factories.iter().enumerate() { }
fut.push(factory.create().map_ok(move |r| {
r.into_iter()
.map(|(t, s): (Token, _)| (idx, t, s))
.collect::<Vec<_>>()
}));
}
spawn(async move { fn restart_service(&mut self, idx: usize, factory_id: usize) {
let res = join_all(fut).await; let factory = &self.factories[factory_id];
let res: Result<Vec<_>, _> = res.into_iter().collect(); trace!("Service {:?} failed, restarting", factory.name(idx));
match res { self.services[idx].status = WorkerServiceStatus::Restarting;
Ok(services) => { self.state = WorkerState::Restarting(Restart {
for item in services { factory_id,
for (factory, token, service) in item { token: idx,
assert_eq!(token.0, wrk.services.len()); fut: factory.create(),
wrk.services.push(WorkerService { });
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
}
}
}
Err(e) => {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
}
}
wrk.await
});
}
.boxed(),
);
WorkerClient::new(idx, tx1, tx2, avail)
} }
fn shutdown(&mut self, force: bool) { fn shutdown(&mut self, force: bool) {
if force { self.services
self.services.iter_mut().for_each(|srv| { .iter_mut()
if srv.status == WorkerServiceStatus::Available { .filter(|srv| srv.status == WorkerServiceStatus::Available)
srv.status = WorkerServiceStatus::Stopped; .for_each(|srv| {
actix_rt::spawn( srv.status = if force {
srv.service WorkerServiceStatus::Stopped
.call((None, ServerMessage::ForceShutdown)) } else {
.map(|_| ()), WorkerServiceStatus::Stopping
); };
}
}); });
} else {
let timeout = self.shutdown_timeout;
self.services.iter_mut().for_each(move |srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopping;
actix_rt::spawn(
srv.service
.call((None, ServerMessage::Shutdown(timeout)))
.map(|_| ()),
);
}
});
}
} }
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (Token, usize)> { fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (usize, usize)> {
let mut ready = self.conns.available(cx); let mut ready = true;
let mut failed = None; for (idx, srv) in self.services.iter_mut().enumerate() {
for (idx, srv) in &mut self.services.iter_mut().enumerate() {
if srv.status == WorkerServiceStatus::Available if srv.status == WorkerServiceStatus::Available
|| srv.status == WorkerServiceStatus::Unavailable || srv.status == WorkerServiceStatus::Unavailable
{ {
@@ -262,7 +374,7 @@ impl Worker {
if srv.status == WorkerServiceStatus::Unavailable { if srv.status == WorkerServiceStatus::Unavailable {
trace!( trace!(
"Service {:?} is available", "Service {:?} is available",
self.factories[srv.factory].name(Token(idx)) self.factories[srv.factory].name(idx)
); );
srv.status = WorkerServiceStatus::Available; srv.status = WorkerServiceStatus::Available;
} }
@@ -273,7 +385,7 @@ impl Worker {
if srv.status == WorkerServiceStatus::Available { if srv.status == WorkerServiceStatus::Available {
trace!( trace!(
"Service {:?} is unavailable", "Service {:?} is unavailable",
self.factories[srv.factory].name(Token(idx)) self.factories[srv.factory].name(idx)
); );
srv.status = WorkerServiceStatus::Unavailable; srv.status = WorkerServiceStatus::Unavailable;
} }
@@ -281,213 +393,170 @@ impl Worker {
Poll::Ready(Err(_)) => { Poll::Ready(Err(_)) => {
error!( error!(
"Service {:?} readiness check returned error, restarting", "Service {:?} readiness check returned error, restarting",
self.factories[srv.factory].name(Token(idx)) self.factories[srv.factory].name(idx)
); );
failed = Some((Token(idx), srv.factory));
srv.status = WorkerServiceStatus::Failed; srv.status = WorkerServiceStatus::Failed;
return Err((idx, srv.factory));
} }
} }
} }
} }
if let Some(idx) = failed {
Err(idx) Ok(ready)
} else {
Ok(ready)
}
} }
} }
enum WorkerState { enum WorkerState {
Available, Available,
Unavailable(Vec<Conn>), Unavailable,
Restarting( Restarting(Restart),
usize, Shutdown(Shutdown),
Token,
Pin<Box<dyn Future<Output = Result<Vec<(Token, BoxedServerService)>, ()>>>>,
),
Shutdown(
Pin<Box<Delay>>,
Pin<Box<Delay>>,
Option<oneshot::Sender<bool>>,
),
} }
impl Future for Worker { struct Restart {
factory_id: usize,
token: usize,
fut: LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>,
}
// Shutdown keeps the state necessary for server shutdown:
// Sleep for periodically checking the shutdown progress.
// Instant for the start time of the shutdown.
// Sender for sending the shutdown outcome (force/graceful) back to the Stop message caller.
struct Shutdown {
timer: Pin<Box<Sleep>>,
start_from: Instant,
tx: oneshot::Sender<bool>,
}
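As a worked example with the defaults above: once a graceful stop arrives while connections are still open, the worker re-checks the connection count roughly once per second using `timer`; if the count reaches zero before the 30-second `shutdown_timeout` measured from `start_from`, it sends `true` (graceful) on `tx`, otherwise it sends `false` and shuts down anyway.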
impl Default for WorkerState {
fn default() -> Self {
Self::Unavailable
}
}
impl Drop for ServerWorker {
fn drop(&mut self) {
// Stop the Arbiter that ServerWorker runs on when it is dropped.
Arbiter::current().stop();
}
}
impl Future for ServerWorker {
type Output = (); type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().get_mut();
// `StopWorker` message handler // `StopWorker` message handler
if let Poll::Ready(Some(StopCommand { graceful, result })) = if let Poll::Ready(Some(Stop { graceful, tx })) = Pin::new(&mut this.rx2).poll_recv(cx)
Pin::new(&mut self.rx2).poll_next(cx)
{ {
self.availability.set(false); let num = this.counter.total();
let num = num_connections();
if num == 0 { if num == 0 {
info!("Shutting down worker, 0 connections"); info!("Shutting down worker, 0 connections");
let _ = result.send(true); let _ = tx.send(true);
return Poll::Ready(()); return Poll::Ready(());
} else if graceful { } else if graceful {
self.shutdown(false); info!("Graceful worker shutdown, {} connections", num);
let num = num_connections(); this.shutdown(false);
if num != 0 {
info!("Graceful worker shutdown, {} connections", num); this.state = WorkerState::Shutdown(Shutdown {
self.state = WorkerState::Shutdown( timer: Box::pin(sleep(Duration::from_secs(1))),
Box::pin(delay_until(Instant::now() + time::Duration::from_secs(1))), start_from: Instant::now(),
Box::pin(delay_until(Instant::now() + self.shutdown_timeout)), tx,
Some(result), });
);
} else {
let _ = result.send(true);
return Poll::Ready(());
}
} else { } else {
info!("Force shutdown worker, {} connections", num); info!("Force shutdown worker, {} connections", num);
self.shutdown(true); this.shutdown(true);
let _ = result.send(false);
let _ = tx.send(false);
return Poll::Ready(()); return Poll::Ready(());
} }
} }
match self.state { match this.state {
WorkerState::Unavailable(ref mut conns) => { WorkerState::Unavailable => match this.check_readiness(cx) {
let conn = conns.pop(); Ok(true) => {
match self.check_readiness(cx) { this.state = WorkerState::Available;
Ok(true) => { self.poll(cx)
// process requests from wait queue
if let Some(conn) = conn {
let guard = self.conns.get();
let _ = self.services[conn.token.0]
.service
.call((Some(guard), ServerMessage::Connect(conn.io)));
} else {
self.state = WorkerState::Available;
self.availability.set(true);
}
self.poll(cx)
}
Ok(false) => {
// push connection back to queue
if let Some(conn) = conn {
match self.state {
WorkerState::Unavailable(ref mut conns) => {
conns.push(conn);
}
_ => (),
}
}
Poll::Pending
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
self.poll(cx)
}
} }
} Ok(false) => Poll::Pending,
WorkerState::Restarting(idx, token, ref mut fut) => { Err((token, idx)) => {
match Pin::new(fut).poll(cx) { this.restart_service(token, idx);
Poll::Ready(Ok(item)) => { self.poll(cx)
for (token, service) in item { }
trace!( },
"Service {:?} has been restarted", WorkerState::Restarting(ref mut restart) => {
self.factories[idx].name(token) let factory_id = restart.factory_id;
); let token = restart.token;
self.services[token.0].created(service);
self.state = WorkerState::Unavailable(Vec::new()); let (token_new, service) = ready!(restart.fut.as_mut().poll(cx))
return self.poll(cx); .unwrap_or_else(|_| {
}
}
Poll::Ready(Err(_)) => {
panic!( panic!(
"Can not restart {:?} service", "Can not restart {:?} service",
self.factories[idx].name(token) this.factories[factory_id].name(token)
); )
} });
Poll::Pending => {
return Poll::Pending; assert_eq!(token, token_new);
}
} trace!(
"Service {:?} has been restarted",
this.factories[factory_id].name(token)
);
this.services[token].created(service);
this.state = WorkerState::Unavailable;
self.poll(cx) self.poll(cx)
} }
WorkerState::Shutdown(ref mut t1, ref mut t2, ref mut tx) => { WorkerState::Shutdown(ref mut shutdown) => {
let num = num_connections(); // Wait for 1 second.
if num == 0 { ready!(shutdown.timer.as_mut().poll(cx));
let _ = tx.take().unwrap().send(true);
Arbiter::current().stop();
return Poll::Ready(());
}
// check graceful timeout if this.counter.total() == 0 {
match t2.as_mut().poll(cx) { // Graceful shutdown.
Poll::Pending => (), if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
Poll::Ready(_) => { let _ = shutdown.tx.send(true);
let _ = tx.take().unwrap().send(false); }
self.shutdown(true); Poll::Ready(())
Arbiter::current().stop(); } else if shutdown.start_from.elapsed() >= this.shutdown_timeout {
return Poll::Ready(()); // Timeout forceful shutdown.
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
let _ = shutdown.tx.send(false);
}
Poll::Ready(())
} else {
// Reset timer and wait for 1 second.
let time = Instant::now() + Duration::from_secs(1);
shutdown.timer.as_mut().reset(time);
shutdown.timer.as_mut().poll(cx)
}
}
// actively poll stream and handle worker command
WorkerState::Available => loop {
match this.check_readiness(cx) {
Ok(true) => {}
Ok(false) => {
trace!("Worker is unavailable");
this.state = WorkerState::Unavailable;
return self.poll(cx);
}
Err((token, idx)) => {
this.restart_service(token, idx);
return self.poll(cx);
} }
} }
// sleep for 1 second and then check again // handle incoming io stream
match t1.as_mut().poll(cx) { match ready!(Pin::new(&mut this.rx).poll_recv(cx)) {
Poll::Pending => (), Some(msg) => {
Poll::Ready(_) => { let guard = this.counter.guard();
*t1 = Box::pin(delay_until( let _ = this.services[msg.token].service.call((guard, msg.io));
Instant::now() + time::Duration::from_secs(1),
));
let _ = t1.as_mut().poll(cx);
} }
} None => return Poll::Ready(()),
Poll::Pending };
} },
WorkerState::Available => {
loop {
match Pin::new(&mut self.rx).poll_next(cx) {
// handle incoming io stream
Poll::Ready(Some(WorkerCommand(msg))) => {
match self.check_readiness(cx) {
Ok(true) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.service
.call((Some(guard), ServerMessage::Connect(msg.io)));
continue;
}
Ok(false) => {
trace!("Worker is unavailable");
self.availability.set(false);
self.state = WorkerState::Unavailable(vec![msg]);
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.availability.set(false);
self.services[token.0].status =
WorkerServiceStatus::Restarting;
self.state = WorkerState::Restarting(
idx,
token,
self.factories[idx].create(),
);
}
}
return self.poll(cx);
}
Poll::Pending => {
self.state = WorkerState::Available;
return Poll::Pending;
}
Poll::Ready(None) => return Poll::Ready(()),
}
}
}
} }
} }
} }

View File

@@ -1,23 +1,19 @@
use std::io::Read; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::sync::{mpsc, Arc}; use std::sync::{mpsc, Arc};
use std::{net, thread, time}; use std::{net, thread, time::Duration};
use actix_codec::{BytesCodec, Framed}; use actix_rt::{net::TcpStream, time::sleep};
use actix_rt::net::TcpStream;
use actix_server::Server; use actix_server::Server;
use actix_service::fn_service; use actix_service::fn_service;
use bytes::Bytes; use actix_utils::future::ok;
use futures::future::{lazy, ok}; use futures_util::future::lazy;
use futures::SinkExt;
use net2::TcpBuilder;
fn unused_addr() -> net::SocketAddr { fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap(); let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = TcpBuilder::new_v4().unwrap(); let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(&addr).unwrap(); socket.bind(addr).unwrap();
socket.reuse_address(true).unwrap(); socket.set_reuseaddr(true).unwrap();
let tcp = socket.to_tcp_listener().unwrap(); let tcp = socket.listen(32).unwrap();
tcp.local_addr().unwrap() tcp.local_addr().unwrap()
} }
@@ -27,21 +23,24 @@ fn test_bind() {
let (tx, rx) = mpsc::channel(); let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || { let h = thread::spawn(move || {
let sys = actix_rt::System::new("test"); let sys = actix_rt::System::new();
let srv = Server::build() let srv = sys.block_on(lazy(|_| {
.workers(1) Server::build()
.disable_signals() .workers(1)
.bind("test", addr, move || fn_service(|_| ok::<_, ()>(()))) .disable_signals()
.unwrap() .bind("test", addr, move || fn_service(|_| ok::<_, ()>(())))
.start(); .unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current())); let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run(); let _ = sys.run();
}); });
let (_, sys) = rx.recv().unwrap(); let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500)); thread::sleep(Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok()); assert!(net::TcpStream::connect(addr).is_ok());
let _ = sys.stop(); sys.stop();
let _ = h.join(); let _ = h.join();
} }
@@ -51,51 +50,60 @@ fn test_listen() {
let (tx, rx) = mpsc::channel(); let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || { let h = thread::spawn(move || {
let sys = actix_rt::System::new("test"); let sys = actix_rt::System::new();
let lst = net::TcpListener::bind(addr).unwrap(); let lst = net::TcpListener::bind(addr).unwrap();
Server::build() sys.block_on(async {
.disable_signals() Server::build()
.workers(1) .disable_signals()
.listen("test", lst, move || fn_service(|_| ok::<_, ()>(()))) .workers(1)
.unwrap() .listen("test", lst, move || fn_service(|_| ok::<_, ()>(())))
.start(); .unwrap()
let _ = tx.send(actix_rt::System::current()); .run();
let _ = tx.send(actix_rt::System::current());
});
let _ = sys.run(); let _ = sys.run();
}); });
let sys = rx.recv().unwrap(); let sys = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500)); thread::sleep(Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok()); assert!(net::TcpStream::connect(addr).is_ok());
let _ = sys.stop(); sys.stop();
let _ = h.join(); let _ = h.join();
} }
#[test] #[test]
#[cfg(unix)] #[cfg(unix)]
fn test_start() { fn test_start() {
use std::io::Read;
use actix_codec::{BytesCodec, Framed};
use bytes::Bytes;
use futures_util::sink::SinkExt;
let addr = unused_addr(); let addr = unused_addr();
let (tx, rx) = mpsc::channel(); let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || { let h = thread::spawn(move || {
let sys = actix_rt::System::new("test"); let sys = actix_rt::System::new();
let srv: Server = Server::build() let srv = sys.block_on(lazy(|_| {
.backlog(100) Server::build()
.disable_signals() .backlog(100)
.bind("test", addr, move || { .disable_signals()
fn_service(|io: TcpStream| { .bind("test", addr, move || {
async move { fn_service(|io: TcpStream| async move {
let mut f = Framed::new(io, BytesCodec); let mut f = Framed::new(io, BytesCodec);
f.send(Bytes::from_static(b"test")).await.unwrap(); f.send(Bytes::from_static(b"test")).await.unwrap();
Ok::<_, ()>(()) Ok::<_, ()>(())
} })
}) })
}) .unwrap()
.unwrap() .run()
.start(); }));
let _ = tx.send((srv, actix_rt::System::current())); let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run(); let _ = sys.run();
}); });
let (srv, sys) = rx.recv().unwrap(); let (srv, sys) = rx.recv().unwrap();
let mut buf = [1u8; 4]; let mut buf = [1u8; 4];
@@ -105,16 +113,16 @@ fn test_start() {
// pause // pause
let _ = srv.pause(); let _ = srv.pause();
thread::sleep(time::Duration::from_millis(200)); thread::sleep(Duration::from_millis(200));
let mut conn = net::TcpStream::connect(addr).unwrap(); let mut conn = net::TcpStream::connect(addr).unwrap();
conn.set_read_timeout(Some(time::Duration::from_millis(100))) conn.set_read_timeout(Some(Duration::from_millis(100)))
.unwrap(); .unwrap();
let res = conn.read_exact(&mut buf); let res = conn.read_exact(&mut buf);
assert!(res.is_err()); assert!(res.is_err());
// resume // resume
let _ = srv.resume(); let _ = srv.resume();
thread::sleep(time::Duration::from_millis(100)); thread::sleep(Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_ok()); assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok()); assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok()); assert!(net::TcpStream::connect(addr).is_ok());
@@ -126,58 +134,320 @@ fn test_start() {
// stop // stop
let _ = srv.stop(false); let _ = srv.stop(false);
thread::sleep(time::Duration::from_millis(100)); thread::sleep(Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_err()); assert!(net::TcpStream::connect(addr).is_err());
thread::sleep(time::Duration::from_millis(100)); thread::sleep(Duration::from_millis(100));
let _ = sys.stop(); sys.stop();
let _ = h.join(); let _ = h.join();
} }
#[test] #[actix_rt::test]
fn test_configure() { async fn test_max_concurrent_connections() {
let addr1 = unused_addr(); // Note:
let addr2 = unused_addr(); // A TCP listener accepts connections based on its backlog setting.
let addr3 = unused_addr(); //
// The limit test, on the other hand, only exercises how many concurrent TCP streams a
// worker thread will accept.
use tokio::io::AsyncWriteExt;
let addr = unused_addr();
let (tx, rx) = mpsc::channel(); let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = num.clone(); let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();
let max_conn = 3;
let h = thread::spawn(move || { let h = thread::spawn(move || {
let num = num2.clone(); actix_rt::System::new().block_on(async {
let sys = actix_rt::System::new("test"); let server = Server::build()
let srv = Server::build() // Set a relative higher backlog.
.disable_signals() .backlog(12)
.configure(move |cfg| { // Set a relatively high backlog.
let num = num.clone(); // max number of connections for a worker is 3.
let lst = net::TcpListener::bind(addr3).unwrap(); .workers(1)
cfg.bind("addr1", addr1) .disable_signals()
.unwrap() .bind("test", addr, move || {
.bind("addr2", addr2) let counter = counter.clone();
.unwrap() fn_service(move |_io: TcpStream| {
.listen("addr3", lst) let counter = counter.clone();
.apply(move |rt| { async move {
let num = num.clone(); counter.fetch_add(1, Ordering::SeqCst);
rt.service("addr1", fn_service(|_| ok::<_, ()>(()))); sleep(Duration::from_secs(20)).await;
rt.service("addr3", fn_service(|_| ok::<_, ()>(()))); counter.fetch_sub(1, Ordering::SeqCst);
rt.on_start(lazy(move |_| { Ok::<(), ()>(())
let _ = num.fetch_add(1, Relaxed); }
}))
}) })
}) })?
.unwrap() .run();
.workers(1)
.start();
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr1).is_ok()); let _ = tx.send((server.clone(), actix_rt::System::current()));
assert!(net::TcpStream::connect(addr2).is_ok());
assert!(net::TcpStream::connect(addr3).is_ok()); server.await
assert_eq!(num.load(Relaxed), 1); })
let _ = sys.stop(); });
let _ = h.join();
let (srv, sys) = rx.recv().unwrap();
let mut conns = vec![];
for _ in 0..12 {
let conn = tokio::net::TcpStream::connect(addr).await.unwrap();
conns.push(conn);
}
sleep(Duration::from_secs(5)).await;
// counter should remain at 3 even with 12 successful connections;
// the other 9 remain in the backlog.
assert_eq!(max_conn, counter_clone.load(Ordering::SeqCst));
for mut conn in conns {
conn.shutdown().await.unwrap();
}
srv.stop(false).await;
sys.stop();
let _ = h.join().unwrap();
}
#[actix_rt::test]
async fn test_service_restart() {
use std::task::{Context, Poll};
use actix_service::{fn_factory, Service};
use futures_core::future::LocalBoxFuture;
use tokio::io::AsyncWriteExt;
struct TestService(Arc<AtomicUsize>);
impl Service<TcpStream> for TestService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let TestService(ref counter) = self;
let c = counter.fetch_add(1, Ordering::SeqCst);
// Force the service to restart on first readiness check.
if c > 0 {
Poll::Ready(Ok(()))
} else {
Poll::Ready(Err(()))
}
}
fn call(&self, _: TcpStream) -> Self::Future {
Box::pin(async { Ok(()) })
}
}
let addr1 = unused_addr();
let addr2 = unused_addr();
let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = Arc::new(AtomicUsize::new(0));
let num_clone = num.clone();
let num2_clone = num2.clone();
let h = thread::spawn(move || {
let num = num.clone();
actix_rt::System::new().block_on(async {
let server = Server::build()
.backlog(1)
.disable_signals()
.bind("addr1", addr1, move || {
let num = num.clone();
fn_factory(move || {
let num = num.clone();
async move { Ok::<_, ()>(TestService(num)) }
})
})
.unwrap()
.bind("addr2", addr2, move || {
let num2 = num2.clone();
fn_factory(move || {
let num2 = num2.clone();
async move { Ok::<_, ()>(TestService(num2)) }
})
})
.unwrap()
.workers(1)
.run();
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
});
let (server, sys) = rx.recv().unwrap();
for _ in 0..5 {
TcpStream::connect(addr1)
.await
.unwrap()
.shutdown()
.await
.unwrap();
TcpStream::connect(addr2)
.await
.unwrap()
.shutdown()
.await
.unwrap();
}
sleep(Duration::from_secs(3)).await;
assert!(num_clone.load(Ordering::SeqCst) > 5);
assert!(num2_clone.load(Ordering::SeqCst) > 5);
sys.stop();
let _ = server.stop(false);
let _ = h.join().unwrap();
}
#[ignore]
#[actix_rt::test]
async fn worker_restart() {
use actix_service::{Service, ServiceFactory};
use futures_core::future::LocalBoxFuture;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
struct TestServiceFactory(Arc<AtomicUsize>);
impl ServiceFactory<TcpStream> for TestServiceFactory {
type Response = ();
type Error = ();
type Config = ();
type Service = TestService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: Self::Config) -> Self::Future {
let counter = self.0.fetch_add(1, Ordering::Relaxed);
Box::pin(async move { Ok(TestService(counter)) })
}
}
struct TestService(usize);
impl Service<TcpStream> for TestService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, stream: TcpStream) -> Self::Future {
let counter = self.0;
let mut stream = stream.into_std().unwrap();
use std::io::Write;
let str = counter.to_string();
let buf = str.as_bytes();
let mut written = 0;
while written < buf.len() {
if let Ok(n) = stream.write(&buf[written..]) {
written += n;
}
}
stream.flush().unwrap();
stream.shutdown(net::Shutdown::Write).unwrap();
// force worker 2 to restart service once.
if counter == 2 {
panic!("panic on purpose")
} else {
Box::pin(async { Ok(()) })
}
}
}
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let counter = Arc::new(AtomicUsize::new(1));
let h = thread::spawn(move || {
let counter = counter.clone();
actix_rt::System::new().block_on(async {
let server = Server::build()
.disable_signals()
.bind("addr", addr, move || TestServiceFactory(counter.clone()))
.unwrap()
.workers(2)
.run();
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
});
let (server, sys) = rx.recv().unwrap();
sleep(Duration::from_secs(3)).await;
let mut buf = [0; 8];
// worker 1 would not restart and returns its id consistently.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// worker 2 dies after returning its response.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("2", id);
stream.shutdown().await.unwrap();
// request to worker 1
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// worker 2 is restarting and work goes to worker 1.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// worker 2 restarted but worker 1 was still the next to accept a connection.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// worker 2 accepts connections again but its id is 3.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("3", id);
stream.shutdown().await.unwrap();
sys.stop();
let _ = server.stop(false);
let _ = h.join().unwrap();
} }

View File

@@ -1,5 +1,97 @@
# Changes # Changes
## Unreleased - 2021-xx-xx
## 2.0.0 - 2021-04-16
* Removed pipeline and related structs/functions. [#335]
[#335]: https://github.com/actix/actix-net/pull/335
## 2.0.0-beta.5 - 2021-03-15
* Add default `Service` trait impl for `Rc<S: Service>` and `&S: Service`. [#288]
* Add `boxed::rc_service` function for constructing `boxed::RcService` type [#290]
[#288]: https://github.com/actix/actix-net/pull/288
[#290]: https://github.com/actix/actix-net/pull/290
## 2.0.0-beta.4 - 2021-02-04
* `Service::poll_ready` and `Service::call` receive `&self`. [#247]
* `apply_fn` and `apply_fn_factory` now receive `Fn(Req, &Service)` function type. [#247]
* `apply_cfg` and `apply_cfg_factory` now receive `Fn(Req, &Service)` function type. [#247]
* `fn_service` and friends now receive `Fn(Req)` function type. [#247]
[#247]: https://github.com/actix/actix-net/pull/247
## 2.0.0-beta.3 - 2021-01-09
* The `forward_ready!` macro converts errors. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 2.0.0-beta.2 - 2021-01-03
* Remove redundant type parameter from `map_config`.
## 2.0.0-beta.1 - 2020-12-28
* `Service`, other traits, and many type signatures now take the request type as a type
parameter instead of an associated type. [#232]
* Add `always_ready!` and `forward_ready!` macros. [#233]
* Crate is now `no_std`. [#233]
* Migrate pin projections to `pin-project-lite`. [#233]
* Remove `AndThenApplyFn` and Pipeline `and_then_apply_fn`. Use the
`.and_then(apply_fn(...))` construction. [#233]
* Move non-vital methods to `ServiceExt` and `ServiceFactoryExt` extension traits. [#235]
[#232]: https://github.com/actix/actix-net/pull/232
[#233]: https://github.com/actix/actix-net/pull/233
[#235]: https://github.com/actix/actix-net/pull/235
## 1.0.6 - 2020-08-09
### Fixed
* Removed unsound custom Cell implementation that allowed obtaining several mutable references to
the same data, which is undefined behavior in Rust and could lead to violations of memory safety. External code could obtain several mutable references to the same data through
service combinators. Attempts to acquire several mutable references to the same data will instead
result in a panic.
## [1.0.5] - 2020-01-16
### Fixed
* Fixed unsoundness in .and_then()/.then() service combinators
## [1.0.4] - 2020-01-15
### Fixed
* Revert 1.0.3 change
## [1.0.3] - 2020-01-15
### Fixed
* Fixed unsoundness in `AndThenService` impl
## [1.0.2] - 2020-01-08
### Added
* Add `into_service` helper function
## [1.0.1] - 2019-12-22
### Changed
* `map_config()` and `unit_config()` accepts `IntoServiceFactory` type
## [1.0.0] - 2019-12-11 ## [1.0.0] - 2019-12-11
### Added ### Added

View File

@@ -1,29 +1,28 @@
[package] [package]
name = "actix-service" name = "actix-service"
version = "1.0.0" version = "2.0.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = [
description = "Actix service" "Nikolay Kim <fafhrd91@gmail.com>",
keywords = ["network", "framework", "async", "futures"] "Rob Ede <robjtede@icloud.com>",
homepage = "https://actix.rs" "fakeshadow <24548779@qq.com>",
repository = "https://github.com/actix/actix-net.git" ]
documentation = "https://docs.rs/actix-service/" description = "Service trait and combinators for representing asynchronous request/response operations."
keywords = ["network", "framework", "async", "futures", "service"]
categories = ["network-programming", "asynchronous"] categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0" repository = "https://github.com/actix/actix-net"
license = "MIT OR Apache-2.0"
edition = "2018" edition = "2018"
workspace = ".."
[badges]
travis-ci = { repository = "actix/actix-service", branch = "master" }
appveyor = { repository = "actix/actix-net" }
codecov = { repository = "actix/actix-service", branch = "master", service = "github" }
[lib] [lib]
name = "actix_service" name = "actix_service"
path = "src/lib.rs" path = "src/lib.rs"
[dependencies] [dependencies]
futures-util = "0.3.1" futures-core = { version = "0.3.7", default-features = false }
pin-project = "0.4.6" paste = "1"
pin-project-lite = "0.2"
[dev-dependencies] [dev-dependencies]
actix-rt = "1.0.0" actix-rt = "2.0.0"
actix-utils = "3.0.0"
futures-util = { version = "0.3.7", default-features = false }

13
actix-service/README.md Normal file
View File

@@ -0,0 +1,13 @@
# actix-service
> Service trait and combinators for representing asynchronous request/response operations.
[![crates.io](https://img.shields.io/crates/v/actix-service?label=latest)](https://crates.io/crates/actix-service)
[![Documentation](https://docs.rs/actix-service/badge.svg?version=2.0.0)](https://docs.rs/actix-service/2.0.0)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![License](https://img.shields.io/crates/l/actix-service.svg)
[![Dependency Status](https://deps.rs/crate/actix-service/2.0.0/status.svg)](https://deps.rs/crate/actix-service/2.0.0)
![Download](https://img.shields.io/crates/d/actix-service.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
See documentation for detailed explanations of these components: https://docs.rs/actix-service.

View File

@@ -1,213 +1,224 @@
use std::future::Future; use alloc::rc::Rc;
use std::pin::Pin; use core::{
use std::task::{Context, Poll}; future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory}; use super::{Service, ServiceFactory};
use crate::cell::Cell;
/// Service for the `and_then` combinator, chaining a computation onto the end /// Service for the `and_then` combinator, chaining a computation onto the end of another service
/// of another service which completes successfully. /// which completes successfully.
/// ///
/// This is created by the `ServiceExt::and_then` method. /// This is created by the `Pipeline::and_then` method.
pub struct AndThenService<A, B>(Cell<(A, B)>); pub struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
impl<A, B> AndThenService<A, B> { impl<A, B, Req> AndThenService<A, B, Req> {
/// Create new `AndThen` combinator /// Create new `AndThen` combinator
pub(crate) fn new(a: A, b: B) -> Self pub(crate) fn new(a: A, b: B) -> Self
where where
A: Service, A: Service<Req>,
B: Service<Request = A::Response, Error = A::Error>, B: Service<A::Response, Error = A::Error>,
{ {
Self(Cell::new((a, b))) Self(Rc::new((a, b)), PhantomData)
} }
} }
impl<A, B> Clone for AndThenService<A, B> { impl<A, B, Req> Clone for AndThenService<A, B, Req> {
fn clone(&self) -> Self { fn clone(&self) -> Self {
AndThenService(self.0.clone()) AndThenService(self.0.clone(), PhantomData)
} }
} }
impl<A, B> Service for AndThenService<A, B> impl<A, B, Req> Service<Req> for AndThenService<A, B, Req>
where where
A: Service, A: Service<Req>,
B: Service<Request = A::Response, Error = A::Error>, B: Service<A::Response, Error = A::Error>,
{ {
type Request = A::Request;
type Response = B::Response; type Response = B::Response;
type Error = A::Error; type Error = A::Error;
type Future = AndThenServiceResponse<A, B>; type Future = AndThenServiceResponse<A, B, Req>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let srv = self.0.get_mut(); let (a, b) = &*self.0;
let not_ready = !a.poll_ready(cx)?.is_ready();
let not_ready = !srv.0.poll_ready(cx)?.is_ready(); if !b.poll_ready(cx)?.is_ready() || not_ready {
if !srv.1.poll_ready(cx)?.is_ready() || not_ready {
Poll::Pending Poll::Pending
} else { } else {
Poll::Ready(Ok(())) Poll::Ready(Ok(()))
} }
} }
fn call(&mut self, req: A::Request) -> Self::Future { fn call(&self, req: Req) -> Self::Future {
AndThenServiceResponse { AndThenServiceResponse {
state: State::A(self.0.get_mut().0.call(req), Some(self.0.clone())), state: State::A {
fut: self.0 .0.call(req),
b: Some(self.0.clone()),
},
} }
} }
} }
#[pin_project::pin_project] pin_project! {
pub struct AndThenServiceResponse<A, B> pub struct AndThenServiceResponse<A, B, Req>
where where
A: Service, A: Service<Req>,
B: Service<Request = A::Response, Error = A::Error>, B: Service<A::Response, Error = A::Error>,
{ {
#[pin] #[pin]
state: State<A, B>, state: State<A, B, Req>,
}
} }
#[pin_project::pin_project] pin_project! {
enum State<A, B> #[project = StateProj]
where enum State<A, B, Req>
A: Service, where
B: Service<Request = A::Response, Error = A::Error>, A: Service<Req>,
{ B: Service<A::Response, Error = A::Error>,
A(#[pin] A::Future, Option<Cell<(A, B)>>), {
B(#[pin] B::Future), A {
Empty, #[pin]
fut: A::Future,
b: Option<Rc<(A, B)>>,
},
B {
#[pin]
fut: B::Future,
},
}
} }
impl<A, B> Future for AndThenServiceResponse<A, B> impl<A, B, Req> Future for AndThenServiceResponse<A, B, Req>
where where
A: Service, A: Service<Req>,
B: Service<Request = A::Response, Error = A::Error>, B: Service<A::Response, Error = A::Error>,
{ {
type Output = Result<B::Response, A::Error>; type Output = Result<B::Response, A::Error>;
#[pin_project::project]
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project(); let mut this = self.as_mut().project();
#[project]
match this.state.as_mut().project() { match this.state.as_mut().project() {
State::A(fut, b) => match fut.poll(cx)? { StateProj::A { fut, b } => {
Poll::Ready(res) => { let res = ready!(fut.poll(cx))?;
let mut b = b.take().unwrap(); let b = b.take().unwrap();
this.state.set(State::Empty); // drop fut A let fut = b.1.call(res);
let fut = b.get_mut().1.call(res); this.state.set(State::B { fut });
this.state.set(State::B(fut)); self.poll(cx)
self.poll(cx) }
} StateProj::B { fut } => fut.poll(cx),
Poll::Pending => Poll::Pending,
},
State::B(fut) => fut.poll(cx).map(|r| {
this.state.set(State::Empty);
r
}),
State::Empty => panic!("future must not be polled after it returned `Poll::Ready`"),
} }
} }
} }
/// `.and_then()` service factory combinator /// `.and_then()` service factory combinator
pub struct AndThenServiceFactory<A, B> pub struct AndThenServiceFactory<A, B, Req>
where where
A: ServiceFactory, A: ServiceFactory<Req>,
A::Config: Clone, A::Config: Clone,
B: ServiceFactory< B: ServiceFactory<
A::Response,
Config = A::Config, Config = A::Config,
Request = A::Response,
Error = A::Error, Error = A::Error,
InitError = A::InitError, InitError = A::InitError,
>, >,
{ {
a: A, inner: Rc<(A, B)>,
b: B, _phantom: PhantomData<Req>,
} }
impl<A, B> AndThenServiceFactory<A, B> impl<A, B, Req> AndThenServiceFactory<A, B, Req>
where where
A: ServiceFactory, A: ServiceFactory<Req>,
A::Config: Clone, A::Config: Clone,
B: ServiceFactory< B: ServiceFactory<
A::Response,
Config = A::Config, Config = A::Config,
Request = A::Response,
Error = A::Error, Error = A::Error,
InitError = A::InitError, InitError = A::InitError,
>, >,
{ {
/// Create new `AndThenFactory` combinator /// Create new `AndThenFactory` combinator
pub(crate) fn new(a: A, b: B) -> Self { pub(crate) fn new(a: A, b: B) -> Self {
Self { a, b }
}
}
impl<A, B> ServiceFactory for AndThenServiceFactory<A, B>
where
A: ServiceFactory,
A::Config: Clone,
B: ServiceFactory<
Config = A::Config,
Request = A::Response,
Error = A::Error,
InitError = A::InitError,
>,
{
type Request = A::Request;
type Response = B::Response;
type Error = A::Error;
type Config = A::Config;
type Service = AndThenService<A::Service, B::Service>;
type InitError = A::InitError;
type Future = AndThenServiceFactoryResponse<A, B>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
AndThenServiceFactoryResponse::new(
self.a.new_service(cfg.clone()),
self.b.new_service(cfg),
)
}
}
impl<A, B> Clone for AndThenServiceFactory<A, B>
where
A: ServiceFactory + Clone,
A::Config: Clone,
B: ServiceFactory<
Config = A::Config,
Request = A::Response,
Error = A::Error,
InitError = A::InitError,
> + Clone,
{
fn clone(&self) -> Self {
Self { Self {
a: self.a.clone(), inner: Rc::new((a, b)),
b: self.b.clone(), _phantom: PhantomData,
} }
} }
} }
#[pin_project::pin_project] impl<A, B, Req> ServiceFactory<Req> for AndThenServiceFactory<A, B, Req>
pub struct AndThenServiceFactoryResponse<A, B>
where where
A: ServiceFactory, A: ServiceFactory<Req>,
B: ServiceFactory<Request = A::Response>, A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{ {
#[pin] type Response = B::Response;
fut_a: A::Future, type Error = A::Error;
#[pin]
fut_b: B::Future,
a: Option<A::Service>, type Config = A::Config;
b: Option<B::Service>, type Service = AndThenService<A::Service, B::Service, Req>;
type InitError = A::InitError;
type Future = AndThenServiceFactoryResponse<A, B, Req>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
let inner = &*self.inner;
AndThenServiceFactoryResponse::new(
inner.0.new_service(cfg.clone()),
inner.1.new_service(cfg),
)
}
} }
impl<A, B> AndThenServiceFactoryResponse<A, B> impl<A, B, Req> Clone for AndThenServiceFactory<A, B, Req>
where where
A: ServiceFactory, A: ServiceFactory<Req>,
B: ServiceFactory<Request = A::Response>, A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
_phantom: PhantomData,
}
}
}
pin_project! {
pub struct AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
{
#[pin]
fut_a: A::Future,
#[pin]
fut_b: B::Future,
a: Option<A::Service>,
b: Option<B::Service>,
}
}
impl<A, B, Req> AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
{ {
fn new(fut_a: A::Future, fut_b: B::Future) -> Self { fn new(fut_a: A::Future, fut_b: B::Future) -> Self {
AndThenServiceFactoryResponse { AndThenServiceFactoryResponse {
@@ -219,12 +230,12 @@ where
} }
} }
impl<A, B> Future for AndThenServiceFactoryResponse<A, B> impl<A, B, Req> Future for AndThenServiceFactoryResponse<A, B, Req>
where where
A: ServiceFactory, A: ServiceFactory<Req>,
B: ServiceFactory<Request = A::Response, Error = A::Error, InitError = A::InitError>, B: ServiceFactory<A::Response, Error = A::Error, InitError = A::InitError>,
{ {
type Output = Result<AndThenService<A::Service, B::Service>, A::InitError>; type Output = Result<AndThenService<A::Service, B::Service, Req>, A::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project(); let this = self.project();
@@ -252,28 +263,33 @@ where
 #[cfg(test)]
 mod tests {
-    use std::cell::Cell;
-    use std::rc::Rc;
-    use std::task::{Context, Poll};
+    use alloc::rc::Rc;
+    use core::{
+        cell::Cell,
+        task::{Context, Poll},
+    };

-    use futures_util::future::{lazy, ok, ready, Ready};
+    use futures_util::future::lazy;

-    use crate::{fn_factory, pipeline, pipeline_factory, Service, ServiceFactory};
+    use crate::{
+        fn_factory, ok,
+        pipeline::{pipeline, pipeline_factory},
+        ready, Ready, Service, ServiceFactory,
+    };

     struct Srv1(Rc<Cell<usize>>);

-    impl Service for Srv1 {
-        type Request = &'static str;
+    impl Service<&'static str> for Srv1 {
         type Response = &'static str;
         type Error = ();
         type Future = Ready<Result<Self::Response, ()>>;

-        fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
             self.0.set(self.0.get() + 1);
             Poll::Ready(Ok(()))
         }

-        fn call(&mut self, req: &'static str) -> Self::Future {
+        fn call(&self, req: &'static str) -> Self::Future {
             ok(req)
         }
     }
@@ -281,18 +297,17 @@ mod tests {
     #[derive(Clone)]
     struct Srv2(Rc<Cell<usize>>);

-    impl Service for Srv2 {
-        type Request = &'static str;
+    impl Service<&'static str> for Srv2 {
         type Response = (&'static str, &'static str);
         type Error = ();
         type Future = Ready<Result<Self::Response, ()>>;

-        fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
             self.0.set(self.0.get() + 1);
             Poll::Ready(Ok(()))
         }

-        fn call(&mut self, req: &'static str) -> Self::Future {
+        fn call(&self, req: &'static str) -> Self::Future {
             ok((req, "srv2"))
         }
     }
@@ -300,7 +315,7 @@ mod tests {
     #[actix_rt::test]
     async fn test_poll_ready() {
         let cnt = Rc::new(Cell::new(0));
-        let mut srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt.clone()));
+        let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt.clone()));

         let res = lazy(|cx| srv.poll_ready(cx)).await;
         assert_eq!(res, Poll::Ready(Ok(())));
         assert_eq!(cnt.get(), 2);
@@ -309,10 +324,10 @@ mod tests {
     #[actix_rt::test]
     async fn test_call() {
         let cnt = Rc::new(Cell::new(0));
-        let mut srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt));
+        let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt));

         let res = srv.call("srv1").await;
         assert!(res.is_ok());
-        assert_eq!(res.unwrap(), (("srv1", "srv2")));
+        assert_eq!(res.unwrap(), ("srv1", "srv2"));
     }

     #[actix_rt::test]
@@ -323,7 +338,7 @@ mod tests {
         pipeline_factory(fn_factory(move || ready(Ok::<_, ()>(Srv1(cnt2.clone())))))
             .and_then(move || ready(Ok(Srv2(cnt.clone()))));

-        let mut srv = new_srv.new_service(()).await.unwrap();
+        let srv = new_srv.new_service(()).await.unwrap();

         let res = srv.call("srv1").await;
         assert!(res.is_ok());
         assert_eq!(res.unwrap(), ("srv1", "srv2"));
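
The hunks above migrate this file from the old `Service` trait (request as an associated `type Request`, `&mut self` receivers) to the request-generic `Service<Req>` trait with `&self` receivers. A minimal standalone sketch of an implementation against the new shape, mirroring the updated tests; the `Echo` type, the `Rc` sharing, and the `actix_rt::main` runner are illustrative assumptions, not part of this diff:

use std::rc::Rc;
use std::task::{Context, Poll};

use actix_service::Service;
use futures_util::future::{ok, Ready};

struct Echo;

impl Service<&'static str> for Echo {
    type Response = &'static str;
    type Error = ();
    type Future = Ready<Result<Self::Response, Self::Error>>;

    // Readiness and dispatch now borrow the service shared.
    fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&self, req: &'static str) -> Self::Future {
        ok(req)
    }
}

#[actix_rt::main]
async fn main() {
    // No `mut` binding is needed, and the service can be shared behind Rc.
    let svc = Rc::new(Echo);
    let shared = Rc::clone(&svc);

    assert_eq!(svc.call("ping").await, Ok("ping"));
    assert_eq!(shared.call("pong").await, Ok("pong"));
}

The shared borrow is also why the updated tests above drop their `mut srv` bindings.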


@@ -1,334 +0,0 @@
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use crate::cell::Cell;
use crate::{Service, ServiceFactory};
/// `Apply` service combinator
pub struct AndThenApplyFn<A, B, F, Fut, Res, Err>
where
A: Service,
B: Service,
F: FnMut(A::Response, &mut B) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error> + From<B::Error>,
{
a: A,
b: Cell<(B, F)>,
r: PhantomData<(Fut, Res, Err)>,
}
impl<A, B, F, Fut, Res, Err> AndThenApplyFn<A, B, F, Fut, Res, Err>
where
A: Service,
B: Service,
F: FnMut(A::Response, &mut B) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error> + From<B::Error>,
{
/// Create new `Apply` combinator
pub(crate) fn new(a: A, b: B, f: F) -> Self {
Self {
a,
b: Cell::new((b, f)),
r: PhantomData,
}
}
}
impl<A, B, F, Fut, Res, Err> Clone for AndThenApplyFn<A, B, F, Fut, Res, Err>
where
A: Service + Clone,
B: Service,
F: FnMut(A::Response, &mut B) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error> + From<B::Error>,
{
fn clone(&self) -> Self {
AndThenApplyFn {
a: self.a.clone(),
b: self.b.clone(),
r: PhantomData,
}
}
}
impl<A, B, F, Fut, Res, Err> Service for AndThenApplyFn<A, B, F, Fut, Res, Err>
where
A: Service,
B: Service,
F: FnMut(A::Response, &mut B) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error> + From<B::Error>,
{
type Request = A::Request;
type Response = Res;
type Error = Err;
type Future = AndThenApplyFnFuture<A, B, F, Fut, Res, Err>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let not_ready = self.a.poll_ready(cx)?.is_pending();
if self.b.get_mut().0.poll_ready(cx)?.is_pending() || not_ready {
Poll::Pending
} else {
Poll::Ready(Ok(()))
}
}
fn call(&mut self, req: A::Request) -> Self::Future {
AndThenApplyFnFuture {
state: State::A(self.a.call(req), Some(self.b.clone())),
}
}
}
#[pin_project::pin_project]
pub struct AndThenApplyFnFuture<A, B, F, Fut, Res, Err>
where
A: Service,
B: Service,
F: FnMut(A::Response, &mut B) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error>,
Err: From<B::Error>,
{
#[pin]
state: State<A, B, F, Fut, Res, Err>,
}
#[pin_project::pin_project]
enum State<A, B, F, Fut, Res, Err>
where
A: Service,
B: Service,
F: FnMut(A::Response, &mut B) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error>,
Err: From<B::Error>,
{
A(#[pin] A::Future, Option<Cell<(B, F)>>),
B(#[pin] Fut),
Empty,
}
impl<A, B, F, Fut, Res, Err> Future for AndThenApplyFnFuture<A, B, F, Fut, Res, Err>
where
A: Service,
B: Service,
F: FnMut(A::Response, &mut B) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error> + From<B::Error>,
{
type Output = Result<Res, Err>;
#[pin_project::project]
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
#[project]
match this.state.as_mut().project() {
State::A(fut, b) => match fut.poll(cx)? {
Poll::Ready(res) => {
let mut b = b.take().unwrap();
this.state.set(State::Empty);
let b = b.get_mut();
let fut = (&mut b.1)(res, &mut b.0);
this.state.set(State::B(fut));
self.poll(cx)
}
Poll::Pending => Poll::Pending,
},
State::B(fut) => fut.poll(cx).map(|r| {
this.state.set(State::Empty);
r
}),
State::Empty => panic!("future must not be polled after it returned `Poll::Ready`"),
}
}
}
/// `AndThenApplyFn` service factory
pub struct AndThenApplyFnFactory<A, B, F, Fut, Res, Err> {
a: A,
b: B,
f: F,
r: PhantomData<(Fut, Res, Err)>,
}
impl<A, B, F, Fut, Res, Err> AndThenApplyFnFactory<A, B, F, Fut, Res, Err>
where
A: ServiceFactory,
B: ServiceFactory<Config = A::Config, InitError = A::InitError>,
F: FnMut(A::Response, &mut B::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error> + From<B::Error>,
{
/// Create new `ApplyNewService` new service instance
pub(crate) fn new(a: A, b: B, f: F) -> Self {
Self {
a: a,
b: b,
f: f,
r: PhantomData,
}
}
}
impl<A, B, F, Fut, Res, Err> Clone for AndThenApplyFnFactory<A, B, F, Fut, Res, Err>
where
A: Clone,
B: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Self {
a: self.a.clone(),
b: self.b.clone(),
f: self.f.clone(),
r: PhantomData,
}
}
}
impl<A, B, F, Fut, Res, Err> ServiceFactory for AndThenApplyFnFactory<A, B, F, Fut, Res, Err>
where
A: ServiceFactory,
A::Config: Clone,
B: ServiceFactory<Config = A::Config, InitError = A::InitError>,
F: FnMut(A::Response, &mut B::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error> + From<B::Error>,
{
type Request = A::Request;
type Response = Res;
type Error = Err;
type Service = AndThenApplyFn<A::Service, B::Service, F, Fut, Res, Err>;
type Config = A::Config;
type InitError = A::InitError;
type Future = AndThenApplyFnFactoryResponse<A, B, F, Fut, Res, Err>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
AndThenApplyFnFactoryResponse {
a: None,
b: None,
f: self.f.clone(),
fut_a: self.a.new_service(cfg.clone()),
fut_b: self.b.new_service(cfg),
}
}
}
#[pin_project::pin_project]
pub struct AndThenApplyFnFactoryResponse<A, B, F, Fut, Res, Err>
where
A: ServiceFactory,
B: ServiceFactory<Config = A::Config, InitError = A::InitError>,
F: FnMut(A::Response, &mut B::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error>,
Err: From<B::Error>,
{
#[pin]
fut_b: B::Future,
#[pin]
fut_a: A::Future,
f: F,
a: Option<A::Service>,
b: Option<B::Service>,
}
impl<A, B, F, Fut, Res, Err> Future for AndThenApplyFnFactoryResponse<A, B, F, Fut, Res, Err>
where
A: ServiceFactory,
B: ServiceFactory<Config = A::Config, InitError = A::InitError>,
F: FnMut(A::Response, &mut B::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
Err: From<A::Error> + From<B::Error>,
{
type Output =
Result<AndThenApplyFn<A::Service, B::Service, F, Fut, Res, Err>, A::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if this.a.is_none() {
if let Poll::Ready(service) = this.fut_a.poll(cx)? {
*this.a = Some(service);
}
}
if this.b.is_none() {
if let Poll::Ready(service) = this.fut_b.poll(cx)? {
*this.b = Some(service);
}
}
if this.a.is_some() && this.b.is_some() {
Poll::Ready(Ok(AndThenApplyFn {
a: this.a.take().unwrap(),
b: Cell::new((this.b.take().unwrap(), this.f.clone())),
r: PhantomData,
}))
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures_util::future::{lazy, ok, Ready, TryFutureExt};
use crate::{fn_service, pipeline, pipeline_factory, Service, ServiceFactory};
#[derive(Clone)]
struct Srv;
impl Service for Srv {
type Request = ();
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
ok(req)
}
}
#[actix_rt::test]
async fn test_service() {
let mut srv = pipeline(|r: &'static str| ok(r))
.and_then_apply_fn(Srv, |req: &'static str, s| {
s.call(()).map_ok(move |res| (req, res))
});
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Ok(())));
let res = srv.call("srv").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv", ()));
}
#[actix_rt::test]
async fn test_service_factory() {
let new_srv = pipeline_factory(|| ok::<_, ()>(fn_service(|r: &'static str| ok(r))))
.and_then_apply_fn(
|| ok(Srv),
|req: &'static str, s| s.call(()).map_ok(move |res| (req, res)),
);
let mut srv = new_srv.new_service(()).await.unwrap();
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Ok(())));
let res = srv.call("srv").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv", ()));
}
}
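
The deleted factory's response future above (`AndThenApplyFnFactoryResponse::poll`) builds both inner services with a hand-rolled join: poll each `new_service` future while its slot is empty, stash the finished value in an `Option`, and resolve only once both slots are filled. A minimal standalone sketch of that polling pattern, using the same `pin_project!` macro the retained file uses; `JoinReady` and the example futures are illustrative, not from this diff:

use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

use pin_project_lite::pin_project;

pin_project! {
    // Drives two futures to completion and yields both outputs together.
    struct JoinReady<A, B>
    where
        A: Future,
        B: Future,
    {
        #[pin]
        fut_a: A,
        #[pin]
        fut_b: B,
        a: Option<A::Output>,
        b: Option<B::Output>,
    }
}

impl<A, B> Future for JoinReady<A, B>
where
    A: Future,
    B: Future,
{
    type Output = (A::Output, B::Output);

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();

        // Poll each side only while its slot is still empty.
        if this.a.is_none() {
            if let Poll::Ready(out) = this.fut_a.poll(cx) {
                *this.a = Some(out);
            }
        }
        if this.b.is_none() {
            if let Poll::Ready(out) = this.fut_b.poll(cx) {
                *this.b = Some(out);
            }
        }

        // Resolve only once both halves have completed; like the original,
        // this future must not be polled again after it returns Ready.
        if this.a.is_some() && this.b.is_some() {
            Poll::Ready((this.a.take().unwrap(), this.b.take().unwrap()))
        } else {
            Poll::Pending
        }
    }
}

fn main() {
    // Usage sketch with two already-completed futures.
    let joined = JoinReady {
        fut_a: std::future::ready(1),
        fut_b: std::future::ready("two"),
        a: None,
        b: None,
    };
    assert_eq!(futures_executor::block_on(joined), (1, "two"));
}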

Some files were not shown because too many files have changed in this diff.