mirror of https://github.com/fafhrd91/actix-net synced 2025-08-12 16:47:05 +02:00

Compare commits


844 Commits

Author SHA1 Message Date
Rob Ede
f8f1ac94bc add Patterns::is_empty and impl IntoPatterns for Patterns 2021-07-20 08:18:50 +01:00
Rob Ede
82cd5b8290 prepare router release 0.5.0-beta.1 2021-07-20 07:43:50 +01:00
Rob Ede
c65e8524b2 rework resourcedef (#373) 2021-07-19 22:37:54 +01:00
Rob Ede
a83dfaa162 Update macros.rs
closes #234
2021-07-17 20:54:53 +01:00
Rob Ede
e4ec956001 fix examples on msrv 2021-07-17 03:11:25 +01:00
Rob Ede
95cba659ff add zero cost profiling to router 2021-07-17 01:09:29 +01:00
Rob Ede
5687e81d9f rework IntoPatterns trait and codegen (#372) 2021-07-17 01:06:23 +01:00
Rob Ede
a0fe2a9b2e clippy 2021-07-16 21:46:32 +01:00
Rob Ede
ad22a93466 allow path building when resource has tail (#371) 2021-07-16 21:41:57 +01:00
Rob Ede
c2d5b2398a Rename Path::{len => segment_count} (#370) 2021-07-16 19:43:48 +01:00
Ali MJ Al-Nasrawy
5b1ff30dd9 router: fix multi-pattern and path tail matches (#366) 2021-07-16 18:17:00 +01:00
Ali MJ Al-Nasrawy
e1317bb3a0 path.len() != path.path().len() (#368)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-07-15 15:34:49 +01:00
Ali MJ Al-Nasrawy
dcea009158 ResourceDef: cleanup (#365) 2021-07-15 15:09:01 +01:00
Rob Ede
13c18b8a51 Update CHANGES.md 2021-07-14 10:37:49 +01:00
Rob Ede
06b17d6a43 fix ci 2021-06-28 15:06:29 +01:00
Rob Ede
605ec25143 prepare macros release 0.2.1 2021-06-08 17:48:30 +01:00
Ibraheem Ahmed
3824493fd3 take custom system path in actix_rt::main macro (#363) 2021-06-08 17:33:05 +01:00
Rob Ede
3be3e11aa5 change actix-router version to 0.4.0 2021-06-06 18:48:27 +01:00
Rob Ede
6a5ea0342b prepare router release 0.3.0 (#362) 2021-06-06 18:43:22 +01:00
Ali MJ Al-Nasrawy
23b1f63345 router: handle newline char '\n' in url (#360) 2021-06-06 03:38:58 +01:00
Ali MJ Al-Nasrawy
3aa037d07d fix changelog (#361) 2021-06-05 19:24:30 +01:00
Ali MJ Al-Nasrawy
cf21df14f2 Path: fix unsafe malformed string (#359) 2021-06-05 18:29:00 +01:00
Ali MJ Al-Nasrawy
a1bf8662c9 router: don't decode %25 to '%' (#357) 2021-06-06 01:34:16 +09:00
Ibraheem Ahmed
6f4d2220fa store Cow in actix-router Path (#345) 2021-06-05 01:46:40 +01:00
Danilo Bargen
54b22f9fce Docs: Fix signature of Service::call (#358) 2021-06-02 21:10:36 +01:00
fakeshadow
983abec77d Fix interrupt handling. Fix double server pause/resume (#353) 2021-04-30 13:42:25 +01:00
fakeshadow
e4d4ae21ee refactor connection counter (#343)
* Remove restart_worker test

* Remove Slab

* Rework counter

* Make counter limit switch accurate

* Remove backpressure. Add pause state

* make changes for review

* fix doc comment for counter
2021-04-29 23:27:08 +08:00
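
The counter rework above removes the Slab-based accounting and makes the limit switch accurate. As a rough illustration of the general pattern only (not the crate's implementation), a connection counter can pair an atomic count with a guard that releases the slot on drop:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

// Generic sketch of a connection counter with a drop guard; names and
// structure are illustrative, not actix-server's actual types.
#[derive(Clone)]
struct Counter {
    count: Arc<AtomicUsize>,
    limit: usize,
}

struct CounterGuard(Arc<AtomicUsize>);

impl Counter {
    fn new(limit: usize) -> Self {
        Self { count: Arc::new(AtomicUsize::new(0)), limit }
    }

    /// Returns a guard if the new connection fits under the limit.
    fn try_acquire(&self) -> Option<CounterGuard> {
        let prev = self.count.fetch_add(1, Ordering::AcqRel);
        if prev < self.limit {
            Some(CounterGuard(Arc::clone(&self.count)))
        } else {
            // Over the limit: undo the increment and reject.
            self.count.fetch_sub(1, Ordering::AcqRel);
            None
        }
    }
}

impl Drop for CounterGuard {
    fn drop(&mut self) {
        // Connection closed: free the slot.
        self.0.fetch_sub(1, Ordering::AcqRel);
    }
}
```
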
fakeshadow
8ad5f58d38 Remove ServerBuilder::configure (#349) 2021-04-27 23:58:02 +01:00
fakeshadow
613b2be51f Fix Display impl of MioListener (#350) 2021-04-27 11:54:18 -07:00
Rob Ede
b2e9640952 prepare codec 0.4.0 release (#346) 2021-04-21 11:08:43 +01:00
Rob Ede
76338a5822 prepare server release 2.0.0-beta.5 2021-04-20 05:16:32 +01:00
Rob Ede
978e4f25fb prepare actix-utils release 3.0.0 (#342) 2021-04-17 02:00:36 +01:00
Rob Ede
1c4e965366 prepare service release 2.0.0 (#339) 2021-04-16 15:18:53 +01:00
fakeshadow
2435520e67 Remove/restart worker test (#341) 2021-04-16 14:40:21 +01:00
fakeshadow
19468feef8 Fix memory ordering of WorkerAvailability (#340) 2021-04-16 11:20:08 +01:00
fakeshadow
bd48908792 Return worker index in WakerInterest::WorkerAvailable (#337) 2021-04-16 05:59:10 +01:00
fakeshadow
20c2da17ed Fix worker_avail (#336)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-16 03:20:02 +01:00
Rob Ede
fdafc1dd65 amend licences 2021-04-16 02:08:44 +01:00
Rob Ede
7749dfe46a address msrv todo in router 2021-04-16 02:06:11 +01:00
fakeshadow
aeb81ad3fd Fix how workers are notified to stop on non_graceful shutdown (#333) 2021-04-16 00:54:15 +01:00
Rob Ede
47fba25d67 remove pipeline from public api (#335) 2021-04-16 00:00:02 +01:00
Rob Ede
7a82288066 docs tweak 2021-04-15 21:58:18 +01:00
Rob Ede
4e6d88d143 improve boxed service docs 2021-04-15 20:43:02 +01:00
Rob Ede
ef206f40fb update ignored service docs to new traits 2021-04-15 20:13:27 +01:00
fakeshadow
8e98d9168c add test for restart worker thread (#328) 2021-04-15 18:49:43 +01:00
fakeshadow
3c1f57706a Make ServerWorker drop stop Arbiter it runs on (#334) 2021-04-15 13:31:03 +01:00
fakeshadow
d49ecf7203 Fix bug where backpressure happened too early (#332) 2021-04-14 14:48:05 +01:00
fakeshadow
e0fb67f646 Reduce ServerWorker size (#321) 2021-04-13 01:12:59 +01:00
fakeshadow
ddce2d6d12 Reduce cfg flags in actix_server::socket (#325) 2021-04-10 16:05:50 +01:00
fakeshadow
0a11cf5cba Separate WorkerHandle to two parts (#323) 2021-04-10 01:03:28 +01:00
Rob Ede
859f45868d Revert "do not drain backlog on backpressure" (#324)
This reverts commit d4829b046d.
2021-04-09 21:04:41 +01:00
fakeshadow
d4829b046d do not drain backlog on backpressure (#322) 2021-04-08 23:15:10 +01:00
fakeshadow
5961eb892e Fix bug where worker service restart could skip failing services and fail to restart multiple services (#318) 2021-04-05 20:39:05 +01:00
fakeshadow
995efcf427 Fix bug where paused Accept would register timed out sockets (#312) 2021-04-05 13:38:41 +01:00
fakeshadow
f1573931dd Remove MAX_CONN (#316) 2021-04-04 23:00:12 +01:00
fakeshadow
3859e91799 Use named type for WorkerState::Restarting and Shutdown (#317) 2021-04-04 21:53:19 +01:00
fakeshadow
8aade720ed Refactor WorkerState::Shutdown (#310) 2021-04-04 20:34:52 +01:00
fakeshadow
8079c50ddb Add ServerWorker::restart_service method (#314)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-04 13:22:34 +01:00
fakeshadow
05689b86d9 Remove Option wrapper for CounterGuard (#313) 2021-04-04 10:53:06 +01:00
fakeshadow
fd3e5fba02 Refactor actix_server WorkerState::Restarting enum variant. (#306)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-03 19:40:12 +01:00
fakeshadow
39d1f282f7 add test for max concurrent connections (#311) 2021-04-03 19:01:00 +01:00
fakeshadow
d8889c63ef Do not double-check connection num when entering graceful shutdown (#309) 2021-04-02 12:49:12 +01:00
fakeshadow
fdac52aa11 Refactor Worker::shutdown method (#308) 2021-04-02 12:22:05 +01:00
Rob Ede
6d66cfb06a prepare utils release 3.0.0-beta.4 2021-04-01 13:57:08 +01:00
Rob Ede
fb27ffc525 add future::Either type to utils (#305) 2021-04-01 13:53:44 +01:00
Rob Ede
b068ea16f8 prepare server release 2.0.0-beta.4 2021-04-01 09:36:07 +01:00
Rob Ede
4eebdf4070 prepare actix-utils release 3.0.0-beta.3 2021-04-01 09:31:42 +01:00
Rob Ede
b09e7cd417 fix local waker metadata 2021-04-01 09:01:56 +01:00
fakeshadow
2c5c9167a5 Fix bug where timed out socket would register itself when server in b… (#302)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-01 08:25:24 +01:00
fakeshadow
ee3a548a85 Refactor Accept::accept_one (#303) 2021-04-01 07:45:49 +01:00
fakeshadow
f21eaa954f Reduce size of Conn by removing unused addr field (#304) 2021-04-01 06:55:33 +01:00
Rob Ede
8becb0db70 refactor crates for better api stability (#301) 2021-03-30 13:39:10 +01:00
fakeshadow
26a5af70cb reduce branch in Accept::accept method (#300) 2021-03-29 08:19:37 +01:00
Rob Ede
0ee8d032b6 prepare actix-tls release 3.0.0-beta.5 2021-03-29 06:57:47 +01:00
Rob Ede
3cf1c548fd prepare actix-rt release 2.2.0 2021-03-29 06:57:14 +01:00
fakeshadow
4544562e1b Remove unused TcpConnectService (#299) 2021-03-27 21:03:24 +00:00
fakeshadow
bb27bac216 Add native tls support for actix_tls::connect module (#295)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-27 00:20:17 +00:00
Rob Ede
f9262dbec0 prevent large shutdown timeout from panicking
closes #298
2021-03-26 23:37:01 +00:00
fakeshadow
12d3942b98 Remove unused types in actix-tls. Add ActixStream impl for Box<dyn Ac… (#297) 2021-03-26 13:03:03 +00:00
fakeshadow
a3c9ebc7fa fix rustls panic when generating dns name from ip (#296)
* fix rustls panic when generating dns name from ip

* Update rustls.rs

* update changelog

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-24 09:32:04 -07:00
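
The panic fixed in the commit above came from treating an IP address literal as a DNS name. A generic illustration of the guard, assuming the webpki 0.21 `DNSNameRef::try_from_ascii_str` API that rustls used at the time (not the crate's actual code):

```rust
use std::net::IpAddr;

/// Build a webpki DNS name only when `host` is not an IP literal;
/// IP addresses must be handled separately rather than unwrapped.
fn try_dns_name(host: &str) -> Option<webpki::DNSNameRef<'_>> {
    if host.parse::<IpAddr>().is_ok() {
        return None;
    }
    webpki::DNSNameRef::try_from_ascii_str(host).ok()
}
```
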
fakeshadow
b7bfff2b32 add example of using multi-thread tokio runtime (#294)
* add example of using multi-thread tokio runtime

* Update multi_thread_system.rs

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-24 04:56:13 -07:00
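
A minimal sketch of the pattern that example demonstrates, assuming actix-rt 2.x's `System::with_tokio_rt` constructor; the repository's actual `multi_thread_system.rs` example may differ:

```rust
use actix_rt::System;

fn main() {
    // Let the System drive a user-built multi-thread tokio runtime instead of
    // the default single-thread one.
    let runner = System::with_tokio_rt(|| {
        tokio::runtime::Builder::new_multi_thread()
            .worker_threads(4)
            .enable_all()
            .build()
            .expect("failed to build runtime")
    });

    runner.block_on(async {
        println!("running on a multi-thread tokio runtime");
    });
}
```
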
fakeshadow
0c73f13c8b ActixStream readiness methods return Ready object (#293) 2021-03-23 05:50:48 +00:00
Rob Ede
945479e0c3 unvendor openssl (#292) 2021-03-17 00:26:04 +00:00
Rob Ede
746cc2ab89 prepare service release 2.0.0-beta.5 2021-03-15 23:09:34 +00:00
Rob Ede
91ea8c5dad remove service dev module and add transformext trait
also improve docs on transform and boxed mods
2021-03-10 03:18:09 +00:00
Rob Ede
0a705b1023 add docs for *_ready macros 2021-03-10 02:23:24 +00:00
fakeshadow
9e2bcec226 add RcService type and rc_service construct function (#290) 2021-02-28 23:01:05 +00:00
Rob Ede
382830a37e refactor dispatcher / add Receiver::recv (#286) 2021-02-28 21:11:16 +00:00
fakeshadow
493a1a32c0 rc service changelog (#289) 2021-02-28 19:54:57 +00:00
fakeshadow
50a195e9ce add impl Service for Rc<S: Service> (#288) 2021-02-28 19:42:11 +00:00
Rob Ede
06ddad0051 prepare rt and tls releases (#287) 2021-02-25 11:50:24 +00:00
Rob Ede
789e6a8a46 update ci (#284) 2021-02-24 09:48:41 +00:00
Rob Ede
6e590fd042 Merge pull request #285 from actix/dep/actix-server 2021-02-24 09:09:44 +00:00
fakeshadow
fa8ded3a34 bump tokio version for actix-server 2021-02-24 15:54:28 +08:00
Rob Ede
841c611233 doc nits 2021-02-24 01:39:02 +00:00
Rob Ede
81a2b6a425 add local_addr binding to connector service (#282) 2021-02-23 18:52:28 +00:00
fakeshadow
a6e79453d0 remove default reuse_addr 2021-02-24 02:26:11 +08:00
fakeshadow
17f711a9d6 update changelog 2021-02-24 01:20:01 +08:00
fakeshadow
c3be839a69 add local_addr binding to connector service 2021-02-24 01:13:17 +08:00
Rob Ede
8d74cf387d standardize openssl based stream name 2021-02-20 18:04:05 +00:00
Rob Ede
7e483cc356 tweak task and stream docs 2021-02-20 17:34:04 +00:00
fakeshadow
75d7ae3139 add actix stream trait (#276) 2021-02-20 17:25:22 +00:00
Juan Aguilar
2cfe1d88ad Refactor LocalWaker for use Cell and remove deprecated methods (#278) 2021-02-19 17:12:30 +00:00
Rob Ede
cb07ead392 prepare rt release 2.0.2 2021-02-06 22:52:53 +00:00
Rob Ede
32543809f9 add System::try_current (#275) 2021-02-06 22:45:03 +00:00
Rob Ede
eb4d29e15e add arbiter handle assoc fn (#274)
* add arbiter handle assoc fn
2021-02-06 22:27:56 +00:00
Rob Ede
7ee42b50b4 prepare router 0.2.7 release 2021-02-06 19:50:48 +00:00
Rob Ede
0da848e4ae fix server dev dep 2021-02-06 19:35:29 +00:00
Rob Ede
5f80d85010 fix server version 2021-02-06 19:34:58 +00:00
Rob Ede
16ba77c4c8 prepare next set of betas (#273) 2021-02-06 19:24:52 +00:00
Rob Ede
b4a3f51659 prepare rt release 2.0.1 2021-02-06 15:54:11 +00:00
Riley
9d0901e07f actix-rt: expose JoinError (#271) 2021-02-06 15:50:38 +00:00
fakeshadow
ebb9cd055f use static dispatch on signal handling. reduce allocation (#272) 2021-02-06 03:38:11 +00:00
Rob Ede
a77b70aed2 prepare service 2.0.0-beta.4 release (#269) 2021-02-04 20:44:13 +00:00
Rob Ede
c918da906b use reexported tls crates when possible 2021-02-04 15:23:06 +00:00
Rob Ede
b5399c5631 use reusable box future in tls connector 2021-02-04 15:23:06 +00:00
fakeshadow
7f0eddd794 add blocking thread customize (#265) 2021-02-04 15:01:51 +00:00
shuo
db3385e865 retry on EINTR in accept loop (#264)
Co-authored-by: lishuo <lishuo.03@bytedance.com>
2021-02-04 10:20:37 +00:00
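
A general illustration of the retry named in that commit, shown on a plain `std::net::TcpListener` rather than the crate's internal accept loop: an `accept` interrupted by a signal surfaces as `ErrorKind::Interrupted` (EINTR) and should simply be retried rather than treated as fatal.

```rust
use std::io;
use std::net::{TcpListener, TcpStream};

// Keep accepting when the syscall is interrupted by a signal (EINTR).
fn accept_retrying(listener: &TcpListener) -> io::Result<TcpStream> {
    loop {
        match listener.accept() {
            Ok((stream, _addr)) => return Ok(stream),
            Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e),
        }
    }
}
```
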
Rob Ede
4a8693d000 readme grammar 2021-02-03 11:18:35 +00:00
Rob Ede
4ec358575e prepare actix-rt v2.0.0 release (#262) 2021-02-03 10:25:31 +00:00
Rob Ede
66bd5bf4a2 prepare macros v0.2.0 release (#261) 2021-02-02 02:07:58 +00:00
Rob Ede
057e7cd7c9 prepare rt v2.0.0-beta.3 2021-01-31 05:19:30 +00:00
Rob Ede
0b656f51e1 deprecate rt TLS item storage 2021-01-31 04:48:03 +00:00
Rob Ede
0eb68d1c7b Revert "remove arbiter TLS item storage"
This reverts commit 3e6f69885c.
2021-01-31 04:45:27 +00:00
Rob Ede
3e6f69885c remove arbiter TLS item storage 2021-01-31 04:43:35 +00:00
Rob Ede
2fa60b07ae prevent arbiter leaks by waiting for registration 2021-01-31 04:41:28 +00:00
Rob Ede
b75254403a remove builder and introduce worker handle (#257) 2021-01-31 03:34:07 +00:00
Rob Ede
1b35ff8ee6 express spawn fn as spawn fut (#256) 2021-01-29 15:16:30 +00:00
Rob Ede
2924419905 prevent spawn_fn panic bubbling (#255) 2021-01-29 14:16:10 +00:00
Rob Ede
6b86b5efc5 rename arbiter to worker (#254) 2021-01-29 04:08:14 +00:00
Rob Ede
ba39c8436d remove tokio runners (#253) 2021-01-29 02:21:06 +00:00
fakeshadow
feac376c17 fix actix-tls build (#252) 2021-01-28 10:31:57 +00:00
Rob Ede
a633d2353c fix addr iterator 2021-01-27 11:23:28 +00:00
Rob Ede
45edff625e add rt tests and doc tests 2021-01-26 09:46:14 +00:00
Rob Ede
cff9deb729 attribute nits 2021-01-26 09:45:43 +00:00
Rob Ede
eaefe21b98 add tests for custom resolver 2021-01-26 08:05:19 +00:00
fakeshadow
636cef8868 service trait takes shared self reference (#247) 2021-01-23 03:06:22 +00:00
fakeshadow
874e5f2e50 change default name resolver and allow custom resolvers (#248) 2021-01-23 01:33:50 +00:00
Rob Ede
6112a47529 update local deps 2021-01-09 15:19:16 +00:00
Rob Ede
a2e03700e7 update rt changelog 2021-01-09 15:16:31 +00:00
Rob Ede
6edf9b8278 prepare rt 2.0.0-beta.2 release 2021-01-09 15:12:59 +00:00
Rob Ede
f07d807707 remove actix-threadpool crate 2021-01-09 15:04:55 +00:00
Rob Ede
d4c46b7da9 fix macros code 2021-01-09 14:58:15 +00:00
Rob Ede
b0a8f8411b prepare macros 0.2.0-beta.1 release 2021-01-09 14:56:07 +00:00
Rob Ede
46bfe5de36 prepare service 2.0.0-beta.3 release 2021-01-09 14:28:33 +00:00
Rob Ede
a95afe2800 prepare router release 0.2.6 2021-01-09 14:18:20 +00:00
Rob Ede
f751cf5acb use convert err on forward_ready! (#246) 2021-01-09 14:13:16 +00:00
fakeshadow
a1982bdbad add actix-rt::task (#245) 2021-01-03 18:16:57 +00:00
Rob Ede
147c4f4f2c test bytestring with ahash 2021-01-03 04:42:08 +00:00
Rob Ede
5285656bdc prepare next beta releases 2021-01-03 04:39:37 +00:00
Rob Ede
296294061f update readme 2020-12-31 02:52:55 +00:00
Rob Ede
93865de848 move router to actix-router 2020-12-31 02:29:27 +00:00
Rob Ede
6bcf6d8160 use bytestring crate name as dir name 2020-12-31 02:21:50 +00:00
Rob Ede
14ff379150 prepare bytestring release 1.0.0 (#243) 2020-12-31 02:20:49 +00:00
fakeshadow
647817ef14 tokio 1.0 and mio 0.7 (#204) 2020-12-30 22:11:50 +00:00
fakeshadow
b5eefb4d42 merge actix-testing into actix-server (#242) 2020-12-29 21:20:24 +00:00
fakeshadow
03eb96d6d4 fix actix-tls tests (#241) 2020-12-29 11:36:17 +00:00
Rob Ede
0934078947 prepare tls beta release 2020-12-29 01:04:21 +00:00
Rob Ede
5759c9e144 merge -connect and -tls and upgrade to rt v2 (#238) 2020-12-29 00:38:41 +00:00
Rob Ede
3c6de3a81b use correct service version for tracing 2020-12-29 00:08:59 +00:00
Rob Ede
ef83647ac9 prepare testing beta release 2020-12-28 23:54:21 +00:00
Rob Ede
98a17081b8 prepare server beta release 2020-12-28 23:50:00 +00:00
fakeshadow
b7202db8fd update actix-server and actix-testing to tokio 1.0 (#239) 2020-12-28 23:44:53 +00:00
Rob Ede
a09f9abfcb prepare utils release 3.0.0-beta.1 2020-12-28 03:32:28 +00:00
Rob Ede
e4a44b77e6 prepare codec release 0.4.0-beta.1 2020-12-28 03:24:43 +00:00
fakeshadow
2ee8f45f5d update actix-codec and actix-utils to tokio 1.0 (#237) 2020-12-28 03:16:37 +00:00
Rob Ede
f48e3f4cb0 prepare release for rt and service 2020-12-28 01:58:31 +00:00
Rob Ede
3d3bd60368 fix rt override 2020-12-28 01:53:11 +00:00
Rob Ede
d684128831 fix rt override 2020-12-28 01:48:19 +00:00
fakeshadow
0c12930796 update to tokio 1.0 for actix-rt (#236) 2020-12-28 01:40:22 +00:00
Rob Ede
ba44ea7d0b remove futures-util from service deps (#235) 2020-12-27 18:24:57 +00:00
Rob Ede
8a58a341a4 service improvements (#233) 2020-12-27 14:15:42 +00:00
Rob Ede
33c9aa6988 bump msrv to 1.46 2020-12-27 04:36:08 +00:00
Rob Ede
3ab8c3eb69 service trait takes request type parameter (#232) 2020-12-27 04:28:00 +00:00
fakeshadow
518bf3f6a6 remove RUNNING Q PENDING thread locals from actix-rt (#207) 2020-12-26 23:26:02 +00:00
fakeshadow
43ce25cda1 Remove unused mods in actix-utils (#229) 2020-12-26 21:27:59 +00:00
Yuki Okushi
4e4122b702 Disable PR comment from codecov 2020-12-17 21:42:21 +09:00
Aravinth Manivannan
b296d0f254 Intradoc links conversion (#227)
* intra doc conversion

* rm trailing blank comment
2020-12-14 08:22:30 +00:00
Juan Aguilar
02a902068f Refactor LocalWaker (#224) 2020-12-13 19:26:57 +00:00
fakeshadow
049795662f remove ServerMessage type. remove one unused InternalServiceFactory impl (#225) 2020-12-13 00:46:32 +00:00
Rob Ede
4e43216b99 standardise compiler lints across all crates (#226) 2020-12-12 23:24:00 +00:00
Rob Ede
93889776c4 prevent double registration of sockets when backpressure is resolved (#223) 2020-12-12 17:19:20 +00:00
Yuki Okushi
ab496a71b5 Fix release date 2020-12-03 08:59:59 +09:00
Yuki Okushi
76d956e25c macros: Add actix-reexport feature (#218) 2020-12-03 08:59:13 +09:00
Ivan Babrou
89e56cf661 Notify about paused accept loop (#215) 2020-11-29 15:30:13 +00:00
Rob Ede
8aca8d4d07 fix clippy warnings (#214)
and make my spelling checker happy
2020-11-25 01:41:14 +00:00
fakeshadow
e0dd2a3d76 remove actix-threadpool re-export from actix-rt (#212) 2020-11-24 17:03:09 +00:00
Rob Ede
59e976aaca address clippy error (#213) 2020-11-24 16:35:47 +00:00
Zura Benashvili
4cc1c87724 docs(transform): remove extra generic parameter (#211) 2020-11-20 22:45:57 +00:00
Yuki Okushi
ca39917d2c Update CoC contact information 2020-10-31 12:08:06 +09:00
ghizzo01
704af672b9 Bump pin-project to 1.0 (#202) 2020-10-25 19:42:40 +09:00
Rob Ede
242bef269f delete readme of removed ioframe package
closes #199
2020-09-22 12:29:07 +01:00
Rob Ede
6c65e2a79f prepare router 0.2.5 release (#198) 2020-09-21 22:46:59 +01:00
nujz
e5ca271764 actix-router: fix from_hex error (#196) 2020-09-20 18:04:18 +01:00
nujz
98a2197a09 fix doc error (#195) 2020-09-19 23:12:41 +09:00
Rob Ede
fb0aa02b3c move and update server+tls examples (#190) 2020-09-13 10:12:07 +01:00
Rob Ede
681eeb497d prepare server release 1.0.4 (#188) 2020-09-12 15:28:17 +01:00
Igor Aleksanov
3e04b87311 actix-service: Fix broken link in readme (#189) 2020-09-12 15:08:03 +01:00
Rob Ede
77b7826658 prepare tls v2 release (#186) 2020-09-08 18:00:07 +01:00
Igor Aleksanov
b7a9cb7bb4 actix-rt: Make the process of running System in existing Runtime more clear (#173) 2020-09-06 11:01:24 +01:00
Robert Gabriel Jakabosky
88d99ac89c Fix clippy errors. (#187) 2020-09-06 10:41:42 +01:00
Rob Ede
7632f51509 prepare connect v2 stable release (#185) 2020-09-02 22:14:07 +01:00
Rob Ede
d28687d0d7 promote codec/utils out of beta (#184) 2020-08-24 09:18:37 +01:00
Rob Ede
27c6be9881 remove unused type parameter from Framed::replace_codec (#183) 2020-08-20 00:30:26 +01:00
Rob Ede
119dc39f5b prepare codec and utils betas (#182) 2020-08-19 11:00:12 +01:00
Rob Ede
b3010c13e0 solve framed integration with actix-http (#179) 2020-08-18 23:27:37 +01:00
Adrian Wechner
fecdfcd8d4 assert workers greater than zero (#167) 2020-08-18 16:44:22 +01:00
Yuki Okushi
578a560853 connect,tls: Bump up to next alpha versions (#181) 2020-08-17 15:39:17 +01:00
Rob Ede
fb098536ee bump MSRV to 1.42 (#180) 2020-08-17 15:37:57 +01:00
Rob Ede
5d28be9ad6 fix actix-service readme reference (#176) 2020-08-11 12:20:09 +01:00
Rob Ede
a5a6b6704c prepare actix-service 1.0.6 release (#175) 2020-08-09 16:10:58 +01:00
Igor Aleksanov
afb0a3c9fc actix-service: Fix clippy warning in benches (#174) 2020-08-07 17:16:45 +09:00
Miloas
02aaa75591 fix actix-service doc error (#172) 2020-08-06 11:21:51 +01:00
Yuki Okushi
ed4b708c66 Fix CI on MSRV check (#171) 2020-08-05 09:02:41 +09:00
Yuki Okushi
235a76dcd4 GHA: Switch action to the official setup-msys2 (#169) 2020-07-29 08:47:32 +09:00
Matt Kantor
0c5f1da625 Remove garbled doc comment for actix_router::IntoPattern::is_single (#168) 2020-07-29 05:46:53 +09:00
Yuki Okushi
8ace9264b7 Check code style with rustfmt on CI (#164) 2020-07-22 12:32:13 +09:00
Yuki Okushi
0dca1a705a actix-utils: Remove unsound custom Cell as well (#161) 2020-07-22 01:14:32 +01:00
Juan Aguilar
5d6d309e66 Simplify bcodec decode (#162) 2020-07-20 23:09:24 +09:00
Juan Aguilar
8d0bd7ce1c Improve bcodec encode performance (#157) 2020-07-19 22:36:51 +01:00
Sergey "Shnatsel" Davidoff
a67e38b4a0 Remove unsound custom Cell (#158) 2020-07-20 06:05:36 +09:00
Rob Ede
334c98575a Upgrade tokio utils to 0.3 (#138) 2020-07-20 05:44:26 +09:00
Rob Ede
a9b5a7b070 Create PULL_REQUEST_TEMPLATE.md (#159) 2020-07-20 03:01:09 +09:00
Yuki Okushi
61176f6410 Update rustls-related dependencies (#154) 2020-07-14 11:14:06 +01:00
Yuki Okushi
10b4c30a06 Use OR instead of deprecated / in license field (#155) 2020-07-14 11:11:30 +01:00
Yuki Okushi
7f550bcf0f threadpool: Bump up to 0.3.3 (#156) 2020-07-14 11:10:15 +01:00
Yuki Okushi
887f11f787 Merge pull request #153 from actix/tweak-actions
Tweak actions trigger events
2020-07-08 09:04:05 +09:00
Yuki Okushi
e2a6d352b0 Tweak actions trigger events 2020-07-08 08:38:24 +09:00
Yuki Okushi
f6c697a2dd Merge pull request #152 from paolobarbolini/pl-011
Update parking_lot to 0.11
2020-07-04 03:20:08 +09:00
Paolo Barbolini
5ecdfd684a Update parking_lot to 0.11 2020-07-03 17:37:10 +02:00
Yuki Okushi
7140c04c44 Merge pull request #149 from taiki-e/pin-project
Remove uses of pin_project::project attribute
2020-06-07 02:01:08 +09:00
Taiki Endo
9528df4486 Remove uses of pin_project::project attribute
pin-project will deprecate the project attribute due to some unfixable
limitations.

Refs: https://github.com/taiki-e/pin-project/issues/225
2020-06-06 06:42:45 +09:00
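
For context on the change above, a minimal sketch of the replacement pattern on an unrelated example type: pin-project 1.0 names the generated projection type up front (`project = ...`) and the match site calls `.project()` directly, instead of using the removed `#[project]` attribute.

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

use pin_project::pin_project;

// Naming the projection type replaces the old `#[project]` attribute
// at the match site.
#[pin_project(project = EitherProj)]
enum Either<L, R> {
    Left(#[pin] L),
    Right(#[pin] R),
}

impl<L, R, T> Future for Either<L, R>
where
    L: Future<Output = T>,
    R: Future<Output = T>,
{
    type Output = T;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
        match self.project() {
            EitherProj::Left(fut) => fut.poll(cx),
            EitherProj::Right(fut) => fut.poll(cx),
        }
    }
}
```
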
Pen Tree
755a8bb9d1 fix codec doc links (#148) 2020-06-02 18:05:39 +01:00
Yuki Okushi
f3cb6efc30 Merge pull request #146 from actix/cache-v2
Update `actions/cache` to v2
2020-05-28 04:59:34 +09:00
Yuki Okushi
87b857705c Update actions/cache to v2 2020-05-28 03:14:01 +09:00
Yuki Okushi
c897c5d3eb Merge pull request #145 from JohnTitor/new-threalpool
threadpool: Bump up to 0.3.2
2020-05-20 15:24:39 +09:00
Yuki Okushi
134e76b8b4 threadpool: Bump up to 0.3.2 2020-05-20 14:19:16 +09:00
Yuki Okushi
f3a401c23b Merge pull request #144 from JohnTitor/codecov-config
Add codecov config
2020-05-20 11:03:31 +09:00
Yuki Okushi
f7e8a912b3 Add codecov config 2020-05-19 14:45:39 +09:00
Yuki Okushi
11a1e11858 Merge pull request #143 from JohnTitor/new-testing
testing: Bump up to 1.0.1
2020-05-19 14:37:54 +09:00
Yuki Okushi
d0b27ee7e6 testing: Bump up to 1.0.1 2020-05-19 14:08:08 +09:00
Yuki Okushi
2d2b0591a2 Merge pull request #142 from JohnTitor/new-server
server: Bump up to 1.0.3
2020-05-19 13:58:39 +09:00
Yuki Okushi
abbc5f715f server: Bump up to 1.0.3 2020-05-19 10:23:17 +09:00
Yuki Okushi
140a6c76e3 Merge pull request #141 from actix/fix-ci
Only check compilation on mingw CI
2020-05-19 09:39:03 +09:00
Yuki Okushi
2395b28c5e Only check compilation on mingw CI
Disabled running tests since linking with OpenSSL is somehow broken.
2020-05-19 09:11:27 +09:00
Yuki Okushi
aad4812ba6 Merge pull request #140 from JohnTitor/replace-net2
Replace deprecated `net2` crate with `socket2`
2020-05-19 08:58:40 +09:00
Yuki Okushi
ac6c78c476 testing: Replace net2 crate with socket2 2020-05-19 08:21:40 +09:00
Yuki Okushi
8218a098e8 server: Replace net2 crate with socket2 2020-05-19 08:17:44 +09:00
Yuki Okushi
49a6f525be Merge pull request #139 from JohnTitor/next-macros
macros: Bump up to 0.1.2
2020-05-19 07:50:46 +09:00
Yuki Okushi
f59ff82395 macros: Bump up to 0.1.2 2020-05-18 15:36:23 +09:00
Yuki Okushi
f7cc62564d Merge pull request #136 from JohnTitor/connect-alpha-3
actix-connect: Bump up to 2.0.0-alpha.3
2020-05-08 01:36:16 +09:00
Yuki Okushi
b125e2bdce actix-connect: Bump up to 2.0.0-alpha.3 2020-05-08 01:07:57 +09:00
Yuki Okushi
a5c185e80e Merge pull request #135 from actix/fix/unresolverd
correct spelling of ConnectError::Unresolved
2020-05-06 14:45:30 +09:00
Rob Ede
523cee0351 correct spelling of ConnectError::Unresolved 2020-05-03 23:14:22 +01:00
Yuki Okushi
343b3c09fc Merge pull request #134 from JohnTitor/new-rt
Bump up `actix-rt` to 1.1.1
2020-04-30 14:34:17 +09:00
Yuki Okushi
8a10580663 Bump up actix-rt to 1.1.1 2020-04-30 03:07:12 +09:00
Yuki Okushi
1b4a117063 Merge pull request #128 from Jonathas-Conceicao/topic/fix_memory_leak
actix-rt: Spawn future to cleanup pending JoinHandles
2020-04-30 02:58:13 +09:00
Yuki Okushi
700997fe48 Merge pull request #133 from actix/macro-compile-testing
add macro compile tests
2020-04-29 15:33:00 +09:00
Rob Ede
4c5568ed70 add trybuild compile tests 2020-04-26 20:11:16 +01:00
Yuki Okushi
7d0cfe1b4d Merge pull request #131 from danpintara/pull-1
actix-macros: Simplify test macros by using original signature
2020-04-23 02:33:52 +09:00
Daniel Pintara
e35c261c9f actix-macros: test: Simplify by using #sig instead of #name(#inputs) #ret 2020-04-22 00:13:32 +07:00
Yuki Okushi
115ef3fcb3 Merge pull request #130 from JohnTitor/dont-clone
Remove unnecessary clone usage
2020-04-20 08:37:10 +09:00
Yuki Okushi
c0482e2532 Remove unnecessary clone usage 2020-04-20 08:02:08 +09:00
Jonathas-Conceicao
6906f25e01 actix-rt: Set threshold size for arbiter's pending futures list
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-16 03:12:05 -03:00
Jonathas-Conceicao
06bca19524 actix-rt: Spawn future to cleanup pending JoinHandles
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-09 20:36:44 -03:00
Yuki Okushi
e9e2185296 Merge pull request #127 from rubdos/test-fixture-integration
Forward actix_rt::test arguments to test function.
2020-04-09 17:45:17 +09:00
Ruben De Smet
aae52a80ab Forward actix_rt::test arguments to test function.
Previously,

```rust
async fn foo(_a: u32) {}
```

would compile to

```rust
fn foo() {/* something */}
```

This patch changes this behaviour to

```rust
fn foo(_a: u32) {/* something */}
```

by simply forwarding the input arguments.

This allows any test fixture library (e.g. `rstest`, cfr.
https://github.com/la10736/rstest/issues/85) to integrate with
actix::test.
2020-04-08 16:48:10 +02:00
Yuki Okushi
65e2e8052e Release actix-rt 1.1.0 (#126)
* Release actix-rt 1.1.0

* Update actix-rt/CHANGES.md
2020-04-08 16:34:07 +09:00
Jonathas-Conceicao
783880bb0a actix-rt: Add Arbiter::is_running helper and fix System::is_set doc
`Arbiter::is_running` can be used to check whether the current event loop is
running, which should also work after the system has stopped. `System::is_set`
was updated to reflect what it actually does: it tells whether the event loop has
started, which alone can't tell whether it has stopped.

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-05 21:00:54 -03:00
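
A hedged usage sketch of the two helpers discussed above, assuming the actix-rt 1.1 API described in that commit (`System::is_set()` and `Arbiter::is_running()` as bool-returning associated functions):

```rust
use actix_rt::{Arbiter, System};

fn main() {
    // No event loop has been started on this thread yet.
    assert!(!System::is_set());

    let mut runner = System::new("example");
    runner.block_on(async {
        // Inside the running system both checks should hold.
        assert!(System::is_set());
        assert!(Arbiter::is_running());
    });
}
```
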
Jonathas-Conceicao
69e8df9d62 actix-rt: Run rustfmt
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-05 21:00:54 -03:00
Yuki Okushi
9addf1a36b Merge pull request #125 from actix/fix/noisy-check
fix noisy check warning
2020-04-05 13:20:25 +09:00
Rob Ede
187a58472d fix noisy check warning 2020-04-04 23:57:52 +01:00
Nikolay Kim
30aa0b7bb6 add serde support to bytestring 2020-03-30 11:54:40 +06:00
Yuki Okushi
e775d08d76 Merge pull request #122 from actix/JohnTitor-patch-1
Upload coverage on PRs
2020-03-18 05:31:59 +09:00
Yuki Okushi
d5f95b54b7 Upload coverage on PRs 2020-03-18 05:03:37 +09:00
Yuki Okushi
904f90abc2 Merge pull request #121 from actix/revert-115-JohnTitor-patch-2
Revert "Disable windows-mingw builder temporarily"
2020-03-16 18:06:42 +09:00
Yuki Okushi
950c73077c Revert "Disable windows-mingw builder temporarily" 2020-03-16 17:31:10 +09:00
Yuki Okushi
732731a9c8 Merge pull request #120 from kornelski/err
std Error for BlockingError
2020-03-14 00:14:42 +09:00
Kornel Lesiński
0dd5a7ce1d std Error for BlockingError
#93
2020-03-13 12:35:20 +00:00
Yuki Okushi
7105091e51 Merge pull request #119 from JohnTitor/futures
Minimize `futures-*` dependencies
2020-03-13 05:12:37 +09:00
Yuki Okushi
08959dfc21 actix-tracing: Minimize futures-util dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
2792433ad6 actix-codec: Minimize futures-* dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
437a7b05c6 actix-rt: Fix build 2020-03-12 07:13:32 +09:00
Yuki Okushi
3d125c5381 actix-testing: Remove unused deps 2020-03-12 07:13:32 +09:00
Yuki Okushi
fbf7d6ef33 Update examples 2020-03-12 07:13:32 +09:00
Yuki Okushi
e6b6f08369 actix-utils: Minimize futures-* dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
4e806b3e3f actix-tls: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
f5b07053fc actix-server: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
dd3bec83bf actix-ioframe: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
f955e49930 actix-connect: Minimize futures-* dependencies 2020-03-12 04:22:38 +09:00
Yuki Okushi
4be11b541b Merge pull request #117 from actix/new-connect
Release actix-http v2.0.0-alpha.2
2020-03-08 15:13:52 +09:00
Yuki Okushi
baba533407 Update actix-http dependency 2020-03-08 14:38:07 +09:00
Yuki Okushi
2bf50826b0 Bump up to 2.0.0-alpha.2 2020-03-08 14:37:33 +09:00
Yuki Okushi
41b2a3b2e2 Merge pull request #116 from Jonathas-Conceicao/topic/upgrade_trust_dns
actix-connect: Upgrade versions of trust-dns
2020-03-08 14:31:07 +09:00
Jonathas-Conceicao
7fdd4a1118 actix-connect: Upgrade versions of trust-dns
- `Address` trait is now required to have static lifetime;
- `start_resolver` and `start_default_resolver` are now `async` and may return
  a `ConnectError`;

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-03-07 14:52:41 -03:00
Jonathas-Conceicao
cb30f9e86a actix-connect: Run cargo fmt
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-03-07 14:37:39 -03:00
Yuki Okushi
873f69be51 Merge pull request #115 from actix/JohnTitor-patch-2
Disable windows-mingw builder temporarily
2020-03-06 14:11:50 +09:00
Yuki Okushi
0967061f30 Merge pull request #114 from actix/JohnTitor-patch-1
Unpin quote version
2020-03-06 14:11:28 +09:00
Yuki Okushi
59902cb3a3 Disable windows-mingw builder temporarily 2020-03-06 13:48:55 +09:00
Yuki Okushi
857e50120b Unpin quote version 2020-03-06 13:45:21 +09:00
Yuki Okushi
36a2edf1cd Merge pull request #111 from dunnock/master
Fix build with failing quote
2020-03-05 23:05:19 +09:00
Maksym Vorobiov
346bd072d3 fix build with failing quote 2020-03-05 14:58:44 +02:00
Yuki Okushi
8d3d58b3b7 Merge pull request #110 from Aaron1011/fix/better-pin
Replace calls to `Pin::new_unchecked` with `pin_project`.
2020-03-05 21:52:55 +09:00
Aaron Hill
c41b5d8dd4 Replace calls to Pin::new_unchecked with pin_project.
This is a breaking change, as it changes some public methods to take
`Pin<&mut Self>` rather than `&mut self`.

This brings these methods into line with `Stream::poll_next`, which also
takes a `Pin<&mut Self>`
2020-03-04 12:08:52 -05:00
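
A generic sketch of the kind of change described above, on a made-up wrapper type rather than the crate's: the unsafe `Pin::new_unchecked` projection is replaced by a safe `#[pin_project]` field projection, and the method signature moves from `&mut self` to `Pin<&mut Self>`.

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

use pin_project::pin_project;

#[pin_project]
struct Wrapper<F> {
    #[pin]
    inner: F,
}

impl<F: Future> Wrapper<F> {
    // Previously this might have been `fn poll_inner(&mut self, ...)` using
    // `unsafe { Pin::new_unchecked(&mut self.inner) }`; with pin_project the
    // method takes `Pin<&mut Self>` and the projection is safe.
    fn poll_inner(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<F::Output> {
        self.project().inner.poll(cx)
    }
}
```
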
Yuki Okushi
693d5132a9 Merge pull request #109 from JohnTitor/new-tls
actix-tls: Bump up to 2.0.0-alpha.1
2020-03-03 22:29:08 +09:00
Yuki Okushi
f7dac3feb4 Bump up to 2.0.0-alpha.1 2020-03-03 19:47:40 +09:00
Yuki Okushi
ebc11d03f2 Merge pull request #108 from JohnTitor/new-connect
Release `actix-connect` v2.0.0-alpha.1
2020-03-03 18:33:08 +09:00
Yuki Okushi
e3ad5de270 Update actix-connect dependency 2020-03-03 17:24:41 +09:00
Yuki Okushi
91118bb2ce Bump up to 2.0.0-alpha.1 2020-03-03 17:24:25 +09:00
Yuki Okushi
6628688bcf Merge pull request #107 from JohnTitor/rustls-017
Update `rustls` and `tokio-rustls`
2020-03-01 23:48:13 +09:00
Yuki Okushi
b9567359fd actix-tls: Update rustls and tokio-rustls 2020-03-01 12:08:14 +09:00
Yuki Okushi
7dbc0264b1 actix-connect: Update rustls and tokio-rustls 2020-03-01 12:08:14 +09:00
Erich Gubler
1b7c969f6a actix-rt: minimize futures dependencies to futures-{channel,util} with default features off (#104)
* build(deps): minimize `futures` deps by using `futures-channel` and `futures-util` directly

* style(actix-rt): enforce spaces around equals in `Cargo.toml`
2020-02-27 01:15:21 +09:00
Jonathas-Conceicao
f1685d8253 Add Arbiter::local_join associated function
Arbiter::local_join function can be used to await for futures spawned
on current arbiter.

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-02-26 12:59:46 -03:00
Jonathas-Conceicao
e3b6a33b97 Add integration tests
These initial tests validate basic usage with timed futures for:
- `System::block_on`;
- `Arbiter::new`;
- `Arbiter::stop`;
- `Arbiter::join`;

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-02-26 12:59:46 -03:00
Yuki Okushi
13b503435f Merge pull request #106 from JohnTitor/server-102
Release actix-server 1.0.2
2020-02-26 20:53:00 +09:00
Yuki Okushi
98f0290f65 actix-server: Bump up to 1.0.2 2020-02-26 19:48:52 +09:00
Yuki Okushi
b8f66f5e7f Update changelog 2020-02-26 19:48:41 +09:00
Yuki Okushi
dd59ee498e Add FIXME comment 2020-02-26 19:48:27 +09:00
Dany Laporte
83320efa31 Avoid error by register() on Windows (#103) 2020-02-26 18:40:31 +09:00
Yuki Okushi
c69bc11e3e Merge pull request #105 from actix/bench
Add action to check benchmark
2020-02-26 17:33:37 +09:00
Yuki Okushi
aad5c42ad7 Add action to check benchmark 2020-02-26 17:11:46 +09:00
Maxim Vorobjov
4d37858fc6 Benchmarks for actix-service: focused around UnsafeCell usage (#98)
* add benchmark comparing unsafecell vs refcell

* fix syntax

* add benches for and_then implementation options

* repeat benches to stabilize
2020-02-26 16:45:23 +09:00
Yuki Okushi
d402f08bb5 Merge pull request #102 from JohnTitor/single-import
Remove single import
2020-02-25 19:11:04 +09:00
Yuki Okushi
fa25e30427 Remove single import 2020-02-25 18:41:15 +09:00
Bo Yao
602db1779e Expose is_set (#99)
* Expose is_set

* Update doc and changes.md
2020-02-25 02:55:02 -03:00
Yuki Okushi
4f2910c6b3 Merge pull request #96 from actix/JohnTitor-patch-1
Disable coverage for PRs
2020-02-15 01:55:20 +09:00
Yuki Okushi
9f7d6bc068 Disable coverage for PRs 2020-02-14 07:30:21 +09:00
Yuki Okushi
6908b58943 Merge pull request #92 from actix/bye-travis
Move script from Travis to Actions
2020-02-02 06:28:42 +09:00
Yuki Okushi
043057ecbd Fix import scopes 2020-02-01 23:32:08 +09:00
Yuki Okushi
e12bf9200b Clean up metadata 2020-01-31 02:21:25 +09:00
Yuki Okushi
03d431e663 Add badges on README 2020-01-31 00:01:47 +09:00
Yuki Okushi
f0d352604e Remove travis config 2020-01-31 00:01:34 +09:00
Yuki Okushi
2f67e4f563 Use markdown format 2020-01-31 00:01:24 +09:00
Yuki Okushi
d1155d60ec Tweak Actions 2020-01-31 00:01:11 +09:00
Yuki Okushi
28d9c6a760 Merge pull request #90 from actix/fix-ci
Tweak GitHub Actions
2020-01-30 00:46:21 +09:00
Yuki Okushi
a970c2c997 Remove AppVeyor config 2020-01-29 12:05:55 +09:00
Yuki Okushi
d5a6c83207 Suppress/fix clippy warnings 2020-01-29 12:05:55 +09:00
Yuki Okushi
ee0db9a617 Tweak GitHub Actions 2020-01-29 12:05:55 +09:00
zero-systems
e5b5df1261 Optimize vector fill in builder. (#89)
* optimize vector fill
2020-01-22 06:35:22 +09:00
Nikolay Kim
dbfa13d6be Fixed unsoundness in .and_then()/.then() service combinators 2020-01-16 16:58:11 -08:00
Nikolay Kim
e7c2439543 prep release 2020-01-15 13:35:07 -08:00
Nikolay Kim
3116db5168 revert 1.0.3 changes 2020-01-15 13:24:38 -08:00
Nikolay Kim
5940731ef0 Fix actix-service 1.0.3 compatibility 2020-01-15 11:58:06 -08:00
Rajasekharan Vengalil
aed5fecc8a Add support for tokio tracing for actix Service. (#86)
* Add support for tokio tracing for actix Service.

* Address comments

* Change trace's return type to ApplyTransform

* Remove redundant type args

* Remove reference to MakeSpan from docs
2020-01-15 11:43:52 -08:00
Nikolay Kim
a751899aad Fixed unsoundness in AndThenService impl #83 2020-01-15 11:40:15 -08:00
Nikolay Kim
fa800aeba3 Fix AsRef<str> impl 2020-01-14 15:06:02 -08:00
Nikolay Kim
2f89483635 Merge branch 'master' of github.com:actix/actix-net 2020-01-14 00:42:29 -08:00
Nikolay Kim
3048073919 Add PartialEq<T: AsRef<str>>, AsRef<[u8]> impls 2020-01-13 11:58:31 +06:00
amosonn
4bbba803c1 Fix Service documentation (#85) 2020-01-12 07:44:01 +09:00
Sven-Hendrik Haase
4dcdeb6795 Merge pull request #84 from currency-engineering/master
Minor grammatical fix to docs.
2020-01-10 15:28:19 +01:00
Eric Findlay
3b4f222242 Minor grammatical fix to docs. 2020-01-10 20:52:49 +09:00
Nikolay Kim
7c5fa25b23 Add into_service helper function 2020-01-08 18:31:50 +06:00
Nikolay Kim
3551d6674d Add Clone impl for condition::Waiter 2020-01-08 11:18:56 +06:00
Nikolay Kim
9f00daea80 add Condition and Pool 2020-01-08 10:59:27 +06:00
Nikolay Kim
7dddeab2a8 Add ResourceDef::resource_path_named() path generation method 2019-12-31 18:02:43 +06:00
Nikolay Kim
dcbcc40da2 Revert "Support named parameters for ResourceDef::resource_path() in form of ((&k, &v), ...)"
This reverts commit b0d44198ba.
2019-12-31 15:14:53 +06:00
Nikolay Kim
b0d44198ba Support named parameters for ResourceDef::resource_path() in form of ((&k, &v), ...) 2019-12-31 14:53:30 +06:00
Nikolay Kim
974bd6b01e leak string instead of rc 2019-12-31 12:04:35 +06:00
Nikolay Kim
5779da0f49 refactor service and state management 2019-12-29 13:42:42 +06:00
Nikolay Kim
1918c8d4f8 rename .run to .start() 2019-12-29 10:07:46 +06:00
Nikolay Kim
e21c58930b Add impl IntoPattern for &String 2019-12-25 21:34:14 +04:00
Nikolay Kim
59c5e9be6a Use IntoPattern for RouterBuilder::path() 2019-12-25 21:01:07 +04:00
Nikolay Kim
a2a9d9764d introduce IntoPattern trait 2019-12-25 19:54:20 +04:00
Nikolay Kim
bf0a9d2f6e Add IntoPatterns trait 2019-12-25 15:34:21 +04:00
Nikolay Kim
119027f822 fmt 2019-12-25 15:10:13 +04:00
Nikolay Kim
0fe8038d23 allow specifying a set of resource patterns 2019-12-25 15:10:01 +06:00
Nikolay Kim
b599bc4a0c map_config() and unit_config() accepts IntoServiceFactory type 2019-12-22 16:30:49 +04:00
Nikolay Kim
a80e1f8370 fix new() method and make from_static and from_bytes_unchecked methods const 2019-12-22 16:24:28 +04:00
Nikolay Kim
5fe759cc02 Merge branch 'master' of github.com:actix/actix-net 2019-12-20 09:15:19 +06:00
Nikolay Kim
05549f0b42 Add methods to check LocalWaker registration state 2019-12-20 09:13:11 +06:00
Yuki Okushi
b1430eaded Run tests for all features as possible (#78) 2019-12-19 16:31:32 +09:00
Nikolay Kim
0d3f9e74c5 Use .advance() instead of .split_to() 2019-12-19 09:50:31 +06:00
Nikolay Kim
cab73791ed pin trust-dns-proto 2019-12-15 13:04:26 +06:00
Nikolay Kim
a7ac1a76ed add license files to actix-macros 2019-12-14 23:01:55 +06:00
Nikolay Kim
37bedff6fb use parking_lot 0.10 2019-12-12 06:57:40 +06:00
Nikolay Kim
33fd6adc11 better InOrder test 2019-12-12 06:56:45 +06:00
Nikolay Kim
4305cdba2c Revert InOrder service changes 2019-12-11 23:10:02 +06:00
Nikolay Kim
52ecb4bcc5 Add oneshot::Sender::is_canceled() method 2019-12-11 20:52:57 +06:00
Nikolay Kim
b28f32e82c Allow to create framed::Dispatcher with custom mpsc::Receiver 2019-12-11 20:23:14 +06:00
Nikolay Kim
081205a02f Disconnect callback accepts owned state 2019-12-11 18:57:43 +06:00
Nikolay Kim
8bb81c0768 optimize InOrder service 2019-12-11 18:55:53 +06:00
Nikolay Kim
c7a8743bf9 remove E param 2019-12-11 16:44:09 +06:00
Nikolay Kim
f26fcc703b prep release 2019-12-11 14:56:05 +06:00
Nikolay Kim
ce4587df82 prepare actix-tls release 2019-12-11 14:53:58 +06:00
Nikolay Kim
9957f28137 prepare actix-testing release 2019-12-11 14:49:26 +06:00
Nikolay Kim
9d84d14ef4 update deps 2019-12-11 14:47:30 +06:00
Nikolay Kim
60bfa1bfb1 prepare actix-server release 2019-12-11 14:43:26 +06:00
Nikolay Kim
2c81c22b3e refactor ioframe dispatcher 2019-12-11 14:36:11 +06:00
Nikolay Kim
dded482514 allow to close mpsc sender 2019-12-11 14:36:00 +06:00
Nikolay Kim
631cb86947 refactor framed and stream dispatchers 2019-12-11 12:42:07 +06:00
Nikolay Kim
2e5e69c9ba Simplify oneshot and mpsc implementations 2019-12-11 11:28:09 +06:00
Nikolay Kim
e315cf2893 prep actix-rt release; update deps 2019-12-11 10:34:50 +06:00
Nikolay Kim
13fd615966 actix-macros release 2019-12-11 10:32:01 +06:00
Nikolay Kim
c094f84b85 prepare actix-service release 2019-12-11 10:29:34 +06:00
Nikolay Kim
25012d290a update actix-codec dependencies 2019-12-11 10:23:01 +06:00
Nikolay Kim
32202188cc prepare actix-codec release 2019-12-11 10:18:11 +06:00
Nikolay Kim
bf734a31dc update docs 2019-12-10 21:34:51 +06:00
Nikolay Kim
d29e7c4ba6 Merge branch 'master' of github.com:actix/actix-net 2019-12-10 21:14:18 +06:00
Nikolay Kim
7163e2c2a2 update doc strings 2019-12-10 21:14:06 +06:00
Nikolay Kim
1d810b4561 re-export AlpnError 2019-12-10 12:15:27 +06:00
daxpedda
0913badd61 Macro improvements. (#74)
* Macro improvements.

* Fix usage in `fn main`.
2019-12-10 08:47:35 +06:00
Nikolay Kim
8b3062cd6e Fix buffer remaining capacity calculation 2019-12-09 21:50:36 +06:00
Nikolay Kim
35218a4df1 add Clone impl for Apply service 2019-12-09 14:07:20 +06:00
Nikolay Kim
d47f1fb730 prepare actix-service release 2019-12-08 19:49:35 +06:00
Nikolay Kim
1ad0bbfb7f rename fn service helpers 2019-12-08 19:05:05 +06:00
Nikolay Kim
c38a25f102 fix hash impl 2019-12-07 11:51:47 +06:00
Nikolay Kim
110457477a update changes 2019-12-07 11:04:53 +06:00
Nikolay Kim
a899b1e04d bump actix-ioframe version 2019-12-07 10:55:54 +06:00
Nikolay Kim
393cf1ab25 add unsafe from_bytes_unchecked 2019-12-07 10:48:22 +06:00
Nikolay Kim
40fbbb9c32 fix crate name 2019-12-07 10:39:33 +06:00
Nikolay Kim
99fef4f06b add helper conversions 2019-12-07 10:22:08 +06:00
Nikolay Kim
fc0825fcdd update tokio to 0.2.4 2019-12-07 10:15:26 +06:00
Nikolay Kim
6c00ab8296 add string crate 2019-12-07 09:59:39 +06:00
Nikolay Kim
cbdbc05dbd update tokio version and prep alpha3 release 2019-12-07 09:57:43 +06:00
Yuki Okushi
5674840c01 Stop running tests for all features (#73) 2019-12-07 08:54:58 +06:00
Nikolay Kim
6f07c9d72a update trust-dns 2019-12-06 14:08:11 +06:00
Nikolay Kim
fa48ddcfa1 fix non unix signals support 2019-12-06 14:06:14 +06:00
Max Gortman
f89a992daf eager drop in then, and_then, and_then_apply_fn (#72) 2019-12-06 10:34:44 +06:00
Nikolay Kim
e670a32ff3 include stream feature 2019-12-06 01:34:13 +06:00
Nikolay Kim
021c742d22 use string crate from master 2019-12-06 00:10:27 +06:00
Nikolay Kim
88a60ffa66 reexport ssl types 2019-12-05 23:09:44 +06:00
Nikolay Kim
cb2845cb26 fix dependencies 2019-12-05 20:58:28 +06:00
Nikolay Kim
b18fbc98d5 move rustls and nativetls acceptor services to actix-tls 2019-12-05 20:52:37 +06:00
Nikolay Kim
3a858feaec migrate to tokio 0.2.2 2019-12-05 16:40:24 +06:00
Nikolay Kim
d49aca9595 use bitflags for internal flags; use tokio 0.2 2019-12-05 13:11:56 +06:00
Nikolay Kim
6f41b80cb4 optimize service combinators memory layout 2019-12-05 12:37:26 +06:00
Nikolay Kim
c6eb318536 Fix low/high watermark for write/read buffers; fix oneshot impl 2019-12-05 01:36:31 +06:00
Nikolay Kim
21dcc22e53 refactor server configurations 2019-12-04 21:35:27 +06:00
Nikolay Kim
de84663768 fix initial worker service state 2019-12-04 15:52:49 +06:00
Nikolay Kim
c4e2051327 refactor server worker 2019-12-04 15:12:02 +06:00
Nikolay Kim
0a4fe22003 Restore Service/Factory::apply_fn() in the form of Pipeline/Factory::and_then_apply_fn() 2019-12-03 19:59:28 +06:00
Nikolay Kim
eb773c8b8c Merge branch 'master' of github.com:actix/actix-net 2019-12-03 18:34:32 +06:00
Nikolay Kim
db0bc1e156 Restore Transform::map_init_err() combinator 2019-12-03 18:32:02 +06:00
Yuki Okushi
9eb12e0467 Use GitHub Actions (#71) 2019-12-03 20:00:16 +09:00
Nikolay Kim
eb33f0ecbe add Clone for apply combinator 2019-12-03 16:15:06 +06:00
Nikolay Kim
cbc5da8625 update changes 2019-12-03 14:10:36 +06:00
Nikolay Kim
ec8dca8d69 Merge branch 'master' of github.com:actix/actix-net 2019-12-03 14:09:35 +06:00
Nikolay Kim
6a9df026e7 Add missing Clone impl for factory_fn_cfg 2019-12-03 14:05:23 +06:00
Aaron Housh
2756bedc3d Fix for non Unix OS (#69) 2019-12-03 10:07:54 +06:00
Nikolay Kim
bd4c4cda8b update threadpool 2019-12-02 22:49:02 +06:00
Nikolay Kim
c0ede65317 restore 0.1 behavior 2019-12-02 22:47:49 +06:00
Nikolay Kim
9f575418c1 clippy warnings 2019-12-02 22:30:09 +06:00
Nikolay Kim
9ed35cca7a use owned value for service factory config 2019-12-02 21:27:48 +06:00
Nikolay Kim
3385682e09 remove server feature 2019-12-02 17:04:42 +06:00
Nikolay Kim
f55f96bc77 fix dependencies 2019-12-02 11:49:42 +06:00
Nikolay Kim
a08b1eba87 update tests 2019-12-02 11:43:52 +06:00
Nikolay Kim
d81e72cf06 remove deprecated crate 2019-12-02 11:30:52 +06:00
Nikolay Kim
9fbe6a1f6d refactor server configuration and tls support 2019-12-02 11:30:27 +06:00
Nikolay Kim
16ff283fb2 add metadata 2019-12-01 20:30:24 +06:00
Nikolay Kim
503c2feb08 re-export net primitives 2019-12-01 10:56:25 +06:00
Nikolay Kim
bec4efc699 add extra methods to pipeline 2019-11-29 13:51:00 +06:00
Nikolay Kim
5e5ae2ddec restore stream dispatcher 2019-11-29 10:41:09 +06:00
Nikolay Kim
a02064592b disable rustls 2019-11-27 21:03:26 +06:00
Nikolay Kim
af72005159 move BoxFuture to boxed mod 2019-11-27 20:59:36 +06:00
Nikolay Kim
c254bb978c allow to wait on Server until server stops; restore signal handling 2019-11-26 17:03:52 +06:00
Nikolay Kim
009f8e2e7c allow to wait server exit 2019-11-26 16:33:45 +06:00
Nikolay Kim
f5aecdee8f work around rust#62127 2019-11-26 10:14:21 +06:00
Nikolay Kim
4546774f4e include fn ident in err message 2019-11-26 10:04:46 +06:00
Nikolay Kim
2cf140a869 include fn token in err message 2019-11-26 10:01:46 +06:00
Nikolay Kim
e76ea8e80c re-export timeout 2019-11-26 09:04:14 +06:00
Nikolay Kim
52d03fa18c use actix deps instead of tokio 2019-11-26 08:26:22 +06:00
Nikolay Kim
5efac449b1 re-export time utils 2019-11-26 08:12:16 +06:00
Nikolay Kim
4ceac79f2c add test and main macros 2019-11-25 21:49:11 +06:00
Nikolay Kim
1fddd1e75b renamed boxed service 2019-11-25 18:18:00 +06:00
Nikolay Kim
905d058454 upgrade derive_more 2019-11-25 17:54:47 +06:00
Nikolay Kim
5265714f68 prep alpha.1 release 2019-11-21 19:58:55 +06:00
Nikolay Kim
ae4394c0f2 fix uds server support 2019-11-21 00:35:44 +06:00
Nikolay Kim
d3c5518646 fix rustls acceptor 2019-11-19 18:54:36 +06:00
Nikolay Kim
3bf83c1d98 cleanup Unpin constraint; simplify Framed impl 2019-11-19 14:51:40 +06:00
Nikolay Kim
617e40a7e9 fix framed_read 2019-11-19 11:06:55 +06:00
Nikolay Kim
3105cde168 add Service impl for RefCell<S> 2019-11-19 08:45:09 +06:00
Nikolay Kim
5b74c79cf9 Simplify transform trait, remove map_init_err 2019-11-19 06:51:43 +06:00
Nikolay Kim
8bf8ad86d6 add IntoServiceFactory impl for service_fn 2019-11-18 20:46:49 +06:00
Nikolay Kim
877f89eeb7 use service types for ssl connectors 2019-11-18 20:20:56 +06:00
Nikolay Kim
1354946460 remove pin-project; update Unpin constraint 2019-11-18 18:28:54 +06:00
Nikolay Kim
7404d82a9b use concrete types 2019-11-18 14:30:04 +06:00
Nikolay Kim
c1cdc9908a update deps and fix definitions 2019-11-15 16:06:44 +06:00
Yuki Okushi
be7904fd57 Fix code style (#65)
* Fix clippy warnings

* cargo fmt

* Remove redundant lifetime
2019-11-15 00:28:29 +09:00
Nikolay Kim
13049b80ca Migrate actix-net to std::future (#64)
* Migrate actix-codec, actix-rt, and actix-threadpool to std::future

* update to latest tokio alpha and futures-rs

* Migrate actix-service to std::future,

This is a squash of ~8 commits, since it included a lot of experimentation. To see the commits,
look into the semtexzv/std-future-service-tmp branch.

* update futures-rs and tokio

* Migrate actix-threadpool to std::future (#59)

* Migrate actix-threadpool to std::future

* Cosmetic refactor

- turn log::error! into log::warn! as it doesn't throw any error
- add Clone and Copy impls for Cancelled making it cheap to operate with
- apply rustfmt

* Bump up crate version to 0.2.0 and pre-fill its changelog

* Disable patching 'actix-threadpool' crate in global workspace as unnecessary

* Revert patching and fix 'actix-rt'

* Migrate actix-rt to std::future (#47)

* remove Pin from Service::poll_ready(); simplify combinators api; make code compile

* disable tests

* update travis config

* refactor naming

* drop IntoFuture trait

* Migrate actix-server to std::future (#50)

Still not finished, this is more WIP; it is an aggregation of several commits, which
can be found in the semtexzv/std-future-server-tmp branch

* update actix-server

* rename Factory to ServiceFactory

* start server worker in start method

* update actix-utils

* remove IntoTransform trait

* Migrate actix-server::ssl::nativetls to std futures (#61)

* Refactor 'nativetls' module

* Migrate 'actix-server-config' to std futures

- remove "uds" feature
- disable features by default

* Switch NativeTlsAcceptor to use 'tokio-tls' crate

* Bikeshed feature names and remove unnecessary dependencies for 'actix-server-config' crate

* update openssl impl

* migrate actix-connect to std::future

* migrate actix-ioframe to std::future

* update version to alpha.1

* fix boxed service

* migrate server rustls support

* migrate openssl and rustls connectors

* store the thread's handle with arbiter (#62)

* update ssl connect tests

* restore service tests

* update readme
2019-11-14 18:38:24 +06:00
Nikolay Kim
9fa2a36b4e prepare actix-rt release 2019-11-14 17:33:28 +06:00
Ivan Ladelshchikov
ed5023128b store the thread's handle with arbiter (#60) 2019-11-14 15:07:33 +06:00
Nikolay Kim
2e8c2c7733 Re-register task on every future poll 2019-10-14 17:55:52 +06:00
Nikolay Kim
115e82329f fix arbiter thread panic message 2019-10-14 11:19:08 +06:00
Nikolay Kim
0b0060fe47 update deps 2019-10-14 10:37:48 +06:00
Nikolay Kim
35e32d8e55 prepare actix-testing release 2019-10-14 10:30:27 +06:00
Nikolay Kim
9982a9498d register current task in counter's available() method. 2019-10-08 15:02:43 +06:00
Nikolay Kim
fa72975f34 extra trace logging 2019-10-08 14:46:22 +06:00
Sven-Hendrik Haase
fe5de2510d Merge pull request #56 from actix/fix-52
Add an error message if we receive a non-hostname-based dest
2019-10-04 13:48:20 +02:00
Yuki Okushi
e3155957a8 Prepare actix-server release (#55) 2019-10-04 17:36:23 +09:00
Sven-Hendrik Haase
f6f9e1fcdb Add an error message if we receive a non-hostname-based dest
This is more helpful than an unwrap and at least points users at the right location.
Upstream issue is https://github.com/briansmith/webpki/issues/54
2019-10-04 07:30:13 +02:00
Yuki Okushi
2667850d60 Prepare actix-server-config release (#54)
* Prepare actix-server-config release

* Bump up actix-server-config to 0.2.0
2019-10-04 06:13:33 +06:00
Yuki Okushi
fba2002702 Prepare actix-connect release (#53) 2019-10-04 06:21:59 +09:00
Jerome Gravel-Niquet
e733c562d9 Update rustls, tokio-rustls and webpki across the board (#42)
* Update rustls, tokio-rustls and webpki across the board

* bump minimum rust version to 1.37

* updated readme and changelogs to reflect changes and minimum required rust version
2019-10-04 03:32:32 +09:00
Yuki Okushi
8f05986a9f Use map() instead of and_then() (#51) 2019-10-03 14:55:44 +09:00
Nikolay Kim
aa9bbe2114 prepare actix-ioframe release 2019-09-25 10:47:06 +06:00
Nikolay Kim
4837a901e2 prepare actix-server release 2019-09-25 10:35:15 +06:00
Nikolay Kim
a02ff17cb1 remove actix-tower from workspace 2019-09-25 10:11:17 +06:00
Nikolay Kim
dbf566928c drop tower integration 2019-09-25 10:01:08 +06:00
Nikolay Kim
ca982b2467 update workspace deps for tests 2019-09-25 10:00:54 +06:00
Nikolay Kim
c859d13e3b use actix-testing instead of test server 2019-09-25 09:51:28 +06:00
Nikolay Kim
41e49e8093 update changes 2019-09-25 09:32:33 +06:00
Nikolay Kim
715a770d7a deprecate test server 2019-09-25 09:31:52 +06:00
Nikolay Kim
5469d8c910 prep actix-testing release 2019-09-25 09:26:12 +06:00
Nikolay Kim
8be5f773f4 add actix-testing crate 2019-09-17 16:04:20 +06:00
karlri
b686b4c34e Feature uds: Add listen_uds to ServerBuilder (#43)
Allows directly passing a Unix listener instead of a path. Useful,
for example, when running as a daemon under systemd with the systemd
crate.
2019-09-16 11:07:46 +06:00
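
A heavily hedged sketch of the use case described above, written against a more recent actix-server API rather than the version in this commit; the exact `listen_uds` signature, the `fn_service` helper, and the `UnixStream` type used here are assumptions, not taken from this change.

```rust
#[cfg(unix)]
fn build_from_inherited_socket(
    lst: std::os::unix::net::UnixListener,
) -> std::io::Result<actix_server::ServerBuilder> {
    // Hand a pre-bound listener (e.g. inherited from systemd) to the builder
    // instead of asking it to bind a filesystem path itself.
    actix_server::Server::build().listen_uds("inherited-uds", lst, || {
        // One service per accepted connection; this one just accepts
        // and drops the stream.
        actix_service::fn_service(|_conn: actix_rt::net::UnixStream| async move {
            Ok::<_, std::io::Error>(())
        })
    })
}
```
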
Nikolay Kim
34a7b7f05a add TcpStreamService 2019-09-05 16:34:48 -07:00
Nikolay Kim
b1d9b06a87 Use arbiters storage for default async resolver 2019-09-02 15:15:55 -07:00
Nikolay Kim
94e673b50b Add arbiter specific storage 2019-09-02 15:03:03 -07:00
Nikolay Kim
1a644c6bb1 Check service readiness for new_apply_cfg combinator 2019-08-27 05:28:15 +06:00
Yuki Okushi
aad013f559 Fix clippy warnings (#40)
Add explicit `dyn`s

Remove let binding

Use +=

Return false

Derive Default for TcpConnector

Squash if/else

Remove unnecessary return keywords

Use is_empty()

Fix clippy attribute

Allow mut_from_ref
2019-08-17 05:15:51 +09:00
Aron Heinecke
7a18d9da26 Add check for minimum rust version (#39) 2019-08-07 17:49:29 -07:00
Aron Heinecke
d59b8ce62e Fix invalid minimum version (#38) 2019-08-07 17:48:31 -07:00
Nikolay Kim
3821d511d0 prep actix-threadpool release 2019-08-05 09:54:49 -07:00
Nikolay Kim
62e429cb0c Merge branch 'master' of github.com:actix/actix-net 2019-08-05 09:53:03 -07:00
Nikolay Kim
a2643d475a Add ConnectService and OpensslConnectService 2019-08-05 09:52:50 -07:00
Sven-Hendrik Haase
34c259a8b5 Merge pull request #35 from ignatenkobrain/parking_lot
threadpool: Update parking_lot to 0.9
2019-08-05 17:30:19 +02:00
Igor Gnatenko
8b398c3386 threadpool: Update parking_lot to 0.9
Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
2019-08-04 15:46:14 +02:00
Neil Locketz
0baceb0e56 Fix typo in desc (#34) 2019-07-30 09:35:57 -07:00
Michael Snoyman
6be1f37f6c Minor typo corrections in docs (#33) 2019-07-25 11:46:11 +06:00
Nikolay Kim
a742768feb bump version 2019-07-24 14:16:25 +06:00
Marat Safin
f913872159 add rustls support for connect (#31) 2019-07-24 14:14:26 +06:00
Nikolay Kim
41145040e1 remove ClonableService 2019-07-19 11:03:16 +06:00
Nikolay Kim
311bb14d97 add unix domain sockets support #3 2019-07-18 17:05:40 +06:00
Nikolay Kim
2955e49d78 add unix domain sockets support 2019-07-18 16:43:42 +06:00
Nikolay Kim
9d1b428b34 undeprecate framed transport 2019-07-17 13:31:00 +06:00
Nikolay Kim
42d526bced mark some fn as unsafe 2019-07-17 11:16:38 +06:00
Nikolay Kim
23a230a83b deprecate ClonableService and FramedTransport 2019-07-17 10:57:52 +06:00
Nikolay Kim
411e31786f update actix-connect changes 2019-07-17 10:33:47 +06:00
Nikolay Kim
b491d373b1 update actix-rt changes 2019-07-17 10:30:59 +06:00
Jeff Muizelaar
9271b95c87 Avoid a copy of the Future when initializing the Box. (#29)
Futures can be pretty big (> 1500 bytes), so this is probably worth doing.

I confirmed with memcpy-find that this did in fact eliminate two ~1500
byte copies from the actix-web basic example.
2019-07-17 10:29:22 +06:00
Jan Michael Auer
1b3cd0d88c Expose Connect addrs (#30) 2019-07-17 06:17:51 +06:00
Nikolay Kim
da302d4b7a fix disconnect callback 2019-07-03 13:02:03 +06:00
Nikolay Kim
922a919572 simple callback 2019-07-02 12:35:27 +06:00
Nikolay Kim
5a62175b6e add disconnect callback 2019-07-02 12:10:05 +06:00
Nikolay Kim
5445e341c3 give access to io object during connect stage 2019-07-01 22:37:59 +06:00
Nikolay Kim
1b17d274a0 refactor connect stage 2019-07-01 11:20:24 +06:00
Nikolay Kim
9d8b3e6275 impl Stream and Sink for Connect 2019-06-30 22:58:23 +06:00
Nikolay Kim
27baf03f64 Do not block on sink drop for FramedTransport 2019-06-26 15:20:56 +06:00
Nikolay Kim
205cac82ce add custom framed dispatcher service 2019-06-26 15:19:40 +06:00
Nikolay Kim
07708c5e9a prepare rt release 2019-06-22 09:02:17 +06:00
Nikolay Kim
1c04ad3238 Merge pull request #22 from GeorgeHahn/with-external-runtime
Allow Actix to be started on an external CurrentThread runtime
2019-06-22 08:53:19 +06:00
Nikolay Kim
66aa21740c Merge pull request #28 from ignatenkobrain/deps
chore: Update derive_more to 0.15
2019-06-18 22:26:37 +06:00
Igor Gnatenko
b183cb3324 chore: Update derive_more to 0.15
Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
2019-06-18 10:25:01 +02:00
Nikolay Kim
158482cd2f Add new_apply_cfg function 2019-06-06 14:28:07 +06:00
George Hahn
9e61f62871 new_async -> run_in_executor and return future directly + builder cleanup 2019-06-05 12:51:59 -05:00
Nikolay Kim
7051888289 prepare actix-threadpool release 2019-06-05 08:09:46 +06:00
Nikolay Kim
0caa47fc47 Merge pull request #27 from ignatenkobrain/license
Include license files into all sub-crates
2019-06-01 16:48:42 +06:00
Nikolay Kim
6d1cbb2d2f Merge pull request #26 from ignatenkobrain/master
chore: Update parking_lot to 0.8
2019-06-01 16:47:56 +06:00
Igor Gnatenko
ca289ddf7f Include license files into all sub-crates
Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
2019-05-30 20:38:44 +02:00
Igor Gnatenko
ad9a197916 chore: Update parking_lot to 0.8
Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
2019-05-30 20:30:43 +02:00
George Hahn
c4f05e033f fixup: fix new_async doc comment 2019-05-24 10:29:52 -05:00
George Hahn
048314913c Enable System to be executed on an external CurrentThread runtime 2019-05-23 13:34:47 -05:00
Nikolay Kim
c1b183e1ce Merge branch 'master' of github.com:actix/actix-net 2019-05-18 10:56:51 -07:00
Nikolay Kim
87bc3dacd9 use u64 for shutdown_timeout 2019-05-18 10:56:41 -07:00
Nikolay Kim
0156f479a0 Merge pull request #19 from pka/patch-1
Fix typo
2019-05-15 13:12:26 -07:00
Pirmin Kalberer
139fa3b9a2 Fix typo 2019-05-15 20:51:24 +02:00
Nikolay Kim
a14f612382 remove debug prints 2019-05-15 10:29:10 -07:00
Nikolay Kim
059e2ad042 Fix checked resource match 2019-05-15 10:21:29 -07:00
Nikolay Kim
fdf2a6f422 prepare actix-utils release 2019-05-15 08:31:40 -07:00
Nikolay Kim
fc2631c852 merge remote 2019-05-14 17:37:14 -07:00
Nikolay Kim
d51b210ae7 Merge branch 'master' of github.com:actix/actix-net 2019-05-14 17:36:18 -07:00
Nikolay Kim
0a6cded975 change Either constructor 2019-05-14 17:32:50 -07:00
Nikolay Kim
14e3933d8b Merge pull request #17 from neoeinstein/tower-interop
Second iteration on tower interop
2019-05-12 20:03:42 -07:00
Nikolay Kim
837504c10f update deps 2019-05-12 08:40:42 -07:00
Nikolay Kim
802d808aca prepare actix-connect release 2019-05-12 08:15:18 -07:00
Nikolay Kim
7712de3d8e update deps 2019-05-12 08:10:30 -07:00
Nikolay Kim
f1d0d5f6f9 prepare actix-server release 2019-05-12 08:03:16 -07:00
Nikolay Kim
a76fcaf4d8 prepare actix-utils release 2019-05-12 08:00:23 -07:00
Nikolay Kim
a2134035d6 prepare actix-service release 2019-05-12 07:53:26 -07:00
Nikolay Kim
5f8599faf1 merge master 2019-05-12 06:06:45 -07:00
Nikolay Kim
f0776fca94 Use associated type for NewService config 2019-05-12 06:03:50 -07:00
Marcus Griep
c7676df697 Add documentation and doctests 2019-05-03 10:08:49 -04:00
Marcus Griep
ecf7a11a20 Add convenience methods to wrap with middleware 2019-05-02 17:47:37 -04:00
Marcus Griep
686958fe0c Add reciprical compatibility layer for tower 2019-05-02 17:22:22 -04:00
Nikolay Kim
49ade171f6 Update CHANGES.md 2019-05-01 23:20:00 -07:00
Nikolay Kim
0a2a520c35 Merge pull request #16 from boustrophedon/derive_debug
Derive debug for Server and ServerCommand
2019-05-01 23:19:16 -07:00
Harry Stern
b0c37dfc87 Derive debug for Server and ServerCommand 2019-05-02 01:31:04 -04:00
Nikolay Kim
91e28a4312 Merge pull request #15 from neoeinstein/tower-interop
Add compatibility layer for tower-service
2019-04-29 10:21:32 -07:00
Marcus Griep
508dce8bf1 Add compatibility crate for tower-service
Introduces a new crate `actix-tower`, which makes it easier for services
built on the `tower-service` abstraction to be used with
`actix-service`.
2019-04-29 12:39:16 -04:00
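As a rough illustration of what such a compatibility layer does (wrapping a service written against one service trait so it satisfies another), here is a self-contained sketch; the `TowerLike` and `ActixLike` traits are simplified synchronous stand-ins defined locally, not the real `tower-service` or `actix-service` traits.

```rust
// Simplified stand-in traits; the real tower-service and actix-service traits
// are poll-based and asynchronous, which this sketch deliberately omits.
trait TowerLike<Request> {
    type Response;
    type Error;
    fn call(&mut self, req: Request) -> Result<Self::Response, Self::Error>;
}

trait ActixLike<Request> {
    type Response;
    type Error;
    fn call(&mut self, req: Request) -> Result<Self::Response, Self::Error>;
}

/// Newtype adapter: anything usable as `TowerLike` can now be passed where an
/// `ActixLike` service is expected, by delegating `call`.
struct Compat<S>(S);

impl<S, Request> ActixLike<Request> for Compat<S>
where
    S: TowerLike<Request>,
{
    type Response = S::Response;
    type Error = S::Error;

    fn call(&mut self, req: Request) -> Result<Self::Response, Self::Error> {
        self.0.call(req)
    }
}

// Tiny usage example: an echo service written against the "tower" side.
struct Echo;

impl TowerLike<String> for Echo {
    type Response = String;
    type Error = std::convert::Infallible;

    fn call(&mut self, req: String) -> Result<String, Self::Error> {
        Ok(req)
    }
}

fn main() {
    let mut svc = Compat(Echo);
    let res = ActixLike::call(&mut svc, "hi".to_string()).unwrap();
    assert_eq!(res, "hi");
}
```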
Nikolay Kim
8ed1099a2e Merge pull request #14 from Bobo1239/master
Increase compiler recursion limit
2019-04-23 13:04:32 -07:00
Boris-Chengbiao Zhou
83544bd971 Increase compiler recursion limit
Fixes `cargo doc` on Windows. (actix/actix#189)
2019-04-23 22:02:37 +02:00
Nikolay Kim
76c317e0b2 Added support for remainder match 2019-04-22 21:19:22 -07:00
Nikolay Kim
3b314e4c8c Connect::set_addr() 2019-04-19 17:43:52 -07:00
Nikolay Kim
ae27b87641 IoStream trait and impls for TcpStream, SslStream and TlsStream 2019-04-16 08:32:12 -07:00
Nikolay Kim
fc2dcadc7a use stable version of trust-dns-resolver 2019-04-14 20:46:36 -07:00
Nikolay Kim
54f62b5035 prep release 2019-04-12 12:30:55 -07:00
Nikolay Kim
d3208bf7a8 Do not start default resolver immediately for default connector. 2019-04-12 12:28:18 -07:00
Nikolay Kim
21507d3da1 add TestServerRuntime::run_on() method 2019-04-12 12:26:47 -07:00
Nikolay Kim
b9d8a215b4 Start trust-dns default resolver on first use 2019-04-11 09:57:21 -07:00
Nikolay Kim
51c4dfe5cb Allow to reset Path instance; export Quoter type 2019-04-07 22:48:18 -07:00
Nikolay Kim
a60112c71e Poll boxed service call result immediately 2019-04-07 20:48:40 -07:00
Nikolay Kim
bd814d6f80 re-export trust-dns types 2019-04-05 10:36:57 -07:00
Nikolay Kim
a4e0c71baa Merge branch 'master' of github.com:actix/actix-net 2019-04-04 15:41:50 -07:00
Nikolay Kim
b9ea445e70 Log error if dns system config could not be loaded 2019-04-04 15:41:05 -07:00
Nikolay Kim
ba2901269d Merge pull request #11 from Dowwie/master
added docs for trait Service, trait Transform
2019-04-04 11:06:02 -07:00
dowwie
5cbc29306a updated as per comments 2019-04-04 14:02:53 -04:00
Nikolay Kim
810fa869ae remove unneeded static 2019-04-04 10:04:19 -07:00
dowwie
33cd51aabf added docs for trait Service, trait Transform 2019-04-04 11:40:28 -04:00
Nikolay Kim
629ed05f82 Get dynamic segment by name instead of iterator 2019-04-03 21:40:21 -07:00
Nikolay Kim
5e8ae210f7 Rename connect Connector to TcpConnector #10 2019-03-31 19:14:13 -07:00
Nikolay Kim
3add90628f Fix SIGINT force shutdown 2019-03-30 12:09:02 -07:00
Nikolay Kim
02ab804e0b prepare actix-service release 2019-03-29 11:16:40 -07:00
Nikolay Kim
feac0b43d9 add impl Service for Rc<RefCell<S>> 2019-03-29 10:21:17 -07:00
Nikolay Kim
1441355d4f use release 2019-03-28 04:02:39 -07:00
Nikolay Kim
7c5afc09a6 move threadpool to separate crate 2019-03-28 03:56:52 -07:00
Nikolay Kim
16856c7d3f Merge branch 'master' of github.com:actix/actix-net 2019-03-27 17:30:54 -07:00
Nikolay Kim
95d02659d5 Added Framed::map_io() method 2019-03-27 17:30:37 -07:00
Juan Aguilar Santillana
bcbd7e6ddf Fix unnecessary arbiter clone at builder rt 2019-03-23 09:46:08 +03:00
Nikolay Kim
e0d3581239 allow to send messages to framed transport via mpsc channel 2019-03-20 09:44:23 -07:00
Nikolay Kim
ef1bdb2eb2 update travis config 2019-03-17 10:25:24 -07:00
Nikolay Kim
10301ff49d temp tarpaulin fix 2019-03-17 08:53:50 -07:00
Nikolay Kim
27c28d6597 Fix error handling for single address 2019-03-15 11:37:51 -07:00
Nikolay Kim
b290273e81 prepare actix-connect release 2019-03-14 22:39:49 -07:00
Nikolay Kim
720230b852 Merge pull request #8 from Firstyear/2019-03-15-arbiter-docs
Improve Arbiter documentation
2019-03-14 20:58:33 -07:00
Nikolay Kim
44c2639fd6 prepare actix-server release 2019-03-14 20:55:55 -07:00
Nikolay Kim
9a5705d1b6 merge travis branch 2019-03-14 20:53:56 -07:00
Nikolay Kim
7ff923a58f stop tests threads 2019-03-14 20:52:17 -07:00
Nikolay Kim
6659b192d3 travis config 2019-03-14 20:52:13 -07:00
Nikolay Kim
1146d9cf30 fix init order 2019-03-14 20:48:58 -07:00
Nikolay Kim
b7b76c47e5 rename method 2019-03-14 20:23:49 -07:00
Nikolay Kim
d23dc6f6af Allow to run future before server service initialization 2019-03-14 20:09:34 -07:00
William Brown
9b6a955da4 Improve Arbiter documentation 2019-03-15 10:24:27 +10:00
Nikolay Kim
f3aa48309f travis config 2019-03-14 11:55:39 -07:00
Nikolay Kim
c9b86712e5 reinstall tarpaulin 2019-03-14 11:31:32 -07:00
Nikolay Kim
0f74f280f9 impl Address for http::Uri 2019-03-14 11:15:32 -07:00
Nikolay Kim
eb37e15554 use specific version of nightly 2019-03-14 10:43:36 -07:00
Nikolay Kim
ad007b8b42 Merge branch 'master' of github.com:actix/actix-net 2019-03-14 10:28:47 -07:00
Nikolay Kim
7c0d1f2273 update travis config 2019-03-14 10:25:34 -07:00
Nikolay Kim
d82bc7c52b Merge pull request #7 from najamelan/fix/compiler_warnings
Fix compiler warnings.
2019-03-14 07:24:37 -07:00
Nikolay Kim
265229b44b allow to override port 2019-03-13 22:55:01 -07:00
Nikolay Kim
38545dedc7 refactor Connect type and add tests 2019-03-13 22:51:31 -07:00
Nikolay Kim
6ebff22601 simplify name 2019-03-13 16:38:08 -07:00
Nikolay Kim
2c9b91b366 add specific constructors 2019-03-13 15:55:20 -07:00
Nikolay Kim
b483200037 remove generic 2019-03-13 15:52:51 -07:00
Nikolay Kim
a73600fbcd remove generic E 2019-03-13 15:51:21 -07:00
Nikolay Kim
084a28ca07 add Connect::with_request 2019-03-13 15:49:31 -07:00
Nikolay Kim
a7c74c53ea store request in Connect request 2019-03-13 15:37:12 -07:00
Nikolay Kim
3e7d737e73 update travis 2019-03-13 14:39:02 -07:00
Nikolay Kim
87db4bf741 changes 2019-03-13 14:38:33 -07:00
Nikolay Kim
8b0fe6f796 rename crate 2019-03-13 12:41:41 -07:00
Nikolay Kim
52a45fda53 redesign actix-connector 2019-03-13 12:40:11 -07:00
Naja Melan
2c7de7e0fb Fix compiler warnings.
Compiles in stable and nightly
2019-03-13 08:41:26 +01:00
Nikolay Kim
1fcc0734b5 prep release 2019-03-12 17:01:02 -07:00
Nikolay Kim
b6f952b036 remove constraint 2019-03-12 16:49:57 -07:00
Nikolay Kim
0fdac38307 add constraint 2019-03-12 16:48:29 -07:00
Nikolay Kim
a3c4637372 prepare actix-server release 2019-03-12 16:40:22 -07:00
Nikolay Kim
ae9bc5ae78 refactor ApplyConfig combinator 2019-03-12 16:32:10 -07:00
Nikolay Kim
21c289d7e4 fix InOrderService::poll_ready() nested service readiness check 2019-03-12 16:03:05 -07:00
Nikolay Kim
5e6eed905c fix InFlightService::poll_ready() nested service readiness check 2019-03-12 15:48:02 -07:00
Nikolay Kim
6801a38de5 check readiness for all services 2019-03-12 15:15:14 -07:00
Nikolay Kim
39356690b0 use ServerConfig for system configuration 2019-03-12 14:14:21 -07:00
Nikolay Kim
f6f292a678 enforce constraint on OpensslAcceptor 2019-03-12 13:57:28 -07:00
Nikolay Kim
b3366bc1af service response is not important 2019-03-12 13:55:31 -07:00
Nikolay Kim
2c1f8f0b96 enforce constraints on AndThenNewService type 2019-03-12 13:50:14 -07:00
Nikolay Kim
e7465bfa2e fix map_err constraints 2019-03-12 13:45:05 -07:00
Nikolay Kim
755d4958c5 use IntoService types for transform services 2019-03-12 13:39:04 -07:00
Nikolay Kim
825117fd4c add Deref/DerefMut impls for Io 2019-03-12 13:12:22 -07:00
Nikolay Kim
7033b50fed add Io::set method for overriding param 2019-03-12 12:53:43 -07:00
Nikolay Kim
ef9bfb8981 add helpers 2019-03-12 12:53:08 -07:00
Nikolay Kim
bef199f831 add blocking mod support 2019-03-11 22:54:27 -07:00
Nikolay Kim
f1d4bcef4b add Arbiter::exec_fn and exec functions 2019-03-11 22:51:17 -07:00
Nikolay Kim
8e13ba7bce update readme 2019-03-11 18:24:31 -07:00
Nikolay Kim
9a9b3e9ca9 update server tests 2019-03-11 15:19:28 -07:00
Nikolay Kim
5567fb41d2 add default type parameter 2019-03-11 13:48:55 -07:00
Nikolay Kim
ad50595ece add PartialEq impl for Io 2019-03-11 13:37:30 -07:00
Nikolay Kim
2430c7247b update timing for travis 2019-03-11 12:53:16 -07:00
Nikolay Kim
1bf0f1e1a5 add Debug impl for Io; update examples 2019-03-11 12:46:12 -07:00
Nikolay Kim
9887aef6e8 add delay to test 2019-03-11 12:35:57 -07:00
Nikolay Kim
787255d030 add io parameters 2019-03-11 12:01:55 -07:00
Nikolay Kim
f696914038 prepare router release 2019-03-09 14:38:08 -08:00
Nikolay Kim
3618f542fb prepare release actix-utils 2019-03-09 14:30:37 -08:00
Nikolay Kim
1f54ae9051 update deps 2019-03-09 14:23:08 -08:00
Nikolay Kim
86f57e5a4a prepare actix-service release 2019-03-09 14:10:02 -08:00
Nikolay Kim
43ad18ccb1 remove get_decoded 2019-03-09 13:58:07 -08:00
Nikolay Kim
34995a8ccf revert re-quoting change 2019-03-09 13:56:09 -08:00
Nikolay Kim
0ff300c40f export ApplyConfig 2019-03-09 09:05:51 -08:00
Nikolay Kim
629ef23371 add .apply_cfg new service combinator 2019-03-09 09:02:23 -08:00
Nikolay Kim
d2b96ff877 add ServerConfig to server services 2019-03-09 07:31:22 -08:00
Nikolay Kim
ac62e2dbf9 revert generic request in actix-utils 2019-03-09 07:27:35 -08:00
Nikolay Kim
6bbbdba921 revert generic Request change 2019-03-09 06:36:23 -08:00
Nikolay Kim
2099629fe3 cleanup ssl services 2019-03-08 22:41:30 -08:00
Nikolay Kim
a4d4770462 remove server config 2019-03-08 22:38:39 -08:00
Nikolay Kim
70ead175b9 IntoService for fn_cfg_factory 2019-03-08 20:51:50 -08:00
Nikolay Kim
49867b5e9d fix IntoService 2019-03-08 20:50:29 -08:00
Nikolay Kim
0f064c43e9 remove uneeded code 2019-03-08 20:10:47 -08:00
Nikolay Kim
7db29544f9 add ServerConfig param for server service 2019-03-08 19:43:13 -08:00
Nikolay Kim
4850cf41ff rename module 2019-03-08 16:26:30 -08:00
Nikolay Kim
046142ffbc added is_prefix_match and resource_path 2019-03-08 15:34:40 -08:00
Nikolay Kim
49e6dbcda2 remove unused ResourceMap 2019-03-08 12:33:44 -08:00
Nikolay Kim
877614a494 add params decoding 2019-03-08 04:43:51 -08:00
Nikolay Kim
ac0e8b9e53 re-enable examples 2019-03-06 23:33:35 -08:00
Nikolay Kim
b407c65f4c add FramedParts::with_read_buf method 2019-03-06 22:53:55 -08:00
Nikolay Kim
51bd7d2721 update actix-rt 2019-03-06 10:39:53 -08:00
Nikolay Kim
c03d869694 return io::Result from run method, remove Handle 2019-03-06 10:24:58 -08:00
Nikolay Kim
25f1eae51f add ResourceDef::root_prefix, insert slash at the beginning of the pattern 2019-03-05 21:03:53 -08:00
Nikolay Kim
1153715149 change generics order for Transform trait 2019-03-05 09:49:08 -08:00
Nikolay Kim
aa2967c653 fix feature gated code 2019-03-05 07:41:41 -08:00
Nikolay Kim
dfbb77f98d make service Request type generic 2019-03-05 07:35:26 -08:00
Nikolay Kim
e8a49801eb revert IntoFuture change 2019-03-04 21:37:06 -08:00
Nikolay Kim
03f2046a42 add ApplyTransform new service 2019-03-04 21:25:50 -08:00
Nikolay Kim
2e18ca805c use IntoFuture 2019-03-04 20:40:38 -08:00
Nikolay Kim
15dafeff3d use IntoFuture instead of Future 2019-03-04 20:37:03 -08:00
Nikolay Kim
ed14e6b8ea change to IntoFuture 2019-03-04 20:29:35 -08:00
Nikolay Kim
700abc997e prepare actix-utils release 2019-03-04 19:45:17 -08:00
Nikolay Kim
8c48bf4de7 simplify transform trait 2019-03-04 19:38:11 -08:00
Nikolay Kim
9bc492cf6c add SslError 2019-03-04 16:16:39 -08:00
Nikolay Kim
d2a223e69e update changes 2019-03-04 15:42:25 -08:00
Nikolay Kim
e9657a399a add maxconnrate 2019-03-04 15:41:16 -08:00
Nikolay Kim
9f25fdf929 rename StreamServiceFactory to ServiceFactory 2019-03-04 14:31:46 -08:00
Nikolay Kim
82930de8e7 use default type for RouterBuilder 2019-03-04 14:03:46 -08:00
Nikolay Kim
04a3e59bd5 update tests 2019-03-04 12:41:39 -08:00
Nikolay Kim
0ff0daa795 allow custom checks for resource selection 2019-03-04 11:47:03 -08:00
Nikolay Kim
fb43940824 allow empty pattern 2019-03-03 21:00:58 -08:00
Nikolay Kim
0410f59cf5 prep release 2019-03-02 14:55:22 -08:00
Nikolay Kim
400023a07b prepare actix-connector release 2019-03-02 14:47:52 -08:00
Nikolay Kim
2e4c84dbb6 prepare actix-server release 2019-03-02 14:42:31 -08:00
Nikolay Kim
672c3936a6 prepare actix-utils release 2019-03-02 14:30:32 -08:00
Nikolay Kim
fbf4444b04 prep release 2019-03-02 14:22:03 -08:00
Nikolay Kim
b5b3168b34 do not use void for now 2019-03-02 13:49:21 -08:00
Nikolay Kim
668e4f9ac4 update utils; add NewTransform::map_init_err 2019-03-02 13:18:01 -08:00
Nikolay Kim
d0b8b6940c add configuration parameter to transform factory 2019-03-02 12:16:30 -08:00
Nikolay Kim
f1bc9d0deb update tests 2019-02-22 18:47:29 -08:00
Nikolay Kim
6ed020565c update service crate 2019-02-22 18:31:25 -08:00
Nikolay Kim
7ee33efdfd moved boxed service and new service to actix-service 2019-02-22 18:20:54 -08:00
Nikolay Kim
83f51b28d7 allow to customize cfg parameter for FnNewService 2019-02-22 17:12:26 -08:00
Nikolay Kim
83a19e9cb3 add IntoConfigurableNewService 2019-02-22 14:30:00 -08:00
Nikolay Kim
6b4010892d add IntoNewService for FnNewService 2019-02-22 14:19:43 -08:00
Nikolay Kim
d2bd9134aa add fn service helpers 2019-02-22 14:13:48 -08:00
Nikolay Kim
a0e2d926e6 add fn_nservice 2019-02-22 13:20:52 -08:00
Nikolay Kim
43d2dd473f update tests 2019-02-22 13:08:31 -08:00
Nikolay Kim
862be49e30 add Config argument to NewService 2019-02-22 12:44:37 -08:00
Nikolay Kim
6ea128fac5 Custom BoxedNewService implementation 2019-02-21 11:19:28 -08:00
Nikolay Kim
a97d7f0ccf add BoxedNewService and BoxedService 2019-02-21 10:41:39 -08:00
Nikolay Kim
3d7daabdd7 add NewService impls for Rc<S> and Arc<S> 2019-02-19 11:31:54 -08:00
Nikolay Kim
32f4718880 Add Display impl for InOrderError 2019-02-11 08:39:28 -08:00
Nikolay Kim
b8f9bf4bc8 Add Display impl for TimeoutError 2019-02-11 08:34:57 -08:00
Nikolay Kim
e354c6df92 Drop service response 2019-02-09 21:39:17 -08:00
Nikolay Kim
a53f06a1a4 allow register router resource for ResourceDef 2019-02-09 20:43:39 -08:00
Nikolay Kim
9979bfb3ef rename Pattern to ResourceDef 2019-02-09 07:24:35 -08:00
Nikolay Kim
17d0f84f63 prep actix-utils 2019-02-06 11:40:22 -08:00
Nikolay Kim
08bc328826 clippy warnings 2019-02-04 11:04:10 -08:00
Nikolay Kim
7dca264546 move transform map_err combinator to separate module 2019-02-04 10:55:39 -08:00
Nikolay Kim
3bddba5da5 helper method 2019-02-03 14:12:15 -08:00
Nikolay Kim
4be025926c add InOrder service 2019-02-03 14:05:13 -08:00
Nikolay Kim
0063a26aab prepare actix-service release 2019-02-03 13:32:51 -08:00
Nikolay Kim
bcc466f6ab update tests 2019-02-03 11:48:11 -08:00
Nikolay Kim
663ae53954 fix Clone impl for Timeout 2019-02-03 11:37:34 -08:00
Nikolay Kim
406088524e depend on git repo 2019-02-03 11:33:26 -08:00
Nikolay Kim
5b8446105f depend on repo 2019-02-03 11:16:24 -08:00
Nikolay Kim
429ad453d3 change Apply::new_fn to old args order 2019-02-03 10:52:44 -08:00
Nikolay Kim
bd977373bc generalize apply combinator with transform trait 2019-02-03 10:42:27 -08:00
Nikolay Kim
d45fb9521f update deps 2019-02-01 20:11:30 -08:00
Nikolay Kim
94a0da3659 prepare actix-connector release 2019-02-01 20:06:53 -08:00
Nikolay Kim
8d62ac4b2f prepare actix-server release 2019-02-01 20:03:31 -08:00
Nikolay Kim
442163690e prepare actix-utils release 2019-02-01 20:00:12 -08:00
Nikolay Kim
d83bf95304 Use associated type instead of generic for Service definition 2019-02-01 19:53:13 -08:00
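The trade-off behind this change, shown with generic stand-in traits rather than the actual actix-service definitions: a generic parameter lets one type implement the trait for many request types, while an associated type fixes one request type per implementation and tends to simplify bounds on combinators.

```rust
// Illustrative only; not the actix-service trait from this commit.

// Request as a generic parameter: one service type could implement this
// trait for several different request types at once.
trait ServiceGeneric<Request> {
    type Response;
    fn call(&mut self, req: Request) -> Self::Response;
}

// Request as an associated type: each implementation commits to exactly one
// request type, which the compiler can then infer wherever the service is used.
trait ServiceAssoc {
    type Request;
    type Response;
    fn call(&mut self, req: Self::Request) -> Self::Response;
}
```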
Nikolay Kim
8cdbf49187 better inflight readiness check 2019-02-01 15:15:53 -08:00
Nikolay Kim
132be0baa1 better naming 2019-02-01 14:48:09 -08:00
Nikolay Kim
cedba24a36 add Url type from actix web 2019-02-01 13:25:56 -08:00
Nikolay Kim
aa20b304ec more refactorings for framed transport 2019-01-26 22:07:27 -08:00
Nikolay Kim
9f0a288e4b refactor FramedTransport 2019-01-26 21:41:28 -08:00
Nikolay Kim
cabebb6b3f add time services tests 2019-01-26 13:21:56 -08:00
Nikolay Kim
84bd257b86 update deps 2019-01-26 13:15:17 -08:00
Nikolay Kim
ff6ac0a67f add System time service 2019-01-26 13:14:37 -08:00
Nikolay Kim
c1c989034d Added Clone impl for Timeout service factory 2019-01-25 14:31:27 -08:00
Nikolay Kim
515bfad830 prep release 2019-01-24 20:42:30 -08:00
Nikolay Kim
0340d82314 better ergonomics for .apply combinator 2019-01-24 20:41:42 -08:00
Nikolay Kim
88548199d7 change apply combinator error semantic 2019-01-24 19:19:44 -08:00
Nikolay Kim
278176fca5 use FnMut instead of Fn 2019-01-16 15:33:33 -08:00
Nikolay Kim
2c8e7c4ae4 add helper constructors to Either service 2019-01-16 15:33:10 -08:00
Nikolay Kim
b6414d6197 use FnMut instead of Fn 2019-01-16 15:00:23 -08:00
Nikolay Kim
f94ef5248e add Clone impl for Either service 2019-01-16 15:00:08 -08:00
Nikolay Kim
cbc378b67f allow deserialize from the path 2019-01-15 19:25:49 -08:00
Nikolay Kim
db2367b26e properly check readiness of enclosed service 2019-01-14 09:41:10 -08:00
Nikolay Kim
0bee4db270 use new service converter 2019-01-13 23:30:42 -08:00
Nikolay Kim
615a0d52ed add service and new service for stream dispatcher 2019-01-13 23:12:46 -08:00
Nikolay Kim
cfb62ccc40 Make Out::Error convertible from T::Error for apply combinator 2019-01-13 22:58:23 -08:00
Nikolay Kim
605a947c3e update version 2019-01-13 10:06:54 -08:00
Nikolay Kim
31f0a96c20 Upgrade trust-dns-proto 2019-01-13 10:03:33 -08:00
Nikolay Kim
66a7c59aaf fix changelog 2019-01-11 21:42:13 -08:00
Nikolay Kim
316974616a Use FnMut instead of Fn for FnService 2019-01-11 21:39:58 -08:00
Nikolay Kim
b5d84bd980 use release version of string 2019-01-10 10:40:27 -08:00
Nikolay Kim
e969429e2c different string crate version 2019-01-09 19:54:52 -08:00
Nikolay Kim
6fe741025f add host and port for test server 2019-01-07 21:09:56 -08:00
Nikolay Kim
f8e170fdaf add enum support for path deserializer 2019-01-06 10:26:59 -08:00
Nikolay Kim
474fed4dfe more tests for de 2019-01-06 08:12:51 -08:00
Nikolay Kim
2b8f41e9e4 helper impls for Router 2019-01-05 22:00:38 -08:00
Nikolay Kim
3484007e4e add router crate 2019-01-05 13:20:32 -08:00
Nikolay Kim
58ba1d8269 use service error for stream dispatcher 2019-01-05 13:19:06 -08:00
Nikolay Kim
7017bad4bb add test server 2019-01-01 22:59:52 -08:00
Nikolay Kim
112a7b6b1b fix error handling 2018-12-26 11:50:07 -08:00
Nikolay Kim
48a1c7320c fix io handling code 2018-12-21 13:00:26 -08:00
Nikolay Kim
37d28304c9 Fix max concurrent connections handling 2018-12-21 10:38:08 -08:00
Nikolay Kim
640c39fdc8 better usage for Framed type 2018-12-16 16:26:24 -08:00
Nikolay Kim
cd5435e5ee fix service tests 2018-12-12 18:56:39 -08:00
Nikolay Kim
bf9bd97173 split ServiceExt trait 2018-12-12 18:32:19 -08:00
Nikolay Kim
61939c7af2 Release future early for .and_then() and .then() combinators 2018-12-12 18:00:35 -08:00
Nikolay Kim
e8a1664c15 prepare release 2018-12-12 14:24:46 -08:00
Nikolay Kim
d1bfae7414 fix backpressure for ssl concurrent handshakes 2018-12-12 14:24:24 -08:00
Nikolay Kim
5ca00dc798 rename ServiceConfig::rt to ServiceConfig::apply 2018-12-12 14:16:16 -08:00
Nikolay Kim
fd3e77ea83 fix signals handling on windows 2018-12-11 14:03:06 -08:00
Nikolay Kim
d38eb00793 no readme 2018-12-11 08:36:03 -08:00
Nikolay Kim
3c9d95bd9f no readme 2018-12-11 08:33:28 -08:00
Nikolay Kim
331db2eb47 use released version 2018-12-11 08:28:44 -08:00
Nikolay Kim
de66b5c776 fix examples 2018-12-11 08:20:19 -08:00
Nikolay Kim
4adbbad450 update travis config 2018-12-11 08:13:58 -08:00
Nikolay Kim
42ec3454d9 add signals support 2018-12-10 21:06:54 -08:00
Nikolay Kim
e6daca7995 try to parse host first 2018-12-10 18:08:07 -08:00
Nikolay Kim
d35c87d228 move helper services to separate package 2018-12-10 16:16:40 -08:00
Nikolay Kim
ba006d95c7 prepare actix-utils 2018-12-10 08:42:31 -08:00
Nikolay Kim
9577b7bbed add helper fn 2018-12-09 22:19:26 -08:00
Nikolay Kim
8ad93f4838 move server to separate crate 2018-12-09 22:14:29 -08:00
Nikolay Kim
ffb07c8884 use actix-rt for server impl 2018-12-09 21:51:35 -08:00
Nikolay Kim
cdd6904aa0 rename Server to ServerBuilder 2018-12-09 20:30:14 -08:00
Nikolay Kim
98a151db4f add actix single threaded runtime 2018-12-09 19:55:40 -08:00
Nikolay Kim
227ea15683 remove unused code 2018-12-09 15:21:23 -08:00
Nikolay Kim
e50be58fdb move codec to separate crate 2018-12-09 15:19:25 -08:00
Nikolay Kim
3288b7648d update changes 2018-12-09 10:16:35 -08:00
Nikolay Kim
43e14818c4 migrate to actix-service crate 2018-12-09 10:15:49 -08:00
Nikolay Kim
a60bf691d5 add Service impl for Box<S> 2018-12-09 10:14:08 -08:00
Nikolay Kim
5f37d85f9b move service to separate crate 2018-12-09 09:56:23 -08:00
Nikolay Kim
e8aa73a44b update tests 2018-12-06 14:53:55 -08:00
Nikolay Kim
8fe7ce533c convert to edition 2018 2018-12-06 14:04:42 -08:00
Nikolay Kim
42a4679635 use FnMut for apply combinator 2018-12-05 17:43:31 -08:00
Nikolay Kim
36a15efeac do not export cell 2018-12-03 17:46:25 -08:00
Nikolay Kim
5937a06ebe export cell 2018-12-03 13:49:46 -08:00
Nikolay Kim
a16ad6b2cd cleanup clonable service 2018-11-30 11:55:30 -08:00
Nikolay Kim
8108f19580 update tests 2018-11-29 17:17:02 -10:00
Nikolay Kim
1b1ae01b5a migrate to new Service<Request> trait 2018-11-29 16:56:15 -10:00
Nikolay Kim
c62567f85f allow to skip name resolution 2018-11-21 08:07:04 -10:00
Nikolay Kim
410204a41e Framed::is_write_buf_empty() checks if write buffer is flushed 2018-11-17 18:46:26 -08:00
Nikolay Kim
29fe995a02 update version 2018-11-14 14:26:24 -08:00
Nikolay Kim
741b3fb1c0 Fix wrong service to socket binding 2018-11-14 14:20:33 -08:00
Nikolay Kim
dba86fbbf8 allow to force write to Framed object 2018-11-14 10:26:49 -08:00
Nikolay Kim
c29501fcf3 better name 2018-11-13 22:55:20 -08:00
Nikolay Kim
9a3321b153 allow to check if Framed's write buffer is full 2018-11-13 21:33:51 -08:00
Nikolay Kim
f13a0925f7 update defaults 2018-11-13 21:26:49 -08:00
Nikolay Kim
8886672ae6 add write buffer capacity caps for Framed 2018-11-13 21:13:37 -08:00
Nikolay Kim
8f20f69559 use RequestHost trait for OpensslConnector service 2018-11-11 23:04:39 -08:00
Nikolay Kim
9b9599500a refactor connector and resolver services 2018-11-11 21:12:30 -08:00
Nikolay Kim
a4b81a256c remove debug output 2018-11-08 18:45:40 -08:00
Nikolay Kim
38235c14bb update changes 2018-11-08 09:16:40 -08:00
Nikolay Kim
bf9269de9a reset delay instead of creating new one 2018-11-07 21:20:50 -08:00
198 changed files with 18093 additions and 7956 deletions


@@ -1,41 +0,0 @@
environment:
global:
PROJECT_NAME: actix-net
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test

3
.cargo/config.toml Normal file

@@ -0,0 +1,3 @@
[alias]
chk = "check --workspace --all-features --tests --examples --bins"
lint = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"

24
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@@ -0,0 +1,24 @@
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
INSERT_PR_TYPE
## PR Checklist
Check your PR fulfills the following:
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt
## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->
<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->

131
.github/workflows/ci.yml vendored Normal file

@@ -0,0 +1,131 @@
name: CI
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [master]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
- { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
- { name: Windows (32-bit), os: windows-latest, triple: i686-pc-windows-msvc }
version:
- 1.46.0 # MSRV
- stable
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
VCPKGRS_DYNAMIC: 1
steps:
- name: Setup Routing
if: matrix.target.os == 'macos-latest'
run: sudo ifconfig lo0 alias 127.0.0.3
- uses: actions/checkout@v2
# install OpenSSL on Windows
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc' || matrix.target.triple == 'i686-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install OpenSSL
if: matrix.target.triple == 'i686-pc-windows-msvc'
run: vcpkg install openssl:x86-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
# - name: Install MSYS2
# if: matrix.target.triple == 'x86_64-pc-windows-gnu'
# uses: msys2/setup-msys2@v2
# - name: Install MinGW Packages
# if: matrix.target.triple == 'x86_64-pc-windows-gnu'
# run: |
# msys2 -c 'pacman -Sy --noconfirm pacman'
# msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
# - name: Generate Cargo.lock
# uses: actions-rs/cargo@v1
# with:
# command: generate-lockfile
# - name: Cache Dependencies
# uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with:
command: hack
args: check --workspace --no-default-features
- name: check minimal + tests
uses: actions-rs/cargo@v1
with:
command: hack
args: check --workspace --no-default-features --tests --examples
- name: check default
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --tests --examples
- name: check full
# TODO: compile OpenSSL and run tests on MinGW
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --all-features --tests --examples
- name: tests
if: matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --all-features --no-fail-fast -- --nocapture
- name: Generate coverage file
if: >
matrix.target.os == 'ubuntu-latest'
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml --verbose
- name: Upload to Codecov
if: >
matrix.target.os == 'ubuntu-latest'
&& matrix.version == 'stable'
&& github.ref == 'refs/heads/master'
uses: codecov/codecov-action@v1
with:
file: cobertura.xml
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.2 --no-default-features --features ci-autoclean
cargo-cache

42
.github/workflows/clippy-fmt.yml vendored Normal file

@@ -0,0 +1,42 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: rustfmt
override: true
- name: Rustfmt Check
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: clippy
override: true
- name: Clippy Check
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --all-features --tests --examples --bins -- -Dclippy::todo

35
.github/workflows/upload-doc.yml vendored Normal file

@@ -0,0 +1,35 @@
name: Upload documentation
on:
push:
branches: [master]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Build Docs
uses: actions-rs/cargo@v1
with:
command: doc
args: --workspace --all-features --no-deps
- name: Tweak HTML
run: echo '<meta http-equiv="refresh" content="0;url=actix_server/index.html">' > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: target/doc

2
.gitignore vendored

@@ -12,3 +12,5 @@ guide/build/
# These are backup files generated by rustfmt
**/*.rs.bk
.idea


@@ -1,54 +0,0 @@
language: rust
sudo: required
dist: trusty
cache:
cargo: true
apt: true
matrix:
include:
- rust: stable
- rust: beta
- rust: nightly
allow_failures:
- rust: nightly
env:
global:
# - RUSTFLAGS="-C link-dead-code"
- OPENSSL_VERSION=openssl-1.0.2
before_install:
- sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
- sudo apt-get update -qq
- sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
# Add clippy
before_script:
- export PATH=$PATH:~/.cargo/bin
script:
- |
if [[ "$TRAVIS_RUST_VERSION" != "nightly" ]]; then
cargo clean
cargo test --features="ssl,tls,rust-tls" -- --nocapture
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin
cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml
bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage"
fi
# Upload docs
after_success:
- |
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then
cargo doc --features "ssl,tls,rust-tls" --no-deps &&
echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
git clone https://github.com/davisp/ghp-import.git &&
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&
echo "Uploaded documentation"
fi


@@ -1,14 +0,0 @@
# Changes
## [0.1.1] - 2018-10-10
### Changed
- Set actix min version - 0.7.5
- Set trust-dns min version
## [0.1.0] - 2018-10-08
* Initial impl


@@ -34,10 +34,13 @@ This Code of Conduct applies both within project spaces and in public spaces whe
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
[@robjtede]: https://github.com/robjtede
[@JohnTitor]: https://github.com/JohnTitor
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]


@@ -1,81 +1,32 @@
[package]
name = "actix-net"
version = "0.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix net - framework for composable network services for Rust (experimental)"
readme = "README.md"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-net/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
[workspace]
members = [
"actix-codec",
"actix-macros",
"actix-router",
"actix-rt",
"actix-server",
"actix-service",
"actix-tls",
"actix-tracing",
"actix-utils",
"bytestring",
"local-channel",
"local-waker",
]
[package.metadata.docs.rs]
features = ["ssl", "tls", "rust-tls"]
[badges]
travis-ci = { repository = "actix/actix-net", branch = "master" }
# appveyor = { repository = "fafhrd91/actix-web-hdy9d" }
codecov = { repository = "actix/actix-net", branch = "master", service = "github" }
[lib]
name = "actix_net"
path = "src/lib.rs"
[features]
default = []
# tls
tls = ["native-tls"]
# openssl
ssl = ["openssl", "tokio-openssl"]
# rustls
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"]
cell = []
[dependencies]
actix = "^0.7.5"
log = "0.4"
num_cpus = "1.0"
# io
mio = "^0.6.13"
net2 = "0.2"
bytes = "0.4"
futures = "0.1"
slab = "0.4"
tokio = "0.1"
tokio-codec = "0.1"
tokio-io = "0.1"
tokio-tcp = "0.1"
tokio-timer = "0.2"
tokio-reactor = "0.1"
tokio-current-thread = "0.1"
tower-service = "0.1"
trust-dns-proto = "^0.5.0"
trust-dns-resolver = "^0.10.0"
# native-tls
native-tls = { version="0.2", optional = true }
# openssl
openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.2", optional = true }
#rustls
rustls = { version = "^0.14", optional = true }
tokio-rustls = { version = "^0.8", optional = true }
webpki = { version = "0.18", optional = true }
webpki-roots = { version = "0.15", optional = true }
[dev-dependencies]
env_logger = "0.5"
[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-macros = { path = "actix-macros" }
actix-router = { path = "actix-router" }
actix-rt = { path = "actix-rt" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }
local-channel = { path = "local-channel" }
local-waker = { path = "local-waker" }
[profile.release]
lto = true


@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Copyright 2017-NOW Actix Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -1,4 +1,4 @@
Copyright (c) 2017 Nikolay Kim
Copyright (c) 2017-NOW Actix Team
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated


@@ -1,67 +1,26 @@
# Actix net [![Build Status](https://travis-ci.org/actix/actix-net.svg?branch=master)](https://travis-ci.org/actix/actix-net) [![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net) [![crates.io](https://meritbadge.herokuapp.com/actix-net)](https://crates.io/crates/actix-net) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Actix Net
Actix net - framework for composable network services (experimental)
> A collection of lower-level libraries for composable network services.
## Documentation & community resources
![Apache 2.0 or MIT licensed](https://img.shields.io/crates/l/actix-server)
[![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
* [API Documentation (Development)](https://actix.rs/actix-net/actix_net/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-net](https://crates.io/crates/actix-net)
* Minimum supported Rust version: 1.26 or later
## Build statuses
| Platform | Build Status |
| ---------------- | ------------ |
| Linux | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Linux)") |
| macOS | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28macOS%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(macOS)") |
| Windows | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Windows%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows)") |
| Windows (MinGW) | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Windows-mingw%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows-mingw)") |
## Example
See `actix-server/examples` and `actix-tls/examples` for some basic examples.
```rust
fn main() {
let sys = actix::System::new("test");
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder.set_private_key_file("./examples/key.pem", SslFiletype::PEM).unwrap();
builder.set_certificate_chain_file("./examples/cert.pem").unwrap();
let acceptor = builder.build();
let num = Arc::new(AtomicUsize::new(0));
// bind socket address and start workers. By default server uses number of
// available logical cpu as threads count. actix net start separate
// instances of service pipeline in each worker.
Server::default()
.bind(
// configure service pipeline
"basic", "0.0.0.0:8443",
move || {
let num = num.clone();
let acceptor = acceptor.clone();
// service for converting incoming TcpStream to a SslStream<TcpStream>
(move |stream| {
SslAcceptorExt::accept_async(&acceptor, stream)
.map_err(|e| println!("Openssl error: {}", e))
})
// convert closure to a `NewService`
.into_new_service()
// .and_then() combinator uses other service to convert incoming `Request` to a `Response`
// and then uses that response as an input for next service.
// in this case, on success we use `logger` service
.and_then(logger)
// Next service counts number of connections
.and_then(move |req| {
let num = num.fetch_add(1, Ordering::Relaxed);
println!("processed {:?} connections", num);
future::ok(())
})
}).unwrap()
.start();
sys.run();
}
```
### MSRV
This repo's Minimum Supported Rust Version (MSRV) is 1.46.0.
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
@@ -71,6 +30,5 @@ at your option.
## Code of Conduct
Contribution to the actix-net crate is organized under the terms of the
Contributor Covenant, the maintainer of actix-net, @fafhrd91, promises to
intervene to uphold that code of conduct.
Contribution to the actix-net repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.

63
actix-codec/CHANGES.md Normal file

@@ -0,0 +1,63 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.4.0 - 2021-04-20
* No significant changes since v0.4.0-beta.1.
## 0.4.0-beta.1 - 2020-12-28
* Replace `pin-project` with `pin-project-lite`. [#237]
* Upgrade `tokio` dependency to `1`. [#237]
* Upgrade `tokio-util` dependency to `0.6`. [#237]
* Upgrade `bytes` dependency to `1`. [#237]
[#237]: https://github.com/actix/actix-net/pull/237
## 0.3.0 - 2020-08-23
* No changes from beta 2.
## 0.3.0-beta.2 - 2020-08-19
* Remove unused type parameter from `Framed::replace_codec`.
## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec::encode()` performance.
* Simplify `BytesCodec::decode()`.
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` to check emptiness of read buffer.
## 0.2.0 - 2019-12-10
* Use specific futures dependencies.
## 0.2.0-alpha.4
* Fix buffer remaining capacity calculation.
## 0.2.0-alpha.3
* Use tokio 0.2.
* Fix low/high watermark for write/read buffers.
## 0.2.0-alpha.2
* Migrated to `std::future`.
## 0.1.2 - 2019-03-27
* Added `Framed::map_io()` method.
## 0.1.1 - 2019-03-06
* Added `FramedParts::with_read_buffer()` method.
## 0.1.0 - 2018-12-09
* Move codec to separate crate.

24
actix-codec/Cargo.toml Normal file

@@ -0,0 +1,24 @@
[package]
name = "actix-codec"
version = "0.4.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_codec"
path = "src/lib.rs"
[dependencies]
bitflags = "1.2.1"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
log = "0.4"
pin-project-lite = "0.2"
tokio = "1"
tokio-util = { version = "0.6", features = ["codec", "io"] }

1
actix-codec/LICENSE-APACHE Symbolic link

@@ -0,0 +1 @@
../LICENSE-APACHE

1
actix-codec/LICENSE-MIT Symbolic link

@@ -0,0 +1 @@
../LICENSE-MIT


@@ -1,7 +1,7 @@
use bytes::{Buf, Bytes, BytesMut};
use std::io;
use bytes::{Bytes, BytesMut};
use tokio_codec::{Decoder, Encoder};
use super::{Decoder, Encoder};
/// Bytes codec.
///
@@ -9,12 +9,12 @@ use tokio_codec::{Decoder, Encoder};
#[derive(Debug, Copy, Clone)]
pub struct BytesCodec;
impl Encoder for BytesCodec {
type Item = Bytes;
impl Encoder<Bytes> for BytesCodec {
type Error = io::Error;
#[inline]
fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> Result<(), Self::Error> {
dst.extend_from_slice(&item[..]);
dst.extend_from_slice(item.chunk());
Ok(())
}
}
@@ -27,7 +27,7 @@ impl Decoder for BytesCodec {
if src.is_empty() {
Ok(None)
} else {
Ok(Some(src.take()))
Ok(Some(src.split()))
}
}
}
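For orientation, a small usage sketch of this codec together with the `Framed` wrapper added later in this diff. It is not part of the changeset and assumes dev-dependencies along the lines of `actix-rt = "2"`, `futures-util = "0.3"`, and `tokio = { version = "1", features = ["io-util"] }`, with an in-memory duplex pipe standing in for a real socket:

```rust
use actix_codec::{BytesCodec, Framed};
use bytes::Bytes;
use futures_util::{SinkExt, StreamExt};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // In-memory duplex pipe standing in for a TcpStream.
    let (client, server) = tokio::io::duplex(64);

    let mut framed_client = Framed::new(client, BytesCodec);
    let mut framed_server = Framed::new(server, BytesCodec);

    // BytesCodec frames are just raw chunks: encode appends the bytes to the
    // write buffer, decode hands back whatever is currently buffered.
    framed_client.send(Bytes::from_static(b"ping")).await?;

    if let Some(Ok(chunk)) = framed_server.next().await {
        assert_eq!(&chunk[..], b"ping");
    }

    Ok(())
}
```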

420
actix-codec/src/framed.rs Normal file

@@ -0,0 +1,420 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io};
use bytes::{Buf, BytesMut};
use futures_core::{ready, Stream};
use futures_sink::Sink;
use crate::{AsyncRead, AsyncWrite, Decoder, Encoder};
/// Low-water mark
const LW: usize = 1024;
/// High-water mark
const HW: usize = 8 * 1024;
bitflags::bitflags! {
struct Flags: u8 {
const EOF = 0b0001;
const READABLE = 0b0010;
}
}
pin_project_lite::pin_project! {
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
/// the `Encoder` and `Decoder` traits to encode and decode frames.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Encoder`/`Decoder`
/// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
pub struct Framed<T, U> {
#[pin]
io: T,
codec: U,
flags: Flags,
read_buf: BytesMut,
write_buf: BytesMut,
}
}
impl<T, U> Framed<T, U>
where
T: AsyncRead + AsyncWrite,
U: Decoder,
{
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
pub fn new(io: T, codec: U) -> Framed<T, U> {
Framed {
io,
codec,
flags: Flags::empty(),
read_buf: BytesMut::with_capacity(HW),
write_buf: BytesMut::with_capacity(HW),
}
}
}
impl<T, U> Framed<T, U> {
/// Returns a reference to the underlying codec.
pub fn codec_ref(&self) -> &U {
&self.codec
}
/// Returns a mutable reference to the underlying codec.
pub fn codec_mut(&mut self) -> &mut U {
&mut self.codec
}
/// Returns a reference to the underlying I/O stream wrapped by
/// `Frame`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn io_ref(&self) -> &T {
&self.io
}
/// Returns a mutable reference to the underlying I/O stream.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn io_mut(&mut self) -> &mut T {
&mut self.io
}
/// Returns a `Pin` of a mutable reference to the underlying I/O stream.
pub fn io_pin(self: Pin<&mut Self>) -> Pin<&mut T> {
self.project().io
}
/// Check if read buffer is empty.
pub fn is_read_buf_empty(&self) -> bool {
self.read_buf.is_empty()
}
/// Check if write buffer is empty.
pub fn is_write_buf_empty(&self) -> bool {
self.write_buf.is_empty()
}
/// Check if write buffer is full.
pub fn is_write_buf_full(&self) -> bool {
self.write_buf.len() >= HW
}
/// Check if framed is able to write more data.
///
/// A `Framed` object is considered ready if there is free space in the write buffer.
pub fn is_write_ready(&self) -> bool {
self.write_buf.len() < HW
}
/// Consume the `Frame`, returning `Frame` with different codec.
pub fn replace_codec<U2>(self, codec: U2) -> Framed<T, U2> {
Framed {
codec,
io: self.io,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
/// Consume the `Frame`, returning `Frame` with different io.
pub fn into_map_io<F, T2>(self, f: F) -> Framed<T2, U>
where
F: Fn(T) -> T2,
{
Framed {
io: f(self.io),
codec: self.codec,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
/// Consume the `Frame`, returning `Frame` with different codec.
pub fn into_map_codec<F, U2>(self, f: F) -> Framed<T, U2>
where
F: Fn(U) -> U2,
{
Framed {
io: self.io,
codec: f(self.codec),
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
}
impl<T, U> Framed<T, U> {
/// Serialize item and Write to the inner buffer
pub fn write<I>(mut self: Pin<&mut Self>, item: I) -> Result<(), <U as Encoder<I>>::Error>
where
T: AsyncWrite,
U: Encoder<I>,
{
let this = self.as_mut().project();
let remaining = this.write_buf.capacity() - this.write_buf.len();
if remaining < LW {
this.write_buf.reserve(HW - remaining);
}
this.codec.encode(item, this.write_buf)?;
Ok(())
}
/// Try to read underlying I/O stream and decode item.
pub fn next_item(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<<U as Decoder>::Item, U::Error>>>
where
T: AsyncRead,
U: Decoder,
{
loop {
let mut this = self.as_mut().project();
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if this.flags.contains(Flags::READABLE) {
if this.flags.contains(Flags::EOF) {
match this.codec.decode_eof(&mut this.read_buf) {
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
Ok(None) => return Poll::Ready(None),
Err(e) => return Poll::Ready(Some(Err(e))),
}
}
log::trace!("attempting to decode a frame");
match this.codec.decode(&mut this.read_buf) {
Ok(Some(frame)) => {
log::trace!("frame decoded from buffer");
return Poll::Ready(Some(Ok(frame)));
}
Err(e) => return Poll::Ready(Some(Err(e))),
_ => (), // Need more data
}
this.flags.remove(Flags::READABLE);
}
debug_assert!(!this.flags.contains(Flags::EOF));
// Otherwise, try to read more data and try again. Make sure we've got room
let remaining = this.read_buf.capacity() - this.read_buf.len();
if remaining < LW {
this.read_buf.reserve(HW - remaining)
}
let cnt = match tokio_util::io::poll_read_buf(this.io, cx, this.read_buf) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e.into()))),
Poll::Ready(Ok(cnt)) => cnt,
};
if cnt == 0 {
this.flags.insert(Flags::EOF);
}
this.flags.insert(Flags::READABLE);
}
}
/// Flush write buffer to underlying I/O stream.
pub fn flush<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
{
let mut this = self.as_mut().project();
log::trace!("flushing framed transport");
while !this.write_buf.is_empty() {
log::trace!("writing; remaining={}", this.write_buf.len());
let n = ready!(this.io.as_mut().poll_write(cx, this.write_buf))?;
if n == 0 {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)
.into()));
}
// remove written data
this.write_buf.advance(n);
}
// Try flushing the underlying IO
ready!(this.io.poll_flush(cx))?;
log::trace!("framed transport flushed");
Poll::Ready(Ok(()))
}
/// Flush write buffer and shutdown underlying I/O stream.
pub fn close<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
{
let mut this = self.as_mut().project();
ready!(this.io.as_mut().poll_flush(cx))?;
ready!(this.io.as_mut().poll_shutdown(cx))?;
Poll::Ready(Ok(()))
}
}
impl<T, U> Stream for Framed<T, U>
where
T: AsyncRead,
U: Decoder,
{
type Item = Result<U::Item, U::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.next_item(cx)
}
}
impl<T, U, I> Sink<I> for Framed<T, U>
where
T: AsyncWrite,
U: Encoder<I>,
U::Error: From<io::Error>,
{
type Error = U::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.is_write_ready() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> {
self.write(item)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.close(cx)
}
}
impl<T, U> fmt::Debug for Framed<T, U>
where
T: fmt::Debug,
U: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Framed")
.field("io", &self.io)
.field("codec", &self.codec)
.finish()
}
}
impl<T, U> Framed<T, U> {
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// These objects take a stream, a read buffer and a write buffer. These
/// fields can be obtained from an existing `Framed` with the `into_parts` method.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
io: parts.io,
codec: parts.codec,
flags: parts.flags,
write_buf: parts.write_buf,
read_buf: parts.read_buf,
}
}
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer
/// with unprocessed data, and the codec.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_parts(self) -> FramedParts<T, U> {
FramedParts {
io: self.io,
codec: self.codec,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
}
/// `FramedParts` contains an export of the data of a Framed transport.
/// It can be used to construct a new `Framed` with a different codec.
/// It contains all current buffers and the inner transport.
#[derive(Debug)]
pub struct FramedParts<T, U> {
/// The inner transport used to read bytes to and write bytes to
pub io: T,
/// The codec
pub codec: U,
/// The buffer with read but unprocessed data.
pub read_buf: BytesMut,
/// A buffer with unprocessed data that has not been written yet.
pub write_buf: BytesMut,
flags: Flags,
}
impl<T, U> FramedParts<T, U> {
/// Create a new, default, `FramedParts`
pub fn new(io: T, codec: U) -> FramedParts<T, U> {
FramedParts {
io,
codec,
flags: Flags::empty(),
read_buf: BytesMut::new(),
write_buf: BytesMut::new(),
}
}
/// Create a new `FramedParts` with read buffer
pub fn with_read_buf(io: T, codec: U, read_buf: BytesMut) -> FramedParts<T, U> {
FramedParts {
io,
codec,
read_buf,
flags: Flags::empty(),
write_buf: BytesMut::new(),
}
}
}

23
actix-codec/src/lib.rs Normal file

@@ -0,0 +1,23 @@
//! Codec utilities for working with framed protocols.
//!
//! Contains adapters to go from streams of bytes, [`AsyncRead`] and
//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`].
//! Framed streams are also known as `transports`.
//!
//! [`Sink`]: futures_sink::Sink
//! [`Stream`]: futures_core::Stream
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod bcodec;
mod framed;
pub use self::bcodec::BytesCodec;
pub use self::framed::{Framed, FramedParts};
pub use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
pub use tokio_util::codec::{Decoder, Encoder};
pub use tokio_util::io::poll_read_buf;

1
actix-macros/.gitignore vendored Normal file

@@ -0,0 +1 @@
/wip

33
actix-macros/CHANGES.md Normal file

@@ -0,0 +1,33 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.2.1 - 2021-02-02
* Add optional argument `system` to `main` macro which can be used to specify the path to `actix_rt::System` (useful for re-exports). [#363]
[#363]: https://github.com/actix/actix-net/pull/363
## 0.2.0 - 2021-02-02
* Update to latest `actix_rt::System::new` signature. [#261]
[#261]: https://github.com/actix/actix-net/pull/261
## 0.2.0-beta.1 - 2021-01-09
* Remove `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.3 - 2020-12-03
* Add `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.2 - 2020-05-18
* Forward actix_rt::test arguments to test function [#127]
[#127]: https://github.com/actix/actix-net/pull/127

25
actix-macros/Cargo.toml Normal file

@@ -0,0 +1,25 @@
[package]
name = "actix-macros"
version = "0.2.1"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Ibraheem Ahmed <ibrah1440@gmail.com>",
]
description = "Macros for Actix system and runtime"
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
proc-macro = true
[dependencies]
quote = "1.0.3"
syn = { version = "^1", features = ["full"] }
[dev-dependencies]
actix-rt = "2.0.0"
futures-util = { version = "0.3.7", default-features = false }
trybuild = "1"

1
actix-macros/LICENSE-APACHE Symbolic link

@@ -0,0 +1 @@
../LICENSE-APACHE

1
actix-macros/LICENSE-MIT Symbolic link

@@ -0,0 +1 @@
../LICENSE-MIT

144
actix-macros/src/lib.rs Normal file

@@ -0,0 +1,144 @@
//! Macros for Actix system and runtime.
//!
//! The [`actix-rt`](https://docs.rs/actix-rt) crate must be available for macro output to compile.
//!
//! # Entry-point
//! See docs for the [`#[main]`](macro@main) macro.
//!
//! # Tests
//! See docs for the [`#[test]`](macro@test) macro.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use proc_macro::TokenStream;
use quote::quote;
/// Marks async entry-point function to be executed by Actix system.
///
/// # Examples
/// ```
/// #[actix_rt::main]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
#[allow(clippy::needless_doctest_main)]
#[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let args = syn::parse_macro_input!(args as syn::AttributeArgs);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
if sig.asyncness.is_none() {
return syn::Error::new_spanned(
sig.fn_token,
"the async keyword is missing from the function declaration",
)
.to_compile_error()
.into();
}
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
for arg in &args {
match arg {
syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue {
lit: syn::Lit::Str(lit),
path,
..
})) => match path
.get_ident()
.map(|i| i.to_string().to_lowercase())
.as_deref()
{
Some("system") => match lit.parse() {
Ok(path) => system = path,
Err(_) => {
return syn::Error::new_spanned(lit, "Expected path")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
}
}
sig.asyncness = None;
(quote! {
#(#attrs)*
#vis #sig {
<#system>::new().block_on(async move { #body })
}
})
.into()
}
/// Marks async test function to be executed in an Actix system.
///
/// # Examples
/// ```
/// #[actix_rt::test]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
#[proc_macro_attribute]
pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
let mut has_test_attr = false;
for attr in attrs {
if attr.path.is_ident("test") {
has_test_attr = true;
}
}
if sig.asyncness.is_none() {
return syn::Error::new_spanned(
input.sig.fn_token,
"the async keyword is missing from the function declaration",
)
.to_compile_error()
.into();
}
sig.asyncness = None;
let missing_test_attr = if has_test_attr {
quote!()
} else {
quote!(#[test])
};
(quote! {
#missing_test_attr
#(#attrs)*
#vis #sig {
actix_rt::System::new()
.block_on(async { #body })
}
})
.into()
}
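
For illustration (a sketch based on the `quote!` expansion above, not part of the crate): an entry point that names a re-exported system type via the `system` argument expands roughly as shown in the comments. The `rt` module is a hypothetical re-export.

```rust
mod rt {
    // Hypothetical re-export, e.g. from a crate that wraps actix-rt.
    pub use actix_rt::System;
}

// Roughly expands to:
//
//     fn main() {
//         <rt::System>::new().block_on(async move { /* body */ })
//     }
//
#[actix_rt::main(system = "rt::System")]
async fn main() {
    println!("running on a re-exported System");
}
```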


@@ -0,0 +1,14 @@
#[test]
fn compile_macros() {
let t = trybuild::TestCases::new();
t.pass("tests/trybuild/main-01-basic.rs");
t.compile_fail("tests/trybuild/main-02-only-async.rs");
t.pass("tests/trybuild/main-03-fn-params.rs");
t.pass("tests/trybuild/main-04-system-path.rs");
t.compile_fail("tests/trybuild/main-05-system-expect-path.rs");
t.compile_fail("tests/trybuild/main-06-unknown-attr.rs");
t.pass("tests/trybuild/test-01-basic.rs");
t.pass("tests/trybuild/test-02-keep-attrs.rs");
t.compile_fail("tests/trybuild/test-03-only-async.rs");
}


@@ -0,0 +1,4 @@
#[actix_rt::main]
async fn main() {
println!("Hello world");
}


@@ -0,0 +1,4 @@
#[actix_rt::main]
fn main() {
futures_util::future::ready(()).await
}


@@ -0,0 +1,14 @@
error: the async keyword is missing from the function declaration
--> $DIR/main-02-only-async.rs:2:1
|
2 | fn main() {
| ^^
error[E0601]: `main` function not found in crate `$CRATE`
--> $DIR/main-02-only-async.rs:1:1
|
1 | / #[actix_rt::main]
2 | | fn main() {
3 | | futures_util::future::ready(()).await
4 | | }
| |_^ consider adding a `main` function to `$DIR/tests/trybuild/main-02-only-async.rs`


@@ -0,0 +1,6 @@
#[actix_rt::main]
async fn main2(_param: bool) {
futures_util::future::ready(()).await
}
fn main() {}


@@ -0,0 +1,8 @@
mod system {
pub use actix_rt::System as MySystem;
}
#[actix_rt::main(system = "system::MySystem")]
async fn main() {
futures_util::future::ready(()).await
}


@@ -0,0 +1,4 @@
#[actix_rt::main(system = "!@#*&")]
async fn main2() {}
fn main() {}


@@ -0,0 +1,5 @@
error: Expected path
--> $DIR/main-05-system-expect-path.rs:1:27
|
1 | #[actix_rt::main(system = "!@#*&")]
| ^^^^^^^


@@ -0,0 +1,7 @@
#[actix_rt::main(foo = "bar")]
async fn async_main() {}
#[actix_rt::main(bar::baz)]
async fn async_main2() {}
fn main() {}


@@ -0,0 +1,11 @@
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:1:18
|
1 | #[actix_rt::main(foo = "bar")]
| ^^^^^^^^^^^
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:4:18
|
4 | #[actix_rt::main(bar::baz)]
| ^^^^^^^^


@@ -0,0 +1,6 @@
#[actix_rt::test]
async fn my_test() {
assert!(true);
}
fn main() {}


@@ -0,0 +1,7 @@
#[actix_rt::test]
#[should_panic]
async fn my_test() {
todo!()
}
fn main() {}


@@ -0,0 +1,6 @@
#[actix_rt::test]
fn my_test() {
futures_util::future::ready(()).await
}
fn main() {}


@@ -0,0 +1,5 @@
error: the async keyword is missing from the function declaration
--> $DIR/test-03-only-async.rs:2:1
|
2 | fn my_test() {
| ^^

111
actix-router/CHANGES.md Normal file

@@ -0,0 +1,111 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.5.0-beta.1 - 2021-07-20
* Fix a bug in multi-patterns where static patterns are interpreted as regex. [#366]
* Introduce `ResourceDef::pattern_iter` to get an iterator over all patterns in a multi-pattern resource. [#373]
* Fix segment interpolation leaving `Path` in unintended state after matching. [#368]
* Fix `ResourceDef` `PartialEq` implementation. [#373]
* Re-work `IntoPatterns` trait, adding a `Patterns` enum. [#372]
* Implement `IntoPatterns` for `bytestring::ByteString`. [#372]
* Rename `Path::{len => segment_count}` to be more descriptive of its purpose. [#370]
* Rename `ResourceDef::{resource_path => resource_path_from_iter}`. [#371]
* `ResourceDef::resource_path_from_iter` now takes an `IntoIterator`. [#373]
* Rename `ResourceDef::{resource_path_named => resource_path_from_map}`. [#371]
* Rename `ResourceDef::{is_prefix_match => find_match}`. [#373]
* Rename `ResourceDef::{match_path => capture_match_info}`. [#373]
* Rename `ResourceDef::{match_path_checked => capture_match_info_fn}`. [#373]
* Remove `ResourceDef::name_mut` and introduce `ResourceDef::set_name`. [#373]
* Rename `Router::{*_checked => *_fn}`. [#373]
* Return type of `ResourceDef::name` is now `Option<&str>`. [#373]
* Return type of `ResourceDef::pattern` is now `Option<&str>`. [#373]
[#366]: https://github.com/actix/actix-net/pull/366
[#368]: https://github.com/actix/actix-net/pull/368
[#370]: https://github.com/actix/actix-net/pull/370
[#371]: https://github.com/actix/actix-net/pull/371
[#372]: https://github.com/actix/actix-net/pull/372
[#373]: https://github.com/actix/actix-net/pull/373
## 0.4.0 - 2021-06-06
* When matching path parameters, `%25` is now kept in the percent-encoded form; no longer decoded to `%`. [#357]
* Path tail patterns now match new lines (`\n`) in request URL. [#360]
* Fixed a safety bug where `Path` could return a malformed string after percent decoding. [#359]
* Methods `Path::{add, add_static}` now take `impl Into<Cow<'static, str>>`. [#345]
[#345]: https://github.com/actix/actix-net/pull/345
[#357]: https://github.com/actix/actix-net/pull/357
[#359]: https://github.com/actix/actix-net/pull/359
[#360]: https://github.com/actix/actix-net/pull/360
## 0.3.0 - 2019-12-31
* Version was yanked previously. See https://crates.io/crates/actix-router/0.3.0
## 0.2.7 - 2021-02-06
* Add `Router::recognize_checked` [#247]
[#247]: https://github.com/actix/actix-net/pull/247
## 0.2.6 - 2021-01-09
* Use `bytestring` version range compatible with Bytes v1.0. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 0.2.5 - 2020-09-20
* Fix `from_hex()` method
## 0.2.4 - 2019-12-31
* Add `ResourceDef::resource_path_named()` path generation method
## 0.2.3 - 2019-12-25
* Add impl `IntoPattern` for `&String`
## 0.2.2 - 2019-12-25
* Use `IntoPattern` for `RouterBuilder::path()`
## 0.2.1 - 2019-12-25
* Add `IntoPattern` trait
* Add multi-pattern resources
## 0.2.0 - 2019-12-07
* Update http to 0.2
* Update regex to 1.3
* Use bytestring instead of string
## 0.1.5 - 2019-05-15
* Remove debug prints
## 0.1.4 - 2019-05-15
* Fix checked resource match
## 0.1.3 - 2019-04-22
* Added support for `remainder match` (i.e. "/path/{tail}*")
## 0.1.2 - 2019-04-07
* Export `Quoter` type
* Allow resetting a `Path` instance
## 0.1.1 - 2019-04-03
* Get dynamic segment by name instead of iterator.
## 0.1.0 - 2019-03-09
* Initial release

38
actix-router/Cargo.toml Normal file

@@ -0,0 +1,38 @@
[package]
name = "actix-router"
version = "0.5.0-beta.1"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Ali MJ Al-Nasrawy <alimjalnasrawy@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Resource path matching and router"
keywords = ["actix", "router", "routing"]
repository = "https://github.com/actix/actix-net.git"
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_router"
path = "src/lib.rs"
[features]
default = ["http"]
[dependencies]
bytestring = ">=0.1.5, <2"
firestorm = "0.4"
http = { version = "0.2.3", optional = true }
log = "0.4"
regex = "1.5"
serde = "1"
[dev-dependencies]
criterion = { version = "0.3", features = ["html_reports"] }
firestorm = { version = "0.4", features = ["enable_system_time"] }
http = "0.2.3"
serde = { version = "1", features = ["derive"] }
[[bench]]
name = "router"
harness = false

1
actix-router/LICENSE-APACHE Symbolic link

@@ -0,0 +1 @@
../LICENSE-APACHE

1
actix-router/LICENSE-MIT Symbolic link

@@ -0,0 +1 @@
../LICENSE-MIT


@@ -0,0 +1,194 @@
//! Based on https://github.com/ibraheemdev/matchit/blob/master/benches/bench.rs
use criterion::{black_box, criterion_group, criterion_main, Criterion};
macro_rules! register {
(colon) => {{
register!(finish => ":p1", ":p2", ":p3", ":p4")
}};
(brackets) => {{
register!(finish => "{p1}", "{p2}", "{p3}", "{p4}")
}};
(regex) => {{
register!(finish => "(.*)", "(.*)", "(.*)", "(.*)")
}};
(finish => $p1:literal, $p2:literal, $p3:literal, $p4:literal) => {{
let arr = [
concat!("/authorizations"),
concat!("/authorizations/", $p1),
concat!("/applications/", $p1, "/tokens/", $p2),
concat!("/events"),
concat!("/repos/", $p1, "/", $p2, "/events"),
concat!("/networks/", $p1, "/", $p2, "/events"),
concat!("/orgs/", $p1, "/events"),
concat!("/users/", $p1, "/received_events"),
concat!("/users/", $p1, "/received_events/public"),
concat!("/users/", $p1, "/events"),
concat!("/users/", $p1, "/events/public"),
concat!("/users/", $p1, "/events/orgs/", $p2),
concat!("/feeds"),
concat!("/notifications"),
concat!("/repos/", $p1, "/", $p2, "/notifications"),
concat!("/notifications/threads/", $p1),
concat!("/notifications/threads/", $p1, "/subscription"),
concat!("/repos/", $p1, "/", $p2, "/stargazers"),
concat!("/users/", $p1, "/starred"),
concat!("/user/starred"),
concat!("/user/starred/", $p1, "/", $p2),
concat!("/repos/", $p1, "/", $p2, "/subscribers"),
concat!("/users/", $p1, "/subscriptions"),
concat!("/user/subscriptions"),
concat!("/repos/", $p1, "/", $p2, "/subscription"),
concat!("/user/subscriptions/", $p1, "/", $p2),
concat!("/users/", $p1, "/gists"),
concat!("/gists"),
concat!("/gists/", $p1),
concat!("/gists/", $p1, "/star"),
concat!("/repos/", $p1, "/", $p2, "/git/blobs/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/commits/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/refs"),
concat!("/repos/", $p1, "/", $p2, "/git/tags/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/trees/", $p3),
concat!("/issues"),
concat!("/user/issues"),
concat!("/orgs/", $p1, "/issues"),
concat!("/repos/", $p1, "/", $p2, "/issues"),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3),
concat!("/repos/", $p1, "/", $p2, "/assignees"),
concat!("/repos/", $p1, "/", $p2, "/assignees/", $p3),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/events"),
concat!("/repos/", $p1, "/", $p2, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/labels/", $p3),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/milestones/", $p3, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/milestones/"),
concat!("/repos/", $p1, "/", $p2, "/milestones/", $p3),
concat!("/emojis"),
concat!("/gitignore/templates"),
concat!("/gitignore/templates/", $p1),
concat!("/meta"),
concat!("/rate_limit"),
concat!("/users/", $p1, "/orgs"),
concat!("/user/orgs"),
concat!("/orgs/", $p1),
concat!("/orgs/", $p1, "/members"),
concat!("/orgs/", $p1, "/members", $p2),
concat!("/orgs/", $p1, "/public_members"),
concat!("/orgs/", $p1, "/public_members/", $p2),
concat!("/orgs/", $p1, "/teams"),
concat!("/teams/", $p1),
concat!("/teams/", $p1, "/members"),
concat!("/teams/", $p1, "/members", $p2),
concat!("/teams/", $p1, "/repos"),
concat!("/teams/", $p1, "/repos/", $p2, "/", $p3),
concat!("/user/teams"),
concat!("/repos/", $p1, "/", $p2, "/pulls"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/commits"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/files"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/merge"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/comments"),
concat!("/user/repos"),
concat!("/users/", $p1, "/repos"),
concat!("/orgs/", $p1, "/repos"),
concat!("/repositories"),
concat!("/repos/", $p1, "/", $p2),
concat!("/repos/", $p1, "/", $p2, "/contributors"),
concat!("/repos/", $p1, "/", $p2, "/languages"),
concat!("/repos/", $p1, "/", $p2, "/teams"),
concat!("/repos/", $p1, "/", $p2, "/tags"),
concat!("/repos/", $p1, "/", $p2, "/branches"),
concat!("/repos/", $p1, "/", $p2, "/branches/", $p3),
concat!("/repos/", $p1, "/", $p2, "/collaborators"),
concat!("/repos/", $p1, "/", $p2, "/collaborators/", $p3),
concat!("/repos/", $p1, "/", $p2, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/commits/", $p3, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/commits"),
concat!("/repos/", $p1, "/", $p2, "/commits/", $p3),
concat!("/repos/", $p1, "/", $p2, "/readme"),
concat!("/repos/", $p1, "/", $p2, "/keys"),
concat!("/repos/", $p1, "/", $p2, "/keys", $p3),
concat!("/repos/", $p1, "/", $p2, "/downloads"),
concat!("/repos/", $p1, "/", $p2, "/downloads", $p3),
concat!("/repos/", $p1, "/", $p2, "/forks"),
concat!("/repos/", $p1, "/", $p2, "/hooks"),
concat!("/repos/", $p1, "/", $p2, "/hooks", $p3),
concat!("/repos/", $p1, "/", $p2, "/releases"),
concat!("/repos/", $p1, "/", $p2, "/releases/", $p3),
concat!("/repos/", $p1, "/", $p2, "/releases/", $p3, "/assets"),
concat!("/repos/", $p1, "/", $p2, "/stats/contributors"),
concat!("/repos/", $p1, "/", $p2, "/stats/commit_activity"),
concat!("/repos/", $p1, "/", $p2, "/stats/code_frequency"),
concat!("/repos/", $p1, "/", $p2, "/stats/participation"),
concat!("/repos/", $p1, "/", $p2, "/stats/punch_card"),
concat!("/repos/", $p1, "/", $p2, "/statuses/", $p3),
concat!("/search/repositories"),
concat!("/search/code"),
concat!("/search/issues"),
concat!("/search/users"),
concat!("/legacy/issues/search/", $p1, "/", $p2, "/", $p3, "/", $p4),
concat!("/legacy/repos/search/", $p1),
concat!("/legacy/user/search/", $p1),
concat!("/legacy/user/email/", $p1),
concat!("/users/", $p1),
concat!("/user"),
concat!("/users"),
concat!("/user/emails"),
concat!("/users/", $p1, "/followers"),
concat!("/user/followers"),
concat!("/users/", $p1, "/following"),
concat!("/user/following"),
concat!("/user/following/", $p1),
concat!("/users/", $p1, "/following", $p2),
concat!("/users/", $p1, "/keys"),
concat!("/user/keys"),
concat!("/user/keys/", $p1),
];
std::array::IntoIter::new(arr)
}};
}
fn call() -> impl Iterator<Item = &'static str> {
let arr = [
"/authorizations",
"/user/repos",
"/repos/rust-lang/rust/stargazers",
"/orgs/rust-lang/public_members/nikomatsakis",
"/repos/rust-lang/rust/releases/1.51.0",
];
std::array::IntoIter::new(arr)
}
fn compare_routers(c: &mut Criterion) {
let mut group = c.benchmark_group("Compare Routers");
let mut actix = actix_router::Router::<bool>::build();
for route in register!(brackets) {
actix.path(route, true);
}
let actix = actix.finish();
group.bench_function("actix", |b| {
b.iter(|| {
for route in call() {
let mut path = actix_router::Path::new(route);
black_box(actix.recognize(&mut path).unwrap());
}
});
});
let regex_set = regex::RegexSet::new(register!(regex)).unwrap();
group.bench_function("regex", |b| {
b.iter(|| {
for route in call() {
black_box(regex_set.matches(route));
}
});
});
group.finish();
}
criterion_group!(benches, compare_routers);
criterion_main!(benches);


@@ -0,0 +1,169 @@
macro_rules! register {
(brackets) => {{
register!(finish => "{p1}", "{p2}", "{p3}", "{p4}")
}};
(finish => $p1:literal, $p2:literal, $p3:literal, $p4:literal) => {{
let arr = [
concat!("/authorizations"),
concat!("/authorizations/", $p1),
concat!("/applications/", $p1, "/tokens/", $p2),
concat!("/events"),
concat!("/repos/", $p1, "/", $p2, "/events"),
concat!("/networks/", $p1, "/", $p2, "/events"),
concat!("/orgs/", $p1, "/events"),
concat!("/users/", $p1, "/received_events"),
concat!("/users/", $p1, "/received_events/public"),
concat!("/users/", $p1, "/events"),
concat!("/users/", $p1, "/events/public"),
concat!("/users/", $p1, "/events/orgs/", $p2),
concat!("/feeds"),
concat!("/notifications"),
concat!("/repos/", $p1, "/", $p2, "/notifications"),
concat!("/notifications/threads/", $p1),
concat!("/notifications/threads/", $p1, "/subscription"),
concat!("/repos/", $p1, "/", $p2, "/stargazers"),
concat!("/users/", $p1, "/starred"),
concat!("/user/starred"),
concat!("/user/starred/", $p1, "/", $p2),
concat!("/repos/", $p1, "/", $p2, "/subscribers"),
concat!("/users/", $p1, "/subscriptions"),
concat!("/user/subscriptions"),
concat!("/repos/", $p1, "/", $p2, "/subscription"),
concat!("/user/subscriptions/", $p1, "/", $p2),
concat!("/users/", $p1, "/gists"),
concat!("/gists"),
concat!("/gists/", $p1),
concat!("/gists/", $p1, "/star"),
concat!("/repos/", $p1, "/", $p2, "/git/blobs/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/commits/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/refs"),
concat!("/repos/", $p1, "/", $p2, "/git/tags/", $p3),
concat!("/repos/", $p1, "/", $p2, "/git/trees/", $p3),
concat!("/issues"),
concat!("/user/issues"),
concat!("/orgs/", $p1, "/issues"),
concat!("/repos/", $p1, "/", $p2, "/issues"),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3),
concat!("/repos/", $p1, "/", $p2, "/assignees"),
concat!("/repos/", $p1, "/", $p2, "/assignees/", $p3),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/events"),
concat!("/repos/", $p1, "/", $p2, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/labels/", $p3),
concat!("/repos/", $p1, "/", $p2, "/issues/", $p3, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/milestones/", $p3, "/labels"),
concat!("/repos/", $p1, "/", $p2, "/milestones/"),
concat!("/repos/", $p1, "/", $p2, "/milestones/", $p3),
concat!("/emojis"),
concat!("/gitignore/templates"),
concat!("/gitignore/templates/", $p1),
concat!("/meta"),
concat!("/rate_limit"),
concat!("/users/", $p1, "/orgs"),
concat!("/user/orgs"),
concat!("/orgs/", $p1),
concat!("/orgs/", $p1, "/members"),
concat!("/orgs/", $p1, "/members", $p2),
concat!("/orgs/", $p1, "/public_members"),
concat!("/orgs/", $p1, "/public_members/", $p2),
concat!("/orgs/", $p1, "/teams"),
concat!("/teams/", $p1),
concat!("/teams/", $p1, "/members"),
concat!("/teams/", $p1, "/members", $p2),
concat!("/teams/", $p1, "/repos"),
concat!("/teams/", $p1, "/repos/", $p2, "/", $p3),
concat!("/user/teams"),
concat!("/repos/", $p1, "/", $p2, "/pulls"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/commits"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/files"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/merge"),
concat!("/repos/", $p1, "/", $p2, "/pulls/", $p3, "/comments"),
concat!("/user/repos"),
concat!("/users/", $p1, "/repos"),
concat!("/orgs/", $p1, "/repos"),
concat!("/repositories"),
concat!("/repos/", $p1, "/", $p2),
concat!("/repos/", $p1, "/", $p2, "/contributors"),
concat!("/repos/", $p1, "/", $p2, "/languages"),
concat!("/repos/", $p1, "/", $p2, "/teams"),
concat!("/repos/", $p1, "/", $p2, "/tags"),
concat!("/repos/", $p1, "/", $p2, "/branches"),
concat!("/repos/", $p1, "/", $p2, "/branches/", $p3),
concat!("/repos/", $p1, "/", $p2, "/collaborators"),
concat!("/repos/", $p1, "/", $p2, "/collaborators/", $p3),
concat!("/repos/", $p1, "/", $p2, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/commits/", $p3, "/comments"),
concat!("/repos/", $p1, "/", $p2, "/commits"),
concat!("/repos/", $p1, "/", $p2, "/commits/", $p3),
concat!("/repos/", $p1, "/", $p2, "/readme"),
concat!("/repos/", $p1, "/", $p2, "/keys"),
concat!("/repos/", $p1, "/", $p2, "/keys", $p3),
concat!("/repos/", $p1, "/", $p2, "/downloads"),
concat!("/repos/", $p1, "/", $p2, "/downloads", $p3),
concat!("/repos/", $p1, "/", $p2, "/forks"),
concat!("/repos/", $p1, "/", $p2, "/hooks"),
concat!("/repos/", $p1, "/", $p2, "/hooks", $p3),
concat!("/repos/", $p1, "/", $p2, "/releases"),
concat!("/repos/", $p1, "/", $p2, "/releases/", $p3),
concat!("/repos/", $p1, "/", $p2, "/releases/", $p3, "/assets"),
concat!("/repos/", $p1, "/", $p2, "/stats/contributors"),
concat!("/repos/", $p1, "/", $p2, "/stats/commit_activity"),
concat!("/repos/", $p1, "/", $p2, "/stats/code_frequency"),
concat!("/repos/", $p1, "/", $p2, "/stats/participation"),
concat!("/repos/", $p1, "/", $p2, "/stats/punch_card"),
concat!("/repos/", $p1, "/", $p2, "/statuses/", $p3),
concat!("/search/repositories"),
concat!("/search/code"),
concat!("/search/issues"),
concat!("/search/users"),
concat!("/legacy/issues/search/", $p1, "/", $p2, "/", $p3, "/", $p4),
concat!("/legacy/repos/search/", $p1),
concat!("/legacy/user/search/", $p1),
concat!("/legacy/user/email/", $p1),
concat!("/users/", $p1),
concat!("/user"),
concat!("/users"),
concat!("/user/emails"),
concat!("/users/", $p1, "/followers"),
concat!("/user/followers"),
concat!("/users/", $p1, "/following"),
concat!("/user/following"),
concat!("/user/following/", $p1),
concat!("/users/", $p1, "/following", $p2),
concat!("/users/", $p1, "/keys"),
concat!("/user/keys"),
concat!("/user/keys/", $p1),
];
arr.to_vec()
}};
}
static PATHS: [&str; 5] = [
"/authorizations",
"/user/repos",
"/repos/rust-lang/rust/stargazers",
"/orgs/rust-lang/public_members/nikomatsakis",
"/repos/rust-lang/rust/releases/1.51.0",
];
fn main() {
let mut router = actix_router::Router::<bool>::build();
for route in register!(brackets) {
router.path(route, true);
}
let actix = router.finish();
if firestorm::enabled() {
firestorm::bench("target", || {
for &route in &PATHS {
let mut path = actix_router::Path::new(route);
actix.recognize(&mut path).unwrap();
}
})
.unwrap();
}
}

723
actix-router/src/de.rs Normal file

@@ -0,0 +1,723 @@
use serde::de::{self, Deserializer, Error as DeError, Visitor};
use serde::forward_to_deserialize_any;
use crate::path::{Path, PathIter};
use crate::ResourcePath;
macro_rules! unsupported_type {
($trait_fn:ident, $name:expr) => {
fn $trait_fn<V>(self, _: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom(concat!(
"unsupported type: ",
$name
)))
}
};
}
macro_rules! parse_single_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.segment_count() != 1 {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected 1",
self.path.segment_count()
)
.as_str(),
))
} else {
let v = self.path[0].parse().map_err(|_| {
de::value::Error::custom(format!(
"can not parse {:?} to a {}",
&self.path[0], $tp
))
})?;
visitor.$visit_fn(v)
}
}
};
}
pub struct PathDeserializer<'de, T: ResourcePath> {
path: &'de Path<T>,
}
impl<'de, T: ResourcePath + 'de> PathDeserializer<'de, T> {
pub fn new(path: &'de Path<T>) -> Self {
PathDeserializer { path }
}
}
impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T> {
type Error = de::value::Error;
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_map(ParamsDeserializer {
params: self.path.iter(),
current: None,
})
}
fn deserialize_struct<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_map(visitor)
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_unit(visitor)
}
fn deserialize_newtype_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_tuple<V>(self, len: usize, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.segment_count() < len {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected {}",
self.path.segment_count(),
len
)
.as_str(),
))
} else {
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
}
fn deserialize_tuple_struct<V>(
self,
_: &'static str,
len: usize,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.segment_count() < len {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected {}",
self.path.segment_count(),
len
)
.as_str(),
))
} else {
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
}
fn deserialize_enum<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.is_empty() {
Err(de::value::Error::custom("expected at least one parameter"))
} else {
visitor.visit_enum(ValueEnum {
value: &self.path[0],
})
}
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.segment_count() != 1 {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected 1",
self.path.segment_count()
)
.as_str(),
))
} else {
visitor.visit_str(&self.path[0])
}
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
unsupported_type!(deserialize_any, "'any'");
unsupported_type!(deserialize_bytes, "bytes");
unsupported_type!(deserialize_option, "Option<T>");
unsupported_type!(deserialize_identifier, "identifier");
unsupported_type!(deserialize_ignored_any, "ignored_any");
parse_single_value!(deserialize_bool, visit_bool, "bool");
parse_single_value!(deserialize_i8, visit_i8, "i8");
parse_single_value!(deserialize_i16, visit_i16, "i16");
parse_single_value!(deserialize_i32, visit_i32, "i32");
parse_single_value!(deserialize_i64, visit_i64, "i64");
parse_single_value!(deserialize_u8, visit_u8, "u8");
parse_single_value!(deserialize_u16, visit_u16, "u16");
parse_single_value!(deserialize_u32, visit_u32, "u32");
parse_single_value!(deserialize_u64, visit_u64, "u64");
parse_single_value!(deserialize_f32, visit_f32, "f32");
parse_single_value!(deserialize_f64, visit_f64, "f64");
parse_single_value!(deserialize_string, visit_string, "String");
parse_single_value!(deserialize_byte_buf, visit_string, "String");
parse_single_value!(deserialize_char, visit_char, "char");
}
struct ParamsDeserializer<'de, T: ResourcePath> {
params: PathIter<'de, T>,
current: Option<(&'de str, &'de str)>,
}
impl<'de, T: ResourcePath> de::MapAccess<'de> for ParamsDeserializer<'de, T> {
type Error = de::value::Error;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Self::Error>
where
K: de::DeserializeSeed<'de>,
{
self.current = self.params.next().map(|ref item| (item.0, item.1));
match self.current {
Some((key, _)) => Ok(Some(seed.deserialize(Key { key })?)),
None => Ok(None),
}
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Self::Error>
where
V: de::DeserializeSeed<'de>,
{
if let Some((_, value)) = self.current.take() {
seed.deserialize(Value { value })
} else {
Err(de::value::Error::custom("unexpected item"))
}
}
}
struct Key<'de> {
key: &'de str,
}
impl<'de> Deserializer<'de> for Key<'de> {
type Error = de::value::Error;
fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_str(self.key)
}
fn deserialize_any<V>(self, _visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("Unexpected"))
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string bytes
byte_buf option unit unit_struct newtype_struct seq tuple
tuple_struct map struct enum ignored_any
}
}
macro_rules! parse_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
let v = self.value.parse().map_err(|_| {
de::value::Error::custom(format!("can not parse {:?} to a {}", self.value, $tp))
})?;
visitor.$visit_fn(v)
}
};
}
struct Value<'de> {
value: &'de str,
}
impl<'de> Deserializer<'de> for Value<'de> {
type Error = de::value::Error;
parse_value!(deserialize_bool, visit_bool, "bool");
parse_value!(deserialize_i8, visit_i8, "i8");
parse_value!(deserialize_i16, visit_i16, "i16");
parse_value!(deserialize_i32, visit_i32, "i32");
parse_value!(deserialize_i64, visit_i64, "i64");
parse_value!(deserialize_u8, visit_u8, "u8");
parse_value!(deserialize_u16, visit_u16, "u16");
parse_value!(deserialize_u32, visit_u32, "u32");
parse_value!(deserialize_u64, visit_u64, "u64");
parse_value!(deserialize_f32, visit_f32, "f32");
parse_value!(deserialize_f64, visit_f64, "f64");
parse_value!(deserialize_string, visit_string, "String");
parse_value!(deserialize_byte_buf, visit_string, "String");
parse_value!(deserialize_char, visit_char, "char");
fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_borrowed_bytes(self.value.as_bytes())
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_borrowed_str(self.value)
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_some(self)
}
fn deserialize_enum<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_enum(ValueEnum { value: self.value })
}
fn deserialize_newtype_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_tuple<V>(self, _: usize, _: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: tuple"))
}
fn deserialize_struct<V>(
self,
_: &'static str,
_: &'static [&'static str],
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: struct"))
}
fn deserialize_tuple_struct<V>(
self,
_: &'static str,
_: usize,
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: tuple struct"))
}
unsupported_type!(deserialize_any, "any");
unsupported_type!(deserialize_seq, "seq");
unsupported_type!(deserialize_map, "map");
unsupported_type!(deserialize_identifier, "identifier");
}
struct ParamsSeq<'de, T: ResourcePath> {
params: PathIter<'de, T>,
}
impl<'de, T: ResourcePath> de::SeqAccess<'de> for ParamsSeq<'de, T> {
type Error = de::value::Error;
fn next_element_seed<U>(&mut self, seed: U) -> Result<Option<U::Value>, Self::Error>
where
U: de::DeserializeSeed<'de>,
{
match self.params.next() {
Some(item) => Ok(Some(seed.deserialize(Value { value: item.1 })?)),
None => Ok(None),
}
}
}
struct ValueEnum<'de> {
value: &'de str,
}
impl<'de> de::EnumAccess<'de> for ValueEnum<'de> {
type Error = de::value::Error;
type Variant = UnitVariant;
fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
where
V: de::DeserializeSeed<'de>,
{
Ok((seed.deserialize(Key { key: self.value })?, UnitVariant))
}
}
struct UnitVariant;
impl<'de> de::VariantAccess<'de> for UnitVariant {
type Error = de::value::Error;
fn unit_variant(self) -> Result<(), Self::Error> {
Ok(())
}
fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value, Self::Error>
where
T: de::DeserializeSeed<'de>,
{
Err(de::value::Error::custom("not supported"))
}
fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("not supported"))
}
fn struct_variant<V>(
self,
_: &'static [&'static str],
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("not supported"))
}
}
#[cfg(test)]
mod tests {
use serde::{de, Deserialize};
use super::*;
use crate::path::Path;
use crate::router::Router;
#[derive(Deserialize)]
struct MyStruct {
key: String,
value: String,
}
#[derive(Deserialize)]
struct Id {
_id: String,
}
#[derive(Debug, Deserialize)]
struct Test1(String, u32);
#[derive(Debug, Deserialize)]
struct Test2 {
key: String,
value: u32,
}
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum TestEnum {
Val1,
Val2,
}
#[derive(Debug, Deserialize)]
struct Test3 {
val: TestEnum,
}
#[test]
fn test_request_extract() {
let mut router = Router::<()>::build();
router.path("/{key}/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/user1/");
assert!(router.recognize(&mut path).is_some());
let s: MyStruct = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.key, "name");
assert_eq!(s.value, "user1");
let s: (String, String) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, "user1");
let mut router = Router::<()>::build();
router.path("/{key}/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/32/");
assert!(router.recognize(&mut path).is_some());
let s: Test1 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, 32);
let s: Test2 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.key, "name");
assert_eq!(s.value, 32);
let s: (String, u8) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, 32);
let res: Vec<String> =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(res[0], "name".to_owned());
assert_eq!(res[1], "32".to_owned());
}
#[test]
fn test_extract_path_single() {
let mut router = Router::<()>::build();
router.path("/{value}/", ());
let router = router.finish();
let mut path = Path::new("/32/");
assert!(router.recognize(&mut path).is_some());
let i: i8 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, 32);
}
#[test]
fn test_extract_enum() {
let mut router = Router::<()>::build();
router.path("/{val}/", ());
let router = router.finish();
let mut path = Path::new("/val1/");
assert!(router.recognize(&mut path).is_some());
let i: TestEnum = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, TestEnum::Val1);
let mut router = Router::<()>::build();
router.path("/{val1}/{val2}/", ());
let router = router.finish();
let mut path = Path::new("/val1/val2/");
assert!(router.recognize(&mut path).is_some());
let i: (TestEnum, TestEnum) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, (TestEnum::Val1, TestEnum::Val2));
}
#[test]
fn test_extract_enum_value() {
let mut router = Router::<()>::build();
router.path("/{val}/", ());
let router = router.finish();
let mut path = Path::new("/val1/");
assert!(router.recognize(&mut path).is_some());
let i: Test3 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i.val, TestEnum::Val1);
let mut path = Path::new("/val3/");
assert!(router.recognize(&mut path).is_some());
let i: Result<Test3, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(i.is_err());
assert!(format!("{:?}", i).contains("unknown variant"));
}
#[test]
fn test_extract_errors() {
let mut router = Router::<()>::build();
router.path("/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/");
assert!(router.recognize(&mut path).is_some());
let s: Result<Test1, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("wrong number of parameters"));
let s: Result<Test2, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("can not parse"));
let s: Result<(String, String), de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("wrong number of parameters"));
let s: Result<u32, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("can not parse"));
}
// #[test]
// fn test_extract_path_decode() {
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
// macro_rules! test_single_value {
// ($value:expr, $expected:expr) => {{
// let req = TestRequest::with_uri($value).finish();
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// assert_eq!(
// *Path::<String>::from_request(&req, &PathConfig::default()).unwrap(),
// $expected
// );
// }};
// }
// test_single_value!("/%25/", "%");
// test_single_value!("/%40%C2%A3%24%25%5E%26%2B%3D/", "@£$%^&+=");
// test_single_value!("/%2B/", "+");
// test_single_value!("/%252B/", "%2B");
// test_single_value!("/%2F/", "/");
// test_single_value!("/%252F/", "%2F");
// test_single_value!(
// "/http%3A%2F%2Flocalhost%3A80%2Ffoo/",
// "http://localhost:80/foo"
// );
// test_single_value!("/%2Fvar%2Flog%2Fsyslog/", "/var/log/syslog");
// test_single_value!(
// "/http%3A%2F%2Flocalhost%3A80%2Ffile%2F%252Fvar%252Flog%252Fsyslog/",
// "http://localhost:80/file/%2Fvar%2Flog%2Fsyslog"
// );
// let req = TestRequest::with_uri("/%25/7/?id=test").finish();
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/")));
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// let s = Path::<Test2>::from_request(&req, &PathConfig::default()).unwrap();
// assert_eq!(s.key, "%");
// assert_eq!(s.value, 7);
// let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap();
// assert_eq!(s.0, "%");
// assert_eq!(s.1, "7");
// }
// #[test]
// fn test_extract_path_no_decode() {
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
// let req = TestRequest::with_uri("/%25/").finish();
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// assert_eq!(
// *Path::<String>::from_request(&req, &&PathConfig::default().disable_decoding())
// .unwrap(),
// "%25"
// );
// }
}

149
actix-router/src/lib.rs Normal file

@@ -0,0 +1,149 @@
//! Resource path matching and router.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod de;
mod path;
mod resource;
mod router;
pub use self::de::PathDeserializer;
pub use self::path::Path;
pub use self::resource::ResourceDef;
pub use self::router::{ResourceInfo, Router, RouterBuilder};
// TODO: this trait is necessary, document it
// see impl Resource for ServiceRequest
pub trait Resource<T: ResourcePath> {
fn resource_path(&mut self) -> &mut Path<T>;
}
pub trait ResourcePath {
fn path(&self) -> &str;
}
impl ResourcePath for String {
fn path(&self) -> &str {
self.as_str()
}
}
impl<'a> ResourcePath for &'a str {
fn path(&self) -> &str {
self
}
}
impl ResourcePath for bytestring::ByteString {
fn path(&self) -> &str {
&*self
}
}
/// One or many patterns.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Patterns {
Single(String),
List(Vec<String>),
}
impl Patterns {
pub fn is_empty(&self) -> bool {
match self {
Patterns::Single(_) => false,
Patterns::List(pats) => pats.is_empty(),
}
}
}
/// Helper trait for types that can be converted to one or more path patterns.
pub trait IntoPatterns {
fn patterns(&self) -> Patterns;
}
impl IntoPatterns for String {
fn patterns(&self) -> Patterns {
Patterns::Single(self.clone())
}
}
impl<'a> IntoPatterns for &'a String {
fn patterns(&self) -> Patterns {
Patterns::Single((*self).clone())
}
}
impl<'a> IntoPatterns for &'a str {
fn patterns(&self) -> Patterns {
Patterns::Single((*self).to_owned())
}
}
impl IntoPatterns for bytestring::ByteString {
fn patterns(&self) -> Patterns {
Patterns::Single(self.to_string())
}
}
impl IntoPatterns for Patterns {
fn patterns(&self) -> Patterns {
self.clone()
}
}
impl<T: AsRef<str>> IntoPatterns for Vec<T> {
fn patterns(&self) -> Patterns {
let mut patterns = self.iter().map(|v| v.as_ref().to_owned());
match patterns.size_hint() {
(1, _) => Patterns::Single(patterns.next().unwrap()),
_ => Patterns::List(patterns.collect()),
}
}
}
macro_rules! array_patterns_single (($tp:ty) => {
impl IntoPatterns for [$tp; 1] {
fn patterns(&self) -> Patterns {
Patterns::Single(self[0].to_owned())
}
}
});
macro_rules! array_patterns_multiple (($tp:ty, $str_fn:expr, $($num:tt) +) => {
// for each array length specified in $num
$(
impl IntoPatterns for [$tp; $num] {
fn patterns(&self) -> Patterns {
Patterns::List(self.iter().map($str_fn).collect())
}
}
)+
});
array_patterns_single!(&str);
array_patterns_multiple!(&str, |&v| v.to_owned(), 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16);
array_patterns_single!(String);
array_patterns_multiple!(String, |v| v.clone(), 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16);
#[cfg(feature = "http")]
mod url;
#[cfg(feature = "http")]
pub use self::url::{Quoter, Url};
#[cfg(feature = "http")]
mod http_impls {
use http::Uri;
use super::ResourcePath;
impl ResourcePath for Uri {
fn path(&self) -> &str {
self.path()
}
}
}
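
A short sketch (illustrative only) of how these impls behave: a single value yields `Patterns::Single`, while arrays and `Vec`s of two or more yield `Patterns::List`.

```rust
use actix_router::{IntoPatterns, Patterns};

fn main() {
    // A single pattern stays a `Patterns::Single`.
    assert_eq!(
        "/user/{id}".patterns(),
        Patterns::Single("/user/{id}".to_owned())
    );

    // Two or more patterns collect into a `Patterns::List`.
    assert_eq!(
        ["/user/{id}", "/profile/{id}"].patterns(),
        Patterns::List(vec![
            "/user/{id}".to_owned(),
            "/profile/{id}".to_owned(),
        ])
    );
}
```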

220
actix-router/src/path.rs Normal file

@@ -0,0 +1,220 @@
use std::borrow::Cow;
use std::ops::Index;
use firestorm::profile_method;
use serde::de;
use crate::{de::PathDeserializer, Resource, ResourcePath};
#[derive(Debug, Clone)]
pub(crate) enum PathItem {
Static(Cow<'static, str>),
Segment(u16, u16),
}
impl Default for PathItem {
fn default() -> Self {
Self::Static(Cow::Borrowed(""))
}
}
/// Resource path match information.
///
/// If the resource path contains variable patterns, `Path` stores them.
#[derive(Debug, Clone, Default)]
pub struct Path<T> {
path: T,
pub(crate) skip: u16,
pub(crate) segments: Vec<(Cow<'static, str>, PathItem)>,
}
impl<T: ResourcePath> Path<T> {
pub fn new(path: T) -> Path<T> {
Path {
path,
skip: 0,
segments: Vec::new(),
}
}
/// Get reference to inner path instance.
#[inline]
pub fn get_ref(&self) -> &T {
&self.path
}
/// Get mutable reference to inner path instance.
#[inline]
pub fn get_mut(&mut self) -> &mut T {
&mut self.path
}
/// Returns the path, with the skipped prefix removed.
#[inline]
pub fn path(&self) -> &str {
profile_method!(path);
let skip = self.skip as usize;
let path = self.path.path();
if skip <= path.len() {
&path[skip..]
} else {
""
}
}
/// Set new path.
#[inline]
pub fn set(&mut self, path: T) {
self.skip = 0;
self.path = path;
self.segments.clear();
}
/// Reset state.
#[inline]
pub fn reset(&mut self) {
self.skip = 0;
self.segments.clear();
}
/// Skip first `n` chars in path.
#[inline]
pub fn skip(&mut self, n: u16) {
self.skip += n;
}
pub(crate) fn add(&mut self, name: impl Into<Cow<'static, str>>, value: PathItem) {
profile_method!(add);
match value {
PathItem::Static(s) => self.segments.push((name.into(), PathItem::Static(s))),
PathItem::Segment(begin, end) => self.segments.push((
name.into(),
PathItem::Segment(self.skip + begin, self.skip + end),
)),
}
}
#[doc(hidden)]
pub fn add_static(
&mut self,
name: impl Into<Cow<'static, str>>,
value: impl Into<Cow<'static, str>>,
) {
self.segments
.push((name.into(), PathItem::Static(value.into())));
}
/// Returns `true` if there are no matched segments.
#[inline]
pub fn is_empty(&self) -> bool {
self.segments.is_empty()
}
/// Returns number of interpolated segments.
#[inline]
pub fn segment_count(&self) -> usize {
self.segments.len()
}
/// Get matched parameter by name without type conversion
pub fn get(&self, name: &str) -> Option<&str> {
profile_method!(get);
for (seg_name, val) in self.segments.iter() {
if name == seg_name {
return match val {
PathItem::Static(ref s) => Some(&s),
PathItem::Segment(s, e) => {
Some(&self.path.path()[(*s as usize)..(*e as usize)])
}
};
}
}
None
}
/// Get unprocessed part of the path
pub fn unprocessed(&self) -> &str {
&self.path.path()[(self.skip as usize)..]
}
/// Get matched parameter by name.
///
/// If the keyed parameter is not available, an empty string is used as the default value.
pub fn query(&self, key: &str) -> &str {
profile_method!(query);
if let Some(s) = self.get(key) {
s
} else {
""
}
}
/// Returns an iterator over the items in the parameter container.
pub fn iter(&self) -> PathIter<'_, T> {
PathIter {
idx: 0,
params: self,
}
}
/// Try to deserialize matching parameters to a specified type `U`
pub fn load<'de, U: serde::Deserialize<'de>>(&'de self) -> Result<U, de::value::Error> {
profile_method!(load);
de::Deserialize::deserialize(PathDeserializer::new(self))
}
}
#[derive(Debug)]
pub struct PathIter<'a, T> {
idx: usize,
params: &'a Path<T>,
}
impl<'a, T: ResourcePath> Iterator for PathIter<'a, T> {
type Item = (&'a str, &'a str);
#[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> {
if self.idx < self.params.segment_count() {
let idx = self.idx;
let res = match self.params.segments[idx].1 {
PathItem::Static(ref s) => &s,
PathItem::Segment(s, e) => &self.params.path.path()[(s as usize)..(e as usize)],
};
self.idx += 1;
return Some((&self.params.segments[idx].0, res));
}
None
}
}
impl<'a, T: ResourcePath> Index<&'a str> for Path<T> {
type Output = str;
fn index(&self, name: &'a str) -> &str {
self.get(name)
.expect("Value for parameter is not available")
}
}
impl<T: ResourcePath> Index<usize> for Path<T> {
type Output = str;
fn index(&self, idx: usize) -> &str {
match self.segments[idx].1 {
PathItem::Static(ref s) => &s,
PathItem::Segment(s, e) => &self.path.path()[(s as usize)..(e as usize)],
}
}
}
impl<T: ResourcePath> Resource<T> for Path<T> {
fn resource_path(&mut self) -> &mut Self {
self
}
}
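
As a usage sketch (illustrative only): a successful `ResourceDef` match populates the `Path` with captured segments, which can then be read by name, by index, or deserialized via `load`.

```rust
use actix_router::{Path, ResourceDef};

fn main() {
    let resource = ResourceDef::new("/user/{id}/{name}");
    let mut path = Path::new("/user/32/adam");

    // A successful match records the captured segments in the `Path`.
    assert!(resource.capture_match_info(&mut path));

    assert_eq!(path.get("id"), Some("32"));
    assert_eq!(&path["name"], "adam");
    assert_eq!(path.segment_count(), 2);

    // Captured segments can also be deserialized into typed values.
    let (id, name): (u32, String) = path.load().unwrap();
    assert_eq!(id, 32);
    assert_eq!(name, "adam");
}
```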

1681
actix-router/src/resource.rs Normal file

File diff suppressed because it is too large

281
actix-router/src/router.rs Normal file

@@ -0,0 +1,281 @@
use firestorm::profile_method;
use crate::{IntoPatterns, Resource, ResourceDef, ResourcePath};
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ResourceId(pub u16);
/// Information about current resource
#[derive(Clone, Debug)]
pub struct ResourceInfo {
resource: ResourceId,
}
/// Resource router.
// T is the resource itself
// U is any other data needed for routing like method guards
pub struct Router<T, U = ()> {
routes: Vec<(ResourceDef, T, Option<U>)>,
}
impl<T, U> Router<T, U> {
pub fn build() -> RouterBuilder<T, U> {
RouterBuilder {
resources: Vec::new(),
}
}
pub fn recognize<R, P>(&self, resource: &mut R) -> Option<(&T, ResourceId)>
where
R: Resource<P>,
P: ResourcePath,
{
profile_method!(recognize);
for item in self.routes.iter() {
if item.0.capture_match_info(resource.resource_path()) {
return Some((&item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_mut<R, P>(&mut self, resource: &mut R) -> Option<(&mut T, ResourceId)>
where
R: Resource<P>,
P: ResourcePath,
{
profile_method!(recognize_mut);
for item in self.routes.iter_mut() {
if item.0.capture_match_info(resource.resource_path()) {
return Some((&mut item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_fn<R, P, F>(&self, resource: &mut R, check: F) -> Option<(&T, ResourceId)>
where
F: Fn(&R, &Option<U>) -> bool,
R: Resource<P>,
P: ResourcePath,
{
profile_method!(recognize_checked);
for item in self.routes.iter() {
if item.0.capture_match_info_fn(resource, &check, &item.2) {
return Some((&item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_mut_fn<R, P, F>(
&mut self,
resource: &mut R,
check: F,
) -> Option<(&mut T, ResourceId)>
where
F: Fn(&R, &Option<U>) -> bool,
R: Resource<P>,
P: ResourcePath,
{
profile_method!(recognize_mut_checked);
for item in self.routes.iter_mut() {
if item.0.capture_match_info_fn(resource, &check, &item.2) {
return Some((&mut item.1, ResourceId(item.0.id())));
}
}
None
}
}
pub struct RouterBuilder<T, U = ()> {
resources: Vec<(ResourceDef, T, Option<U>)>,
}
impl<T, U> RouterBuilder<T, U> {
/// Register resource for specified path.
pub fn path<P: IntoPatterns>(
&mut self,
path: P,
resource: T,
) -> &mut (ResourceDef, T, Option<U>) {
profile_method!(path);
self.resources
.push((ResourceDef::new(path), resource, None));
self.resources.last_mut().unwrap()
}
/// Register resource for specified path prefix.
pub fn prefix(&mut self, prefix: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) {
profile_method!(prefix);
self.resources
.push((ResourceDef::prefix(prefix), resource, None));
self.resources.last_mut().unwrap()
}
/// Register resource for ResourceDef
pub fn rdef(&mut self, rdef: ResourceDef, resource: T) -> &mut (ResourceDef, T, Option<U>) {
profile_method!(rdef);
self.resources.push((rdef, resource, None));
self.resources.last_mut().unwrap()
}
/// Finish configuration and create router instance.
pub fn finish(self) -> Router<T, U> {
Router {
routes: self.resources,
}
}
}
#[cfg(test)]
mod tests {
use crate::path::Path;
use crate::router::{ResourceId, Router};
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_recognizer_1() {
let mut router = Router::<usize>::build();
router.path("/name", 10).0.set_id(0);
router.path("/name/{val}", 11).0.set_id(1);
router.path("/name/{val}/index.html", 12).0.set_id(2);
router.path("/file/{file}.{ext}", 13).0.set_id(3);
router.path("/v{val}/{val2}/index.html", 14).0.set_id(4);
router.path("/v/{tail:.*}", 15).0.set_id(5);
router.path("/test2/{test}.html", 16).0.set_id(6);
router.path("/{test}/index.html", 17).0.set_id(7);
let mut router = router.finish();
let mut path = Path::new("/unknown");
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/name");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
assert_eq!(info, ResourceId(0));
assert!(path.is_empty());
let mut path = Path::new("/name/value");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(info, ResourceId(1));
assert_eq!(path.get("val").unwrap(), "value");
assert_eq!(&path["val"], "value");
let mut path = Path::new("/name/value2/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 12);
assert_eq!(info, ResourceId(2));
assert_eq!(path.get("val").unwrap(), "value2");
let mut path = Path::new("/file/file.gz");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 13);
assert_eq!(info, ResourceId(3));
assert_eq!(path.get("file").unwrap(), "file");
assert_eq!(path.get("ext").unwrap(), "gz");
let mut path = Path::new("/vtest/ttt/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 14);
assert_eq!(info, ResourceId(4));
assert_eq!(path.get("val").unwrap(), "test");
assert_eq!(path.get("val2").unwrap(), "ttt");
let mut path = Path::new("/v/blah-blah/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 15);
assert_eq!(info, ResourceId(5));
assert_eq!(path.get("tail").unwrap(), "blah-blah/index.html");
let mut path = Path::new("/test2/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 16);
assert_eq!(info, ResourceId(6));
assert_eq!(path.get("test").unwrap(), "index");
let mut path = Path::new("/bbb/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 17);
assert_eq!(info, ResourceId(7));
assert_eq!(path.get("test").unwrap(), "bbb");
}
#[test]
fn test_recognizer_2() {
let mut router = Router::<usize>::build();
router.path("/index.json", 10);
router.path("/{source}.json", 11);
let mut router = router.finish();
let mut path = Path::new("/index.json");
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test.json");
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
}
#[test]
fn test_recognizer_with_prefix() {
let mut router = Router::<usize>::build();
router.path("/name", 10).0.set_id(0);
router.path("/name/{val}", 11).0.set_id(1);
let mut router = router.finish();
let mut path = Path::new("/name");
path.skip(5);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test/name");
path.skip(5);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test/name/value");
path.skip(5);
let (h, id) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(id, ResourceId(1));
assert_eq!(path.get("val").unwrap(), "value");
assert_eq!(&path["val"], "value");
// same patterns
let mut router = Router::<usize>::build();
router.path("/name", 10);
router.path("/name/{val}", 11);
let mut router = router.finish();
let mut path = Path::new("/name");
path.skip(6);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test2/name");
path.skip(6);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test2/name-test");
path.skip(6);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test2/name/ttt");
path.skip(6);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(&path["val"], "ttt");
}
}
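
A sketch (illustrative only) of the `U` type parameter and `recognize_fn` in use: the third tuple element returned by the builder holds per-route user data, and the check closure can veto a match based on it. The "method" strings here are stand-ins for real guard data.

```rust
use actix_router::{Path, Router};

fn main() {
    let mut builder = Router::<&str, &str>::build();

    // Attach user data (here: a required method) to each route.
    builder.path("/user/{id}", "user handler").2 = Some("GET");
    builder.path("/posts", "posts handler").2 = Some("POST");

    let router = builder.finish();
    let mut path = Path::new("/user/32");

    // Only match routes whose user data passes the check.
    let (handler, _id) = router
        .recognize_fn(&mut path, |_res, method| *method == Some("GET"))
        .unwrap();

    assert_eq!(*handler, "user handler");
}
```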

288
actix-router/src/url.rs Normal file

@@ -0,0 +1,288 @@
use crate::ResourcePath;
#[allow(dead_code)]
const GEN_DELIMS: &[u8] = b":/?#[]@";
#[allow(dead_code)]
const SUB_DELIMS_WITHOUT_QS: &[u8] = b"!$'()*,";
#[allow(dead_code)]
const SUB_DELIMS: &[u8] = b"!$'()*,+?=;";
#[allow(dead_code)]
const RESERVED: &[u8] = b":/?#[]@!$'()*,+?=;";
#[allow(dead_code)]
const UNRESERVED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
-._~";
const ALLOWED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
-._~
!$'()*,";
const QS: &[u8] = b"+&=;b";
#[inline]
fn bit_at(array: &[u8], ch: u8) -> bool {
array[(ch >> 3) as usize] & (1 << (ch & 7)) != 0
}
#[inline]
fn set_bit(array: &mut [u8], ch: u8) {
array[(ch >> 3) as usize] |= 1 << (ch & 7)
}
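// Each 16-byte table stores one bit per ASCII value (16 * 8 = 128 bits).
// For example, ch = b'A' (0x41 = 65) maps to byte index 65 >> 3 == 8 and bit
// index 65 & 7 == 1, so `set_bit` sets bit 1 of `array[8]` and `bit_at` tests
// it with `array[8] & (1 << 1)`.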
thread_local! {
static DEFAULT_QUOTER: Quoter = Quoter::new(b"@:", b"%/+");
}
#[derive(Default, Clone, Debug)]
pub struct Url {
uri: http::Uri,
path: Option<String>,
}
impl Url {
pub fn new(uri: http::Uri) -> Url {
let path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
Url { uri, path }
}
pub fn with_quoter(uri: http::Uri, quoter: &Quoter) -> Url {
Url {
path: quoter.requote(uri.path().as_bytes()),
uri,
}
}
pub fn uri(&self) -> &http::Uri {
&self.uri
}
pub fn path(&self) -> &str {
if let Some(ref s) = self.path {
s
} else {
self.uri.path()
}
}
#[inline]
pub fn update(&mut self, uri: &http::Uri) {
self.uri = uri.clone();
self.path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
}
#[inline]
pub fn update_with_quoter(&mut self, uri: &http::Uri, quoter: &Quoter) {
self.uri = uri.clone();
self.path = quoter.requote(uri.path().as_bytes());
}
}
impl ResourcePath for Url {
#[inline]
fn path(&self) -> &str {
self.path()
}
}
pub struct Quoter {
safe_table: [u8; 16],
protected_table: [u8; 16],
}
impl Quoter {
pub fn new(safe: &[u8], protected: &[u8]) -> Quoter {
let mut q = Quoter {
safe_table: [0; 16],
protected_table: [0; 16],
};
// prepare safe table
for i in 0..128 {
if ALLOWED.contains(&i) {
set_bit(&mut q.safe_table, i);
}
if QS.contains(&i) {
set_bit(&mut q.safe_table, i);
}
}
for ch in safe {
set_bit(&mut q.safe_table, *ch)
}
// prepare protected table
for ch in protected {
set_bit(&mut q.safe_table, *ch);
set_bit(&mut q.protected_table, *ch);
}
q
}
pub fn requote(&self, val: &[u8]) -> Option<String> {
let mut has_pct = 0;
let mut pct = [b'%', 0, 0];
let mut idx = 0;
let mut cloned: Option<Vec<u8>> = None;
let len = val.len();
while idx < len {
let ch = val[idx];
if has_pct != 0 {
pct[has_pct] = val[idx];
has_pct += 1;
if has_pct == 3 {
has_pct = 0;
let buf = cloned.as_mut().unwrap();
if let Some(ch) = restore_ch(pct[1], pct[2]) {
if ch < 128 {
if bit_at(&self.protected_table, ch) {
buf.extend_from_slice(&pct);
idx += 1;
continue;
}
if bit_at(&self.safe_table, ch) {
buf.push(ch);
idx += 1;
continue;
}
}
buf.push(ch);
} else {
buf.extend_from_slice(&pct[..]);
}
}
} else if ch == b'%' {
has_pct = 1;
if cloned.is_none() {
let mut c = Vec::with_capacity(len);
c.extend_from_slice(&val[..idx]);
cloned = Some(c);
}
} else if let Some(ref mut cloned) = cloned {
cloned.push(ch)
}
idx += 1;
}
cloned.map(|data| String::from_utf8_lossy(&data).into_owned())
}
}
#[inline]
fn from_hex(v: u8) -> Option<u8> {
if (b'0'..=b'9').contains(&v) {
Some(v - 0x30) // ord('0') == 0x30
} else if (b'A'..=b'F').contains(&v) {
Some(v - 0x41 + 10) // ord('A') == 0x41
} else if (b'a'..=b'f').contains(&v) {
Some(v - 0x61 + 10) // ord('a') == 0x61
} else {
None
}
}
#[inline]
fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| d1 << 4 | d2))
}
#[cfg(test)]
mod tests {
use http::Uri;
use std::convert::TryFrom;
use super::*;
use crate::{Path, ResourceDef};
const PROTECTED: &[u8] = b"%/+";
fn match_url(pattern: &'static str, url: impl AsRef<str>) -> Path<Url> {
let re = ResourceDef::new(pattern);
let uri = Uri::try_from(url.as_ref()).unwrap();
let mut path = Path::new(Url::new(uri));
assert!(re.capture_match_info(&mut path));
path
}
fn percent_encode(data: &[u8]) -> String {
data.iter().map(|c| format!("%{:02X}", c)).collect()
}
#[test]
fn test_parse_url() {
let re = "/user/{id}/test";
let path = match_url(re, "/user/2345/test");
assert_eq!(path.get("id").unwrap(), "2345");
// "%25" should never be decoded into '%' to guarantee the output is a valid
// percent-encoded format
let path = match_url(re, "/user/qwe%25/test");
assert_eq!(path.get("id").unwrap(), "qwe%25");
let path = match_url(re, "/user/qwe%25rty/test");
assert_eq!(path.get("id").unwrap(), "qwe%25rty");
}
#[test]
fn test_protected_chars() {
let encoded = percent_encode(PROTECTED);
let path = match_url("/user/{id}/test", format!("/user/{}/test", encoded));
assert_eq!(path.get("id").unwrap(), &encoded);
}
#[test]
fn test_non_protected_ascii() {
let nonprotected_ascii = ('\u{0}'..='\u{7F}')
.filter(|&c| c.is_ascii() && !PROTECTED.contains(&(c as u8)))
.collect::<String>();
let encoded = percent_encode(nonprotected_ascii.as_bytes());
let path = match_url("/user/{id}/test", format!("/user/{}/test", encoded));
assert_eq!(path.get("id").unwrap(), &nonprotected_ascii);
}
#[test]
fn test_valid_utf8_multibyte() {
let test = ('\u{FF00}'..='\u{FFFF}').collect::<String>();
let encoded = percent_encode(test.as_bytes());
let path = match_url("/a/{id}/b", format!("/a/{}/b", &encoded));
assert_eq!(path.get("id").unwrap(), &test);
}
#[test]
fn test_invalid_utf8() {
let invalid_utf8 = percent_encode((0x80..=0xff).collect::<Vec<_>>().as_slice());
let uri = Uri::try_from(format!("/{}", invalid_utf8)).unwrap();
let path = Path::new(Url::new(uri));
// We should always get a valid utf8 string
assert!(String::from_utf8(path.path().as_bytes().to_owned()).is_ok());
}
#[test]
fn test_from_hex() {
let hex = b"0123456789abcdefABCDEF";
for i in 0..256 {
let c = i as u8;
if hex.contains(&c) {
assert!(from_hex(c).is_some())
} else {
assert!(from_hex(c).is_none())
}
}
let expected = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15,
];
for i in 0..hex.len() {
assert_eq!(from_hex(hex[i]).unwrap(), expected[i]);
}
}
}

158
actix-rt/CHANGES.md Normal file

@@ -0,0 +1,158 @@
# Changes
## Unreleased - 2021-xx-xx
## 2.2.0 - 2021-03-29
* **BREAKING** `ActixStream::{poll_read_ready, poll_write_ready}` methods now return
`Ready` object in ok variant. [#293]
* Breakage is acceptable since `ActixStream` was not intended to be public.
[#293]: https://github.com/actix/actix-net/pull/293
## 2.1.0 - 2021-02-24
* Add `ActixStream` extension trait to include readiness methods. [#276]
* Re-export `tokio::net::TcpSocket` in `net` module [#282]
[#276]: https://github.com/actix/actix-net/pull/276
[#282]: https://github.com/actix/actix-net/pull/282
## 2.0.2 - 2021-02-06
* Add `Arbiter::handle` to get a handle of an owned Arbiter. [#274]
* Add `System::try_current` for situations where actix may or may not be running a System. [#275]
[#274]: https://github.com/actix/actix-net/pull/274
[#275]: https://github.com/actix/actix-net/pull/275
## 2.0.1 - 2021-02-06
* Expose `JoinError` from Tokio. [#271]
[#271]: https://github.com/actix/actix-net/pull/271
## 2.0.0 - 2021-02-02
* Remove all Arbiter-local storage methods. [#262]
* Re-export `tokio::pin`. [#262]
[#262]: https://github.com/actix/actix-net/pull/262
## 2.0.0-beta.3 - 2021-01-31
* Remove `run_in_tokio`, `attach_to_tokio` and `AsyncSystemRunner`. [#253]
* Return `JoinHandle` from `actix_rt::spawn`. [#253]
* Remove old `Arbiter::spawn`. Implementation is now inlined into `actix_rt::spawn`. [#253]
* Rename `Arbiter::{send => spawn}` and `Arbiter::{exec_fn => spawn_fn}`. [#253]
* Remove `Arbiter::exec`. [#253]
* Remove deprecated `Arbiter::local_join` and `Arbiter::is_running`. [#253]
* `Arbiter::spawn` now accepts !Unpin futures. [#256]
* `System::new` no longer takes arguments. [#257]
* Remove `System::with_current`. [#257]
* Remove `Builder`. [#257]
* Add `System::with_init` as replacement for `Builder::run`. [#257]
* Rename `System::{is_set => is_registered}`. [#257]
* Add `ArbiterHandle` for sending messages to non-current-thread arbiters. [#257].
* `System::arbiter` now returns an `&ArbiterHandle`. [#257]
* `Arbiter::current` now returns an `ArbiterHandle` instead. [#257]
* `Arbiter::join` now takes self by value. [#257]
[#253]: https://github.com/actix/actix-net/pull/253
[#254]: https://github.com/actix/actix-net/pull/254
[#256]: https://github.com/actix/actix-net/pull/256
[#257]: https://github.com/actix/actix-net/pull/257
## 2.0.0-beta.2 - 2021-01-09
* Add `task` mod with re-export of `tokio::task::{spawn_blocking, yield_now, JoinHandle}` [#245]
* Add default "macros" feature to allow faster compile times when using `default-features=false`.
[#245]: https://github.com/actix/actix-net/pull/245
## 2.0.0-beta.1 - 2020-12-28
* Add `System::attach_to_tokio` method. [#173]
* Update `tokio` dependency to `1.0`. [#236]
* Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep`
to stay aligned with Tokio's naming. [#236]
* Remove `'static` lifetime requirement for `Runtime::block_on` and `SystemRunner::block_on`.
* These methods now accept `&self` when calling. [#236]
* Remove `'static` lifetime requirement for `System::run` and `Builder::run`. [#236]
* `Arbiter::spawn` now panics when `System` is not in scope. [#207]
* Fix workload issue by removing `PENDING` thread local. [#207]
[#207]: https://github.com/actix/actix-net/pull/207
[#236]: https://github.com/actix/actix-net/pull/236
## 1.1.1 - 2020-04-30
* Fix memory leak due to [#94] (see [#129] for more detail)
[#129]: https://github.com/actix/actix-net/issues/129
## 1.1.0 - 2020-04-08 (YANKED)
* Expose `System::is_set` to check if current system has been started [#99]
* Add `Arbiter::is_running` to check if event loop is running [#124]
* Add `Arbiter::local_join` associated function
to be able to `await` spawned futures [#94]
[#94]: https://github.com/actix/actix-net/pull/94
[#99]: https://github.com/actix/actix-net/pull/99
[#124]: https://github.com/actix/actix-net/pull/124
## 1.0.0 - 2019-12-11
* Update dependencies
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to tokio 0.2
* Fix compilation on non-unix platforms
## 1.0.0-alpha.2 - 2019-12-02
* Export `main` and `test` attribute macros
* Export `time` module (re-export of tokio-timer)
* Export `net` module (re-export of tokio-net)
## 1.0.0-alpha.1 - 2019-11-22
* Migrate to std::future and tokio 0.2
## 0.2.6 - 2019-11-14
* Allow joining arbiter's thread. #60
* Fix arbiter's thread panic message.
## 0.2.5 - 2019-09-02
* Add arbiter specific storage
## 0.2.4 - 2019-07-17
* Avoid a copy of the Future when initializing the Box. #29
## 0.2.3 - 2019-06-22
* Allow starting System using existing CurrentThread Handle #22
## 0.2.2 - 2019-03-28
* Moved `blocking` module to `actix-threadpool` crate
## 0.2.1 - 2019-03-11
* Added `blocking` module
* Added `Arbiter::exec_fn` - execute fn on the arbiter's thread
* Added `Arbiter::exec` - execute fn on the arbiter's thread and wait result
## 0.2.0 - 2019-03-06
* `run` method returns `io::Result<()>`
* Removed `Handle`
## 0.1.0 - 2018-12-09
* Initial release

33
actix-rt/Cargo.toml Normal file

@@ -0,0 +1,33 @@
[package]
name = "actix-rt"
version = "2.2.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
keywords = ["async", "futures", "io", "runtime"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net"
documentation = "https://docs.rs/actix-rt"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_rt"
path = "src/lib.rs"
[features]
default = ["macros"]
macros = ["actix-macros"]
[dependencies]
actix-macros = { version = "0.2.0", optional = true }
futures-core = { version = "0.3", default-features = false }
tokio = { version = "1.3", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
[dev-dependencies]
tokio = { version = "1.2", features = ["full"] }
hyper = { version = "0.14", default-features = false, features = ["server", "tcp", "http1"] }

1
actix-rt/LICENSE-APACHE Symbolic link

@@ -0,0 +1 @@
../LICENSE-APACHE

1
actix-rt/LICENSE-MIT Symbolic link

@@ -0,0 +1 @@
../LICENSE-MIT

14
actix-rt/README.md Normal file

@@ -0,0 +1,14 @@
# actix-rt
> Tokio-based single-threaded async runtime for the Actix ecosystem.
[![crates.io](https://img.shields.io/crates/v/actix-rt?label=latest)](https://crates.io/crates/actix-rt)
[![Documentation](https://docs.rs/actix-rt/badge.svg?version=2.2.0)](https://docs.rs/actix-rt/2.2.0)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/08/27/Rust-1.46.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-rt.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-rt/2.2.0/status.svg)](https://deps.rs/crate/actix-rt/2.2.0)
![Download](https://img.shields.io/crates/d/actix-rt.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/WghFtEH6Hb)
See crate documentation for more: https://docs.rs/actix-rt.


@@ -0,0 +1,28 @@
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};
use std::convert::Infallible;
use std::net::SocketAddr;
async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
Ok(Response::new(Body::from("Hello World")))
}
fn main() {
actix_rt::System::with_tokio_rt(|| {
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
})
.block_on(async {
let make_service =
make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) });
let server =
Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))).serve(make_service);
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
})
}


@@ -0,0 +1,60 @@
//! An example on how to build a multi-thread tokio runtime for Actix System.
//! Then spawn async task that can make use of work stealing of tokio runtime.
use actix_rt::System;
fn main() {
System::with_tokio_rt(|| {
// build system with a multi-thread tokio runtime.
tokio::runtime::Builder::new_multi_thread()
.worker_threads(2)
.enable_all()
.build()
.unwrap()
})
.block_on(async_main());
}
// async main function that acts like #[actix_web::main] or #[tokio::main]
async fn async_main() {
let (tx, rx) = tokio::sync::oneshot::channel();
// get a handle to system arbiter and spawn async task on it
System::current().arbiter().spawn(async {
// use tokio::spawn to get inside the context of multi thread tokio runtime
let h1 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
});
// work stealing occurs for this task spawn
let h2 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
});
h1.await.unwrap();
h2.await.unwrap();
let _ = tx.send(());
});
rx.await.unwrap();
let (tx, rx) = tokio::sync::oneshot::channel();
let now = std::time::Instant::now();
// without additional tokio::spawn, all spawned tasks run on single thread
System::current().arbiter().spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
let _ = tx.send(());
});
// previous spawn task has blocked the system arbiter thread
// so this task will wait for 2 seconds until it can be run
System::current().arbiter().spawn(async move {
println!("thread id is {:?}", std::thread::current().id());
assert!(now.elapsed() > std::time::Duration::from_secs(2));
});
rx.await.unwrap();
}

260
actix-rt/src/arbiter.rs Normal file

@@ -0,0 +1,260 @@
use std::{
cell::RefCell,
fmt,
future::Future,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
thread,
};
use futures_core::ready;
use tokio::{sync::mpsc, task::LocalSet};
use crate::{
runtime::{default_tokio_runtime, Runtime},
system::{System, SystemCommand},
};
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static HANDLE: RefCell<Option<ArbiterHandle>> = RefCell::new(None);
);
pub(crate) enum ArbiterCommand {
Stop,
Execute(Pin<Box<dyn Future<Output = ()> + Send>>),
}
impl fmt::Debug for ArbiterCommand {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ArbiterCommand::Stop => write!(f, "ArbiterCommand::Stop"),
ArbiterCommand::Execute(_) => write!(f, "ArbiterCommand::Execute"),
}
}
}
/// A handle for sending spawn and stop messages to an [Arbiter].
#[derive(Debug, Clone)]
pub struct ArbiterHandle {
tx: mpsc::UnboundedSender<ArbiterCommand>,
}
impl ArbiterHandle {
pub(crate) fn new(tx: mpsc::UnboundedSender<ArbiterCommand>) -> Self {
Self { tx }
}
/// Send a future to the [Arbiter]'s thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the [Arbiter] has died.
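///
/// # Examples
///
/// A minimal sketch (assuming a `System` has been started on the calling thread) of getting
/// a result back from the spawned future via a oneshot response channel:
///
/// ```no_run
/// use actix_rt::{Arbiter, System};
/// use tokio::sync::oneshot;
///
/// let _ = System::new();
/// let arbiter = Arbiter::new();
/// let (tx, _rx) = oneshot::channel();
///
/// arbiter.handle().spawn(async move {
///     // send the result back through the response channel
///     let _ = tx.send(1 + 1);
/// });
/// ```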
pub fn spawn<Fut>(&self, future: Fut) -> bool
where
Fut: Future<Output = ()> + Send + 'static,
{
self.tx
.send(ArbiterCommand::Execute(Box::pin(future)))
.is_ok()
}
/// Send a function to the [Arbiter]'s thread and execute it.
///
/// Any result from the function is discarded. If you require a result, include a response
/// channel in the function.
///
/// Returns true if function was sent successfully and false if the [Arbiter] has died.
pub fn spawn_fn<F>(&self, f: F) -> bool
where
F: FnOnce() + Send + 'static,
{
self.spawn(async { f() })
}
/// Instruct [Arbiter] to stop processing its event loop.
///
/// Returns true if stop message was sent successfully and false if the [Arbiter] has
/// been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
}
/// An Arbiter represents a thread that provides an asynchronous execution environment for futures
/// and functions.
///
/// When an arbiter is created, it spawns a new [OS thread](thread), and hosts an event loop.
#[derive(Debug)]
pub struct Arbiter {
tx: mpsc::UnboundedSender<ArbiterCommand>,
thread_handle: thread::JoinHandle<()>,
}
impl Arbiter {
/// Spawn a new Arbiter thread and start its event loop.
///
/// # Panics
/// Panics if a [System] is not registered on the current thread.
#[allow(clippy::new_without_default)]
pub fn new() -> Arbiter {
Self::with_tokio_rt(|| {
default_tokio_runtime().expect("Cannot create new Arbiter's Runtime.")
})
}
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
where
F: Fn() -> tokio::runtime::Runtime + Send + 'static,
{
let sys = System::current();
let system_id = sys.id();
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
let (tx, rx) = mpsc::unbounded_channel();
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
let thread_handle = thread::Builder::new()
.name(name.clone())
.spawn({
let tx = tx.clone();
move || {
let rt = Runtime::from(runtime_factory());
let hnd = ArbiterHandle::new(tx);
System::set_current(sys);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
// register arbiter
let _ = System::current()
.tx()
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
ready_tx.send(()).unwrap();
// run arbiter event processing loop
rt.block_on(ArbiterRunner { rx });
// deregister arbiter
let _ = System::current()
.tx()
.send(SystemCommand::DeregisterArbiter(arb_id));
}
})
.unwrap_or_else(|err| {
panic!("Cannot spawn Arbiter's thread: {:?}. {:?}", &name, err)
});
ready_rx.recv().unwrap();
Arbiter { tx, thread_handle }
}
/// Sets up an Arbiter runner in a new System using the provided runtime local task set.
pub(crate) fn in_new_system(local: &LocalSet) -> ArbiterHandle {
let (tx, rx) = mpsc::unbounded_channel();
let hnd = ArbiterHandle::new(tx);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
local.spawn_local(ArbiterRunner { rx });
hnd
}
/// Return a handle to this Arbiter's message sender.
pub fn handle(&self) -> ArbiterHandle {
ArbiterHandle::new(self.tx.clone())
}
/// Return a handle to the current thread's Arbiter's message sender.
///
/// # Panics
/// Panics if no Arbiter is running on the current thread.
pub fn current() -> ArbiterHandle {
HANDLE.with(|cell| match *cell.borrow() {
Some(ref hnd) => hnd.clone(),
None => panic!("Arbiter is not running."),
})
}
/// Stop Arbiter from continuing its event loop.
///
/// Returns true if stop message was sent successfully and false if the Arbiter has been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
/// Send a future to the Arbiter's thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the Arbiter has died.
pub fn spawn<Fut>(&self, future: Fut) -> bool
where
Fut: Future<Output = ()> + Send + 'static,
{
self.tx
.send(ArbiterCommand::Execute(Box::pin(future)))
.is_ok()
}
/// Send a function to the Arbiter's thread and execute it.
///
/// Any result from the function is discarded. If you require a result, include a response
/// channel in the function.
///
/// Returns true if function was sent successfully and false if the Arbiter has died.
pub fn spawn_fn<F>(&self, f: F) -> bool
where
F: FnOnce() + Send + 'static,
{
self.spawn(async { f() })
}
/// Wait for Arbiter's event loop to complete.
///
/// Joins the underlying OS thread handle. See [`JoinHandle::join`](thread::JoinHandle::join).
pub fn join(self) -> thread::Result<()> {
self.thread_handle.join()
}
}
/// A persistent future that processes [Arbiter] commands.
struct ArbiterRunner {
rx: mpsc::UnboundedReceiver<ArbiterCommand>,
}
impl Future for ArbiterRunner {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match ready!(Pin::new(&mut self.rx).poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process arbiter command
Some(item) => match item {
ArbiterCommand::Stop => {
return Poll::Ready(());
}
ArbiterCommand::Execute(task_fut) => {
tokio::task::spawn_local(task_fut);
}
},
}
}
}
}

168
actix-rt/src/lib.rs Normal file

@@ -0,0 +1,168 @@
//! Tokio-based single-threaded async runtime for the Actix ecosystem.
//!
//! In most parts of the Actix ecosystem, it has been chosen to use !Send futures. For this
//! reason, a single-threaded runtime is appropriate since it is guaranteed that futures will not
//! be moved between threads. This can result in small performance improvements over cases where
//! atomics would otherwise be needed.
//!
//! To achieve similar performance to multi-threaded, work-stealing runtimes, applications
//! using `actix-rt` will create multiple, mostly disconnected, single-threaded runtimes.
//! This approach has good performance characteristics for workloads where the majority of tasks
//! have similar runtime expense.
//!
//! The disadvantage is that idle threads will not steal work from very busy, stuck or otherwise
//! backlogged threads. Tasks that are disproportionately expensive should be offloaded to the
//! blocking task thread-pool using [`task::spawn_blocking`].
//!
//! # Examples
//! ```
//! use std::sync::mpsc;
//! use actix_rt::{Arbiter, System};
//!
//! let _ = System::new();
//!
//! let (tx, rx) = mpsc::channel::<u32>();
//!
//! let arbiter = Arbiter::new();
//! arbiter.spawn_fn(move || tx.send(42).unwrap());
//!
//! let num = rx.recv().unwrap();
//! assert_eq!(num, 42);
//!
//! arbiter.stop();
//! arbiter.join().unwrap();
//! ```
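//!
//! A minimal sketch of offloading a disproportionately expensive (blocking) task onto the
//! blocking thread-pool with [`task::spawn_blocking`]:
//!
//! ```
//! actix_rt::System::new().block_on(async {
//!     let res = actix_rt::task::spawn_blocking(|| {
//!         // CPU-heavy or blocking work runs off the async thread
//!         (0..1000u32).sum::<u32>()
//!     })
//!     .await
//!     .unwrap();
//!
//!     assert_eq!(res, 499_500);
//! });
//! ```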
#![deny(rust_2018_idioms, nonstandard_style)]
#![allow(clippy::type_complexity)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use std::future::Future;
use tokio::task::JoinHandle;
// Cannot define a main macro when compiled into test harness.
// Workaround for https://github.com/rust-lang/rust/issues/62127.
#[cfg(all(feature = "macros", not(test)))]
pub use actix_macros::{main, test};
mod arbiter;
mod runtime;
mod system;
pub use self::arbiter::{Arbiter, ArbiterHandle};
pub use self::runtime::Runtime;
pub use self::system::{System, SystemRunner};
pub use tokio::pin;
pub mod signal {
//! Asynchronous signal handling (Tokio re-exports).
#[cfg(unix)]
pub mod unix {
//! Unix specific signals (Tokio re-exports).
pub use tokio::signal::unix::*;
}
pub use tokio::signal::ctrl_c;
}
pub mod net {
//! TCP/UDP/Unix bindings (mostly Tokio re-exports).
use std::{
future::Future,
io,
task::{Context, Poll},
};
pub use tokio::io::Ready;
use tokio::io::{AsyncRead, AsyncWrite, Interest};
pub use tokio::net::UdpSocket;
pub use tokio::net::{TcpListener, TcpSocket, TcpStream};
#[cfg(unix)]
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
/// Extension trait over async read+write types that can also signal readiness.
#[doc(hidden)]
pub trait ActixStream: AsyncRead + AsyncWrite + Unpin {
/// Poll stream and check read readiness of Self.
///
/// See [tokio::net::TcpStream::poll_read_ready] for detail on intended use.
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
/// Poll stream and check write readiness of Self.
///
/// See [tokio::net::TcpStream::poll_write_ready] for detail on intended use.
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
}
impl ActixStream for TcpStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
}
#[cfg(unix)]
impl ActixStream for UnixStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
}
impl<Io: ActixStream + ?Sized> ActixStream for Box<Io> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_read_ready(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_write_ready(cx)
}
}
}
pub mod time {
//! Utilities for tracking time (Tokio re-exports).
pub use tokio::time::Instant;
pub use tokio::time::{interval, interval_at, Interval};
pub use tokio::time::{sleep, sleep_until, Sleep};
pub use tokio::time::{timeout, Timeout};
}
pub mod task {
//! Task management (Tokio re-exports).
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
}
/// Spawns a future on the current thread.
///
/// # Panics
/// Panics if Actix system is not running.
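///
/// # Examples
/// A minimal sketch, assuming a `System` (which provides the required task-local context) is
/// driving the current thread:
/// ```
/// actix_rt::System::new().block_on(async {
///     let handle = actix_rt::spawn(async {
///         println!("running on the current thread");
///     });
///
///     handle.await.unwrap();
/// });
/// ```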
#[inline]
pub fn spawn<Fut>(f: Fut) -> JoinHandle<()>
where
Fut: Future<Output = ()> + 'static,
{
tokio::task::spawn_local(f)
}

96
actix-rt/src/runtime.rs Normal file

@@ -0,0 +1,96 @@
use std::{future::Future, io};
use tokio::task::{JoinHandle, LocalSet};
/// A Tokio-based runtime proxy.
///
/// All spawned futures will be executed on the current thread. Therefore, there is no `Send` bound
/// on submitted futures.
#[derive(Debug)]
pub struct Runtime {
local: LocalSet,
rt: tokio::runtime::Runtime,
}
pub(crate) fn default_tokio_runtime() -> io::Result<tokio::runtime::Runtime> {
tokio::runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()
}
impl Runtime {
/// Returns a new runtime initialized with default configuration values.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> io::Result<Self> {
let rt = default_tokio_runtime()?;
Ok(Runtime {
rt,
local: LocalSet::new(),
})
}
/// Reference to local task set.
pub(crate) fn local_set(&self) -> &LocalSet {
&self.local
}
/// Offload a future onto the single-threaded runtime.
///
/// The returned join handle can be used to await the future's result.
///
/// See [crate root][crate] documentation for more details.
///
/// # Examples
/// ```
/// let rt = actix_rt::Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// let handle = rt.spawn(async {
/// println!("running on the runtime");
/// 42
/// });
///
/// assert_eq!(rt.block_on(handle).unwrap(), 42);
/// ```
///
/// # Panics
/// This function panics if the spawn fails. Failure occurs if the executor is currently at
/// capacity and is unable to spawn a new future.
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: Future + 'static,
{
self.local.spawn_local(future)
}
/// Runs the provided future, blocking the current thread until the future completes.
///
/// This function can be used to synchronously block the current thread until the provided
/// `future` has resolved either successfully or with an error. The result of the future is
/// then returned from this function call.
///
/// Note that this function will also execute any spawned futures on the current thread, but
/// will not block until these other spawned futures have completed. Once the function returns,
/// any uncompleted futures remain pending in the `Runtime` instance. These futures will not run
/// until `block_on` is called again.
///
/// The caller is responsible for ensuring that other spawned futures complete execution by
/// calling `block_on` again.
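///
/// # Examples
/// A minimal sketch of blocking on a simple future:
/// ```
/// let rt = actix_rt::Runtime::new().unwrap();
///
/// // blocks the current thread until the future resolves, returning its output
/// let output = rt.block_on(async { 1 + 2 });
/// assert_eq!(output, 3);
/// ```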
pub fn block_on<F>(&self, f: F) -> F::Output
where
F: Future,
{
self.local.block_on(&self.rt, f)
}
}
impl From<tokio::runtime::Runtime> for Runtime {
fn from(rt: tokio::runtime::Runtime) -> Self {
Self {
local: LocalSet::new(),
rt,
}
}
}

255
actix-rt/src/system.rs Normal file

@@ -0,0 +1,255 @@
use std::{
cell::RefCell,
collections::HashMap,
future::Future,
io,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
};
use futures_core::ready;
use tokio::sync::{mpsc, oneshot};
use crate::{arbiter::ArbiterHandle, runtime::default_tokio_runtime, Arbiter, Runtime};
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static CURRENT: RefCell<Option<System>> = RefCell::new(None);
);
/// A manager for a per-thread distributed async runtime.
#[derive(Clone, Debug)]
pub struct System {
id: usize,
sys_tx: mpsc::UnboundedSender<SystemCommand>,
/// Handle to the first [Arbiter] that is created with the System.
arbiter_handle: ArbiterHandle,
}
impl System {
/// Create a new system.
///
/// # Panics
/// Panics if underlying Tokio runtime can not be created.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> SystemRunner {
Self::with_tokio_rt(|| {
default_tokio_runtime()
.expect("Default Actix (Tokio) runtime could not be created.")
})
}
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> SystemRunner
where
F: Fn() -> tokio::runtime::Runtime,
{
let (stop_tx, stop_rx) = oneshot::channel();
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
let rt = Runtime::from(runtime_factory());
let sys_arbiter = Arbiter::in_new_system(rt.local_set());
let system = System::construct(sys_tx, sys_arbiter.clone());
system
.tx()
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
.unwrap();
// init background system arbiter
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
rt.spawn(sys_ctrl);
SystemRunner {
rt,
stop_rx,
system,
}
}
/// Constructs new system and registers it on the current thread.
pub(crate) fn construct(
sys_tx: mpsc::UnboundedSender<SystemCommand>,
arbiter_handle: ArbiterHandle,
) -> Self {
let sys = System {
sys_tx,
arbiter_handle,
id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst),
};
System::set_current(sys.clone());
sys
}
/// Get current running system.
///
/// # Panics
/// Panics if no system is registered on the current thread.
pub fn current() -> System {
CURRENT.with(|cell| match *cell.borrow() {
Some(ref sys) => sys.clone(),
None => panic!("System is not running"),
})
}
/// Try to get current running system.
///
/// Returns `None` if no System has been started.
///
/// Contrary to `current`, this never panics.
pub fn try_current() -> Option<System> {
CURRENT.with(|cell| cell.borrow().clone())
}
/// Get a handle to the System's initial [Arbiter].
pub fn arbiter(&self) -> &ArbiterHandle {
&self.arbiter_handle
}
/// Check if there is a System registered on the current thread.
pub fn is_registered() -> bool {
CURRENT.with(|sys| sys.borrow().is_some())
}
/// Register given system on current thread.
#[doc(hidden)]
pub fn set_current(sys: System) {
CURRENT.with(|cell| {
*cell.borrow_mut() = Some(sys);
})
}
/// Numeric system identifier.
///
/// Useful when using multiple Systems.
pub fn id(&self) -> usize {
self.id
}
/// Stop the system (with code 0).
pub fn stop(&self) {
self.stop_with_code(0)
}
/// Stop the system with a given exit code.
pub fn stop_with_code(&self, code: i32) {
let _ = self.sys_tx.send(SystemCommand::Exit(code));
}
pub(crate) fn tx(&self) -> &mpsc::UnboundedSender<SystemCommand> {
&self.sys_tx
}
}
/// Runner that keeps a [System]'s event loop alive until stop message is received.
#[must_use = "A SystemRunner does nothing unless `run` is called."]
#[derive(Debug)]
pub struct SystemRunner {
rt: Runtime,
stop_rx: oneshot::Receiver<i32>,
system: System,
}
impl SystemRunner {
/// Starts the event loop and returns once the [System] is [stopped](System::stop).
pub fn run(self) -> io::Result<()> {
let SystemRunner { rt, stop_rx, .. } = self;
// run loop
match rt.block_on(stop_rx) {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
}
}
/// Runs the provided future, blocking the current thread until the future completes.
#[inline]
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
self.rt.block_on(fut)
}
}
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, ArbiterHandle),
DeregisterArbiter(usize),
}
/// There is one `SystemController` per [System]. It runs in the background, keeping track of
/// [Arbiter]s and is able to distribute a system-wide stop command.
#[derive(Debug)]
pub(crate) struct SystemController {
stop_tx: Option<oneshot::Sender<i32>>,
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
arbiters: HashMap<usize, ArbiterHandle>,
}
impl SystemController {
pub(crate) fn new(
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
stop_tx: oneshot::Sender<i32>,
) -> Self {
SystemController {
cmd_rx,
stop_tx: Some(stop_tx),
arbiters: HashMap::with_capacity(4),
}
}
}
impl Future for SystemController {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match ready!(Pin::new(&mut self.cmd_rx).poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process system command
Some(cmd) => match cmd {
SystemCommand::Exit(code) => {
// stop all arbiters
for arb in self.arbiters.values() {
arb.stop();
}
// stop event loop
// will only fire once
if let Some(stop_tx) = self.stop_tx.take() {
let _ = stop_tx.send(code);
}
}
SystemCommand::RegisterArbiter(id, arb) => {
self.arbiters.insert(id, arb);
}
SystemCommand::DeregisterArbiter(id) => {
self.arbiters.remove(&id);
}
},
}
}
}
}

300
actix-rt/tests/tests.rs Normal file

@@ -0,0 +1,300 @@
use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::channel,
Arc,
},
thread,
time::{Duration, Instant},
};
use actix_rt::{Arbiter, System};
use tokio::sync::oneshot;
#[test]
fn await_for_timer() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
tokio::time::sleep(time).await;
});
assert!(
instant.elapsed() >= time,
"Block on should poll awaited future to completion"
);
}
#[test]
fn join_another_arbiter() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on another arbiter should complete only when it calls stop"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || {
actix_rt::spawn(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
});
});
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on an arbiter that has used actix_rt::spawn should wait for said future"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.stop();
arbiter.join().unwrap();
});
assert!(
instant.elapsed() < time,
"Premature stop of arbiter should conclude regardless of its current state"
);
}
#[test]
fn non_static_block_on() {
let string = String::from("test_str");
let string = string.as_str();
let sys = System::new();
sys.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
let rt = actix_rt::Runtime::new().unwrap();
rt.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
}
#[test]
fn wait_for_spawns() {
let rt = actix_rt::Runtime::new().unwrap();
let handle = rt.spawn(async {
println!("running on the runtime");
// assertion panic is caught at task boundary
assert_eq!(1, 2);
});
assert!(rt.block_on(handle).is_err());
}
#[test]
fn arbiter_spawn_fn_runs() {
let _ = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || tx.send(42).unwrap());
let num = rx.recv().unwrap();
assert_eq!(num, 42);
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
fn arbiter_handle_spawn_fn_runs() {
let sys = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
let handle = arbiter.handle();
drop(arbiter);
handle.spawn_fn(move || {
tx.send(42).unwrap();
System::current().stop()
});
let num = rx.recv_timeout(Duration::from_secs(2)).unwrap();
assert_eq!(num, 42);
handle.stop();
sys.run().unwrap();
}
#[test]
fn arbiter_drop_no_panic_fn() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn_fn(|| panic!("test"));
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
fn arbiter_drop_no_panic_fut() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn(async { panic!("test") });
arbiter.stop();
arbiter.join().unwrap();
}
#[test]
#[should_panic]
fn no_system_current_panic() {
System::current();
}
#[test]
#[should_panic]
fn no_system_arbiter_new_panic() {
Arbiter::new();
}
#[test]
fn system_arbiter_spawn() {
let runner = System::new();
let (tx, rx) = oneshot::channel();
let sys = System::current();
thread::spawn(|| {
// this thread will have no arbiter in its thread local so call will panic
Arbiter::current();
})
.join()
.unwrap_err();
let thread = thread::spawn(|| {
// this thread will have no arbiter in its thread local so use the system handle instead
System::set_current(sys);
let sys = System::current();
let arb = sys.arbiter();
arb.spawn(async move {
tx.send(42u32).unwrap();
System::current().stop();
});
});
assert_eq!(runner.block_on(rx).unwrap(), 42);
thread.join().unwrap();
}
#[test]
fn system_stop_stops_arbiters() {
let sys = System::new();
let arb = Arbiter::new();
// arbiter should be alive to receive spawn msg
assert!(Arbiter::current().spawn_fn(|| {}));
assert!(arb.spawn_fn(|| {}));
System::current().stop();
sys.run().unwrap();
// account for slightly slow thread de-spawns (only observed on windows)
thread::sleep(Duration::from_millis(100));
// arbiter should be dead and return false
assert!(!Arbiter::current().spawn_fn(|| {}));
assert!(!arb.spawn_fn(|| {}));
arb.join().unwrap();
}
#[test]
fn new_system_with_tokio() {
let (tx, rx) = channel();
let res = System::with_tokio_rt(move || {
tokio::runtime::Builder::new_multi_thread()
.enable_io()
.enable_time()
.thread_keep_alive(Duration::from_millis(1000))
.worker_threads(2)
.max_blocking_threads(2)
.on_thread_start(|| {})
.on_thread_stop(|| {})
.build()
.unwrap()
})
.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
tokio::task::spawn(async move {
tx.send(42).unwrap();
})
.await
.unwrap();
123usize
});
assert_eq!(res, 123);
assert_eq!(rx.recv().unwrap(), 42);
}
#[test]
fn new_arbiter_with_tokio() {
let _ = System::new();
let arb = Arbiter::with_tokio_rt(|| {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
});
let counter = Arc::new(AtomicBool::new(true));
let counter1 = counter.clone();
let did_spawn = arb.spawn(async move {
actix_rt::time::sleep(Duration::from_millis(1)).await;
counter1.store(false, Ordering::SeqCst);
Arbiter::current().stop();
});
assert!(did_spawn);
arb.join().unwrap();
assert!(!counter.load(Ordering::SeqCst));
}
#[test]
fn try_current_no_system() {
assert!(System::try_current().is_none())
}
#[test]
fn try_current_with_system() {
System::new().block_on(async { assert!(System::try_current().is_some()) });
}

179
actix-server/CHANGES.md Normal file

@@ -0,0 +1,179 @@
# Changes
## Unreleased - 2021-xx-xx
* Remove `config` module. `ServiceConfig`, `ServiceRuntime` public types are removed due to this change. [#349]
* Remove `ServerBuilder::configure` [#349]
[#349]: https://github.com/actix/actix-net/pull/349
## 2.0.0-beta.5 - 2021-04-20
* Server shutdown now notifies all workers to exit regardless of whether the shutdown is graceful.
This makes all workers shut down immediately in the force-shutdown case. [#333]
[#333]: https://github.com/actix/actix-net/pull/333
## 2.0.0-beta.4 - 2021-04-01
* Prevent panic when `shutdown_timeout` is very large. [f9262db]
[f9262db]: https://github.com/actix/actix-net/commit/f9262db
## 2.0.0-beta.3 - 2021-02-06
* Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`. [#246]
* Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop. [#264]
* Add `ServerBuilder::worker_max_blocking_threads` to customize blocking thread pool size. [#265]
* Update `actix-rt` to `2.0.0`. [#273]
[#246]: https://github.com/actix/actix-net/pull/246
[#264]: https://github.com/actix/actix-net/pull/264
[#265]: https://github.com/actix/actix-net/pull/265
[#273]: https://github.com/actix/actix-net/pull/273
## 2.0.0-beta.2 - 2021-01-03
* Merge `actix-testing` to `actix-server` as `test_server` mod. [#242]
[#242]: https://github.com/actix/actix-net/pull/242
## 2.0.0-beta.1 - 2020-12-28
* Added explicit info log message on accept queue pause. [#215]
* Prevent double registration of sockets when back-pressure is resolved. [#223]
* Update `mio` dependency to `0.7.3`. [#239]
* Remove `socket2` dependency. [#239]
* `ServerBuilder::backlog` now accepts `u32` instead of `i32`. [#239]
* Remove `AcceptNotify` type and pass `WakerQueue` to `Worker` to wake up `Accept`'s `Poll`. [#239]
* Convert `mio::net::TcpStream` to `actix_rt::net::TcpStream`(`UnixStream` for uds) using
`FromRawFd` and `IntoRawFd`(`FromRawSocket` and `IntoRawSocket` on windows). [#239]
* Remove `AsyncRead` and `AsyncWrite` trait bound for `socket::FromStream` trait. [#239]
[#215]: https://github.com/actix/actix-net/pull/215
[#223]: https://github.com/actix/actix-net/pull/223
[#239]: https://github.com/actix/actix-net/pull/239
## 1.0.4 - 2020-09-12
* Update actix-codec to 0.3.0.
* Worker count must be greater than 0. [#167]
[#167]: https://github.com/actix/actix-net/pull/167
## 1.0.3 - 2020-05-19
* Replace deprecated `net2` crate with `socket2` [#140]
[#140]: https://github.com/actix/actix-net/pull/140
## 1.0.2 - 2020-02-26
* Avoid error by calling `reregister()` on Windows [#103]
[#103]: https://github.com/actix/actix-net/pull/103
## 1.0.1 - 2019-12-29
* Rename `.start()` method to `.run()`
## 1.0.0 - 2019-12-11
* Use actix-net releases
## 1.0.0-alpha.4 - 2019-12-08
* Use actix-service 1.0.0-alpha.4
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to tokio 0.2
* Fix compilation on non-unix platforms
* Better handling server configuration
## 1.0.0-alpha.2 - 2019-12-02
* Simplify server service (remove actix-server-config)
* Allow to wait on `Server` until server stops
## 0.8.0-alpha.1 - 2019-11-22
* Migrate to `std::future`
## 0.7.0 - 2019-10-04
* Update `rustls` to 0.16
* Minimum required Rust version upped to 1.37.0
## 0.6.1 - 2019-09-25
* Add UDS listening support to `ServerBuilder`
## 0.6.0 - 2019-07-18
* Support Unix domain sockets #3
## 0.5.1 - 2019-05-18
* ServerBuilder::shutdown_timeout() accepts u64
## 0.5.0 - 2019-05-12
* Add `Debug` impl for `SslError`
* Derive debug for `Server` and `ServerCommand`
* Upgrade to actix-service 0.4
## 0.4.3 - 2019-04-16
* Re-export `IoStream` trait
* Depend on `ssl` and `rust-tls` features from actix-server-config
## 0.4.2 - 2019-03-30
* Fix SIGINT force shutdown
## 0.4.1 - 2019-03-14
* `SystemRuntime::on_start()` - allow running a future before server service initialization
## 0.4.0 - 2019-03-12
* Use `ServerConfig` for service factory
* Wrap tcp socket to `Io` type
* Upgrade actix-service
## 0.3.1 - 2019-03-04
* Add `ServerBuilder::maxconnrate`, which sets the maximum per-worker number of concurrent connections
* Add helper ssl error `SslError`
* Rename `StreamServiceFactory` to `ServiceFactory`
* Deprecate `StreamServiceFactory`
## 0.3.0 - 2019-03-02
* Use new `NewService` trait
## 0.2.1 - 2019-02-09
* Drop service response
## 0.2.0 - 2019-02-01
* Migrate to actix-service 0.2
* Updated rustls dependency
## 0.1.3 - 2018-12-21
* Fix max concurrent connections handling
## 0.1.2 - 2018-12-12
* rename ServiceConfig::rt() to ServiceConfig::apply()
* Fix back-pressure for concurrent ssl handshakes
## 0.1.1 - 2018-12-11
* Fix signal handling on windows
## 0.1.0 - 2018-12-09
* Move server to separate crate

40
actix-server/Cargo.toml Executable file

@@ -0,0 +1,40 @@
[package]
name = "actix-server"
version = "2.0.0-beta.5"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
]
description = "General purpose TCP server built for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_server"
path = "src/lib.rs"
[features]
default = []
[dependencies]
actix-rt = { version = "2.0.0", default-features = false }
actix-service = "2.0.0"
actix-utils = "3.0.0"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
log = "0.4"
mio = { version = "0.7.6", features = ["os-poll", "net"] }
num_cpus = "1.13"
tokio = { version = "1.2", features = ["sync"] }
[dev-dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = "2.0.0"
bytes = "1"
env_logger = "0.8"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
tokio = { version = "1", features = ["io-util"] }

1
actix-server/LICENSE-APACHE Symbolic link

@@ -0,0 +1 @@
../LICENSE-APACHE

1
actix-server/LICENSE-MIT Symbolic link

@@ -0,0 +1 @@
../LICENSE-MIT


@@ -0,0 +1,90 @@
//! Simple composite-service TCP echo server.
//!
//! Using the following command:
//!
//! ```sh
//! nc 127.0.0.1 8080
//! ```
//!
//! Start typing. When you press enter the typed line will be echoed back. The server will log
//! the length of each line it echoes and the total size of data sent when the connection is closed.
use std::{
env, io,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::{fn_service, ServiceFactoryExt as _};
use bytes::BytesMut;
use futures_util::future::ok;
use log::{error, info};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "info");
env_logger::init();
let count = Arc::new(AtomicUsize::new(0));
let addr = ("127.0.0.1", 8080);
info!("starting server on port: {}", &addr.0);
// Bind socket address and start worker(s). By default, the server uses the number of available
// logical CPU cores as the worker count. For this reason, the closure passed to bind needs
// to return a service *factory*, so that it can be created once per worker.
Server::build()
.bind("echo", addr, move || {
let count = Arc::clone(&count);
let num2 = Arc::clone(&count);
fn_service(move |mut stream: TcpStream| {
let count = Arc::clone(&count);
async move {
let num = count.fetch_add(1, Ordering::SeqCst);
let num = num + 1;
let mut size = 0;
let mut buf = BytesMut::new();
loop {
match stream.read_buf(&mut buf).await {
// end of stream; bail from loop
Ok(0) => break,
// more bytes to process
Ok(bytes_read) => {
info!("[{}] read {} bytes", num, bytes_read);
stream.write_all(&buf[size..]).await.unwrap();
size += bytes_read;
}
// stream error; bail from loop with error
Err(err) => {
error!("Stream Error: {:?}", err);
return Err(());
}
}
}
// send data down service pipeline
Ok((buf.freeze(), size))
}
})
.map_err(|err| error!("Service Error: {:?}", err))
.and_then(move |(_, size)| {
let num = num2.load(Ordering::SeqCst);
info!("[{}] total bytes read: {}", num, size);
ok(size)
})
})?
.workers(1)
.run()
.await
}

592
actix-server/src/accept.rs Normal file

@@ -0,0 +1,592 @@
use std::time::Duration;
use std::{io, thread};
use actix_rt::{
time::{sleep, Instant},
System,
};
use log::{error, info};
use mio::{Interest, Poll, Token as MioToken};
use crate::server::Server;
use crate::socket::MioListener;
use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
use crate::worker::{Conn, WorkerHandleAccept};
struct ServerSocketInfo {
token: usize,
lst: MioListener,
/// Timeout is used to mark the deadline when this socket's listener should be registered again
/// after an error.
timeout: Option<Instant>,
}
/// The accept loop lives with `ServerBuilder`.
///
/// It's tasked with constructing the `Poll` instance and the `WakerQueue`, which are distributed
/// to `Accept` and `Worker`.
///
/// It also listens for `ServerCommand` and pushes interests to the `WakerQueue`.
pub(crate) struct AcceptLoop {
srv: Option<Server>,
poll: Option<Poll>,
waker: WakerQueue,
}
impl AcceptLoop {
pub fn new(srv: Server) -> Self {
let poll = Poll::new().unwrap_or_else(|e| panic!("Can not create `mio::Poll`: {}", e));
let waker = WakerQueue::new(poll.registry())
.unwrap_or_else(|e| panic!("Can not create `mio::Waker`: {}", e));
Self {
srv: Some(srv),
poll: Some(poll),
waker,
}
}
pub(crate) fn waker_owned(&self) -> WakerQueue {
self.waker.clone()
}
pub fn wake(&self, i: WakerInterest) {
self.waker.wake(i);
}
pub(crate) fn start(
&mut self,
socks: Vec<(usize, MioListener)>,
handles: Vec<WorkerHandleAccept>,
) {
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
let poll = self.poll.take().unwrap();
let waker = self.waker.clone();
Accept::start(poll, waker, socks, srv, handles);
}
}
/// poll instance of the server.
struct Accept {
poll: Poll,
waker: WakerQueue,
handles: Vec<WorkerHandleAccept>,
srv: Server,
next: usize,
avail: Availability,
paused: bool,
}
/// Array of `u128`s where each bit marks the availability of one worker handle.
struct Availability([u128; 4]);
impl Default for Availability {
fn default() -> Self {
Self([0; 4])
}
}
impl Availability {
/// Check if any worker handle is available
#[inline(always)]
fn available(&self) -> bool {
self.0.iter().any(|a| *a != 0)
}
/// Check if worker handle is available by index
#[inline(always)]
fn get_available(&self, idx: usize) -> bool {
let (offset, idx) = Self::offset(idx);
self.0[offset] & (1 << idx as u128) != 0
}
/// Set worker handle available state by index.
fn set_available(&mut self, idx: usize, avail: bool) {
let (offset, idx) = Self::offset(idx);
let off = 1 << idx as u128;
if avail {
self.0[offset] |= off;
} else {
self.0[offset] &= !off
}
}
/// Set all worker handles to available state.
/// This results in a re-check of all workers' availability.
fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
handles.iter().for_each(|handle| {
self.set_available(handle.idx(), true);
})
}
/// Get offset and adjusted index of given worker handle index.
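///
/// Each `u128` in [`Availability`] stores 128 one-bit flags, so index `idx` maps to
/// `(idx / 128, idx % 128)`.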
fn offset(idx: usize) -> (usize, usize) {
if idx < 128 {
(0, idx)
} else if idx < 128 * 2 {
(1, idx - 128)
} else if idx < 128 * 3 {
(2, idx - 128 * 2)
} else if idx < 128 * 4 {
(3, idx - 128 * 3)
} else {
panic!("Max WorkerHandle count is 512")
}
}
}
/// This function defines errors that are per-connection; if we get this kind of error from
/// the `accept()` system call, it means the next connection might still be ready to be
/// accepted.
///
/// All other errors incur a timeout before the next `accept()` is performed. The timeout is
/// useful for handling resource exhaustion errors like ENFILE and EMFILE; without it, the
/// accept loop could spin in a tight loop.
fn connection_error(e: &io::Error) -> bool {
e.kind() == io::ErrorKind::ConnectionRefused
|| e.kind() == io::ErrorKind::ConnectionAborted
|| e.kind() == io::ErrorKind::ConnectionReset
}
impl Accept {
pub(crate) fn start(
poll: Poll,
waker: WakerQueue,
socks: Vec<(usize, MioListener)>,
srv: Server,
handles: Vec<WorkerHandleAccept>,
) {
// Accept runs in its own thread and may want to spawn additional futures onto the current
// actix system.
let sys = System::current();
thread::Builder::new()
.name("actix-server accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let (mut accept, mut sockets) =
Accept::new_with_sockets(poll, waker, socks, handles, srv);
accept.poll_with(&mut sockets);
})
.unwrap();
}
fn new_with_sockets(
poll: Poll,
waker: WakerQueue,
socks: Vec<(usize, MioListener)>,
handles: Vec<WorkerHandleAccept>,
srv: Server,
) -> (Accept, Vec<ServerSocketInfo>) {
let sockets = socks
.into_iter()
.map(|(token, mut lst)| {
// Start listening for incoming connections
poll.registry()
.register(&mut lst, MioToken(token), Interest::READABLE)
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
ServerSocketInfo {
token,
lst,
timeout: None,
}
})
.collect();
let mut avail = Availability::default();
// Assume all handles are available at construct time.
avail.set_available_all(&handles);
let accept = Accept {
poll,
waker,
handles,
srv,
next: 0,
avail,
paused: false,
};
(accept, sockets)
}
fn poll_with(&mut self, sockets: &mut [ServerSocketInfo]) {
let mut events = mio::Events::with_capacity(128);
loop {
if let Err(e) = self.poll.poll(&mut events, None) {
match e.kind() {
io::ErrorKind::Interrupted => {}
_ => panic!("Poll error: {}", e),
}
}
for event in events.iter() {
let token = event.token();
match token {
WAKER_TOKEN => {
let exit = self.handle_waker(sockets);
if exit {
info!("Accept is stopped.");
return;
}
}
_ => {
let token = usize::from(token);
self.accept(sockets, token);
}
}
}
}
}
fn handle_waker(&mut self, sockets: &mut [ServerSocketInfo]) -> bool {
// This is a loop because, in the previous version, command interests were handled in a
// loop that tried to drain the command channel. It's not yet known whether actively
// draining the waker queue is necessary/good practice.
loop {
// take guard with every iteration so no new interest can be added
// until the current task is done.
let mut guard = self.waker.guard();
match guard.pop_front() {
// worker notifies that it has become available.
Some(WakerInterest::WorkerAvailable(idx)) => {
drop(guard);
self.avail.set_available(idx, true);
if !self.paused {
self.accept_all(sockets);
}
}
// a new worker thread has been made and its handle will be added to Accept
Some(WakerInterest::Worker(handle)) => {
drop(guard);
self.avail.set_available(handle.idx(), true);
self.handles.push(handle);
if !self.paused {
self.accept_all(sockets);
}
}
// got timer interest and it's time to try registering socket(s) again
Some(WakerInterest::Timer) => {
drop(guard);
self.process_timer(sockets)
}
Some(WakerInterest::Pause) => {
drop(guard);
if !self.paused {
self.paused = true;
self.deregister_all(sockets);
}
}
Some(WakerInterest::Resume) => {
drop(guard);
if self.paused {
self.paused = false;
sockets.iter_mut().for_each(|info| {
self.register_logged(info);
});
self.accept_all(sockets);
}
}
Some(WakerInterest::Stop) => {
if !self.paused {
self.deregister_all(sockets);
}
return true;
}
// waker queue is drained
None => {
// Reset the WakerQueue before break so it does not grow infinitely
WakerQueue::reset(&mut guard);
return false;
}
}
}
}
fn process_timer(&self, sockets: &mut [ServerSocketInfo]) {
let now = Instant::now();
sockets
.iter_mut()
// Only sockets that had an associated timeout were deregistered.
.filter(|info| info.timeout.is_some())
.for_each(|info| {
let inst = info.timeout.take().unwrap();
if now < inst {
info.timeout = Some(inst);
} else if !self.paused {
self.register_logged(info);
}
// Drop the timeout if the server is paused and the socket timeout has expired.
// When the server recovers from pause it will register all sockets without
// a timeout value, so this socket's registration will be delayed until then.
});
}
#[cfg(not(target_os = "windows"))]
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
let token = MioToken(info.token);
self.poll
.registry()
.register(&mut info.lst, token, Interest::READABLE)
}
#[cfg(target_os = "windows")]
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
// On Windows, calling register without deregister causes an error.
// See https://github.com/actix/actix-web/issues/905
// Calling reregister seems to fix the issue.
let token = MioToken(info.token);
self.poll
.registry()
.register(&mut info.lst, token, Interest::READABLE)
.or_else(|_| {
self.poll
.registry()
.reregister(&mut info.lst, token, Interest::READABLE)
})
}
fn register_logged(&self, info: &mut ServerSocketInfo) {
match self.register(info) {
Ok(_) => info!("Resume accepting connections on {}", info.lst.local_addr()),
Err(e) => error!("Can not register server socket {}", e),
}
}
fn deregister_logged(&self, info: &mut ServerSocketInfo) {
match self.poll.registry().deregister(&mut info.lst) {
Ok(_) => info!("Paused accepting connections on {}", info.lst.local_addr()),
Err(e) => {
error!("Can not deregister server socket {}", e)
}
}
}
fn deregister_all(&self, sockets: &mut [ServerSocketInfo]) {
// This is a best-effort implementation with the following limitation:
//
// Every ServerSocketInfo with an associated timeout will be skipped and its timeout
// is removed in the process.
//
// Therefore a WakerInterest::Pause followed by a WakerInterest::Resume within a very short
// gap (less than 500ms) would cause all timing-out ServerSocketInfos to be re-registered
// before the expected time.
sockets
.iter_mut()
// Take all timeouts.
// This prevents the Accept::process_timer method from re-registering a socket afterwards.
.map(|info| (info.timeout.take(), info))
// Socket info with a timeout is already deregistered so skip them.
.filter(|(timeout, _)| timeout.is_none())
.for_each(|(_, info)| self.deregister_logged(info));
}
// Send connection to worker and handle error.
fn send_connection(&mut self, conn: Conn) -> Result<(), Conn> {
let next = self.next();
match next.send(conn) {
Ok(_) => {
// Increment the counter of the WorkerHandle.
// Set the worker to unavailable when it hits max (inc_counter returns false).
if !next.inc_counter() {
let idx = next.idx();
self.avail.set_available(idx, false);
}
self.set_next();
Ok(())
}
Err(conn) => {
// The worker thread errored and could be gone.
// Remove the worker handle and notify `ServerBuilder`.
self.remove_next();
if self.handles.is_empty() {
error!("No workers");
// All workers are gone and Conn is nowhere to be sent.
// Treat this situation as Ok and drop Conn.
return Ok(());
} else if self.handles.len() <= self.next {
self.next = 0;
}
Err(conn)
}
}
}
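// Dispatch a single connection to a worker using round-robin, retrying the remaining
// workers when the preferred one is unavailable or has gone away.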
fn accept_one(&mut self, mut conn: Conn) {
loop {
let next = self.next();
let idx = next.idx();
if self.avail.get_available(idx) {
match self.send_connection(conn) {
Ok(_) => return,
Err(c) => conn = c,
}
} else {
self.avail.set_available(idx, false);
self.set_next();
if !self.avail.available() {
while let Err(c) = self.send_connection(conn) {
conn = c;
}
return;
}
}
}
}
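// Accept connections from the listener identified by `token` for as long as at least one
// worker is available to take them.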
fn accept(&mut self, sockets: &mut [ServerSocketInfo], token: usize) {
while self.avail.available() {
let info = &mut sockets[token];
match info.lst.accept() {
Ok(io) => {
let conn = Conn { io, token };
self.accept_one(conn);
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
// deregister the listener temporarily
self.deregister_logged(info);
// sleep after error; write the timeout to the socket info so that the poll
// can later determine which socket's listener should be registered again,
// and when
info.timeout = Some(Instant::now() + Duration::from_millis(500));
// after the sleep a Timer interest is sent to Accept Poll
let waker = self.waker.clone();
System::current().arbiter().spawn(async move {
sleep(Duration::from_millis(510)).await;
waker.wake(WakerInterest::Timer);
});
return;
}
};
}
}
fn accept_all(&mut self, sockets: &mut [ServerSocketInfo]) {
sockets
.iter_mut()
.map(|info| info.token)
.collect::<Vec<_>>()
.into_iter()
.for_each(|idx| self.accept(sockets, idx))
}
#[inline(always)]
fn next(&self) -> &WorkerHandleAccept {
&self.handles[self.next]
}
/// Set the next worker handle that will accept a connection.
#[inline(always)]
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
}
/// Remove the next worker handle, which failed to accept a connection.
fn remove_next(&mut self) {
let handle = self.handles.swap_remove(self.next);
let idx = handle.idx();
// A message is sent to the `ServerBuilder` future to notify it that a new worker
// should be created.
self.srv.worker_faulted(idx);
self.avail.set_available(idx, false);
}
}
#[cfg(test)]
mod test {
use super::Availability;
fn single(aval: &mut Availability, idx: usize) {
aval.set_available(idx, true);
assert!(aval.available());
aval.set_available(idx, true);
aval.set_available(idx, false);
assert!(!aval.available());
aval.set_available(idx, false);
assert!(!aval.available());
}
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
idx.iter().for_each(|idx| aval.set_available(*idx, true));
assert!(aval.available());
while let Some(idx) = idx.pop() {
assert!(aval.available());
aval.set_available(idx, false);
}
assert!(!aval.available());
}
#[test]
fn availability() {
let mut aval = Availability::default();
single(&mut aval, 1);
single(&mut aval, 128);
single(&mut aval, 256);
single(&mut aval, 511);
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
multi(&mut aval, idx);
multi(&mut aval, (0..511).collect())
}
#[test]
#[should_panic]
fn overflow() {
let mut aval = Availability::default();
single(&mut aval, 512);
}
#[test]
fn pin_point() {
let mut aval = Availability::default();
aval.set_available(438, true);
aval.set_available(479, true);
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
}
}

actix-server/src/builder.rs

@@ -0,0 +1,472 @@
use std::{
future::Future,
io, mem,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use actix_rt::{self as rt, net::TcpStream, time::sleep, System};
use log::{error, info};
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedReceiver},
oneshot,
};
use crate::accept::AcceptLoop;
use crate::join_all;
use crate::server::{Server, ServerCommand};
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::signals::{Signal, Signals};
use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::socket::{MioTcpListener, MioTcpSocket};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::worker::{ServerWorker, ServerWorkerConfig, WorkerHandleAccept, WorkerHandleServer};
/// Server builder
pub struct ServerBuilder {
threads: usize,
token: usize,
backlog: u32,
handles: Vec<(usize, WorkerHandleServer)>,
services: Vec<Box<dyn InternalServiceFactory>>,
sockets: Vec<(usize, String, MioListener)>,
accept: AcceptLoop,
exit: bool,
no_signals: bool,
cmd: UnboundedReceiver<ServerCommand>,
server: Server,
notify: Vec<oneshot::Sender<()>>,
worker_config: ServerWorkerConfig,
}
impl Default for ServerBuilder {
fn default() -> Self {
Self::new()
}
}
impl ServerBuilder {
/// Create new Server builder instance
pub fn new() -> ServerBuilder {
let (tx, rx) = unbounded_channel();
let server = Server::new(tx);
ServerBuilder {
threads: num_cpus::get(),
token: 0,
handles: Vec::new(),
services: Vec::new(),
sockets: Vec::new(),
accept: AcceptLoop::new(server.clone()),
backlog: 2048,
exit: false,
no_signals: false,
cmd: rx,
notify: Vec::new(),
server,
worker_config: ServerWorkerConfig::default(),
}
}
/// Set the number of workers to start.
///
/// By default, the server uses the number of available logical CPUs as the worker
/// count. The worker count must be greater than 0.
pub fn workers(mut self, num: usize) -> Self {
assert_ne!(num, 0, "workers must be greater than 0");
self.threads = num;
self
}
/// Set max number of threads for each worker's blocking task thread pool.
///
/// One thread pool is set up **per worker**; not shared across workers.
///
/// # Examples
/// ```
/// # use actix_server::ServerBuilder;
/// let builder = ServerBuilder::new()
/// .workers(4) // server has 4 worker threads.
/// .worker_max_blocking_threads(4); // every worker has 4 max blocking threads.
/// ```
///
/// See [tokio::runtime::Builder::max_blocking_threads] for behavior reference.
pub fn worker_max_blocking_threads(mut self, num: usize) -> Self {
self.worker_config.max_blocking_threads(num);
self
}
/// Set the maximum number of pending connections.
///
/// This refers to the number of clients that can be waiting to be served.
/// Exceeding this number results in the client getting an error when
/// attempting to connect. It should only affect servers under significant
/// load.
///
/// Generally set in the 64-2048 range. Default value is 2048.
///
/// This method should be called before `bind()` is called.
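///
/// # Examples
///
/// A minimal sketch; the backlog value here is illustrative only:
///
/// ```
/// # use actix_server::ServerBuilder;
/// let builder = ServerBuilder::new()
///     .backlog(1024); // allow up to 1024 pending connections per listener
/// ```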
pub fn backlog(mut self, num: u32) -> Self {
self.backlog = num;
self
}
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
///
/// By default, max connections is set to 25k per worker.
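///
/// # Examples
///
/// A minimal sketch; the values are illustrative only:
///
/// ```
/// # use actix_server::ServerBuilder;
/// let builder = ServerBuilder::new()
///     .workers(2)       // 2 worker threads
///     .maxconn(10_000); // each worker serves at most 10k concurrent connections
/// ```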
pub fn maxconn(mut self, num: usize) -> Self {
self.worker_config.max_concurrent_connections(num);
self
}
/// Stop Actix system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
/// Disable signal handling.
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
}
/// Timeout for graceful worker shutdown, in seconds.
///
/// After receiving a stop signal, workers have this much time to finish serving requests.
/// Workers still alive after the timeout are force dropped.
///
/// By default, the shutdown timeout is set to 30 seconds.
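///
/// # Examples
///
/// A minimal sketch; the timeout value is illustrative only:
///
/// ```
/// # use actix_server::ServerBuilder;
/// let builder = ServerBuilder::new()
///     .shutdown_timeout(60); // give workers up to 60 seconds to finish serving requests
/// ```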
pub fn shutdown_timeout(mut self, sec: u64) -> Self {
self.worker_config
.shutdown_timeout(Duration::from_secs(sec));
self
}
/// Add a new service to the server, binding to the given address.
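///
/// # Examples
///
/// A minimal sketch binding a no-op service; the name and address are illustrative only:
///
/// ```
/// # use actix_rt::net::TcpStream;
/// # use actix_server::Server;
/// # use actix_service::fn_service;
/// # fn example() -> std::io::Result<()> {
/// let builder = Server::build()
///     .bind("example", ("127.0.0.1", 8080), || {
///         fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
///     })?;
/// # Ok(())
/// # }
/// ```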
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: ServiceFactory<TcpStream>,
U: ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
let token = self.next_token();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory.clone(),
lst.local_addr()?,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
}
Ok(self)
}
/// Add a new Unix domain service to the server.
#[cfg(unix)]
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: ServiceFactory<actix_rt::net::UnixStream>,
N: AsRef<str>,
U: AsRef<std::path::Path>,
{
// The path must not exist when we try to bind.
// Try to remove it to avoid bind error.
if let Err(e) = std::fs::remove_file(addr.as_ref()) {
// NotFound is expected and not an issue. Anything else is.
if e.kind() != std::io::ErrorKind::NotFound {
return Err(e);
}
}
let lst = crate::socket::StdUnixListener::bind(addr)?;
self.listen_uds(name, lst, factory)
}
/// Add a new Unix domain service to the server.
///
/// Useful when running as a systemd service and a socket FD can be acquired
/// using the systemd crate.
#[cfg(unix)]
pub fn listen_uds<F, N: AsRef<str>>(
mut self,
name: N,
lst: crate::socket::StdUnixListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory<actix_rt::net::UnixStream>,
{
use std::net::{IpAddr, Ipv4Addr};
lst.set_nonblocking(true)?;
let token = self.next_token();
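// Use a dummy socket address as a placeholder; Unix domain sockets have no TCP address.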
let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
addr,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
/// Add a new service to the server using an already bound `std` TCP listener.
pub fn listen<F, N: AsRef<str>>(
mut self,
name: N,
lst: StdTcpListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory<TcpStream>,
{
lst.set_nonblocking(true)?;
let addr = lst.local_addr()?;
let token = self.next_token();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
addr,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
/// Starts processing incoming connections and returns the server controller.
pub fn run(mut self) -> Server {
if self.sockets.is_empty() {
panic!("Server should have at least one bound socket");
} else {
info!("Starting {} workers", self.threads);
// start workers
let handles = (0..self.threads)
.map(|idx| {
let (handle_accept, handle_server) =
self.start_worker(idx, self.accept.waker_owned());
self.handles.push((idx, handle_server));
handle_accept
})
.collect();
// start accept thread
for sock in &self.sockets {
info!("Starting \"{}\" service on {}", sock.1, sock.2);
}
self.accept.start(
mem::take(&mut self.sockets)
.into_iter()
.map(|t| (t.0, t.2))
.collect(),
handles,
);
// handle signals
if !self.no_signals {
Signals::start(self.server.clone());
}
// spawn the `ServerBuilder` future that handles server commands
let server = self.server.clone();
rt::spawn(self);
server
}
}
fn start_worker(
&self,
idx: usize,
waker_queue: WakerQueue,
) -> (WorkerHandleAccept, WorkerHandleServer) {
let services = self.services.iter().map(|v| v.clone_factory()).collect();
ServerWorker::start(idx, services, waker_queue, self.worker_config)
}
fn handle_cmd(&mut self, item: ServerCommand) {
match item {
ServerCommand::Pause(tx) => {
self.accept.wake(WakerInterest::Pause);
let _ = tx.send(());
}
ServerCommand::Resume(tx) => {
self.accept.wake(WakerInterest::Resume);
let _ = tx.send(());
}
ServerCommand::Signal(sig) => {
// Signals support
// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
match sig {
Signal::Int => {
info!("SIGINT received, exiting");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: false,
completion: None,
})
}
Signal::Term => {
info!("SIGTERM received, stopping");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: true,
completion: None,
})
}
Signal::Quit => {
info!("SIGQUIT received, exiting");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: false,
completion: None,
})
}
_ => (),
}
}
ServerCommand::Notify(tx) => {
self.notify.push(tx);
}
ServerCommand::Stop {
graceful,
completion,
} => {
let exit = self.exit;
// stop accept thread
self.accept.wake(WakerInterest::Stop);
let notify = std::mem::take(&mut self.notify);
// stop workers
let stop = self
.handles
.iter()
.map(move |worker| worker.1.stop(graceful))
.collect();
rt::spawn(async move {
if graceful {
let _ = join_all(stop).await;
}
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
if exit {
sleep(Duration::from_millis(300)).await;
System::current().stop();
}
});
}
ServerCommand::WorkerFaulted(idx) => {
let mut found = false;
for i in 0..self.handles.len() {
if self.handles[i].0 == idx {
self.handles.swap_remove(i);
found = true;
break;
}
}
if found {
error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.handles.len();
'found: loop {
for i in 0..self.handles.len() {
if self.handles[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
}
break;
}
let (handle_accept, handle_server) =
self.start_worker(new_idx, self.accept.waker_owned());
self.handles.push((new_idx, handle_server));
self.accept.wake(WakerInterest::Worker(handle_accept));
}
}
}
}
fn next_token(&mut self) -> usize {
let token = self.token;
self.token += 1;
token
}
}
impl Future for ServerBuilder {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match Pin::new(&mut self.cmd).poll_recv(cx) {
Poll::Ready(Some(it)) => self.as_mut().get_mut().handle_cmd(it),
_ => return Poll::Pending,
}
}
}
}
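// Resolve `addr` and create a TCP listener for every resolved socket address.
// Returns the last bind error only if no address could be bound successfully.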
pub(super) fn bind_addr<S: ToSocketAddrs>(
addr: S,
backlog: u32,
) -> io::Result<Vec<MioTcpListener>> {
let mut err = None;
let mut succ = false;
let mut sockets = Vec::new();
for addr in addr.to_socket_addrs()? {
match create_tcp_listener(addr, backlog) {
Ok(lst) => {
succ = true;
sockets.push(lst);
}
Err(e) => err = Some(e),
}
}
if !succ {
if let Some(e) = err.take() {
Err(e)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
}
} else {
Ok(sockets)
}
}
fn create_tcp_listener(addr: StdSocketAddr, backlog: u32) -> io::Result<MioTcpListener> {
let socket = match addr {
StdSocketAddr::V4(_) => MioTcpSocket::new_v4()?,
StdSocketAddr::V6(_) => MioTcpSocket::new_v6()?,
};
socket.set_reuseaddr(true)?;
socket.bind(addr)?;
socket.listen(backlog)
}

actix-server/src/lib.rs

@@ -0,0 +1,103 @@
//! General purpose TCP server.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod accept;
mod builder;
mod server;
mod service;
mod signals;
mod socket;
mod test_server;
mod waker_queue;
mod worker;
pub use self::builder::ServerBuilder;
pub use self::server::Server;
pub use self::service::ServiceFactory;
pub use self::test_server::TestServer;
#[doc(hidden)]
pub use self::socket::FromStream;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Start server building process
pub fn new() -> ServerBuilder {
ServerBuilder::default()
}
// a poor man's join future. joined future is only used when starting/stopping the server.
// pin_project and pinned futures are overkill for this task.
pub(crate) struct JoinAll<T> {
fut: Vec<JoinFuture<T>>,
}
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + 'static>) -> JoinAll<T> {
let fut = fut
.into_iter()
.map(|f| JoinFuture::Future(Box::pin(f)))
.collect();
JoinAll { fut }
}
enum JoinFuture<T> {
Future(Pin<Box<dyn Future<Output = T>>>),
Result(Option<T>),
}
impl<T> Unpin for JoinAll<T> {}
impl<T> Future for JoinAll<T> {
type Output = Vec<T>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut ready = true;
let this = self.get_mut();
for fut in this.fut.iter_mut() {
if let JoinFuture::Future(f) = fut {
match f.as_mut().poll(cx) {
Poll::Ready(t) => {
*fut = JoinFuture::Result(Some(t));
}
Poll::Pending => ready = false,
}
}
}
if ready {
let mut res = Vec::new();
for fut in this.fut.iter_mut() {
if let JoinFuture::Result(f) = fut {
res.push(f.take().unwrap());
}
}
Poll::Ready(res)
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod test {
use super::*;
use actix_utils::future::ready;
#[actix_rt::test]
async fn test_join_all() {
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
let mut res = join_all(futs).await.into_iter();
assert_eq!(Ok(1), res.next().unwrap());
assert_eq!(Err(3), res.next().unwrap());
assert_eq!(Ok(9), res.next().unwrap());
}
}

actix-server/src/server.rs

@@ -0,0 +1,112 @@
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot;
use crate::builder::ServerBuilder;
use crate::signals::Signal;
#[derive(Debug)]
pub(crate) enum ServerCommand {
WorkerFaulted(usize),
Pause(oneshot::Sender<()>),
Resume(oneshot::Sender<()>),
Signal(Signal),
/// Whether to try and shut down gracefully
Stop {
graceful: bool,
completion: Option<oneshot::Sender<()>>,
},
/// Notify of server stop
Notify(oneshot::Sender<()>),
}
#[derive(Debug)]
pub struct Server(
UnboundedSender<ServerCommand>,
Option<oneshot::Receiver<()>>,
);
impl Server {
pub(crate) fn new(tx: UnboundedSender<ServerCommand>) -> Self {
Server(tx, None)
}
/// Start server building process
pub fn build() -> ServerBuilder {
ServerBuilder::default()
}
pub(crate) fn signal(&self, sig: Signal) {
let _ = self.0.send(ServerCommand::Signal(sig));
}
pub(crate) fn worker_faulted(&self, idx: usize) {
let _ = self.0.send(ServerCommand::WorkerFaulted(idx));
}
/// Pause accepting incoming connections
///
/// If the listening socket contains pending connections, they might be dropped.
/// All open connections remain active.
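///
/// A minimal usage sketch (assumes an existing server handle `srv`):
///
/// ```
/// # use actix_server::Server;
/// # async fn pause_and_resume(srv: &Server) {
/// srv.pause().await;
/// // ... later ...
/// srv.resume().await;
/// # }
/// ```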
pub fn pause(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.send(ServerCommand::Pause(tx));
async {
let _ = rx.await;
}
}
/// Resume accepting incoming connections
pub fn resume(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.send(ServerCommand::Resume(tx));
async {
let _ = rx.await;
}
}
/// Stop incoming connection processing, stop all workers and exit.
///
/// If the server was started with the `spawn()` method, the spawned thread gets terminated.
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.send(ServerCommand::Stop {
graceful,
completion: Some(tx),
});
async {
let _ = rx.await;
}
}
}
impl Clone for Server {
fn clone(&self) -> Self {
Self(self.0.clone(), None)
}
}
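// Awaiting a `Server` handle registers a `Notify` channel with the server and resolves once
// the server has stopped (or once the command channel has been closed).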
impl Future for Server {
type Output = io::Result<()>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
if this.1.is_none() {
let (tx, rx) = oneshot::channel();
if this.0.send(ServerCommand::Notify(tx)).is_err() {
return Poll::Ready(Ok(()));
}
this.1 = Some(rx);
}
match Pin::new(this.1.as_mut().unwrap()).poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(_) => Poll::Ready(Ok(())),
}
}
}

actix-server/src/service.rs

@@ -0,0 +1,157 @@
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory as BaseServiceFactory};
use actix_utils::future::{ready, Ready};
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::socket::{FromStream, MioStream};
use crate::worker::WorkerCounterGuard;
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
type Factory: BaseServiceFactory<Stream, Config = ()>;
fn create(&self) -> Self::Factory;
}
pub(crate) trait InternalServiceFactory: Send {
fn name(&self, token: usize) -> &str;
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>;
}
pub(crate) type BoxedServerService = Box<
dyn Service<
(WorkerCounterGuard, MioStream),
Response = (),
Error = (),
Future = Ready<Result<(), ()>>,
>,
>;
pub(crate) struct StreamService<S, I> {
service: S,
_phantom: PhantomData<I>,
}
impl<S, I> StreamService<S, I> {
pub(crate) fn new(service: S) -> Self {
StreamService {
service,
_phantom: PhantomData,
}
}
}
impl<S, I> Service<(WorkerCounterGuard, MioStream)> for StreamService<S, I>
where
S: Service<I>,
S::Future: 'static,
S::Error: 'static,
I: FromStream,
{
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(|_| ())
}
fn call(&self, (guard, req): (WorkerCounterGuard, MioStream)) -> Self::Future {
ready(match FromStream::from_mio(req) {
Ok(stream) => {
let f = self.service.call(stream);
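// The counter guard is moved into the spawned task; when the connection future
// completes, dropping the guard decrements the worker's connection counter.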
actix_rt::spawn(async move {
let _ = f.await;
drop(guard);
});
Ok(())
}
Err(e) => {
error!("Can not convert to an async tcp stream: {}", e);
Err(())
}
})
}
}
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
name: String,
inner: F,
token: usize,
addr: SocketAddr,
_t: PhantomData<Io>,
}
impl<F, Io> StreamNewService<F, Io>
where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
pub(crate) fn create(
name: String,
token: usize,
inner: F,
addr: SocketAddr,
) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
name,
token,
inner,
addr,
_t: PhantomData,
})
}
}
impl<F, Io> InternalServiceFactory for StreamNewService<F, Io>
where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
fn name(&self, _: usize) -> &str {
&self.name
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
name: self.name.clone(),
inner: self.inner.clone(),
token: self.token,
addr: self.addr,
_t: PhantomData,
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>> {
let token = self.token;
let fut = self.inner.create().new_service(());
Box::pin(async move {
match fut.await {
Ok(inner) => {
let service = Box::new(StreamService::new(inner)) as _;
Ok((token, service))
}
Err(_) => Err(()),
}
})
}
}
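// Blanket impl: any `Fn() -> T` closure that produces an actix-service factory (with
// `Config = ()`) can be used as this crate's `ServiceFactory`, which is what
// `ServerBuilder::bind`/`listen` accept.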
impl<F, T, I> ServiceFactory<I> for F
where
F: Fn() -> T + Send + Clone + 'static,
T: BaseServiceFactory<I, Config = ()>,
I: FromStream,
{
type Factory = T;
fn create(&self) -> T {
(self)()
}
}

actix-server/src/signals.rs

@@ -0,0 +1,94 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use crate::server::Server;
/// Different types of process signals
#[allow(dead_code)]
#[derive(PartialEq, Clone, Copy, Debug)]
pub(crate) enum Signal {
/// SIGHUP
Hup,
/// SIGINT
Int,
/// SIGTERM
Term,
/// SIGQUIT
Quit,
}
pub(crate) struct Signals {
srv: Server,
#[cfg(not(unix))]
signals: futures_core::future::LocalBoxFuture<'static, std::io::Result<()>>,
#[cfg(unix)]
signals: Vec<(Signal, actix_rt::signal::unix::Signal)>,
}
impl Signals {
pub(crate) fn start(srv: Server) {
#[cfg(not(unix))]
{
actix_rt::spawn(Signals {
srv,
signals: Box::pin(actix_rt::signal::ctrl_c()),
});
}
#[cfg(unix)]
{
use actix_rt::signal::unix;
let sig_map = [
(unix::SignalKind::interrupt(), Signal::Int),
(unix::SignalKind::hangup(), Signal::Hup),
(unix::SignalKind::terminate(), Signal::Term),
(unix::SignalKind::quit(), Signal::Quit),
];
let signals = sig_map
.iter()
.filter_map(|(kind, sig)| {
unix::signal(*kind)
.map(|tokio_sig| (*sig, tokio_sig))
.map_err(|e| {
log::error!(
"Can not initialize stream handler for {:?} err: {}",
sig,
e
)
})
.ok()
})
.collect::<Vec<_>>();
actix_rt::spawn(Signals { srv, signals });
}
}
}
impl Future for Signals {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
#[cfg(not(unix))]
match self.signals.as_mut().poll(cx) {
Poll::Ready(_) => {
self.srv.signal(Signal::Int);
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
#[cfg(unix)]
{
for (sig, fut) in self.signals.iter_mut() {
if Pin::new(fut).poll_recv(cx).is_ready() {
let sig = *sig;
self.srv.signal(sig);
return Poll::Ready(());
}
}
Poll::Pending
}
}
}

actix-server/src/socket.rs

@@ -0,0 +1,261 @@
pub(crate) use std::net::{
SocketAddr as StdSocketAddr, TcpListener as StdTcpListener, ToSocketAddrs,
};
pub(crate) use mio::net::{TcpListener as MioTcpListener, TcpSocket as MioTcpSocket};
#[cfg(unix)]
pub(crate) use {
mio::net::UnixListener as MioUnixListener,
std::os::unix::net::UnixListener as StdUnixListener,
};
use std::{fmt, io};
use actix_rt::net::TcpStream;
use mio::{event::Source, Interest, Registry, Token};
pub(crate) enum MioListener {
Tcp(MioTcpListener),
#[cfg(unix)]
Uds(MioUnixListener),
}
impl MioListener {
pub(crate) fn local_addr(&self) -> SocketAddr {
match *self {
MioListener::Tcp(ref lst) => lst
.local_addr()
.map(SocketAddr::Tcp)
.unwrap_or(SocketAddr::Unknown),
#[cfg(unix)]
MioListener::Uds(ref lst) => lst
.local_addr()
.map(SocketAddr::Uds)
.unwrap_or(SocketAddr::Unknown),
}
}
pub(crate) fn accept(&self) -> io::Result<MioStream> {
match *self {
MioListener::Tcp(ref lst) => lst.accept().map(|(stream, _)| MioStream::Tcp(stream)),
#[cfg(unix)]
MioListener::Uds(ref lst) => lst.accept().map(|(stream, _)| MioStream::Uds(stream)),
}
}
}
impl Source for MioListener {
fn register(
&mut self,
registry: &Registry,
token: Token,
interests: Interest,
) -> io::Result<()> {
match *self {
MioListener::Tcp(ref mut lst) => lst.register(registry, token, interests),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => lst.register(registry, token, interests),
}
}
fn reregister(
&mut self,
registry: &Registry,
token: Token,
interests: Interest,
) -> io::Result<()> {
match *self {
MioListener::Tcp(ref mut lst) => lst.reregister(registry, token, interests),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => lst.reregister(registry, token, interests),
}
}
fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
match *self {
MioListener::Tcp(ref mut lst) => lst.deregister(registry),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => {
let res = lst.deregister(registry);
// cleanup file path
if let Ok(addr) = lst.local_addr() {
if let Some(path) = addr.as_pathname() {
let _ = std::fs::remove_file(path);
}
}
res
}
}
}
}
impl From<StdTcpListener> for MioListener {
fn from(lst: StdTcpListener) -> Self {
MioListener::Tcp(MioTcpListener::from_std(lst))
}
}
#[cfg(unix)]
impl From<StdUnixListener> for MioListener {
fn from(lst: StdUnixListener) -> Self {
MioListener::Uds(MioUnixListener::from_std(lst))
}
}
impl fmt::Debug for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
#[cfg(all(unix))]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
}
}
}
impl fmt::Display for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
#[cfg(unix)]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
}
}
}
pub(crate) enum SocketAddr {
Unknown,
Tcp(StdSocketAddr),
#[cfg(unix)]
Uds(mio::net::SocketAddr),
}
impl fmt::Display for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Self::Unknown => write!(f, "Unknown SocketAddr"),
Self::Tcp(ref addr) => write!(f, "{}", addr),
#[cfg(unix)]
Self::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
impl fmt::Debug for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Self::Unknown => write!(f, "Unknown SocketAddr"),
Self::Tcp(ref addr) => write!(f, "{:?}", addr),
#[cfg(unix)]
Self::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
#[derive(Debug)]
pub enum MioStream {
Tcp(mio::net::TcpStream),
#[cfg(unix)]
Uds(mio::net::UnixStream),
}
/// Helper trait for converting a mio stream to a tokio stream.
pub trait FromStream: Sized {
fn from_mio(sock: MioStream) -> io::Result<Self>;
}
#[cfg(windows)]
mod win_impl {
use super::*;
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
}
}
}
}
}
#[cfg(unix)]
mod unix_impl {
use super::*;
use std::os::unix::io::{FromRawFd, IntoRawFd};
use actix_rt::net::UnixStream;
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn socket_addr() {
let addr = SocketAddr::Tcp("127.0.0.1:8080".parse().unwrap());
assert!(format!("{:?}", addr).contains("127.0.0.1:8080"));
assert_eq!(format!("{}", addr), "127.0.0.1:8080");
let addr: StdSocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = MioTcpSocket::new_v4().unwrap();
socket.set_reuseaddr(true).unwrap();
socket.bind(addr).unwrap();
let tcp = socket.listen(128).unwrap();
let lst = MioListener::Tcp(tcp);
assert!(format!("{:?}", lst).contains("TcpListener"));
assert!(format!("{}", lst).contains("127.0.0.1"));
}
#[test]
#[cfg(unix)]
fn uds() {
let _ = std::fs::remove_file("/tmp/sock.xxxxx");
if let Ok(socket) = MioUnixListener::bind("/tmp/sock.xxxxx") {
let addr = socket.local_addr().expect("Couldn't get local address");
let a = SocketAddr::Uds(addr);
assert!(format!("{:?}", a).contains("/tmp/sock.xxxxx"));
assert!(format!("{}", a).contains("/tmp/sock.xxxxx"));
let lst = MioListener::Uds(socket);
assert!(format!("{:?}", lst).contains("/tmp/sock.xxxxx"));
assert!(format!("{}", lst).contains("/tmp/sock.xxxxx"));
}
}
}

actix-server/src/test_server.rs

@@ -0,0 +1,144 @@
use std::sync::mpsc;
use std::{net, thread};
use actix_rt::{net::TcpStream, System};
use crate::{Server, ServerBuilder, ServiceFactory};
/// The `TestServer` type.
///
/// `TestServer` is a very simple test server that simplifies the process of writing
/// integration tests for actix-net applications.
///
/// # Examples
///
/// ```
/// use actix_service::fn_service;
/// use actix_server::TestServer;
///
/// #[actix_rt::main]
/// async fn main() {
/// let srv = TestServer::with(|| fn_service(
/// |sock| async move {
/// println!("New connection: {:?}", sock);
/// Ok::<_, ()>(())
/// }
/// ));
///
/// println!("SOCKET: {:?}", srv.connect());
/// }
/// ```
pub struct TestServer;
/// Test server runtime
pub struct TestServerRuntime {
addr: net::SocketAddr,
host: String,
port: u16,
system: System,
}
impl TestServer {
/// Start a new server with the server builder.
pub fn start<F>(mut factory: F) -> TestServerRuntime
where
F: FnMut(ServerBuilder) -> ServerBuilder + Send + 'static,
{
let (tx, rx) = mpsc::channel();
// run server in separate thread
thread::spawn(move || {
let sys = System::new();
factory(Server::build()).workers(1).disable_signals().run();
tx.send(System::current()).unwrap();
sys.run()
});
let system = rx.recv().unwrap();
TestServerRuntime {
system,
addr: "127.0.0.1:0".parse().unwrap(),
host: "127.0.0.1".to_string(),
port: 0,
}
}
/// Start a new test server with an application factory.
pub fn with<F: ServiceFactory<TcpStream>>(factory: F) -> TestServerRuntime {
let (tx, rx) = mpsc::channel();
// run server in separate thread
thread::spawn(move || {
let sys = System::new();
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
sys.block_on(async {
Server::build()
.listen("test", tcp, factory)
.unwrap()
.workers(1)
.disable_signals()
.run();
tx.send((System::current(), local_addr)).unwrap();
});
sys.run()
});
let (system, addr) = rx.recv().unwrap();
let host = format!("{}", addr.ip());
let port = addr.port();
TestServerRuntime {
addr,
host,
port,
system,
}
}
/// Get first available unused local address
pub fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let tcp = socket.listen(1024).unwrap();
tcp.local_addr().unwrap()
}
}
impl TestServerRuntime {
/// Test server host
pub fn host(&self) -> &str {
&self.host
}
/// Test server port
pub fn port(&self) -> u16 {
self.port
}
/// Get test server address
pub fn addr(&self) -> net::SocketAddr {
self.addr
}
/// Stop the test server.
fn stop(&mut self) {
self.system.stop();
}
/// Connect to the server, returning a tokio `TcpStream`.
pub fn connect(&self) -> std::io::Result<TcpStream> {
TcpStream::from_std(net::TcpStream::connect(self.addr)?)
}
}
impl Drop for TestServerRuntime {
fn drop(&mut self) {
self.stop()
}
}

actix-server/src/waker_queue.rs

@@ -0,0 +1,89 @@
use std::{
collections::VecDeque,
ops::Deref,
sync::{Arc, Mutex, MutexGuard},
};
use mio::{Registry, Token as MioToken, Waker};
use crate::worker::WorkerHandleAccept;
/// Waker token for `mio::Poll` instance.
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);
/// `mio::Waker` with a queue for waking up the `Accept`'s `Poll` and contains the `WakerInterest`
/// the `Poll` would want to look into.
pub(crate) struct WakerQueue(Arc<(Waker, Mutex<VecDeque<WakerInterest>>)>);
impl Clone for WakerQueue {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl Deref for WakerQueue {
type Target = (Waker, Mutex<VecDeque<WakerInterest>>);
fn deref(&self) -> &Self::Target {
self.0.deref()
}
}
impl WakerQueue {
/// Construct a waker queue with the given `Poll`'s `Registry` and a fixed initial capacity.
///
/// A fixed `WAKER_TOKEN` is used to identify the wake interest and the `Poll` needs to match
/// event's token for it to properly handle `WakerInterest`.
pub(crate) fn new(registry: &Registry) -> std::io::Result<Self> {
let waker = Waker::new(registry, WAKER_TOKEN)?;
let queue = Mutex::new(VecDeque::with_capacity(16));
Ok(Self(Arc::new((waker, queue))))
}
/// Push a new interest to the queue and wake up the accept poll afterwards.
pub(crate) fn wake(&self, interest: WakerInterest) {
let (waker, queue) = self.deref();
queue
.lock()
.expect("Failed to lock WakerQueue")
.push_back(interest);
waker
.wake()
.unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e));
}
/// Get a MutexGuard of the waker queue.
pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
self.deref().1.lock().expect("Failed to lock WakerQueue")
}
/// Reset the waker queue so it does not grow infinitely.
pub(crate) fn reset(queue: &mut VecDeque<WakerInterest>) {
std::mem::swap(&mut VecDeque::<WakerInterest>::with_capacity(16), queue);
}
}
/// Types of interests we look into when `Accept`'s `Poll` is woken up by the waker.
///
/// These interests should not be confused with `mio::Interest` and are mostly not I/O related.
pub(crate) enum WakerInterest {
/// `WorkerAvailable` is an interest from `Worker` notifying `Accept` that a worker is
/// available and can accept new tasks.
WorkerAvailable(usize),
/// The `Pause`, `Resume`, and `Stop` interests come from the `ServerBuilder` future. It listens
/// to `ServerCommand` and notifies `Accept` to perform exactly these tasks.
Pause,
Resume,
Stop,
/// `Timer` is an interest sent as a delayed future. When an error happens while accepting a
/// connection, `Accept` deregisters the socket listener temporarily, then wakes up the poll and
/// registers it again after the delayed future resolves.
Timer,
/// `Worker` is an interest that happens after a worker runs into a faulted state (this is
/// determined by whether work can be sent to it successfully). `Accept` is woken up and adds
/// the new `WorkerHandleAccept`.
Worker(WorkerHandleAccept),
}

actix-server/src/worker.rs

@@ -0,0 +1,562 @@
use std::{
future::Future,
mem,
pin::Pin,
rc::Rc,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
task::{Context, Poll},
time::Duration,
};
use actix_rt::{
spawn,
time::{sleep, Instant, Sleep},
Arbiter,
};
use futures_core::{future::LocalBoxFuture, ready};
use log::{error, info, trace};
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
oneshot,
};
use crate::join_all;
use crate::service::{BoxedServerService, InternalServiceFactory};
use crate::socket::MioStream;
use crate::waker_queue::{WakerInterest, WakerQueue};
/// Stop worker message. Returns `true` on successful graceful shutdown
/// and `false` if some connections are still alive when the shutdown executes.
pub(crate) struct Stop {
graceful: bool,
tx: oneshot::Sender<bool>,
}
#[derive(Debug)]
pub(crate) struct Conn {
pub io: MioStream,
pub token: usize,
}
fn handle_pair(
idx: usize,
tx1: UnboundedSender<Conn>,
tx2: UnboundedSender<Stop>,
counter: Counter,
) -> (WorkerHandleAccept, WorkerHandleServer) {
let accept = WorkerHandleAccept {
idx,
tx: tx1,
counter,
};
let server = WorkerHandleServer { idx, tx: tx2 };
(accept, server)
}
/// The `counter: Arc<AtomicUsize>` field is shared between the `Accept` thread and the
/// `ServerWorker` thread.
///
/// `Accept` increments the counter and `ServerWorker` decrements it.
///
/// # Atomic Ordering
///
/// `Accept` always looks at its cached `Availability` field for the `ServerWorker` state.
/// It lazily increments the counter after successfully dispatching new work to `ServerWorker`.
/// On reaching the counter limit, `Accept` updates its cached `Availability` and marks the
/// worker as unable to accept any work.
///
/// `ServerWorker` decrements the counter whenever a piece of work received from `Accept` is
/// done. On reaching the counter limit, the worker uses `mio::Waker` and `WakerQueue` to wake
/// up `Accept` and notify it to update the cached `Availability` again, marking the worker as
/// able to accept work once more.
///
/// Hence, a wake-up only happens after `Accept` increments the counter to the limit,
/// and a decrement from the limit always wakes up `Accept`.
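///
/// Note: the counter starts at 1 (see `Counter::new`), so `total()` subtracts 1 to report the
/// number of in-flight connections.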
#[derive(Clone)]
pub(crate) struct Counter {
counter: Arc<AtomicUsize>,
limit: usize,
}
impl Counter {
pub(crate) fn new(limit: usize) -> Self {
Self {
counter: Arc::new(AtomicUsize::new(1)),
limit,
}
}
/// Increment the counter by 1, returning `false` when the limit is reached.
#[inline(always)]
pub(crate) fn inc(&self) -> bool {
self.counter.fetch_add(1, Ordering::Relaxed) != self.limit
}
/// Decrement the counter by 1, returning `true` when the counter drops back below the limit.
#[inline(always)]
pub(crate) fn dec(&self) -> bool {
self.counter.fetch_sub(1, Ordering::Relaxed) == self.limit
}
pub(crate) fn total(&self) -> usize {
self.counter.load(Ordering::SeqCst) - 1
}
}
pub(crate) struct WorkerCounter {
idx: usize,
inner: Rc<(WakerQueue, Counter)>,
}
impl Clone for WorkerCounter {
fn clone(&self) -> Self {
Self {
idx: self.idx,
inner: self.inner.clone(),
}
}
}
impl WorkerCounter {
pub(crate) fn new(idx: usize, waker_queue: WakerQueue, counter: Counter) -> Self {
Self {
idx,
inner: Rc::new((waker_queue, counter)),
}
}
#[inline(always)]
pub(crate) fn guard(&self) -> WorkerCounterGuard {
WorkerCounterGuard(self.clone())
}
fn total(&self) -> usize {
self.inner.1.total()
}
}
pub(crate) struct WorkerCounterGuard(WorkerCounter);
impl Drop for WorkerCounterGuard {
fn drop(&mut self) {
let (waker_queue, counter) = &*self.0.inner;
if counter.dec() {
waker_queue.wake(WakerInterest::WorkerAvailable(self.0.idx));
}
}
}
/// Handle to a worker that can send a connection message to the worker and share the
/// worker's availability with other threads.
///
/// Held by [Accept](crate::accept::Accept).
pub(crate) struct WorkerHandleAccept {
idx: usize,
tx: UnboundedSender<Conn>,
counter: Counter,
}
impl WorkerHandleAccept {
#[inline(always)]
pub(crate) fn idx(&self) -> usize {
self.idx
}
#[inline(always)]
pub(crate) fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx.send(msg).map_err(|msg| msg.0)
}
#[inline(always)]
pub(crate) fn inc_counter(&self) -> bool {
self.counter.inc()
}
}
/// Handle to a worker that can send a stop message to the worker.
///
/// Held by [ServerBuilder](crate::builder::ServerBuilder).
#[derive(Debug)]
pub(crate) struct WorkerHandleServer {
idx: usize,
tx: UnboundedSender<Stop>,
}
impl WorkerHandleServer {
pub(crate) fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (tx, rx) = oneshot::channel();
let _ = self.tx.send(Stop { graceful, tx });
rx
}
}
/// Service worker.
///
/// A worker accepts socket objects via an unbounded channel and starts stream processing.
pub(crate) struct ServerWorker {
// UnboundedReceiver<Conn> should always be the first field.
// It must be dropped first when ServerWorker is dropped.
rx: UnboundedReceiver<Conn>,
rx2: UnboundedReceiver<Stop>,
counter: WorkerCounter,
services: Box<[WorkerService]>,
factories: Box<[Box<dyn InternalServiceFactory>]>,
state: WorkerState,
shutdown_timeout: Duration,
}
struct WorkerService {
factory: usize,
status: WorkerServiceStatus,
service: BoxedServerService,
}
impl WorkerService {
fn created(&mut self, service: BoxedServerService) {
self.service = service;
self.status = WorkerServiceStatus::Unavailable;
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum WorkerServiceStatus {
Available,
Unavailable,
Failed,
Restarting,
Stopping,
Stopped,
}
/// Config for worker behavior passed down from server builder.
#[derive(Copy, Clone)]
pub(crate) struct ServerWorkerConfig {
shutdown_timeout: Duration,
max_blocking_threads: usize,
max_concurrent_connections: usize,
}
impl Default for ServerWorkerConfig {
fn default() -> Self {
// 512 is the default max blocking thread count of tokio runtime.
let max_blocking_threads = std::cmp::max(512 / num_cpus::get(), 1);
Self {
shutdown_timeout: Duration::from_secs(30),
max_blocking_threads,
max_concurrent_connections: 25600,
}
}
}
impl ServerWorkerConfig {
pub(crate) fn max_blocking_threads(&mut self, num: usize) {
self.max_blocking_threads = num;
}
pub(crate) fn max_concurrent_connections(&mut self, num: usize) {
self.max_concurrent_connections = num;
}
pub(crate) fn shutdown_timeout(&mut self, dur: Duration) {
self.shutdown_timeout = dur;
}
}
impl ServerWorker {
pub(crate) fn start(
idx: usize,
factories: Vec<Box<dyn InternalServiceFactory>>,
waker_queue: WakerQueue,
config: ServerWorkerConfig,
) -> (WorkerHandleAccept, WorkerHandleServer) {
let (tx1, rx) = unbounded_channel();
let (tx2, rx2) = unbounded_channel();
let counter = Counter::new(config.max_concurrent_connections);
let counter_clone = counter.clone();
// every worker runs in its own arbiter.
// use a custom tokio runtime builder to change the settings of the runtime.
Arbiter::with_tokio_rt(move || {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.max_blocking_threads(config.max_blocking_threads)
.build()
.unwrap()
})
.spawn(async move {
let fut = factories
.iter()
.enumerate()
.map(|(idx, factory)| {
let fut = factory.create();
async move { fut.await.map(|(t, s)| (idx, t, s)) }
})
.collect::<Vec<_>>();
// a second spawn to run !Send future tasks.
spawn(async move {
let res = join_all(fut)
.await
.into_iter()
.collect::<Result<Vec<_>, _>>();
let services = match res {
Ok(res) => res
.into_iter()
.fold(Vec::new(), |mut services, (factory, token, service)| {
assert_eq!(token, services.len());
services.push(WorkerService {
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
services
})
.into_boxed_slice(),
Err(e) => {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
return;
}
};
// a third spawn to make sure ServerWorker runs as a non-boxed future.
spawn(ServerWorker {
rx,
rx2,
services,
counter: WorkerCounter::new(idx, waker_queue, counter_clone),
factories: factories.into_boxed_slice(),
state: Default::default(),
shutdown_timeout: config.shutdown_timeout,
});
});
});
handle_pair(idx, tx1, tx2, counter)
}
fn restart_service(&mut self, idx: usize, factory_id: usize) {
let factory = &self.factories[factory_id];
trace!("Service {:?} failed, restarting", factory.name(idx));
self.services[idx].status = WorkerServiceStatus::Restarting;
self.state = WorkerState::Restarting(Restart {
factory_id,
token: idx,
fut: factory.create(),
});
}
fn shutdown(&mut self, force: bool) {
self.services
.iter_mut()
.filter(|srv| srv.status == WorkerServiceStatus::Available)
.for_each(|srv| {
srv.status = if force {
WorkerServiceStatus::Stopped
} else {
WorkerServiceStatus::Stopping
};
});
}
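/// Poll the readiness of all services.
///
/// Returns `Ok(true)` when every service is ready, `Ok(false)` when at least one is still
/// pending, and `Err((token, factory_id))` when a readiness check failed and that service
/// must be restarted.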
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (usize, usize)> {
let mut ready = true;
for (idx, srv) in self.services.iter_mut().enumerate() {
if srv.status == WorkerServiceStatus::Available
|| srv.status == WorkerServiceStatus::Unavailable
{
match srv.service.poll_ready(cx) {
Poll::Ready(Ok(_)) => {
if srv.status == WorkerServiceStatus::Unavailable {
trace!(
"Service {:?} is available",
self.factories[srv.factory].name(idx)
);
srv.status = WorkerServiceStatus::Available;
}
}
Poll::Pending => {
ready = false;
if srv.status == WorkerServiceStatus::Available {
trace!(
"Service {:?} is unavailable",
self.factories[srv.factory].name(idx)
);
srv.status = WorkerServiceStatus::Unavailable;
}
}
Poll::Ready(Err(_)) => {
error!(
"Service {:?} readiness check returned error, restarting",
self.factories[srv.factory].name(idx)
);
srv.status = WorkerServiceStatus::Failed;
return Err((idx, srv.factory));
}
}
}
}
Ok(ready)
}
}
enum WorkerState {
Available,
Unavailable,
Restarting(Restart),
Shutdown(Shutdown),
}
struct Restart {
factory_id: usize,
token: usize,
fut: LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>,
}
// Shutdown keeps the state necessary for server shutdown:
// - Sleep for the interval between shutdown progress checks.
// - Instant for the start time of the shutdown.
// - Sender for sending the shutdown outcome (forced/graceful) back to the StopCommand caller.
struct Shutdown {
timer: Pin<Box<Sleep>>,
start_from: Instant,
tx: oneshot::Sender<bool>,
}
impl Default for WorkerState {
fn default() -> Self {
Self::Unavailable
}
}
impl Drop for ServerWorker {
fn drop(&mut self) {
// Stop the Arbiter that ServerWorker runs on, when dropped.
Arbiter::current().stop();
}
}
impl Future for ServerWorker {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().get_mut();
// `Stop` message handler
if let Poll::Ready(Some(Stop { graceful, tx })) = Pin::new(&mut this.rx2).poll_recv(cx)
{
let num = this.counter.total();
if num == 0 {
info!("Shutting down worker, 0 connections");
let _ = tx.send(true);
return Poll::Ready(());
} else if graceful {
info!("Graceful worker shutdown, {} connections", num);
this.shutdown(false);
this.state = WorkerState::Shutdown(Shutdown {
timer: Box::pin(sleep(Duration::from_secs(1))),
start_from: Instant::now(),
tx,
});
} else {
info!("Force shutdown worker, {} connections", num);
this.shutdown(true);
let _ = tx.send(false);
return Poll::Ready(());
}
}
match this.state {
WorkerState::Unavailable => match this.check_readiness(cx) {
Ok(true) => {
this.state = WorkerState::Available;
self.poll(cx)
}
Ok(false) => Poll::Pending,
Err((token, idx)) => {
this.restart_service(token, idx);
self.poll(cx)
}
},
WorkerState::Restarting(ref mut restart) => {
let factory_id = restart.factory_id;
let token = restart.token;
let (token_new, service) = ready!(restart.fut.as_mut().poll(cx))
.unwrap_or_else(|_| {
panic!(
"Can not restart {:?} service",
this.factories[factory_id].name(token)
)
});
assert_eq!(token, token_new);
trace!(
"Service {:?} has been restarted",
this.factories[factory_id].name(token)
);
this.services[token].created(service);
this.state = WorkerState::Unavailable;
self.poll(cx)
}
WorkerState::Shutdown(ref mut shutdown) => {
// Wait for 1 second.
ready!(shutdown.timer.as_mut().poll(cx));
if this.counter.total() == 0 {
// Graceful shutdown.
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
let _ = shutdown.tx.send(true);
}
Poll::Ready(())
} else if shutdown.start_from.elapsed() >= this.shutdown_timeout {
// Timeout forceful shutdown.
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
let _ = shutdown.tx.send(false);
}
Poll::Ready(())
} else {
// Reset timer and wait for 1 second.
let time = Instant::now() + Duration::from_secs(1);
shutdown.timer.as_mut().reset(time);
shutdown.timer.as_mut().poll(cx)
}
}
// actively poll stream and handle worker command
WorkerState::Available => loop {
match this.check_readiness(cx) {
Ok(true) => {}
Ok(false) => {
trace!("Worker is unavailable");
this.state = WorkerState::Unavailable;
return self.poll(cx);
}
Err((token, idx)) => {
this.restart_service(token, idx);
return self.poll(cx);
}
}
// handle incoming io stream
match ready!(Pin::new(&mut this.rx).poll_recv(cx)) {
Some(msg) => {
let guard = this.counter.guard();
let _ = this.services[msg.token].service.call((guard, msg.io));
}
None => return Poll::Ready(()),
};
},
}
}
}

actix-server/tests/test_server.rs

@@ -0,0 +1,453 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{mpsc, Arc};
use std::{net, thread, time::Duration};
use actix_rt::{net::TcpStream, time::sleep};
use actix_server::Server;
use actix_service::fn_service;
use actix_utils::future::ok;
use futures_util::future::lazy;
fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let tcp = socket.listen(32).unwrap();
tcp.local_addr().unwrap()
}
#[test]
fn test_bind() {
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new();
let srv = sys.block_on(lazy(|_| {
Server::build()
.workers(1)
.disable_signals()
.bind("test", addr, move || fn_service(|_| ok::<_, ()>(())))
.unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
}
#[test]
fn test_listen() {
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new();
let lst = net::TcpListener::bind(addr).unwrap();
sys.block_on(async {
Server::build()
.disable_signals()
.workers(1)
.listen("test", lst, move || fn_service(|_| ok::<_, ()>(())))
.unwrap()
.run();
let _ = tx.send(actix_rt::System::current());
});
let _ = sys.run();
});
let sys = rx.recv().unwrap();
thread::sleep(Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
}
#[test]
#[cfg(unix)]
fn test_start() {
use std::io::Read;
use actix_codec::{BytesCodec, Framed};
use bytes::Bytes;
use futures_util::sink::SinkExt;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new();
let srv = sys.block_on(lazy(|_| {
Server::build()
.backlog(100)
.disable_signals()
.bind("test", addr, move || {
fn_service(|io: TcpStream| async move {
let mut f = Framed::new(io, BytesCodec);
f.send(Bytes::from_static(b"test")).await.unwrap();
Ok::<_, ()>(())
})
})
.unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (srv, sys) = rx.recv().unwrap();
let mut buf = [1u8; 4];
let mut conn = net::TcpStream::connect(addr).unwrap();
let _ = conn.read_exact(&mut buf);
assert_eq!(buf, b"test"[..]);
// pause
let _ = srv.pause();
thread::sleep(Duration::from_millis(200));
let mut conn = net::TcpStream::connect(addr).unwrap();
conn.set_read_timeout(Some(Duration::from_millis(100)))
.unwrap();
let res = conn.read_exact(&mut buf);
assert!(res.is_err());
// resume
let _ = srv.resume();
thread::sleep(Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok());
let mut buf = [0u8; 4];
let mut conn = net::TcpStream::connect(addr).unwrap();
let _ = conn.read_exact(&mut buf);
assert_eq!(buf, b"test"[..]);
// stop
let _ = srv.stop(false);
thread::sleep(Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_err());
thread::sleep(Duration::from_millis(100));
sys.stop();
let _ = h.join();
}
#[actix_rt::test]
async fn test_max_concurrent_connections() {
// Note:
// A TCP listener accepts connections based on its backlog setting.
//
// The limit tested here, on the other hand, only applies to the number of concurrent TCP
// streams a worker thread will accept.
use tokio::io::AsyncWriteExt;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();
let max_conn = 3;
let h = thread::spawn(move || {
actix_rt::System::new().block_on(async {
let server = Server::build()
// Set a relatively higher backlog.
.backlog(12)
// max connections per worker is 3.
.maxconn(max_conn)
.workers(1)
.disable_signals()
.bind("test", addr, move || {
let counter = counter.clone();
fn_service(move |_io: TcpStream| {
let counter = counter.clone();
async move {
counter.fetch_add(1, Ordering::SeqCst);
sleep(Duration::from_secs(20)).await;
counter.fetch_sub(1, Ordering::SeqCst);
Ok::<(), ()>(())
}
})
})?
.run();
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
});
let (srv, sys) = rx.recv().unwrap();
let mut conns = vec![];
for _ in 0..12 {
let conn = tokio::net::TcpStream::connect(addr).await.unwrap();
conns.push(conn);
}
sleep(Duration::from_secs(5)).await;
// counter would remain at 3 even with 12 successful connections,
// and 9 of them remain in the backlog.
assert_eq!(max_conn, counter_clone.load(Ordering::SeqCst));
for mut conn in conns {
conn.shutdown().await.unwrap();
}
srv.stop(false).await;
sys.stop();
let _ = h.join().unwrap();
}
#[actix_rt::test]
async fn test_service_restart() {
use std::task::{Context, Poll};
use actix_service::{fn_factory, Service};
use futures_core::future::LocalBoxFuture;
use tokio::io::AsyncWriteExt;
struct TestService(Arc<AtomicUsize>);
impl Service<TcpStream> for TestService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let TestService(ref counter) = self;
let c = counter.fetch_add(1, Ordering::SeqCst);
// Force the service to restart on first readiness check.
if c > 0 {
Poll::Ready(Ok(()))
} else {
Poll::Ready(Err(()))
}
}
fn call(&self, _: TcpStream) -> Self::Future {
Box::pin(async { Ok(()) })
}
}
let addr1 = unused_addr();
let addr2 = unused_addr();
let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = Arc::new(AtomicUsize::new(0));
let num_clone = num.clone();
let num2_clone = num2.clone();
let h = thread::spawn(move || {
let num = num.clone();
actix_rt::System::new().block_on(async {
let server = Server::build()
.backlog(1)
.disable_signals()
.bind("addr1", addr1, move || {
let num = num.clone();
fn_factory(move || {
let num = num.clone();
async move { Ok::<_, ()>(TestService(num)) }
})
})
.unwrap()
.bind("addr2", addr2, move || {
let num2 = num2.clone();
fn_factory(move || {
let num2 = num2.clone();
async move { Ok::<_, ()>(TestService(num2)) }
})
})
.unwrap()
.workers(1)
.run();
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
});
let (server, sys) = rx.recv().unwrap();
for _ in 0..5 {
TcpStream::connect(addr1)
.await
.unwrap()
.shutdown()
.await
.unwrap();
TcpStream::connect(addr2)
.await
.unwrap()
.shutdown()
.await
.unwrap();
}
sleep(Duration::from_secs(3)).await;
assert!(num_clone.load(Ordering::SeqCst) > 5);
assert!(num2_clone.load(Ordering::SeqCst) > 5);
sys.stop();
let _ = server.stop(false);
let _ = h.join().unwrap();
}
#[ignore]
#[actix_rt::test]
async fn worker_restart() {
use actix_service::{Service, ServiceFactory};
use futures_core::future::LocalBoxFuture;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
struct TestServiceFactory(Arc<AtomicUsize>);
impl ServiceFactory<TcpStream> for TestServiceFactory {
type Response = ();
type Error = ();
type Config = ();
type Service = TestService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: Self::Config) -> Self::Future {
let counter = self.0.fetch_add(1, Ordering::Relaxed);
Box::pin(async move { Ok(TestService(counter)) })
}
}
struct TestService(usize);
impl Service<TcpStream> for TestService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, stream: TcpStream) -> Self::Future {
let counter = self.0;
let mut stream = stream.into_std().unwrap();
use std::io::Write;
let str = counter.to_string();
let buf = str.as_bytes();
let mut written = 0;
while written < buf.len() {
if let Ok(n) = stream.write(&buf[written..]) {
written += n;
}
}
stream.flush().unwrap();
stream.shutdown(net::Shutdown::Write).unwrap();
// Force worker 2 to restart its service once.
if counter == 2 {
panic!("panic on purpose")
} else {
Box::pin(async { Ok(()) })
}
}
}
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let counter = Arc::new(AtomicUsize::new(1));
let h = thread::spawn(move || {
let counter = counter.clone();
actix_rt::System::new().block_on(async {
let server = Server::build()
.disable_signals()
.bind("addr", addr, move || TestServiceFactory(counter.clone()))
.unwrap()
.workers(2)
.run();
let _ = tx.send((server.clone(), actix_rt::System::current()));
server.await
})
});
let (server, sys) = rx.recv().unwrap();
sleep(Duration::from_secs(3)).await;
let mut buf = [0; 8];
// Worker 1 does not restart and returns its id consistently.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// Worker 2 dies after returning its response.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("2", id);
stream.shutdown().await.unwrap();
// request to worker 1
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// Worker 2 is restarting, so the request goes to worker 1.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// Worker 2 has restarted, but worker 1 is still next in line to accept a connection.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// Worker 2 accepts connections again, but its service id is now 3.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("3", id);
stream.shutdown().await.unwrap();
sys.stop();
let _ = server.stop(false);
let _ = h.join().unwrap();
}

actix-service/CHANGES.md Normal file

@@ -0,0 +1,333 @@
# Changes
## Unreleased - 2021-xx-xx
## 2.0.0 - 2021-04-16
* Removed pipeline and related structs/functions. [#335]
[#335]: https://github.com/actix/actix-net/pull/335
## 2.0.0-beta.5 - 2021-03-15
* Add default `Service` trait impl for `Rc<S: Service>` and `&S: Service`. [#288]
* Add `boxed::rc_service` function for constructing `boxed::RcService` type [#290]
[#288]: https://github.com/actix/actix-net/pull/288
[#290]: https://github.com/actix/actix-net/pull/290
## 2.0.0-beta.4 - 2021-02-04
* `Service::poll_ready` and `Service::call` receive `&self`. [#247]
* `apply_fn` and `apply_fn_factory` now receive `Fn(Req, &Service)` function type. [#247]
* `apply_cfg` and `apply_cfg_factory` now receive `Fn(Req, &Service)` function type. [#247]
* `fn_service` and friends now receive `Fn(Req)` function type. [#247]
[#247]: https://github.com/actix/actix-net/pull/247
## 2.0.0-beta.3 - 2021-01-09
* The `forward_ready!` macro converts errors. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 2.0.0-beta.2 - 2021-01-03
* Remove redundant type parameter from `map_config`.
## 2.0.0-beta.1 - 2020-12-28
* `Service`, other traits, and many type signatures now take the request type as a type
parameter instead of an associated type. [#232]
* Add `always_ready!` and `forward_ready!` macros. [#233]
* Crate is now `no_std`. [#233]
* Migrate pin projections to `pin-project-lite`. [#233]
* Remove `AndThenApplyFn` and Pipeline `and_then_apply_fn`. Use the
`.and_then(apply_fn(...))` construction. [#233]
* Move non-vital methods to `ServiceExt` and `ServiceFactoryExt` extension traits. [#235]
[#232]: https://github.com/actix/actix-net/pull/232
[#233]: https://github.com/actix/actix-net/pull/233
[#235]: https://github.com/actix/actix-net/pull/235
## 1.0.6 - 2020-08-09
### Fixed
* Removed unsound custom Cell implementation that allowed obtaining several mutable references to
the same data, which is undefined behavior in Rust and could lead to violations of memory safety. External code could obtain several mutable references to the same data through
service combinators. Attempts to acquire several mutable references to the same data will instead
result in a panic.
## [1.0.5] - 2020-01-16
### Fixed
* Fixed unsoundness in .and_then()/.then() service combinators
## [1.0.4] - 2020-01-15
### Fixed
* Revert 1.0.3 change
## [1.0.3] - 2020-01-15
### Fixed
* Fixed unsoundness in `AndThenService` impl
## [1.0.2] - 2020-01-08
### Added
* Add `into_service` helper function
## [1.0.1] - 2019-12-22
### Changed
* `map_config()` and `unit_config()` accept an `IntoServiceFactory` type
## [1.0.0] - 2019-12-11
### Added
* Add Clone impl for Apply service
## [1.0.0-alpha.4] - 2019-12-08
### Changed
* Renamed `service_fn` to `fn_service`
* Renamed `factory_fn` to `fn_factory`
* Renamed `factory_fn_cfg` to `fn_factory_with_config`
## [1.0.0-alpha.3] - 2019-12-06
### Changed
* Add missing Clone impls
* Restore `Transform::map_init_err()` combinator
* Restore `Service/Factory::apply_fn()` in form of `Pipeline/Factory::and_then_apply_fn()`
* Optimize service combinators and futures memory layout
## [1.0.0-alpha.2] - 2019-12-02
### Changed
* Use owned config value for service factory
* Renamed BoxedNewService/BoxedService to BoxServiceFactory/BoxService
## [1.0.0-alpha.1] - 2019-11-25
### Changed
* Migrated to `std::future`
* `NewService` renamed to `ServiceFactory`
* Added `pipeline` and `pipeline_factory` function
## [0.4.2] - 2019-08-27
### Fixed
* Check service readiness for `new_apply_cfg` combinator
## [0.4.1] - 2019-06-06
### Added
* Add `new_apply_cfg` function
## [0.4.0] - 2019-05-12
### Changed
* Use associated type for `NewService` config
* Change `apply_cfg` function
* Renamed helper functions
### Added
* Add `NewService::map_config` and `NewService::unit_config` combinators
## [0.3.6] - 2019-04-07
### Changed
* Poll boxed service call result immediately
## [0.3.5] - 2019-03-29
### Added
* Add `impl<S: Service> Service for Rc<RefCell<S>>`
## [0.3.4] - 2019-03-12
### Added
* Add `Transform::from_err()` combinator
* Add `apply_fn` helper
* Add `apply_fn_factory` helper
* Add `apply_transform` helper
* Add `apply_cfg` helper
## [0.3.3] - 2019-03-09
### Added
* Add `ApplyTransform` new service for transform and new service.
* Add `NewService::apply_cfg()` combinator, which allows using a nested `NewService` with a
different config parameter.
### Changed
* Revert IntoFuture change
## [0.3.2] - 2019-03-04
### Changed
* Change `NewService::Future` and `Transform::Future` to the `IntoFuture` trait.
* Export `AndThenTransform` type
## [0.3.1] - 2019-03-04
### Changed
* Simplify Transform trait
## [0.3.0] - 2019-03-02
### Added
* Added boxed NewService and Service.
### Changed
* Added `Config` parameter to `NewService` trait.
* Added `Config` parameter to `NewTransform` trait.
## [0.2.2] - 2019-02-19
### Added
* Added `NewService` impl for `Rc<S> where S: NewService`
* Added `NewService` impl for `Arc<S> where S: NewService`
## [0.2.1] - 2019-02-03
### Changed
* Generalize `.apply` combinator with Transform trait
## [0.2.0] - 2019-02-01
### Changed
* Use associated type instead of generic for Service definition.
* Before:
```rust
impl Service<Request> for Client {
type Response = Response;
// ...
}
```
* After:
```rust
impl Service for Client {
type Request = Request;
type Response = Response;
// ...
}
```
## [0.1.6] - 2019-01-24
### Changed
* Use `FnMut` instead of `Fn` for .apply() and .map() combinators and `FnService` type
* Change `.apply()` error semantic, new service's error is `From<Self::Error>`
## [0.1.5] - 2019-01-13
### Changed
* Make `Out::Error` convertible from `T::Error` for apply combinator
## [0.1.4] - 2019-01-11
### Changed
* Use `FnMut` instead of `Fn` for `FnService`
## [0.1.3] - 2018-12-12
### Changed
* Split service combinators to separate trait
## [0.1.2] - 2018-12-12
### Fixed
* Release future early for `.and_then()` and `.then()` combinators
## [0.1.1] - 2018-12-09
### Added
* Added Service impl for Box<S: Service>
## [0.1.0] - 2018-12-09
* Initial import
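To make the 2.0 shape described above concrete, here is a minimal sketch (illustrative only, not part of this diff) of a hand-written service under the current trait: the request type is a generic parameter and both `poll_ready` and `call` take `&self`. The `Echo` type is invented, and `futures_util`/`actix_rt` are assumed as in this crate's dev-dependencies.

```rust
use std::task::{Context, Poll};

use actix_service::Service;
use futures_util::future::{ready, Ready};

struct Echo;

impl Service<String> for Echo {
    type Response = String;
    type Error = ();
    type Future = Ready<Result<Self::Response, Self::Error>>;

    // Since 2.0.0-beta.4 both methods take `&self` rather than `&mut self`.
    fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&self, req: String) -> Self::Future {
        ready(Ok(req))
    }
}

#[actix_rt::main]
async fn main() {
    let svc = Echo;
    assert_eq!(svc.call("ping".to_owned()).await.unwrap(), "ping");
}
```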

actix-service/Cargo.toml Normal file

@@ -0,0 +1,28 @@
[package]
name = "actix-service"
version = "2.0.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
"fakeshadow <24548779@qq.com>",
]
description = "Service trait and combinators for representing asynchronous request/response operations."
keywords = ["network", "framework", "async", "futures", "service"]
categories = ["network-programming", "asynchronous"]
repository = "https://github.com/actix/actix-net"
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_service"
path = "src/lib.rs"
[dependencies]
futures-core = { version = "0.3.7", default-features = false }
paste = "1"
pin-project-lite = "0.2"
[dev-dependencies]
actix-rt = "2.0.0"
actix-utils = "3.0.0"
futures-util = { version = "0.3.7", default-features = false }

actix-service/LICENSE-APACHE Symbolic link

@@ -0,0 +1 @@
../LICENSE-APACHE

actix-service/LICENSE-MIT Symbolic link

@@ -0,0 +1 @@
../LICENSE-MIT

actix-service/README.md Normal file

@@ -0,0 +1,13 @@
# actix-service
> Service trait and combinators for representing asynchronous request/response operations.
[![crates.io](https://img.shields.io/crates/v/actix-service?label=latest)](https://crates.io/crates/actix-service)
[![Documentation](https://docs.rs/actix-service/badge.svg?version=2.0.0)](https://docs.rs/actix-service/2.0.0)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![License](https://img.shields.io/crates/l/actix-service.svg)
[![Dependency Status](https://deps.rs/crate/actix-service/2.0.0/status.svg)](https://deps.rs/crate/actix-service/2.0.0)
![Download](https://img.shields.io/crates/d/actix-service.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
See documentation for detailed explanations of these components: https://docs.rs/actix-service.

actix-service/src/and_then.rs Normal file

@@ -0,0 +1,346 @@
use alloc::rc::Rc;
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `and_then` combinator, chaining a computation onto the end of another service
/// which completes successfully.
///
/// This is created by the `Pipeline::and_then` method.
pub struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
impl<A, B, Req> AndThenService<A, B, Req> {
/// Create new `AndThen` combinator
pub(crate) fn new(a: A, b: B) -> Self
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
Self(Rc::new((a, b)), PhantomData)
}
}
impl<A, B, Req> Clone for AndThenService<A, B, Req> {
fn clone(&self) -> Self {
AndThenService(self.0.clone(), PhantomData)
}
}
impl<A, B, Req> Service<Req> for AndThenService<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
type Response = B::Response;
type Error = A::Error;
type Future = AndThenServiceResponse<A, B, Req>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let (a, b) = &*self.0;
let not_ready = !a.poll_ready(cx)?.is_ready();
if !b.poll_ready(cx)?.is_ready() || not_ready {
Poll::Pending
} else {
Poll::Ready(Ok(()))
}
}
fn call(&self, req: Req) -> Self::Future {
AndThenServiceResponse {
state: State::A {
fut: self.0 .0.call(req),
b: Some(self.0.clone()),
},
}
}
}
pin_project! {
pub struct AndThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
#[pin]
state: State<A, B, Req>,
}
}
pin_project! {
#[project = StateProj]
enum State<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
A {
#[pin]
fut: A::Future,
b: Option<Rc<(A, B)>>,
},
B {
#[pin]
fut: B::Future,
},
}
}
impl<A, B, Req> Future for AndThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
type Output = Result<B::Response, A::Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
StateProj::A { fut, b } => {
let res = ready!(fut.poll(cx))?;
let b = b.take().unwrap();
let fut = b.1.call(res);
this.state.set(State::B { fut });
self.poll(cx)
}
StateProj::B { fut } => fut.poll(cx),
}
}
}
/// `.and_then()` service factory combinator
pub struct AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
inner: Rc<(A, B)>,
_phantom: PhantomData<Req>,
}
impl<A, B, Req> AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
/// Create new `AndThenFactory` combinator
pub(crate) fn new(a: A, b: B) -> Self {
Self {
inner: Rc::new((a, b)),
_phantom: PhantomData,
}
}
}
impl<A, B, Req> ServiceFactory<Req> for AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
type Response = B::Response;
type Error = A::Error;
type Config = A::Config;
type Service = AndThenService<A::Service, B::Service, Req>;
type InitError = A::InitError;
type Future = AndThenServiceFactoryResponse<A, B, Req>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
let inner = &*self.inner;
AndThenServiceFactoryResponse::new(
inner.0.new_service(cfg.clone()),
inner.1.new_service(cfg),
)
}
}
impl<A, B, Req> Clone for AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
_phantom: PhantomData,
}
}
}
pin_project! {
pub struct AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
{
#[pin]
fut_a: A::Future,
#[pin]
fut_b: B::Future,
a: Option<A::Service>,
b: Option<B::Service>,
}
}
impl<A, B, Req> AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
{
fn new(fut_a: A::Future, fut_b: B::Future) -> Self {
AndThenServiceFactoryResponse {
fut_a,
fut_b,
a: None,
b: None,
}
}
}
impl<A, B, Req> Future for AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response, Error = A::Error, InitError = A::InitError>,
{
type Output = Result<AndThenService<A::Service, B::Service, Req>, A::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if this.a.is_none() {
if let Poll::Ready(service) = this.fut_a.poll(cx)? {
*this.a = Some(service);
}
}
if this.b.is_none() {
if let Poll::Ready(service) = this.fut_b.poll(cx)? {
*this.b = Some(service);
}
}
if this.a.is_some() && this.b.is_some() {
Poll::Ready(Ok(AndThenService::new(
this.a.take().unwrap(),
this.b.take().unwrap(),
)))
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use alloc::rc::Rc;
use core::{
cell::Cell,
task::{Context, Poll},
};
use futures_util::future::lazy;
use crate::{
fn_factory, ok,
pipeline::{pipeline, pipeline_factory},
ready, Ready, Service, ServiceFactory,
};
struct Srv1(Rc<Cell<usize>>);
impl Service<&'static str> for Srv1 {
type Response = &'static str;
type Error = ();
type Future = Ready<Result<Self::Response, ()>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Poll::Ready(Ok(()))
}
fn call(&self, req: &'static str) -> Self::Future {
ok(req)
}
}
#[derive(Clone)]
struct Srv2(Rc<Cell<usize>>);
impl Service<&'static str> for Srv2 {
type Response = (&'static str, &'static str);
type Error = ();
type Future = Ready<Result<Self::Response, ()>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Poll::Ready(Ok(()))
}
fn call(&self, req: &'static str) -> Self::Future {
ok((req, "srv2"))
}
}
#[actix_rt::test]
async fn test_poll_ready() {
let cnt = Rc::new(Cell::new(0));
let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt.clone()));
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Ok(())));
assert_eq!(cnt.get(), 2);
}
#[actix_rt::test]
async fn test_call() {
let cnt = Rc::new(Cell::new(0));
let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt));
let res = srv.call("srv1").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv1", "srv2"));
}
#[actix_rt::test]
async fn test_new_service() {
let cnt = Rc::new(Cell::new(0));
let cnt2 = cnt.clone();
let new_srv =
pipeline_factory(fn_factory(move || ready(Ok::<_, ()>(Srv1(cnt2.clone())))))
.and_then(move || ready(Ok(Srv2(cnt.clone()))));
let srv = new_srv.new_service(()).await.unwrap();
let res = srv.call("srv1").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv1", "srv2"));
}
}
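A short usage sketch for the `and_then` combinator above (illustrative only; the two services and their types are invented), chaining a pair of `fn_service`s with `ServiceExt::and_then`:

```rust
use actix_service::{fn_service, Service, ServiceExt};

#[actix_rt::main]
async fn main() {
    // First service doubles the number, second service renders it as a string.
    let double = fn_service(|n: u32| async move { Ok::<_, ()>(n * 2) });
    let render = fn_service(|n: u32| async move { Ok::<_, ()>(n.to_string()) });

    // `and_then` only invokes the second service when the first succeeds.
    let svc = double.and_then(render);
    assert_eq!(svc.call(21).await.unwrap(), "42");
}
```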

actix-service/src/apply.rs Normal file

@@ -0,0 +1,276 @@
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{IntoService, IntoServiceFactory, Service, ServiceFactory};
/// Apply transform function to a service.
///
/// The In and Out type params refer to the request and response types for the wrapped service.
pub fn apply_fn<I, S, F, Fut, Req, In, Res, Err>(
service: I,
wrap_fn: F,
) -> Apply<S, F, Req, In, Res, Err>
where
I: IntoService<S, In>,
S: Service<In, Error = Err>,
F: Fn(Req, &S) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
Apply::new(service.into_service(), wrap_fn)
}
/// Service factory that produces `apply_fn` service.
///
/// The In and Out type params refer to the request and response types for the wrapped service.
pub fn apply_fn_factory<I, SF, F, Fut, Req, In, Res, Err>(
service: I,
f: F,
) -> ApplyFactory<SF, F, Req, In, Res, Err>
where
I: IntoServiceFactory<SF, In>,
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
ApplyFactory::new(service.into_factory(), f)
}
/// `Apply` service combinator.
///
/// The In and Out type params refer to the request and response types for the wrapped service.
pub struct Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err>,
{
service: S,
wrap_fn: F,
_phantom: PhantomData<(Req, In, Res, Err)>,
}
impl<S, F, Fut, Req, In, Res, Err> Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err>,
F: Fn(Req, &S) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
/// Create new `Apply` combinator
fn new(service: S, wrap_fn: F) -> Self {
Self {
service,
wrap_fn,
_phantom: PhantomData,
}
}
}
impl<S, F, Fut, Req, In, Res, Err> Clone for Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err> + Clone,
F: Fn(Req, &S) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
Apply {
service: self.service.clone(),
wrap_fn: self.wrap_fn.clone(),
_phantom: PhantomData,
}
}
}
impl<S, F, Fut, Req, In, Res, Err> Service<Req> for Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err>,
F: Fn(Req, &S) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
type Error = Err;
type Future = Fut;
crate::forward_ready!(service);
fn call(&self, req: Req) -> Self::Future {
(self.wrap_fn)(req, &self.service)
}
}
/// `ApplyFactory` service factory combinator.
pub struct ApplyFactory<SF, F, Req, In, Res, Err> {
factory: SF,
wrap_fn: F,
_phantom: PhantomData<(Req, In, Res, Err)>,
}
impl<SF, F, Fut, Req, In, Res, Err> ApplyFactory<SF, F, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
/// Create new `ApplyFactory` service factory instance
fn new(factory: SF, wrap_fn: F) -> Self {
Self {
factory,
wrap_fn,
_phantom: PhantomData,
}
}
}
impl<SF, F, Fut, Req, In, Res, Err> Clone for ApplyFactory<SF, F, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err> + Clone,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
Self {
factory: self.factory.clone(),
wrap_fn: self.wrap_fn.clone(),
_phantom: PhantomData,
}
}
}
impl<SF, F, Fut, Req, In, Res, Err> ServiceFactory<Req>
for ApplyFactory<SF, F, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
type Error = Err;
type Config = SF::Config;
type Service = Apply<SF::Service, F, Req, In, Res, Err>;
type InitError = SF::InitError;
type Future = ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>;
fn new_service(&self, cfg: SF::Config) -> Self::Future {
let svc = self.factory.new_service(cfg);
ApplyServiceFactoryResponse::new(svc, self.wrap_fn.clone())
}
}
pin_project! {
pub struct ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
#[pin]
fut: SF::Future,
wrap_fn: Option<F>,
_phantom: PhantomData<(Req, Res)>,
}
}
impl<SF, F, Fut, Req, In, Res, Err> ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
fn new(fut: SF::Future, wrap_fn: F) -> Self {
Self {
fut,
wrap_fn: Some(wrap_fn),
_phantom: PhantomData,
}
}
}
impl<SF, F, Fut, Req, In, Res, Err> Future
for ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: Fn(Req, &SF::Service) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
type Output = Result<Apply<SF::Service, F, Req, In, Res, Err>, SF::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let svc = ready!(this.fut.poll(cx))?;
Poll::Ready(Ok(Apply::new(svc, this.wrap_fn.take().unwrap())))
}
}
#[cfg(test)]
mod tests {
use core::task::Poll;
use futures_util::future::lazy;
use super::*;
use crate::{
ok,
pipeline::{pipeline, pipeline_factory},
Ready, Service, ServiceFactory,
};
#[derive(Clone)]
struct Srv;
impl Service<()> for Srv {
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
crate::always_ready!();
fn call(&self, _: ()) -> Self::Future {
ok(())
}
}
#[actix_rt::test]
async fn test_call() {
let srv = pipeline(apply_fn(Srv, |req: &'static str, srv| {
let fut = srv.call(());
async move {
fut.await.unwrap();
Ok((req, ()))
}
}));
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
let res = srv.call("srv").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv", ()));
}
#[actix_rt::test]
async fn test_new_service() {
let new_srv = pipeline_factory(apply_fn_factory(
|| ok::<_, ()>(Srv),
|req: &'static str, srv| {
let fut = srv.call(());
async move {
fut.await.unwrap();
Ok((req, ()))
}
},
));
let srv = new_srv.new_service(()).await.unwrap();
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
let res = srv.call("srv").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv", ()));
}
}
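A usage sketch for `apply_fn` above (illustrative only; the request/response types are invented): the wrapping closure receives the incoming request together with a reference to the inner service and decides how to drive it.

```rust
use actix_service::{apply_fn, fn_service, Service};

#[actix_rt::main]
async fn main() {
    // Inner service works on numbers.
    let double = fn_service(|n: u32| async move { Ok::<_, ()>(n * 2) });

    // The wrapper parses a string request, forwards the number, and renders the result.
    let svc = apply_fn(double, |req: &'static str, srv| {
        let fut = srv.call(req.parse::<u32>().unwrap_or(0));
        async move { fut.await.map(|n| n.to_string()) }
    });

    assert_eq!(svc.call("21").await.unwrap(), "42");
}
```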

actix-service/src/apply_cfg.rs Normal file

@@ -0,0 +1,233 @@
use alloc::rc::Rc;
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use crate::{Service, ServiceFactory};
/// Convert `Fn(Config, &Service1) -> Future<Service2>` fn to a service factory.
pub fn apply_cfg<S1, Req, F, Cfg, Fut, S2, Err>(
srv: S1,
f: F,
) -> impl ServiceFactory<
Req,
Config = Cfg,
Response = S2::Response,
Error = S2::Error,
Service = S2,
InitError = Err,
Future = Fut,
> + Clone
where
S1: Service<Req>,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
ApplyConfigService {
srv: Rc::new((srv, f)),
_phantom: PhantomData,
}
}
/// Convert `Fn(Config, &Service1) -> Future<Service2>` fn to a service factory.
///
/// `Service1` is constructed by the wrapped `SF` factory.
pub fn apply_cfg_factory<SF, Req, F, Cfg, Fut, S>(
factory: SF,
f: F,
) -> impl ServiceFactory<
Req,
Config = Cfg,
Response = S::Response,
Error = S::Error,
Service = S,
InitError = SF::InitError,
> + Clone
where
SF: ServiceFactory<Req, Config = ()>,
F: Fn(Cfg, &SF::Service) -> Fut,
SF::InitError: From<SF::Error>,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
ApplyConfigServiceFactory {
srv: Rc::new((factory, f)),
_phantom: PhantomData,
}
}
/// Convert `Fn(Config, &Service) -> Future<Service>` fn to a `ServiceFactory`.
struct ApplyConfigService<S1, Req, F, Cfg, Fut, S2, Err>
where
S1: Service<Req>,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
srv: Rc<(S1, F)>,
_phantom: PhantomData<(Cfg, Req, Fut, S2)>,
}
impl<S1, Req, F, Cfg, Fut, S2, Err> Clone for ApplyConfigService<S1, Req, F, Cfg, Fut, S2, Err>
where
S1: Service<Req>,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
fn clone(&self) -> Self {
ApplyConfigService {
srv: self.srv.clone(),
_phantom: PhantomData,
}
}
}
impl<S1, Req, F, Cfg, Fut, S2, Err> ServiceFactory<Req>
for ApplyConfigService<S1, Req, F, Cfg, Fut, S2, Err>
where
S1: Service<Req>,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
type Response = S2::Response;
type Error = S2::Error;
type Config = Cfg;
type Service = S2;
type InitError = Err;
type Future = Fut;
fn new_service(&self, cfg: Cfg) -> Self::Future {
let (t, f) = &*self.srv;
f(cfg, t)
}
}
/// Convert `Fn(Config, &Service1) -> Future<Service2>` fn to a `ServiceFactory`; `Service1` is built by the wrapped factory.
struct ApplyConfigServiceFactory<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
srv: Rc<(SF, F)>,
_phantom: PhantomData<(Cfg, Req, Fut, S)>,
}
impl<SF, Req, F, Cfg, Fut, S> Clone for ApplyConfigServiceFactory<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
fn clone(&self) -> Self {
Self {
srv: self.srv.clone(),
_phantom: PhantomData,
}
}
}
impl<SF, Req, F, Cfg, Fut, S> ServiceFactory<Req>
for ApplyConfigServiceFactory<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Config = Cfg;
type Service = S;
type InitError = SF::InitError;
type Future = ApplyConfigServiceFactoryResponse<SF, Req, F, Cfg, Fut, S>;
fn new_service(&self, cfg: Cfg) -> Self::Future {
ApplyConfigServiceFactoryResponse {
cfg: Some(cfg),
store: self.srv.clone(),
state: State::A {
fut: self.srv.0.new_service(()),
},
}
}
}
pin_project! {
struct ApplyConfigServiceFactoryResponse<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
cfg: Option<Cfg>,
store: Rc<(SF, F)>,
#[pin]
state: State<SF, Fut, S, Req>,
}
}
pin_project! {
#[project = StateProj]
enum State<SF, Fut, S, Req>
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
A { #[pin] fut: SF::Future },
B { svc: SF::Service },
C { #[pin] fut: Fut },
}
}
impl<SF, Req, F, Cfg, Fut, S> Future
for ApplyConfigServiceFactoryResponse<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
type Output = Result<S, SF::InitError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
StateProj::A { fut } => {
let svc = ready!(fut.poll(cx))?;
this.state.set(State::B { svc });
self.poll(cx)
}
StateProj::B { svc } => {
ready!(svc.poll_ready(cx))?;
{
let (_, f) = &**this.store;
let fut = f(this.cfg.take().unwrap(), svc);
this.state.set(State::C { fut });
}
self.poll(cx)
}
StateProj::C { fut } => fut.poll(cx),
}
}
}
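A usage sketch for `apply_cfg` above (illustrative only; the base service, config value, and multiplier logic are invented). The closure receives the config plus a reference to the base service and resolves to the service that will actually handle requests:

```rust
use actix_service::{apply_cfg, fn_service, Service, ServiceFactory};

#[actix_rt::main]
async fn main() {
    // Base service that the config closure could inspect or delegate to.
    let base = fn_service(|n: usize| async move { Ok::<_, ()>(n) });

    // Factory whose produced services multiply requests by the config value.
    let factory = apply_cfg(base, |mult: usize, _base| async move {
        Ok::<_, ()>(fn_service(move |n: usize| async move { Ok::<_, ()>(n * mult) }))
    });

    let svc = factory.new_service(3).await.unwrap();
    assert_eq!(svc.call(7).await.unwrap(), 21);
}
```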

actix-service/src/boxed.rs Normal file

@@ -0,0 +1,139 @@
//! Trait object forms of services and service factories.
use alloc::{boxed::Box, rc::Rc};
use core::{future::Future, pin::Pin};
use paste::paste;
use crate::{Service, ServiceFactory};
/// A boxed future with no `Send` bound or lifetime parameters.
pub type BoxFuture<T> = Pin<Box<dyn Future<Output = T>>>;
macro_rules! service_object {
($name: ident, $type: tt, $fn_name: ident) => {
paste! {
#[doc = "Type alias for service trait object using `" $type "`."]
pub type $name<Req, Res, Err> = $type<
dyn Service<Req, Response = Res, Error = Err, Future = BoxFuture<Result<Res, Err>>>,
>;
#[doc = "Wraps service as a trait object using [`" $name "`]."]
pub fn $fn_name<S, Req>(service: S) -> $name<Req, S::Response, S::Error>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
$type::new(ServiceWrapper::new(service))
}
}
};
}
service_object!(BoxService, Box, service);
service_object!(RcService, Rc, rc_service);
struct ServiceWrapper<S> {
inner: S,
}
impl<S> ServiceWrapper<S> {
fn new(inner: S) -> Self {
Self { inner }
}
}
impl<S, Req, Res, Err> Service<Req> for ServiceWrapper<S>
where
S: Service<Req, Response = Res, Error = Err>,
S::Future: 'static,
{
type Response = Res;
type Error = Err;
type Future = BoxFuture<Result<Res, Err>>;
crate::forward_ready!(inner);
fn call(&self, req: Req) -> Self::Future {
Box::pin(self.inner.call(req))
}
}
/// Wrapper for a service factory that will map its services to boxed trait object services.
pub struct BoxServiceFactory<Cfg, Req, Res, Err, InitErr>(Inner<Cfg, Req, Res, Err, InitErr>);
/// Wraps a service factory that returns service trait objects.
pub fn factory<SF, Req>(
factory: SF,
) -> BoxServiceFactory<SF::Config, Req, SF::Response, SF::Error, SF::InitError>
where
SF: ServiceFactory<Req> + 'static,
Req: 'static,
SF::Response: 'static,
SF::Service: 'static,
SF::Future: 'static,
SF::Error: 'static,
SF::InitError: 'static,
{
BoxServiceFactory(Box::new(FactoryWrapper(factory)))
}
type Inner<C, Req, Res, Err, InitErr> = Box<
dyn ServiceFactory<
Req,
Config = C,
Response = Res,
Error = Err,
InitError = InitErr,
Service = BoxService<Req, Res, Err>,
Future = BoxFuture<Result<BoxService<Req, Res, Err>, InitErr>>,
>,
>;
impl<C, Req, Res, Err, InitErr> ServiceFactory<Req>
for BoxServiceFactory<C, Req, Res, Err, InitErr>
where
Req: 'static,
Res: 'static,
Err: 'static,
InitErr: 'static,
{
type Response = Res;
type Error = Err;
type Config = C;
type Service = BoxService<Req, Res, Err>;
type InitError = InitErr;
type Future = BoxFuture<Result<Self::Service, InitErr>>;
fn new_service(&self, cfg: C) -> Self::Future {
self.0.new_service(cfg)
}
}
struct FactoryWrapper<SF>(SF);
impl<SF, Req, Cfg, Res, Err, InitErr> ServiceFactory<Req> for FactoryWrapper<SF>
where
Req: 'static,
Res: 'static,
Err: 'static,
InitErr: 'static,
SF: ServiceFactory<Req, Config = Cfg, Response = Res, Error = Err, InitError = InitErr>,
SF::Future: 'static,
SF::Service: 'static,
<SF::Service as Service<Req>>::Future: 'static,
{
type Response = Res;
type Error = Err;
type Config = Cfg;
type Service = BoxService<Req, Res, Err>;
type InitError = InitErr;
type Future = BoxFuture<Result<Self::Service, Self::InitError>>;
fn new_service(&self, cfg: Cfg) -> Self::Future {
let f = self.0.new_service(cfg);
Box::pin(async { f.await.map(|s| Box::new(ServiceWrapper::new(s)) as _) })
}
}
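A usage sketch for the boxed helpers above (illustrative only; the services are invented): `boxed::service` erases two differently-typed services into the same `BoxService` type so they can live in one collection.

```rust
use actix_service::{boxed, fn_service, Service};

#[actix_rt::main]
async fn main() {
    let inc = boxed::service(fn_service(|n: u32| async move { Ok::<_, ()>(n + 1) }));
    let dbl = boxed::service(fn_service(|n: u32| async move { Ok::<_, ()>(n * 2) }));

    // Both now share the erased type and can be stored together.
    let services: Vec<boxed::BoxService<u32, u32, ()>> = vec![inc, dbl];

    assert_eq!(services[0].call(1).await.unwrap(), 2);
    assert_eq!(services[1].call(3).await.unwrap(), 6);
}
```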

actix-service/src/ext.rs Normal file

@@ -0,0 +1,124 @@
use crate::{
and_then::{AndThenService, AndThenServiceFactory},
map::Map,
map_err::MapErr,
transform_err::TransformMapInitErr,
IntoService, IntoServiceFactory, Service, ServiceFactory, Transform,
};
/// An extension trait for [`Service`]s that provides a variety of convenient adapters.
pub trait ServiceExt<Req>: Service<Req> {
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
///
/// This function is similar to the `Option::map` or `Iterator::map` where
/// it will change the type of the underlying service.
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it, similar to the existing `map` methods in the
/// standard library.
fn map<F, R>(self, f: F) -> Map<Self, F, Req, R>
where
Self: Sized,
F: FnMut(Self::Response) -> R,
{
Map::new(self, f)
}
/// Map this service's error to a different error, returning a new service.
///
/// This function is similar to the `Result::map_err` where it will change
/// the error type of the underlying service. For example, this can be useful to
/// ensure that services have the same error type.
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it.
fn map_err<F, E>(self, f: F) -> MapErr<Self, Req, F, E>
where
Self: Sized,
F: Fn(Self::Error) -> E,
{
MapErr::new(self, f)
}
/// Call another service after call to this one has resolved successfully.
///
/// This function can be used to chain two services together and ensure that the second service
/// isn't called until the call to the first service has finished. The result of the first
/// service's call is used as the input to the second service's call.
///
/// Note that this function consumes the receiving service and returns a wrapped version of it.
fn and_then<I, S1>(self, service: I) -> AndThenService<Self, S1, Req>
where
Self: Sized,
I: IntoService<S1, Self::Response>,
S1: Service<Self::Response, Error = Self::Error>,
{
AndThenService::new(self, service.into_service())
}
}
impl<S, Req> ServiceExt<Req> for S where S: Service<Req> {}
/// An extension trait for [`ServiceFactory`]s that provides a variety of convenient adapters.
pub trait ServiceFactoryExt<Req>: ServiceFactory<Req> {
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
fn map<F, R>(self, f: F) -> crate::map::MapServiceFactory<Self, F, Req, R>
where
Self: Sized,
F: FnMut(Self::Response) -> R + Clone,
{
crate::map::MapServiceFactory::new(self, f)
}
/// Map this service's error to a different error, returning a new service.
fn map_err<F, E>(self, f: F) -> crate::map_err::MapErrServiceFactory<Self, Req, F, E>
where
Self: Sized,
F: Fn(Self::Error) -> E + Clone,
{
crate::map_err::MapErrServiceFactory::new(self, f)
}
/// Map this factory's init error to a different error, returning a new service.
fn map_init_err<F, E>(self, f: F) -> crate::map_init_err::MapInitErr<Self, F, Req, E>
where
Self: Sized,
F: Fn(Self::InitError) -> E + Clone,
{
crate::map_init_err::MapInitErr::new(self, f)
}
/// Call another service after call to this one has resolved successfully.
fn and_then<I, SF1>(self, factory: I) -> AndThenServiceFactory<Self, SF1, Req>
where
Self: Sized,
Self::Config: Clone,
I: IntoServiceFactory<SF1, Self::Response>,
SF1: ServiceFactory<
Self::Response,
Config = Self::Config,
Error = Self::Error,
InitError = Self::InitError,
>,
{
AndThenServiceFactory::new(self, factory.into_factory())
}
}
impl<SF, Req> ServiceFactoryExt<Req> for SF where SF: ServiceFactory<Req> {}
/// An extension trait for [`Transform`]s that provides a variety of convenient adapters.
pub trait TransformExt<S, Req>: Transform<S, Req> {
/// Return a new `Transform` whose init error is mapped to a different type.
fn map_init_err<F, E>(self, f: F) -> TransformMapInitErr<Self, S, Req, F, E>
where
Self: Sized,
F: Fn(Self::InitError) -> E + Clone,
{
TransformMapInitErr::new(self, f)
}
}
impl<T, Req> TransformExt<T, Req> for T where T: Transform<T, Req> {}
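A usage sketch for the `ServiceExt` adapters above (illustrative only; the parsing service and error message are invented): `map` rewrites the response and `map_err` rewrites the error of the wrapped service.

```rust
use actix_service::{fn_service, Service, ServiceExt};

#[actix_rt::main]
async fn main() {
    // Parse an integer, scale the response, and convert the error to a String.
    let svc = fn_service(|s: &'static str| async move { s.parse::<i32>() })
        .map(|n| n * 10)
        .map_err(|err| format!("parse failed: {}", err));

    assert_eq!(svc.call("4").await.unwrap(), 40);
    assert!(svc.call("oops").await.unwrap_err().starts_with("parse failed"));
}
```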

actix-service/src/fn_service.rs Normal file

@@ -0,0 +1,394 @@
use core::{future::Future, marker::PhantomData};
use crate::{ok, IntoService, IntoServiceFactory, Ready, Service, ServiceFactory};
/// Create `ServiceFactory` for function that can act as a `Service`
pub fn fn_service<F, Fut, Req, Res, Err, Cfg>(
f: F,
) -> FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
FnServiceFactory::new(f)
}
/// Create `ServiceFactory` for function that can produce services
///
/// # Examples
/// ```
/// use std::io;
/// use actix_service::{fn_factory, fn_service, Service, ServiceFactory};
/// use futures_util::future::ok;
///
/// /// Service that divides two usize values.
/// async fn div((x, y): (usize, usize)) -> Result<usize, io::Error> {
/// if y == 0 {
/// Err(io::Error::new(io::ErrorKind::Other, "divide by zero"))
/// } else {
/// Ok(x / y)
/// }
/// }
///
/// #[actix_rt::main]
/// async fn main() -> io::Result<()> {
/// // Create service factory that produces `div` services
/// let factory = fn_factory(|| {
/// ok::<_, io::Error>(fn_service(div))
/// });
///
/// // construct new service
/// let srv = factory.new_service(()).await?;
///
/// // now we can use `div` service
/// let result = srv.call((10, 20)).await?;
///
/// println!("10 / 20 = {}", result);
///
/// Ok(())
/// }
/// ```
pub fn fn_factory<F, Cfg, Srv, Req, Fut, Err>(
f: F,
) -> FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
Srv: Service<Req>,
F: Fn() -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
{
FnServiceNoConfig::new(f)
}
/// Create `ServiceFactory` for function that accepts config argument and can produce services
///
/// Any function with the following form `Fn(Config) -> Future<Output = Service>` could act as
/// a `ServiceFactory`.
///
/// # Examples
/// ```
/// use std::io;
/// use actix_service::{fn_factory_with_config, fn_service, Service, ServiceFactory};
/// use futures_util::future::ok;
///
/// #[actix_rt::main]
/// async fn main() -> io::Result<()> {
/// // Create service factory. factory uses config argument for
/// // services it generates.
/// let factory = fn_factory_with_config(|y: usize| {
/// ok::<_, io::Error>(fn_service(move |x: usize| ok::<_, io::Error>(x * y)))
/// });
///
/// // construct new service with config argument
/// let srv = factory.new_service(10).await?;
///
/// let result = srv.call(10).await?;
/// assert_eq!(result, 100);
///
/// println!("10 * 10 = {}", result);
/// Ok(())
/// }
/// ```
pub fn fn_factory_with_config<F, Fut, Cfg, Srv, Req, Err>(
f: F,
) -> FnServiceConfig<F, Fut, Cfg, Srv, Req, Err>
where
F: Fn(Cfg) -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
FnServiceConfig::new(f)
}
pub struct FnService<F, Fut, Req, Res, Err>
where
F: FnMut(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
f: F,
_t: PhantomData<Req>,
}
impl<F, Fut, Req, Res, Err> FnService<F, Fut, Req, Res, Err>
where
F: FnMut(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
pub(crate) fn new(f: F) -> Self {
Self { f, _t: PhantomData }
}
}
impl<F, Fut, Req, Res, Err> Clone for FnService<F, Fut, Req, Res, Err>
where
F: FnMut(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
Self::new(self.f.clone())
}
}
impl<F, Fut, Req, Res, Err> Service<Req> for FnService<F, Fut, Req, Res, Err>
where
F: Fn(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
type Error = Err;
type Future = Fut;
crate::always_ready!();
fn call(&self, req: Req) -> Self::Future {
(self.f)(req)
}
}
impl<F, Fut, Req, Res, Err> IntoService<FnService<F, Fut, Req, Res, Err>, Req> for F
where
F: Fn(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
fn into_service(self) -> FnService<F, Fut, Req, Res, Err> {
FnService::new(self)
}
}
pub struct FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: Fn(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
f: F,
_t: PhantomData<(Req, Cfg)>,
}
impl<F, Fut, Req, Res, Err, Cfg> FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn new(f: F) -> Self {
FnServiceFactory { f, _t: PhantomData }
}
}
impl<F, Fut, Req, Res, Err, Cfg> Clone for FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
Self::new(self.f.clone())
}
}
impl<F, Fut, Req, Res, Err> Service<Req> for FnServiceFactory<F, Fut, Req, Res, Err, ()>
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
type Error = Err;
type Future = Fut;
crate::always_ready!();
fn call(&self, req: Req) -> Self::Future {
(self.f)(req)
}
}
impl<F, Fut, Req, Res, Err, Cfg> ServiceFactory<Req>
for FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
type Error = Err;
type Config = Cfg;
type Service = FnService<F, Fut, Req, Res, Err>;
type InitError = ();
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: Cfg) -> Self::Future {
ok(FnService::new(self.f.clone()))
}
}
impl<F, Fut, Req, Res, Err, Cfg>
IntoServiceFactory<FnServiceFactory<F, Fut, Req, Res, Err, Cfg>, Req> for F
where
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn into_factory(self) -> FnServiceFactory<F, Fut, Req, Res, Err, Cfg> {
FnServiceFactory::new(self)
}
}
/// Convert `Fn(Config) -> Future<Service>` fn to a `ServiceFactory`.
pub struct FnServiceConfig<F, Fut, Cfg, Srv, Req, Err>
where
F: Fn(Cfg) -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
f: F,
_t: PhantomData<(Fut, Cfg, Req, Srv, Err)>,
}
impl<F, Fut, Cfg, Srv, Req, Err> FnServiceConfig<F, Fut, Cfg, Srv, Req, Err>
where
F: Fn(Cfg) -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
fn new(f: F) -> Self {
FnServiceConfig { f, _t: PhantomData }
}
}
impl<F, Fut, Cfg, Srv, Req, Err> Clone for FnServiceConfig<F, Fut, Cfg, Srv, Req, Err>
where
F: Fn(Cfg) -> Fut + Clone,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
fn clone(&self) -> Self {
FnServiceConfig {
f: self.f.clone(),
_t: PhantomData,
}
}
}
impl<F, Fut, Cfg, Srv, Req, Err> ServiceFactory<Req>
for FnServiceConfig<F, Fut, Cfg, Srv, Req, Err>
where
F: Fn(Cfg) -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
type Response = Srv::Response;
type Error = Srv::Error;
type Config = Cfg;
type Service = Srv;
type InitError = Err;
type Future = Fut;
fn new_service(&self, cfg: Cfg) -> Self::Future {
(self.f)(cfg)
}
}
/// Converter for `Fn() -> Future<Service>` fn
pub struct FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
F: Fn() -> Fut,
Srv: Service<Req>,
Fut: Future<Output = Result<Srv, Err>>,
{
f: F,
_t: PhantomData<(Cfg, Req)>,
}
impl<F, Cfg, Srv, Req, Fut, Err> FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
F: Fn() -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
fn new(f: F) -> Self {
Self { f, _t: PhantomData }
}
}
impl<F, Cfg, Srv, Req, Fut, Err> ServiceFactory<Req>
for FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
F: Fn() -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
type Response = Srv::Response;
type Error = Srv::Error;
type Config = Cfg;
type Service = Srv;
type InitError = Err;
type Future = Fut;
fn new_service(&self, _: Cfg) -> Self::Future {
(self.f)()
}
}
impl<F, Cfg, Srv, Req, Fut, Err> Clone for FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
F: Fn() -> Fut + Clone,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
fn clone(&self) -> Self {
Self::new(self.f.clone())
}
}
impl<F, Cfg, Srv, Req, Fut, Err>
IntoServiceFactory<FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>, Req> for F
where
F: Fn() -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
fn into_factory(self) -> FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err> {
FnServiceNoConfig::new(self)
}
}
#[cfg(test)]
mod tests {
use core::task::Poll;
use futures_util::future::lazy;
use super::*;
use crate::{ok, Service, ServiceFactory};
#[actix_rt::test]
async fn test_fn_service() {
let new_srv = fn_service(|()| ok::<_, ()>("srv"));
let srv = new_srv.new_service(()).await.unwrap();
let res = srv.call(()).await;
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
assert!(res.is_ok());
assert_eq!(res.unwrap(), "srv");
}
#[actix_rt::test]
async fn test_fn_service_service() {
let srv = fn_service(|()| ok::<_, ()>("srv"));
let res = srv.call(()).await;
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
assert!(res.is_ok());
assert_eq!(res.unwrap(), "srv");
}
#[actix_rt::test]
async fn test_fn_service_with_config() {
let new_srv = fn_factory_with_config(|cfg: usize| {
ok::<_, ()>(fn_service(move |()| ok::<_, ()>(("srv", cfg))))
});
let srv = new_srv.new_service(1).await.unwrap();
let res = srv.call(()).await;
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv", 1));
}
}
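Since `fn_service` above has no inline doc example (unlike `fn_factory`), here is a small sketch (illustrative only; the greeting service is invented) showing that its return value acts both as a `ServiceFactory` and, for the `()` config case, directly as a `Service`.

```rust
use actix_service::{fn_service, Service, ServiceFactory};

#[actix_rt::main]
async fn main() {
    let factory = fn_service(|name: &'static str| async move {
        Ok::<_, ()>(format!("hello {}", name))
    });

    // As a factory: build a service instance, then call it.
    let svc = factory.new_service(()).await.unwrap();
    assert_eq!(svc.call("alice").await.unwrap(), "hello alice");

    // The factory itself also implements `Service` and can be called directly.
    assert_eq!(factory.call("bob").await.unwrap(), "hello bob");
}
```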

actix-service/src/lib.rs Normal file

@@ -0,0 +1,324 @@
//! See [`Service`] docs for information on this crate's foundational trait.
#![no_std]
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
#![warn(missing_docs)]
#![allow(clippy::type_complexity)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
extern crate alloc;
use alloc::{boxed::Box, rc::Rc, sync::Arc};
use core::{
cell::RefCell,
future::Future,
task::{self, Context, Poll},
};
mod and_then;
mod apply;
mod apply_cfg;
pub mod boxed;
mod ext;
mod fn_service;
mod macros;
mod map;
mod map_config;
mod map_err;
mod map_init_err;
mod pipeline;
mod ready;
mod then;
mod transform;
mod transform_err;
pub use self::apply::{apply_fn, apply_fn_factory};
pub use self::apply_cfg::{apply_cfg, apply_cfg_factory};
pub use self::ext::{ServiceExt, ServiceFactoryExt, TransformExt};
pub use self::fn_service::{fn_factory, fn_factory_with_config, fn_service};
pub use self::map_config::{map_config, unit_config};
pub use self::transform::{apply, ApplyTransform, Transform};
#[allow(unused_imports)]
use self::ready::{err, ok, ready, Ready};
/// An asynchronous operation from `Request` to a `Response`.
///
/// The `Service` trait models a request/response interaction, receiving requests and returning
/// replies. You can think about a service as a function with one argument that returns some result
/// asynchronously. Conceptually, the operation looks like this:
///
/// ```ignore
/// async fn(Request) -> Result<Response, Err>
/// ```
///
/// The `Service` trait just generalizes this form. Requests are defined as a generic type parameter
/// and responses and other details are defined as associated types on the trait impl. Notice that
/// this design means that services can receive many request types and converge them to a single
/// response type.
///
/// Services can also have mutable state that influences computation by using a `Cell`, `RefCell`
/// or `Mutex`. Services intentionally do not take `&mut self` to reduce overhead in the
/// common cases.
///
/// `Service` provides a symmetric and uniform API; the same abstractions can be used to represent
/// both clients and servers. Services describe only _transformation_ operations which encourage
/// simple API surfaces. This leads to simpler design of each service, improves test-ability and
/// makes composition easier.
///
/// ```ignore
/// struct MyService;
///
/// impl Service<u8> for MyService {
/// type Response = u64;
/// type Error = MyError;
/// type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>;
///
/// fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ... }
///
/// fn call(&self, req: Self::Request) -> Self::Future { ... }
/// }
/// ```
///
/// Sometimes it is not necessary to implement the Service trait. For example, the above service
/// could be rewritten as a simple function and passed to [`fn_service`](fn_service()).
///
/// ```ignore
/// async fn my_service(req: u8) -> Result<u64, MyError>;
///
/// let svc = fn_service(my_service);
/// svc.call(123);
/// ```
pub trait Service<Req> {
/// Responses given by the service.
type Response;
/// Errors produced by the service when polling readiness or executing call.
type Error;
/// The future response value.
type Future: Future<Output = Result<Self::Response, Self::Error>>;
/// Returns `Ready` when the service is able to process requests.
///
/// If the service is at capacity, then `Pending` is returned and the task is notified when the
/// service becomes ready again. This function is expected to be called while on a task.
///
/// This is a best effort implementation. False positives are permitted. It is permitted for
/// the service to return `Ready` from a `poll_ready` call and the next invocation of `call`
/// results in an error.
///
/// # Notes
/// 1. `poll_ready` might be called on a different task to `call`.
/// 1. In cases of chained services, `.poll_ready()` is called for all services at once.
fn poll_ready(&self, ctx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
/// Process the request and return the response asynchronously.
///
/// This function is expected to be callable off-task. As such, implementations of `call` should
/// take care to not call `poll_ready`. If the service is at capacity and the request is unable
/// to be handled, the returned `Future` should resolve to an error.
///
/// Invoking `call` without first invoking `poll_ready` is permitted. Implementations must be
/// resilient to this fact.
fn call(&self, req: Req) -> Self::Future;
}
/// Factory for creating `Service`s.
///
/// This is useful for cases where new `Service`s must be produced. One case is a TCP
/// server listener: a listener accepts new connections, constructs a new `Service` for each using
/// the `ServiceFactory` trait, and uses the new `Service` to process inbound requests on that new
/// connection.
///
/// `Config` is a service factory configuration type.
///
/// Simple factories may be able to use [`fn_factory`] or [`fn_factory_with_config`] to
/// reduce boilerplate.
pub trait ServiceFactory<Req> {
/// Responses given by the created services.
type Response;
/// Errors produced by the created services.
type Error;
/// Service factory configuration.
type Config;
/// The kind of `Service` created by this factory.
type Service: Service<Req, Response = Self::Response, Error = Self::Error>;
/// Errors potentially raised while building a service.
type InitError;
/// The future of the `Service` instance.
type Future: Future<Output = Result<Self::Service, Self::InitError>>;
/// Create and return a new service asynchronously.
fn new_service(&self, cfg: Self::Config) -> Self::Future;
}
// TODO: remove impl on mut reference.
impl<'a, S, Req> Service<Req> for &'a mut S
where
S: Service<Req> + 'a,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
impl<'a, S, Req> Service<Req> for &'a S
where
S: Service<Req> + 'a,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
impl<S, Req> Service<Req> for Box<S>
where
S: Service<Req> + ?Sized,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
impl<S, Req> Service<Req> for Rc<S>
where
S: Service<Req> + ?Sized,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
/// This impl is deprecated since v2 because the `Service` trait now receives a shared reference.
impl<S, Req> Service<Req> for RefCell<S>
where
S: Service<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.borrow().poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
self.borrow().call(request)
}
}
impl<S, Req> ServiceFactory<Req> for Rc<S>
where
S: ServiceFactory<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Config = S::Config;
type Service = S::Service;
type InitError = S::InitError;
type Future = S::Future;
fn new_service(&self, cfg: S::Config) -> S::Future {
self.as_ref().new_service(cfg)
}
}
impl<S, Req> ServiceFactory<Req> for Arc<S>
where
S: ServiceFactory<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Config = S::Config;
type Service = S::Service;
type InitError = S::InitError;
type Future = S::Future;
fn new_service(&self, cfg: S::Config) -> S::Future {
self.as_ref().new_service(cfg)
}
}
/// Trait for types that can be converted to a `Service`
pub trait IntoService<S, Req>
where
S: Service<Req>,
{
/// Convert to a `Service`
fn into_service(self) -> S;
}
/// Trait for types that can be converted to a `ServiceFactory`
pub trait IntoServiceFactory<SF, Req>
where
SF: ServiceFactory<Req>,
{
/// Convert `Self` to a `ServiceFactory`
fn into_factory(self) -> SF;
}
impl<S, Req> IntoService<S, Req> for S
where
S: Service<Req>,
{
fn into_service(self) -> S {
self
}
}
impl<SF, Req> IntoServiceFactory<SF, Req> for SF
where
SF: ServiceFactory<Req>,
{
fn into_factory(self) -> SF {
self
}
}
/// Convert object of type `U` to a service `S`
pub fn into_service<I, S, Req>(tp: I) -> S
where
I: IntoService<S, Req>,
S: Service<Req>,
{
tp.into_service()
}

actix-service/src/macros.rs Normal file

@@ -0,0 +1,185 @@
/// An implementation of [`poll_ready`] that always signals readiness.
///
/// This should only be used for basic leaf services that have no concept of un-readiness.
/// For wrapper or other service types, use [`forward_ready!`] for simple cases or write a bespoke
/// `poll_ready` implementation.
///
/// [`poll_ready`]: crate::Service::poll_ready
///
/// # Examples
/// ```no_run
/// use actix_service::Service;
/// use futures_util::future::{ready, Ready};
///
/// struct IdentityService;
///
/// impl Service<u32> for IdentityService {
/// type Response = u32;
/// type Error = ();
/// type Future = Ready<Result<Self::Response, Self::Error>>;
///
/// actix_service::always_ready!();
///
/// fn call(&self, req: u32) -> Self::Future {
/// ready(Ok(req))
/// }
/// }
/// ```
#[macro_export]
macro_rules! always_ready {
() => {
#[inline]
fn poll_ready(
&self,
_: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
::core::task::Poll::Ready(Ok(()))
}
};
}
/// An implementation of [`poll_ready`] that forwards readiness checks to a
/// named struct field.
///
/// Tuple structs are not supported.
///
/// [`poll_ready`]: crate::Service::poll_ready
///
/// # Examples
/// ```no_run
/// use actix_service::Service;
/// use futures_util::future::{ready, Ready};
///
/// struct WrapperService<S> {
/// inner: S,
/// }
///
/// impl<S> Service<()> for WrapperService<S>
/// where
/// S: Service<()>,
/// {
/// type Response = S::Response;
/// type Error = S::Error;
/// type Future = S::Future;
///
/// actix_service::forward_ready!(inner);
///
/// fn call(&self, req: ()) -> Self::Future {
/// self.inner.call(req)
/// }
/// }
/// ```
#[macro_export]
macro_rules! forward_ready {
($field:ident) => {
#[inline]
fn poll_ready(
&self,
cx: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
self.$field
.poll_ready(cx)
.map_err(::core::convert::Into::into)
}
};
}
#[cfg(test)]
mod tests {
use core::{
cell::Cell,
convert::Infallible,
task::{self, Context, Poll},
};
use futures_util::{
future::{ready, Ready},
task::noop_waker,
};
use crate::Service;
struct IdentityService;
impl Service<u32> for IdentityService {
type Response = u32;
type Error = Infallible;
type Future = Ready<Result<Self::Response, Self::Error>>;
always_ready!();
fn call(&self, req: u32) -> Self::Future {
ready(Ok(req))
}
}
struct CountdownService(Cell<u32>);
impl Service<()> for CountdownService {
type Response = ();
type Error = Infallible;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let count = self.0.get();
if count == 0 {
Poll::Ready(Ok(()))
} else {
self.0.set(count - 1);
cx.waker().wake_by_ref();
Poll::Pending
}
}
fn call(&self, _: ()) -> Self::Future {
ready(Ok(()))
}
}
struct WrapperService<S> {
inner: S,
}
impl<S> Service<()> for WrapperService<S>
where
S: Service<()>,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
forward_ready!(inner);
fn call(&self, _: ()) -> Self::Future {
self.inner.call(())
}
}
#[test]
fn test_always_ready_macro() {
let waker = noop_waker();
let mut cx = task::Context::from_waker(&waker);
let svc = IdentityService;
assert!(svc.poll_ready(&mut cx).is_ready());
assert!(svc.poll_ready(&mut cx).is_ready());
assert!(svc.poll_ready(&mut cx).is_ready());
}
#[test]
fn test_forward_ready_macro() {
let waker = noop_waker();
let mut cx = task::Context::from_waker(&waker);
let svc = WrapperService {
inner: CountdownService(Cell::new(3)),
};
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_ready());
}
}

actix-service/src/map.rs
@@ -0,0 +1,246 @@
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `map` combinator, changing the type of a service's response.
///
/// This is created by the `ServiceExt::map` method.
pub struct Map<A, F, Req, Res> {
service: A,
f: F,
_t: PhantomData<(Req, Res)>,
}
impl<A, F, Req, Res> Map<A, F, Req, Res> {
/// Create new `Map` combinator
pub(crate) fn new(service: A, f: F) -> Self
where
A: Service<Req>,
F: FnMut(A::Response) -> Res,
{
Self {
service,
f,
_t: PhantomData,
}
}
}
impl<A, F, Req, Res> Clone for Map<A, F, Req, Res>
where
A: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Map {
service: self.service.clone(),
f: self.f.clone(),
_t: PhantomData,
}
}
}
impl<A, F, Req, Res> Service<Req> for Map<A, F, Req, Res>
where
A: Service<Req>,
F: FnMut(A::Response) -> Res + Clone,
{
type Response = Res;
type Error = A::Error;
type Future = MapFuture<A, F, Req, Res>;
crate::forward_ready!(service);
fn call(&self, req: Req) -> Self::Future {
MapFuture::new(self.service.call(req), self.f.clone())
}
}
pin_project! {
pub struct MapFuture<A, F, Req, Res>
where
A: Service<Req>,
F: FnMut(A::Response) -> Res,
{
f: F,
#[pin]
fut: A::Future,
}
}
impl<A, F, Req, Res> MapFuture<A, F, Req, Res>
where
A: Service<Req>,
F: FnMut(A::Response) -> Res,
{
fn new(fut: A::Future, f: F) -> Self {
MapFuture { f, fut }
}
}
impl<A, F, Req, Res> Future for MapFuture<A, F, Req, Res>
where
A: Service<Req>,
F: FnMut(A::Response) -> Res,
{
type Output = Result<Res, A::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
match this.fut.poll(cx) {
Poll::Ready(Ok(resp)) => Poll::Ready(Ok((this.f)(resp))),
Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
Poll::Pending => Poll::Pending,
}
}
}
/// `MapServiceFactory` combinator, changing the response type of a new service.
pub struct MapServiceFactory<A, F, Req, Res> {
a: A,
f: F,
r: PhantomData<(Res, Req)>,
}
impl<A, F, Req, Res> MapServiceFactory<A, F, Req, Res> {
/// Create a new `MapServiceFactory` instance
pub(crate) fn new(a: A, f: F) -> Self
where
A: ServiceFactory<Req>,
F: FnMut(A::Response) -> Res,
{
Self {
a,
f,
r: PhantomData,
}
}
}
impl<A, F, Req, Res> Clone for MapServiceFactory<A, F, Req, Res>
where
A: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Self {
a: self.a.clone(),
f: self.f.clone(),
r: PhantomData,
}
}
}
impl<A, F, Req, Res> ServiceFactory<Req> for MapServiceFactory<A, F, Req, Res>
where
A: ServiceFactory<Req>,
F: FnMut(A::Response) -> Res + Clone,
{
type Response = Res;
type Error = A::Error;
type Config = A::Config;
type Service = Map<A::Service, F, Req, Res>;
type InitError = A::InitError;
type Future = MapServiceFuture<A, F, Req, Res>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
MapServiceFuture::new(self.a.new_service(cfg), self.f.clone())
}
}
pin_project! {
pub struct MapServiceFuture<A, F, Req, Res>
where
A: ServiceFactory<Req>,
F: FnMut(A::Response) -> Res,
{
#[pin]
fut: A::Future,
f: Option<F>,
}
}
impl<A, F, Req, Res> MapServiceFuture<A, F, Req, Res>
where
A: ServiceFactory<Req>,
F: FnMut(A::Response) -> Res,
{
fn new(fut: A::Future, f: F) -> Self {
MapServiceFuture { f: Some(f), fut }
}
}
impl<A, F, Req, Res> Future for MapServiceFuture<A, F, Req, Res>
where
A: ServiceFactory<Req>,
F: FnMut(A::Response) -> Res,
{
type Output = Result<Map<A::Service, F, Req, Res>, A::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if let Poll::Ready(svc) = this.fut.poll(cx)? {
Poll::Ready(Ok(Map::new(svc, this.f.take().unwrap())))
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use futures_util::future::lazy;
use super::*;
use crate::{
ok, IntoServiceFactory, Ready, Service, ServiceExt, ServiceFactory, ServiceFactoryExt,
};
struct Srv;
impl Service<()> for Srv {
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
crate::always_ready!();
fn call(&self, _: ()) -> Self::Future {
ok(())
}
}
#[actix_rt::test]
async fn test_poll_ready() {
let srv = Srv.map(|_| "ok");
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Ok(())));
}
#[actix_rt::test]
async fn test_call() {
let srv = Srv.map(|_| "ok");
let res = srv.call(()).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), "ok");
}
#[actix_rt::test]
async fn test_new_service() {
let new_srv = (|| ok::<_, ()>(Srv)).into_factory().map(|_| "ok");
let srv = new_srv.new_service(&()).await.unwrap();
let res = srv.call(()).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), "ok");
}
}

@@ -0,0 +1,128 @@
use core::marker::PhantomData;
use super::{IntoServiceFactory, ServiceFactory};
/// Adapt an external config argument to the config type expected by the provided service factory.
///
/// Note that this function consumes the receiving service factory and returns
/// a wrapped version of it.
pub fn map_config<I, SF, Req, F, Cfg>(factory: I, f: F) -> MapConfig<SF, Req, F, Cfg>
where
I: IntoServiceFactory<SF, Req>,
SF: ServiceFactory<Req>,
F: Fn(Cfg) -> SF::Config,
{
MapConfig::new(factory.into_factory(), f)
}
/// Replace config with unit.
pub fn unit_config<I, SF, Cfg, Req>(factory: I) -> UnitConfig<SF, Cfg, Req>
where
I: IntoServiceFactory<SF, Req>,
SF: ServiceFactory<Req, Config = ()>,
{
UnitConfig::new(factory.into_factory())
}
/// `map_config()` adapter service factory
pub struct MapConfig<SF, Req, F, Cfg> {
factory: SF,
cfg_mapper: F,
e: PhantomData<(Cfg, Req)>,
}
impl<SF, Req, F, Cfg> MapConfig<SF, Req, F, Cfg> {
/// Create new `MapConfig` combinator
pub(crate) fn new(factory: SF, cfg_mapper: F) -> Self
where
SF: ServiceFactory<Req>,
F: Fn(Cfg) -> SF::Config,
{
Self {
factory,
cfg_mapper,
e: PhantomData,
}
}
}
impl<SF, Req, F, Cfg> Clone for MapConfig<SF, Req, F, Cfg>
where
SF: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Self {
factory: self.factory.clone(),
cfg_mapper: self.cfg_mapper.clone(),
e: PhantomData,
}
}
}
impl<SF, Req, F, Cfg> ServiceFactory<Req> for MapConfig<SF, Req, F, Cfg>
where
SF: ServiceFactory<Req>,
F: Fn(Cfg) -> SF::Config,
{
type Response = SF::Response;
type Error = SF::Error;
type Config = Cfg;
type Service = SF::Service;
type InitError = SF::InitError;
type Future = SF::Future;
fn new_service(&self, cfg: Self::Config) -> Self::Future {
let mapped_cfg = (self.cfg_mapper)(cfg);
self.factory.new_service(mapped_cfg)
}
}
/// `unit_config()` config combinator
pub struct UnitConfig<SF, Cfg, Req> {
factory: SF,
_phantom: PhantomData<(Cfg, Req)>,
}
impl<SF, Cfg, Req> UnitConfig<SF, Cfg, Req>
where
SF: ServiceFactory<Req, Config = ()>,
{
/// Create new `UnitConfig` combinator
pub(crate) fn new(factory: SF) -> Self {
Self {
factory,
_phantom: PhantomData,
}
}
}
impl<SF, Cfg, Req> Clone for UnitConfig<SF, Cfg, Req>
where
SF: Clone,
{
fn clone(&self) -> Self {
Self {
factory: self.factory.clone(),
_phantom: PhantomData,
}
}
}
impl<SF, Cfg, Req> ServiceFactory<Req> for UnitConfig<SF, Cfg, Req>
where
SF: ServiceFactory<Req, Config = ()>,
{
type Response = SF::Response;
type Error = SF::Error;
type Config = Cfg;
type Service = SF::Service;
type InitError = SF::InitError;
type Future = SF::Future;
fn new_service(&self, _: Cfg) -> Self::Future {
self.factory.new_service(())
}
}
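// A minimal sketch (not part of the original source): `map_config` lets a factory whose
// `Config` is `()` be constructed from a richer config value. `AppConfig`, `Srv`, and the
// test below are illustrative assumptions, written in the style of the crate's other test
// modules; the turbofish pins the request type, which is not otherwise inferable here.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{ok, IntoServiceFactory, Ready, Service, ServiceFactory};

    struct Srv;

    impl Service<()> for Srv {
        type Response = ();
        type Error = ();
        type Future = Ready<Result<(), ()>>;

        crate::always_ready!();

        fn call(&self, _: ()) -> Self::Future {
            ok(())
        }
    }

    // hypothetical application-level config used only for this sketch
    struct AppConfig;

    #[actix_rt::test]
    async fn maps_external_config_to_unit() {
        // a factory built from a closure, as in the other test modules
        let factory = (|| ok::<_, ()>(Srv)).into_factory();

        // discard the `AppConfig` and hand `()` to the wrapped factory
        let factory = map_config::<_, _, (), _, _>(factory, |_cfg: AppConfig| ());

        let srv = factory.new_service(AppConfig).await.unwrap();
        assert!(srv.call(()).await.is_ok());
    }
}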

@@ -0,0 +1,253 @@
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `map_err` combinator, changing the type of a service's
/// error.
///
/// This is created by the `ServiceExt::map_err` method.
pub struct MapErr<S, Req, F, E> {
service: S,
f: F,
_t: PhantomData<(E, Req)>,
}
impl<S, Req, F, E> MapErr<S, Req, F, E> {
/// Create new `MapErr` combinator
pub(crate) fn new(service: S, f: F) -> Self
where
S: Service<Req>,
F: Fn(S::Error) -> E,
{
Self {
service,
f,
_t: PhantomData,
}
}
}
impl<S, Req, F, E> Clone for MapErr<S, Req, F, E>
where
S: Clone,
F: Clone,
{
fn clone(&self) -> Self {
MapErr {
service: self.service.clone(),
f: self.f.clone(),
_t: PhantomData,
}
}
}
impl<A, Req, F, E> Service<Req> for MapErr<A, Req, F, E>
where
A: Service<Req>,
F: Fn(A::Error) -> E + Clone,
{
type Response = A::Response;
type Error = E;
type Future = MapErrFuture<A, Req, F, E>;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(&self.f)
}
fn call(&self, req: Req) -> Self::Future {
MapErrFuture::new(self.service.call(req), self.f.clone())
}
}
pin_project! {
pub struct MapErrFuture<A, Req, F, E>
where
A: Service<Req>,
F: Fn(A::Error) -> E,
{
f: F,
#[pin]
fut: A::Future,
}
}
impl<A, Req, F, E> MapErrFuture<A, Req, F, E>
where
A: Service<Req>,
F: Fn(A::Error) -> E,
{
fn new(fut: A::Future, f: F) -> Self {
MapErrFuture { f, fut }
}
}
impl<A, Req, F, E> Future for MapErrFuture<A, Req, F, E>
where
A: Service<Req>,
F: Fn(A::Error) -> E,
{
type Output = Result<A::Response, E>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
this.fut.poll(cx).map_err(this.f)
}
}
/// Factory for the `map_err` combinator, changing the type of a new
/// service's error.
///
/// This is created by the `ServiceFactoryExt::map_err` method.
pub struct MapErrServiceFactory<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
{
a: A,
f: F,
e: PhantomData<(E, Req)>,
}
impl<A, Req, F, E> MapErrServiceFactory<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
{
/// Create a new `MapErrServiceFactory` instance
pub(crate) fn new(a: A, f: F) -> Self {
Self {
a,
f,
e: PhantomData,
}
}
}
impl<A, Req, F, E> Clone for MapErrServiceFactory<A, Req, F, E>
where
A: ServiceFactory<Req> + Clone,
F: Fn(A::Error) -> E + Clone,
{
fn clone(&self) -> Self {
Self {
a: self.a.clone(),
f: self.f.clone(),
e: PhantomData,
}
}
}
impl<A, Req, F, E> ServiceFactory<Req> for MapErrServiceFactory<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
{
type Response = A::Response;
type Error = E;
type Config = A::Config;
type Service = MapErr<A::Service, Req, F, E>;
type InitError = A::InitError;
type Future = MapErrServiceFuture<A, Req, F, E>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
MapErrServiceFuture::new(self.a.new_service(cfg), self.f.clone())
}
}
pin_project! {
pub struct MapErrServiceFuture<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E,
{
#[pin]
fut: A::Future,
f: F,
}
}
impl<A, Req, F, E> MapErrServiceFuture<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E,
{
fn new(fut: A::Future, f: F) -> Self {
MapErrServiceFuture { fut, f }
}
}
impl<A, Req, F, E> Future for MapErrServiceFuture<A, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
{
type Output = Result<MapErr<A::Service, Req, F, E>, A::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if let Poll::Ready(svc) = this.fut.poll(cx)? {
Poll::Ready(Ok(MapErr::new(svc, this.f.clone())))
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod tests {
use futures_util::future::lazy;
use super::*;
use crate::{
err, ok, IntoServiceFactory, Ready, Service, ServiceExt, ServiceFactory,
ServiceFactoryExt,
};
struct Srv;
impl Service<()> for Srv {
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Err(()))
}
fn call(&self, _: ()) -> Self::Future {
err(())
}
}
#[actix_rt::test]
async fn test_poll_ready() {
let srv = Srv.map_err(|_| "error");
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Err("error")));
}
#[actix_rt::test]
async fn test_call() {
let srv = Srv.map_err(|_| "error");
let res = srv.call(()).await;
assert!(res.is_err());
assert_eq!(res.err().unwrap(), "error");
}
#[actix_rt::test]
async fn test_new_service() {
let new_srv = (|| ok::<_, ()>(Srv)).into_factory().map_err(|_| "error");
let srv = new_srv.new_service(&()).await.unwrap();
let res = srv.call(()).await;
assert!(res.is_err());
assert_eq!(res.err().unwrap(), "error");
}
}

@@ -0,0 +1,99 @@
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
use super::ServiceFactory;
/// `MapInitErr` service factory combinator, changing the init error type of a new service.
pub struct MapInitErr<A, F, Req, Err> {
a: A,
f: F,
e: PhantomData<(Req, Err)>,
}
impl<A, F, Req, Err> MapInitErr<A, F, Req, Err>
where
A: ServiceFactory<Req>,
F: Fn(A::InitError) -> Err,
{
/// Create new `MapInitErr` combinator
pub(crate) fn new(a: A, f: F) -> Self {
Self {
a,
f,
e: PhantomData,
}
}
}
impl<A, F, Req, E> Clone for MapInitErr<A, F, Req, E>
where
A: Clone,
F: Clone,
{
fn clone(&self) -> Self {
Self {
a: self.a.clone(),
f: self.f.clone(),
e: PhantomData,
}
}
}
impl<A, F, Req, E> ServiceFactory<Req> for MapInitErr<A, F, Req, E>
where
A: ServiceFactory<Req>,
F: Fn(A::InitError) -> E + Clone,
{
type Response = A::Response;
type Error = A::Error;
type Config = A::Config;
type Service = A::Service;
type InitError = E;
type Future = MapInitErrFuture<A, F, Req, E>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
MapInitErrFuture::new(self.a.new_service(cfg), self.f.clone())
}
}
pin_project! {
pub struct MapInitErrFuture<A, F, Req, E>
where
A: ServiceFactory<Req>,
F: Fn(A::InitError) -> E,
{
f: F,
#[pin]
fut: A::Future,
}
}
impl<A, F, Req, E> MapInitErrFuture<A, F, Req, E>
where
A: ServiceFactory<Req>,
F: Fn(A::InitError) -> E,
{
fn new(fut: A::Future, f: F) -> Self {
MapInitErrFuture { f, fut }
}
}
impl<A, F, Req, E> Future for MapInitErrFuture<A, F, Req, E>
where
A: ServiceFactory<Req>,
F: Fn(A::InitError) -> E,
{
type Output = Result<A::Service, E>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
this.fut.poll(cx).map_err(this.f)
}
}
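// A minimal sketch (not part of the original source): map a factory's init error type,
// here from `u8` to a `&'static str`. `Srv` and the test below are illustrative, written
// in the style of the crate's other test modules; `MapInitErr::new` is crate-visible, and
// the turbofish pins the request type, which is not otherwise inferable here.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{ok, IntoServiceFactory, Ready, Service, ServiceFactory};

    struct Srv;

    impl Service<()> for Srv {
        type Response = ();
        type Error = ();
        type Future = Ready<Result<(), ()>>;

        crate::always_ready!();

        fn call(&self, _: ()) -> Self::Future {
            ok(())
        }
    }

    #[actix_rt::test]
    async fn maps_init_error_type() {
        // a factory whose init error type is `u8`
        let factory = (|| ok::<Srv, u8>(Srv)).into_factory();

        // convert the `u8` init error into a `&'static str`
        let factory = MapInitErr::<_, _, (), _>::new(factory, |_code: u8| "init failed");

        let srv = factory.new_service(&()).await.unwrap();
        assert!(srv.call(()).await.is_ok());
    }
}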

@@ -0,0 +1,309 @@
// TODO: see if pipeline is necessary
#![allow(dead_code)]
use core::{
marker::PhantomData,
task::{Context, Poll},
};
use crate::and_then::{AndThenService, AndThenServiceFactory};
use crate::map::{Map, MapServiceFactory};
use crate::map_err::{MapErr, MapErrServiceFactory};
use crate::map_init_err::MapInitErr;
use crate::then::{ThenService, ThenServiceFactory};
use crate::{IntoService, IntoServiceFactory, Service, ServiceFactory};
/// Construct new pipeline with one service in pipeline chain.
pub(crate) fn pipeline<I, S, Req>(service: I) -> Pipeline<S, Req>
where
I: IntoService<S, Req>,
S: Service<Req>,
{
Pipeline {
service: service.into_service(),
_phantom: PhantomData,
}
}
/// Construct new pipeline factory with one service factory.
pub(crate) fn pipeline_factory<I, SF, Req>(factory: I) -> PipelineFactory<SF, Req>
where
I: IntoServiceFactory<SF, Req>,
SF: ServiceFactory<Req>,
{
PipelineFactory {
factory: factory.into_factory(),
_phantom: PhantomData,
}
}
/// Pipeline service - allows composing multiple services into one service.
pub(crate) struct Pipeline<S, Req> {
service: S,
_phantom: PhantomData<Req>,
}
impl<S, Req> Pipeline<S, Req>
where
S: Service<Req>,
{
/// Call another service after the call to this one has resolved successfully.
///
/// This function can be used to chain two services together and ensure that
/// the second service isn't called until the call to the first service has
/// finished. The result of the call to the first service is used as the
/// input to the second service's call.
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it.
pub fn and_then<I, S1>(
self,
service: I,
) -> Pipeline<impl Service<Req, Response = S1::Response, Error = S::Error> + Clone, Req>
where
Self: Sized,
I: IntoService<S1, S::Response>,
S1: Service<S::Response, Error = S::Error>,
{
Pipeline {
service: AndThenService::new(self.service, service.into_service()),
_phantom: PhantomData,
}
}
/// Chain on a computation for when a call to the service has finished,
/// passing the result of the call to the next service `S1`.
///
/// Note that this function consumes the receiving pipeline and returns a
/// wrapped version of it.
pub fn then<F, S1>(
self,
service: F,
) -> Pipeline<impl Service<Req, Response = S1::Response, Error = S::Error> + Clone, Req>
where
Self: Sized,
F: IntoService<S1, Result<S::Response, S::Error>>,
S1: Service<Result<S::Response, S::Error>, Error = S::Error>,
{
Pipeline {
service: ThenService::new(self.service, service.into_service()),
_phantom: PhantomData,
}
}
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
///
/// This function is similar to `Option::map` or `Iterator::map` in that
/// it changes the response type of the underlying service.
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it, similar to the existing `map` methods in the
/// standard library.
pub fn map<F, R>(self, f: F) -> Pipeline<Map<S, F, Req, R>, Req>
where
Self: Sized,
F: FnMut(S::Response) -> R,
{
Pipeline {
service: Map::new(self.service, f),
_phantom: PhantomData,
}
}
/// Map this service's error to a different error, returning a new service.
///
/// This function is similar to `Result::map_err` in that it changes the
/// error type of the underlying service. This is useful, for example, to
/// ensure that services have the same error type.
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it.
pub fn map_err<F, E>(self, f: F) -> Pipeline<MapErr<S, Req, F, E>, Req>
where
Self: Sized,
F: Fn(S::Error) -> E,
{
Pipeline {
service: MapErr::new(self.service, f),
_phantom: PhantomData,
}
}
}
impl<T, Req> Clone for Pipeline<T, Req>
where
T: Clone,
{
fn clone(&self) -> Self {
Pipeline {
service: self.service.clone(),
_phantom: PhantomData,
}
}
}
impl<S: Service<Req>, Req> Service<Req> for Pipeline<S, Req> {
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
#[inline]
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
self.service.poll_ready(ctx)
}
#[inline]
fn call(&self, req: Req) -> Self::Future {
self.service.call(req)
}
}
/// Pipeline factory
pub(crate) struct PipelineFactory<SF, Req> {
factory: SF,
_phantom: PhantomData<Req>,
}
impl<SF, Req> PipelineFactory<SF, Req>
where
SF: ServiceFactory<Req>,
{
/// Call another service after the call to this one has resolved successfully.
pub fn and_then<I, SF1>(
self,
factory: I,
) -> PipelineFactory<
impl ServiceFactory<
Req,
Response = SF1::Response,
Error = SF::Error,
Config = SF::Config,
InitError = SF::InitError,
Service = impl Service<Req, Response = SF1::Response, Error = SF::Error> + Clone,
> + Clone,
Req,
>
where
Self: Sized,
SF::Config: Clone,
I: IntoServiceFactory<SF1, SF::Response>,
SF1: ServiceFactory<
SF::Response,
Config = SF::Config,
Error = SF::Error,
InitError = SF::InitError,
>,
{
PipelineFactory {
factory: AndThenServiceFactory::new(self.factory, factory.into_factory()),
_phantom: PhantomData,
}
}
/// Create a service factory to chain on a computation for when a call to the
/// service has finished, passing the result of the call to the next
/// service `SF1`.
///
/// Note that this function consumes the receiving pipeline and returns a
/// wrapped version of it.
pub fn then<I, SF1>(
self,
factory: I,
) -> PipelineFactory<
impl ServiceFactory<
Req,
Response = SF1::Response,
Error = SF::Error,
Config = SF::Config,
InitError = SF::InitError,
Service = impl Service<Req, Response = SF1::Response, Error = SF::Error> + Clone,
> + Clone,
Req,
>
where
Self: Sized,
SF::Config: Clone,
I: IntoServiceFactory<SF1, Result<SF::Response, SF::Error>>,
SF1: ServiceFactory<
Result<SF::Response, SF::Error>,
Config = SF::Config,
Error = SF::Error,
InitError = SF::InitError,
>,
{
PipelineFactory {
factory: ThenServiceFactory::new(self.factory, factory.into_factory()),
_phantom: PhantomData,
}
}
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
pub fn map<F, R>(self, f: F) -> PipelineFactory<MapServiceFactory<SF, F, Req, R>, Req>
where
Self: Sized,
F: FnMut(SF::Response) -> R + Clone,
{
PipelineFactory {
factory: MapServiceFactory::new(self.factory, f),
_phantom: PhantomData,
}
}
/// Map this service's error to a different error, returning a new service.
pub fn map_err<F, E>(
self,
f: F,
) -> PipelineFactory<MapErrServiceFactory<SF, Req, F, E>, Req>
where
Self: Sized,
F: Fn(SF::Error) -> E + Clone,
{
PipelineFactory {
factory: MapErrServiceFactory::new(self.factory, f),
_phantom: PhantomData,
}
}
/// Map this factory's init error to a different error, returning a new service factory.
pub fn map_init_err<F, E>(self, f: F) -> PipelineFactory<MapInitErr<SF, F, Req, E>, Req>
where
Self: Sized,
F: Fn(SF::InitError) -> E + Clone,
{
PipelineFactory {
factory: MapInitErr::new(self.factory, f),
_phantom: PhantomData,
}
}
}
impl<T, Req> Clone for PipelineFactory<T, Req>
where
T: Clone,
{
fn clone(&self) -> Self {
PipelineFactory {
factory: self.factory.clone(),
_phantom: PhantomData,
}
}
}
impl<SF, Req> ServiceFactory<Req> for PipelineFactory<SF, Req>
where
SF: ServiceFactory<Req>,
{
type Config = SF::Config;
type Response = SF::Response;
type Error = SF::Error;
type Service = SF::Service;
type InitError = SF::InitError;
type Future = SF::Future;
#[inline]
fn new_service(&self, cfg: SF::Config) -> Self::Future {
self.factory.new_service(cfg)
}
}
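// A minimal sketch (not part of the original source): chain two services with `and_then`
// through the crate-private `pipeline` helper, so the first service's response feeds the
// second. `Double`, `AddOne`, and the test below are illustrative, written in the style
// of the crate's other test modules.
#[cfg(test)]
mod tests {
    use super::pipeline;
    use crate::{ok, Ready, Service};

    struct Double;

    impl Service<u32> for Double {
        type Response = u32;
        type Error = ();
        type Future = Ready<Result<u32, ()>>;

        crate::always_ready!();

        fn call(&self, req: u32) -> Self::Future {
            ok(req * 2)
        }
    }

    struct AddOne;

    impl Service<u32> for AddOne {
        type Response = u32;
        type Error = ();
        type Future = Ready<Result<u32, ()>>;

        crate::always_ready!();

        fn call(&self, req: u32) -> Self::Future {
            ok(req + 1)
        }
    }

    #[actix_rt::test]
    async fn and_then_chains_services() {
        // (5 * 2) + 1 == 11
        let svc = pipeline(Double).and_then(AddOne);
        assert_eq!(svc.call(5u32).await, Ok(11));
    }
}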

@@ -0,0 +1,54 @@
//! When MSRV is 1.48, replace with `core::future::Ready` and `core::future::ready()`.
use core::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
/// Future for the [`ready`](ready()) function.
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Ready<T> {
val: Option<T>,
}
impl<T> Ready<T> {
/// Unwraps the value from this immediately ready future.
#[inline]
pub fn into_inner(mut self) -> T {
self.val.take().unwrap()
}
}
impl<T> Unpin for Ready<T> {}
impl<T> Future for Ready<T> {
type Output = T;
#[inline]
fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<T> {
let val = self.val.take().expect("Ready can not be polled twice.");
Poll::Ready(val)
}
}
/// Creates a future that is immediately ready with a value.
#[allow(dead_code)]
pub(crate) fn ready<T>(val: T) -> Ready<T> {
Ready { val: Some(val) }
}
/// Create a future that is immediately ready with a success value.
#[allow(dead_code)]
pub(crate) fn ok<T, E>(val: T) -> Ready<Result<T, E>> {
Ready { val: Some(Ok(val)) }
}
/// Create a future that is immediately ready with an error value.
#[allow(dead_code)]
pub(crate) fn err<T, E>(err: E) -> Ready<Result<T, E>> {
Ready {
val: Some(Err(err)),
}
}
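// A minimal sketch (not part of the original source): exercise the crate-private `ready`,
// `ok`, and `err` helpers and the `into_inner` accessor; the test itself is illustrative.
#[cfg(test)]
mod tests {
    use super::{err, ok, ready};

    #[actix_rt::test]
    async fn resolves_immediately() {
        assert_eq!(ready(42).await, 42);
        assert_eq!(ok::<_, ()>(1u8).await, Ok(1u8));
        assert_eq!(err::<(), _>("boom").await, Err("boom"));
        assert_eq!(ready("now").into_inner(), "now");
    }
}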

Some files were not shown because too many files have changed in this diff.