mirror of https://github.com/fafhrd91/actix-net synced 2025-08-12 07:37:06 +02:00

Compare commits


192 Commits

Author SHA1 Message Date
Rob Ede
443a328fb4 prepare actix-server release 2.0.0-beta.9 2021-11-15 02:39:55 +00:00
Rob Ede
58a67ade32 improve docs of system_exit 2021-11-15 02:33:13 +00:00
Rob Ede
38caa8f088 Fix server arbiter support (#417) 2021-11-14 19:45:15 +00:00
Rob Ede
ed987eef06 prepare actix-server release 2.0.0-beta.8 2021-11-07 15:46:59 +00:00
fakeshadow
3658929010 fix io-uring feature for actix-server (#414)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-11-07 15:43:59 +00:00
fakeshadow
3f49d8ab54 remove usage of mio::net::TcpSocket (#413) 2021-11-07 14:18:23 +00:00
fakeshadow
161d1ee94b fix accept timeout and worker graceful shutdown (#412) 2021-11-07 13:00:19 +00:00
Rob Ede
81ba7cafaa fix server non-unix signal impl send (#410) 2021-11-05 02:16:13 +00:00
Rob Ede
f8f51a2240 prepare actix-server release 2.0.0-beta.7 2021-11-05 01:14:28 +00:00
Rob Ede
a2e765ea6e prepare actix-codec release 0.4.1 2021-11-05 01:05:51 +00:00
Rob Ede
03dae6a4a4 prepare actix-rt release 2.4.0 2021-11-05 00:51:34 +00:00
brockelmore
2080f4c149 Framed::poll_ready flushes when buffer is full (#409) 2021-11-05 00:43:33 +00:00
Rob Ede
b2cef8fcdb add lines codec (#338) 2021-11-05 00:12:02 +00:00
Rob Ede
15279eaf3d sync wait for service factories to be ready 2021-11-04 23:26:56 +00:00
Rob Ede
7d98247cb0 fix server worker name 2021-11-04 23:00:43 +00:00
Rob Ede
5b537c7b10 actix-rt-less (#408) 2021-11-04 20:30:43 +00:00
Rob Ede
81d7295486 clippy 2021-11-01 23:41:28 +00:00
Rob Ede
581e599209 rename Server => ServerHandler (#407) 2021-11-01 23:36:51 +00:00
Rob Ede
1c8fcaebbc tweak server logging 2021-10-22 18:17:26 +01:00
fakeshadow
a1d15f2e08 minimal support of System type with io-uring (#395) 2021-10-21 11:04:51 +01:00
Rob Ede
70ea5322ab prepare actix-tls 3.0.0-beta.7 release (#401) 2021-10-20 17:12:11 +01:00
Rob Ede
303666278a prepare actix-tls release 3.0.0-beta.6 2021-10-19 16:51:40 +01:00
Edward Shen
669e868370 Use tokio-rustls 0.23 (#396)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-10-19 16:48:23 +01:00
Rob Ede
47f278b17a fix test macro in presence of other imports named test (#399) 2021-10-19 16:13:13 +01:00
Rob Ede
ca77d8d835 split -server and -tls msrv and raise to 1.52 (#398) 2021-10-19 14:53:42 +01:00
Rob Ede
00775884f8 prepare actix-macros release 0.2.2 2021-10-14 11:08:02 +01:00
Rob Ede
4ff8a2cf68 make runtime macros more IDE friendly (#391) 2021-10-14 10:54:39 +01:00
Rob Ede
5c555a9408 prepare actix-rt release 2.3.0 2021-10-11 22:55:23 +01:00
Rob Ede
ca435b2575 prepare actix-server release 2.0.0-beta.6 2021-10-11 05:14:34 +01:00
Rob Ede
9fa8d7fc5a avoid dependency on older tokios 2021-10-11 05:12:57 +01:00
Rob Ede
b03fe7c5b6 prepare actix-service release v2.0.1 2021-10-11 04:20:37 +01:00
fakeshadow
6fed1c3e7d add support for io-uring (#374)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-10-11 02:58:11 +01:00
Thales
c3d697df97 server: Don't listen for SIGHUP (#389) 2021-10-04 02:48:10 +01:00
Riley
80a362712f Fix Service<u8> request documentation (#388) 2021-09-26 01:30:11 +01:00
Ibraheem Ahmed
2b1edb95ea spawn should allow futures with non-unit outputs (#369)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-09-01 21:51:03 +01:00
Rob Ede
4644fa41cf run doc test in parallel (#387) 2021-09-01 21:30:26 +01:00
Rob Ede
98c37fe47d clippy 2021-09-01 20:59:54 +01:00
Rob Ede
b9455d2ca9 move router crate 2021-08-06 19:05:29 +01:00
Rob Ede
0183b0f8cc soft-disallow prefix resources with tail segments (#379) 2021-08-06 18:48:49 +01:00
Ali MJ Al-Nasrawy
b122a1ae1a ResourceDef::join (#380) 2021-08-06 18:48:30 +01:00
Rob Ede
4303058243 enforce path / separators on dynamic prefixes (#378) 2021-08-06 18:25:21 +01:00
Aravinth Manivannan
48b2e11509 improve malformed path error message (#384) 2021-08-06 18:06:29 +01:00
Ali MJ Al-Nasrawy
5379a46a99 ResourceDef: relax unnecessary bounds (#381) 2021-08-06 17:45:10 +01:00
Rob Ede
f8f1ac94bc add Patterns::is_empty and impl IntoPatterns for Patterns 2021-07-20 08:18:50 +01:00
Rob Ede
82cd5b8290 prepare router release 0.5.0-beta.1 2021-07-20 07:43:50 +01:00
Rob Ede
c65e8524b2 rework resourcedef (#373) 2021-07-19 22:37:54 +01:00
Rob Ede
a83dfaa162 Update macros.rs
closes #234
2021-07-17 20:54:53 +01:00
Rob Ede
e4ec956001 fix examples on msrv 2021-07-17 03:11:25 +01:00
Rob Ede
95cba659ff add zero cost profiling to router 2021-07-17 01:09:29 +01:00
Rob Ede
5687e81d9f rework IntoPatterns trait and codegen (#372) 2021-07-17 01:06:23 +01:00
Rob Ede
a0fe2a9b2e clippy 2021-07-16 21:46:32 +01:00
Rob Ede
ad22a93466 allow path building when resource has tail (#371) 2021-07-16 21:41:57 +01:00
Rob Ede
c2d5b2398a Rename Path::{len => segment_count} (#370) 2021-07-16 19:43:48 +01:00
Ali MJ Al-Nasrawy
5b1ff30dd9 router: fix multi-pattern and path tail matches (#366) 2021-07-16 18:17:00 +01:00
Ali MJ Al-Nasrawy
e1317bb3a0 path.len() != path.path().len() (#368)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-07-15 15:34:49 +01:00
Ali MJ Al-Nasrawy
dcea009158 ResourceDef: cleanup (#365) 2021-07-15 15:09:01 +01:00
Rob Ede
13c18b8a51 Update CHANGES.md 2021-07-14 10:37:49 +01:00
Rob Ede
06b17d6a43 fix ci 2021-06-28 15:06:29 +01:00
Rob Ede
605ec25143 prepare macros release 0.2.1 2021-06-08 17:48:30 +01:00
Ibraheem Ahmed
3824493fd3 take custom system path in actix_rt::main macro (#363) 2021-06-08 17:33:05 +01:00
Rob Ede
3be3e11aa5 change actix-router version to 0.4.0 2021-06-06 18:48:27 +01:00
Rob Ede
6a5ea0342b prepare router release 0.3.0 (#362) 2021-06-06 18:43:22 +01:00
Ali MJ Al-Nasrawy
23b1f63345 router: handle newline char '\n' in url (#360) 2021-06-06 03:38:58 +01:00
Ali MJ Al-Nasrawy
3aa037d07d fix changelog (#361) 2021-06-05 19:24:30 +01:00
Ali MJ Al-Nasrawy
cf21df14f2 Path: fix unsafe malformed string (#359) 2021-06-05 18:29:00 +01:00
Ali MJ Al-Nasrawy
a1bf8662c9 router: don't decode %25 to '%' (#357) 2021-06-06 01:34:16 +09:00
Ibraheem Ahmed
6f4d2220fa store Cow in actix-router Path (#345) 2021-06-05 01:46:40 +01:00
Danilo Bargen
54b22f9fce Docs: Fix signature of Service::call (#358) 2021-06-02 21:10:36 +01:00
fakeshadow
983abec77d Fix interrupt handling. Fix double server pause/resume (#353) 2021-04-30 13:42:25 +01:00
fakeshadow
e4d4ae21ee refactor connection counter (#343)
* Remove restart_worker test

* Remove Slab

* Rework counter

* Make counter limit switch accurate

* Remove backpressure. Add pause state

* make changes for review

* fix doc comment for counter
2021-04-29 23:27:08 +08:00
fakeshadow
8ad5f58d38 Remove ServerBuilder::configure (#349) 2021-04-27 23:58:02 +01:00
fakeshadow
613b2be51f Fix Display impl of MioListener (#350) 2021-04-27 11:54:18 -07:00
Rob Ede
b2e9640952 prepare codec 0.4.0 release (#346) 2021-04-21 11:08:43 +01:00
Rob Ede
76338a5822 prepare server release 2.0.0-beta.5 2021-04-20 05:16:32 +01:00
Rob Ede
978e4f25fb prepare actix-utils release 3.0.0 (#342) 2021-04-17 02:00:36 +01:00
Rob Ede
1c4e965366 prepare service release 2.0.0 (#339) 2021-04-16 15:18:53 +01:00
fakeshadow
2435520e67 Remove/restart worker test (#341) 2021-04-16 14:40:21 +01:00
fakeshadow
19468feef8 Fix memory ordering of WorkerAvailability (#340) 2021-04-16 11:20:08 +01:00
fakeshadow
bd48908792 Return worker index in WakerInterest::WorkerAvailable (#337) 2021-04-16 05:59:10 +01:00
fakeshadow
20c2da17ed Fix worker_avail (#336)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-16 03:20:02 +01:00
Rob Ede
fdafc1dd65 amend licences 2021-04-16 02:08:44 +01:00
Rob Ede
7749dfe46a address msrv todo in router 2021-04-16 02:06:11 +01:00
fakeshadow
aeb81ad3fd Fix worker are notified to stop with non_graceful shutdown (#333) 2021-04-16 00:54:15 +01:00
Rob Ede
47fba25d67 remove pipeline from public api (#335) 2021-04-16 00:00:02 +01:00
Rob Ede
7a82288066 docs tweak 2021-04-15 21:58:18 +01:00
Rob Ede
4e6d88d143 improve boxed service docs 2021-04-15 20:43:02 +01:00
Rob Ede
ef206f40fb update ignored service docs to new traits 2021-04-15 20:13:27 +01:00
fakeshadow
8e98d9168c add test for restart worker thread (#328) 2021-04-15 18:49:43 +01:00
fakeshadow
3c1f57706a Make ServerWorker drop stop Arbiter it runs on (#334) 2021-04-15 13:31:03 +01:00
fakeshadow
d49ecf7203 Fix bug where backpressure happen too early (#332) 2021-04-14 14:48:05 +01:00
fakeshadow
e0fb67f646 Reduce ServerWorker size (#321) 2021-04-13 01:12:59 +01:00
fakeshadow
ddce2d6d12 Reduce cfg flags in actix_server::socket (#325) 2021-04-10 16:05:50 +01:00
fakeshadow
0a11cf5cba Separate WorkerHandle to two parts (#323) 2021-04-10 01:03:28 +01:00
Rob Ede
859f45868d Revert "do no drain backlog on backpressure" (#324)
This reverts commit d4829b046d.
2021-04-09 21:04:41 +01:00
fakeshadow
d4829b046d do no drain backlog on backpressure (#322) 2021-04-08 23:15:10 +01:00
fakeshadow
5961eb892e Fix bug where worker service restart could skip failing services and not being able to restart multiple services (#318) 2021-04-05 20:39:05 +01:00
fakeshadow
995efcf427 Fix bug where paused Accept would register timed out sockets (#312) 2021-04-05 13:38:41 +01:00
fakeshadow
f1573931dd Remove MAX_CONN (#316) 2021-04-04 23:00:12 +01:00
fakeshadow
3859e91799 Use named type for WorkerState::Restarting and Shutdown (#317) 2021-04-04 21:53:19 +01:00
fakeshadow
8aade720ed Refactor WorkerState::Shutdown (#310) 2021-04-04 20:34:52 +01:00
fakeshadow
8079c50ddb Add ServerWorker::restart_service method (#314)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-04 13:22:34 +01:00
fakeshadow
05689b86d9 Remove Option wrapper for CounterGuard (#313) 2021-04-04 10:53:06 +01:00
fakeshadow
fd3e5fba02 Refactor actix_server WorkerState::Restarting enum variant. (#306)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-03 19:40:12 +01:00
fakeshadow
39d1f282f7 add test for max concurrent connections (#311) 2021-04-03 19:01:00 +01:00
fakeshadow
d8889c63ef Do not do double check on connection num when entering graceful shutdown (#309) 2021-04-02 12:49:12 +01:00
fakeshadow
fdac52aa11 Refactor Worker::shutdown mehtod (#308) 2021-04-02 12:22:05 +01:00
Rob Ede
6d66cfb06a prepare utils release 3.0.0-beta.4 2021-04-01 13:57:08 +01:00
Rob Ede
fb27ffc525 add future::Either type to utils (#305) 2021-04-01 13:53:44 +01:00
Rob Ede
b068ea16f8 prepare server release 2.0.0-beta.4 2021-04-01 09:36:07 +01:00
Rob Ede
4eebdf4070 prepare actix-utils release 3.0.0-beta.3 2021-04-01 09:31:42 +01:00
Rob Ede
b09e7cd417 fix local waker metadata 2021-04-01 09:01:56 +01:00
fakeshadow
2c5c9167a5 Fix bug where timed out socket would register itself when server in b… (#302)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-01 08:25:24 +01:00
fakeshadow
ee3a548a85 Refactor Accept::accept_one (#303) 2021-04-01 07:45:49 +01:00
fakeshadow
f21eaa954f Reduce size of Conn by removing unused addr field (#304) 2021-04-01 06:55:33 +01:00
Rob Ede
8becb0db70 refactor crates for better api stability (#301) 2021-03-30 13:39:10 +01:00
fakeshadow
26a5af70cb reduce branch in Accept::accept method (#300) 2021-03-29 08:19:37 +01:00
Rob Ede
0ee8d032b6 prepare actix-tls release 3.0.0-beta.5 2021-03-29 06:57:47 +01:00
Rob Ede
3cf1c548fd prepare actix-rt release 2.2.0 2021-03-29 06:57:14 +01:00
fakeshadow
4544562e1b Remove unused TcpConnectService (#299) 2021-03-27 21:03:24 +00:00
fakeshadow
bb27bac216 Add native tls support for actix_tls::connect module (#295)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-27 00:20:17 +00:00
Rob Ede
f9262dbec0 prevent large shutdown timeout from panicking
closes #298
2021-03-26 23:37:01 +00:00
fakeshadow
12d3942b98 Remove unused types in actix-tls. Add ActixStream impl for Box<dyn Ac… (#297) 2021-03-26 13:03:03 +00:00
fakeshadow
a3c9ebc7fa fix rustls panic when generating dns name from ip (#296)
* fix rustls panic when generating dns name from ip

* Update rustls.rs

* update changelog

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-24 09:32:04 -07:00
fakeshadow
b7bfff2b32 add example of using multi-thread tokio runtime (#294)
* add example of using multi-thread tokio runtime

* Update multi_thread_system.rs

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-24 04:56:13 -07:00
fakeshadow
0c73f13c8b ActixStream readiness methods return Ready object (#293) 2021-03-23 05:50:48 +00:00
Rob Ede
945479e0c3 unvendor openssl (#292) 2021-03-17 00:26:04 +00:00
Rob Ede
746cc2ab89 prepare service release 2.0.0-beta.5 2021-03-15 23:09:34 +00:00
Rob Ede
91ea8c5dad remove service dev module and add transformext trait
also improve docs on transform and boxed mods
2021-03-10 03:18:09 +00:00
Rob Ede
0a705b1023 add docs for *_ready macros 2021-03-10 02:23:24 +00:00
fakeshadow
9e2bcec226 add RcService type and rc_service construct function (#290) 2021-02-28 23:01:05 +00:00
Rob Ede
382830a37e refactor dispatcher / add Receiver::recv (#286) 2021-02-28 21:11:16 +00:00
fakeshadow
493a1a32c0 rc service changelog (#289) 2021-02-28 19:54:57 +00:00
fakeshadow
50a195e9ce add impl Service for Rc<S: Service> (#288) 2021-02-28 19:42:11 +00:00
Rob Ede
06ddad0051 prepare rt and tls releases (#287) 2021-02-25 11:50:24 +00:00
Rob Ede
789e6a8a46 update ci (#284) 2021-02-24 09:48:41 +00:00
Rob Ede
6e590fd042 Merge pull request #285 from actix/dep/actix-server 2021-02-24 09:09:44 +00:00
fakeshadow
fa8ded3a34 bump tokio version for actix-server 2021-02-24 15:54:28 +08:00
Rob Ede
841c611233 doc nits 2021-02-24 01:39:02 +00:00
Rob Ede
81a2b6a425 add local_addr binding to connector service (#282) 2021-02-23 18:52:28 +00:00
fakeshadow
a6e79453d0 remove default reuse_addr 2021-02-24 02:26:11 +08:00
fakeshadow
17f711a9d6 update changelog 2021-02-24 01:20:01 +08:00
fakeshadow
c3be839a69 add local_addr binding to connector service 2021-02-24 01:13:17 +08:00
Rob Ede
8d74cf387d standardize openssl based stream name 2021-02-20 18:04:05 +00:00
Rob Ede
7e483cc356 tweak task and stream docs 2021-02-20 17:34:04 +00:00
fakeshadow
75d7ae3139 add actix stream trait (#276) 2021-02-20 17:25:22 +00:00
Juan Aguilar
2cfe1d88ad Refactor LocalWaker for use Cell and remove deprecated methods (#278) 2021-02-19 17:12:30 +00:00
Rob Ede
cb07ead392 prepare rt release 2.0.2 2021-02-06 22:52:53 +00:00
Rob Ede
32543809f9 add System::try_current (#275) 2021-02-06 22:45:03 +00:00
Rob Ede
eb4d29e15e add arbiter handle assoc fn (#274)
* add arbiter handle assoc fn
2021-02-06 22:27:56 +00:00
Rob Ede
7ee42b50b4 prepare router 0.2.7 release 2021-02-06 19:50:48 +00:00
Rob Ede
0da848e4ae fix server dev dep 2021-02-06 19:35:29 +00:00
Rob Ede
5f80d85010 fix server version 2021-02-06 19:34:58 +00:00
Rob Ede
16ba77c4c8 prepare next set of betas (#273) 2021-02-06 19:24:52 +00:00
Rob Ede
b4a3f51659 prepare rt release 2.0.1 2021-02-06 15:54:11 +00:00
Riley
9d0901e07f actix-rt: expose JoinError (#271) 2021-02-06 15:50:38 +00:00
fakeshadow
ebb9cd055f use static dispatch on signal handling. reduce allocation (#272) 2021-02-06 03:38:11 +00:00
Rob Ede
a77b70aed2 prepare service 2.0.0-beta.4 release (#269) 2021-02-04 20:44:13 +00:00
Rob Ede
c918da906b use reexported tls crates when possible 2021-02-04 15:23:06 +00:00
Rob Ede
b5399c5631 use reusable box future in tls connector 2021-02-04 15:23:06 +00:00
fakeshadow
7f0eddd794 add blocking thread customize (#265) 2021-02-04 15:01:51 +00:00
shuo
db3385e865 retry on EINTR in accept loop (#264)
Co-authored-by: lishuo <lishuo.03@bytedance.com>
2021-02-04 10:20:37 +00:00
Rob Ede
4a8693d000 readme grammar 2021-02-03 11:18:35 +00:00
Rob Ede
4ec358575e prepare actix-rt v2.0.0 release (#262) 2021-02-03 10:25:31 +00:00
Rob Ede
66bd5bf4a2 prepare macros v0.2.0 release (#261) 2021-02-02 02:07:58 +00:00
Rob Ede
057e7cd7c9 prepare rt v2.0.0-beta.3 2021-01-31 05:19:30 +00:00
Rob Ede
0b656f51e1 deprecate rt TLS item storage 2021-01-31 04:48:03 +00:00
Rob Ede
0eb68d1c7b Revert "remove arbiter TLS item storage"
This reverts commit 3e6f69885c.
2021-01-31 04:45:27 +00:00
Rob Ede
3e6f69885c remove arbiter TLS item storage 2021-01-31 04:43:35 +00:00
Rob Ede
2fa60b07ae prevent arbiter leaks by waiting for registration 2021-01-31 04:41:28 +00:00
Rob Ede
b75254403a remove builder and introduce worker handle (#257) 2021-01-31 03:34:07 +00:00
Rob Ede
1b35ff8ee6 express spawn fn as spawn fut (#256) 2021-01-29 15:16:30 +00:00
Rob Ede
2924419905 prevent spawn_fn panic bubbling (#255) 2021-01-29 14:16:10 +00:00
Rob Ede
6b86b5efc5 rename arbiter to worker (#254) 2021-01-29 04:08:14 +00:00
Rob Ede
ba39c8436d remove tokio runners (#253) 2021-01-29 02:21:06 +00:00
fakeshadow
feac376c17 fix actix-tls build (#252) 2021-01-28 10:31:57 +00:00
Rob Ede
a633d2353c fix addr iterator 2021-01-27 11:23:28 +00:00
Rob Ede
45edff625e add rt tests and doc tests 2021-01-26 09:46:14 +00:00
Rob Ede
cff9deb729 attribute nits 2021-01-26 09:45:43 +00:00
Rob Ede
eaefe21b98 add tests for custom resolver 2021-01-26 08:05:19 +00:00
fakeshadow
636cef8868 service trait takes shared self reference (#247) 2021-01-23 03:06:22 +00:00
fakeshadow
874e5f2e50 change default name resolver and allow custom resolvers (#248) 2021-01-23 01:33:50 +00:00
Rob Ede
6112a47529 update local deps 2021-01-09 15:19:16 +00:00
Rob Ede
a2e03700e7 update rt changelog 2021-01-09 15:16:31 +00:00
Rob Ede
6edf9b8278 prepare rt 2.0.0-beta.2 release 2021-01-09 15:12:59 +00:00
Rob Ede
f07d807707 remove actix-threadpool crate 2021-01-09 15:04:55 +00:00
Rob Ede
d4c46b7da9 fix macros code 2021-01-09 14:58:15 +00:00
Rob Ede
b0a8f8411b prepare macros 0.2.0-beta.1 release 2021-01-09 14:56:07 +00:00
Rob Ede
46bfe5de36 prepare service 2.0.0-beta.3 release 2021-01-09 14:28:33 +00:00
Rob Ede
a95afe2800 prepare router release 0.2.6 2021-01-09 14:18:20 +00:00
Rob Ede
f751cf5acb use convert err on forward_ready! (#246) 2021-01-09 14:13:16 +00:00
fakeshadow
a1982bdbad add actix-rt::task (#245) 2021-01-03 18:16:57 +00:00
Rob Ede
147c4f4f2c test bytestring with ahash 2021-01-03 04:42:08 +00:00
149 changed files with 7828 additions and 8486 deletions

.cargo/config.toml (new file, 26 lines)

@@ -0,0 +1,26 @@
[alias]
lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo"
lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"
ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"
# just check the library (without dev deps)
ci-check-min = "hack --workspace check --no-default-features"
ci-check-lib = "hack --workspace --feature-powerset --exclude-features=io-uring check"
ci-check-lib-linux = "hack --workspace --feature-powerset check"
# check everything
ci-check = "hack --workspace --feature-powerset --exclude-features=io-uring check --tests --examples"
ci-check-linux = "hack --workspace --feature-powerset check --tests --examples"
# tests avoiding io-uring feature
ci-test = "hack test --workspace --exclude=actix-rt --exclude=actix-server --all-features --lib --tests --no-fail-fast -- --nocapture"
ci-test-rt = " hack --feature-powerset --exclude-features=io-uring test --package=actix-rt --lib --tests --no-fail-fast -- --nocapture"
ci-test-server = "hack --feature-powerset --exclude-features=io-uring test --package=actix-server --lib --tests --no-fail-fast -- --nocapture"
# test with io-uring feature
ci-test-rt-linux = " hack --feature-powerset test --package=actix-rt --lib --tests --no-fail-fast -- --nocapture"
ci-test-server-linux = "hack --feature-powerset test --package=actix-server --lib --tests --no-fail-fast -- --nocapture"
# test lower msrv
ci-test-lower-msrv = "hack --workspace --exclude=actix-server --exclude=actix-tls --feature-powerset test --lib --tests --no-fail-fast -- --nocapture"

.github/workflows/ci.yml (new file, 201 lines)

@@ -0,0 +1,201 @@
name: CI
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [master]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
- { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
- { name: Windows (32-bit), os: windows-latest, triple: i686-pc-windows-msvc }
version:
- 1.52.0 # MSRV for -server and -tls
- stable
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
VCPKGRS_DYNAMIC: 1
steps:
- name: Setup Routing
if: matrix.target.os == 'macos-latest'
run: sudo ifconfig lo0 alias 127.0.0.3
- uses: actions/checkout@v2
# install OpenSSL on Windows
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc' || matrix.target.triple == 'i686-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install OpenSSL
if: matrix.target.triple == 'i686-pc-windows-msvc'
run: vcpkg install openssl:x86-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
# - name: Install MSYS2
# if: matrix.target.triple == 'x86_64-pc-windows-gnu'
# uses: msys2/setup-msys2@v2
# - name: Install MinGW Packages
# if: matrix.target.triple == 'x86_64-pc-windows-gnu'
# run: |
# msys2 -c 'pacman -Sy --noconfirm pacman'
# msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
# - name: Generate Cargo.lock
# uses: actions-rs/cargo@v1
# with: { command: generate-lockfile }
# - name: Cache Dependencies
# uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check lib
if: >
matrix.target.os != 'ubuntu-latest'
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with: { command: ci-check-lib }
- name: check lib
if: matrix.target.os == 'ubuntu-latest'
uses: actions-rs/cargo@v1
with: { command: ci-check-lib-linux }
- name: check lib
if: matrix.target.triple == 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
- name: check full
# TODO: compile OpenSSL and run tests on MinGW
if: >
matrix.target.os != 'ubuntu-latest'
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with: { command: ci-check }
- name: check all
if: matrix.target.os == 'ubuntu-latest'
uses: actions-rs/cargo@v1
with: { command: ci-check-linux }
- name: tests
if: >
matrix.target.os != 'ubuntu-latest'
&& matrix.target.triple != 'x86_64-pc-windows-gnu'
run: |
cargo ci-test
cargo ci-test-rt
cargo ci-test-server
- name: tests
if: matrix.target.os == 'ubuntu-latest'
run: |
sudo bash -c "ulimit -Sl 512 && ulimit -Hl 512 && PATH=$PATH:/usr/share/rust/.cargo/bin && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-rt-linux && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-server-linux"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.2 --no-default-features --features ci-autoclean
cargo-cache
build_and_test_lower_msrv:
name: Linux / 1.46 (lower MSRV)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install 1.46.0 # MSRV for all but -server and -tls
uses: actions-rs/toolchain@v1
with:
toolchain: 1.46.0-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: tests
run: |
sudo bash -c "ulimit -Sl 512 && ulimit -Hl 512 && PATH=$PATH:/usr/share/rust/.cargo/bin && RUSTUP_TOOLCHAIN=1.46 cargo ci-test-lower-msrv"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.2 --no-default-features --features ci-autoclean
cargo-cache
coverage:
name: coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust (nightly)
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: Generate coverage file
if: github.ref == 'refs/heads/master'
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml --verbose
- name: Upload to Codecov
if: github.ref == 'refs/heads/master'
uses: codecov/codecov-action@v1
with: { file: cobertura.xml }
rustdoc:
name: rustdoc
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust (nightly)
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: doc tests io-uring
run: |
sudo bash -c "ulimit -Sl 512 && ulimit -Hl 512 && PATH=$PATH:/usr/share/rust/.cargo/bin && RUSTUP_TOOLCHAIN=nightly cargo ci-doctest"


@@ -1,34 +1,42 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
name: Clippy and rustfmt Check
jobs:
clippy_check:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
components: rustfmt
profile: minimal
components: rustfmt
override: true
- name: Check with rustfmt
- name: Rustfmt Check
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- uses: actions-rs/toolchain@v1
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
components: clippy
toolchain: stable
profile: minimal
components: clippy
override: true
- name: Check with Clippy
- name: Clippy Check
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --workspace --tests
args: --workspace --all-features --tests --examples --bins -- -Dclippy::todo


@@ -1,82 +0,0 @@
name: CI (Linux)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- 1.46.0
- stable
- nightly
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with:
command: generate-lockfile
- name: Cache cargo dirs
uses: actions/cache@v2
with:
path:
~/.cargo/registry
~/.cargo/git
~/.cargo/bin
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo build
uses: actions/cache@v2
with:
path: target
key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-build-trimmed-${{ hashFiles('**/Cargo.lock') }}
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture
- name: Generate coverage file
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml --workspace
- name: Upload to Codecov
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
uses: codecov/codecov-action@v1
with:
file: cobertura.xml
- name: Clear the cargo caches
run: |
rustup update stable
rustup override set stable
cargo install cargo-cache --no-default-features --features ci-autoclean
cargo-cache


@@ -1,43 +0,0 @@
name: CI (macOS)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-apple-darwin
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture

.github/workflows/upload-doc.yml (new file, 35 lines)

@@ -0,0 +1,35 @@
name: Upload documentation
on:
push:
branches: [master]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Build Docs
uses: actions-rs/cargo@v1
with:
command: doc
args: --workspace --all-features --no-deps
- name: Tweak HTML
run: echo '<meta http-equiv="refresh" content="0;url=actix_server/index.html">' > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: target/doc


@@ -1,45 +0,0 @@
name: CI (Windows-mingw)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-pc-windows-gnu
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-pc-windows-gnu
profile: minimal
override: true
- name: Install MSYS2
uses: msys2/setup-msys2@v2
- name: Install packages
run: |
msys2 -c 'pacman -Sy --noconfirm pacman'
msys2 -c 'pacman --noconfirm -S base-devel pkg-config'
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests


@@ -1,69 +0,0 @@
name: CI (Windows)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
- '1.0'
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
target:
- x86_64-pc-windows-msvc
- i686-pc-windows-msvc
name: ${{ matrix.version }} - ${{ matrix.target }}
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target }}
profile: minimal
override: true
- name: Install OpenSSL (x64)
if: matrix.target == 'x86_64-pc-windows-msvc'
run: |
vcpkg integrate install
vcpkg install openssl:x64-windows
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
- name: Install OpenSSL (x86)
if: matrix.target == 'i686-pc-windows-msvc'
run: |
vcpkg integrate install
vcpkg install openssl:x86-windows
Get-ChildItem C:\vcpkg\installed\x86-windows\bin
Get-ChildItem C:\vcpkg\installed\x86-windows\lib
Copy-Item C:\vcpkg\installed\x86-windows\bin\libcrypto-1_1.dll C:\vcpkg\installed\x86-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x86-windows\bin\libssl-1_1.dll C:\vcpkg\installed\x86-windows\bin\libssl.dll
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --workspace --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture


@@ -2,26 +2,31 @@
members = [
"actix-codec",
"actix-macros",
"actix-router",
"actix-rt",
"actix-server",
"actix-service",
"actix-threadpool",
"actix-tls",
"actix-tracing",
"actix-utils",
"bytestring",
"local-channel",
"local-waker",
]
[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-macros = { path = "actix-macros" }
actix-router = { path = "actix-router" }
actix-rt = { path = "actix-rt" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
actix-threadpool = { path = "actix-threadpool" }
actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }
local-channel = { path = "local-channel" }
local-waker = { path = "local-waker" }
[profile.release]
lto = true
opt-level = 3
codegen-units = 1


@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Copyright 2017-NOW Actix Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -1,4 +1,4 @@
Copyright (c) 2017 Nikolay Kim
Copyright (c) 2017-NOW Actix Team
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated


@@ -3,6 +3,18 @@
## Unreleased - 2021-xx-xx
## 0.4.1 - 2021-11-05
* Added `LinesCodec`. [#338]
* `Framed::poll_ready` flushes when the buffer is full. [#409]
[#338]: https://github.com/actix/actix-net/pull/338
[#409]: https://github.com/actix/actix-net/pull/409
## 0.4.0 - 2021-04-20
* No significant changes since v0.4.0-beta.1.
## 0.4.0-beta.1 - 2020-12-28
* Replace `pin-project` with `pin-project-lite`. [#237]
* Upgrade `tokio` dependency to `1`. [#237]
@@ -23,28 +35,28 @@
## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec` `.encode()` performance
* Simplify `BytesCodec` `.decode()`
* Improve `BytesCodec::encode()` performance.
* Simplify `BytesCodec::decode()`.
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` check emptiness of read buffer.
## 0.2.0 - 2019-12-10
* Use specific futures dependencies
* Use specific futures dependencies.
## 0.2.0-alpha.4
* Fix buffer remaining capacity calculation
* Fix buffer remaining capacity calculation.
## 0.2.0-alpha.3
* Use tokio 0.2
* Fix low/high watermark for write/read buffers
* Use tokio 0.2.
* Fix low/high watermark for write/read buffers.
## 0.2.0-alpha.2
* Migrated to `std::future`
* Migrated to `std::future`.
## 0.1.2 - 2019-03-27
@@ -56,4 +68,4 @@
## 0.1.0 - 2018-12-09
* Move codec to separate crate
* Move codec to separate crate.


@@ -1,12 +1,13 @@
[package]
name = "actix-codec"
version = "0.4.0-beta.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
version = "0.4.1"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec/"
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -21,6 +22,15 @@ bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
log = "0.4"
memchr = "2.3"
pin-project-lite = "0.2"
tokio = "1"
tokio = "1.5.1"
tokio-util = { version = "0.6", features = ["codec", "io"] }
[dev-dependencies]
criterion = { version = "0.3", features = ["html_reports"] }
tokio-test = "0.4.2"
[[bench]]
name = "lines"
harness = false


@@ -0,0 +1,57 @@
use bytes::BytesMut;
use criterion::{criterion_group, criterion_main, Criterion};
const INPUT: &[u8] = include_bytes!("./lorem.txt");
fn bench_lines_codec(c: &mut Criterion) {
let mut decode_group = c.benchmark_group("lines decode");
decode_group.bench_function("actix", |b| {
b.iter(|| {
use actix_codec::Decoder as _;
let mut codec = actix_codec::LinesCodec::default();
let mut buf = BytesMut::from(INPUT);
while let Ok(Some(_bytes)) = codec.decode_eof(&mut buf) {}
});
});
decode_group.bench_function("tokio", |b| {
b.iter(|| {
use tokio_util::codec::Decoder as _;
let mut codec = tokio_util::codec::LinesCodec::new();
let mut buf = BytesMut::from(INPUT);
while let Ok(Some(_bytes)) = codec.decode_eof(&mut buf) {}
});
});
decode_group.finish();
let mut encode_group = c.benchmark_group("lines encode");
encode_group.bench_function("actix", |b| {
b.iter(|| {
use actix_codec::Encoder as _;
let mut codec = actix_codec::LinesCodec::default();
let mut buf = BytesMut::new();
codec.encode("123", &mut buf).unwrap();
});
});
encode_group.bench_function("tokio", |b| {
b.iter(|| {
use tokio_util::codec::Encoder as _;
let mut codec = tokio_util::codec::LinesCodec::new();
let mut buf = BytesMut::new();
codec.encode("123", &mut buf).unwrap();
});
});
encode_group.finish();
}
criterion_group!(benches, bench_lines_codec);
criterion_main!(benches);


@@ -0,0 +1,5 @@
Lorem ipsum dolor sit amet, consectetur adipiscing elit. In tortor quam, pulvinar sit amet vestibulum eget, tincidunt non urna. Sed eu sem in felis malesuada venenatis. Suspendisse volutpat aliquet nisi, in condimentum nibh convallis id. Quisque gravida felis scelerisque ipsum aliquam consequat. Praesent libero odio, malesuada vitae odio quis, aliquam aliquet enim. In fringilla ut turpis nec pharetra. Duis eu posuere metus. Sed a aliquet massa. Mauris non tempus mi, quis mattis libero. Vivamus ornare ex at semper cursus. Vestibulum sed facilisis erat, aliquet mollis est. In interdum, magna iaculis ultricies elementum, mi ante vestibulum mauris, nec viverra turpis lorem quis ante. Proin in auctor erat. Vivamus dictum congue massa, fermentum bibendum leo pretium quis. Integer dapibus sodales ligula, sit amet imperdiet felis suscipit eu. Phasellus non ornare enim.
Nam feugiat neque sit amet hendrerit rhoncus. Nunc suscipit molestie vehicula. Aenean vulputate porttitor augue, sit amet molestie dolor volutpat vitae. Nulla vitae condimentum eros. Aliquam tristique purus at metus lacinia egestas. Cras euismod lorem eu orci lobortis, sed tincidunt nisl laoreet. Ut suscipit fermentum mi, et euismod tortor. Pellentesque vitae tempor quam, sed dignissim mi. Suspendisse luctus lacus vitae ligula blandit vehicula. Quisque interdum iaculis tincidunt. Nunc elementum mi vitae tempor placerat. Suspendisse potenti. Donec blandit laoreet ipsum, quis rhoncus velit vulputate sed.
Aliquam suscipit lectus eros, at maximus dolor efficitur quis. Integer blandit tortor orci, nec mattis nunc eleifend ac. Mauris pharetra vel quam quis lacinia. Duis lobortis condimentum nunc ut facilisis. Praesent arcu nisi, porta sit amet viverra sit amet, pellentesque ut nisi. Nunc gravida tortor eu ligula tempus, in interdum magna pretium. Fusce eu ornare sapien. Nullam pellentesque cursus eros. Nam orci massa, faucibus eget leo eget, elementum vulputate erat. Fusce vehicula augue et dui hendrerit vulputate. Mauris neque lacus, porttitor ut condimentum id, efficitur ac neque. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Donec accumsan, lectus fermentum elementum tristique, ipsum tortor mollis ante, non lacinia nibh ex quis sapien.
Donec pharetra, elit eget rutrum luctus, urna ligula facilisis lorem, sit amet rhoncus ante est eu mi. Vestibulum vestibulum ultricies interdum. Nulla tincidunt ante non hendrerit venenatis. Curabitur vestibulum turpis erat, id efficitur quam venenatis eu. Fusce nulla sem, dapibus vel quam feugiat, ornare fermentum ligula. Praesent tempus tincidunt mauris, non pellentesque felis varius in. Aenean eu arcu ligula. Morbi dapibus maximus nulla a pharetra. Fusce leo metus, luctus ut cursus non, sollicitudin non lectus. Integer pellentesque eleifend erat, vel gravida purus tempus a. Mauris id vestibulum quam. Nunc vitae ullamcorper metus, pharetra placerat enim. Fusce in ultrices nisl. Curabitur justo mauris, dignissim in aliquam sit amet, sollicitudin ut risus. Cras tempor rutrum justo, non tincidunt est maximus at.
Aliquam ac velit tincidunt, ullamcorper velit sit amet, pulvinar nisi. Nullam rhoncus rhoncus egestas. Cras ac luctus nisi. Mauris sit amet risus at magna volutpat ultrices quis ac dui. Aliquam condimentum tellus purus, vel sagittis odio vulputate at. Sed ut finibus tellus. Aliquam tincidunt vehicula diam.


@@ -1,11 +1,10 @@
use bytes::{Buf, Bytes, BytesMut};
use std::io;
use bytes::{Buf, Bytes, BytesMut};
use super::{Decoder, Encoder};
/// Bytes codec.
///
/// Reads/Writes chunks of bytes from a stream.
/// Bytes codec. Reads/writes chunks of bytes from a stream.
#[derive(Debug, Copy, Clone)]
pub struct BytesCodec;


@@ -21,14 +21,13 @@ bitflags::bitflags! {
}
pin_project_lite::pin_project! {
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
/// the `Encoder` and `Decoder` traits to encode and decode frames.
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using the `Encoder` and
/// `Decoder` traits to encode and decode frames.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Encoder`/`Decoder`
/// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
/// Raw I/O objects work with byte sequences, but higher-level code usually wants to batch these
/// into meaningful chunks, called "frames". This method layers framing on top of an I/O object,
/// by using the `Encoder`/`Decoder` traits to handle encoding and decoding of message frames.
/// Note that the incoming and outgoing frame types may be distinct.
pub struct Framed<T, U> {
#[pin]
io: T,
@@ -44,10 +43,9 @@ where
T: AsyncRead + AsyncWrite,
U: Decoder,
{
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
/// a single object is often useful for layering things like gzip or TLS, which require both
/// read and write access to the underlying object.
pub fn new(io: T, codec: U) -> Framed<T, U> {
Framed {
io,
@@ -70,21 +68,18 @@ impl<T, U> Framed<T, U> {
&mut self.codec
}
/// Returns a reference to the underlying I/O stream wrapped by
/// `Frame`.
/// Returns a reference to the underlying I/O stream wrapped by `Frame`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
/// it may corrupt the stream of frames otherwise being worked with.
pub fn io_ref(&self) -> &T {
&self.io
}
/// Returns a mutable reference to the underlying I/O stream.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
/// it may corrupt the stream of frames otherwise being worked with.
pub fn io_mut(&mut self) -> &mut T {
&mut self.io
}
@@ -183,16 +178,15 @@ impl<T, U> Framed<T, U> {
U: Decoder,
{
loop {
let mut this = self.as_mut().project();
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
let this = self.as_mut().project();
// Repeatedly call `decode` or `decode_eof` as long as it is "readable". Readable is
// defined as not having returned `None`. If the upstream has returned EOF, and the
// decoder is no longer readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if this.flags.contains(Flags::READABLE) {
if this.flags.contains(Flags::EOF) {
match this.codec.decode_eof(&mut this.read_buf) {
match this.codec.decode_eof(this.read_buf) {
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
Ok(None) => return Poll::Ready(None),
Err(e) => return Poll::Ready(Some(Err(e))),
@@ -201,7 +195,7 @@ impl<T, U> Framed<T, U> {
log::trace!("attempting to decode a frame");
match this.codec.decode(&mut this.read_buf) {
match this.codec.decode(this.read_buf) {
Ok(Some(frame)) => {
log::trace!("frame decoded from buffer");
return Poll::Ready(Some(Ok(frame)));
@@ -215,7 +209,7 @@ impl<T, U> Framed<T, U> {
debug_assert!(!this.flags.contains(Flags::EOF));
// Otherwise, try to read more data and try again. Make sure we've got room
// Otherwise, try to read more data and try again. Make sure we've got room.
let remaining = this.read_buf.capacity() - this.read_buf.len();
if remaining < LW {
this.read_buf.reserve(HW - remaining)
@@ -306,11 +300,11 @@ where
{
type Error = U::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.is_write_ready() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
self.flush(cx)
}
}
@@ -341,13 +335,12 @@ where
}
impl<T, U> Framed<T, U> {
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
/// a single object is often useful for layering things like gzip or TLS, which require both
/// read and write access to the underlying object.
///
/// These objects take a stream, a read buffer and a write buffer. These
/// fields can be obtained from an existing `Framed` with the `into_parts` method.
/// These objects take a stream, a read buffer and a write buffer. These fields can be obtained
/// from an existing `Framed` with the `into_parts` method.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
io: parts.io,
@@ -358,12 +351,11 @@ impl<T, U> Framed<T, U> {
}
}
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer
/// with unprocessed data, and the codec.
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer with unprocessed data,
/// and the codec.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
/// it may corrupt the stream of frames otherwise being worked with.
pub fn into_parts(self) -> FramedParts<T, U> {
FramedParts {
io: self.io,
@@ -376,14 +368,15 @@ impl<T, U> Framed<T, U> {
}
/// `FramedParts` contains an export of the data of a Framed transport.
/// It can be used to construct a new `Framed` with a different codec.
/// It contains all current buffers and the inner transport.
///
/// It can be used to construct a new `Framed` with a different codec. It contains all current
/// buffers and the inner transport.
#[derive(Debug)]
pub struct FramedParts<T, U> {
/// The inner transport used to read bytes to and write bytes to
/// The inner transport used to read bytes to and write bytes to.
pub io: T,
/// The codec
/// The codec object.
pub codec: U,
/// The buffer with read but unprocessed data.
@@ -396,7 +389,7 @@ pub struct FramedParts<T, U> {
}
impl<T, U> FramedParts<T, U> {
/// Create a new, default, `FramedParts`
/// Creates a new default `FramedParts`.
pub fn new(io: T, codec: U) -> FramedParts<T, U> {
FramedParts {
io,
@@ -407,7 +400,7 @@ impl<T, U> FramedParts<T, U> {
}
}
/// Create a new `FramedParts` with read buffer
/// Creates a new `FramedParts` with read buffer.
pub fn with_read_buf(io: T, codec: U, read_buf: BytesMut) -> FramedParts<T, U> {
FramedParts {
io,
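
The `FramedParts` doc comments above describe how a `Framed` can be torn down and rebuilt around its constituent pieces without losing buffered data. A minimal sketch of that pattern, using only the constructors visible in this diff (`FramedParts::with_read_buf` and `Framed::from_parts`); the helper name and scenario are illustrative and not taken from the repository:

use actix_codec::{Framed, FramedParts, LinesCodec};
use bytes::BytesMut;

// Wrap an I/O object in a line-delimited transport, pre-seeding the read buffer
// with bytes that were already pulled off the wire (e.g. while sniffing a protocol).
fn framed_with_leftover<T>(io: T, leftover: BytesMut) -> Framed<T, LinesCodec> {
    // Carry the already-read bytes into the new transport's read buffer.
    let parts = FramedParts::with_read_buf(io, LinesCodec::default(), leftover);
    Framed::from_parts(parts)
}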


@@ -7,16 +7,18 @@
//! [`Sink`]: futures_sink::Sink
//! [`Stream`]: futures_core::Stream
#![deny(rust_2018_idioms, nonstandard_style)]
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod bcodec;
mod framed;
mod lines;
pub use self::bcodec::BytesCodec;
pub use self::framed::{Framed, FramedParts};
pub use self::lines::LinesCodec;
pub use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
pub use tokio_util::codec::{Decoder, Encoder};

actix-codec/src/lines.rs (new file, 158 lines)

@@ -0,0 +1,158 @@
use std::io;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use memchr::memchr;
use super::{Decoder, Encoder};
/// Lines codec. Reads/writes line delimited strings.
///
/// Will split input up by LF or CRLF delimiters. I.e. carriage return characters at the end of
/// lines are not preserved.
#[derive(Debug, Copy, Clone, Default)]
#[non_exhaustive]
pub struct LinesCodec;
impl<T: AsRef<str>> Encoder<T> for LinesCodec {
type Error = io::Error;
#[inline]
fn encode(&mut self, item: T, dst: &mut BytesMut) -> Result<(), Self::Error> {
let item = item.as_ref();
dst.reserve(item.len() + 1);
dst.put_slice(item.as_bytes());
dst.put_u8(b'\n');
Ok(())
}
}
impl Decoder for LinesCodec {
type Item = String;
type Error = io::Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if src.is_empty() {
return Ok(None);
}
let len = match memchr(b'\n', src) {
Some(n) => n,
None => {
return Ok(None);
}
};
// split up to new line char
let mut buf = src.split_to(len);
debug_assert_eq!(len, buf.len());
// remove new line char from source
src.advance(1);
match buf.last() {
// remove carriage returns at the end of buf
Some(b'\r') => buf.truncate(len - 1),
// line is empty
None => return Ok(Some(String::new())),
_ => {}
}
try_into_utf8(buf.freeze())
}
fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
match self.decode(src)? {
Some(frame) => Ok(Some(frame)),
None if src.is_empty() => Ok(None),
None => {
let buf = match src.last() {
// if last line ends in a CR then take everything up to it
Some(b'\r') => src.split_to(src.len() - 1),
// take all bytes from source
_ => src.split(),
};
if buf.is_empty() {
return Ok(None);
}
try_into_utf8(buf.freeze())
}
}
}
}
// Attempts to convert bytes into a `String`.
fn try_into_utf8(buf: Bytes) -> io::Result<Option<String>> {
String::from_utf8(buf.to_vec())
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
.map(Some)
}
#[cfg(test)]
mod tests {
use bytes::BufMut as _;
use super::*;
#[test]
fn lines_decoder() {
let mut codec = LinesCodec::default();
let mut buf = BytesMut::from("\nline 1\nline 2\r\nline 3\n\r\n\r");
assert_eq!("", codec.decode(&mut buf).unwrap().unwrap());
assert_eq!("line 1", codec.decode(&mut buf).unwrap().unwrap());
assert_eq!("line 2", codec.decode(&mut buf).unwrap().unwrap());
assert_eq!("line 3", codec.decode(&mut buf).unwrap().unwrap());
assert_eq!("", codec.decode(&mut buf).unwrap().unwrap());
assert!(codec.decode(&mut buf).unwrap().is_none());
assert!(codec.decode_eof(&mut buf).unwrap().is_none());
buf.put_slice(b"k");
assert!(codec.decode(&mut buf).unwrap().is_none());
assert_eq!("\rk", codec.decode_eof(&mut buf).unwrap().unwrap());
assert!(codec.decode(&mut buf).unwrap().is_none());
assert!(codec.decode_eof(&mut buf).unwrap().is_none());
}
#[test]
fn lines_encoder() {
let mut codec = LinesCodec::default();
let mut buf = BytesMut::new();
codec.encode("", &mut buf).unwrap();
assert_eq!(&buf[..], b"\n");
codec.encode("test", &mut buf).unwrap();
assert_eq!(&buf[..], b"\ntest\n");
codec.encode("a\nb", &mut buf).unwrap();
assert_eq!(&buf[..], b"\ntest\na\nb\n");
}
#[test]
fn lines_encoder_no_overflow() {
let mut codec = LinesCodec::default();
let mut buf = BytesMut::new();
codec.encode("1234567", &mut buf).unwrap();
assert_eq!(&buf[..], b"1234567\n");
let mut buf = BytesMut::new();
codec.encode("12345678", &mut buf).unwrap();
assert_eq!(&buf[..], b"12345678\n");
let mut buf = BytesMut::new();
codec.encode("123456789111213", &mut buf).unwrap();
assert_eq!(&buf[..], b"123456789111213\n");
let mut buf = BytesMut::new();
codec.encode("1234567891112131", &mut buf).unwrap();
assert_eq!(&buf[..], b"1234567891112131\n");
}
}
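
A short usage sketch, not taken from the repository, of driving the new `LinesCodec` through `Framed`'s `Stream`/`Sink` interface; it assumes tokio's `net` feature and the `StreamExt`/`SinkExt` adapters from `futures-util`:

use actix_codec::{Framed, LinesCodec};
use futures_util::{SinkExt as _, StreamExt as _};
use tokio::net::TcpStream;

// Echo each received line back to the peer, one decoded frame at a time.
async fn echo_lines(stream: TcpStream) -> std::io::Result<()> {
    let mut framed = Framed::new(stream, LinesCodec::default());

    // `next()` yields decoded `String` frames; `send()` encodes the reply and flushes.
    while let Some(line) = framed.next().await {
        framed.send(line?).await?;
    }

    Ok(())
}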


@@ -0,0 +1,221 @@
use actix_codec::*;
use bytes::Buf;
use bytes::{BufMut, BytesMut};
use futures_sink::Sink;
use std::collections::VecDeque;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::Poll::{Pending, Ready};
use std::task::{Context, Poll};
use tokio_test::{assert_ready, task};
macro_rules! bilateral {
($($x:expr,)*) => {{
let mut v = VecDeque::new();
v.extend(vec![$($x),*]);
Bilateral { calls: v }
}};
}
macro_rules! assert_ready {
($e:expr) => {{
use core::task::Poll::*;
match $e {
Ready(v) => v,
Pending => panic!("pending"),
}
}};
($e:expr, $($msg:tt),+) => {{
use core::task::Poll::*;
match $e {
Ready(v) => v,
Pending => {
let msg = format_args!($($msg),+);
panic!("pending; {}", msg)
}
}
}};
}
#[derive(Debug)]
pub struct Bilateral {
pub calls: VecDeque<io::Result<Vec<u8>>>,
}
impl Write for Bilateral {
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
match self.calls.pop_front() {
Some(Ok(data)) => {
assert!(src.len() >= data.len());
assert_eq!(&data[..], &src[..data.len()]);
Ok(data.len())
}
Some(Err(e)) => Err(e),
None => panic!("unexpected write; {:?}", src),
}
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl AsyncWrite for Bilateral {
fn poll_write(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
match Pin::get_mut(self).write(buf) {
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Pending,
other => Ready(other),
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
match Pin::get_mut(self).flush() {
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Pending,
other => Ready(other),
}
}
fn poll_shutdown(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
unimplemented!()
}
}
impl AsyncRead for Bilateral {
fn poll_read(
mut self: Pin<&mut Self>,
_: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<Result<(), std::io::Error>> {
use io::ErrorKind::WouldBlock;
match self.calls.pop_front() {
Some(Ok(data)) => {
debug_assert!(buf.remaining() >= data.len());
buf.put_slice(&data);
Ready(Ok(()))
}
Some(Err(ref e)) if e.kind() == WouldBlock => Pending,
Some(Err(e)) => Ready(Err(e)),
None => Ready(Ok(())),
}
}
}
pub struct U32;
impl Encoder<u32> for U32 {
type Error = io::Error;
fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> {
// Reserve space
dst.reserve(4);
dst.put_u32(item);
Ok(())
}
}
impl Decoder for U32 {
type Item = u32;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u32>> {
if buf.len() < 4 {
return Ok(None);
}
let n = buf.split_to(4).get_u32();
Ok(Some(n))
}
}
#[test]
fn test_write_hits_highwater_mark() {
// see here for what this test is based on:
// https://github.com/tokio-rs/tokio/blob/75c07770bfbfea4e5fd914af819c741ed9c3fc36/tokio-util/tests/framed_write.rs#L69
const ITER: usize = 2 * 1024;
let mut bi = bilateral! {
Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")),
Ok(b"".to_vec()),
};
for i in 0..=ITER {
let mut b = BytesMut::with_capacity(4);
b.put_u32(i as u32);
// Append to the end
match bi.calls.back_mut().unwrap() {
Ok(ref mut data) => {
// Write in 2kb chunks
if data.len() < ITER {
data.extend_from_slice(&b[..]);
continue;
} // else fall through and create a new buffer
}
_ => unreachable!(),
}
// Push a new new chunk
bi.calls.push_back(Ok(b[..].to_vec()));
}
assert_eq!(bi.calls.len(), 6);
let mut framed = Framed::new(bi, U32);
// Send 8KB. This fills up FramedWrite2 buffer
let mut task = task::spawn(());
task.enter(|cx, _| {
// Send 8KB. This fills up Framed buffer
for i in 0..ITER {
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
}
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// write the buffer
assert!(framed.start_send(i as u32).is_ok());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// Now we poll_ready which forces a flush. The bilateral pops the front message
// and decides to block.
assert!(framed.poll_ready(cx).is_pending());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// We poll again, forcing another flush, which this time succeeds
// The whole 8KB buffer is flushed
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// Send more data. This matches the final message expected by the bilateral
assert!(framed.start_send(ITER as u32).is_ok());
}
{
#[allow(unused_mut)]
let mut framed = Pin::new(&mut framed);
// Flush the rest of the buffer
assert!(assert_ready!(framed.poll_flush(cx)).is_ok());
}
// Ensure the mock is empty
assert_eq!(0, Pin::new(&framed).get_ref().io_ref().calls.len());
});
}
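For orientation, here is a minimal sketch of how the `Framed`/`U32` pair exercised above might be used outside the test. It assumes `tokio::net::TcpStream` and the `futures-util` `SinkExt`/`StreamExt` traits, neither of which appears in this diff:

use actix_codec::Framed;
use futures_util::{SinkExt, StreamExt};
use tokio::net::TcpStream;

// Hypothetical helper, not part of the crate: echo a single u32 frame.
async fn echo_u32(stream: TcpStream) -> std::io::Result<()> {
    let mut framed = Framed::new(stream, U32);
    // `send` internally drives `poll_ready`, which flushes once the write
    // buffer passes the high-water mark (the behaviour the test above checks).
    if let Some(frame) = framed.next().await {
        framed.send(frame?).await?;
    }
    Ok(())
}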

View File

@@ -1,13 +1,46 @@
# CHANGES
# Changes
## 0.1.3 - 2020-12-3
## Unreleased - 2021-xx-xx
## 0.2.3 - 2021-10-19
* Fix test macro in presence of other imports named "test". [#399]
[#399]: https://github.com/actix/actix-net/pull/399
## 0.2.2 - 2021-10-14
* Improve error recovery potential when macro input is invalid. [#391]
* Allow custom `System`s on test macro. [#391]
[#391]: https://github.com/actix/actix-net/pull/391
## 0.2.1 - 2021-02-02
* Add optional argument `system` to `main` macro which can be used to specify the path to `actix_rt::System` (useful for re-exports). [#363]
[#363]: https://github.com/actix/actix-net/pull/363
## 0.2.0 - 2021-02-02
* Update to latest `actix_rt::System::new` signature. [#261]
[#261]: https://github.com/actix/actix-net/pull/261
## 0.2.0-beta.1 - 2021-01-09
* Remove `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
## 0.1.3 - 2020-12-03
* Add `actix-reexport` feature. [#218]
[#218]: https://github.com/actix/actix-net/pull/218
* Add `actix-reexport` feature
## 0.1.2 - 2020-05-18
### Changed
* Forward actix_rt::test arguments to test function [#127]
[#127]: https://github.com/actix/actix-net/pull/127

View File

@@ -1,10 +1,13 @@
[package]
name = "actix-macros"
version = "0.1.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix runtime macros"
repository = "https://github.com/actix/actix-net"
documentation = "https://docs.rs/actix-macros/"
version = "0.2.3"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Ibraheem Ahmed <ibrah1440@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Macros for Actix system and runtime"
repository = "https://github.com/actix/actix-net.git"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -16,11 +19,9 @@ proc-macro = true
quote = "1.0.3"
syn = { version = "^1", features = ["full"] }
[features]
actix-reexport = []
[dev-dependencies]
actix-rt = "1.0"
actix-rt = "2.0.0"
futures-util = { version = "0.3", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
rustversion = "1"
trybuild = "1"

View File

@@ -1,4 +1,12 @@
//! Macros for use with Tokio
//! Macros for Actix system and runtime.
//!
//! The [`actix-rt`](https://docs.rs/actix-rt) crate must be available for macro output to compile.
//!
//! # Entry-point
//! See docs for the [`#[main]`](macro@main) macro.
//!
//! # Tests
//! See docs for the [`#[test]`](macro@test) macro.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
@@ -7,11 +15,10 @@
use proc_macro::TokenStream;
use quote::quote;
/// Marks async function to be executed by actix system.
/// Marks async entry-point function to be executed by Actix system.
///
/// ## Usage
///
/// ```rust
/// # Examples
/// ```
/// #[actix_rt::main]
/// async fn main() {
/// println!("Hello world");
@@ -20,56 +27,94 @@ use quote::quote;
#[allow(clippy::needless_doctest_main)]
#[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
Ok(input) => input,
// on parse err, make IDEs happy; see fn docs
Err(err) => return input_and_compile_error(item, err),
};
let args = syn::parse_macro_input!(args as syn::AttributeArgs);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
let name = &sig.ident;
if sig.asyncness.is_none() {
return syn::Error::new_spanned(sig.fn_token, "only async fn is supported")
.to_compile_error()
.into();
return syn::Error::new_spanned(
sig.fn_token,
"the async keyword is missing from the function declaration",
)
.to_compile_error()
.into();
}
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
for arg in &args {
match arg {
syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue {
lit: syn::Lit::Str(lit),
path,
..
})) => match path
.get_ident()
.map(|i| i.to_string().to_lowercase())
.as_deref()
{
Some("system") => match lit.parse() {
Ok(path) => system = path,
Err(_) => {
return syn::Error::new_spanned(lit, "Expected path")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
}
}
sig.asyncness = None;
if cfg!(feature = "actix-reexport") {
(quote! {
#(#attrs)*
#vis #sig {
actix::System::new(stringify!(#name))
.block_on(async move { #body })
}
})
.into()
} else {
(quote! {
#(#attrs)*
#vis #sig {
actix_rt::System::new(stringify!(#name))
.block_on(async move { #body })
}
})
.into()
}
(quote! {
#(#attrs)*
#vis #sig {
<#system>::new().block_on(async move { #body })
}
})
.into()
}
/// Marks async test function to be executed by actix runtime.
/// Marks async test function to be executed in an Actix system.
///
/// ## Usage
///
/// ```no_run
/// # Examples
/// ```
/// #[actix_rt::test]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
#[proc_macro_attribute]
pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
pub fn test(args: TokenStream, item: TokenStream) -> TokenStream {
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
Ok(input) => input,
// on parse err, make IDEs happy; see fn docs
Err(err) => return input_and_compile_error(item, err),
};
let args = syn::parse_macro_input!(args as syn::AttributeArgs);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
@@ -85,7 +130,7 @@ pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
if sig.asyncness.is_none() {
return syn::Error::new_spanned(
input.sig.fn_token,
format!("only async fn is supported, {}", input.sig.ident),
"the async keyword is missing from the function declaration",
)
.to_compile_error()
.into();
@@ -93,24 +138,65 @@ pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
sig.asyncness = None;
let result = if has_test_attr {
quote! {
#(#attrs)*
#vis #sig {
actix_rt::System::new("test")
.block_on(async { #body })
}
}
let missing_test_attr = if has_test_attr {
quote! {}
} else {
quote! {
#[test]
#(#attrs)*
#vis #sig {
actix_rt::System::new("test")
.block_on(async { #body })
}
}
quote! { #[::core::prelude::v1::test] }
};
result.into()
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
for arg in &args {
match arg {
syn::NestedMeta::Meta(syn::Meta::NameValue(syn::MetaNameValue {
lit: syn::Lit::Str(lit),
path,
..
})) => match path
.get_ident()
.map(|i| i.to_string().to_lowercase())
.as_deref()
{
Some("system") => match lit.parse() {
Ok(path) => system = path,
Err(_) => {
return syn::Error::new_spanned(lit, "Expected path")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
},
_ => {
return syn::Error::new_spanned(arg, "Unknown attribute specified")
.to_compile_error()
.into();
}
}
}
(quote! {
#missing_test_attr
#(#attrs)*
#vis #sig {
<#system>::new().block_on(async { #body })
}
})
.into()
}
/// Converts the error to a token stream and appends it to the original input.
///
/// Returning the original input in addition to the error is good for IDEs which can gracefully
/// recover and show more precise errors within the macro body.
///
/// See <https://github.com/rust-analyzer/rust-analyzer/issues/10468> for more info.
fn input_and_compile_error(mut item: TokenStream, err: syn::Error) -> TokenStream {
let compile_err = TokenStream::from(err.to_compile_error());
item.extend(compile_err);
item
}
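For readers skimming this diff, an approximate hand-expansion of the rewritten `#[main]` macro, derived from the `quote!` block above (the actual token output may differ slightly, for example in brace nesting, and the `System` path can be overridden via the `system` argument):

// Input:
#[actix_rt::main]
async fn main() {
    println!("Hello world");
}

// Roughly expands to:
fn main() {
    <::actix_rt::System>::new().block_on(async move {
        println!("Hello world");
    })
}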

View File

@@ -1,9 +1,18 @@
#[rustversion::stable(1.46)] // MSRV
#[test]
fn compile_macros() {
let t = trybuild::TestCases::new();
t.pass("tests/trybuild/main-01-basic.rs");
t.compile_fail("tests/trybuild/main-02-only-async.rs");
t.pass("tests/trybuild/main-03-fn-params.rs");
t.pass("tests/trybuild/main-04-system-path.rs");
t.compile_fail("tests/trybuild/main-05-system-expect-path.rs");
t.compile_fail("tests/trybuild/main-06-unknown-attr.rs");
t.pass("tests/trybuild/test-01-basic.rs");
t.pass("tests/trybuild/test-02-keep-attrs.rs");
t.compile_fail("tests/trybuild/test-03-only-async.rs");
t.pass("tests/trybuild/test-04-system-path.rs");
t.compile_fail("tests/trybuild/test-05-system-expect-path.rs");
t.compile_fail("tests/trybuild/test-06-unknown-attr.rs");
}

View File

@@ -1,4 +1,4 @@
error: only async fn is supported
error: the async keyword is missing from the function declaration
--> $DIR/main-02-only-async.rs:2:1
|
2 | fn main() {

View File

@@ -0,0 +1,6 @@
#[actix_rt::main]
async fn main2(_param: bool) {
futures_util::future::ready(()).await
}
fn main() {}

View File

@@ -0,0 +1,8 @@
mod system {
pub use actix_rt::System as MySystem;
}
#[actix_rt::main(system = "system::MySystem")]
async fn main() {
futures_util::future::ready(()).await
}

View File

@@ -0,0 +1,4 @@
#[actix_rt::main(system = "!@#*&")]
async fn main2() {}
fn main() {}

View File

@@ -0,0 +1,5 @@
error: Expected path
--> $DIR/main-05-system-expect-path.rs:1:27
|
1 | #[actix_rt::main(system = "!@#*&")]
| ^^^^^^^

View File

@@ -0,0 +1,7 @@
#[actix_rt::main(foo = "bar")]
async fn async_main() {}
#[actix_rt::main(bar::baz)]
async fn async_main2() {}
fn main() {}

View File

@@ -0,0 +1,11 @@
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:1:18
|
1 | #[actix_rt::main(foo = "bar")]
| ^^^^^^^^^^^
error: Unknown attribute specified
--> $DIR/main-06-unknown-attr.rs:4:18
|
4 | #[actix_rt::main(bar::baz)]
| ^^^^^^^^

View File

@@ -0,0 +1,6 @@
#[actix_rt::test]
fn my_test() {
futures_util::future::ready(()).await
}
fn main() {}

View File

@@ -0,0 +1,5 @@
error: the async keyword is missing from the function declaration
--> $DIR/test-03-only-async.rs:2:1
|
2 | fn my_test() {
| ^^

View File

@@ -0,0 +1,10 @@
mod system {
pub use actix_rt::System as MySystem;
}
#[actix_rt::test(system = "system::MySystem")]
async fn my_test() {
futures_util::future::ready(()).await
}
fn main() {}

View File

@@ -0,0 +1,4 @@
#[actix_rt::test(system = "!@#*&")]
async fn my_test() {}
fn main() {}

View File

@@ -0,0 +1,5 @@
error: Expected path
--> $DIR/test-05-system-expect-path.rs:1:27
|
1 | #[actix_rt::test(system = "!@#*&")]
| ^^^^^^^

View File

@@ -0,0 +1,7 @@
#[actix_rt::test(foo = "bar")]
async fn my_test_1() {}
#[actix_rt::test(bar::baz)]
async fn my_test_2() {}
fn main() {}

View File

@@ -0,0 +1,11 @@
error: Unknown attribute specified
--> $DIR/test-06-unknown-attr.rs:1:18
|
1 | #[actix_rt::test(foo = "bar")]
| ^^^^^^^^^^^
error: Unknown attribute specified
--> $DIR/test-06-unknown-attr.rs:4:18
|
4 | #[actix_rt::test(bar::baz)]
| ^^^^^^^^

View File

@@ -1,55 +0,0 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.2.5 - 2020-09-20
* Fix `from_hex()` method
## 0.2.4 - 2019-12-31
* Add `ResourceDef::resource_path_named()` path generation method
## 0.2.3 - 2019-12-25
* Add impl `IntoPattern` for `&String`
## 0.2.2 - 2019-12-25
* Use `IntoPattern` for `RouterBuilder::path()`
## 0.2.1 - 2019-12-25
* Add `IntoPattern` trait
* Add multi-pattern resources
## 0.2.0 - 2019-12-07
* Update http to 0.2
* Update regex to 1.3
* Use bytestring instead of string
## 0.1.5 - 2019-05-15
* Remove debug prints
## 0.1.4 - 2019-05-15
* Fix checked resource match
## 0.1.3 - 2019-04-22
* Added support for `remainder match` (i.e "/path/{tail}*")
## 0.1.2 - 2019-04-07
* Export `Quoter` type
* Allow to reset `Path` instance
## 0.1.1 - 2019-04-03
* Get dynamic segment by name instead of iterator.
## 0.1.0 - 2019-03-09
* Initial release

View File

@@ -1,29 +0,0 @@
[package]
name = "actix-router"
version = "0.2.5"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Resource path matching library"
keywords = ["actix"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-router/"
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
name = "actix_router"
path = "src/lib.rs"
[features]
default = ["http"]
[dependencies]
regex = "1.3.1"
serde = "1.0.104"
bytestring = "0.1.2"
log = "0.4.8"
http = { version = "0.2.2", optional = true }
[dev-dependencies]
http = "0.2.2"
serde_derive = "1.0"

View File

@@ -1 +0,0 @@
../LICENSE-APACHE

View File

@@ -1 +0,0 @@
../LICENSE-MIT

View File

@@ -1,717 +0,0 @@
use serde::de::{self, Deserializer, Error as DeError, Visitor};
use serde::forward_to_deserialize_any;
use crate::path::{Path, PathIter};
use crate::ResourcePath;
macro_rules! unsupported_type {
($trait_fn:ident, $name:expr) => {
fn $trait_fn<V>(self, _: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom(concat!(
"unsupported type: ",
$name
)))
}
};
}
macro_rules! parse_single_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() != 1 {
Err(de::value::Error::custom(
format!("wrong number of parameters: {} expected 1", self.path.len())
.as_str(),
))
} else {
let v = self.path[0].parse().map_err(|_| {
de::value::Error::custom(format!(
"can not parse {:?} to a {}",
&self.path[0], $tp
))
})?;
visitor.$visit_fn(v)
}
}
};
}
pub struct PathDeserializer<'de, T: ResourcePath> {
path: &'de Path<T>,
}
impl<'de, T: ResourcePath + 'de> PathDeserializer<'de, T> {
pub fn new(path: &'de Path<T>) -> Self {
PathDeserializer { path }
}
}
impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T> {
type Error = de::value::Error;
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_map(ParamsDeserializer {
params: self.path.iter(),
current: None,
})
}
fn deserialize_struct<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_map(visitor)
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_unit(visitor)
}
fn deserialize_newtype_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_tuple<V>(self, len: usize, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() < len {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected {}",
self.path.len(),
len
)
.as_str(),
))
} else {
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
}
fn deserialize_tuple_struct<V>(
self,
_: &'static str,
len: usize,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() < len {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected {}",
self.path.len(),
len
)
.as_str(),
))
} else {
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
}
fn deserialize_enum<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.is_empty() {
Err(de::value::Error::custom("expected at least one parameters"))
} else {
visitor.visit_enum(ValueEnum {
value: &self.path[0],
})
}
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.path.len() != 1 {
Err(de::value::Error::custom(
format!("wrong number of parameters: {} expected 1", self.path.len()).as_str(),
))
} else {
visitor.visit_str(&self.path[0])
}
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_seq(ParamsSeq {
params: self.path.iter(),
})
}
unsupported_type!(deserialize_any, "'any'");
unsupported_type!(deserialize_bytes, "bytes");
unsupported_type!(deserialize_option, "Option<T>");
unsupported_type!(deserialize_identifier, "identifier");
unsupported_type!(deserialize_ignored_any, "ignored_any");
parse_single_value!(deserialize_bool, visit_bool, "bool");
parse_single_value!(deserialize_i8, visit_i8, "i8");
parse_single_value!(deserialize_i16, visit_i16, "i16");
parse_single_value!(deserialize_i32, visit_i32, "i32");
parse_single_value!(deserialize_i64, visit_i64, "i64");
parse_single_value!(deserialize_u8, visit_u8, "u8");
parse_single_value!(deserialize_u16, visit_u16, "u16");
parse_single_value!(deserialize_u32, visit_u32, "u32");
parse_single_value!(deserialize_u64, visit_u64, "u64");
parse_single_value!(deserialize_f32, visit_f32, "f32");
parse_single_value!(deserialize_f64, visit_f64, "f64");
parse_single_value!(deserialize_string, visit_string, "String");
parse_single_value!(deserialize_byte_buf, visit_string, "String");
parse_single_value!(deserialize_char, visit_char, "char");
}
struct ParamsDeserializer<'de, T: ResourcePath> {
params: PathIter<'de, T>,
current: Option<(&'de str, &'de str)>,
}
impl<'de, T: ResourcePath> de::MapAccess<'de> for ParamsDeserializer<'de, T> {
type Error = de::value::Error;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Self::Error>
where
K: de::DeserializeSeed<'de>,
{
self.current = self.params.next().map(|ref item| (item.0, item.1));
match self.current {
Some((key, _)) => Ok(Some(seed.deserialize(Key { key })?)),
None => Ok(None),
}
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Self::Error>
where
V: de::DeserializeSeed<'de>,
{
if let Some((_, value)) = self.current.take() {
seed.deserialize(Value { value })
} else {
Err(de::value::Error::custom("unexpected item"))
}
}
}
struct Key<'de> {
key: &'de str,
}
impl<'de> Deserializer<'de> for Key<'de> {
type Error = de::value::Error;
fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_str(self.key)
}
fn deserialize_any<V>(self, _visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("Unexpected"))
}
forward_to_deserialize_any! {
bool i8 i16 i32 i64 u8 u16 u32 u64 f32 f64 char str string bytes
byte_buf option unit unit_struct newtype_struct seq tuple
tuple_struct map struct enum ignored_any
}
}
macro_rules! parse_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
let v = self.value.parse().map_err(|_| {
de::value::Error::custom(format!("can not parse {:?} to a {}", self.value, $tp))
})?;
visitor.$visit_fn(v)
}
};
}
struct Value<'de> {
value: &'de str,
}
impl<'de> Deserializer<'de> for Value<'de> {
type Error = de::value::Error;
parse_value!(deserialize_bool, visit_bool, "bool");
parse_value!(deserialize_i8, visit_i8, "i8");
parse_value!(deserialize_i16, visit_i16, "i16");
parse_value!(deserialize_i32, visit_i32, "i32");
parse_value!(deserialize_i64, visit_i64, "i64");
parse_value!(deserialize_u8, visit_u8, "u8");
parse_value!(deserialize_u16, visit_u16, "u16");
parse_value!(deserialize_u32, visit_u32, "u32");
parse_value!(deserialize_u64, visit_u64, "u64");
parse_value!(deserialize_f32, visit_f32, "f32");
parse_value!(deserialize_f64, visit_f64, "f64");
parse_value!(deserialize_string, visit_string, "String");
parse_value!(deserialize_byte_buf, visit_string, "String");
parse_value!(deserialize_char, visit_char, "char");
fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_unit_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_borrowed_bytes(self.value.as_bytes())
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_borrowed_str(self.value)
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_some(self)
}
fn deserialize_enum<V>(
self,
_: &'static str,
_: &'static [&'static str],
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_enum(ValueEnum { value: self.value })
}
fn deserialize_newtype_struct<V>(
self,
_: &'static str,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_tuple<V>(self, _: usize, _: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: tuple"))
}
fn deserialize_struct<V>(
self,
_: &'static str,
_: &'static [&'static str],
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: struct"))
}
fn deserialize_tuple_struct<V>(
self,
_: &'static str,
_: usize,
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("unsupported type: tuple struct"))
}
unsupported_type!(deserialize_any, "any");
unsupported_type!(deserialize_seq, "seq");
unsupported_type!(deserialize_map, "map");
unsupported_type!(deserialize_identifier, "identifier");
}
struct ParamsSeq<'de, T: ResourcePath> {
params: PathIter<'de, T>,
}
impl<'de, T: ResourcePath> de::SeqAccess<'de> for ParamsSeq<'de, T> {
type Error = de::value::Error;
fn next_element_seed<U>(&mut self, seed: U) -> Result<Option<U::Value>, Self::Error>
where
U: de::DeserializeSeed<'de>,
{
match self.params.next() {
Some(item) => Ok(Some(seed.deserialize(Value { value: item.1 })?)),
None => Ok(None),
}
}
}
struct ValueEnum<'de> {
value: &'de str,
}
impl<'de> de::EnumAccess<'de> for ValueEnum<'de> {
type Error = de::value::Error;
type Variant = UnitVariant;
fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
where
V: de::DeserializeSeed<'de>,
{
Ok((seed.deserialize(Key { key: self.value })?, UnitVariant))
}
}
struct UnitVariant;
impl<'de> de::VariantAccess<'de> for UnitVariant {
type Error = de::value::Error;
fn unit_variant(self) -> Result<(), Self::Error> {
Ok(())
}
fn newtype_variant_seed<T>(self, _seed: T) -> Result<T::Value, Self::Error>
where
T: de::DeserializeSeed<'de>,
{
Err(de::value::Error::custom("not supported"))
}
fn tuple_variant<V>(self, _len: usize, _visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("not supported"))
}
fn struct_variant<V>(
self,
_: &'static [&'static str],
_: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom("not supported"))
}
}
#[cfg(test)]
mod tests {
use serde::de;
use serde_derive::Deserialize;
use super::*;
use crate::path::Path;
use crate::router::Router;
#[derive(Deserialize)]
struct MyStruct {
key: String,
value: String,
}
#[derive(Deserialize)]
struct Id {
_id: String,
}
#[derive(Debug, Deserialize)]
struct Test1(String, u32);
#[derive(Debug, Deserialize)]
struct Test2 {
key: String,
value: u32,
}
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum TestEnum {
Val1,
Val2,
}
#[derive(Debug, Deserialize)]
struct Test3 {
val: TestEnum,
}
#[test]
fn test_request_extract() {
let mut router = Router::<()>::build();
router.path("/{key}/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/user1/");
assert!(router.recognize(&mut path).is_some());
let s: MyStruct = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.key, "name");
assert_eq!(s.value, "user1");
let s: (String, String) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, "user1");
let mut router = Router::<()>::build();
router.path("/{key}/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/32/");
assert!(router.recognize(&mut path).is_some());
let s: Test1 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, 32);
let s: Test2 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.key, "name");
assert_eq!(s.value, 32);
let s: (String, u8) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, 32);
let res: Vec<String> =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(res[0], "name".to_owned());
assert_eq!(res[1], "32".to_owned());
}
#[test]
fn test_extract_path_single() {
let mut router = Router::<()>::build();
router.path("/{value}/", ());
let router = router.finish();
let mut path = Path::new("/32/");
assert!(router.recognize(&mut path).is_some());
let i: i8 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, 32);
}
#[test]
fn test_extract_enum() {
let mut router = Router::<()>::build();
router.path("/{val}/", ());
let router = router.finish();
let mut path = Path::new("/val1/");
assert!(router.recognize(&mut path).is_some());
let i: TestEnum = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, TestEnum::Val1);
let mut router = Router::<()>::build();
router.path("/{val1}/{val2}/", ());
let router = router.finish();
let mut path = Path::new("/val1/val2/");
assert!(router.recognize(&mut path).is_some());
let i: (TestEnum, TestEnum) =
de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i, (TestEnum::Val1, TestEnum::Val2));
}
#[test]
fn test_extract_enum_value() {
let mut router = Router::<()>::build();
router.path("/{val}/", ());
let router = router.finish();
let mut path = Path::new("/val1/");
assert!(router.recognize(&mut path).is_some());
let i: Test3 = de::Deserialize::deserialize(PathDeserializer::new(&path)).unwrap();
assert_eq!(i.val, TestEnum::Val1);
let mut path = Path::new("/val3/");
assert!(router.recognize(&mut path).is_some());
let i: Result<Test3, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(i.is_err());
assert!(format!("{:?}", i).contains("unknown variant"));
}
#[test]
fn test_extract_errors() {
let mut router = Router::<()>::build();
router.path("/{value}/", ());
let router = router.finish();
let mut path = Path::new("/name/");
assert!(router.recognize(&mut path).is_some());
let s: Result<Test1, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("wrong number of parameters"));
let s: Result<Test2, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("can not parse"));
let s: Result<(String, String), de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("wrong number of parameters"));
let s: Result<u32, de::value::Error> =
de::Deserialize::deserialize(PathDeserializer::new(&path));
assert!(s.is_err());
assert!(format!("{:?}", s).contains("can not parse"));
}
// #[test]
// fn test_extract_path_decode() {
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
// macro_rules! test_single_value {
// ($value:expr, $expected:expr) => {{
// let req = TestRequest::with_uri($value).finish();
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// assert_eq!(
// *Path::<String>::from_request(&req, &PathConfig::default()).unwrap(),
// $expected
// );
// }};
// }
// test_single_value!("/%25/", "%");
// test_single_value!("/%40%C2%A3%24%25%5E%26%2B%3D/", "@£$%^&+=");
// test_single_value!("/%2B/", "+");
// test_single_value!("/%252B/", "%2B");
// test_single_value!("/%2F/", "/");
// test_single_value!("/%252F/", "%2F");
// test_single_value!(
// "/http%3A%2F%2Flocalhost%3A80%2Ffoo/",
// "http://localhost:80/foo"
// );
// test_single_value!("/%2Fvar%2Flog%2Fsyslog/", "/var/log/syslog");
// test_single_value!(
// "/http%3A%2F%2Flocalhost%3A80%2Ffile%2F%252Fvar%252Flog%252Fsyslog/",
// "http://localhost:80/file/%2Fvar%2Flog%2Fsyslog"
// );
// let req = TestRequest::with_uri("/%25/7/?id=test").finish();
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/")));
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// let s = Path::<Test2>::from_request(&req, &PathConfig::default()).unwrap();
// assert_eq!(s.key, "%");
// assert_eq!(s.value, 7);
// let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap();
// assert_eq!(s.0, "%");
// assert_eq!(s.1, "7");
// }
// #[test]
// fn test_extract_path_no_decode() {
// let mut router = Router::<()>::default();
// router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
// let req = TestRequest::with_uri("/%25/").finish();
// let info = router.recognize(&req, &(), 0);
// let req = req.with_route_info(info);
// assert_eq!(
// *Path::<String>::from_request(&req, &&PathConfig::default().disable_decoding())
// .unwrap(),
// "%25"
// );
// }
}

View File

@@ -1,152 +0,0 @@
//! Resource path matching library.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod de;
mod path;
mod resource;
mod router;
pub use self::de::PathDeserializer;
pub use self::path::Path;
pub use self::resource::ResourceDef;
pub use self::router::{ResourceInfo, Router, RouterBuilder};
pub trait Resource<T: ResourcePath> {
fn resource_path(&mut self) -> &mut Path<T>;
}
pub trait ResourcePath {
fn path(&self) -> &str;
}
impl ResourcePath for String {
fn path(&self) -> &str {
self.as_str()
}
}
impl<'a> ResourcePath for &'a str {
fn path(&self) -> &str {
self
}
}
impl ResourcePath for bytestring::ByteString {
fn path(&self) -> &str {
&*self
}
}
/// Helper trait for types that can be converted into a path pattern
pub trait IntoPattern {
fn is_single(&self) -> bool;
fn patterns(&self) -> Vec<String>;
}
impl IntoPattern for String {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![self.clone()]
}
}
impl<'a> IntoPattern for &'a String {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![self.as_str().to_string()]
}
}
impl<'a> IntoPattern for &'a str {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![(*self).to_string()]
}
}
impl<T: AsRef<str>> IntoPattern for Vec<T> {
fn is_single(&self) -> bool {
self.len() == 1
}
fn patterns(&self) -> Vec<String> {
self.iter().map(|v| v.as_ref().to_string()).collect()
}
}
macro_rules! array_patterns (($tp:ty, $num:tt) => {
impl IntoPattern for [$tp; $num] {
fn is_single(&self) -> bool {
$num == 1
}
fn patterns(&self) -> Vec<String> {
self.iter().map(|v| v.to_string()).collect()
}
}
});
array_patterns!(&str, 1);
array_patterns!(&str, 2);
array_patterns!(&str, 3);
array_patterns!(&str, 4);
array_patterns!(&str, 5);
array_patterns!(&str, 6);
array_patterns!(&str, 7);
array_patterns!(&str, 8);
array_patterns!(&str, 9);
array_patterns!(&str, 10);
array_patterns!(&str, 11);
array_patterns!(&str, 12);
array_patterns!(&str, 13);
array_patterns!(&str, 14);
array_patterns!(&str, 15);
array_patterns!(&str, 16);
array_patterns!(String, 1);
array_patterns!(String, 2);
array_patterns!(String, 3);
array_patterns!(String, 4);
array_patterns!(String, 5);
array_patterns!(String, 6);
array_patterns!(String, 7);
array_patterns!(String, 8);
array_patterns!(String, 9);
array_patterns!(String, 10);
array_patterns!(String, 11);
array_patterns!(String, 12);
array_patterns!(String, 13);
array_patterns!(String, 14);
array_patterns!(String, 15);
array_patterns!(String, 16);
#[cfg(feature = "http")]
mod url;
#[cfg(feature = "http")]
pub use self::url::{Quoter, Url};
#[cfg(feature = "http")]
mod http_support {
use super::ResourcePath;
use http::Uri;
impl ResourcePath for Uri {
fn path(&self) -> &str {
self.path()
}
}
}

View File

@@ -1,222 +0,0 @@
use std::ops::Index;
use serde::de;
use crate::de::PathDeserializer;
use crate::{Resource, ResourcePath};
#[derive(Debug, Clone, Copy)]
pub(crate) enum PathItem {
Static(&'static str),
Segment(u16, u16),
}
/// Resource path match information
///
/// If the resource path contains variable patterns, `Path` stores them.
#[derive(Debug)]
pub struct Path<T> {
path: T,
pub(crate) skip: u16,
pub(crate) segments: Vec<(&'static str, PathItem)>,
}
impl<T: Default> Default for Path<T> {
fn default() -> Self {
Path {
path: T::default(),
skip: 0,
segments: Vec::new(),
}
}
}
impl<T: Clone> Clone for Path<T> {
fn clone(&self) -> Self {
Path {
path: self.path.clone(),
skip: self.skip,
segments: self.segments.clone(),
}
}
}
impl<T: ResourcePath> Path<T> {
pub fn new(path: T) -> Path<T> {
Path {
path,
skip: 0,
segments: Vec::new(),
}
}
#[inline]
/// Get reference to inner path instance
pub fn get_ref(&self) -> &T {
&self.path
}
#[inline]
/// Get mutable reference to inner path instance
pub fn get_mut(&mut self) -> &mut T {
&mut self.path
}
#[inline]
/// Get the path, omitting the already-processed (skipped) prefix
pub fn path(&self) -> &str {
let skip = self.skip as usize;
let path = self.path.path();
if skip <= path.len() {
&path[skip..]
} else {
""
}
}
#[inline]
/// Set new path
pub fn set(&mut self, path: T) {
self.skip = 0;
self.path = path;
self.segments.clear();
}
#[inline]
/// Reset state
pub fn reset(&mut self) {
self.skip = 0;
self.segments.clear();
}
#[inline]
/// Skip first `n` chars in path
pub fn skip(&mut self, n: u16) {
self.skip += n;
}
pub(crate) fn add(&mut self, name: &'static str, value: PathItem) {
match value {
PathItem::Static(s) => self.segments.push((name, PathItem::Static(s))),
PathItem::Segment(begin, end) => self
.segments
.push((name, PathItem::Segment(self.skip + begin, self.skip + end))),
}
}
#[doc(hidden)]
pub fn add_static(&mut self, name: &'static str, value: &'static str) {
self.segments.push((name, PathItem::Static(value)));
}
#[inline]
/// Check if there are any matched patterns
pub fn is_empty(&self) -> bool {
self.segments.is_empty()
}
#[inline]
/// Check number of extracted parameters
pub fn len(&self) -> usize {
self.segments.len()
}
/// Get matched parameter by name without type conversion
pub fn get(&self, key: &str) -> Option<&str> {
for item in self.segments.iter() {
if key == item.0 {
return match item.1 {
PathItem::Static(ref s) => Some(&s),
PathItem::Segment(s, e) => {
Some(&self.path.path()[(s as usize)..(e as usize)])
}
};
}
}
if key == "tail" {
Some(&self.path.path()[(self.skip as usize)..])
} else {
None
}
}
/// Get unprocessed part of the path
pub fn unprocessed(&self) -> &str {
&self.path.path()[(self.skip as usize)..]
}
/// Get matched parameter by name.
///
/// If the keyed parameter is not available, an empty string is used as the
/// default value.
pub fn query(&self, key: &str) -> &str {
if let Some(s) = self.get(key) {
s
} else {
""
}
}
/// Return iterator to items in parameter container
pub fn iter(&self) -> PathIter<'_, T> {
PathIter {
idx: 0,
params: self,
}
}
/// Try to deserialize matching parameters to a specified type `U`
pub fn load<'de, U: serde::Deserialize<'de>>(&'de self) -> Result<U, de::value::Error> {
de::Deserialize::deserialize(PathDeserializer::new(self))
}
}
#[derive(Debug)]
pub struct PathIter<'a, T> {
idx: usize,
params: &'a Path<T>,
}
impl<'a, T: ResourcePath> Iterator for PathIter<'a, T> {
type Item = (&'a str, &'a str);
#[inline]
fn next(&mut self) -> Option<(&'a str, &'a str)> {
if self.idx < self.params.len() {
let idx = self.idx;
let res = match self.params.segments[idx].1 {
PathItem::Static(ref s) => &s,
PathItem::Segment(s, e) => &self.params.path.path()[(s as usize)..(e as usize)],
};
self.idx += 1;
return Some((&self.params.segments[idx].0, res));
}
None
}
}
impl<'a, T: ResourcePath> Index<&'a str> for Path<T> {
type Output = str;
fn index(&self, name: &'a str) -> &str {
self.get(name)
.expect("Value for parameter is not available")
}
}
impl<T: ResourcePath> Index<usize> for Path<T> {
type Output = str;
fn index(&self, idx: usize) -> &str {
match self.segments[idx].1 {
PathItem::Static(ref s) => &s,
PathItem::Segment(s, e) => &self.path.path()[(s as usize)..(e as usize)],
}
}
}
impl<T: ResourcePath> Resource<T> for Path<T> {
fn resource_path(&mut self) -> &mut Self {
self
}
}
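As a quick orientation for this removed module, a small sketch of how `Path` is typically driven by the router and then deserialized, mirroring the crate's own tests (the struct name `MyParams` and the route are illustrative only, and this targets the crate as it existed before the move):

use actix_router::{Path, Router};
use serde_derive::Deserialize;

#[derive(Deserialize)]
struct MyParams {
    key: String,
    value: u32,
}

fn extract_params() {
    let mut router = Router::<()>::build();
    router.path("/{key}/{value}/", ());
    let router = router.finish();

    let mut path = Path::new("/name/32/");
    // `recognize` runs `ResourceDef::match_path`, recording matched segments.
    assert!(router.recognize(&mut path).is_some());

    // `Path::load` drives `PathDeserializer` over the recorded segments.
    let params: MyParams = path.load().unwrap();
    assert_eq!(params.key, "name");
    assert_eq!(params.value, 32);
}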

View File

@@ -1,946 +0,0 @@
use std::cmp::min;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use regex::{escape, Regex, RegexSet};
use crate::path::{Path, PathItem};
use crate::{IntoPattern, Resource, ResourcePath};
const MAX_DYNAMIC_SEGMENTS: usize = 16;
/// `ResourceDef` describes an entry in the resources table.
///
/// A resource definition can contain at most 16 dynamic segments.
#[derive(Clone, Debug)]
pub struct ResourceDef {
id: u16,
tp: PatternType,
name: String,
pattern: String,
elements: Vec<PatternElement>,
}
#[derive(Debug, Clone, PartialEq)]
enum PatternElement {
Str(String),
Var(String),
}
#[derive(Clone, Debug)]
enum PatternType {
Static(String),
Prefix(String),
Dynamic(Regex, Vec<&'static str>, usize),
DynamicSet(RegexSet, Vec<(Regex, Vec<&'static str>, usize)>),
}
impl ResourceDef {
/// Parse path pattern and create new `Pattern` instance.
///
/// Panics if path pattern is malformed.
pub fn new<T: IntoPattern>(path: T) -> Self {
if path.is_single() {
let patterns = path.patterns();
ResourceDef::with_prefix(&patterns[0], false)
} else {
let set = path.patterns();
let mut data = Vec::new();
let mut re_set = Vec::new();
for path in set {
let (pattern, _, _, len) = ResourceDef::parse(&path, false);
let re = match Regex::new(&pattern) {
Ok(re) => re,
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
};
// actix creates one router per thread
let names: Vec<_> = re
.capture_names()
.filter_map(|name| {
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
})
.collect();
data.push((re, names, len));
re_set.push(pattern);
}
ResourceDef {
id: 0,
tp: PatternType::DynamicSet(RegexSet::new(re_set).unwrap(), data),
elements: Vec::new(),
name: String::new(),
pattern: "".to_owned(),
}
}
}
/// Parse path pattern and create new `Pattern` instance.
///
/// Use `prefix` type instead of `static`.
///
/// Panics if path regex pattern is malformed.
pub fn prefix(path: &str) -> Self {
ResourceDef::with_prefix(path, true)
}
/// Parse path pattern and create new `Pattern` instance.
/// Inserts `/` at the beginning of the pattern.
///
/// Use `prefix` type instead of `static`.
///
/// Panics if path regex pattern is malformed.
pub fn root_prefix(path: &str) -> Self {
ResourceDef::with_prefix(&insert_slash(path), true)
}
/// Resource id
pub fn id(&self) -> u16 {
self.id
}
/// Set resource id
pub fn set_id(&mut self, id: u16) {
self.id = id;
}
/// Parse path pattern and create new `Pattern` instance with custom prefix
fn with_prefix(path: &str, for_prefix: bool) -> Self {
let path = path.to_owned();
let (pattern, elements, is_dynamic, len) = ResourceDef::parse(&path, for_prefix);
let tp = if is_dynamic {
let re = match Regex::new(&pattern) {
Ok(re) => re,
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
};
// actix creates one router per thread
let names = re
.capture_names()
.filter_map(|name| {
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
})
.collect();
PatternType::Dynamic(re, names, len)
} else if for_prefix {
PatternType::Prefix(pattern)
} else {
PatternType::Static(pattern)
};
ResourceDef {
tp,
elements,
id: 0,
name: String::new(),
pattern: path,
}
}
/// Resource pattern name
pub fn name(&self) -> &str {
&self.name
}
/// Mutable reference to a name of a resource definition.
pub fn name_mut(&mut self) -> &mut String {
&mut self.name
}
/// Path pattern of the resource
pub fn pattern(&self) -> &str {
&self.pattern
}
#[inline]
/// Check if path matches this pattern.
pub fn is_match(&self, path: &str) -> bool {
match self.tp {
PatternType::Static(ref s) => s == path,
PatternType::Prefix(ref s) => path.starts_with(s),
PatternType::Dynamic(ref re, _, _) => re.is_match(path),
PatternType::DynamicSet(ref re, _) => re.is_match(path),
}
}
/// Is the given prefix path a match against this resource.
pub fn is_prefix_match(&self, path: &str) -> Option<usize> {
let p_len = path.len();
let path = if path.is_empty() { "/" } else { path };
match self.tp {
PatternType::Static(ref s) => {
if s == path {
Some(p_len)
} else {
None
}
}
PatternType::Dynamic(ref re, _, len) => {
if let Some(captures) = re.captures(path) {
let mut pos = 0;
let mut passed = false;
for capture in captures.iter() {
if let Some(ref m) = capture {
if !passed {
passed = true;
continue;
}
pos = m.end();
}
}
Some(pos + len)
} else {
None
}
}
PatternType::Prefix(ref s) => {
let len = if path == s {
s.len()
} else if path.starts_with(s)
&& (s.ends_with('/') || path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return None;
};
Some(min(p_len, len))
}
PatternType::DynamicSet(ref re, ref params) => {
if let Some(idx) = re.matches(path).into_iter().next() {
let (ref pattern, _, len) = params[idx];
if let Some(captures) = pattern.captures(path) {
let mut pos = 0;
let mut passed = false;
for capture in captures.iter() {
if let Some(ref m) = capture {
if !passed {
passed = true;
continue;
}
pos = m.end();
}
}
Some(pos + len)
} else {
None
}
} else {
None
}
}
}
}
/// Is the given path and parameters a match against this pattern.
pub fn match_path<T: ResourcePath>(&self, path: &mut Path<T>) -> bool {
match self.tp {
PatternType::Static(ref s) => {
if s == path.path() {
path.skip(path.len() as u16);
true
} else {
false
}
}
PatternType::Prefix(ref s) => {
let r_path = path.path();
let len = if s == r_path {
s.len()
} else if r_path.starts_with(s)
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return false;
};
let r_path_len = r_path.len();
path.skip(min(r_path_len, len) as u16);
true
}
PatternType::Dynamic(ref re, ref names, len) => {
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = re.captures(path.path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
}
PatternType::DynamicSet(ref re, ref params) => {
if let Some(idx) = re.matches(path.path()).into_iter().next() {
let (ref pattern, ref names, len) = params[idx];
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = pattern.captures(path.path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] =
PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
} else {
false
}
}
}
}
/// Is the given path and parameters a match against this pattern?
pub fn match_path_checked<R, T, F, U>(
&self,
res: &mut R,
check: &F,
user_data: &Option<U>,
) -> bool
where
T: ResourcePath,
R: Resource<T>,
F: Fn(&R, &Option<U>) -> bool,
{
match self.tp {
PatternType::Static(ref s) => {
if s == res.resource_path().path() && check(res, user_data) {
let path = res.resource_path();
path.skip(path.len() as u16);
true
} else {
false
}
}
PatternType::Prefix(ref s) => {
let len = {
let r_path = res.resource_path().path();
if s == r_path {
s.len()
} else if r_path.starts_with(s)
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return false;
}
};
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
path.skip(min(path.path().len(), len) as u16);
true
}
PatternType::Dynamic(ref re, ref names, len) => {
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = re.captures(res.resource_path().path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] = PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
}
PatternType::DynamicSet(ref re, ref params) => {
let path = res.resource_path().path();
if let Some(idx) = re.matches(path).into_iter().next() {
let (ref pattern, ref names, len) = params[idx];
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = pattern.captures(path) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] =
PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
} else {
false
}
}
}
}
/// Build resource path from elements. Returns `true` on success.
pub fn resource_path<U, I>(&self, path: &mut String, elements: &mut U) -> bool
where
U: Iterator<Item = I>,
I: AsRef<str>,
{
match self.tp {
PatternType::Prefix(ref p) => path.push_str(p),
PatternType::Static(ref p) => path.push_str(p),
PatternType::Dynamic(..) => {
for el in &self.elements {
match *el {
PatternElement::Str(ref s) => path.push_str(s),
PatternElement::Var(_) => {
if let Some(val) = elements.next() {
path.push_str(val.as_ref())
} else {
return false;
}
}
}
}
}
PatternType::DynamicSet(..) => {
return false;
}
}
true
}
/// Build resource path from elements. Returns `true` on success.
pub fn resource_path_named<K, V, S>(
&self,
path: &mut String,
elements: &HashMap<K, V, S>,
) -> bool
where
K: std::borrow::Borrow<str> + Eq + Hash,
V: AsRef<str>,
S: std::hash::BuildHasher,
{
match self.tp {
PatternType::Prefix(ref p) => path.push_str(p),
PatternType::Static(ref p) => path.push_str(p),
PatternType::Dynamic(..) => {
for el in &self.elements {
match *el {
PatternElement::Str(ref s) => path.push_str(s),
PatternElement::Var(ref name) => {
if let Some(val) = elements.get(name) {
path.push_str(val.as_ref())
} else {
return false;
}
}
}
}
}
PatternType::DynamicSet(..) => {
return false;
}
}
true
}
fn parse_param(pattern: &str) -> (PatternElement, String, &str, bool) {
const DEFAULT_PATTERN: &str = "[^/]+";
const DEFAULT_PATTERN_TAIL: &str = ".*";
let mut params_nesting = 0usize;
let close_idx = pattern
.find(|c| match c {
'{' => {
params_nesting += 1;
false
}
'}' => {
params_nesting -= 1;
params_nesting == 0
}
_ => false,
})
.expect("malformed dynamic segment");
let (mut param, mut rem) = pattern.split_at(close_idx + 1);
param = &param[1..param.len() - 1]; // Remove outer brackets
let tail = rem == "*";
let (name, pattern) = match param.find(':') {
Some(idx) => {
if tail {
panic!("Custom regex is not supported for remainder match");
}
let (name, pattern) = param.split_at(idx);
(name, &pattern[1..])
}
None => (
param,
if tail {
rem = &rem[1..];
DEFAULT_PATTERN_TAIL
} else {
DEFAULT_PATTERN
},
),
};
(
PatternElement::Var(name.to_string()),
format!(r"(?P<{}>{})", &name, &pattern),
rem,
tail,
)
}
fn parse(
mut pattern: &str,
mut for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
if pattern.find('{').is_none() {
// TODO: MSRV: 1.45
#[allow(clippy::manual_strip)]
return if pattern.ends_with('*') {
let path = &pattern[..pattern.len() - 1];
let re = String::from("^") + path + "(.*)";
(re, vec![PatternElement::Str(String::from(path))], true, 0)
} else {
(
String::from(pattern),
vec![PatternElement::Str(String::from(pattern))],
false,
pattern.chars().count(),
)
};
}
let mut elements = Vec::new();
let mut re = String::from("^");
let mut dyn_elements = 0;
while let Some(idx) = pattern.find('{') {
let (prefix, rem) = pattern.split_at(idx);
elements.push(PatternElement::Str(String::from(prefix)));
re.push_str(&escape(prefix));
let (param_pattern, re_part, rem, tail) = Self::parse_param(rem);
if tail {
for_prefix = true;
}
elements.push(param_pattern);
re.push_str(&re_part);
pattern = rem;
dyn_elements += 1;
}
elements.push(PatternElement::Str(String::from(pattern)));
re.push_str(&escape(pattern));
if dyn_elements > MAX_DYNAMIC_SEGMENTS {
panic!(
"Only {} dynamic segments are allowed, provided: {}",
MAX_DYNAMIC_SEGMENTS, dyn_elements
);
}
if !for_prefix {
re.push('$');
}
(re, elements, true, pattern.chars().count())
}
}
impl Eq for ResourceDef {}
impl PartialEq for ResourceDef {
fn eq(&self, other: &ResourceDef) -> bool {
self.pattern == other.pattern
}
}
impl Hash for ResourceDef {
fn hash<H: Hasher>(&self, state: &mut H) {
self.pattern.hash(state);
}
}
impl<'a> From<&'a str> for ResourceDef {
fn from(path: &'a str) -> ResourceDef {
ResourceDef::new(path)
}
}
impl From<String> for ResourceDef {
fn from(path: String) -> ResourceDef {
ResourceDef::new(path)
}
}
pub(crate) fn insert_slash(path: &str) -> String {
let mut path = path.to_owned();
if !path.is_empty() && !path.starts_with('/') {
path.insert(0, '/');
};
path
}
#[cfg(test)]
mod tests {
use super::*;
use http::Uri;
use std::convert::TryFrom;
#[test]
fn test_parse_static() {
let re = ResourceDef::new("/");
assert!(re.is_match("/"));
assert!(!re.is_match("/a"));
let re = ResourceDef::new("/name");
assert!(re.is_match("/name"));
assert!(!re.is_match("/name1"));
assert!(!re.is_match("/name/"));
assert!(!re.is_match("/name~"));
assert_eq!(re.is_prefix_match("/name"), Some(5));
assert_eq!(re.is_prefix_match("/name1"), None);
assert_eq!(re.is_prefix_match("/name/"), None);
assert_eq!(re.is_prefix_match("/name~"), None);
let re = ResourceDef::new("/name/");
assert!(re.is_match("/name/"));
assert!(!re.is_match("/name"));
assert!(!re.is_match("/name/gs"));
let re = ResourceDef::new("/user/profile");
assert!(re.is_match("/user/profile"));
assert!(!re.is_match("/user/profile/profile"));
}
#[test]
fn test_parse_param() {
let re = ResourceDef::new("/user/{id}");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let mut path = Path::new("/user/profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/1245125");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "1245125");
let re = ResourceDef::new("/v{version}/resource/{id}");
assert!(re.is_match("/v1/resource/320120"));
assert!(!re.is_match("/v/resource/1"));
assert!(!re.is_match("/resource"));
let mut path = Path::new("/v151/resource/adage32");
assert!(re.match_path(&mut path));
assert_eq!(path.get("version").unwrap(), "151");
assert_eq!(path.get("id").unwrap(), "adage32");
let re = ResourceDef::new("/{id:[[:digit:]]{6}}");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let mut path = Path::new("/012345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "012345");
}
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_dynamic_set() {
let re = ResourceDef::new(vec![
"/user/{id}",
"/v{version}/resource/{id}",
"/{id:[[:digit:]]{6}}",
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let mut path = Path::new("/user/profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/1245125");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "1245125");
assert!(re.is_match("/v1/resource/320120"));
assert!(!re.is_match("/v/resource/1"));
assert!(!re.is_match("/resource"));
let mut path = Path::new("/v151/resource/adage32");
assert!(re.match_path(&mut path));
assert_eq!(path.get("version").unwrap(), "151");
assert_eq!(path.get("id").unwrap(), "adage32");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let mut path = Path::new("/012345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "012345");
let re = ResourceDef::new([
"/user/{id}",
"/v{version}/resource/{id}",
"/{id:[[:digit:]]{6}}",
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let re = ResourceDef::new([
"/user/{id}".to_string(),
"/v{version}/resource/{id}".to_string(),
"/{id:[[:digit:]]{6}}".to_string(),
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
}
#[test]
fn test_parse_tail() {
let re = ResourceDef::new("/user/-{id}*");
let mut path = Path::new("/user/-profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/-2345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let mut path = Path::new("/user/-2345/");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345/");
let mut path = Path::new("/user/-2345/sdg");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345/sdg");
}
#[test]
fn test_static_tail() {
let re = ResourceDef::new("/user*");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(re.is_match("/user/2345/"));
assert!(re.is_match("/user/2345/sdg"));
let re = ResourceDef::new("/user/*");
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(re.is_match("/user/2345/"));
assert!(re.is_match("/user/2345/sdg"));
}
#[test]
fn test_parse_urlencoded_param() {
let re = ResourceDef::new("/user/{id}/test");
let mut path = Path::new("/user/2345/test");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let mut path = Path::new("/user/qwe%25/test");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%25");
let uri = Uri::try_from("/user/qwe%25/test").unwrap();
let mut path = Path::new(uri);
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%25");
}
#[test]
fn test_resource_prefix() {
let re = ResourceDef::prefix("/name");
assert!(re.is_match("/name"));
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/test/test"));
assert!(re.is_match("/name1"));
assert!(re.is_match("/name~"));
assert_eq!(re.is_prefix_match("/name"), Some(5));
assert_eq!(re.is_prefix_match("/name/"), Some(5));
assert_eq!(re.is_prefix_match("/name/test/test"), Some(5));
assert_eq!(re.is_prefix_match("/name1"), None);
assert_eq!(re.is_prefix_match("/name~"), None);
let re = ResourceDef::prefix("/name/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
let re = ResourceDef::root_prefix("name/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
}
#[test]
fn test_resource_prefix_dynamic() {
let re = ResourceDef::prefix("/{name}/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
assert!(!re.is_match("/name"));
assert_eq!(re.is_prefix_match("/name/"), Some(6));
assert_eq!(re.is_prefix_match("/name/gs"), Some(6));
assert_eq!(re.is_prefix_match("/name"), None);
let mut path = Path::new("/test2/");
assert!(re.match_path(&mut path));
assert_eq!(&path["name"], "test2");
assert_eq!(&path[0], "test2");
let mut path = Path::new("/test2/subpath1/subpath2/index.html");
assert!(re.match_path(&mut path));
assert_eq!(&path["name"], "test2");
assert_eq!(&path[0], "test2");
}
#[test]
fn test_resource_path() {
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/test");
assert!(resource.resource_path(&mut s, &mut (&["user1"]).iter()));
assert_eq!(s, "/user/user1/test");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}/test");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/test");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}/");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/");
let mut s = String::new();
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
let mut s = String::new();
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/");
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
let mut s = String::new();
assert!(resource.resource_path(&mut s, &mut vec!["item", "item2"].into_iter()));
assert_eq!(s, "/user/item/item2/");
let mut map = HashMap::new();
map.insert("item1", "item");
let mut s = String::new();
assert!(!resource.resource_path_named(&mut s, &map));
let mut s = String::new();
map.insert("item2", "item2");
assert!(resource.resource_path_named(&mut s, &map));
assert_eq!(s, "/user/item/item2/");
}
}
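
As a quick illustration of the URL-generation side exercised by `test_resource_path` above, here is a minimal standalone sketch of `resource_path_named` (the `actix_router` import path is an assumption; inside this crate the tests reach these items via `crate::`):

```rust
use std::collections::HashMap;

use actix_router::ResourceDef;

fn main() {
    // pattern with two named segments and a trailing slash
    let resource = ResourceDef::new("/user/{item1}/{item2}/");

    let mut params = HashMap::new();
    params.insert("item1", "item");

    // with "item2" still missing, generation fails and returns false
    let mut s = String::new();
    assert!(!resource.resource_path_named(&mut s, &params));

    // once every declared parameter is present, the full path is produced
    params.insert("item2", "item2");
    let mut s = String::new();
    assert!(resource.resource_path_named(&mut s, &params));
    assert_eq!(s, "/user/item/item2/");
}
```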


@@ -1,241 +0,0 @@
use crate::{IntoPattern, Resource, ResourceDef, ResourcePath};
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ResourceId(pub u16);
/// Information about current resource
#[derive(Clone, Debug)]
pub struct ResourceInfo {
resource: ResourceId,
}
/// Resource router.
pub struct Router<T, U = ()>(Vec<(ResourceDef, T, Option<U>)>);
impl<T, U> Router<T, U> {
pub fn build() -> RouterBuilder<T, U> {
RouterBuilder {
resources: Vec::new(),
}
}
pub fn recognize<R, P>(&self, resource: &mut R) -> Option<(&T, ResourceId)>
where
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter() {
if item.0.match_path(resource.resource_path()) {
return Some((&item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_mut<R, P>(&mut self, resource: &mut R) -> Option<(&mut T, ResourceId)>
where
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter_mut() {
if item.0.match_path(resource.resource_path()) {
return Some((&mut item.1, ResourceId(item.0.id())));
}
}
None
}
pub fn recognize_mut_checked<R, P, F>(
&mut self,
resource: &mut R,
check: F,
) -> Option<(&mut T, ResourceId)>
where
F: Fn(&R, &Option<U>) -> bool,
R: Resource<P>,
P: ResourcePath,
{
for item in self.0.iter_mut() {
if item.0.match_path_checked(resource, &check, &item.2) {
return Some((&mut item.1, ResourceId(item.0.id())));
}
}
None
}
}
pub struct RouterBuilder<T, U = ()> {
resources: Vec<(ResourceDef, T, Option<U>)>,
}
impl<T, U> RouterBuilder<T, U> {
/// Register resource for specified path.
pub fn path<P: IntoPattern>(
&mut self,
path: P,
resource: T,
) -> &mut (ResourceDef, T, Option<U>) {
self.resources
.push((ResourceDef::new(path), resource, None));
self.resources.last_mut().unwrap()
}
/// Register resource for specified path prefix.
pub fn prefix(&mut self, prefix: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) {
self.resources
.push((ResourceDef::prefix(prefix), resource, None));
self.resources.last_mut().unwrap()
}
/// Register resource for ResourceDef
pub fn rdef(&mut self, rdef: ResourceDef, resource: T) -> &mut (ResourceDef, T, Option<U>) {
self.resources.push((rdef, resource, None));
self.resources.last_mut().unwrap()
}
/// Finish configuration and create router instance.
pub fn finish(self) -> Router<T, U> {
Router(self.resources)
}
}
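
Here is a small, hedged sketch of how the builder and recognizer above fit together; the import paths are assumed (the tests below use `crate::router` and `crate::path` instead):

```rust
use actix_router::{Path, Router};

fn main() {
    // build a router that maps patterns to arbitrary handler values (here: usize)
    let mut router = Router::<usize>::build();
    router.path("/name", 10);
    router.path("/name/{val}", 11);
    let mut router = router.finish();

    // recognize a concrete path and read the captured segment
    let mut path = Path::new("/name/value");
    let (handler, _id) = router.recognize_mut(&mut path).unwrap();
    assert_eq!(*handler, 11);
    assert_eq!(path.get("val").unwrap(), "value");
}
```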
#[cfg(test)]
mod tests {
use crate::path::Path;
use crate::router::{ResourceId, Router};
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_recognizer_1() {
let mut router = Router::<usize>::build();
router.path("/name", 10).0.set_id(0);
router.path("/name/{val}", 11).0.set_id(1);
router.path("/name/{val}/index.html", 12).0.set_id(2);
router.path("/file/{file}.{ext}", 13).0.set_id(3);
router.path("/v{val}/{val2}/index.html", 14).0.set_id(4);
router.path("/v/{tail:.*}", 15).0.set_id(5);
router.path("/test2/{test}.html", 16).0.set_id(6);
router.path("/{test}/index.html", 17).0.set_id(7);
let mut router = router.finish();
let mut path = Path::new("/unknown");
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/name");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
assert_eq!(info, ResourceId(0));
assert!(path.is_empty());
let mut path = Path::new("/name/value");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(info, ResourceId(1));
assert_eq!(path.get("val").unwrap(), "value");
assert_eq!(&path["val"], "value");
let mut path = Path::new("/name/value2/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 12);
assert_eq!(info, ResourceId(2));
assert_eq!(path.get("val").unwrap(), "value2");
let mut path = Path::new("/file/file.gz");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 13);
assert_eq!(info, ResourceId(3));
assert_eq!(path.get("file").unwrap(), "file");
assert_eq!(path.get("ext").unwrap(), "gz");
let mut path = Path::new("/vtest/ttt/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 14);
assert_eq!(info, ResourceId(4));
assert_eq!(path.get("val").unwrap(), "test");
assert_eq!(path.get("val2").unwrap(), "ttt");
let mut path = Path::new("/v/blah-blah/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 15);
assert_eq!(info, ResourceId(5));
assert_eq!(path.get("tail").unwrap(), "blah-blah/index.html");
let mut path = Path::new("/test2/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 16);
assert_eq!(info, ResourceId(6));
assert_eq!(path.get("test").unwrap(), "index");
let mut path = Path::new("/bbb/index.html");
let (h, info) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 17);
assert_eq!(info, ResourceId(7));
assert_eq!(path.get("test").unwrap(), "bbb");
}
#[test]
fn test_recognizer_2() {
let mut router = Router::<usize>::build();
router.path("/index.json", 10);
router.path("/{source}.json", 11);
let mut router = router.finish();
let mut path = Path::new("/index.json");
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test.json");
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
}
#[test]
fn test_recognizer_with_prefix() {
let mut router = Router::<usize>::build();
router.path("/name", 10).0.set_id(0);
router.path("/name/{val}", 11).0.set_id(1);
let mut router = router.finish();
let mut path = Path::new("/name");
path.skip(5);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test/name");
path.skip(5);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test/name/value");
path.skip(5);
let (h, id) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(id, ResourceId(1));
assert_eq!(path.get("val").unwrap(), "value");
assert_eq!(&path["val"], "value");
// same patterns
let mut router = Router::<usize>::build();
router.path("/name", 10);
router.path("/name/{val}", 11);
let mut router = router.finish();
let mut path = Path::new("/name");
path.skip(6);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test2/name");
path.skip(6);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 10);
let mut path = Path::new("/test2/name-test");
path.skip(6);
assert!(router.recognize_mut(&mut path).is_none());
let mut path = Path::new("/test2/name/ttt");
path.skip(6);
let (h, _) = router.recognize_mut(&mut path).unwrap();
assert_eq!(*h, 11);
assert_eq!(&path["val"], "ttt");
}
}


@@ -1,249 +0,0 @@
use crate::ResourcePath;
#[allow(dead_code)]
const GEN_DELIMS: &[u8] = b":/?#[]@";
#[allow(dead_code)]
const SUB_DELIMS_WITHOUT_QS: &[u8] = b"!$'()*,";
#[allow(dead_code)]
const SUB_DELIMS: &[u8] = b"!$'()*,+?=;";
#[allow(dead_code)]
const RESERVED: &[u8] = b":/?#[]@!$'()*,+?=;";
#[allow(dead_code)]
const UNRESERVED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
-._~";
const ALLOWED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
-._~
!$'()*,";
const QS: &[u8] = b"+&=;b";
#[inline]
fn bit_at(array: &[u8], ch: u8) -> bool {
array[(ch >> 3) as usize] & (1 << (ch & 7)) != 0
}
#[inline]
fn set_bit(array: &mut [u8], ch: u8) {
array[(ch >> 3) as usize] |= 1 << (ch & 7)
}
thread_local! {
static DEFAULT_QUOTER: Quoter = Quoter::new(b"@:", b"/+");
}
#[derive(Default, Clone, Debug)]
pub struct Url {
uri: http::Uri,
path: Option<String>,
}
impl Url {
pub fn new(uri: http::Uri) -> Url {
let path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
Url { uri, path }
}
pub fn with_quoter(uri: http::Uri, quoter: &Quoter) -> Url {
Url {
path: quoter.requote(uri.path().as_bytes()),
uri,
}
}
pub fn uri(&self) -> &http::Uri {
&self.uri
}
pub fn path(&self) -> &str {
if let Some(ref s) = self.path {
s
} else {
self.uri.path()
}
}
#[inline]
pub fn update(&mut self, uri: &http::Uri) {
self.uri = uri.clone();
self.path = DEFAULT_QUOTER.with(|q| q.requote(uri.path().as_bytes()));
}
#[inline]
pub fn update_with_quoter(&mut self, uri: &http::Uri, quoter: &Quoter) {
self.uri = uri.clone();
self.path = quoter.requote(uri.path().as_bytes());
}
}
impl ResourcePath for Url {
#[inline]
fn path(&self) -> &str {
self.path()
}
}
pub struct Quoter {
safe_table: [u8; 16],
protected_table: [u8; 16],
}
impl Quoter {
pub fn new(safe: &[u8], protected: &[u8]) -> Quoter {
let mut q = Quoter {
safe_table: [0; 16],
protected_table: [0; 16],
};
// prepare safe table
for i in 0..128 {
if ALLOWED.contains(&i) {
set_bit(&mut q.safe_table, i);
}
if QS.contains(&i) {
set_bit(&mut q.safe_table, i);
}
}
for ch in safe {
set_bit(&mut q.safe_table, *ch)
}
// prepare protected table
for ch in protected {
set_bit(&mut q.safe_table, *ch);
set_bit(&mut q.protected_table, *ch);
}
q
}
pub fn requote(&self, val: &[u8]) -> Option<String> {
let mut has_pct = 0;
let mut pct = [b'%', 0, 0];
let mut idx = 0;
let mut cloned: Option<Vec<u8>> = None;
let len = val.len();
while idx < len {
let ch = val[idx];
if has_pct != 0 {
pct[has_pct] = val[idx];
has_pct += 1;
if has_pct == 3 {
has_pct = 0;
let buf = cloned.as_mut().unwrap();
if let Some(ch) = restore_ch(pct[1], pct[2]) {
if ch < 128 {
if bit_at(&self.protected_table, ch) {
buf.extend_from_slice(&pct);
idx += 1;
continue;
}
if bit_at(&self.safe_table, ch) {
buf.push(ch);
idx += 1;
continue;
}
}
buf.push(ch);
} else {
buf.extend_from_slice(&pct[..]);
}
}
} else if ch == b'%' {
has_pct = 1;
if cloned.is_none() {
let mut c = Vec::with_capacity(len);
c.extend_from_slice(&val[..idx]);
cloned = Some(c);
}
} else if let Some(ref mut cloned) = cloned {
cloned.push(ch)
}
idx += 1;
}
if let Some(data) = cloned {
// Unsafe: we get data from http::Uri, which does utf-8 checks already
// this code only decodes valid pct encoded values
Some(unsafe { String::from_utf8_unchecked(data) })
} else {
None
}
}
}
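
For orientation, a short sketch of `requote`'s observable behaviour, written as it could appear in this module's test block below and using the same safe/protected byte sets as `DEFAULT_QUOTER`:

```rust
#[test]
fn requote_examples() {
    // same safe/protected sets as DEFAULT_QUOTER above
    let quoter = Quoter::new(b"@:", b"/+");

    // an ordinary byte is decoded: %25 -> '%'
    assert_eq!(
        quoter.requote(b"/user/qwe%25rty").as_deref(),
        Some("/user/qwe%rty")
    );

    // protected bytes stay encoded so path segmentation survives: %2F is '/'
    assert_eq!(quoter.requote(b"/a%2Fb").as_deref(), Some("/a%2Fb"));

    // nothing to decode -> no allocation, None is returned
    assert_eq!(quoter.requote(b"/plain/path"), None);
}
```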
#[inline]
fn from_hex(v: u8) -> Option<u8> {
if (b'0'..=b'9').contains(&v) {
Some(v - 0x30) // ord('0') == 0x30
} else if (b'A'..=b'F').contains(&v) {
Some(v - 0x41 + 10) // ord('A') == 0x41
} else if (b'a'..=b'f').contains(&v) {
Some(v - 0x61 + 10) // ord('a') == 0x61
} else {
None
}
}
#[inline]
fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| d1 << 4 | d2))
}
#[cfg(test)]
mod tests {
use http::Uri;
use std::convert::TryFrom;
use super::*;
use crate::{Path, ResourceDef};
#[test]
fn test_parse_url() {
let re = ResourceDef::new("/user/{id}/test");
let url = Uri::try_from("/user/2345/test").unwrap();
let mut path = Path::new(Url::new(url));
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "2345");
let url = Uri::try_from("/user/qwe%25/test").unwrap();
let mut path = Path::new(Url::new(url));
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%");
let url = Uri::try_from("/user/qwe%25rty/test").unwrap();
let mut path = Path::new(Url::new(url));
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%rty");
}
#[test]
fn test_from_hex() {
let hex = b"0123456789abcdefABCDEF";
for i in 0..256 {
let c = i as u8;
if hex.contains(&c) {
assert!(from_hex(c).is_some())
} else {
assert!(from_hex(c).is_none())
}
}
let expected = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15,
];
for i in 0..hex.len() {
assert_eq!(from_hex(hex[i]).unwrap(), expected[i]);
}
}
}


@@ -3,11 +3,92 @@
## Unreleased - 2021-xx-xx
## 2.0.0-beta.1 - 2020-12-28
### Added
* Add `System::attach_to_tokio` method. [#173]
## 2.4.0 - 2021-11-05
* Add `Arbiter::try_current` for situations where thread may or may not have Arbiter context. [#408]
* Start io-uring with `System::new` when feature is enabled. [#395]
### Changed
[#395]: https://github.com/actix/actix-net/pull/395
[#408]: https://github.com/actix/actix-net/pull/408
## 2.3.0 - 2021-10-11
* The `spawn` method can now resolve with non-unit outputs. [#369]
* Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux. [#374]
[#369]: https://github.com/actix/actix-net/pull/369
[#374]: https://github.com/actix/actix-net/pull/374
## 2.2.0 - 2021-03-29
* **BREAKING** `ActixStream::{poll_read_ready, poll_write_ready}` methods now return
`Ready` object in ok variant. [#293]
* Breakage is acceptable since `ActixStream` was not intended to be public.
[#293]: https://github.com/actix/actix-net/pull/293
## 2.1.0 - 2021-02-24
* Add `ActixStream` extension trait to include readiness methods. [#276]
* Re-export `tokio::net::TcpSocket` in `net` module [#282]
[#276]: https://github.com/actix/actix-net/pull/276
[#282]: https://github.com/actix/actix-net/pull/282
## 2.0.2 - 2021-02-06
* Add `Arbiter::handle` to get a handle of an owned Arbiter. [#274]
* Add `System::try_current` for situations where actix may or may not be running a System. [#275]
[#274]: https://github.com/actix/actix-net/pull/274
[#275]: https://github.com/actix/actix-net/pull/275
## 2.0.1 - 2021-02-06
* Expose `JoinError` from Tokio. [#271]
[#271]: https://github.com/actix/actix-net/pull/271
## 2.0.0 - 2021-02-02
* Remove all Arbiter-local storage methods. [#262]
* Re-export `tokio::pin`. [#262]
[#262]: https://github.com/actix/actix-net/pull/262
## 2.0.0-beta.3 - 2021-01-31
* Remove `run_in_tokio`, `attach_to_tokio` and `AsyncSystemRunner`. [#253]
* Return `JoinHandle` from `actix_rt::spawn`. [#253]
* Remove old `Arbiter::spawn`. Implementation is now inlined into `actix_rt::spawn`. [#253]
* Rename `Arbiter::{send => spawn}` and `Arbiter::{exec_fn => spawn_fn}`. [#253]
* Remove `Arbiter::exec`. [#253]
* Remove deprecated `Arbiter::local_join` and `Arbiter::is_running`. [#253]
* `Arbiter::spawn` now accepts !Unpin futures. [#256]
* `System::new` no longer takes arguments. [#257]
* Remove `System::with_current`. [#257]
* Remove `Builder`. [#257]
* Add `System::with_init` as replacement for `Builder::run`. [#257]
* Rename `System::{is_set => is_registered}`. [#257]
* Add `ArbiterHandle` for sending messages to non-current-thread arbiters. [#257].
* `System::arbiter` now returns an `&ArbiterHandle`. [#257]
* `Arbiter::current` now returns an `ArbiterHandle` instead. [#257]
* `Arbiter::join` now takes self by value. [#257]
[#253]: https://github.com/actix/actix-net/pull/253
[#254]: https://github.com/actix/actix-net/pull/254
[#256]: https://github.com/actix/actix-net/pull/256
[#257]: https://github.com/actix/actix-net/pull/257
## 2.0.0-beta.2 - 2021-01-09
* Add `task` mod with re-export of `tokio::task::{spawn_blocking, yield_now, JoinHandle}` [#245]
* Add default "macros" feature to allow faster compile times when using `default-features=false`.
[#245]: https://github.com/actix/actix-net/pull/245
## 2.0.0-beta.1 - 2020-12-28
* Add `System::attach_to_tokio` method. [#173]
* Update `tokio` dependency to `1.0`. [#236]
* Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep`
to stay aligned with Tokio's naming. [#236]
@@ -15,27 +96,19 @@
* These methods now accept `&self` when calling. [#236]
* Remove `'static` lifetime requirement for `System::run` and `Builder::run`. [#236]
* `Arbiter::spawn` now panics when `System` is not in scope. [#207]
### Fixed
* Fix work load issue by removing `PENDING` thread local. [#207]
[#207]: https://github.com/actix/actix-net/pull/207
[#236]: https://github.com/actix/actix-net/pull/236
## [1.1.1] - 2020-04-30
### Fixed
## 1.1.1 - 2020-04-30
* Fix memory leak due to [#94] (see [#129] for more detail)
[#129]: https://github.com/actix/actix-net/issues/129
## [1.1.0] - 2020-04-08
**This version has been yanked.**
### Added
## 1.1.0 - 2020-04-08 _(YANKED)_
* Expose `System::is_set` to check if current system has been started [#99]
* Add `Arbiter::is_running` to check if event loop is running [#124]
* Add `Arbiter::local_join` associated function
@@ -45,96 +118,57 @@
[#99]: https://github.com/actix/actix-net/pull/99
[#124]: https://github.com/actix/actix-net/pull/124
## [1.0.0] - 2019-12-11
## 1.0.0 - 2019-12-11
* Update dependencies
## [1.0.0-alpha.3] - 2019-12-07
### Fixed
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to tokio 0.2
* Fix compilation on non-unix platforms
### Changed
* Migrate to tokio 0.2
## [1.0.0-alpha.2] - 2019-12-02
Added
## 1.0.0-alpha.2 - 2019-12-02
* Export `main` and `test` attribute macros
* Export `time` module (re-export of tokio-timer)
* Export `net` module (re-export of tokio-net)
## [1.0.0-alpha.1] - 2019-11-22
### Changed
## 1.0.0-alpha.1 - 2019-11-22
* Migrate to std::future and tokio 0.2
## [0.2.6] - 2019-11-14
### Fixed
## 0.2.6 - 2019-11-14
* Allow to join arbiter's thread. #60
* Fix arbiter's thread panic message.
### Added
* Allow to join arbiter's thread. #60
## [0.2.5] - 2019-09-02
### Added
## 0.2.5 - 2019-09-02
* Add arbiter specific storage
## [0.2.4] - 2019-07-17
### Changed
## 0.2.4 - 2019-07-17
* Avoid a copy of the Future when initializing the Box. #29
## [0.2.3] - 2019-06-22
### Added
* Allow to start System using exsiting CurrentThread Handle #22
## 0.2.3 - 2019-06-22
* Allow to start System using existing CurrentThread Handle #22
## [0.2.2] - 2019-03-28
### Changed
## 0.2.2 - 2019-03-28
* Moved `blocking` module to `actix-threadpool` crate
## [0.2.1] - 2019-03-11
### Added
## 0.2.1 - 2019-03-11
* Added `blocking` module
* Arbiter::exec_fn - execute fn on the arbiter's thread
* Arbiter::exec - execute fn on the arbiter's thread and wait result
* Added `Arbiter::exec_fn` - execute fn on the arbiter's thread
* Added `Arbiter::exec` - execute fn on the arbiter's thread and wait result
## [0.2.0] - 2019-03-06
## 0.2.0 - 2019-03-06
* `run` method returns `io::Result<()>`
* Removed `Handle`
## [0.1.0] - 2018-12-09
## 0.1.0 - 2018-12-09
* Initial release


@@ -1,12 +1,14 @@
[package]
name = "actix-rt"
version = "2.0.0-beta.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Tokio-based single-thread async runtime for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
version = "2.4.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
keywords = ["async", "futures", "io", "runtime"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-rt/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -15,7 +17,20 @@ edition = "2018"
name = "actix_rt"
path = "src/lib.rs"
[dependencies]
actix-macros = "0.1.0"
[features]
default = ["macros"]
macros = ["actix-macros"]
io-uring = ["tokio-uring"]
tokio = { version = "1", features = ["rt", "net", "signal", "sync", "time"] }
[dependencies]
actix-macros = { version = "0.2.3", optional = true }
futures-core = { version = "0.3", default-features = false }
tokio = { version = "1.5.1", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
[target.'cfg(target_os = "linux")'.dependencies]
tokio-uring = { version = "0.1", optional = true }
[dev-dependencies]
tokio = { version = "1.5.1", features = ["full"] }
hyper = { version = "0.14", default-features = false, features = ["server", "tcp", "http1"] }

actix-rt/README.md Normal file

@@ -0,0 +1,14 @@
# actix-rt
> Tokio-based single-threaded async runtime for the Actix ecosystem.
[![crates.io](https://img.shields.io/crates/v/actix-rt?label=latest)](https://crates.io/crates/actix-rt)
[![Documentation](https://docs.rs/actix-rt/badge.svg?version=2.4.0)](https://docs.rs/actix-rt/2.4.0)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-rt.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-rt/2.4.0/status.svg)](https://deps.rs/crate/actix-rt/2.4.0)
![Download](https://img.shields.io/crates/d/actix-rt.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/WghFtEH6Hb)
See crate documentation for more: https://docs.rs/actix-rt.


@@ -0,0 +1,28 @@
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};
use std::convert::Infallible;
use std::net::SocketAddr;
async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
Ok(Response::new(Body::from("Hello World")))
}
fn main() {
actix_rt::System::with_tokio_rt(|| {
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
})
.block_on(async {
let make_service =
make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) });
let server =
Server::bind(&SocketAddr::from(([127, 0, 0, 1], 3000))).serve(make_service);
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
})
}


@@ -0,0 +1,60 @@
//! An example of how to build a multi-threaded Tokio runtime for an Actix System,
//! then spawn async tasks that can make use of the Tokio runtime's work stealing.
use actix_rt::System;
fn main() {
System::with_tokio_rt(|| {
// build system with a multi-thread tokio runtime.
tokio::runtime::Builder::new_multi_thread()
.worker_threads(2)
.enable_all()
.build()
.unwrap()
})
.block_on(async_main());
}
// async main function that acts like #[actix_web::main] or #[tokio::main]
async fn async_main() {
let (tx, rx) = tokio::sync::oneshot::channel();
// get a handle to system arbiter and spawn async task on it
System::current().arbiter().spawn(async {
// use tokio::spawn to get inside the context of the multi-threaded tokio runtime
let h1 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
});
// work stealing occurs for this task spawn
let h2 = tokio::spawn(async {
println!("thread id is {:?}", std::thread::current().id());
});
h1.await.unwrap();
h2.await.unwrap();
let _ = tx.send(());
});
rx.await.unwrap();
let (tx, rx) = tokio::sync::oneshot::channel();
let now = std::time::Instant::now();
// without an additional tokio::spawn, all spawned tasks run on a single thread
System::current().arbiter().spawn(async {
println!("thread id is {:?}", std::thread::current().id());
std::thread::sleep(std::time::Duration::from_secs(2));
let _ = tx.send(());
});
// the previously spawned task has blocked the system arbiter thread,
// so this task will wait for 2 seconds until it can be run
System::current().arbiter().spawn(async move {
println!("thread id is {:?}", std::thread::current().id());
assert!(now.elapsed() > std::time::Duration::from_secs(2));
});
rx.await.unwrap();
}


@@ -1,39 +1,27 @@
use std::any::{Any, TypeId};
use std::cell::RefCell;
use std::collections::HashMap;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::task::{Context, Poll};
use std::{fmt, thread};
use std::{
cell::RefCell,
fmt,
future::Future,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
thread,
};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot::{channel, error::RecvError as Canceled, Sender};
// use futures_util::stream::FuturesUnordered;
// use tokio::task::JoinHandle;
// use tokio::stream::StreamExt;
use tokio::task::LocalSet;
use futures_core::ready;
use tokio::sync::mpsc;
use crate::runtime::Runtime;
use crate::system::System;
thread_local!(
static ADDR: RefCell<Option<Arbiter>> = RefCell::new(None);
// TODO: Commented out code are for Arbiter::local_join function.
// It can be safely removed if this function is not used in actix-*.
//
// /// stores join handle for spawned async tasks.
// static HANDLE: RefCell<FuturesUnordered<JoinHandle<()>>> =
// RefCell::new(FuturesUnordered::new());
static STORAGE: RefCell<HashMap<TypeId, Box<dyn Any>>> = RefCell::new(HashMap::new());
);
use crate::system::{System, SystemCommand};
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
thread_local!(
static HANDLE: RefCell<Option<ArbiterHandle>> = RefCell::new(None);
);
pub(crate) enum ArbiterCommand {
Stop,
Execute(Box<dyn Future<Output = ()> + Unpin + Send>),
ExecuteFn(Box<dyn FnExec>),
Execute(Pin<Box<dyn Future<Output = ()> + Send>>),
}
impl fmt::Debug for ArbiterCommand {
@@ -41,391 +29,293 @@ impl fmt::Debug for ArbiterCommand {
match self {
ArbiterCommand::Stop => write!(f, "ArbiterCommand::Stop"),
ArbiterCommand::Execute(_) => write!(f, "ArbiterCommand::Execute"),
ArbiterCommand::ExecuteFn(_) => write!(f, "ArbiterCommand::ExecuteFn"),
}
}
}
/// A handle for sending spawn and stop messages to an [Arbiter].
#[derive(Debug, Clone)]
pub struct ArbiterHandle {
tx: mpsc::UnboundedSender<ArbiterCommand>,
}
impl ArbiterHandle {
pub(crate) fn new(tx: mpsc::UnboundedSender<ArbiterCommand>) -> Self {
Self { tx }
}
/// Send a future to the [Arbiter]'s thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the [Arbiter] has died.
pub fn spawn<Fut>(&self, future: Fut) -> bool
where
Fut: Future<Output = ()> + Send + 'static,
{
self.tx
.send(ArbiterCommand::Execute(Box::pin(future)))
.is_ok()
}
/// Send a function to the [Arbiter]'s thread and execute it.
///
/// Any result from the function is discarded. If you require a result, include a response
/// channel in the function.
///
/// Returns true if function was sent successfully and false if the [Arbiter] has died.
pub fn spawn_fn<F>(&self, f: F) -> bool
where
F: FnOnce() + Send + 'static,
{
self.spawn(async { f() })
}
/// Instruct [Arbiter] to stop processing its event loop.
///
/// Returns true if stop message was sent successfully and false if the [Arbiter] has
/// been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
}
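
A minimal usage sketch of `ArbiterHandle`, assuming the public `actix_rt` re-exports; it mirrors the crate-level example in `lib.rs` further down:

```rust
use actix_rt::{Arbiter, System};

fn main() {
    // an Arbiter can only be spawned while a System is registered on this thread
    let _system = System::new();

    let arbiter = Arbiter::new();
    let handle = arbiter.handle();

    // spawn_fn reports whether the message reached the Arbiter; results travel
    // back over ordinary channels
    let (tx, rx) = std::sync::mpsc::channel::<u32>();
    assert!(handle.spawn_fn(move || tx.send(42).unwrap()));
    assert_eq!(rx.recv().unwrap(), 42);

    // ask the event loop to stop, then join the underlying OS thread
    assert!(handle.stop());
    arbiter.join().unwrap();
}
```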
/// An Arbiter represents a thread that provides an asynchronous execution environment for futures
/// and functions.
///
/// When an arbiter is created, it spawns a new [OS thread](thread), and hosts an event loop.
#[derive(Debug)]
/// Arbiters provide an asynchronous execution environment for actors, functions
/// and futures. When an Arbiter is created, it spawns a new OS thread, and
/// hosts an event loop. Some Arbiter functions execute on the current thread.
pub struct Arbiter {
sender: UnboundedSender<ArbiterCommand>,
thread_handle: Option<thread::JoinHandle<()>>,
}
impl Clone for Arbiter {
fn clone(&self) -> Self {
Self::with_sender(self.sender.clone())
}
}
impl Default for Arbiter {
fn default() -> Self {
Self::new()
}
tx: mpsc::UnboundedSender<ArbiterCommand>,
thread_handle: thread::JoinHandle<()>,
}
impl Arbiter {
pub(crate) fn new_system(local: &LocalSet) -> Self {
let (tx, rx) = unbounded_channel();
let arb = Arbiter::with_sender(tx);
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
STORAGE.with(|cell| cell.borrow_mut().clear());
local.spawn_local(ArbiterController { rx });
arb
}
/// Returns the current thread's arbiter's address. If no Arbiter is present, then this
/// function will panic!
pub fn current() -> Arbiter {
ADDR.with(|cell| match *cell.borrow() {
Some(ref addr) => addr.clone(),
None => panic!("Arbiter is not running"),
/// Spawn a new Arbiter thread and start its event loop.
///
/// # Panics
/// Panics if a [System] is not registered on the current thread.
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
#[allow(clippy::new_without_default)]
pub fn new() -> Arbiter {
Self::with_tokio_rt(|| {
crate::runtime::default_tokio_runtime()
.expect("Cannot create new Arbiter's Runtime.")
})
}
/// Check if current arbiter is running.
#[deprecated(note = "Thread local variables for running state of Arbiter is removed")]
pub fn is_running() -> bool {
false
}
/// Stop arbiter from continuing its event loop.
pub fn stop(&self) {
let _ = self.sender.send(ArbiterCommand::Stop);
}
/// Spawn new thread and run event loop in spawned thread.
/// Returns address of newly created arbiter.
pub fn new() -> Arbiter {
let id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt:worker:{}", id);
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
where
F: Fn() -> tokio::runtime::Runtime + Send + 'static,
{
let sys = System::current();
let (tx, rx) = unbounded_channel();
let system_id = sys.id();
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
let handle = thread::Builder::new()
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
let (tx, rx) = mpsc::unbounded_channel();
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
let thread_handle = thread::Builder::new()
.name(name.clone())
.spawn({
let tx = tx.clone();
move || {
let rt = Runtime::new().expect("Can not create Runtime");
let arb = Arbiter::with_sender(tx);
STORAGE.with(|cell| cell.borrow_mut().clear());
let rt = crate::runtime::Runtime::from(runtime_factory());
let hnd = ArbiterHandle::new(tx);
System::set_current(sys);
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
// register arbiter
let _ = System::current()
.sys()
.send(SystemCommand::RegisterArbiter(id, arb));
.tx()
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
// start arbiter controller
// run loop
rt.block_on(ArbiterController { rx });
ready_tx.send(()).unwrap();
// unregister arbiter
// run arbiter event processing loop
rt.block_on(ArbiterRunner { rx });
// deregister arbiter
let _ = System::current()
.sys()
.send(SystemCommand::UnregisterArbiter(id));
.tx()
.send(SystemCommand::DeregisterArbiter(arb_id));
}
})
.unwrap_or_else(|err| {
panic!("Cannot spawn an arbiter's thread {:?}: {:?}", &name, err)
panic!("Cannot spawn Arbiter's thread: {:?}. {:?}", &name, err)
});
Arbiter {
sender: tx,
thread_handle: Some(handle),
}
ready_rx.recv().unwrap();
Arbiter { tx, thread_handle }
}
/// Spawn a future on the current thread. This does not create a new Arbiter
/// or Arbiter address; it is simply a helper for spawning futures on the current
/// thread.
pub fn spawn<F>(future: F)
where
F: Future<Output = ()> + 'static,
{
// HANDLE.with(|handle| {
// let handle = handle.borrow();
// handle.push(tokio::task::spawn_local(future));
// });
// let _ = tokio::task::spawn_local(CleanupPending);
let _ = tokio::task::spawn_local(future);
/// Spawn a new Arbiter thread and start its event loop with `tokio-uring` runtime.
///
/// # Panics
/// Panics if a [System] is not registered on the current thread.
#[cfg(all(target_os = "linux", feature = "io-uring"))]
#[allow(clippy::new_without_default)]
pub fn new() -> Arbiter {
let sys = System::current();
let system_id = sys.id();
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
let (tx, rx) = mpsc::unbounded_channel();
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
let thread_handle = thread::Builder::new()
.name(name.clone())
.spawn({
let tx = tx.clone();
move || {
let hnd = ArbiterHandle::new(tx);
System::set_current(sys);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
// register arbiter
let _ = System::current()
.tx()
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
ready_tx.send(()).unwrap();
// run arbiter event processing loop
tokio_uring::start(ArbiterRunner { rx });
// deregister arbiter
let _ = System::current()
.tx()
.send(SystemCommand::DeregisterArbiter(arb_id));
}
})
.unwrap_or_else(|err| {
panic!("Cannot spawn Arbiter's thread: {:?}. {:?}", &name, err)
});
ready_rx.recv().unwrap();
Arbiter { tx, thread_handle }
}
/// Executes a future on the current thread. This does not create a new Arbiter
/// or Arbiter address; it is simply a helper for executing futures on the current
/// thread.
pub fn spawn_fn<F, R>(f: F)
where
F: FnOnce() -> R + 'static,
R: Future<Output = ()> + 'static,
{
Arbiter::spawn(async {
f();
/// Sets up an Arbiter runner in a new System using the environment's local set.
pub(crate) fn in_new_system() -> ArbiterHandle {
let (tx, rx) = mpsc::unbounded_channel();
let hnd = ArbiterHandle::new(tx);
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
crate::spawn(ArbiterRunner { rx });
hnd
}
/// Return a handle to this Arbiter's message sender.
pub fn handle(&self) -> ArbiterHandle {
ArbiterHandle::new(self.tx.clone())
}
/// Return a handle to the current thread's Arbiter's message sender.
///
/// # Panics
/// Panics if no Arbiter is running on the current thread.
pub fn current() -> ArbiterHandle {
HANDLE.with(|cell| match *cell.borrow() {
Some(ref hnd) => hnd.clone(),
None => panic!("Arbiter is not running."),
})
}
/// Send a future to the Arbiter's thread, and spawn it.
pub fn send<F>(&self, future: F)
where
F: Future<Output = ()> + Send + Unpin + 'static,
{
let _ = self.sender.send(ArbiterCommand::Execute(Box::new(future)));
/// Try to get the currently running Arbiter's handle.
///
/// Returns `None` if no Arbiter has been started.
///
/// Unlike [`current`](Self::current), this never panics.
pub fn try_current() -> Option<ArbiterHandle> {
HANDLE.with(|cell| cell.borrow().clone())
}
/// Send a function to the Arbiter's thread, and execute it. Any result from the function
/// is discarded.
pub fn exec_fn<F>(&self, f: F)
/// Stop Arbiter from continuing its event loop.
///
/// Returns true if stop message was sent successfully and false if the Arbiter has been dropped.
pub fn stop(&self) -> bool {
self.tx.send(ArbiterCommand::Stop).is_ok()
}
/// Send a future to the Arbiter's thread and spawn it.
///
/// If you require a result, include a response channel in the future.
///
/// Returns true if future was sent successfully and false if the Arbiter has died.
pub fn spawn<Fut>(&self, future: Fut) -> bool
where
Fut: Future<Output = ()> + Send + 'static,
{
self.tx
.send(ArbiterCommand::Execute(Box::pin(future)))
.is_ok()
}
/// Send a function to the Arbiter's thread and execute it.
///
/// Any result from the function is discarded. If you require a result, include a response
/// channel in the function.
///
/// Returns true if function was sent successfully and false if the Arbiter has died.
pub fn spawn_fn<F>(&self, f: F) -> bool
where
F: FnOnce() + Send + 'static,
{
let _ = self
.sender
.send(ArbiterCommand::ExecuteFn(Box::new(move || {
f();
})));
self.spawn(async { f() })
}
/// Send a function to the Arbiter's thread. This function will be executed asynchronously.
/// A future is created, and when resolved will contain the result of the function sent
/// to the Arbiter's thread.
pub fn exec<F, R>(&self, f: F) -> impl Future<Output = Result<R, Canceled>>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
let (tx, rx) = channel();
let _ = self
.sender
.send(ArbiterCommand::ExecuteFn(Box::new(move || {
if !tx.is_closed() {
let _ = tx.send(f());
}
})));
rx
}
/// Set item to arbiter storage
pub fn set_item<T: 'static>(item: T) {
STORAGE.with(move |cell| cell.borrow_mut().insert(TypeId::of::<T>(), Box::new(item)));
}
/// Check if arbiter storage contains item
pub fn contains_item<T: 'static>() -> bool {
STORAGE.with(move |cell| cell.borrow().get(&TypeId::of::<T>()).is_some())
}
/// Get a reference to a type previously inserted on this arbiter's storage.
/// Wait for Arbiter's event loop to complete.
///
/// Panics if the item is not inserted
pub fn get_item<T: 'static, F, R>(mut f: F) -> R
where
F: FnMut(&T) -> R,
{
STORAGE.with(move |cell| {
let st = cell.borrow();
let item = st
.get(&TypeId::of::<T>())
.and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref())
.unwrap();
f(item)
})
}
/// Get a mutable reference to a type previously inserted on this arbiter's storage.
///
/// Panics if the item is not inserted
pub fn get_mut_item<T: 'static, F, R>(mut f: F) -> R
where
F: FnMut(&mut T) -> R,
{
STORAGE.with(move |cell| {
let mut st = cell.borrow_mut();
let item = st
.get_mut(&TypeId::of::<T>())
.and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut())
.unwrap();
f(item)
})
}
fn with_sender(sender: UnboundedSender<ArbiterCommand>) -> Self {
Self {
sender,
thread_handle: None,
}
}
/// Wait for the event loop to stop by joining the underlying thread (if the handle is `Some`).
pub fn join(&mut self) -> thread::Result<()> {
if let Some(thread_handle) = self.thread_handle.take() {
thread_handle.join()
} else {
Ok(())
}
}
/// Returns a future that will be completed once all currently spawned futures
/// have completed.
#[deprecated(since = "1.2.0", note = "Arbiter::local_join function is removed.")]
pub async fn local_join() {
// let handle = HANDLE.with(|fut| std::mem::take(&mut *fut.borrow_mut()));
// async move {
// handle.collect::<Vec<_>>().await;
// }
unimplemented!("Arbiter::local_join function is removed.")
/// Joins the underlying OS thread handle. See [`JoinHandle::join`](thread::JoinHandle::join).
pub fn join(self) -> thread::Result<()> {
self.thread_handle.join()
}
}
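
To make the `current`/`try_current` distinction concrete, a small sketch assuming the usual `System::new().block_on(..)` entry point:

```rust
use actix_rt::{Arbiter, System};

fn main() {
    // no System/Arbiter is registered on this thread yet
    assert!(Arbiter::try_current().is_none());

    System::new().block_on(async {
        // inside the running System the thread has an Arbiter context,
        // so both accessors succeed (current() would panic outside of one)
        assert!(Arbiter::try_current().is_some());

        let handle = Arbiter::current();
        assert!(handle.spawn(async { println!("spawned on the system arbiter") }));
    });
}
```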
// /// Future used for cleaning-up already finished `JoinHandle`s
// /// from the `PENDING` list so the vector doesn't grow indefinitely
// struct CleanupPending;
//
// impl Future for CleanupPending {
// type Output = ();
//
// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// HANDLE.with(move |handle| {
// recycle_join_handle(&mut *handle.borrow_mut(), cx);
// });
//
// Poll::Ready(())
// }
// }
struct ArbiterController {
rx: UnboundedReceiver<ArbiterCommand>,
/// A persistent future that processes [Arbiter] commands.
struct ArbiterRunner {
rx: mpsc::UnboundedReceiver<ArbiterCommand>,
}
impl Drop for ArbiterController {
fn drop(&mut self) {
if thread::panicking() {
if System::current().stop_on_panic() {
eprintln!("Panic in Arbiter thread, shutting down system.");
System::current().stop_with_code(1)
} else {
eprintln!("Panic in Arbiter thread.");
}
}
}
}
impl Future for ArbiterController {
impl Future for ArbiterRunner {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match Pin::new(&mut self.rx).poll_recv(cx) {
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(item)) => match item {
ArbiterCommand::Stop => return Poll::Ready(()),
ArbiterCommand::Execute(fut) => {
// HANDLE.with(|handle| {
// let mut handle = handle.borrow_mut();
// handle.push(tokio::task::spawn_local(fut));
// recycle_join_handle(&mut *handle, cx);
// });
tokio::task::spawn_local(fut);
match ready!(Pin::new(&mut self.rx).poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process arbiter command
Some(item) => match item {
ArbiterCommand::Stop => {
return Poll::Ready(());
}
ArbiterCommand::ExecuteFn(f) => {
f.call_box();
ArbiterCommand::Execute(task_fut) => {
tokio::task::spawn_local(task_fut);
}
},
Poll::Pending => return Poll::Pending,
}
}
}
}
// fn recycle_join_handle(handle: &mut FuturesUnordered<JoinHandle<()>>, cx: &mut Context<'_>) {
// let _ = Pin::new(&mut *handle).poll_next(cx);
//
// // Try to recycle more join handles and free up memory.
// //
// // this is a guess. The yield limit for FuturesUnordered is 32.
// // So poll an extra 3 times would make the total poll below 128.
// if handle.len() > 64 {
// (0..3).for_each(|_| {
// let _ = Pin::new(&mut *handle).poll_next(cx);
// })
// }
// }
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, Arbiter),
UnregisterArbiter(usize),
}
#[derive(Debug)]
pub(crate) struct SystemArbiter {
stop: Option<Sender<i32>>,
commands: UnboundedReceiver<SystemCommand>,
arbiters: HashMap<usize, Arbiter>,
}
impl SystemArbiter {
pub(crate) fn new(stop: Sender<i32>, commands: UnboundedReceiver<SystemCommand>) -> Self {
SystemArbiter {
commands,
stop: Some(stop),
arbiters: HashMap::new(),
}
}
}
impl Future for SystemArbiter {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match Pin::new(&mut self.commands).poll_recv(cx) {
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(cmd)) => match cmd {
SystemCommand::Exit(code) => {
// stop arbiters
for arb in self.arbiters.values() {
arb.stop();
}
// stop event loop
if let Some(stop) = self.stop.take() {
let _ = stop.send(code);
}
}
SystemCommand::RegisterArbiter(name, hnd) => {
self.arbiters.insert(name, hnd);
}
SystemCommand::UnregisterArbiter(name) => {
self.arbiters.remove(&name);
}
},
Poll::Pending => return Poll::Pending,
}
}
}
}
pub trait FnExec: Send + 'static {
fn call_box(self: Box<Self>);
}
impl<F> FnExec for F
where
F: FnOnce() + Send + 'static,
{
#[allow(clippy::boxed_local)]
fn call_box(self: Box<Self>) {
(*self)()
}
}


@@ -1,183 +0,0 @@
use std::borrow::Cow;
use std::future::Future;
use std::io;
use tokio::sync::mpsc::unbounded_channel;
use tokio::sync::oneshot::{channel, Receiver};
use tokio::task::LocalSet;
use crate::arbiter::{Arbiter, SystemArbiter};
use crate::runtime::Runtime;
use crate::system::System;
/// Builder struct for an Actix runtime.
///
/// Either use `Builder::build` to create a system and start actors.
/// Alternatively, use `Builder::run` to start the tokio runtime and
/// run a function in its context.
pub struct Builder {
/// Name of the System. Defaults to "actix" if unset.
name: Cow<'static, str>,
/// Whether the Arbiter will stop the whole System on uncaught panic. Defaults to false.
stop_on_panic: bool,
}
impl Builder {
pub(crate) fn new() -> Self {
Builder {
name: Cow::Borrowed("actix"),
stop_on_panic: false,
}
}
/// Sets the name of the System.
pub fn name<T: Into<String>>(mut self, name: T) -> Self {
self.name = Cow::Owned(name.into());
self
}
/// Sets the option 'stop_on_panic' which controls whether the System is stopped when an
/// uncaught panic is thrown from a worker thread.
///
/// Defaults to false.
pub fn stop_on_panic(mut self, stop_on_panic: bool) -> Self {
self.stop_on_panic = stop_on_panic;
self
}
/// Create new System.
///
/// This method panics if it cannot create the tokio runtime
pub fn build(self) -> SystemRunner {
self.create_runtime(|| {})
}
/// Create new System that can run asynchronously.
///
/// This method panics if it cannot start the system arbiter
pub(crate) fn build_async(self, local: &LocalSet) -> AsyncSystemRunner {
self.create_async_runtime(local)
}
/// This function will start the tokio runtime and will finish once the
/// `System::stop()` message gets called.
/// Function `f` gets called within the tokio runtime context.
pub fn run<F>(self, f: F) -> io::Result<()>
where
F: FnOnce(),
{
self.create_runtime(f).run()
}
fn create_async_runtime(self, local: &LocalSet) -> AsyncSystemRunner {
let (stop_tx, stop) = channel();
let (sys_sender, sys_receiver) = unbounded_channel();
let system =
System::construct(sys_sender, Arbiter::new_system(local), self.stop_on_panic);
// system arbiter
let arb = SystemArbiter::new(stop_tx, sys_receiver);
// start the system arbiter
let _ = local.spawn_local(arb);
AsyncSystemRunner { stop, system }
}
fn create_runtime<F>(self, f: F) -> SystemRunner
where
F: FnOnce(),
{
let (stop_tx, stop) = channel();
let (sys_sender, sys_receiver) = unbounded_channel();
let rt = Runtime::new().unwrap();
let system = System::construct(
sys_sender,
Arbiter::new_system(rt.local()),
self.stop_on_panic,
);
// system arbiter
let arb = SystemArbiter::new(stop_tx, sys_receiver);
rt.spawn(arb);
// init system arbiter and run configuration method
rt.block_on(async { f() });
SystemRunner { rt, stop, system }
}
}
#[derive(Debug)]
pub(crate) struct AsyncSystemRunner {
stop: Receiver<i32>,
system: System,
}
impl AsyncSystemRunner {
/// This function will start the event loop and return a future that
/// resolves once the `System::stop()` function is called.
pub(crate) fn run_nonblocking(self) -> impl Future<Output = Result<(), io::Error>> + Send {
let AsyncSystemRunner { stop, .. } = self;
// run loop
async {
match stop.await {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
}
}
}
}
/// Helper object that runs System's event loop
#[must_use = "SystemRunner must be run"]
#[derive(Debug)]
pub struct SystemRunner {
rt: Runtime,
stop: Receiver<i32>,
system: System,
}
impl SystemRunner {
/// This function will start the event loop and will finish once the
/// `System::stop()` function is called.
pub fn run(self) -> io::Result<()> {
let SystemRunner { rt, stop, .. } = self;
// run loop
match rt.block_on(stop) {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
}
}
/// Execute a future and wait for result.
#[inline]
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
self.rt.block_on(fut)
}
}


@@ -1,65 +1,205 @@
//! Tokio-based single-thread async runtime for the Actix ecosystem.
//! Tokio-based single-threaded async runtime for the Actix ecosystem.
//!
//! In most parts of the Actix ecosystem, it has been chosen to use !Send futures. For this
//! reason, a single-threaded runtime is appropriate since it is guaranteed that futures will not
//! be moved between threads. This can result in small performance improvements over cases where
//! atomics would otherwise be needed.
//!
//! To achieve similar performance to multi-threaded, work-stealing runtimes, applications
//! using `actix-rt` will create multiple, mostly disconnected, single-threaded runtimes.
//! This approach has good performance characteristics for workloads where the majority of tasks
//! have similar runtime expense.
//!
//! The disadvantage is that idle threads will not steal work from very busy, stuck or otherwise
//! backlogged threads. Tasks that are disproportionately expensive should be offloaded to the
//! blocking task thread-pool using [`task::spawn_blocking`].
//!
//! # Examples
//! ```no_run
//! use std::sync::mpsc;
//! use actix_rt::{Arbiter, System};
//!
//! let _ = System::new();
//!
//! let (tx, rx) = mpsc::channel::<u32>();
//!
//! let arbiter = Arbiter::new();
//! arbiter.spawn_fn(move || tx.send(42).unwrap());
//!
//! let num = rx.recv().unwrap();
//! assert_eq!(num, 42);
//!
//! arbiter.stop();
//! arbiter.join().unwrap();
//! ```
//!
//! # `io-uring` Support
//! There is experimental support for using io-uring with this crate by enabling the
//! `io-uring` feature. For now, it is semver exempt.
#![deny(rust_2018_idioms, nonstandard_style)]
#![allow(clippy::type_complexity)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
#[cfg(all(not(target_os = "linux"), feature = "io-uring"))]
compile_error!("io_uring is a linux only feature.");
use std::future::Future;
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub use actix_macros::{main, test};
use tokio::task::JoinHandle;
// Cannot define a main macro when compiled into test harness.
// Workaround for https://github.com/rust-lang/rust/issues/62127.
#[cfg(all(feature = "macros", not(test)))]
pub use actix_macros::main;
#[cfg(feature = "macros")]
pub use actix_macros::test;
mod arbiter;
mod builder;
mod runtime;
mod system;
pub use self::arbiter::Arbiter;
pub use self::builder::{Builder, SystemRunner};
pub use self::arbiter::{Arbiter, ArbiterHandle};
pub use self::runtime::Runtime;
pub use self::system::System;
pub use self::system::{System, SystemRunner};
/// Spawns a future on the current arbiter.
///
/// # Panics
///
/// This function panics if actix system is not running.
#[inline]
pub fn spawn<F>(f: F)
where
F: Future<Output = ()> + 'static,
{
Arbiter::spawn(f)
}
pub use tokio::pin;
/// Asynchronous signal handling
pub mod signal {
//! Asynchronous signal handling (Tokio re-exports).
#[cfg(unix)]
pub mod unix {
//! Unix specific signals (Tokio re-exports).
pub use tokio::signal::unix::*;
}
pub use tokio::signal::ctrl_c;
}
/// TCP/UDP/Unix bindings
pub mod net {
//! TCP/UDP/Unix bindings (mostly Tokio re-exports).
use std::{
future::Future,
io,
task::{Context, Poll},
};
pub use tokio::io::Ready;
use tokio::io::{AsyncRead, AsyncWrite, Interest};
pub use tokio::net::UdpSocket;
pub use tokio::net::{TcpListener, TcpStream};
pub use tokio::net::{TcpListener, TcpSocket, TcpStream};
#[cfg(unix)]
mod unix {
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
/// Extension trait over async read+write types that can also signal readiness.
#[doc(hidden)]
pub trait ActixStream: AsyncRead + AsyncWrite + Unpin {
/// Poll stream and check read readiness of Self.
///
/// See [tokio::net::TcpStream::poll_read_ready] for detail on intended use.
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
/// Poll stream and check write readiness of Self.
///
/// See [tokio::net::TcpStream::poll_write_ready] for detail on intended use.
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
}
impl ActixStream for TcpStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
}
#[cfg(unix)]
pub use self::unix::*;
impl ActixStream for UnixStream {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::READABLE);
tokio::pin!(ready);
ready.poll(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
let ready = self.ready(Interest::WRITABLE);
tokio::pin!(ready);
ready.poll(cx)
}
}
impl<Io: ActixStream + ?Sized> ActixStream for Box<Io> {
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_read_ready(cx)
}
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
(**self).poll_write_ready(cx)
}
}
}
/// Utilities for tracking time.
pub mod time {
//! Utilities for tracking time (Tokio re-exports).
pub use tokio::time::Instant;
pub use tokio::time::{interval, interval_at, Interval};
pub use tokio::time::{sleep, sleep_until, Sleep};
pub use tokio::time::{timeout, Timeout};
}
pub mod task {
//! Task management (Tokio re-exports).
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
}
/// Spawns a future on the current thread as a new task.
///
/// If not immediately awaited, the task can be cancelled using [`JoinHandle::abort`].
///
/// The provided future is spawned as a new task; therefore, panics are caught.
///
/// # Panics
/// Panics if Actix system is not running.
///
/// # Examples
/// ```
/// # use std::time::Duration;
/// # actix_rt::Runtime::new().unwrap().block_on(async {
/// // task resolves successfully
/// assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
///
/// // task panics
/// assert!(actix_rt::spawn(async {
/// panic!("panic is caught at task boundary");
/// })
/// .await
/// .unwrap_err()
/// .is_panic());
///
/// // task is cancelled before completion
/// let handle = actix_rt::spawn(actix_rt::time::sleep(Duration::from_secs(100)));
/// handle.abort();
/// assert!(handle.await.unwrap_err().is_cancelled());
/// # });
/// ```
#[inline]
pub fn spawn<Fut>(f: Fut) -> JoinHandle<Fut::Output>
where
Fut: Future + 'static,
Fut::Output: 'static,
{
tokio::task::spawn_local(f)
}


@@ -1,27 +1,29 @@
use std::future::Future;
use std::io;
use tokio::{runtime, task::LocalSet};
use std::{future::Future, io};
/// A single-threaded runtime that provides a way to start the reactor
/// and runtime on the current thread.
use tokio::task::{JoinHandle, LocalSet};
/// A Tokio-based runtime proxy.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: crate
/// All spawned futures will be executed on the current thread. Therefore, there is no `Send` bound
/// on submitted futures.
#[derive(Debug)]
pub struct Runtime {
local: LocalSet,
rt: runtime::Runtime,
rt: tokio::runtime::Runtime,
}
pub(crate) fn default_tokio_runtime() -> io::Result<tokio::runtime::Runtime> {
tokio::runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()
}
impl Runtime {
#[allow(clippy::new_ret_no_self)]
/// Returns a new runtime initialized with default configuration values.
pub fn new() -> io::Result<Runtime> {
let rt = runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()?;
#[allow(clippy::new_ret_no_self)]
pub fn new() -> io::Result<Self> {
let rt = default_tokio_runtime()?;
Ok(Runtime {
rt,
@@ -29,62 +31,48 @@ impl Runtime {
})
}
pub(super) fn local(&self) -> &LocalSet {
&self.local
}
/// Spawn a future onto the single-threaded runtime.
/// Offload a future onto the single-threaded runtime.
///
/// See [module level][mod] documentation for more details.
/// The returned join handle can be used to await the future's result.
///
/// [mod]: crate
/// See [crate root][crate] documentation for more details.
///
/// # Examples
///
/// ```rust,ignore
/// # use futures::{future, Future, Stream};
/// use actix_rt::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let rt = Runtime::new().unwrap();
/// ```
/// let rt = actix_rt::Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|_| {
/// let handle = rt.spawn(async {
/// println!("running on the runtime");
/// }));
/// # }
/// # pub fn main() {}
/// 42
/// });
///
/// assert_eq!(rt.block_on(handle).unwrap(), 42);
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&self, future: F) -> &Self
/// This function panics if the spawn fails. Failure occurs if the executor is currently at
/// capacity and is unable to spawn a new future.
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
where
F: Future<Output = ()> + 'static,
F: Future + 'static,
{
self.local.spawn_local(future);
self
self.local.spawn_local(future)
}
/// Runs the provided future, blocking the current thread until the future
/// completes.
/// Runs the provided future, blocking the current thread until the future completes.
///
/// This function can be used to synchronously block the current thread
/// until the provided `future` has resolved either successfully or with an
/// error. The result of the future is then returned from this function
/// call.
/// This function can be used to synchronously block the current thread until the provided
/// `future` has resolved either successfully or with an error. The result of the future is
/// then returned from this function call.
///
/// Note that this function will **also** execute any spawned futures on the
/// current thread, but will **not** block until these other spawned futures
/// have completed. Once the function returns, any uncompleted futures
/// remain pending in the `Runtime` instance. These futures will not run
/// Note that this function will also execute any spawned futures on the current thread, but
/// will not block until these other spawned futures have completed. Once the function returns,
/// any uncompleted futures remain pending in the `Runtime` instance. These futures will not run
/// until `block_on` or `run` is called again.
///
/// The caller is responsible for ensuring that other spawned futures
/// complete execution by calling `block_on` or `run`.
/// The caller is responsible for ensuring that other spawned futures complete execution by
/// calling `block_on` or `run`.
pub fn block_on<F>(&self, f: F) -> F::Output
where
F: Future,
@@ -92,3 +80,12 @@ impl Runtime {
self.local.block_on(&self.rt, f)
}
}
impl From<tokio::runtime::Runtime> for Runtime {
fn from(rt: tokio::runtime::Runtime) -> Self {
Self {
local: LocalSet::new(),
rt,
}
}
}
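
As an aside, here is a minimal sketch (an assumption-laden illustration, not crate documentation) of wrapping a pre-built Tokio runtime with the `From<tokio::runtime::Runtime>` conversion added above and driving local futures on it:
fn custom_runtime_demo() -> std::io::Result<()> {
    // build a plain current-thread Tokio runtime first
    let tokio_rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()?;

    // wrap it in the actix-rt proxy, which pairs it with a fresh LocalSet
    let rt = actix_rt::Runtime::from(tokio_rt);

    // !Send futures are fine here; everything runs on the current thread
    let handle = rt.spawn(async { "done" });
    assert_eq!(rt.block_on(handle).unwrap(), "done");

    Ok(())
}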

View File

@@ -1,193 +1,124 @@
use std::cell::RefCell;
use std::future::Future;
use std::io;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{
cell::RefCell,
collections::HashMap,
future::Future,
io,
pin::Pin,
sync::atomic::{AtomicUsize, Ordering},
task::{Context, Poll},
};
use tokio::sync::mpsc::UnboundedSender;
use tokio::task::LocalSet;
use futures_core::ready;
use tokio::sync::{mpsc, oneshot};
use crate::arbiter::{Arbiter, SystemCommand};
use crate::builder::{Builder, SystemRunner};
use crate::{arbiter::ArbiterHandle, Arbiter};
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
/// System is a runtime manager.
#[derive(Clone, Debug)]
pub struct System {
id: usize,
sys: UnboundedSender<SystemCommand>,
arbiter: Arbiter,
stop_on_panic: bool,
}
thread_local!(
static CURRENT: RefCell<Option<System>> = RefCell::new(None);
);
/// A manager for a per-thread distributed async runtime.
#[derive(Clone, Debug)]
pub struct System {
id: usize,
sys_tx: mpsc::UnboundedSender<SystemCommand>,
/// Handle to the first [Arbiter] that is created with the System.
arbiter_handle: ArbiterHandle,
}
#[cfg(not(feature = "io-uring"))]
impl System {
/// Constructs new system and sets it as current
/// Create a new system.
///
/// # Panics
/// Panics if underlying Tokio runtime can not be created.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> SystemRunner {
Self::with_tokio_rt(|| {
crate::runtime::default_tokio_runtime()
.expect("Default Actix (Tokio) runtime could not be created.")
})
}
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(runtime_factory: F) -> SystemRunner
where
F: Fn() -> tokio::runtime::Runtime,
{
let (stop_tx, stop_rx) = oneshot::channel();
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
let rt = crate::runtime::Runtime::from(runtime_factory());
let sys_arbiter = rt.block_on(async { Arbiter::in_new_system() });
let system = System::construct(sys_tx, sys_arbiter.clone());
system
.tx()
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
.unwrap();
// init background system arbiter
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
rt.spawn(sys_ctrl);
SystemRunner {
rt,
stop_rx,
system,
}
}
}
#[cfg(feature = "io-uring")]
impl System {
/// Create a new system.
///
/// # Panics
/// Panics if underlying Tokio runtime can not be created.
#[allow(clippy::new_ret_no_self)]
pub fn new() -> SystemRunner {
SystemRunner
}
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
///
/// [tokio-runtime]: tokio::runtime::Runtime
#[doc(hidden)]
pub fn with_tokio_rt<F>(_: F) -> SystemRunner
where
F: Fn() -> tokio::runtime::Runtime,
{
unimplemented!("System::with_tokio_rt is not implemented yet")
}
}
impl System {
/// Constructs new system and registers it on the current thread.
pub(crate) fn construct(
sys: UnboundedSender<SystemCommand>,
arbiter: Arbiter,
stop_on_panic: bool,
sys_tx: mpsc::UnboundedSender<SystemCommand>,
arbiter_handle: ArbiterHandle,
) -> Self {
let sys = System {
sys,
arbiter,
stop_on_panic,
sys_tx,
arbiter_handle,
id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst),
};
System::set_current(sys.clone());
sys
}
/// Build a new system with a customized tokio runtime.
///
/// This allows to customize the runtime. See struct level docs on
/// `Builder` for more information.
pub fn builder() -> Builder {
Builder::new()
}
#[allow(clippy::new_ret_no_self)]
/// Create new system.
///
/// This method panics if it can not create tokio runtime
pub fn new<T: Into<String>>(name: T) -> SystemRunner {
Self::builder().name(name).build()
}
/// Create new system using provided tokio `LocalSet`.
///
/// This method panics if it can not spawn system arbiter
///
/// Note: This method uses provided `LocalSet` to create a `System` future only.
/// All the [`Arbiter`]s will be started in separate threads using their own tokio `Runtime`s.
/// It means that using this method currently it is impossible to make `actix-rt` work in the
/// alternative `tokio` `Runtime`s (e.g. provided by [`tokio_compat`]).
///
/// [`tokio_compat`]: https://crates.io/crates/tokio-compat
///
/// # Examples
///
/// ```rust,ignore
/// use tokio::{runtime::Runtime, task::LocalSet};
/// use actix_rt::System;
/// use futures_util::future::try_join_all;
///
/// async fn run_application() {
/// let first_task = tokio::spawn(async {
/// // ...
/// # println!("One task");
/// # Ok::<(),()>(())
/// });
///
/// let second_task = tokio::spawn(async {
/// // ...
/// # println!("Another task");
/// # Ok::<(),()>(())
/// });
///
/// try_join_all(vec![first_task, second_task])
/// .await
/// .expect("Some of the futures finished unexpectedly");
/// }
///
///
/// let runtime = tokio::runtime::Builder::new_multi_thread()
/// .worker_threads(2)
/// .enable_all()
/// .build()
/// .unwrap();
///
///
/// let actix_system_task = LocalSet::new();
/// let sys = System::run_in_tokio("actix-main-system", &actix_system_task);
/// actix_system_task.spawn_local(sys);
///
/// let rest_operations = run_application();
/// runtime.block_on(actix_system_task.run_until(rest_operations));
/// ```
pub fn run_in_tokio<T: Into<String>>(
name: T,
local: &LocalSet,
) -> impl Future<Output = io::Result<()>> {
Self::builder()
.name(name)
.build_async(local)
.run_nonblocking()
}
/// Consume the provided tokio Runtime and start the `System` in it.
/// This method will create a `LocalSet` object and occupy the current thread
/// for the created `System` exclusively. All the other asynchronous tasks that
/// should be executed as well must be aggregated into one future, provided as the last
/// argument to this method.
///
/// Note: This method uses provided `Runtime` to create a `System` future only.
/// All the [`Arbiter`]s will be started in separate threads using their own tokio `Runtime`s.
/// It means that using this method currently it is impossible to make `actix-rt` work in the
/// alternative `tokio` `Runtime`s (e.g. provided by `tokio_compat`).
///
/// [`tokio_compat`]: https://crates.io/crates/tokio-compat
///
/// # Arguments
///
/// - `name`: Name of the System
/// - `runtime`: A tokio Runtime to run the system in.
/// - `rest_operations`: A future to be executed in the runtime along with the System.
///
/// # Examples
///
/// ```rust,ignore
/// use tokio::runtime::Runtime;
/// use actix_rt::System;
/// use futures_util::future::try_join_all;
///
/// async fn run_application() {
/// let first_task = tokio::spawn(async {
/// // ...
/// # println!("One task");
/// # Ok::<(),()>(())
/// });
///
/// let second_task = tokio::spawn(async {
/// // ...
/// # println!("Another task");
/// # Ok::<(),()>(())
/// });
///
/// try_join_all(vec![first_task, second_task])
/// .await
/// .expect("Some of the futures finished unexpectedly");
/// }
///
///
/// let runtime = tokio::runtime::Builder::new_multi_thread()
/// .worker_threads(2)
/// .enable_all()
/// .build()
/// .unwrap();
///
/// let rest_operations = run_application();
/// System::attach_to_tokio("actix-main-system", runtime, rest_operations);
/// ```
pub fn attach_to_tokio<Fut, R>(
name: impl Into<String>,
runtime: tokio::runtime::Runtime,
rest_operations: Fut,
) -> R
where
Fut: std::future::Future<Output = R>,
{
let actix_system_task = LocalSet::new();
let sys = System::run_in_tokio(name.into(), &actix_system_task);
actix_system_task.spawn_local(sys);
runtime.block_on(actix_system_task.run_until(rest_operations))
}
/// Get current running system.
///
/// # Panics
/// Panics if no system is registered on the current thread.
pub fn current() -> System {
CURRENT.with(|cell| match *cell.borrow() {
Some(ref sys) => sys.clone(),
@@ -195,67 +126,198 @@ impl System {
})
}
/// Check if current system is set, i.e., has already been started.
pub fn is_set() -> bool {
CURRENT.with(|cell| cell.borrow().is_some())
/// Try to get current running system.
///
/// Returns `None` if no System has been started.
///
/// Unlike [`current`](Self::current), this never panics.
pub fn try_current() -> Option<System> {
CURRENT.with(|cell| cell.borrow().clone())
}
/// Set current running system.
/// Get handle to the System's initial [Arbiter].
pub fn arbiter(&self) -> &ArbiterHandle {
&self.arbiter_handle
}
/// Check if there is a System registered on the current thread.
pub fn is_registered() -> bool {
CURRENT.with(|sys| sys.borrow().is_some())
}
/// Register given system on current thread.
#[doc(hidden)]
pub fn set_current(sys: System) {
CURRENT.with(|s| {
*s.borrow_mut() = Some(sys);
CURRENT.with(|cell| {
*cell.borrow_mut() = Some(sys);
})
}
/// Execute function with system reference.
pub fn with_current<F, R>(f: F) -> R
where
F: FnOnce(&System) -> R,
{
CURRENT.with(|cell| match *cell.borrow() {
Some(ref sys) => f(sys),
None => panic!("System is not running"),
})
}
/// System id
/// Numeric system identifier.
///
/// Useful when using multiple Systems.
pub fn id(&self) -> usize {
self.id
}
/// Stop the system
/// Stop the system (with code 0).
pub fn stop(&self) {
self.stop_with_code(0)
}
/// Stop the system with a particular exit code.
/// Stop the system with a given exit code.
pub fn stop_with_code(&self, code: i32) {
let _ = self.sys.send(SystemCommand::Exit(code));
let _ = self.sys_tx.send(SystemCommand::Exit(code));
}
pub(crate) fn sys(&self) -> &UnboundedSender<SystemCommand> {
&self.sys
}
/// Return status of 'stop_on_panic' option which controls whether the System is stopped when an
/// uncaught panic is thrown from a worker thread.
pub fn stop_on_panic(&self) -> bool {
self.stop_on_panic
}
/// System arbiter
pub fn arbiter(&self) -> &Arbiter {
&self.arbiter
}
/// This function will start tokio runtime and will finish once the
/// `System::stop()` message get called.
/// Function `f` get called within tokio runtime context.
pub fn run<F>(f: F) -> io::Result<()>
where
F: FnOnce(),
{
Self::builder().run(f)
pub(crate) fn tx(&self) -> &mpsc::UnboundedSender<SystemCommand> {
&self.sys_tx
}
}
#[cfg(not(feature = "io-uring"))]
/// Runner that keeps a [System]'s event loop alive until stop message is received.
#[must_use = "A SystemRunner does nothing unless `run` is called."]
#[derive(Debug)]
pub struct SystemRunner {
rt: crate::runtime::Runtime,
stop_rx: oneshot::Receiver<i32>,
#[allow(dead_code)]
system: System,
}
#[cfg(not(feature = "io-uring"))]
impl SystemRunner {
/// Starts event loop and will return once [System] is [stopped](System::stop).
pub fn run(self) -> io::Result<()> {
let SystemRunner { rt, stop_rx, .. } = self;
// run loop
match rt.block_on(stop_rx) {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
io::ErrorKind::Other,
format!("Non-zero exit code: {}", code),
))
} else {
Ok(())
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
}
}
/// Runs the provided future, blocking the current thread until the future completes.
#[inline]
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
self.rt.block_on(fut)
}
}
#[cfg(feature = "io-uring")]
/// Runner that keeps a [System]'s event loop alive until stop message is received.
#[must_use = "A SystemRunner does nothing unless `run` is called."]
#[derive(Debug)]
pub struct SystemRunner;
#[cfg(feature = "io-uring")]
impl SystemRunner {
/// Starts event loop and will return once [System] is [stopped](System::stop).
pub fn run(self) -> io::Result<()> {
unimplemented!("SystemRunner::run is not implemented yet")
}
/// Runs the provided future, blocking the current thread until the future completes.
#[inline]
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
tokio_uring::start(async move {
let (stop_tx, stop_rx) = oneshot::channel();
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
let sys_arbiter = Arbiter::in_new_system();
let system = System::construct(sys_tx, sys_arbiter.clone());
system
.tx()
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
.unwrap();
// init background system arbiter
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
tokio_uring::spawn(sys_ctrl);
let res = fut.await;
drop(stop_rx);
res
})
}
}
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
RegisterArbiter(usize, ArbiterHandle),
DeregisterArbiter(usize),
}
/// There is one `SystemController` per [System]. It runs in the background, keeping track of
/// [Arbiter]s and is able to distribute a system-wide stop command.
#[derive(Debug)]
pub(crate) struct SystemController {
stop_tx: Option<oneshot::Sender<i32>>,
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
arbiters: HashMap<usize, ArbiterHandle>,
}
impl SystemController {
pub(crate) fn new(
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
stop_tx: oneshot::Sender<i32>,
) -> Self {
SystemController {
cmd_rx,
stop_tx: Some(stop_tx),
arbiters: HashMap::with_capacity(4),
}
}
}
impl Future for SystemController {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// process all items currently buffered in channel
loop {
match ready!(Pin::new(&mut self.cmd_rx).poll_recv(cx)) {
// channel closed; no more messages can be received
None => return Poll::Ready(()),
// process system command
Some(cmd) => match cmd {
SystemCommand::Exit(code) => {
// stop all arbiters
for arb in self.arbiters.values() {
arb.stop();
}
// stop event loop
// will only fire once
if let Some(stop_tx) = self.stop_tx.take() {
let _ = stop_tx.send(code);
}
}
SystemCommand::RegisterArbiter(id, arb) => {
self.arbiters.insert(id, arb);
}
SystemCommand::DeregisterArbiter(id) => {
self.arbiters.remove(&id);
}
},
}
}
}
}
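
To tie the types above together, a hedged end-to-end sketch of the non-io-uring flow (simplified, error handling and the io-uring path omitted): `System::new` builds the runtime and registers the first Arbiter, `block_on` drives a future on the System's LocalSet, and `SystemRunner::run` parks the thread until `SystemCommand::Exit` reaches the `SystemController`.
fn system_lifecycle_demo() -> std::io::Result<()> {
    let runner = actix_rt::System::new();

    // queue a task on the System's LocalSet; it is polled while `run()` drives the event loop
    runner.block_on(async {
        actix_rt::spawn(async {
            println!("running inside the system");
            // sends SystemCommand::Exit(0) to the SystemController
            actix_rt::System::current().stop();
        });
    });

    // resolves with Ok(()) once the exit command (code 0) has been processed
    runner.run()
}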

View File

@@ -1,126 +0,0 @@
use std::time::{Duration, Instant};
#[test]
fn await_for_timer() {
let time = Duration::from_secs(2);
let instant = Instant::now();
actix_rt::System::new("test_wait_timer").block_on(async move {
tokio::time::sleep(time).await;
});
assert!(
instant.elapsed() >= time,
"Block on should poll awaited future to completion"
);
}
#[test]
fn join_another_arbiter() {
let time = Duration::from_secs(2);
let instant = Instant::now();
actix_rt::System::new("test_join_another_arbiter").block_on(async move {
let mut arbiter = actix_rt::Arbiter::new();
arbiter.send(Box::pin(async move {
tokio::time::sleep(time).await;
actix_rt::Arbiter::current().stop();
}));
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on another arbiter should complete only when it calls stop"
);
let instant = Instant::now();
actix_rt::System::new("test_join_another_arbiter").block_on(async move {
let mut arbiter = actix_rt::Arbiter::new();
arbiter.exec_fn(move || {
actix_rt::spawn(async move {
tokio::time::sleep(time).await;
actix_rt::Arbiter::current().stop();
});
});
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on an arbiter that has used actix_rt::spawn should wait for said future"
);
let instant = Instant::now();
actix_rt::System::new("test_join_another_arbiter").block_on(async move {
let mut arbiter = actix_rt::Arbiter::new();
arbiter.send(Box::pin(async move {
tokio::time::sleep(time).await;
actix_rt::Arbiter::current().stop();
}));
arbiter.stop();
arbiter.join().unwrap();
});
assert!(
instant.elapsed() < time,
"Premature stop of arbiter should conclude regardless of its current state"
);
}
// #[test]
// fn join_current_arbiter() {
// let time = Duration::from_secs(2);
//
// let instant = Instant::now();
// actix_rt::System::new("test_join_current_arbiter").block_on(async move {
// actix_rt::spawn(async move {
// tokio::time::delay_for(time).await;
// actix_rt::Arbiter::current().stop();
// });
// actix_rt::Arbiter::local_join().await;
// });
// assert!(
// instant.elapsed() >= time,
// "Join on current arbiter should wait for all spawned futures"
// );
//
// let large_timer = Duration::from_secs(20);
// let instant = Instant::now();
// actix_rt::System::new("test_join_current_arbiter").block_on(async move {
// actix_rt::spawn(async move {
// tokio::time::delay_for(time).await;
// actix_rt::Arbiter::current().stop();
// });
// let f = actix_rt::Arbiter::local_join();
// actix_rt::spawn(async move {
// tokio::time::delay_for(large_timer).await;
// actix_rt::Arbiter::current().stop();
// });
// f.await;
// });
// assert!(
// instant.elapsed() < large_timer,
// "local_join should await only for the already spawned futures"
// );
// }
#[test]
fn non_static_block_on() {
let string = String::from("test_str");
let str = string.as_str();
let sys = actix_rt::System::new("borrow some");
sys.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", str);
});
let rt = actix_rt::Runtime::new().unwrap();
rt.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", str);
});
actix_rt::System::run(|| {
assert_eq!("test_str", str);
actix_rt::System::current().stop();
})
.unwrap();
}

View File

@@ -0,0 +1,17 @@
//! Checks that test macro does not cause problems in the presence of imports named "test" that
//! could be either a module with test items or the "test with runtime" macro itself.
//!
//! Before actix/actix-net#399 was implemented, this macro was running twice: the first run output a
//! plain `#[test]`, which could resolve back to this very macro when an import named `test` was in
//! scope, causing a second expansion.
//!
//! Prevented by using the fully-qualified test marker (`#[::core::prelude::v1::test]`).
#![cfg(feature = "macros")]
use actix_rt::time as test;
#[actix_rt::test]
async fn test_naming_conflict() {
use test as time;
time::sleep(std::time::Duration::from_millis(2)).await;
}
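
A rough illustration (simplified, and not the exact code the macro generates) of why the fully-qualified marker avoids the double expansion described above:
// Hypothetical, simplified expansion of `#[actix_rt::test]` (not the exact generated code):
// the marker attribute is emitted fully qualified, so a local item named `test`
// can no longer shadow it and trigger a second macro expansion.
#[::core::prelude::v1::test]
fn test_naming_conflict() {
    actix_rt::System::new().block_on(async { /* original async test body */ })
}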

actix-rt/tests/tests.rs Normal file
View File

@@ -0,0 +1,368 @@
use std::{
future::Future,
time::{Duration, Instant},
};
use actix_rt::{task::JoinError, Arbiter, System};
#[cfg(not(feature = "io-uring"))]
use {
std::{sync::mpsc::channel, thread},
tokio::sync::oneshot,
};
#[test]
fn await_for_timer() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
tokio::time::sleep(time).await;
});
assert!(
instant.elapsed() >= time,
"Block on should poll awaited future to completion"
);
}
#[test]
fn join_another_arbiter() {
let time = Duration::from_secs(1);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on another arbiter should complete only when it calls stop"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || {
actix_rt::spawn(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
});
});
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on an arbiter that has used actix_rt::spawn should wait for said future"
);
let instant = Instant::now();
System::new().block_on(async move {
let arbiter = Arbiter::new();
arbiter.spawn(Box::pin(async move {
tokio::time::sleep(time).await;
Arbiter::current().stop();
}));
arbiter.stop();
arbiter.join().unwrap();
});
assert!(
instant.elapsed() < time,
"Premature stop of arbiter should conclude regardless of its current state"
);
}
#[test]
fn non_static_block_on() {
let string = String::from("test_str");
let string = string.as_str();
let sys = System::new();
sys.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
let rt = actix_rt::Runtime::new().unwrap();
rt.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", string);
});
}
#[test]
fn wait_for_spawns() {
let rt = actix_rt::Runtime::new().unwrap();
let handle = rt.spawn(async {
println!("running on the runtime");
// assertion panic is caught at task boundary
assert_eq!(1, 2);
});
assert!(rt.block_on(handle).is_err());
}
// Temporarily disabled tests for the io-uring feature.
// They should be enabled when possible.
#[cfg(not(feature = "io-uring"))]
#[test]
fn arbiter_spawn_fn_runs() {
let _ = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
arbiter.spawn_fn(move || tx.send(42).unwrap());
let num = rx.recv().unwrap();
assert_eq!(num, 42);
arbiter.stop();
arbiter.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn arbiter_handle_spawn_fn_runs() {
let sys = System::new();
let (tx, rx) = channel::<u32>();
let arbiter = Arbiter::new();
let handle = arbiter.handle();
drop(arbiter);
handle.spawn_fn(move || {
tx.send(42).unwrap();
System::current().stop()
});
let num = rx.recv_timeout(Duration::from_secs(2)).unwrap();
assert_eq!(num, 42);
handle.stop();
sys.run().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn arbiter_drop_no_panic_fn() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn_fn(|| panic!("test"));
arbiter.stop();
arbiter.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn arbiter_drop_no_panic_fut() {
let _ = System::new();
let arbiter = Arbiter::new();
arbiter.spawn(async { panic!("test") });
arbiter.stop();
arbiter.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn system_arbiter_spawn() {
let runner = System::new();
let (tx, rx) = oneshot::channel();
let sys = System::current();
thread::spawn(|| {
// this thread will have no arbiter in its thread-local storage, so the call will panic
Arbiter::current();
})
.join()
.unwrap_err();
let thread = thread::spawn(|| {
// this thread will have no arbiter in its thread-local storage, so use the system handle instead
System::set_current(sys);
let sys = System::current();
let arb = sys.arbiter();
arb.spawn(async move {
tx.send(42u32).unwrap();
System::current().stop();
});
});
assert_eq!(runner.block_on(rx).unwrap(), 42);
thread.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn system_stop_stops_arbiters() {
let sys = System::new();
let arb = Arbiter::new();
// arbiter should be alive to receive spawn msg
assert!(Arbiter::current().spawn_fn(|| {}));
assert!(arb.spawn_fn(|| {}));
System::current().stop();
sys.run().unwrap();
// account for slightly slow thread de-spawns
thread::sleep(Duration::from_millis(500));
// arbiter should be dead and return false
assert!(!Arbiter::current().spawn_fn(|| {}));
assert!(!arb.spawn_fn(|| {}));
arb.join().unwrap();
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn new_system_with_tokio() {
let (tx, rx) = channel();
let res = System::with_tokio_rt(move || {
tokio::runtime::Builder::new_multi_thread()
.enable_io()
.enable_time()
.thread_keep_alive(Duration::from_millis(1000))
.worker_threads(2)
.max_blocking_threads(2)
.on_thread_start(|| {})
.on_thread_stop(|| {})
.build()
.unwrap()
})
.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
tokio::task::spawn(async move {
tx.send(42).unwrap();
})
.await
.unwrap();
123usize
});
assert_eq!(res, 123);
assert_eq!(rx.recv().unwrap(), 42);
}
#[cfg(not(feature = "io-uring"))]
#[test]
fn new_arbiter_with_tokio() {
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
let _ = System::new();
let arb = Arbiter::with_tokio_rt(|| {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
});
let counter = Arc::new(AtomicBool::new(true));
let counter1 = counter.clone();
let did_spawn = arb.spawn(async move {
actix_rt::time::sleep(Duration::from_millis(1)).await;
counter1.store(false, Ordering::SeqCst);
Arbiter::current().stop();
});
assert!(did_spawn);
arb.join().unwrap();
assert!(!counter.load(Ordering::SeqCst));
}
#[test]
#[should_panic]
fn no_system_current_panic() {
System::current();
}
#[test]
#[should_panic]
fn no_system_arbiter_new_panic() {
Arbiter::new();
}
#[test]
fn try_current_no_system() {
assert!(System::try_current().is_none())
}
#[test]
fn try_current_with_system() {
System::new().block_on(async { assert!(System::try_current().is_some()) });
}
#[allow(clippy::unit_cmp)]
#[test]
fn spawn_local() {
System::new().block_on(async {
// demonstrate that spawn -> R is strictly more capable than spawn -> ()
assert_eq!(actix_rt::spawn(async {}).await.unwrap(), ());
assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
assert!(actix_rt::spawn(async { panic!("") }).await.is_err());
actix_rt::spawn(async { tokio::time::sleep(Duration::from_millis(50)).await })
.await
.unwrap();
fn g<F: Future<Output = Result<(), JoinError>>>(_f: F) {}
g(actix_rt::spawn(async {}));
// g(actix_rt::spawn(async { 1 })); // compile err
fn h<F: Future<Output = Result<R, JoinError>>, R>(_f: F) {}
h(actix_rt::spawn(async {}));
h(actix_rt::spawn(async { 1 }));
})
}
#[cfg(all(target_os = "linux", feature = "io-uring"))]
#[test]
fn tokio_uring_arbiter() {
System::new().block_on(async {
let (tx, rx) = std::sync::mpsc::channel();
Arbiter::new().spawn(async move {
let handle = actix_rt::spawn(async move {
let f = tokio_uring::fs::File::create("test.txt").await.unwrap();
let buf = b"Hello World!";
let (res, _) = f.write_at(&buf[..], 0).await;
assert!(res.is_ok());
f.sync_all().await.unwrap();
f.close().await.unwrap();
std::fs::remove_file("test.txt").unwrap();
});
handle.await.unwrap();
tx.send(true).unwrap();
});
assert!(rx.recv().unwrap());
})
}

View File

@@ -3,6 +3,69 @@
## Unreleased - 2021-xx-xx
## 2.0.0-beta.9 - 2021-11-15
* Restore `Arbiter` support lost in `beta.8`. [#417]
[#417]: https://github.com/actix/actix-net/pull/417
## 2.0.0-beta.8 - 2021-11-05 _(YANKED)_
* Fix non-unix signal handler. [#410]
[#410]: https://github.com/actix/actix-net/pull/410
## 2.0.0-beta.7 - 2021-11-05 _(YANKED)_
* Server can be started in regular Tokio runtime. [#408]
* Expose new `Server` type whose `Future` impl resolves when server stops. [#408]
* Rename `Server` to `ServerHandle`. [#407]
* Add `Server::handle` to obtain handle to server. [#408]
* Rename `ServerBuilder::{maxconn => max_concurrent_connections}`. [#407]
* Deprecate crate-level `new` shortcut for server builder. [#408]
* Minimum supported Rust version (MSRV) is now 1.52.
[#407]: https://github.com/actix/actix-net/pull/407
[#408]: https://github.com/actix/actix-net/pull/408
## 2.0.0-beta.6 - 2021-10-11
* Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux. [#374]
* Server no longer listens for the `SIGHUP` signal. Previously, the received signal was not used but
did block subsequent exit signals from working. [#389]
* Remove `config` module. `ServiceConfig`, `ServiceRuntime` public types are removed due to
this change. [#349]
* Remove `ServerBuilder::configure` [#349]
[#374]: https://github.com/actix/actix-net/pull/374
[#349]: https://github.com/actix/actix-net/pull/349
[#389]: https://github.com/actix/actix-net/pull/389
## 2.0.0-beta.5 - 2021-04-20
* Server shutdown notifies all workers to exit regardless of whether shutdown is graceful. This causes
all workers to shut down immediately in the force-shutdown case. [#333]
[#333]: https://github.com/actix/actix-net/pull/333
## 2.0.0-beta.4 - 2021-04-01
* Prevent panic when `shutdown_timeout` is very large. [f9262db]
[f9262db]: https://github.com/actix/actix-net/commit/f9262db
## 2.0.0-beta.3 - 2021-02-06
* Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`. [#246]
* Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop. [#264]
* Add `ServerBuilder::worker_max_blocking_threads` to customize blocking thread pool size. [#265]
* Update `actix-rt` to `2.0.0`. [#273]
[#246]: https://github.com/actix/actix-net/pull/246
[#264]: https://github.com/actix/actix-net/pull/264
[#265]: https://github.com/actix/actix-net/pull/265
[#273]: https://github.com/actix/actix-net/pull/273
## 2.0.0-beta.2 - 2021-01-03
* Merge `actix-testing` to `actix-server` as `test_server` mod. [#242]

actix-server/Cargo.toml Normal file → Executable file
View File

@@ -1,18 +1,15 @@
[package]
name = "actix-server"
version = "2.0.0-beta.2"
version = "2.0.0-beta.9"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
]
description = "General purpose TCP server built for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-server/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
exclude = [".gitignore", ".cargo/config"]
edition = "2018"
[lib]
@@ -21,22 +18,29 @@ path = "src/lib.rs"
[features]
default = []
io-uring = ["tokio-uring", "actix-rt/io-uring"]
[dependencies]
actix-codec = "0.4.0-beta.1"
actix-rt = "2.0.0-beta.1"
actix-service = "2.0.0-beta.2"
actix-utils = "3.0.0-beta.1"
actix-rt = { version = "2.4.0", default-features = false }
actix-service = "2.0.0"
actix-utils = "3.0.0"
futures-core = { version = "0.3.7", default-features = false }
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
futures-util = { version = "0.3.7", default-features = false, features = ["alloc"] }
log = "0.4"
mio = { version = "0.7.6", features = ["os-poll", "net"] }
num_cpus = "1.13"
slab = "0.4"
tokio = { version = "1", features = ["sync"] }
socket2 = "0.4.2"
tokio = { version = "1.5.1", features = ["sync"] }
# runtime for io-uring feature
tokio-uring = { version = "0.1", optional = true }
[dev-dependencies]
actix-codec = "0.4.0"
actix-rt = "2.0.0"
bytes = "1"
env_logger = "0.8"
env_logger = "0.9"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
tokio = { version = "1", features = ["io-util"] }
tokio = { version = "1.5.1", features = ["io-util", "rt-multi-thread", "macros"] }
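
For context, the new `io-uring` feature is strictly opt-in: a downstream crate would enable it from its own Cargo.toml (typically behind a feature of its own that forwards to `actix-server/io-uring`; the exact feature wiring shown here is an assumption, not a line from this diff) and can then gate Linux-only code on it, mirroring the `tokio_uring_arbiter` test earlier in this changeset:
// hedged sketch: only compiled when an `io-uring` feature is active on Linux
#[cfg(all(target_os = "linux", feature = "io-uring"))]
async fn write_via_uring(path: &str, data: &'static [u8]) -> std::io::Result<()> {
    let file = tokio_uring::fs::File::create(path).await?;
    let (res, _buf) = file.write_at(data, 0).await;
    res?;
    file.sync_all().await?;
    file.close().await
}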

View File

@@ -9,24 +9,24 @@
//! Start typing. When you press enter the typed line will be echoed back. The server will log
//! the length of each line it echoes and the total size of data sent when the connection is closed.
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
use std::{
io,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use std::{env, io};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::pipeline_factory;
use actix_service::{fn_service, ServiceFactoryExt as _};
use bytes::BytesMut;
use futures_util::future::ok;
use log::{error, info};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "actix=trace,basic=trace");
env_logger::init();
async fn run() -> io::Result<()> {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init();
let count = Arc::new(AtomicUsize::new(0));
@@ -41,7 +41,7 @@ async fn main() -> io::Result<()> {
let count = Arc::clone(&count);
let num2 = Arc::clone(&count);
pipeline_factory(move |mut stream: TcpStream| {
fn_service(move |mut stream: TcpStream| {
let count = Arc::clone(&count);
async move {
@@ -86,3 +86,16 @@ async fn main() -> io::Result<()> {
.run()
.await
}
#[tokio::main]
async fn main() -> io::Result<()> {
run().await?;
Ok(())
}
// alternatively:
// #[actix_rt::main]
// async fn main() -> io::Result<()> {
// run().await?;
// Ok(())
// }

View File

@@ -1,418 +1,460 @@
use std::time::Duration;
use std::{io, thread};
use std::{io, thread, time::Duration};
use actix_rt::time::{sleep_until, Instant};
use actix_rt::System;
use log::{error, info};
use actix_rt::time::Instant;
use log::{debug, error, info};
use mio::{Interest, Poll, Token as MioToken};
use slab::Slab;
use crate::server::Server;
use crate::socket::{MioListener, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
use crate::worker::{Conn, WorkerHandle};
use crate::Token;
use crate::{
availability::Availability,
socket::MioListener,
waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN},
worker::{Conn, ServerWorker, WorkerHandleAccept, WorkerHandleServer},
ServerBuilder, ServerHandle,
};
const TIMEOUT_DURATION_ON_ERROR: Duration = Duration::from_millis(510);
struct ServerSocketInfo {
// addr for socket. mainly used for logging.
addr: SocketAddr,
// beware: this is the crate-level token used to identify the socket and should not be confused
// with mio::Token
token: Token,
token: usize,
lst: MioListener,
// timeout is used to mark the deadline when this socket's listener should be registered again
// after an error.
timeout: Option<Instant>,
}
/// Accept loop would live with `ServerBuilder`.
///
/// It's tasked with constructing a `Poll` instance and `WakerQueue` which would be distributed to
/// `Accept` and `Worker`.
///
/// It would also listen to `ServerCommand` and push interests to `WakerQueue`.
pub(crate) struct AcceptLoop {
srv: Option<Server>,
poll: Option<Poll>,
waker: WakerQueue,
}
impl AcceptLoop {
pub fn new(srv: Server) -> Self {
let poll = Poll::new().unwrap_or_else(|e| panic!("Can not create `mio::Poll`: {}", e));
let waker = WakerQueue::new(poll.registry())
.unwrap_or_else(|e| panic!("Can not create `mio::Waker`: {}", e));
Self {
srv: Some(srv),
poll: Some(poll),
waker,
}
}
pub(crate) fn waker_owned(&self) -> WakerQueue {
self.waker.clone()
}
pub fn wake(&self, i: WakerInterest) {
self.waker.wake(i);
}
pub(crate) fn start(
&mut self,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
) {
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
let poll = self.poll.take().unwrap();
let waker = self.waker.clone();
Accept::start(poll, waker, socks, srv, handles);
}
/// Timeout is used to mark the deadline when this socket's listener should be registered again
/// after an error.
timeout: Option<actix_rt::time::Instant>,
}
/// poll instance of the server.
struct Accept {
pub(crate) struct Accept {
poll: Poll,
waker: WakerQueue,
handles: Vec<WorkerHandle>,
srv: Server,
waker_queue: WakerQueue,
handles: Vec<WorkerHandleAccept>,
srv: ServerHandle,
next: usize,
backpressure: bool,
avail: Availability,
/// use the smallest duration from sockets timeout.
timeout: Option<Duration>,
paused: bool,
}
/// This function defines errors that are per-connection. Which basically
/// means that if we get this error from `accept()` system call it means
/// next connection might be ready to be accepted.
impl Accept {
pub(crate) fn start(
sockets: Vec<(usize, MioListener)>,
builder: &ServerBuilder,
) -> io::Result<(WakerQueue, Vec<WorkerHandleServer>)> {
let handle_server = ServerHandle::new(builder.cmd_tx.clone());
// construct poll instance and its waker
let poll = Poll::new()?;
let waker_queue = WakerQueue::new(poll.registry())?;
// start workers and collect handles
let (handles_accept, handles_server) = (0..builder.threads)
.map(|idx| {
// clone service factories
let factories = builder
.factories
.iter()
.map(|f| f.clone_factory())
.collect::<Vec<_>>();
// start worker using service factories
ServerWorker::start(idx, factories, waker_queue.clone(), builder.worker_config)
})
.collect::<io::Result<Vec<_>>>()?
.into_iter()
.unzip();
let (mut accept, mut sockets) = Accept::new_with_sockets(
poll,
waker_queue.clone(),
sockets,
handles_accept,
handle_server,
)?;
thread::Builder::new()
.name("actix-server acceptor".to_owned())
.spawn(move || accept.poll_with(&mut sockets))
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
Ok((waker_queue, handles_server))
}
fn new_with_sockets(
poll: Poll,
waker_queue: WakerQueue,
sockets: Vec<(usize, MioListener)>,
accept_handles: Vec<WorkerHandleAccept>,
server_handle: ServerHandle,
) -> io::Result<(Accept, Box<[ServerSocketInfo]>)> {
let sockets = sockets
.into_iter()
.map(|(token, mut lst)| {
// Start listening for incoming connections
poll.registry()
.register(&mut lst, MioToken(token), Interest::READABLE)?;
Ok(ServerSocketInfo {
token,
lst,
timeout: None,
})
})
.collect::<io::Result<_>>()?;
let mut avail = Availability::default();
// Assume all handles are avail at construct time.
avail.set_available_all(&accept_handles);
let accept = Accept {
poll,
waker_queue,
handles: accept_handles,
srv: server_handle,
next: 0,
avail,
timeout: None,
paused: false,
};
Ok((accept, sockets))
}
/// blocking wait for readiness events triggered by mio
fn poll_with(&mut self, sockets: &mut [ServerSocketInfo]) {
let mut events = mio::Events::with_capacity(256);
loop {
if let Err(e) = self.poll.poll(&mut events, self.timeout) {
match e.kind() {
io::ErrorKind::Interrupted => {}
_ => panic!("Poll error: {}", e),
}
}
for event in events.iter() {
let token = event.token();
match token {
WAKER_TOKEN => {
let exit = self.handle_waker(sockets);
if exit {
info!("Accept thread stopped");
return;
}
}
_ => {
let token = usize::from(token);
self.accept(sockets, token);
}
}
}
// check for timeout and re-register sockets
self.process_timeout(sockets);
}
}
fn handle_waker(&mut self, sockets: &mut [ServerSocketInfo]) -> bool {
// This is a loop because the command interest in the previous version was handled by a loop that
// tried to drain the command channel. It's yet unknown whether it is necessary/good practice to
// actively drain the waker queue.
loop {
// take guard with every iteration so no new interest can be added
// until the current task is done.
let mut guard = self.waker_queue.guard();
match guard.pop_front() {
// worker notify it becomes available.
Some(WakerInterest::WorkerAvailable(idx)) => {
drop(guard);
self.avail.set_available(idx, true);
if !self.paused {
self.accept_all(sockets);
}
}
// a new worker thread is made and its handle will be added to Accept
Some(WakerInterest::Worker(handle)) => {
drop(guard);
self.avail.set_available(handle.idx(), true);
self.handles.push(handle);
if !self.paused {
self.accept_all(sockets);
}
}
Some(WakerInterest::Pause) => {
drop(guard);
if !self.paused {
self.paused = true;
self.deregister_all(sockets);
}
}
Some(WakerInterest::Resume) => {
drop(guard);
if self.paused {
self.paused = false;
sockets.iter_mut().for_each(|info| {
self.register_logged(info);
});
self.accept_all(sockets);
}
}
Some(WakerInterest::Stop) => {
if !self.paused {
self.deregister_all(sockets);
}
return true;
}
// waker queue is drained
None => {
// Reset the WakerQueue before break so it does not grow infinitely
WakerQueue::reset(&mut guard);
return false;
}
}
}
}
fn process_timeout(&mut self, sockets: &mut [ServerSocketInfo]) {
// always remove old timeouts
if self.timeout.take().is_some() {
let now = Instant::now();
sockets
.iter_mut()
// Only sockets that had an associated timeout were deregistered.
.filter(|info| info.timeout.is_some())
.for_each(|info| {
let inst = info.timeout.take().unwrap();
if now < inst {
// still timed out; try to set new timeout
info.timeout = Some(inst);
self.set_timeout(inst - now);
} else if !self.paused {
// timeout expired; register socket again
self.register_logged(info);
}
// Drop the timeout if the server is paused and the socket timeout has expired.
// When the server recovers from pause it will register all sockets without
// a timeout value, so this socket's registration will be delayed until then.
});
}
}
/// Update accept timeout with `duration` if it is shorter than current timeout.
fn set_timeout(&mut self, duration: Duration) {
match self.timeout {
Some(ref mut timeout) => {
if *timeout > duration {
*timeout = duration;
}
}
None => self.timeout = Some(duration),
}
}
#[cfg(not(target_os = "windows"))]
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
let token = MioToken(info.token);
self.poll
.registry()
.register(&mut info.lst, token, Interest::READABLE)
}
#[cfg(target_os = "windows")]
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
// On Windows, calling register without deregister causes an error.
// See https://github.com/actix/actix-web/issues/905
// Calling reregister seems to fix the issue.
let token = MioToken(info.token);
self.poll
.registry()
.register(&mut info.lst, token, Interest::READABLE)
.or_else(|_| {
self.poll
.registry()
.reregister(&mut info.lst, token, Interest::READABLE)
})
}
fn register_logged(&self, info: &mut ServerSocketInfo) {
match self.register(info) {
Ok(_) => debug!("Resume accepting connections on {}", info.lst.local_addr()),
Err(e) => error!("Can not register server socket {}", e),
}
}
fn deregister_logged(&self, info: &mut ServerSocketInfo) {
match self.poll.registry().deregister(&mut info.lst) {
Ok(_) => debug!("Paused accepting connections on {}", info.lst.local_addr()),
Err(e) => {
error!("Can not deregister server socket {}", e)
}
}
}
fn deregister_all(&self, sockets: &mut [ServerSocketInfo]) {
// This is a best-effort implementation with the following limitation:
//
// Every ServerSocketInfo with an associated timeout will be skipped and its timeout is
// removed in the process.
//
// Therefore WakerInterest::Pause followed by WakerInterest::Resume in a very short gap
// (less than 500ms) would cause all timing-out ServerSocketInfos to be re-registered before
// their expected timing.
sockets
.iter_mut()
// Take all timeouts.
// This is to prevent the Accept::process_timeout method from re-registering a socket afterwards.
.map(|info| (info.timeout.take(), info))
// Socket info with a timeout is already deregistered so skip them.
.filter(|(timeout, _)| timeout.is_none())
.for_each(|(_, info)| self.deregister_logged(info));
}
// Send connection to worker and handle error.
fn send_connection(&mut self, conn: Conn) -> Result<(), Conn> {
let next = self.next();
match next.send(conn) {
Ok(_) => {
// Increment counter of WorkerHandle.
// Set the worker to unavailable when it hits max (returns false).
if !next.inc_counter() {
let idx = next.idx();
self.avail.set_available(idx, false);
}
self.set_next();
Ok(())
}
Err(conn) => {
// Worker thread errored and could be gone.
// Remove worker handle and notify `ServerBuilder`.
self.remove_next();
if self.handles.is_empty() {
error!("No workers");
// All workers are gone and Conn is nowhere to be sent.
// Treat this situation as Ok and drop Conn.
return Ok(());
} else if self.handles.len() <= self.next {
self.next = 0;
}
Err(conn)
}
}
}
fn accept_one(&mut self, mut conn: Conn) {
loop {
let next = self.next();
let idx = next.idx();
if self.avail.get_available(idx) {
match self.send_connection(conn) {
Ok(_) => return,
Err(c) => conn = c,
}
} else {
self.avail.set_available(idx, false);
self.set_next();
if !self.avail.available() {
while let Err(c) = self.send_connection(conn) {
conn = c;
}
return;
}
}
}
}
fn accept(&mut self, sockets: &mut [ServerSocketInfo], token: usize) {
while self.avail.available() {
let info = &mut sockets[token];
match info.lst.accept() {
Ok(io) => {
let conn = Conn { io, token };
self.accept_one(conn);
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
// deregister listener temporarily
self.deregister_logged(info);
// sleep after error; write the timeout to the socket info so the poll loop later
// knows which socket's listener should be registered again and when
info.timeout = Some(Instant::now() + Duration::from_millis(500));
self.set_timeout(TIMEOUT_DURATION_ON_ERROR);
return;
}
};
}
}
fn accept_all(&mut self, sockets: &mut [ServerSocketInfo]) {
sockets
.iter_mut()
.map(|info| info.token)
.collect::<Vec<_>>()
.into_iter()
.for_each(|idx| self.accept(sockets, idx))
}
#[inline(always)]
fn next(&self) -> &WorkerHandleAccept {
&self.handles[self.next]
}
/// Set the next worker handle that will accept a connection.
#[inline(always)]
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
}
/// Remove the next worker handle that failed to accept a connection.
fn remove_next(&mut self) {
let handle = self.handles.swap_remove(self.next);
let idx = handle.idx();
// A message is sent to the `ServerBuilder` future to notify it that a new worker
// should be made.
self.srv.worker_faulted(idx);
self.avail.set_available(idx, false);
}
}
/// This function defines errors that are per-connection; if we get this error from the `accept()`
/// system call it means the next connection might be ready to be accepted.
///
/// All other errors will incur a timeout before next `accept()` is performed.
/// The timeout is useful to handle resource exhaustion errors like ENFILE
/// and EMFILE. Otherwise, could enter into tight loop.
/// All other errors will incur a timeout before next `accept()` call is attempted. The timeout is
/// useful to handle resource exhaustion errors like `ENFILE` and `EMFILE`. Otherwise, it could
/// enter into a temporary spin loop.
fn connection_error(e: &io::Error) -> bool {
e.kind() == io::ErrorKind::ConnectionRefused
|| e.kind() == io::ErrorKind::ConnectionAborted
|| e.kind() == io::ErrorKind::ConnectionReset
}
impl Accept {
pub(crate) fn start(
poll: Poll,
waker: WakerQueue,
socks: Vec<(Token, MioListener)>,
srv: Server,
handles: Vec<WorkerHandle>,
) {
// Accept runs in its own thread and would want to spawn additional futures to current
// actix system.
let sys = System::current();
thread::Builder::new()
.name("actix-server accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let (mut accept, sockets) =
Accept::new_with_sockets(poll, waker, socks, handles, srv);
accept.poll_with(sockets);
})
.unwrap();
}
fn new_with_sockets(
poll: Poll,
waker: WakerQueue,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
srv: Server,
) -> (Accept, Slab<ServerSocketInfo>) {
let mut sockets = Slab::new();
for (hnd_token, mut lst) in socks.into_iter() {
let addr = lst.local_addr();
let entry = sockets.vacant_entry();
let token = entry.key();
// Start listening for incoming connections
poll.registry()
.register(&mut lst, MioToken(token), Interest::READABLE)
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
entry.insert(ServerSocketInfo {
addr,
token: hnd_token,
lst,
timeout: None,
});
}
let accept = Accept {
poll,
waker,
handles,
srv,
next: 0,
backpressure: false,
};
(accept, sockets)
}
fn poll_with(&mut self, mut sockets: Slab<ServerSocketInfo>) {
let mut events = mio::Events::with_capacity(128);
loop {
self.poll
.poll(&mut events, None)
.unwrap_or_else(|e| panic!("Poll error: {}", e));
for event in events.iter() {
let token = event.token();
match token {
// This is a loop because interests for command from previous version was
// a loop that would try to drain the command channel. It's yet unknown
// if it's necessary/good practice to actively drain the waker queue.
WAKER_TOKEN => 'waker: loop {
// take guard with every iteration so no new interest can be added
// until the current task is done.
let mut guard = self.waker.guard();
match guard.pop_front() {
// worker notify it becomes available. we may want to recover
// from backpressure.
Some(WakerInterest::WorkerAvailable) => {
drop(guard);
self.maybe_backpressure(&mut sockets, false);
}
// a new worker thread is made and it's handle would be added
// to Accept
Some(WakerInterest::Worker(handle)) => {
drop(guard);
// maybe we want to recover from a backpressure.
self.maybe_backpressure(&mut sockets, false);
self.handles.push(handle);
}
// got timer interest and it's time to try register socket(s)
// again.
Some(WakerInterest::Timer) => {
drop(guard);
self.process_timer(&mut sockets)
}
Some(WakerInterest::Pause) => {
drop(guard);
sockets.iter_mut().for_each(|(_, info)| {
match self.deregister(info) {
Ok(_) => info!(
"Paused accepting connections on {}",
info.addr
),
Err(e) => {
error!("Can not deregister server socket {}", e)
}
}
});
}
Some(WakerInterest::Resume) => {
drop(guard);
sockets.iter_mut().for_each(|(token, info)| {
self.register_logged(token, info);
});
}
Some(WakerInterest::Stop) => {
return self.deregister_all(&mut sockets);
}
// waker queue is drained.
None => {
// Reset the WakerQueue before break so it does not grow
// infinitely.
WakerQueue::reset(&mut guard);
break 'waker;
}
}
},
_ => {
let token = usize::from(token);
self.accept(&mut sockets, token);
}
}
}
}
}
fn process_timer(&self, sockets: &mut Slab<ServerSocketInfo>) {
let now = Instant::now();
sockets.iter_mut().for_each(|(token, info)| {
// only ServerSocketInfos that have an associated timeout value were deregistered.
if let Some(inst) = info.timeout.take() {
if now > inst {
self.register_logged(token, info);
} else {
info.timeout = Some(inst);
}
}
});
}
#[cfg(not(target_os = "windows"))]
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
self.poll
.registry()
.register(&mut info.lst, MioToken(token), Interest::READABLE)
}
#[cfg(target_os = "windows")]
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
// On Windows, calling register without deregister causes an error.
// See https://github.com/actix/actix-web/issues/905
// Calling reregister seems to fix the issue.
self.poll
.registry()
.register(&mut info.lst, mio::Token(token), Interest::READABLE)
.or_else(|_| {
self.poll.registry().reregister(
&mut info.lst,
mio::Token(token),
Interest::READABLE,
)
})
}
fn register_logged(&self, token: usize, info: &mut ServerSocketInfo) {
match self.register(token, info) {
Ok(_) => info!("Resume accepting connections on {}", info.addr),
Err(e) => error!("Can not register server socket {}", e),
}
}
fn deregister(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
self.poll.registry().deregister(&mut info.lst)
}
fn deregister_all(&self, sockets: &mut Slab<ServerSocketInfo>) {
sockets.iter_mut().for_each(|(_, info)| {
info!("Accepting connections on {} has been paused", info.addr);
let _ = self.deregister(info);
});
}
fn maybe_backpressure(&mut self, sockets: &mut Slab<ServerSocketInfo>, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in sockets.iter_mut() {
if info.timeout.is_some() {
// socket will attempt to re-register itself when its timeout completes
continue;
}
self.register_logged(token, info);
}
}
} else if on {
self.backpressure = true;
self.deregister_all(sockets);
}
}
fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut msg: Conn) {
if self.backpressure {
while !self.handles.is_empty() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.set_next();
break;
}
Err(tmp) => {
// worker lost contact and could be gone. a message is sent to
// `ServerBuilder` future to notify it a new worker should be made.
// after that remove the fault worker.
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
return;
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
} else {
let mut idx = 0;
while idx < self.handles.len() {
idx += 1;
if self.handles[self.next].available() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.set_next();
return;
}
// worker lost contact and could be gone. a message is sent to
// `ServerBuilder` future to notify it a new worker should be made.
// after that remove the fault worker and enter backpressure if necessary.
Err(tmp) => {
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
self.maybe_backpressure(sockets, true);
return;
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.set_next();
}
// enable backpressure
self.maybe_backpressure(sockets, true);
self.accept_one(sockets, msg);
}
}
// set next worker handle that would accept work.
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
}
fn accept(&mut self, sockets: &mut Slab<ServerSocketInfo>, token: usize) {
loop {
let msg = if let Some(info) = sockets.get_mut(token) {
match info.lst.accept() {
Ok(Some((io, addr))) => Conn {
io,
token: info.token,
peer: Some(addr),
},
Ok(None) => return,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
// deregister listener temporarily
error!("Error accepting connection: {}", e);
if let Err(err) = self.deregister(info) {
error!("Can not deregister server socket {}", err);
}
// sleep after error; write the timeout to the socket info so the poll loop later knows
// which socket's listener should be registered again and when.
info.timeout = Some(Instant::now() + Duration::from_millis(500));
// after the sleep a Timer interest is sent to Accept Poll
let waker = self.waker.clone();
System::current().arbiter().send(Box::pin(async move {
sleep_until(Instant::now() + Duration::from_millis(510)).await;
waker.wake(WakerInterest::Timer);
}));
return;
}
}
} else {
return;
};
self.accept_one(sockets, msg);
}
}
}

View File

@@ -0,0 +1,121 @@
use crate::worker::WorkerHandleAccept;
/// Array of u128 with every bit as marker for a worker handle's availability.
#[derive(Debug, Default)]
pub(crate) struct Availability([u128; 4]);
impl Availability {
/// Check if any worker handle is available
#[inline(always)]
pub(crate) fn available(&self) -> bool {
self.0.iter().any(|a| *a != 0)
}
/// Check if worker handle is available by index
#[inline(always)]
pub(crate) fn get_available(&self, idx: usize) -> bool {
let (offset, idx) = Self::offset(idx);
self.0[offset] & (1 << idx as u128) != 0
}
/// Set worker handle available state by index.
pub(crate) fn set_available(&mut self, idx: usize, avail: bool) {
let (offset, idx) = Self::offset(idx);
let off = 1 << idx as u128;
if avail {
self.0[offset] |= off;
} else {
self.0[offset] &= !off
}
}
/// Set all worker handles to the available state.
/// This would result in a re-check on all workers' availability.
pub(crate) fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
handles.iter().for_each(|handle| {
self.set_available(handle.idx(), true);
})
}
/// Get offset and adjusted index of given worker handle index.
pub(crate) fn offset(idx: usize) -> (usize, usize) {
if idx < 128 {
(0, idx)
} else if idx < 128 * 2 {
(1, idx - 128)
} else if idx < 128 * 3 {
(2, idx - 128 * 2)
} else if idx < 128 * 4 {
(3, idx - 128 * 3)
} else {
panic!("Max WorkerHandle count is 512")
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn single(aval: &mut Availability, idx: usize) {
aval.set_available(idx, true);
assert!(aval.available());
aval.set_available(idx, true);
aval.set_available(idx, false);
assert!(!aval.available());
aval.set_available(idx, false);
assert!(!aval.available());
}
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
idx.iter().for_each(|idx| aval.set_available(*idx, true));
assert!(aval.available());
while let Some(idx) = idx.pop() {
assert!(aval.available());
aval.set_available(idx, false);
}
assert!(!aval.available());
}
#[test]
fn availability() {
let mut aval = Availability::default();
single(&mut aval, 1);
single(&mut aval, 128);
single(&mut aval, 256);
single(&mut aval, 511);
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
multi(&mut aval, idx);
multi(&mut aval, (0..511).collect())
}
#[test]
#[should_panic]
fn overflow() {
let mut aval = Availability::default();
single(&mut aval, 512);
}
#[test]
fn pin_point() {
let mut aval = Availability::default();
aval.set_available(438, true);
aval.set_available(479, true);
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
}
}
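
For intuition, `Availability::offset` above is just splitting a flat worker index into (which `u128` word, which bit). An equivalent formulation, shown purely as a sketch and not part of the crate, is division and remainder by 128:
// equivalent to Availability::offset for idx < 512 (sketch only, not part of the crate)
fn offset_equiv(idx: usize) -> (usize, usize) {
    assert!(idx < 512, "Max WorkerHandle count is 512");
    (idx / 128, idx % 128)
}

// e.g. worker 438 lives in word 3, bit 54 (438 - 384 == 54),
// which is exactly what the `pin_point` test above asserts.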

View File

@@ -1,42 +1,32 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::{io, mem};
use std::{io, time::Duration};
use actix_rt::net::TcpStream;
use actix_rt::time::{sleep_until, Instant};
use actix_rt::{spawn, System};
use log::{error, info};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::sync::oneshot;
use log::{info, trace};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use crate::accept::AcceptLoop;
use crate::config::{ConfiguredService, ServiceConfig};
use crate::server::{Server, ServerCommand};
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::signals::{Signal, Signals};
use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::socket::{MioTcpListener, MioTcpSocket};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::worker::{self, Worker, WorkerAvailability, WorkerHandle};
use crate::{join_all, Token};
use crate::{
server::ServerCommand,
service::{InternalServiceFactory, ServiceFactory, StreamNewService},
socket::{
create_mio_tcp_listener, MioListener, MioTcpListener, StdSocketAddr, StdTcpListener,
ToSocketAddrs,
},
worker::ServerWorkerConfig,
Server,
};
/// Server builder
/// [Server] builder.
pub struct ServerBuilder {
threads: usize,
token: Token,
backlog: u32,
handles: Vec<(usize, WorkerHandle)>,
services: Vec<Box<dyn InternalServiceFactory>>,
sockets: Vec<(Token, String, MioListener)>,
accept: AcceptLoop,
exit: bool,
shutdown_timeout: Duration,
no_signals: bool,
cmd: UnboundedReceiver<ServerCommand>,
server: Server,
notify: Vec<oneshot::Sender<()>>,
pub(crate) threads: usize,
pub(crate) token: usize,
pub(crate) backlog: u32,
pub(crate) factories: Vec<Box<dyn InternalServiceFactory>>,
pub(crate) sockets: Vec<(usize, String, MioListener)>,
pub(crate) exit: bool,
pub(crate) listen_os_signals: bool,
pub(crate) cmd_tx: UnboundedSender<ServerCommand>,
pub(crate) cmd_rx: UnboundedReceiver<ServerCommand>,
pub(crate) worker_config: ServerWorkerConfig,
}
impl Default for ServerBuilder {
@@ -48,42 +38,55 @@ impl Default for ServerBuilder {
impl ServerBuilder {
/// Create new Server builder instance
pub fn new() -> ServerBuilder {
let (tx, rx) = unbounded_channel();
let server = Server::new(tx);
let (cmd_tx, cmd_rx) = unbounded_channel();
ServerBuilder {
threads: num_cpus::get(),
token: Token::default(),
handles: Vec::new(),
services: Vec::new(),
token: 0,
factories: Vec::new(),
sockets: Vec::new(),
accept: AcceptLoop::new(server.clone()),
backlog: 2048,
exit: false,
shutdown_timeout: Duration::from_secs(30),
no_signals: false,
cmd: rx,
notify: Vec::new(),
server,
listen_os_signals: true,
cmd_tx,
cmd_rx,
worker_config: ServerWorkerConfig::default(),
}
}
/// Set number of workers to start.
///
/// By default server uses number of available logical cpu as workers
/// count. Workers must be greater than 0.
/// By default, the server uses the number of available logical CPUs as the worker count. Workers
/// must be greater than 0.
pub fn workers(mut self, num: usize) -> Self {
assert_ne!(num, 0, "workers must be greater than 0");
self.threads = num;
self
}
/// Set max number of threads for each worker's blocking task thread pool.
///
/// One thread pool is set up **per worker**; not shared across workers.
///
/// # Examples:
/// ```
/// # use actix_server::ServerBuilder;
/// let builder = ServerBuilder::new()
/// .workers(4) // server has 4 worker thread.
/// .worker_max_blocking_threads(4); // every worker has 4 max blocking threads.
/// ```
///
/// See [tokio::runtime::Builder::max_blocking_threads] for behavior reference.
pub fn worker_max_blocking_threads(mut self, num: usize) -> Self {
self.worker_config.max_blocking_threads(num);
self
}
/// Set the maximum number of pending connections.
///
/// This refers to the number of clients that can be waiting to be served.
/// Exceeding this number results in the client getting an error when
/// attempting to connect. It should only affect servers under significant
/// load.
/// This refers to the number of clients that can be waiting to be served. Exceeding this number
/// results in the client getting an error when attempting to connect. It should only affect
/// servers under significant load.
///
/// Generally set in the 64-2048 range. Default value is 2048.
///
@@ -95,66 +98,45 @@ impl ServerBuilder {
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
/// All socket listeners will stop accepting connections when this limit is reached for
/// each worker.
///
/// By default, max connections is set to 25k per worker.
pub fn maxconn(self, num: usize) -> Self {
worker::max_concurrent_connections(num);
pub fn max_concurrent_connections(mut self, num: usize) -> Self {
self.worker_config.max_concurrent_connections(num);
self
}
/// Stop actix system.
#[doc(hidden)]
#[deprecated(since = "2.0.0", note = "Renamed to `max_concurrent_connections`.")]
pub fn maxconn(self, num: usize) -> Self {
self.max_concurrent_connections(num)
}
/// Stop Actix `System` after server shutdown.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
/// Disable signal handling
/// Disable OS signal handling.
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self.listen_os_signals = false;
self
}
/// Timeout for graceful workers shutdown in seconds.
///
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
/// After receiving a stop signal, workers have this much time to finish serving requests.
/// Workers still alive after the timeout are force dropped.
///
/// By default, the shutdown timeout is set to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u64) -> Self {
self.shutdown_timeout = Duration::from_secs(sec);
self.worker_config
.shutdown_timeout(Duration::from_secs(sec));
self
}
/// Execute external configuration as part of the server building
/// process.
///
/// This function is useful for moving parts of configuration to a
/// different module or even library.
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
where
F: Fn(&mut ServiceConfig) -> io::Result<()>,
{
let mut cfg = ServiceConfig::new(self.threads, self.backlog);
f(&mut cfg)?;
if let Some(apply) = cfg.apply {
let mut srv = ConfiguredService::new(apply);
for (name, lst) in cfg.services {
let token = self.token.next();
srv.stream(token, name.clone(), lst.local_addr()?);
self.sockets.push((token, name, MioListener::Tcp(lst)));
}
self.services.push(Box::new(srv));
}
self.threads = cfg.threads;
Ok(self)
}
/// Add new service to the server.
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
where
@@ -163,9 +145,11 @@ impl ServerBuilder {
{
let sockets = bind_addr(addr, self.backlog)?;
trace!("binding server to: {:?}", &sockets);
for lst in sockets {
let token = self.token.next();
self.services.push(StreamNewService::create(
let token = self.next_token();
self.factories.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory.clone(),
@@ -174,10 +158,56 @@ impl ServerBuilder {
self.sockets
.push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
}
Ok(self)
}
#[cfg(unix)]
/// Add new service to the server.
pub fn listen<F, N: AsRef<str>>(
mut self,
name: N,
lst: StdTcpListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory<TcpStream>,
{
lst.set_nonblocking(true)?;
let addr = lst.local_addr()?;
let token = self.next_token();
self.factories.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
addr,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
/// Starts processing incoming connections and returns the server controller.
pub fn run(self) -> Server {
if self.sockets.is_empty() {
panic!("Server should have at least one bound socket");
} else {
info!("Starting {} workers", self.threads);
Server::new(self)
}
}
fn next_token(&mut self) -> usize {
let token = self.token;
self.token += 1;
token
}
}
#[cfg(unix)]
impl ServerBuilder {
/// Add new unix domain service to the server.
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
where
@@ -198,10 +228,9 @@ impl ServerBuilder {
self.listen_uds(name, lst, factory)
}
#[cfg(unix)]
/// Add new unix domain service to the server.
/// Useful when running as a systemd service and
/// a socket FD can be acquired using the systemd crate.
///
/// Useful when running as a systemd service and a socket FD is acquired externally.
pub fn listen_uds<F, N: AsRef<str>>(
mut self,
name: N,
@@ -213,9 +242,9 @@ impl ServerBuilder {
{
use std::net::{IpAddr, Ipv4Addr};
lst.set_nonblocking(true)?;
let token = self.token.next();
let token = self.next_token();
let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
self.services.push(StreamNewService::create(
self.factories.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
@@ -225,225 +254,6 @@ impl ServerBuilder {
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
/// Add new service to the server.
pub fn listen<F, N: AsRef<str>>(
mut self,
name: N,
lst: StdTcpListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory<TcpStream>,
{
lst.set_nonblocking(true)?;
let addr = lst.local_addr()?;
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
addr,
));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
#[doc(hidden)]
pub fn start(self) -> Server {
self.run()
}
/// Starts processing incoming connections and return server controller.
pub fn run(mut self) -> Server {
if self.sockets.is_empty() {
panic!("Server should have at least one bound socket");
} else {
info!("Starting {} workers", self.threads);
// start workers
let handles = (0..self.threads)
.map(|idx| {
let handle = self.start_worker(idx, self.accept.waker_owned());
self.handles.push((idx, handle.clone()));
handle
})
.collect();
// start accept thread
for sock in &self.sockets {
info!("Starting \"{}\" service on {}", sock.1, sock.2);
}
self.accept.start(
mem::take(&mut self.sockets)
.into_iter()
.map(|t| (t.0, t.2))
.collect(),
handles,
);
// handle signals
if !self.no_signals {
Signals::start(self.server.clone());
}
// start http server actor
let server = self.server.clone();
spawn(self);
server
}
}
fn start_worker(&self, idx: usize, waker: WakerQueue) -> WorkerHandle {
let avail = WorkerAvailability::new(waker);
let services = self.services.iter().map(|v| v.clone_factory()).collect();
Worker::start(idx, services, avail, self.shutdown_timeout)
}
fn handle_cmd(&mut self, item: ServerCommand) {
match item {
ServerCommand::Pause(tx) => {
self.accept.wake(WakerInterest::Pause);
let _ = tx.send(());
}
ServerCommand::Resume(tx) => {
self.accept.wake(WakerInterest::Resume);
let _ = tx.send(());
}
ServerCommand::Signal(sig) => {
// Signals support
// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
match sig {
Signal::Int => {
info!("SIGINT received, exiting");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: false,
completion: None,
})
}
Signal::Term => {
info!("SIGTERM received, stopping");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: true,
completion: None,
})
}
Signal::Quit => {
info!("SIGQUIT received, exiting");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: false,
completion: None,
})
}
_ => (),
}
}
ServerCommand::Notify(tx) => {
self.notify.push(tx);
}
ServerCommand::Stop {
graceful,
completion,
} => {
let exit = self.exit;
// stop accept thread
self.accept.wake(WakerInterest::Stop);
let notify = std::mem::take(&mut self.notify);
// stop workers
if !self.handles.is_empty() && graceful {
let iter = self
.handles
.iter()
.map(move |worker| worker.1.stop(graceful))
.collect();
let fut = join_all(iter);
spawn(async move {
let _ = fut.await;
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
if exit {
spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
});
}
})
} else {
// we need to stop system if server was spawned
if self.exit {
spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
});
}
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
}
}
ServerCommand::WorkerFaulted(idx) => {
let mut found = false;
for i in 0..self.handles.len() {
if self.handles[i].0 == idx {
self.handles.swap_remove(i);
found = true;
break;
}
}
if found {
error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.handles.len();
'found: loop {
for i in 0..self.handles.len() {
if self.handles[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
}
break;
}
let handle = self.start_worker(new_idx, self.accept.waker_owned());
self.handles.push((new_idx, handle.clone()));
self.accept.wake(WakerInterest::Worker(handle));
}
}
}
}
}
impl Future for ServerBuilder {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match Pin::new(&mut self.cmd).poll_recv(cx) {
Poll::Ready(Some(it)) => self.as_mut().get_mut().handle_cmd(it),
_ => return Poll::Pending,
}
}
}
}
pub(super) fn bind_addr<S: ToSocketAddrs>(
@@ -451,39 +261,26 @@ pub(super) fn bind_addr<S: ToSocketAddrs>(
backlog: u32,
) -> io::Result<Vec<MioTcpListener>> {
let mut err = None;
let mut succ = false;
let mut success = false;
let mut sockets = Vec::new();
for addr in addr.to_socket_addrs()? {
match create_tcp_listener(addr, backlog) {
match create_mio_tcp_listener(addr, backlog) {
Ok(lst) => {
succ = true;
success = true;
sockets.push(lst);
}
Err(e) => err = Some(e),
}
}
if !succ {
if let Some(e) = err.take() {
Err(e)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
}
} else {
if success {
Ok(sockets)
} else if let Some(err) = err.take() {
Err(err)
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"Can not bind to address.",
))
}
}
fn create_tcp_listener(addr: StdSocketAddr, backlog: u32) -> io::Result<MioTcpListener> {
let socket = match addr {
StdSocketAddr::V4(_) => MioTcpSocket::new_v4()?,
StdSocketAddr::V6(_) => MioTcpSocket::new_v6()?,
};
socket.set_reuseaddr(true)?;
socket.bind(addr)?;
socket.listen(backlog)
}
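
Taken together, the builder methods above compose into a short setup. The following is an editor's sketch (not taken from the repository) that uses only methods shown in this diff; the bind address and the trivial placeholder service are illustrative assumptions.

```rust
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .workers(2)                       // worker thread count
        .worker_max_blocking_threads(64)  // blocking pool size, per worker
        .max_concurrent_connections(1024) // per-worker connection cap
        .shutdown_timeout(60)             // grace period (seconds) for graceful stop
        .bind("example", ("127.0.0.1", 8080), || {
            // placeholder service: accept the connection and immediately finish
            fn_service(|_: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run()
        .await
}
```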

View File

@@ -1,287 +0,0 @@
use std::collections::HashMap;
use std::future::Future;
use std::{fmt, io};
use actix_rt::net::TcpStream;
use actix_service::{
fn_service, IntoServiceFactory as IntoBaseServiceFactory,
ServiceFactory as BaseServiceFactory,
};
use actix_utils::counter::CounterGuard;
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::builder::bind_addr;
use crate::service::{BoxedServerService, InternalServiceFactory, StreamService};
use crate::socket::{MioStream, MioTcpListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::{ready, Token};
pub struct ServiceConfig {
pub(crate) services: Vec<(String, MioTcpListener)>,
pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
pub(crate) threads: usize,
pub(crate) backlog: u32,
}
impl ServiceConfig {
pub(super) fn new(threads: usize, backlog: u32) -> ServiceConfig {
ServiceConfig {
threads,
backlog,
services: Vec::new(),
apply: None,
}
}
/// Set number of workers to start.
///
/// By default server uses number of available logical cpu as workers
/// count.
pub fn workers(&mut self, num: usize) {
self.threads = num;
}
/// Add new service to server
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
where
U: ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
self._listen(name.as_ref(), lst);
}
Ok(self)
}
/// Add new service to server
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: StdTcpListener) -> &mut Self {
self._listen(name, MioTcpListener::from_std(lst))
}
/// Register service configuration function. This function get called
/// during worker runtime configuration. It get executed in worker thread.
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
self.apply = Some(Box::new(f));
Ok(())
}
fn _listen<N: AsRef<str>>(&mut self, name: N, lst: MioTcpListener) -> &mut Self {
if self.apply.is_none() {
self.apply = Some(Box::new(not_configured));
}
self.services.push((name.as_ref().to_string(), lst));
self
}
}
pub(super) struct ConfiguredService {
rt: Box<dyn ServiceRuntimeConfiguration>,
names: HashMap<Token, (String, StdSocketAddr)>,
topics: HashMap<String, Token>,
services: Vec<Token>,
}
impl ConfiguredService {
pub(super) fn new(rt: Box<dyn ServiceRuntimeConfiguration>) -> Self {
ConfiguredService {
rt,
names: HashMap::new(),
topics: HashMap::new(),
services: Vec::new(),
}
}
pub(super) fn stream(&mut self, token: Token, name: String, addr: StdSocketAddr) {
self.names.insert(token, (name.clone(), addr));
self.topics.insert(name, token);
self.services.push(token);
}
}
impl InternalServiceFactory for ConfiguredService {
fn name(&self, token: Token) -> &str {
&self.names[&token].0
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
rt: self.rt.clone(),
names: self.names.clone(),
topics: self.topics.clone(),
services: self.services.clone(),
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
// configure services
let mut rt = ServiceRuntime::new(self.topics.clone());
self.rt.configure(&mut rt);
rt.validate();
let mut names = self.names.clone();
let tokens = self.services.clone();
// construct services
Box::pin(async move {
let mut services = rt.services;
// TODO: Proper error handling here
for f in rt.onstart.into_iter() {
f.await;
}
let mut res = vec![];
for token in tokens {
if let Some(srv) = services.remove(&token) {
let newserv = srv.new_service(());
match newserv.await {
Ok(serv) => {
res.push((token, serv));
}
Err(_) => {
error!("Can not construct service");
return Err(());
}
}
} else {
let name = names.remove(&token).unwrap().0;
res.push((
token,
Box::new(StreamService::new(fn_service(move |_: TcpStream| {
error!("Service {:?} is not configured", name);
ready::<Result<_, ()>>(Ok(()))
}))),
));
};
}
Ok(res)
})
}
}
pub(super) trait ServiceRuntimeConfiguration: Send {
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration>;
fn configure(&self, rt: &mut ServiceRuntime);
}
impl<F> ServiceRuntimeConfiguration for F
where
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
{
fn clone(&self) -> Box<dyn ServiceRuntimeConfiguration> {
Box::new(self.clone())
}
fn configure(&self, rt: &mut ServiceRuntime) {
(self)(rt)
}
}
fn not_configured(_: &mut ServiceRuntime) {
error!("Service is not configured");
}
pub struct ServiceRuntime {
names: HashMap<String, Token>,
services: HashMap<Token, BoxedNewService>,
onstart: Vec<LocalBoxFuture<'static, ()>>,
}
impl ServiceRuntime {
fn new(names: HashMap<String, Token>) -> Self {
ServiceRuntime {
names,
services: HashMap::new(),
onstart: Vec::new(),
}
}
fn validate(&self) {
for (name, token) in &self.names {
if !self.services.contains_key(&token) {
error!("Service {:?} is not configured", name);
}
}
}
/// Register service.
///
/// Name of the service must be registered during configuration stage with
/// *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
pub fn service<T, F>(&mut self, name: &str, service: F)
where
F: IntoBaseServiceFactory<T, TcpStream>,
T: BaseServiceFactory<TcpStream, Config = ()> + 'static,
T::Future: 'static,
T::Service: 'static,
T::InitError: fmt::Debug,
{
// let name = name.to_owned();
if let Some(token) = self.names.get(name) {
self.services.insert(
*token,
Box::new(ServiceFactory {
inner: service.into_factory(),
}),
);
} else {
panic!("Unknown service: {:?}", name);
}
}
/// Execute future before services initialization.
pub fn on_start<F>(&mut self, fut: F)
where
F: Future<Output = ()> + 'static,
{
self.onstart.push(Box::pin(fut))
}
}
type BoxedNewService = Box<
dyn BaseServiceFactory<
(Option<CounterGuard>, MioStream),
Response = (),
Error = (),
InitError = (),
Config = (),
Service = BoxedServerService,
Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>,
>,
>;
struct ServiceFactory<T> {
inner: T,
}
impl<T> BaseServiceFactory<(Option<CounterGuard>, MioStream)> for ServiceFactory<T>
where
T: BaseServiceFactory<TcpStream, Config = ()>,
T::Future: 'static,
T::Service: 'static,
T::Error: 'static,
T::InitError: fmt::Debug + 'static,
{
type Response = ();
type Error = ();
type Config = ();
type Service = BoxedServerService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>;
fn new_service(&self, _: ()) -> Self::Future {
let fut = self.inner.new_service(());
Box::pin(async move {
match fut.await {
Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
Err(e) => {
error!("Can not construct service: {:?}", e);
Err(())
}
}
})
}
}

View File

@@ -0,0 +1,55 @@
use std::future::Future;
use tokio::sync::{mpsc::UnboundedSender, oneshot};
use crate::server::ServerCommand;
/// Server handle.
#[derive(Debug, Clone)]
pub struct ServerHandle {
cmd_tx: UnboundedSender<ServerCommand>,
}
impl ServerHandle {
pub(crate) fn new(cmd_tx: UnboundedSender<ServerCommand>) -> Self {
ServerHandle { cmd_tx }
}
pub(crate) fn worker_faulted(&self, idx: usize) {
let _ = self.cmd_tx.send(ServerCommand::WorkerFaulted(idx));
}
/// Pause accepting incoming connections.
///
/// May drop pending connections on the socket. All open connections remain active.
pub fn pause(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.cmd_tx.send(ServerCommand::Pause(tx));
async {
let _ = rx.await;
}
}
/// Resume accepting incoming connections.
pub fn resume(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.cmd_tx.send(ServerCommand::Resume(tx));
async {
let _ = rx.await;
}
}
/// Stop incoming connection processing, stop all workers and exit.
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.cmd_tx.send(ServerCommand::Stop {
graceful,
completion: Some(tx),
});
async {
let _ = rx.await;
}
}
}
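
As a usage sketch (an editor's addition, assuming the public `Server`/`ServerBuilder` API shown elsewhere in this diff), a `ServerHandle` obtained from a running server can pause, resume, and finally stop it:

```rust
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    let server = Server::build()
        .bind("example", ("127.0.0.1", 0), || {
            fn_service(|_: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run();

    // The handle is cheap to clone and can be moved to other tasks.
    let handle = server.handle();

    actix_rt::spawn(async move {
        handle.pause().await;    // stop accepting new connections
        handle.resume().await;   // accept connections again
        handle.stop(true).await; // request a graceful shutdown
    });

    // Awaiting the server drives it; it resolves once shutdown completes.
    server.await
}
```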

View File

@@ -0,0 +1,78 @@
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use futures_core::future::BoxFuture;
// a poor man's join future. joined future is only used when starting/stopping the server.
// pin_project and pinned futures are overkill for this task.
pub(crate) struct JoinAll<T> {
fut: Vec<JoinFuture<T>>,
}
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + Send + 'static>) -> JoinAll<T> {
let fut = fut
.into_iter()
.map(|f| JoinFuture::Future(Box::pin(f)))
.collect();
JoinAll { fut }
}
enum JoinFuture<T> {
Future(BoxFuture<'static, T>),
Result(Option<T>),
}
impl<T> Unpin for JoinAll<T> {}
impl<T> Future for JoinAll<T> {
type Output = Vec<T>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut ready = true;
let this = self.get_mut();
for fut in this.fut.iter_mut() {
if let JoinFuture::Future(f) = fut {
match f.as_mut().poll(cx) {
Poll::Ready(t) => {
*fut = JoinFuture::Result(Some(t));
}
Poll::Pending => ready = false,
}
}
}
if ready {
let mut res = Vec::new();
for fut in this.fut.iter_mut() {
if let JoinFuture::Result(f) = fut {
res.push(f.take().unwrap());
}
}
Poll::Ready(res)
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod test {
use super::*;
use actix_utils::future::ready;
#[actix_rt::test]
async fn test_join_all() {
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
let mut res = join_all(futs).await.into_iter();
assert_eq!(Ok(1), res.next().unwrap());
assert_eq!(Err(3), res.next().unwrap());
assert_eq!(Ok(9), res.next().unwrap());
}
}

View File

@@ -5,8 +5,10 @@
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod accept;
mod availability;
mod builder;
mod config;
mod handle;
mod join_all;
mod server;
mod service;
mod signals;
@@ -16,7 +18,7 @@ mod waker_queue;
mod worker;
pub use self::builder::ServerBuilder;
pub use self::config::{ServiceConfig, ServiceRuntime};
pub use self::handle::ServerHandle;
pub use self::server::Server;
pub use self::service::ServiceFactory;
pub use self::test_server::TestServer;
@@ -24,120 +26,9 @@ pub use self::test_server::TestServer;
#[doc(hidden)]
pub use self::socket::FromStream;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Socket ID token
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) struct Token(usize);
impl Default for Token {
fn default() -> Self {
Self::new()
}
}
impl Token {
fn new() -> Self {
Self(0)
}
pub(crate) fn next(&mut self) -> Token {
let token = Token(self.0);
self.0 += 1;
token
}
}
/// Start server building process
#[doc(hidden)]
#[deprecated(since = "2.0.0", note = "Use `Server::build()`.")]
pub fn new() -> ServerBuilder {
ServerBuilder::default()
}
// temporary Ready type for std::future::{ready, Ready}; Can be removed when MSRV surpass 1.48
#[doc(hidden)]
pub struct Ready<T>(Option<T>);
pub(crate) fn ready<T>(t: T) -> Ready<T> {
Ready(Some(t))
}
impl<T> Unpin for Ready<T> {}
impl<T> Future for Ready<T> {
type Output = T;
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(self.get_mut().0.take().unwrap())
}
}
// a poor man's join future. joined future is only used when starting/stopping the server.
// pin_project and pinned futures are overkill for this task.
pub(crate) struct JoinAll<T> {
fut: Vec<JoinFuture<T>>,
}
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + 'static>) -> JoinAll<T> {
let fut = fut
.into_iter()
.map(|f| JoinFuture::Future(Box::pin(f)))
.collect();
JoinAll { fut }
}
enum JoinFuture<T> {
Future(Pin<Box<dyn Future<Output = T>>>),
Result(Option<T>),
}
impl<T> Unpin for JoinAll<T> {}
impl<T> Future for JoinAll<T> {
type Output = Vec<T>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut ready = true;
let this = self.get_mut();
for fut in this.fut.iter_mut() {
if let JoinFuture::Future(f) = fut {
match f.as_mut().poll(cx) {
Poll::Ready(t) => {
*fut = JoinFuture::Result(Some(t));
}
Poll::Pending => ready = false,
}
}
}
if ready {
let mut res = Vec::new();
for fut in this.fut.iter_mut() {
if let JoinFuture::Result(f) = fut {
res.push(f.take().unwrap());
}
}
Poll::Ready(res)
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[actix_rt::test]
async fn test_join_all() {
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
let mut res = join_all(futs).await.into_iter();
assert_eq!(Ok(1), res.next().unwrap());
assert_eq!(Err(3), res.next().unwrap());
assert_eq!(Ok(9), res.next().unwrap());
}
}

View File

@@ -1,112 +1,359 @@
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{
future::Future,
io, mem,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot;
use actix_rt::{time::sleep, System};
use futures_core::future::BoxFuture;
use log::{error, info};
use tokio::sync::{
mpsc::{UnboundedReceiver, UnboundedSender},
oneshot,
};
use crate::builder::ServerBuilder;
use crate::signals::Signal;
use crate::{
accept::Accept,
builder::ServerBuilder,
join_all::join_all,
service::InternalServiceFactory,
signals::{Signal, Signals},
waker_queue::{WakerInterest, WakerQueue},
worker::{ServerWorker, ServerWorkerConfig, WorkerHandleServer},
ServerHandle,
};
#[derive(Debug)]
pub(crate) enum ServerCommand {
/// TODO
WorkerFaulted(usize),
/// Contains return channel to notify caller of successful state change.
Pause(oneshot::Sender<()>),
/// Contains return channel to notify caller of successful state change.
Resume(oneshot::Sender<()>),
Signal(Signal),
/// Whether to try and shut down gracefully
/// TODO
Stop {
/// True if shut down should be graceful.
graceful: bool,
/// Return channel to notify caller that shutdown is complete.
completion: Option<oneshot::Sender<()>>,
},
/// Notify of server stop
Notify(oneshot::Sender<()>),
}
#[derive(Debug)]
pub struct Server(
UnboundedSender<ServerCommand>,
Option<oneshot::Receiver<()>>,
);
/// General purpose TCP server that runs services receiving Tokio `TcpStream`s.
///
/// Handles creating worker threads, restarting faulted workers, connection accepting, and
/// back-pressure logic.
///
/// Creates a worker per CPU core (or the number specified in [`ServerBuilder::workers`]) and
/// distributes connections with a round-robin strategy.
///
/// The [Server] must be awaited to process stop commands and listen for OS signals. It will resolve
/// when the server has fully shut down.
///
/// # Shutdown Signals
/// On UNIX systems, `SIGTERM` will start a graceful shutdown and `SIGQUIT` or `SIGINT` will start a
/// forced shutdown. On Windows, a Ctrl-C signal will start a forced shutdown.
///
/// A graceful shutdown will wait for all workers to stop first.
///
/// # Examples
/// The following is a TCP echo server. Test using `telnet 127.0.0.1 8080`.
///
/// ```no_run
/// use std::io;
///
/// use actix_rt::net::TcpStream;
/// use actix_server::Server;
/// use actix_service::{fn_service, ServiceFactoryExt as _};
/// use bytes::BytesMut;
/// use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
///
/// #[actix_rt::main]
/// async fn main() -> io::Result<()> {
/// let bind_addr = ("127.0.0.1", 8080);
///
/// Server::build()
/// .bind("echo", bind_addr, move || {
/// fn_service(move |mut stream: TcpStream| {
/// async move {
/// let mut size = 0;
/// let mut buf = BytesMut::new();
///
/// loop {
/// match stream.read_buf(&mut buf).await {
/// // end of stream; bail from loop
/// Ok(0) => break,
///
/// // write bytes back to stream
/// Ok(bytes_read) => {
/// stream.write_all(&buf[size..]).await.unwrap();
/// size += bytes_read;
/// }
///
/// Err(err) => {
/// eprintln!("Stream Error: {:?}", err);
/// return Err(());
/// }
/// }
/// }
///
/// Ok(())
/// }
/// })
/// .map_err(|err| eprintln!("Service Error: {:?}", err))
/// })?
/// .run()
/// .await
/// }
/// ```
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub enum Server {
Server(ServerInner),
Error(Option<io::Error>),
}
impl Server {
pub(crate) fn new(tx: UnboundedSender<ServerCommand>) -> Self {
Server(tx, None)
}
/// Start server building process
/// Create server build.
pub fn build() -> ServerBuilder {
ServerBuilder::default()
}
pub(crate) fn signal(&self, sig: Signal) {
let _ = self.0.send(ServerCommand::Signal(sig));
pub(crate) fn new(mut builder: ServerBuilder) -> Self {
let sockets = mem::take(&mut builder.sockets)
.into_iter()
.map(|t| (t.0, t.2))
.collect();
// Give log information on what runtime will be used.
let is_actix = actix_rt::System::try_current().is_some();
let is_tokio = tokio::runtime::Handle::try_current().is_ok();
match (is_actix, is_tokio) {
(false, true) => info!("Tokio runtime found. Starting in existing Tokio runtime"),
(true, _) => info!("Actix runtime found. Starting in Actix runtime"),
(_, _) => info!(
"Actix/Tokio runtime not found. Starting in newt Tokio current-thread runtime"
),
}
for (_, name, lst) in &builder.sockets {
info!(
r#"Starting service: "{}", workers: {}, listening on: {}"#,
name,
builder.threads,
lst.local_addr()
);
}
match Accept::start(sockets, &builder) {
Ok((waker_queue, worker_handles)) => {
// construct OS signals listener future
let signals = (builder.listen_os_signals).then(Signals::new);
Self::Server(ServerInner {
cmd_tx: builder.cmd_tx.clone(),
cmd_rx: builder.cmd_rx,
signals,
waker_queue,
worker_handles,
worker_config: builder.worker_config,
services: builder.factories,
exit: builder.exit,
stop_task: None,
})
}
Err(err) => Self::Error(Some(err)),
}
}
pub(crate) fn worker_faulted(&self, idx: usize) {
let _ = self.0.send(ServerCommand::WorkerFaulted(idx));
}
/// Pause accepting incoming connections
/// Get a [`ServerHandle`] that can be used to change the state of the running server.
///
/// If socket contains some pending connection, they might be dropped.
/// All opened connection remains active.
pub fn pause(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.send(ServerCommand::Pause(tx));
async {
let _ = rx.await;
/// See [ServerHandle](ServerHandle) for usage.
pub fn handle(&self) -> ServerHandle {
match self {
Server::Server(inner) => ServerHandle::new(inner.cmd_tx.clone()),
Server::Error(err) => {
// TODO: i don't think this is the best way to handle server startup fail
panic!(
"server handle can not be obtained because server failed to start up: {}",
err.as_ref().unwrap()
);
}
}
}
/// Resume accepting incoming connections
pub fn resume(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.send(ServerCommand::Resume(tx));
async {
let _ = rx.await;
}
}
/// Stop incoming connection processing, stop all workers and exit.
///
/// If server starts with `spawn()` method, then spawned thread get terminated.
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.send(ServerCommand::Stop {
graceful,
completion: Some(tx),
});
async {
let _ = rx.await;
}
}
}
impl Clone for Server {
fn clone(&self) -> Self {
Self(self.0.clone(), None)
}
}
impl Future for Server {
type Output = io::Result<()>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.as_mut().get_mut() {
Self::Error(err) => Poll::Ready(Err(err
.take()
.expect("Server future cannot be polled after error"))),
if this.1.is_none() {
let (tx, rx) = oneshot::channel();
if this.0.send(ServerCommand::Notify(tx)).is_err() {
return Poll::Ready(Ok(()));
Self::Server(inner) => {
// poll Signals
if let Some(ref mut signals) = inner.signals {
if let Poll::Ready(signal) = Pin::new(signals).poll(cx) {
inner.stop_task = inner.handle_signal(signal);
// drop signals listener
inner.signals = None;
}
}
// handle stop tasks and eager drain command channel
loop {
if let Some(ref mut fut) = inner.stop_task {
// only resolve stop task and exit
return fut.as_mut().poll(cx).map(|_| Ok(()));
}
match Pin::new(&mut inner.cmd_rx).poll_recv(cx) {
Poll::Ready(Some(cmd)) => {
// if stop task is required, set it and loop
inner.stop_task = inner.handle_cmd(cmd);
}
_ => return Poll::Pending,
}
}
}
}
}
}
pub struct ServerInner {
worker_handles: Vec<WorkerHandleServer>,
worker_config: ServerWorkerConfig,
services: Vec<Box<dyn InternalServiceFactory>>,
exit: bool,
cmd_tx: UnboundedSender<ServerCommand>,
cmd_rx: UnboundedReceiver<ServerCommand>,
signals: Option<Signals>,
waker_queue: WakerQueue,
stop_task: Option<BoxFuture<'static, ()>>,
}
impl ServerInner {
fn handle_cmd(&mut self, item: ServerCommand) -> Option<BoxFuture<'static, ()>> {
match item {
ServerCommand::Pause(tx) => {
self.waker_queue.wake(WakerInterest::Pause);
let _ = tx.send(());
None
}
ServerCommand::Resume(tx) => {
self.waker_queue.wake(WakerInterest::Resume);
let _ = tx.send(());
None
}
ServerCommand::Stop {
graceful,
completion,
} => {
let exit = self.exit;
// stop accept thread
self.waker_queue.wake(WakerInterest::Stop);
// stop workers
let workers_stop = self
.worker_handles
.iter()
.map(|worker| worker.stop(graceful))
.collect::<Vec<_>>();
Some(Box::pin(async move {
if graceful {
// wait for all workers to shut down
let _ = join_all(workers_stop).await;
}
if let Some(tx) = completion {
let _ = tx.send(());
}
if exit {
sleep(Duration::from_millis(300)).await;
System::try_current().as_ref().map(System::stop);
}
}))
}
ServerCommand::WorkerFaulted(idx) => {
// TODO: maybe just return with warning log if not found ?
assert!(self.worker_handles.iter().any(|wrk| wrk.idx == idx));
error!("Worker {} has died; restarting", idx);
let factories = self
.services
.iter()
.map(|service| service.clone_factory())
.collect();
match ServerWorker::start(
idx,
factories,
self.waker_queue.clone(),
self.worker_config,
) {
Ok((handle_accept, handle_server)) => {
*self
.worker_handles
.iter_mut()
.find(|wrk| wrk.idx == idx)
.unwrap() = handle_server;
self.waker_queue.wake(WakerInterest::Worker(handle_accept));
}
Err(err) => error!("can not restart worker {}: {}", idx, err),
};
None
}
}
}
fn handle_signal(&mut self, signal: Signal) -> Option<BoxFuture<'static, ()>> {
match signal {
Signal::Int => {
info!("SIGINT received; starting forced shutdown");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: false,
completion: None,
})
}
Signal::Term => {
info!("SIGTERM received; starting graceful shutdown");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: true,
completion: None,
})
}
Signal::Quit => {
info!("SIGQUIT received; starting forced shutdown");
self.exit = true;
self.handle_cmd(ServerCommand::Stop {
graceful: false,
completion: None,
})
}
this.1 = Some(rx);
}
match Pin::new(this.1.as_mut().unwrap()).poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(_) => Poll::Ready(Ok(())),
}
}
}

View File

@@ -3,12 +3,12 @@ use std::net::SocketAddr;
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory as BaseServiceFactory};
use actix_utils::counter::CounterGuard;
use actix_utils::future::{ready, Ready};
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::socket::{FromStream, MioStream};
use crate::{ready, Ready, Token};
use crate::worker::WorkerCounterGuard;
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
type Factory: BaseServiceFactory<Stream, Config = ()>;
@@ -17,16 +17,16 @@ pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
}
pub(crate) trait InternalServiceFactory: Send {
fn name(&self, token: Token) -> &str;
fn name(&self, token: usize) -> &str;
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>;
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>;
}
pub(crate) type BoxedServerService = Box<
dyn Service<
(Option<CounterGuard>, MioStream),
(WorkerCounterGuard, MioStream),
Response = (),
Error = (),
Future = Ready<Result<(), ()>>,
@@ -47,7 +47,7 @@ impl<S, I> StreamService<S, I> {
}
}
impl<S, I> Service<(Option<CounterGuard>, MioStream)> for StreamService<S, I>
impl<S, I> Service<(WorkerCounterGuard, MioStream)> for StreamService<S, I>
where
S: Service<I>,
S::Future: 'static,
@@ -58,11 +58,11 @@ where
type Error = ();
type Future = Ready<Result<(), ()>>;
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(|_| ())
}
fn call(&mut self, (guard, req): (Option<CounterGuard>, MioStream)) -> Self::Future {
fn call(&self, (guard, req): (WorkerCounterGuard, MioStream)) -> Self::Future {
ready(match FromStream::from_mio(req) {
Ok(stream) => {
let f = self.service.call(stream);
@@ -83,7 +83,7 @@ where
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
name: String,
inner: F,
token: Token,
token: usize,
addr: SocketAddr,
_t: PhantomData<Io>,
}
@@ -95,7 +95,7 @@ where
{
pub(crate) fn create(
name: String,
token: Token,
token: usize,
inner: F,
addr: SocketAddr,
) -> Box<dyn InternalServiceFactory> {
@@ -114,7 +114,7 @@ where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
fn name(&self, _: Token) -> &str {
fn name(&self, _: usize) -> &str {
&self.name
}
@@ -128,14 +128,14 @@ where
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
fn create(&self) -> LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>> {
let token = self.token;
let fut = self.inner.create().new_service(());
Box::pin(async move {
match fut.await {
Ok(inner) => {
let service = Box::new(StreamService::new(inner)) as _;
Ok(vec![(token, service)])
Ok((token, service))
}
Err(_) => Err(()),
}

View File

@@ -1,97 +1,108 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{
fmt,
future::Future,
pin::Pin,
task::{Context, Poll},
};
use futures_core::future::LocalBoxFuture;
use log::trace;
use crate::server::Server;
/// Different types of process signals
#[allow(dead_code)]
#[derive(PartialEq, Clone, Copy, Debug)]
/// Types of process signals.
// #[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq)]
#[allow(dead_code)] // variants are never constructed on non-unix
pub(crate) enum Signal {
/// SIGHUP
Hup,
/// SIGINT
/// `SIGINT`
Int,
/// SIGTERM
/// `SIGTERM`
Term,
/// SIGQUIT
/// `SIGQUIT`
Quit,
}
impl fmt::Display for Signal {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Signal::Int => "SIGINT",
Signal::Term => "SIGTERM",
Signal::Quit => "SIGQUIT",
})
}
}
/// Process signal listener.
pub(crate) struct Signals {
srv: Server,
#[cfg(not(unix))]
signals: LocalBoxFuture<'static, std::io::Result<()>>,
signals: futures_core::future::BoxFuture<'static, std::io::Result<()>>,
#[cfg(unix)]
signals: Vec<(Signal, LocalBoxFuture<'static, ()>)>,
signals: Vec<(Signal, actix_rt::signal::unix::Signal)>,
}
impl Signals {
pub(crate) fn start(srv: Server) {
/// Constructs an OS signal listening future.
pub(crate) fn new() -> Self {
trace!("setting up OS signal listener");
#[cfg(not(unix))]
{
actix_rt::spawn(Signals {
srv,
Signals {
signals: Box::pin(actix_rt::signal::ctrl_c()),
});
}
}
#[cfg(unix)]
{
use actix_rt::signal::unix;
let sig_map = [
(unix::SignalKind::interrupt(), Signal::Int),
(unix::SignalKind::hangup(), Signal::Hup),
(unix::SignalKind::terminate(), Signal::Term),
(unix::SignalKind::quit(), Signal::Quit),
];
let mut signals = Vec::new();
let signals = sig_map
.iter()
.filter_map(|(kind, sig)| {
unix::signal(*kind)
.map(|tokio_sig| (*sig, tokio_sig))
.map_err(|e| {
log::error!(
"Can not initialize stream handler for {:?} err: {}",
sig,
e
)
})
.ok()
})
.collect::<Vec<_>>();
for (kind, sig) in sig_map.iter() {
match unix::signal(*kind) {
Ok(mut stream) => {
let fut = Box::pin(async move {
let _ = stream.recv().await;
}) as _;
signals.push((*sig, fut));
}
Err(e) => log::error!(
"Can not initialize stream handler for {:?} err: {}",
sig,
e
),
}
}
actix_rt::spawn(Signals { srv, signals });
Signals { signals }
}
}
}
impl Future for Signals {
type Output = ();
type Output = Signal;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
#[cfg(not(unix))]
match self.signals.as_mut().poll(cx) {
Poll::Ready(_) => {
self.srv.signal(Signal::Int);
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
{
self.signals.as_mut().poll(cx).map(|_| Signal::Int)
}
#[cfg(unix)]
{
for (sig, fut) in self.signals.iter_mut() {
if fut.as_mut().poll(cx).is_ready() {
let sig = *sig;
self.srv.signal(sig);
return Poll::Ready(());
// TODO: match on if let Some ?
if Pin::new(fut).poll_recv(cx).is_ready() {
trace!("{} received", sig);
return Poll::Ready(*sig);
}
}
Poll::Pending
}
}
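
For callers that opt out of this built-in listener with `ServerBuilder::disable_signals`, shutdown can be driven manually through the handle instead. An editor's sketch, reusing the same `actix_rt::signal::ctrl_c` listener that the non-unix path above relies on; the placeholder service and bind address are assumptions:

```rust
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    let server = Server::build()
        .disable_signals() // skip the built-in SIGINT/SIGTERM/SIGQUIT listener
        .bind("example", ("127.0.0.1", 0), || {
            fn_service(|_: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .run();

    let handle = server.handle();

    // Listen for Ctrl-C ourselves, then request a graceful stop.
    actix_rt::spawn(async move {
        let _ = actix_rt::signal::ctrl_c().await;
        handle.stop(true).await;
    });

    server.await
}
```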

View File

@@ -2,7 +2,7 @@ pub(crate) use std::net::{
SocketAddr as StdSocketAddr, TcpListener as StdTcpListener, ToSocketAddrs,
};
pub(crate) use mio::net::{TcpListener as MioTcpListener, TcpSocket as MioTcpSocket};
pub(crate) use mio::net::TcpListener as MioTcpListener;
#[cfg(unix)]
pub(crate) use {
mio::net::UnixListener as MioUnixListener,
@@ -12,18 +12,7 @@ pub(crate) use {
use std::{fmt, io};
use actix_rt::net::TcpStream;
use mio::event::Source;
use mio::net::TcpStream as MioTcpStream;
use mio::{Interest, Registry, Token};
#[cfg(windows)]
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
#[cfg(unix)]
use {
actix_rt::net::UnixStream,
mio::net::{SocketAddr as MioSocketAddr, UnixStream as MioUnixStream},
std::os::unix::io::{FromRawFd, IntoRawFd},
};
use mio::{event::Source, Interest, Registry, Token};
pub(crate) enum MioListener {
Tcp(MioTcpListener),
@@ -34,21 +23,23 @@ pub(crate) enum MioListener {
impl MioListener {
pub(crate) fn local_addr(&self) -> SocketAddr {
match *self {
MioListener::Tcp(ref lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
MioListener::Tcp(ref lst) => lst
.local_addr()
.map(SocketAddr::Tcp)
.unwrap_or(SocketAddr::Unknown),
#[cfg(unix)]
MioListener::Uds(ref lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
MioListener::Uds(ref lst) => lst
.local_addr()
.map(SocketAddr::Uds)
.unwrap_or(SocketAddr::Unknown),
}
}
pub(crate) fn accept(&self) -> io::Result<Option<(MioStream, SocketAddr)>> {
pub(crate) fn accept(&self) -> io::Result<MioStream> {
match *self {
MioListener::Tcp(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Tcp(stream), SocketAddr::Tcp(addr)))),
MioListener::Tcp(ref lst) => lst.accept().map(|(stream, _)| MioStream::Tcp(stream)),
#[cfg(unix)]
MioListener::Uds(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Uds(stream), SocketAddr::Uds(addr)))),
MioListener::Uds(ref lst) => lst.accept().map(|(stream, _)| MioStream::Uds(stream)),
}
}
}
@@ -125,25 +116,27 @@ impl fmt::Debug for MioListener {
impl fmt::Display for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
#[cfg(unix)]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
}
}
}
pub(crate) enum SocketAddr {
Unknown,
Tcp(StdSocketAddr),
#[cfg(unix)]
Uds(MioSocketAddr),
Uds(mio::net::SocketAddr),
}
impl fmt::Display for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
Self::Unknown => write!(f, "Unknown SocketAddr"),
Self::Tcp(ref addr) => write!(f, "{}", addr),
#[cfg(unix)]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
Self::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
@@ -151,18 +144,19 @@ impl fmt::Display for SocketAddr {
impl fmt::Debug for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
Self::Unknown => write!(f, "Unknown SocketAddr"),
Self::Tcp(ref addr) => write!(f, "{:?}", addr),
#[cfg(unix)]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
Self::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
#[derive(Debug)]
pub enum MioStream {
Tcp(MioTcpStream),
Tcp(mio::net::TcpStream),
#[cfg(unix)]
Uds(MioUnixStream),
Uds(mio::net::UnixStream),
}
/// helper trait for converting mio stream to tokio stream.
@@ -170,50 +164,79 @@ pub trait FromStream: Sized {
fn from_mio(sock: MioStream) -> io::Result<Self>;
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(unix)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from mio stream to tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(windows)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is an in-place conversion from mio stream to tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
mod win_impl {
use super::*;
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is an in-place conversion from mio stream to tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
}
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
#[cfg(unix)]
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from mio stream to tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
mod unix_impl {
use super::*;
use std::os::unix::io::{FromRawFd, IntoRawFd};
use actix_rt::net::UnixStream;
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from mio stream to tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
}
}
}
// FIXME: This is a workaround and we need an efficient way to convert between mio and tokio stream
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from mio stream to tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
}
}
}
}
pub(crate) fn create_mio_tcp_listener(
addr: StdSocketAddr,
backlog: u32,
) -> io::Result<MioTcpListener> {
use socket2::{Domain, Protocol, Socket, Type};
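// Build and configure the socket with `socket2` (replacing the removed `mio::net::TcpSocket`
// helper), then convert it through a std listener into a mio listener below.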
let socket = Socket::new(Domain::for_address(addr), Type::STREAM, Some(Protocol::TCP))?;
socket.set_reuse_address(true)?;
socket.set_nonblocking(true)?;
socket.bind(&addr.into())?;
socket.listen(backlog as i32)?;
Ok(MioTcpListener::from_std(StdTcpListener::from(socket)))
}
#[cfg(test)]
@@ -227,11 +250,8 @@ mod tests {
assert_eq!(format!("{}", addr), "127.0.0.1:8080");
let addr: StdSocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = MioTcpSocket::new_v4().unwrap();
socket.set_reuseaddr(true).unwrap();
socket.bind(addr).unwrap();
let tcp = socket.listen(128).unwrap();
let lst = MioListener::Tcp(tcp);
let lst = create_mio_tcp_listener(addr, 128).unwrap();
let lst = MioListener::Tcp(lst);
assert!(format!("{:?}", lst).contains("TcpListener"));
assert!(format!("{}", lst).contains("127.0.0.1"));
}

View File

@@ -1,18 +1,17 @@
use std::sync::mpsc;
use std::{net, thread};
use std::{io, net, thread};
use actix_rt::{net::TcpStream, System};
use crate::{Server, ServerBuilder, ServiceFactory};
use crate::{Server, ServerBuilder, ServerHandle, ServiceFactory};
/// The `TestServer` type.
/// A testing server.
///
/// `TestServer` is very simple test server that simplify process of writing
/// integration tests for actix-net applications.
/// `TestServer` is a very simple test server that simplifies the process of writing integration
/// tests for network applications.
///
/// # Examples
///
/// ```rust
/// ```
/// use actix_service::fn_service;
/// use actix_server::TestServer;
///
@@ -35,11 +34,12 @@ pub struct TestServerRuntime {
addr: net::SocketAddr,
host: String,
port: u16,
system: System,
server_handle: ServerHandle,
thread_handle: Option<thread::JoinHandle<io::Result<()>>>,
}
impl TestServer {
/// Start new server with server builder
/// Start new server with server builder.
pub fn start<F>(mut factory: F) -> TestServerRuntime
where
F: FnMut(ServerBuilder) -> ServerBuilder + Send + 'static,
@@ -47,62 +47,63 @@ impl TestServer {
let (tx, rx) = mpsc::channel();
// run server in separate thread
thread::spawn(move || {
let sys = System::new("actix-test-server");
factory(Server::build())
.workers(1)
.disable_signals()
.start();
tx.send(System::current()).unwrap();
sys.run()
let thread_handle = thread::spawn(move || {
System::new().block_on(async {
let server = factory(Server::build()).workers(1).disable_signals().run();
tx.send(server.handle()).unwrap();
server.await
})
});
let system = rx.recv().unwrap();
let server_handle = rx.recv().unwrap();
TestServerRuntime {
system,
addr: "127.0.0.1:0".parse().unwrap(),
host: "127.0.0.1".to_string(),
port: 0,
server_handle,
thread_handle: Some(thread_handle),
}
}
/// Start new test server with application factory
/// Start new test server with application factory.
pub fn with<F: ServiceFactory<TcpStream>>(factory: F) -> TestServerRuntime {
let (tx, rx) = mpsc::channel();
// run server in separate thread
thread::spawn(move || {
let sys = System::new("actix-test-server");
let thread_handle = thread::spawn(move || {
let sys = System::new();
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
sys.block_on(async {
Server::build()
let server = Server::build()
.listen("test", tcp, factory)
.unwrap()
.workers(1)
.disable_signals()
.start();
tx.send((System::current(), local_addr)).unwrap();
});
sys.run()
.run();
tx.send((server.handle(), local_addr)).unwrap();
server.await
})
});
let (system, addr) = rx.recv().unwrap();
let (server_handle, addr) = rx.recv().unwrap();
let host = format!("{}", addr.ip());
let port = addr.port();
TestServerRuntime {
system,
addr,
host,
port,
server_handle,
thread_handle: Some(thread_handle),
}
}
/// Get first available unused local address
/// Get first available unused local address.
pub fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = mio::net::TcpSocket::new_v4().unwrap();
@@ -114,27 +115,28 @@ impl TestServer {
}
impl TestServerRuntime {
/// Test server host
/// Test server host.
pub fn host(&self) -> &str {
&self.host
}
/// Test server port
/// Test server port.
pub fn port(&self) -> u16 {
self.port
}
/// Get test server address
/// Get test server address.
pub fn addr(&self) -> net::SocketAddr {
self.addr
}
/// Stop http server
/// Stop server.
fn stop(&mut self) {
self.system.stop();
let _ = self.server_handle.stop(false);
self.thread_handle.take().unwrap().join().unwrap().unwrap();
}
/// Connect to server, return tokio TcpStream
/// Connect to server, returning a Tokio `TcpStream`.
pub fn connect(&self) -> std::io::Result<TcpStream> {
TcpStream::from_std(net::TcpStream::connect(self.addr)?)
}
@@ -145,3 +147,16 @@ impl Drop for TestServerRuntime {
self.stop()
}
}
#[cfg(test)]
mod tests {
use actix_service::fn_service;
use super::*;
#[tokio::test]
async fn plain_tokio_runtime() {
let srv = TestServer::with(|| fn_service(|_sock| async move { Ok::<_, ()>(()) }));
assert!(srv.connect().is_ok());
}
}

View File

@@ -6,9 +6,9 @@ use std::{
use mio::{Registry, Token as MioToken, Waker};
use crate::worker::WorkerHandle;
use crate::worker::WorkerHandleAccept;
/// waker token for `mio::Poll` instance
/// Waker token for `mio::Poll` instance.
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);
/// `mio::Waker` with a queue for waking up the `Accept`'s `Poll` and contains the `WakerInterest`
@@ -30,7 +30,7 @@ impl Deref for WakerQueue {
}
impl WakerQueue {
/// construct a waker queue with given `Poll`'s `Registry` and capacity.
/// Construct a waker queue with given `Poll`'s `Registry` and capacity.
///
/// A fixed `WAKER_TOKEN` is used to identify the wake interest and the `Poll` needs to match
/// event's token for it to properly handle `WakerInterest`.
@@ -41,7 +41,7 @@ impl WakerQueue {
Ok(Self(Arc::new((waker, queue))))
}
/// push a new interest to the queue and wake up the accept poll afterwards.
/// Push a new interest to the queue and wake up the accept poll afterwards.
pub(crate) fn wake(&self, interest: WakerInterest) {
let (waker, queue) = self.deref();
@@ -55,35 +55,30 @@ impl WakerQueue {
.unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e));
}
/// get a MutexGuard of the waker queue.
/// Get a MutexGuard of the waker queue.
pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
self.deref().1.lock().expect("Failed to lock WakerQueue")
}
/// reset the waker queue so it does not grow infinitely.
/// Reset the waker queue so it does not grow infinitely.
pub(crate) fn reset(queue: &mut VecDeque<WakerInterest>) {
std::mem::swap(&mut VecDeque::<WakerInterest>::with_capacity(16), queue);
}
}
/// types of interests we would look into when `Accept`'s `Poll` is waked up by waker.
/// Types of interests we would look into when `Accept`'s `Poll` is woken up by the waker.
///
/// *. These interests should not be confused with `mio::Interest` and mostly not I/O related
/// These interests should not be confused with `mio::Interest` and are mostly not I/O related.
pub(crate) enum WakerInterest {
/// `WorkerAvailable` is an interest from `Worker` notifying `Accept` there is a worker
/// available and can accept new tasks.
WorkerAvailable,
WorkerAvailable(usize),
/// `Pause`, `Resume`, `Stop` Interest are from `ServerBuilder` future. It listens to
/// `ServerCommand` and notify `Accept` to do exactly these tasks.
Pause,
Resume,
Stop,
/// `Timer` is an interest sent as a delayed future. When an error happens on accepting a
/// connection, `Accept` would deregister the socket listener temporarily, wake up the poll, and
/// register it again after the delayed future resolves.
Timer,
/// `Worker` is an interest happen after a worker runs into faulted state(This is determined
/// by if work can be sent to it successfully).`Accept` would be waked up and add the new
/// `WorkerHandle`.
Worker(WorkerHandle),
/// `Worker` is an interest that is triggered after a worker faults. This is determined by
/// trying to send work to it. `Accept` would be waked up and add the new `WorkerHandleAccept`.
Worker(WorkerHandleAccept),
}
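// Editor's note on the command-to-wake-up path, using items shown elsewhere in this diff:
// `ServerHandle::pause()` sends `ServerCommand::Pause` over the command channel;
// `ServerInner::handle_cmd()` then calls `waker_queue.wake(WakerInterest::Pause)`, which pushes
// the interest onto the queue and wakes the accept poll via `WAKER_TOKEN`; `Accept` drains the
// queue (see `guard`/`reset` above) and acts on each interest.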

View File

@@ -1,144 +1,217 @@
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use std::{
future::Future,
io, mem,
pin::Pin,
rc::Rc,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
task::{Context, Poll},
time::Duration,
};
use actix_rt::time::{sleep_until, Instant, Sleep};
use actix_rt::{spawn, Arbiter};
use actix_utils::counter::Counter;
use futures_core::future::LocalBoxFuture;
use actix_rt::{
spawn,
time::{sleep, Instant, Sleep},
Arbiter, ArbiterHandle, System,
};
use futures_core::{future::LocalBoxFuture, ready};
use log::{error, info, trace};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot;
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
oneshot,
};
use crate::service::{BoxedServerService, InternalServiceFactory};
use crate::socket::{MioStream, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::{join_all, Token};
use crate::{
service::{BoxedServerService, InternalServiceFactory},
socket::MioStream,
waker_queue::{WakerInterest, WakerQueue},
};
pub(crate) struct WorkerCommand(Conn);
/// Stop worker message. Returns `true` on successful shutdown
/// and `false` if some connections still alive.
pub(crate) struct StopCommand {
/// Stop worker message. Returns `true` on successful graceful shutdown
/// and `false` if some connections are still alive when the shutdown executes.
pub(crate) struct Stop {
graceful: bool,
result: oneshot::Sender<bool>,
tx: oneshot::Sender<bool>,
}
#[derive(Debug)]
pub(crate) struct Conn {
pub io: MioStream,
pub token: Token,
pub peer: Option<SocketAddr>,
pub token: usize,
}
static MAX_CONNS: AtomicUsize = AtomicUsize::new(25600);
/// Create accept and server worker handles.
fn handle_pair(
idx: usize,
conn_tx: UnboundedSender<Conn>,
stop_tx: UnboundedSender<Stop>,
counter: Counter,
) -> (WorkerHandleAccept, WorkerHandleServer) {
let accept = WorkerHandleAccept {
idx,
conn_tx,
counter,
};
/// Sets the maximum per-worker number of concurrent connections.
let server = WorkerHandleServer { idx, stop_tx };
(accept, server)
}
/// counter: Arc<AtomicUsize> field is owned by `Accept` thread and `ServerWorker` thread.
///
/// All socket listeners will stop accepting connections when this limit is
/// reached for each worker.
/// `Accept` would increment the counter and `ServerWorker` would decrement it.
///
/// By default max connections is set to a 25k per worker.
pub fn max_concurrent_connections(num: usize) {
MAX_CONNS.store(num, Ordering::Relaxed);
}
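In the new API this limit is configured per worker on the server builder instead. A rough usage sketch (the bind address and no-op service are placeholders):

use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .workers(2)
        // each worker accepts at most 1024 concurrent connections
        .max_concurrent_connections(1024)
        .bind("example", ("127.0.0.1", 0), || {
            fn_service(|_stream| async { Ok::<_, ()>(()) })
        })?
        .run()
        .await
}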
thread_local! {
static MAX_CONNS_COUNTER: Counter =
Counter::new(MAX_CONNS.load(Ordering::Relaxed));
}
pub(crate) fn num_connections() -> usize {
MAX_CONNS_COUNTER.with(|conns| conns.total())
}
// a handle to worker that can send message to worker and share the availability of worker to other
// thread.
/// # Atomic Ordering:
///
/// `Accept` always looks at its cached `Availability` field for the `ServerWorker` state.
/// It lazily increments the counter after successfully dispatching new work to `ServerWorker`.
/// On reaching the counter limit, `Accept` updates its cached `Availability` and marks the worker
/// as unable to accept any work.
///
/// `ServerWorker` always decrements the counter when each piece of work received from `Accept` is done.
/// On reaching the counter limit, the worker uses `mio::Waker` and `WakerQueue` to wake up `Accept`
/// and notify it to update the cached `Availability` again, marking the worker as able to accept work.
///
/// Hence, a wake-up only happens after `Accept` has incremented the counter to the limit,
/// and a decrement from the limit always wakes up `Accept`.
#[derive(Clone)]
pub(crate) struct WorkerHandle {
pub idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
pub(crate) struct Counter {
counter: Arc<AtomicUsize>,
limit: usize,
}
impl WorkerHandle {
pub fn new(
idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
) -> Self {
WorkerHandle {
idx,
tx1,
tx2,
avail,
impl Counter {
pub(crate) fn new(limit: usize) -> Self {
Self {
counter: Arc::new(AtomicUsize::new(1)),
limit,
}
}
pub fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx1.send(WorkerCommand(msg)).map_err(|msg| msg.0 .0)
/// Increment counter by 1 and return true when hitting limit
#[inline(always)]
pub(crate) fn inc(&self) -> bool {
self.counter.fetch_add(1, Ordering::Relaxed) != self.limit
}
pub fn available(&self) -> bool {
self.avail.available()
/// Decrement counter by 1 and return true if crossing limit.
#[inline(always)]
pub(crate) fn dec(&self) -> bool {
self.counter.fetch_sub(1, Ordering::Relaxed) == self.limit
}
pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (result, rx) = oneshot::channel();
let _ = self.tx2.send(StopCommand { graceful, result });
pub(crate) fn total(&self) -> usize {
self.counter.load(Ordering::SeqCst) - 1
}
}
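As a rough, standalone illustration of this protocol (not the crate's exact semantics; the real counter starts at 1 and encodes the limit check slightly differently): the accept side increments a shared atomic per dispatched connection and the worker side decrements it, signalling when the limit is crossed in either direction.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

struct ConnCounter {
    count: Arc<AtomicUsize>,
    limit: usize,
}

impl ConnCounter {
    fn new(limit: usize) -> Self {
        Self { count: Arc::new(AtomicUsize::new(0)), limit }
    }

    // called by the accept side; returns false when the worker is now full
    fn inc(&self) -> bool {
        self.count.fetch_add(1, Ordering::Relaxed) + 1 < self.limit
    }

    // called by the worker side; returns true when capacity was just freed
    // and the accept side should be woken
    fn dec(&self) -> bool {
        self.count.fetch_sub(1, Ordering::Relaxed) == self.limit
    }
}

fn main() {
    let c = ConnCounter::new(2);
    assert!(c.inc());  // 1 connection, still has capacity
    assert!(!c.inc()); // 2 connections, worker now full
    assert!(c.dec());  // back to 1, wake the accept loop
}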
pub(crate) struct WorkerCounter {
idx: usize,
inner: Rc<(WakerQueue, Counter)>,
}
impl Clone for WorkerCounter {
fn clone(&self) -> Self {
Self {
idx: self.idx,
inner: self.inner.clone(),
}
}
}
impl WorkerCounter {
pub(crate) fn new(idx: usize, waker_queue: WakerQueue, counter: Counter) -> Self {
Self {
idx,
inner: Rc::new((waker_queue, counter)),
}
}
#[inline(always)]
pub(crate) fn guard(&self) -> WorkerCounterGuard {
WorkerCounterGuard(self.clone())
}
fn total(&self) -> usize {
self.inner.1.total()
}
}
pub(crate) struct WorkerCounterGuard(WorkerCounter);
impl Drop for WorkerCounterGuard {
fn drop(&mut self) {
let (waker_queue, counter) = &*self.0.inner;
if counter.dec() {
waker_queue.wake(WakerInterest::WorkerAvailable(self.0.idx));
}
}
}
/// Handle to worker that can send connection message to worker and share the availability of worker
/// to other threads.
///
/// Held by [Accept](crate::accept::Accept).
pub(crate) struct WorkerHandleAccept {
idx: usize,
conn_tx: UnboundedSender<Conn>,
counter: Counter,
}
impl WorkerHandleAccept {
#[inline(always)]
pub(crate) fn idx(&self) -> usize {
self.idx
}
#[inline(always)]
pub(crate) fn send(&self, conn: Conn) -> Result<(), Conn> {
self.conn_tx.send(conn).map_err(|msg| msg.0)
}
#[inline(always)]
pub(crate) fn inc_counter(&self) -> bool {
self.counter.inc()
}
}
/// Handle to worker that can send a stop message to the worker.
///
/// Held by [ServerBuilder](crate::builder::ServerBuilder).
#[derive(Debug)]
pub(crate) struct WorkerHandleServer {
pub(crate) idx: usize,
stop_tx: UnboundedSender<Stop>,
}
impl WorkerHandleServer {
pub(crate) fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (tx, rx) = oneshot::channel();
let _ = self.stop_tx.send(Stop { graceful, tx });
rx
}
}
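The stop handshake used by `WorkerHandleServer` follows a common Tokio pattern, sketched below with simplified types: the `Stop` message carries a oneshot sender so the caller can await whether the shutdown ended up being graceful.

use tokio::sync::{mpsc, oneshot};

struct Stop {
    graceful: bool,
    tx: oneshot::Sender<bool>,
}

#[tokio::main]
async fn main() {
    let (stop_tx, mut stop_rx) = mpsc::unbounded_channel::<Stop>();

    // worker task: acknowledge the stop request once work is drained
    let worker = tokio::spawn(async move {
        if let Some(Stop { graceful, tx }) = stop_rx.recv().await {
            // report `true` only when the shutdown was graceful
            let _ = tx.send(graceful);
        }
    });

    // handle side: request a graceful stop and await the outcome
    let (tx, rx) = oneshot::channel();
    let _ = stop_tx.send(Stop { graceful: true, tx });
    assert!(rx.await.unwrap());

    worker.await.unwrap();
}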
#[derive(Clone)]
pub(crate) struct WorkerAvailability {
waker: WakerQueue,
available: Arc<AtomicBool>,
}
impl WorkerAvailability {
pub fn new(waker: WakerQueue) -> Self {
WorkerAvailability {
waker,
available: Arc::new(AtomicBool::new(false)),
}
}
pub fn available(&self) -> bool {
self.available.load(Ordering::Acquire)
}
pub fn set(&self, val: bool) {
let old = self.available.swap(val, Ordering::Release);
// notify the accept on switched to available.
if !old && val {
self.waker.wake(WakerInterest::WorkerAvailable);
}
}
}
/// Service worker
/// Service worker.
///
/// Worker accepts Socket objects via unbounded channel and starts stream
/// processing.
pub(crate) struct Worker {
rx: UnboundedReceiver<WorkerCommand>,
rx2: UnboundedReceiver<StopCommand>,
services: Vec<WorkerService>,
availability: WorkerAvailability,
conns: Counter,
factories: Vec<Box<dyn InternalServiceFactory>>,
/// Worker accepts Socket objects via unbounded channel and starts stream processing.
pub(crate) struct ServerWorker {
// UnboundedReceiver<Conn> should always be the first field.
// It must be dropped as soon as `ServerWorker` starts dropping.
conn_rx: UnboundedReceiver<Conn>,
stop_rx: UnboundedReceiver<Stop>,
counter: WorkerCounter,
services: Box<[WorkerService]>,
factories: Box<[Box<dyn InternalServiceFactory>]>,
state: WorkerState,
shutdown_timeout: Duration,
}
struct WorkerService {
factory: usize,
factory_idx: usize,
status: WorkerServiceStatus,
service: BoxedServerService,
}
@@ -150,7 +223,7 @@ impl WorkerService {
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum WorkerServiceStatus {
Available,
Unavailable,
@@ -160,91 +233,273 @@ enum WorkerServiceStatus {
Stopped,
}
impl Worker {
impl Default for WorkerServiceStatus {
fn default() -> Self {
Self::Unavailable
}
}
/// Config for worker behavior passed down from server builder.
#[derive(Debug, Clone, Copy)]
pub(crate) struct ServerWorkerConfig {
shutdown_timeout: Duration,
max_blocking_threads: usize,
max_concurrent_connections: usize,
}
impl Default for ServerWorkerConfig {
fn default() -> Self {
// 512 is the default max blocking thread count of tokio runtime.
let max_blocking_threads = std::cmp::max(512 / num_cpus::get(), 1);
Self {
shutdown_timeout: Duration::from_secs(30),
max_blocking_threads,
max_concurrent_connections: 25600,
}
}
}
impl ServerWorkerConfig {
pub(crate) fn max_blocking_threads(&mut self, num: usize) {
self.max_blocking_threads = num;
}
pub(crate) fn max_concurrent_connections(&mut self, num: usize) {
self.max_concurrent_connections = num;
}
pub(crate) fn shutdown_timeout(&mut self, dur: Duration) {
self.shutdown_timeout = dur;
}
}
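For example, the default blocking-thread budget simply divides Tokio's default of 512 blocking threads across the available cores (a sketch of the same arithmetic, using the `num_cpus` crate as the config does):

fn default_max_blocking_threads() -> usize {
    // e.g. on an 8-core machine: max(512 / 8, 1) = 64 blocking threads per worker
    std::cmp::max(512 / num_cpus::get(), 1)
}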
impl ServerWorker {
pub(crate) fn start(
idx: usize,
factories: Vec<Box<dyn InternalServiceFactory>>,
availability: WorkerAvailability,
shutdown_timeout: Duration,
) -> WorkerHandle {
let (tx1, rx) = unbounded_channel();
let (tx2, rx2) = unbounded_channel();
let avail = availability.clone();
waker_queue: WakerQueue,
config: ServerWorkerConfig,
) -> io::Result<(WorkerHandleAccept, WorkerHandleServer)> {
trace!("starting server worker {}", idx);
// every worker runs in it's own arbiter.
Arbiter::new().send(Box::pin(async move {
availability.set(false);
let mut wrk = MAX_CONNS_COUNTER.with(move |conns| Worker {
rx,
rx2,
availability,
factories,
shutdown_timeout,
services: Vec::new(),
conns: conns.clone(),
state: WorkerState::Unavailable,
});
let (tx1, conn_rx) = unbounded_channel();
let (tx2, stop_rx) = unbounded_channel();
let fut = wrk
.factories
.iter()
.enumerate()
.map(|(idx, factory)| {
let fut = factory.create();
async move {
fut.await.map(|r| {
r.into_iter().map(|(t, s)| (idx, t, s)).collect::<Vec<_>>()
})
}
})
.collect::<Vec<_>>();
let counter = Counter::new(config.max_concurrent_connections);
let pair = handle_pair(idx, tx1, tx2, counter.clone());
spawn(async move {
let res: Result<Vec<_>, _> = join_all(fut).await.into_iter().collect();
match res {
Ok(services) => {
for item in services {
for (factory, token, service) in item {
assert_eq!(token.0, wrk.services.len());
wrk.services.push(WorkerService {
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
// get actix system context if it is set
let actix_system = System::try_current();
// get tokio runtime handle if it is set
let tokio_handle = tokio::runtime::Handle::try_current().ok();
// service factories initialization channel
let (factory_tx, factory_rx) = std::sync::mpsc::sync_channel::<io::Result<()>>(1);
// outline of following code:
//
// if system exists
// if uring enabled
// start arbiter using uring method
// else
// start arbiter with regular tokio
// else
// if uring enabled
// start uring in spawned thread
// else
// start regular tokio in spawned thread
// every worker runs in its own thread and tokio runtime.
// use a custom tokio runtime builder to change the settings of runtime.
match (actix_system, tokio_handle) {
(None, None) => {
panic!("No runtime detected. Start a Tokio (or Actix) runtime.");
}
// no actix system
(None, Some(rt_handle)) => {
std::thread::Builder::new()
.name(format!("actix-server worker {}", idx))
.spawn(move || {
let (worker_stopped_tx, worker_stopped_rx) = oneshot::channel();
// local set for running service init futures and worker services
let ls = tokio::task::LocalSet::new();
// init services using existing Tokio runtime (so probably on main thread)
let services = rt_handle.block_on(ls.run_until(async {
let mut services = Vec::new();
for (idx, factory) in factories.iter().enumerate() {
match factory.create().await {
Ok((token, svc)) => services.push((idx, token, svc)),
Err(err) => {
error!("Can not start worker: {:?}", err);
return Err(io::Error::new(
io::ErrorKind::Other,
format!("can not start server service {}", idx),
));
}
}
}
Ok(services)
}));
let services = match services {
Ok(services) => {
factory_tx.send(Ok(())).unwrap();
services
}
Err(err) => {
factory_tx.send(Err(err)).unwrap();
return;
}
};
let worker_services = wrap_worker_services(services);
let worker_fut = async move {
// spawn to make sure ServerWorker runs as a non-boxed future.
spawn(async move {
ServerWorker {
conn_rx,
stop_rx,
services: worker_services.into_boxed_slice(),
counter: WorkerCounter::new(idx, waker_queue, counter),
factories: factories.into_boxed_slice(),
state: WorkerState::default(),
shutdown_timeout: config.shutdown_timeout,
}
.await;
// wake up outermost task waiting for shutdown
worker_stopped_tx.send(()).unwrap();
});
worker_stopped_rx.await.unwrap();
};
#[cfg(all(target_os = "linux", feature = "io-uring"))]
{
// TODO: pass max blocking thread config when tokio-uring enable configuration
// on building runtime.
let _ = config.max_blocking_threads;
tokio_uring::start(worker_fut);
}
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
{
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.max_blocking_threads(config.max_blocking_threads)
.build()
.unwrap();
rt.block_on(ls.run_until(worker_fut));
}
})
.expect("cannot spawn server worker thread");
}
// with actix system
(Some(_sys), _) => {
#[cfg(all(target_os = "linux", feature = "io-uring"))]
let arbiter = {
// TODO: pass max blocking thread config when tokio-uring enable configuration
// on building runtime.
let _ = config.max_blocking_threads;
Arbiter::new()
};
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
let arbiter = {
Arbiter::with_tokio_rt(move || {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.max_blocking_threads(config.max_blocking_threads)
.build()
.unwrap()
})
};
arbiter.spawn(async move {
// spawn_local to run !Send future tasks.
spawn(async move {
let mut services = Vec::new();
for (idx, factory) in factories.iter().enumerate() {
match factory.create().await {
Ok((token, svc)) => services.push((idx, token, svc)),
Err(err) => {
error!("Can not start worker: {:?}", err);
Arbiter::current().stop();
factory_tx
.send(Err(io::Error::new(
io::ErrorKind::Other,
format!("can not start server service {}", idx),
)))
.unwrap();
return;
}
}
}
}
Err(e) => {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
}
}
wrk.await
});
}));
WorkerHandle::new(idx, tx1, tx2, avail)
factory_tx.send(Ok(())).unwrap();
let worker_services = wrap_worker_services(services);
// spawn to make sure ServerWorker runs as a non-boxed future.
spawn(ServerWorker {
conn_rx,
stop_rx,
services: worker_services.into_boxed_slice(),
counter: WorkerCounter::new(idx, waker_queue, counter),
factories: factories.into_boxed_slice(),
state: Default::default(),
shutdown_timeout: config.shutdown_timeout,
});
});
});
}
};
// wait for service factories initialization
factory_rx.recv().unwrap()?;
Ok(pair)
}
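Stripped of the io-uring and error-handling branches, the runtime selection above reduces to this pattern (a simplified sketch with a hypothetical `start_worker_thread` helper, not the actual implementation): reuse an ambient Tokio runtime handle if one exists, otherwise build a current-thread runtime inside the worker thread, and drive `!Send` tasks on a `LocalSet` either way.

use tokio::task::LocalSet;

fn start_worker_thread(idx: usize) -> std::thread::JoinHandle<()> {
    // probe for an ambient Tokio runtime on the calling thread
    let ambient = tokio::runtime::Handle::try_current().ok();

    std::thread::Builder::new()
        .name(format!("actix-server worker {}", idx))
        .spawn(move || {
            // local set so the worker can drive !Send futures
            let ls = LocalSet::new();
            let work = ls.run_until(async {
                // the ServerWorker future would be spawned and awaited here
            });

            match ambient {
                // reuse the runtime the server was started on
                Some(handle) => handle.block_on(work),
                // otherwise this thread owns a single-threaded runtime
                None => tokio::runtime::Builder::new_current_thread()
                    .enable_all()
                    .build()
                    .unwrap()
                    .block_on(work),
            }
        })
        .expect("cannot spawn server worker thread")
}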
fn restart_service(&mut self, idx: usize, factory_id: usize) {
let factory = &self.factories[factory_id];
trace!("Service {:?} failed, restarting", factory.name(idx));
self.services[idx].status = WorkerServiceStatus::Restarting;
self.state = WorkerState::Restarting(Restart {
factory_id,
token: idx,
fut: factory.create(),
});
}
fn shutdown(&mut self, force: bool) {
if force {
self.services.iter_mut().for_each(|srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopped;
}
self.services
.iter_mut()
.filter(|srv| srv.status == WorkerServiceStatus::Available)
.for_each(|srv| {
srv.status = if force {
WorkerServiceStatus::Stopped
} else {
WorkerServiceStatus::Stopping
};
});
} else {
self.services.iter_mut().for_each(move |srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopping;
}
});
}
}
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (Token, usize)> {
let mut ready = self.conns.available(cx);
let mut failed = None;
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (usize, usize)> {
let mut ready = true;
for (idx, srv) in self.services.iter_mut().enumerate() {
if srv.status == WorkerServiceStatus::Available
|| srv.status == WorkerServiceStatus::Unavailable
@@ -254,7 +509,7 @@ impl Worker {
if srv.status == WorkerServiceStatus::Unavailable {
trace!(
"Service {:?} is available",
self.factories[srv.factory].name(Token(idx))
self.factories[srv.factory_idx].name(idx)
);
srv.status = WorkerServiceStatus::Available;
}
@@ -265,7 +520,7 @@ impl Worker {
if srv.status == WorkerServiceStatus::Available {
trace!(
"Service {:?} is unavailable",
self.factories[srv.factory].name(Token(idx))
self.factories[srv.factory_idx].name(idx)
);
srv.status = WorkerServiceStatus::Unavailable;
}
@@ -273,175 +528,198 @@ impl Worker {
Poll::Ready(Err(_)) => {
error!(
"Service {:?} readiness check returned error, restarting",
self.factories[srv.factory].name(Token(idx))
self.factories[srv.factory_idx].name(idx)
);
failed = Some((Token(idx), srv.factory));
srv.status = WorkerServiceStatus::Failed;
return Err((idx, srv.factory_idx));
}
}
}
}
if let Some(idx) = failed {
Err(idx)
} else {
Ok(ready)
}
Ok(ready)
}
}
enum WorkerState {
Available,
Unavailable,
Restarting(
usize,
Token,
LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>,
),
Shutdown(
Pin<Box<Sleep>>,
Pin<Box<Sleep>>,
Option<oneshot::Sender<bool>>,
),
Restarting(Restart),
Shutdown(Shutdown),
}
impl Future for Worker {
struct Restart {
factory_id: usize,
token: usize,
fut: LocalBoxFuture<'static, Result<(usize, BoxedServerService), ()>>,
}
/// State necessary for server shutdown.
struct Shutdown {
// Interval for checking the shutdown progress.
timer: Pin<Box<Sleep>>,
/// Start time of shutdown.
start_from: Instant,
/// Notify caller of the shutdown outcome (graceful/force).
tx: oneshot::Sender<bool>,
}
impl Default for WorkerState {
fn default() -> Self {
Self::Unavailable
}
}
impl Drop for ServerWorker {
fn drop(&mut self) {
Arbiter::try_current().as_ref().map(ArbiterHandle::stop);
}
}
impl Future for ServerWorker {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().get_mut();
// `StopWorker` message handler
if let Poll::Ready(Some(StopCommand { graceful, result })) =
Pin::new(&mut self.rx2).poll_recv(cx)
if let Poll::Ready(Some(Stop { graceful, tx })) =
Pin::new(&mut this.stop_rx).poll_recv(cx)
{
self.availability.set(false);
let num = num_connections();
let num = this.counter.total();
if num == 0 {
info!("Shutting down worker, 0 connections");
let _ = result.send(true);
info!("Shutting down idle worker");
let _ = tx.send(true);
return Poll::Ready(());
} else if graceful {
self.shutdown(false);
let num = num_connections();
if num != 0 {
info!("Graceful worker shutdown, {} connections", num);
self.state = WorkerState::Shutdown(
Box::pin(sleep_until(Instant::now() + Duration::from_secs(1))),
Box::pin(sleep_until(Instant::now() + self.shutdown_timeout)),
Some(result),
);
} else {
let _ = result.send(true);
return Poll::Ready(());
}
info!("Graceful worker shutdown; finishing {} connections", num);
this.shutdown(false);
this.state = WorkerState::Shutdown(Shutdown {
timer: Box::pin(sleep(Duration::from_secs(1))),
start_from: Instant::now(),
tx,
});
} else {
info!("Force shutdown worker, {} connections", num);
self.shutdown(true);
let _ = result.send(false);
info!("Force shutdown worker, closing {} connections", num);
this.shutdown(true);
let _ = tx.send(false);
return Poll::Ready(());
}
}
match self.state {
WorkerState::Unavailable => match self.check_readiness(cx) {
match this.state {
WorkerState::Unavailable => match this.check_readiness(cx) {
Ok(true) => {
self.state = WorkerState::Available;
self.availability.set(true);
this.state = WorkerState::Available;
self.poll(cx)
}
Ok(false) => Poll::Pending,
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
this.restart_service(token, idx);
self.poll(cx)
}
},
WorkerState::Restarting(idx, token, ref mut fut) => {
match fut.as_mut().poll(cx) {
Poll::Ready(Ok(item)) => {
// only interest in the first item?
if let Some((token, service)) = item.into_iter().next() {
trace!(
"Service {:?} has been restarted",
self.factories[idx].name(token)
);
self.services[token.0].created(service);
self.state = WorkerState::Unavailable;
return self.poll(cx);
}
}
Poll::Ready(Err(_)) => {
WorkerState::Restarting(ref mut restart) => {
let factory_id = restart.factory_id;
let token = restart.token;
let (token_new, service) = ready!(restart.fut.as_mut().poll(cx))
.unwrap_or_else(|_| {
panic!(
"Can not restart {:?} service",
self.factories[idx].name(token)
);
}
Poll::Pending => return Poll::Pending,
}
this.factories[factory_id].name(token)
)
});
assert_eq!(token, token_new);
trace!(
"Service {:?} has been restarted",
this.factories[factory_id].name(token)
);
this.services[token].created(service);
this.state = WorkerState::Unavailable;
self.poll(cx)
}
WorkerState::Shutdown(ref mut t1, ref mut t2, ref mut tx) => {
let num = num_connections();
if num == 0 {
let _ = tx.take().unwrap().send(true);
Arbiter::current().stop();
return Poll::Ready(());
WorkerState::Shutdown(ref mut shutdown) => {
// drop all pending connections in rx channel.
while let Poll::Ready(Some(conn)) = Pin::new(&mut this.conn_rx).poll_recv(cx) {
// WorkerCounterGuard is needed as the Accept thread has incremented the counter.
// It's the guard's job to decrement the counter together with the drop of the Conn.
let guard = this.counter.guard();
drop((conn, guard));
}
// check graceful timeout
if Pin::new(t2).poll(cx).is_ready() {
let _ = tx.take().unwrap().send(false);
self.shutdown(true);
Arbiter::current().stop();
return Poll::Ready(());
}
// wait for 1 second
ready!(shutdown.timer.as_mut().poll(cx));
// sleep for 1 second and then check again
if t1.as_mut().poll(cx).is_ready() {
*t1 = Box::pin(sleep_until(Instant::now() + Duration::from_secs(1)));
let _ = t1.as_mut().poll(cx);
}
if this.counter.total() == 0 {
// graceful shutdown
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
let _ = shutdown.tx.send(true);
}
Poll::Pending
Poll::Ready(())
} else if shutdown.start_from.elapsed() >= this.shutdown_timeout {
// timeout forceful shutdown
if let WorkerState::Shutdown(shutdown) = mem::take(&mut this.state) {
let _ = shutdown.tx.send(false);
}
Poll::Ready(())
} else {
// reset timer and wait for 1 second
let time = Instant::now() + Duration::from_secs(1);
shutdown.timer.as_mut().reset(time);
shutdown.timer.as_mut().poll(cx)
}
}
// actively poll stream and handle worker command
WorkerState::Available => loop {
match self.check_readiness(cx) {
Ok(true) => (),
match this.check_readiness(cx) {
Ok(true) => {}
Ok(false) => {
trace!("Worker is unavailable");
self.availability.set(false);
self.state = WorkerState::Unavailable;
this.state = WorkerState::Unavailable;
return self.poll(cx);
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.availability.set(false);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
this.restart_service(token, idx);
return self.poll(cx);
}
}
match Pin::new(&mut self.rx).poll_recv(cx) {
// handle incoming io stream
Poll::Ready(Some(WorkerCommand(msg))) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.service
.call((Some(guard), msg.io));
// handle incoming io stream
match ready!(Pin::new(&mut this.conn_rx).poll_recv(cx)) {
Some(msg) => {
let guard = this.counter.guard();
let _ = this.services[msg.token].service.call((guard, msg.io));
}
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(()),
None => return Poll::Ready(()),
};
},
}
}
}
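The shutdown arm of the state machine boils down to a periodic check against a deadline. As a plain async sketch (a hypothetical `wait_for_drain` helper, not the crate's code):

use std::time::Duration;
use tokio::time::{sleep, Instant};

// Returns `true` for a graceful shutdown, `false` if the timeout elapsed first.
async fn wait_for_drain(active_connections: impl Fn() -> usize, shutdown_timeout: Duration) -> bool {
    let start = Instant::now();

    loop {
        if active_connections() == 0 {
            // graceful: all in-flight connections finished
            return true;
        }

        if start.elapsed() >= shutdown_timeout {
            // force: give up waiting and report a non-graceful shutdown
            return false;
        }

        // re-check once per second, like the worker's shutdown timer
        sleep(Duration::from_secs(1)).await;
    }
}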
fn wrap_worker_services(
services: Vec<(usize, usize, BoxedServerService)>,
) -> Vec<WorkerService> {
services
.into_iter()
.fold(Vec::new(), |mut services, (idx, token, service)| {
assert_eq!(token, services.len());
services.push(WorkerService {
factory_idx: idx,
service,
status: WorkerServiceStatus::Unavailable,
});
services
})
}

View File

@@ -1,18 +1,21 @@
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{mpsc, Arc};
use std::{net, thread, time};
use std::{net, thread, time::Duration};
use actix_rt::{net::TcpStream, time::sleep};
use actix_server::Server;
use actix_service::fn_service;
use futures_util::future::{lazy, ok};
use socket2::{Domain, Protocol, Socket, Type};
fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let tcp = socket.listen(32).unwrap();
tcp.local_addr().unwrap()
let socket =
Socket::new(Domain::for_address(addr), Type::STREAM, Some(Protocol::TCP)).unwrap();
socket.set_reuse_address(true).unwrap();
socket.set_nonblocking(true).unwrap();
socket.bind(&addr.into()).unwrap();
socket.listen(32).unwrap();
net::TcpListener::from(socket).local_addr().unwrap()
}
#[test]
@@ -21,69 +24,111 @@ fn test_bind() {
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new("test");
let srv = sys.block_on(lazy(|_| {
Server::build()
actix_rt::System::new().block_on(async {
let srv = Server::build()
.workers(1)
.disable_signals()
.bind("test", addr, move || fn_service(|_| ok::<_, ()>(())))
.unwrap()
.start()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
.bind("test", addr, move || {
fn_service(|_| async { Ok::<_, ()>(()) })
})?
.run();
thread::sleep(time::Duration::from_millis(500));
let _ = tx.send(srv.handle());
srv.await
})
});
let srv = rx.recv().unwrap();
thread::sleep(Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
let _ = srv.stop(true);
h.join().unwrap().unwrap();
}
#[test]
fn plain_tokio_runtime() {
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
rt.block_on(async {
let srv = Server::build()
.workers(1)
.disable_signals()
.bind("test", addr, move || {
fn_service(|_| async { Ok::<_, ()>(()) })
})?
.run();
tx.send(srv.handle()).unwrap();
srv.await
})
});
let srv = rx.recv().unwrap();
thread::sleep(Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
let _ = srv.stop(true);
h.join().unwrap().unwrap();
}
#[test]
fn test_listen() {
let addr = unused_addr();
let lst = net::TcpListener::bind(addr).unwrap();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new("test");
let lst = net::TcpListener::bind(addr).unwrap();
sys.block_on(async {
Server::build()
actix_rt::System::new().block_on(async {
let srv = Server::build()
.disable_signals()
.workers(1)
.listen("test", lst, move || fn_service(|_| ok::<_, ()>(())))
.unwrap()
.start();
let _ = tx.send(actix_rt::System::current());
});
let _ = sys.run();
});
let sys = rx.recv().unwrap();
.listen("test", lst, move || {
fn_service(|_| async { Ok::<_, ()>(()) })
})?
.run();
thread::sleep(time::Duration::from_millis(500));
let _ = tx.send(srv.handle());
srv.await
})
});
let srv = rx.recv().unwrap();
thread::sleep(Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
sys.stop();
let _ = h.join();
let _ = srv.stop(true);
h.join().unwrap().unwrap();
}
#[test]
#[cfg(unix)]
fn test_start() {
use std::io::Read;
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use bytes::Bytes;
use futures_util::sink::SinkExt;
use std::io::Read;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new("test");
let srv = sys.block_on(lazy(|_| {
Server::build()
actix_rt::System::new().block_on(async {
let srv = Server::build()
.backlog(100)
.disable_signals()
.bind("test", addr, move || {
@@ -92,13 +137,13 @@ fn test_start() {
f.send(Bytes::from_static(b"test")).await.unwrap();
Ok::<_, ()>(())
})
})
.unwrap()
.start()
}));
})?
.run();
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
let _ = tx.send((srv.handle(), actix_rt::System::current()));
srv.await
})
});
let (srv, sys) = rx.recv().unwrap();
@@ -110,16 +155,16 @@ fn test_start() {
// pause
let _ = srv.pause();
thread::sleep(time::Duration::from_millis(200));
thread::sleep(Duration::from_millis(200));
let mut conn = net::TcpStream::connect(addr).unwrap();
conn.set_read_timeout(Some(time::Duration::from_millis(100)))
conn.set_read_timeout(Some(Duration::from_millis(100)))
.unwrap();
let res = conn.read_exact(&mut buf);
assert!(res.is_err());
// resume
let _ = srv.resume();
thread::sleep(time::Duration::from_millis(100));
thread::sleep(Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok());
assert!(net::TcpStream::connect(addr).is_ok());
@@ -131,60 +176,314 @@ fn test_start() {
// stop
let _ = srv.stop(false);
thread::sleep(time::Duration::from_millis(100));
assert!(net::TcpStream::connect(addr).is_err());
thread::sleep(time::Duration::from_millis(100));
sys.stop();
let _ = h.join();
h.join().unwrap().unwrap();
thread::sleep(Duration::from_secs(1));
assert!(net::TcpStream::connect(addr).is_err());
}
#[test]
fn test_configure() {
let addr1 = unused_addr();
let addr2 = unused_addr();
let addr3 = unused_addr();
#[actix_rt::test]
async fn test_max_concurrent_connections() {
// Note:
// A TCP listener would accept connections based on its backlog setting.
//
// The limit test, on the other hand, only checks the number of concurrent TCP streams a worker
// thread will accept.
use tokio::io::AsyncWriteExt;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = num.clone();
let counter = Arc::new(AtomicUsize::new(0));
let counter_clone = counter.clone();
let max_conn = 3;
let h = thread::spawn(move || {
let num = num2.clone();
let sys = actix_rt::System::new("test");
let srv = sys.block_on(lazy(|_| {
Server::build()
.disable_signals()
.configure(move |cfg| {
let num = num.clone();
let lst = net::TcpListener::bind(addr3).unwrap();
cfg.bind("addr1", addr1)
.unwrap()
.bind("addr2", addr2)
.unwrap()
.listen("addr3", lst)
.apply(move |rt| {
let num = num.clone();
rt.service("addr1", fn_service(|_| ok::<_, ()>(())));
rt.service("addr3", fn_service(|_| ok::<_, ()>(())));
rt.on_start(lazy(move |_| {
let _ = num.fetch_add(1, Relaxed);
}))
})
})
.unwrap()
actix_rt::System::new().block_on(async {
let srv = Server::build()
// Set a relatively higher backlog.
.backlog(12)
// max connections per worker is 3.
.max_concurrent_connections(max_conn)
.workers(1)
.start()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
.disable_signals()
.bind("test", addr, move || {
let counter = counter.clone();
fn_service(move |_io: TcpStream| {
let counter = counter.clone();
async move {
counter.fetch_add(1, Ordering::SeqCst);
sleep(Duration::from_secs(20)).await;
counter.fetch_sub(1, Ordering::SeqCst);
Ok::<(), ()>(())
}
})
})?
.run();
assert!(net::TcpStream::connect(addr1).is_ok());
assert!(net::TcpStream::connect(addr2).is_ok());
assert!(net::TcpStream::connect(addr3).is_ok());
assert_eq!(num.load(Relaxed), 1);
let _ = tx.send((srv.handle(), actix_rt::System::current()));
srv.await
})
});
let (srv, sys) = rx.recv().unwrap();
let mut conns = vec![];
for _ in 0..12 {
let conn = tokio::net::TcpStream::connect(addr).await.unwrap();
conns.push(conn);
}
sleep(Duration::from_secs(5)).await;
// counter would remain at 3 even with 12 successful connections;
// the other 9 remain in the backlog.
assert_eq!(max_conn, counter_clone.load(Ordering::SeqCst));
for mut conn in conns {
conn.shutdown().await.unwrap();
}
srv.stop(false).await;
sys.stop();
let _ = h.join();
h.join().unwrap().unwrap();
}
#[actix_rt::test]
async fn test_service_restart() {
use std::task::{Context, Poll};
use actix_service::{fn_factory, Service};
use futures_core::future::LocalBoxFuture;
use tokio::io::AsyncWriteExt;
struct TestService(Arc<AtomicUsize>);
impl Service<TcpStream> for TestService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let TestService(ref counter) = self;
let c = counter.fetch_add(1, Ordering::SeqCst);
// Force the service to restart on first readiness check.
if c > 0 {
Poll::Ready(Ok(()))
} else {
Poll::Ready(Err(()))
}
}
fn call(&self, _: TcpStream) -> Self::Future {
Box::pin(async { Ok(()) })
}
}
let addr1 = unused_addr();
let addr2 = unused_addr();
let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = Arc::new(AtomicUsize::new(0));
let num_clone = num.clone();
let num2_clone = num2.clone();
let h = thread::spawn(move || {
let num = num.clone();
actix_rt::System::new().block_on(async {
let srv = Server::build()
.backlog(1)
.disable_signals()
.bind("addr1", addr1, move || {
let num = num.clone();
fn_factory(move || {
let num = num.clone();
async move { Ok::<_, ()>(TestService(num)) }
})
})?
.bind("addr2", addr2, move || {
let num2 = num2.clone();
fn_factory(move || {
let num2 = num2.clone();
async move { Ok::<_, ()>(TestService(num2)) }
})
})?
.workers(1)
.run();
let _ = tx.send(srv.handle());
srv.await
})
});
let srv = rx.recv().unwrap();
for _ in 0..5 {
TcpStream::connect(addr1)
.await
.unwrap()
.shutdown()
.await
.unwrap();
TcpStream::connect(addr2)
.await
.unwrap()
.shutdown()
.await
.unwrap();
}
sleep(Duration::from_secs(3)).await;
assert!(num_clone.load(Ordering::SeqCst) > 5);
assert!(num2_clone.load(Ordering::SeqCst) > 5);
let _ = srv.stop(false);
h.join().unwrap().unwrap();
}
#[ignore] // non-deterministic on CI
#[actix_rt::test]
async fn worker_restart() {
use actix_service::{Service, ServiceFactory};
use futures_core::future::LocalBoxFuture;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
struct TestServiceFactory(Arc<AtomicUsize>);
impl ServiceFactory<TcpStream> for TestServiceFactory {
type Response = ();
type Error = ();
type Config = ();
type Service = TestService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: Self::Config) -> Self::Future {
let counter = self.0.fetch_add(1, Ordering::Relaxed);
Box::pin(async move { Ok(TestService(counter)) })
}
}
struct TestService(usize);
impl Service<TcpStream> for TestService {
type Response = ();
type Error = ();
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, stream: TcpStream) -> Self::Future {
let counter = self.0;
let mut stream = stream.into_std().unwrap();
use std::io::Write;
let str = counter.to_string();
let buf = str.as_bytes();
let mut written = 0;
while written < buf.len() {
if let Ok(n) = stream.write(&buf[written..]) {
written += n;
}
}
stream.flush().unwrap();
stream.shutdown(net::Shutdown::Write).unwrap();
// force worker 2 to restart service once.
if counter == 2 {
panic!("panic on purpose")
} else {
Box::pin(async { Ok(()) })
}
}
}
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let counter = Arc::new(AtomicUsize::new(1));
let h = thread::spawn(move || {
let counter = counter.clone();
actix_rt::System::new().block_on(async {
let srv = Server::build()
.disable_signals()
.bind("addr", addr, move || TestServiceFactory(counter.clone()))?
.workers(2)
.run();
let _ = tx.send(srv.handle());
srv.await
})
});
let srv = rx.recv().unwrap();
sleep(Duration::from_secs(3)).await;
let mut buf = [0; 8];
// worker 1 would not restart and returns its id consistently.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// worker 2 dies after returning the response.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("2", id);
stream.shutdown().await.unwrap();
// request to worker 1
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// worker 2 restarting and work goes to worker 1.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// worker 2 restarted but worker 1 was still the next to accept connection.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("1", id);
stream.shutdown().await.unwrap();
// TODO: Remove sleep if it can pass CI.
sleep(Duration::from_secs(3)).await;
// worker 2 accepts connections again but its id is 3.
let mut stream = TcpStream::connect(addr).await.unwrap();
let n = stream.read(&mut buf).await.unwrap();
let id = String::from_utf8_lossy(&buf[0..n]);
assert_eq!("3", id);
stream.shutdown().await.unwrap();
let _ = srv.stop(false);
h.join().unwrap().unwrap();
}

View File

@@ -3,6 +3,39 @@
## Unreleased - 2021-xx-xx
## 2.0.1 - 2021-10-11
* Documentation fix.
## 2.0.0 - 2021-04-16
* Removed pipeline and related structs/functions. [#335]
[#335]: https://github.com/actix/actix-net/pull/335
## 2.0.0-beta.5 - 2021-03-15
* Add default `Service` trait impl for `Rc<S: Service>` and `&S: Service`. [#288]
* Add `boxed::rc_service` function for constructing `boxed::RcService` type [#290]
[#288]: https://github.com/actix/actix-net/pull/288
[#290]: https://github.com/actix/actix-net/pull/290
## 2.0.0-beta.4 - 2021-02-04
* `Service::poll_ready` and `Service::call` receive `&self`. [#247]
* `apply_fn` and `apply_fn_factory` now receive `Fn(Req, &Service)` function type. [#247]
* `apply_cfg` and `apply_cfg_factory` now receive `Fn(Req, &Service)` function type. [#247]
* `fn_service` and friends now receive `Fn(Req)` function type. [#247]
[#247]: https://github.com/actix/actix-net/pull/247
## 2.0.0-beta.3 - 2021-01-09
* The `forward_ready!` macro converts errors. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 2.0.0-beta.2 - 2021-01-03
* Remove redundant type parameter from `map_config`.

View File

@@ -1,17 +1,15 @@
[package]
name = "actix-service"
version = "2.0.0-beta.2"
version = "2.0.1"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
"fakeshadow <24548779@qq.com>",
]
description = "Service trait and combinators for representing asynchronous request/response operations."
keywords = ["network", "framework", "async", "futures", "service"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-service"
readme = "README.md"
categories = ["network-programming", "asynchronous"]
categories = ["network-programming", "asynchronous", "no-std"]
repository = "https://github.com/actix/actix-net"
license = "MIT OR Apache-2.0"
edition = "2018"
@@ -21,8 +19,10 @@ path = "src/lib.rs"
[dependencies]
futures-core = { version = "0.3.7", default-features = false }
paste = "1"
pin-project-lite = "0.2"
[dev-dependencies]
actix-rt = "1.0.0"
actix-rt = "2.0.0"
actix-utils = "3.0.0"
futures-util = { version = "0.3.7", default-features = false }

View File

@@ -2,6 +2,12 @@
> Service trait and combinators for representing asynchronous request/response operations.
See documentation for detailed explanations these components: [https://docs.rs/actix-service][docs].
[![crates.io](https://img.shields.io/crates/v/actix-service?label=latest)](https://crates.io/crates/actix-service)
[![Documentation](https://docs.rs/actix-service/badge.svg?version=2.0.1)](https://docs.rs/actix-service/2.0.1)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![License](https://img.shields.io/crates/l/actix-service.svg)
[![Dependency Status](https://deps.rs/crate/actix-service/2.0.1/status.svg)](https://deps.rs/crate/actix-service/2.0.1)
![Download](https://img.shields.io/crates/d/actix-service.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
[docs]: https://docs.rs/actix-service
See documentation for detailed explanations of these components: https://docs.rs/actix-service.

View File

@@ -0,0 +1,33 @@
use std::{future::Future, sync::mpsc, time::Duration};
async fn oracle<F, Fut>(f: F) -> (u32, u32)
where
F: FnOnce() -> Fut + Clone + Send + 'static,
Fut: Future<Output = u32> + 'static,
{
let f1 = actix_rt::spawn(f.clone()());
let f2 = actix_rt::spawn(f());
(f1.await.unwrap(), f2.await.unwrap())
}
#[actix_rt::main]
async fn main() {
let (tx, rx) = mpsc::channel();
let (r1, r2) = oracle({
let tx = tx.clone();
|| async move {
tx.send(()).unwrap();
4 * 4
}
})
.await;
assert_eq!(r1, r2);
tx.send(()).unwrap();
rx.recv_timeout(Duration::from_millis(100)).unwrap();
rx.recv_timeout(Duration::from_millis(100)).unwrap();
}

View File

@@ -1,21 +1,21 @@
use alloc::rc::Rc;
use core::{
cell::RefCell,
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `and_then` combinator, chaining a computation onto the end
/// of another service which completes successfully.
/// Service for the `and_then` combinator, chaining a computation onto the end of another service
/// which completes successfully.
///
/// This is created by the `Pipeline::and_then` method.
pub(crate) struct AndThenService<A, B, Req>(Rc<RefCell<(A, B)>>, PhantomData<Req>);
pub struct AndThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
impl<A, B, Req> AndThenService<A, B, Req> {
/// Create new `AndThen` combinator
@@ -24,7 +24,7 @@ impl<A, B, Req> AndThenService<A, B, Req> {
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
Self(Rc::new(RefCell::new((a, b))), PhantomData)
Self(Rc::new((a, b)), PhantomData)
}
}
@@ -43,20 +43,20 @@ where
type Error = A::Error;
type Future = AndThenServiceResponse<A, B, Req>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let mut srv = self.0.borrow_mut();
let not_ready = !srv.0.poll_ready(cx)?.is_ready();
if !srv.1.poll_ready(cx)?.is_ready() || not_ready {
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let (a, b) = &*self.0;
let not_ready = !a.poll_ready(cx)?.is_ready();
if !b.poll_ready(cx)?.is_ready() || not_ready {
Poll::Pending
} else {
Poll::Ready(Ok(()))
}
}
fn call(&mut self, req: Req) -> Self::Future {
fn call(&self, req: Req) -> Self::Future {
AndThenServiceResponse {
state: State::A {
fut: self.0.borrow_mut().0.call(req),
fut: self.0 .0.call(req),
b: Some(self.0.clone()),
},
}
@@ -64,7 +64,7 @@ where
}
pin_project! {
pub(crate) struct AndThenServiceResponse<A, B, Req>
pub struct AndThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
@@ -84,13 +84,12 @@ pin_project! {
A {
#[pin]
fut: A::Future,
b: Option<Rc<RefCell<(A, B)>>>,
b: Option<Rc<(A, B)>>,
},
B {
#[pin]
fut: B::Future,
},
Empty,
}
}
@@ -105,29 +104,20 @@ where
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
StateProj::A { fut, b } => match fut.poll(cx)? {
Poll::Ready(res) => {
let b = b.take().unwrap();
this.state.set(State::Empty); // drop fut A
let fut = b.borrow_mut().1.call(res);
this.state.set(State::B { fut });
self.poll(cx)
}
Poll::Pending => Poll::Pending,
},
StateProj::B { fut } => fut.poll(cx).map(|r| {
this.state.set(State::Empty);
r
}),
StateProj::Empty => {
panic!("future must not be polled after it returned `Poll::Ready`")
StateProj::A { fut, b } => {
let res = ready!(fut.poll(cx))?;
let b = b.take().unwrap();
let fut = b.1.call(res);
this.state.set(State::B { fut });
self.poll(cx)
}
StateProj::B { fut } => fut.poll(cx),
}
}
}
/// `.and_then()` service factory combinator
pub(crate) struct AndThenServiceFactory<A, B, Req>
pub struct AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
@@ -210,7 +200,7 @@ where
}
pin_project! {
pub(crate) struct AndThenServiceFactoryResponse<A, B, Req>
pub struct AndThenServiceFactoryResponse<A, B, Req>
where
A: ServiceFactory<Req>,
B: ServiceFactory<A::Response>,
@@ -282,7 +272,9 @@ mod tests {
use futures_util::future::lazy;
use crate::{
fn_factory, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory,
fn_factory, ok,
pipeline::{pipeline, pipeline_factory},
ready, Ready, Service, ServiceFactory,
};
struct Srv1(Rc<Cell<usize>>);
@@ -292,12 +284,12 @@ mod tests {
type Error = ();
type Future = Ready<Result<Self::Response, ()>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Poll::Ready(Ok(()))
}
fn call(&mut self, req: &'static str) -> Self::Future {
fn call(&self, req: &'static str) -> Self::Future {
ok(req)
}
}
@@ -310,12 +302,12 @@ mod tests {
type Error = ();
type Future = Ready<Result<Self::Response, ()>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Poll::Ready(Ok(()))
}
fn call(&mut self, req: &'static str) -> Self::Future {
fn call(&self, req: &'static str) -> Self::Future {
ok((req, "srv2"))
}
}
@@ -323,7 +315,7 @@ mod tests {
#[actix_rt::test]
async fn test_poll_ready() {
let cnt = Rc::new(Cell::new(0));
let mut srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt.clone()));
let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt.clone()));
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Ok(())));
assert_eq!(cnt.get(), 2);
@@ -332,7 +324,7 @@ mod tests {
#[actix_rt::test]
async fn test_call() {
let cnt = Rc::new(Cell::new(0));
let mut srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt));
let srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt));
let res = srv.call("srv1").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv1", "srv2"));
@@ -346,7 +338,7 @@ mod tests {
pipeline_factory(fn_factory(move || ready(Ok::<_, ()>(Srv1(cnt2.clone())))))
.and_then(move || ready(Ok(Srv2(cnt.clone()))));
let mut srv = new_srv.new_service(()).await.unwrap();
let srv = new_srv.new_service(()).await.unwrap();
let res = srv.call("srv1").await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv1", "srv2"));

View File

@@ -20,7 +20,7 @@ pub fn apply_fn<I, S, F, Fut, Req, In, Res, Err>(
where
I: IntoService<S, In>,
S: Service<In, Error = Err>,
F: FnMut(Req, &mut S) -> Fut,
F: Fn(Req, &S) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
Apply::new(service.into_service(), wrap_fn)
@@ -36,7 +36,7 @@ pub fn apply_fn_factory<I, SF, F, Fut, Req, In, Res, Err>(
where
I: IntoServiceFactory<SF, In>,
SF: ServiceFactory<In, Error = Err>,
F: FnMut(Req, &mut SF::Service) -> Fut + Clone,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
ApplyFactory::new(service.into_factory(), f)
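With the new `Fn(Req, &S)` signature, a minimal end-to-end use of `apply_fn` looks like the following (a usage sketch; the numeric request and response types are illustrative):

use actix_service::{apply_fn, fn_service, Service};

#[actix_rt::main]
async fn main() {
    // inner service that doubles a number
    let double = fn_service(|n: u32| async move { Ok::<_, ()>(n * 2) });

    // wrap it: the closure receives the request and a shared reference to the inner service
    let srv = apply_fn(double, |req: u32, srv| {
        let fut = srv.call(req + 1);
        async move { fut.await }
    });

    assert_eq!(srv.call(4).await, Ok(10));
}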
@@ -57,7 +57,7 @@ where
impl<S, F, Fut, Req, In, Res, Err> Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err>,
F: FnMut(Req, &mut S) -> Fut,
F: Fn(Req, &S) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
/// Create new `Apply` combinator
@@ -73,7 +73,7 @@ where
impl<S, F, Fut, Req, In, Res, Err> Clone for Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err> + Clone,
F: FnMut(Req, &mut S) -> Fut + Clone,
F: Fn(Req, &S) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
@@ -88,7 +88,7 @@ where
impl<S, F, Fut, Req, In, Res, Err> Service<Req> for Apply<S, F, Req, In, Res, Err>
where
S: Service<In, Error = Err>,
F: FnMut(Req, &mut S) -> Fut,
F: Fn(Req, &S) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
@@ -97,8 +97,8 @@ where
crate::forward_ready!(service);
fn call(&mut self, req: Req) -> Self::Future {
(self.wrap_fn)(req, &mut self.service)
fn call(&self, req: Req) -> Self::Future {
(self.wrap_fn)(req, &self.service)
}
}
@@ -112,7 +112,7 @@ pub struct ApplyFactory<SF, F, Req, In, Res, Err> {
impl<SF, F, Fut, Req, In, Res, Err> ApplyFactory<SF, F, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: FnMut(Req, &mut SF::Service) -> Fut + Clone,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
/// Create new `ApplyFactory` new service instance
@@ -128,7 +128,7 @@ where
impl<SF, F, Fut, Req, In, Res, Err> Clone for ApplyFactory<SF, F, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err> + Clone,
F: FnMut(Req, &mut SF::Service) -> Fut + Clone,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
@@ -144,7 +144,7 @@ impl<SF, F, Fut, Req, In, Res, Err> ServiceFactory<Req>
for ApplyFactory<SF, F, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: FnMut(Req, &mut SF::Service) -> Fut + Clone,
F: Fn(Req, &SF::Service) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
@@ -165,7 +165,7 @@ pin_project! {
pub struct ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: FnMut(Req, &mut SF::Service) -> Fut,
F: Fn(Req, &SF::Service) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
#[pin]
@@ -178,7 +178,7 @@ pin_project! {
impl<SF, F, Fut, Req, In, Res, Err> ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: FnMut(Req, &mut SF::Service) -> Fut,
F: Fn(Req, &SF::Service) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
fn new(fut: SF::Future, wrap_fn: F) -> Self {
@@ -194,7 +194,7 @@ impl<SF, F, Fut, Req, In, Res, Err> Future
for ApplyServiceFactoryResponse<SF, F, Fut, Req, In, Res, Err>
where
SF: ServiceFactory<In, Error = Err>,
F: FnMut(Req, &mut SF::Service) -> Fut,
F: Fn(Req, &SF::Service) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
type Output = Result<Apply<SF::Service, F, Req, In, Res, Err>, SF::InitError>;
@@ -214,7 +214,11 @@ mod tests {
use futures_util::future::lazy;
use super::*;
use crate::{ok, pipeline, pipeline_factory, Ready, Service, ServiceFactory};
use crate::{
ok,
pipeline::{pipeline, pipeline_factory},
Ready, Service, ServiceFactory,
};
#[derive(Clone)]
struct Srv;
@@ -226,14 +230,14 @@ mod tests {
crate::always_ready!();
fn call(&mut self, _: ()) -> Self::Future {
fn call(&self, _: ()) -> Self::Future {
ok(())
}
}
#[actix_rt::test]
async fn test_call() {
let mut srv = pipeline(apply_fn(Srv, |req: &'static str, srv| {
let srv = pipeline(apply_fn(Srv, |req: &'static str, srv| {
let fut = srv.call(());
async move {
fut.await.unwrap();
@@ -261,7 +265,7 @@ mod tests {
},
));
let mut srv = new_srv.new_service(()).await.unwrap();
let srv = new_srv.new_service(()).await.unwrap();
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));

View File

@@ -1,17 +1,17 @@
use alloc::rc::Rc;
use core::{
cell::RefCell,
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use crate::{Service, ServiceFactory};
/// Convert `Fn(Config, &mut Service1) -> Future<Service2>` fn to a service factory.
/// Convert `Fn(Config, &Service1) -> Future<Service2>` fn to a service factory.
pub fn apply_cfg<S1, Req, F, Cfg, Fut, S2, Err>(
srv: S1,
f: F,
@@ -26,17 +26,17 @@ pub fn apply_cfg<S1, Req, F, Cfg, Fut, S2, Err>(
> + Clone
where
S1: Service<Req>,
F: FnMut(Cfg, &mut S1) -> Fut,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
ApplyConfigService {
srv: Rc::new(RefCell::new((srv, f))),
srv: Rc::new((srv, f)),
_phantom: PhantomData,
}
}
/// Convert `Fn(Config, &mut ServiceFactory1) -> Future<ServiceFactory2>` fn to a service factory.
/// Convert `Fn(Config, &ServiceFactory1) -> Future<ServiceFactory2>` fn to a service factory.
///
/// Service1 gets constructed from the `T` factory.
pub fn apply_cfg_factory<SF, Req, F, Cfg, Fut, S>(
@@ -52,33 +52,33 @@ pub fn apply_cfg_factory<SF, Req, F, Cfg, Fut, S>(
> + Clone
where
SF: ServiceFactory<Req, Config = ()>,
F: FnMut(Cfg, &mut SF::Service) -> Fut,
F: Fn(Cfg, &SF::Service) -> Fut,
SF::InitError: From<SF::Error>,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
ApplyConfigServiceFactory {
srv: Rc::new(RefCell::new((factory, f))),
srv: Rc::new((factory, f)),
_phantom: PhantomData,
}
}
/// Convert `Fn(Config, &mut Server) -> Future<Service>` fn to NewService\
/// Convert `Fn(Config, &Server) -> Future<Service>` fn to NewService.
struct ApplyConfigService<S1, Req, F, Cfg, Fut, S2, Err>
where
S1: Service<Req>,
F: FnMut(Cfg, &mut S1) -> Fut,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
srv: Rc<RefCell<(S1, F)>>,
srv: Rc<(S1, F)>,
_phantom: PhantomData<(Cfg, Req, Fut, S2)>,
}
impl<S1, Req, F, Cfg, Fut, S2, Err> Clone for ApplyConfigService<S1, Req, F, Cfg, Fut, S2, Err>
where
S1: Service<Req>,
F: FnMut(Cfg, &mut S1) -> Fut,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
@@ -94,20 +94,20 @@ impl<S1, Req, F, Cfg, Fut, S2, Err> ServiceFactory<Req>
for ApplyConfigService<S1, Req, F, Cfg, Fut, S2, Err>
where
S1: Service<Req>,
F: FnMut(Cfg, &mut S1) -> Fut,
F: Fn(Cfg, &S1) -> Fut,
Fut: Future<Output = Result<S2, Err>>,
S2: Service<Req>,
{
type Config = Cfg;
type Response = S2::Response;
type Error = S2::Error;
type Config = Cfg;
type Service = S2;
type InitError = Err;
type Future = Fut;
fn new_service(&self, cfg: Cfg) -> Self::Future {
let (t, f) = &mut *self.srv.borrow_mut();
let (t, f) = &*self.srv;
f(cfg, t)
}
}
@@ -116,18 +116,18 @@ where
struct ApplyConfigServiceFactory<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
F: FnMut(Cfg, &mut SF::Service) -> Fut,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
srv: Rc<RefCell<(SF, F)>>,
srv: Rc<(SF, F)>,
_phantom: PhantomData<(Cfg, Req, Fut, S)>,
}
impl<SF, Req, F, Cfg, Fut, S> Clone for ApplyConfigServiceFactory<SF, Req, F, Cfg, Fut, S>
where
SF: ServiceFactory<Req, Config = ()>,
F: FnMut(Cfg, &mut SF::Service) -> Fut,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
@@ -144,13 +144,13 @@ impl<SF, Req, F, Cfg, Fut, S> ServiceFactory<Req>
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
F: FnMut(Cfg, &mut SF::Service) -> Fut,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
type Config = Cfg;
type Response = S::Response;
type Error = S::Error;
type Config = Cfg;
type Service = S;
type InitError = SF::InitError;
@@ -161,7 +161,7 @@ where
cfg: Some(cfg),
store: self.srv.clone(),
state: State::A {
fut: self.srv.borrow().0.new_service(()),
fut: self.srv.0.new_service(()),
},
}
}
@@ -172,12 +172,12 @@ pin_project! {
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
F: FnMut(Cfg, &mut SF::Service) -> Fut,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
cfg: Option<Cfg>,
store: Rc<RefCell<(SF, F)>>,
store: Rc<(SF, F)>,
#[pin]
state: State<SF, Fut, S, Req>,
}
@@ -203,7 +203,7 @@ impl<SF, Req, F, Cfg, Fut, S> Future
where
SF: ServiceFactory<Req, Config = ()>,
SF::InitError: From<SF::Error>,
F: FnMut(Cfg, &mut SF::Service) -> Fut,
F: Fn(Cfg, &SF::Service) -> Fut,
Fut: Future<Output = Result<S, SF::InitError>>,
S: Service<Req>,
{
@@ -213,24 +213,20 @@ where
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
StateProj::A { fut } => match fut.poll(cx)? {
Poll::Pending => Poll::Pending,
Poll::Ready(svc) => {
this.state.set(State::B { svc });
self.poll(cx)
StateProj::A { fut } => {
let svc = ready!(fut.poll(cx))?;
this.state.set(State::B { svc });
self.poll(cx)
}
StateProj::B { svc } => {
ready!(svc.poll_ready(cx))?;
{
let (_, f) = &**this.store;
let fut = f(this.cfg.take().unwrap(), svc);
this.state.set(State::C { fut });
}
},
StateProj::B { svc } => match svc.poll_ready(cx)? {
Poll::Ready(_) => {
{
let (_, f) = &mut *this.store.borrow_mut();
let fut = f(this.cfg.take().unwrap(), svc);
this.state.set(State::C { fut });
}
self.poll(cx)
}
Poll::Pending => Poll::Pending,
},
self.poll(cx)
}
StateProj::C { fut } => fut.poll(cx),
}
}


@@ -1,21 +1,69 @@
use alloc::boxed::Box;
use core::{
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
//! Trait object forms of services and service factories.
use alloc::{boxed::Box, rc::Rc};
use core::{future::Future, pin::Pin};
use paste::paste;
use crate::{Service, ServiceFactory};
/// A boxed future with no send bound or lifetime parameters.
pub type BoxFuture<T> = Pin<Box<dyn Future<Output = T>>>;
pub type BoxService<Req, Res, Err> =
Box<dyn Service<Req, Response = Res, Error = Err, Future = BoxFuture<Result<Res, Err>>>>;
macro_rules! service_object {
($name: ident, $type: tt, $fn_name: ident) => {
paste! {
#[doc = "Type alias for service trait object using `" $type "`."]
pub type $name<Req, Res, Err> = $type<
dyn Service<Req, Response = Res, Error = Err, Future = BoxFuture<Result<Res, Err>>>,
>;
#[doc = "Wraps service as a trait object using [`" $name "`]."]
pub fn $fn_name<S, Req>(service: S) -> $name<Req, S::Response, S::Error>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
$type::new(ServiceWrapper::new(service))
}
}
};
}
service_object!(BoxService, Box, service);
service_object!(RcService, Rc, rc_service);
struct ServiceWrapper<S> {
inner: S,
}
impl<S> ServiceWrapper<S> {
fn new(inner: S) -> Self {
Self { inner }
}
}
impl<S, Req, Res, Err> Service<Req> for ServiceWrapper<S>
where
S: Service<Req, Response = Res, Error = Err>,
S::Future: 'static,
{
type Response = Res;
type Error = Err;
type Future = BoxFuture<Result<Res, Err>>;
crate::forward_ready!(inner);
fn call(&self, req: Req) -> Self::Future {
Box::pin(self.inner.call(req))
}
}
/// Wrapper for a service factory that will map its services to boxed trait object services.
pub struct BoxServiceFactory<Cfg, Req, Res, Err, InitErr>(Inner<Cfg, Req, Res, Err, InitErr>);
/// Create boxed service factory
/// Wraps a service factory that returns service trait objects.
pub fn factory<SF, Req>(
factory: SF,
) -> BoxServiceFactory<SF::Config, Req, SF::Response, SF::Error, SF::InitError>
@@ -28,20 +76,7 @@ where
SF::Error: 'static,
SF::InitError: 'static,
{
BoxServiceFactory(Box::new(FactoryWrapper {
factory,
_t: PhantomData,
}))
}
/// Create boxed service
pub fn service<S, Req>(service: S) -> BoxService<Req, S::Response, S::Error>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
Box::new(ServiceWrapper(service, PhantomData))
BoxServiceFactory(Box::new(FactoryWrapper(factory)))
}
type Inner<C, Req, Res, Err, InitErr> = Box<
@@ -66,9 +101,9 @@ where
{
type Response = Res;
type Error = Err;
type InitError = InitErr;
type Config = C;
type Service = BoxService<Req, Res, Err>;
type InitError = InitErr;
type Future = BoxFuture<Result<Self::Service, InitErr>>;
@@ -77,12 +112,9 @@ where
}
}
struct FactoryWrapper<SF, Req, Cfg> {
factory: SF,
_t: PhantomData<(Req, Cfg)>,
}
struct FactoryWrapper<SF>(SF);
impl<SF, Req, Cfg, Res, Err, InitErr> ServiceFactory<Req> for FactoryWrapper<SF, Req, Cfg>
impl<SF, Req, Cfg, Res, Err, InitErr> ServiceFactory<Req> for FactoryWrapper<SF>
where
Req: 'static,
Res: 'static,
@@ -95,47 +127,13 @@ where
{
type Response = Res;
type Error = Err;
type InitError = InitErr;
type Config = Cfg;
type Service = BoxService<Req, Res, Err>;
type InitError = InitErr;
type Future = BoxFuture<Result<Self::Service, Self::InitError>>;
fn new_service(&self, cfg: Cfg) -> Self::Future {
let fut = self.factory.new_service(cfg);
Box::pin(async {
let res = fut.await;
res.map(ServiceWrapper::boxed)
})
}
}
struct ServiceWrapper<S: Service<Req>, Req>(S, PhantomData<Req>);
impl<S, Req> ServiceWrapper<S, Req>
where
S: Service<Req> + 'static,
Req: 'static,
S::Future: 'static,
{
fn boxed(service: S) -> BoxService<Req, S::Response, S::Error> {
Box::new(ServiceWrapper(service, PhantomData))
}
}
impl<S, Req, Res, Err> Service<Req> for ServiceWrapper<S, Req>
where
S: Service<Req, Response = Res, Error = Err>,
S::Future: 'static,
{
type Response = Res;
type Error = Err;
type Future = BoxFuture<Result<Res, Err>>;
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(ctx)
}
fn call(&mut self, req: Req) -> Self::Future {
Box::pin(self.0.call(req))
let f = self.0.new_service(cfg);
Box::pin(async { f.await.map(|s| Box::new(ServiceWrapper::new(s)) as _) })
}
}
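
For orientation, here is a minimal usage sketch (not part of the diff) of the `boxed::service` helper and the `BoxService` alias generated above, assuming actix-service 2.x and its `fn_service` constructor: two differently typed services are erased to one boxed type so they can be stored together.

```rust
use actix_service::{boxed, fn_service};

// Each closure produces a distinct `FnServiceFactory` type; boxing erases the
// difference so both fit in one collection (sketch, assumes the v2 API above).
fn erased_services() -> Vec<boxed::BoxService<u32, u32, ()>> {
    let double = fn_service(|n: u32| async move { Ok::<_, ()>(n * 2) });
    let square = fn_service(|n: u32| async move { Ok::<_, ()>(n * n) });

    vec![boxed::service(double), boxed::service(square)]
}
```

The `rc_service` variant works the same way but produces an `RcService`, which can be cloned cheaply.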


@@ -1,5 +1,12 @@
use crate::{dev, Service, ServiceFactory};
use crate::{
and_then::{AndThenService, AndThenServiceFactory},
map::Map,
map_err::MapErr,
transform_err::TransformMapInitErr,
IntoService, IntoServiceFactory, Service, ServiceFactory, Transform,
};
/// An extension trait for [`Service`]s that provides a variety of convenient adapters.
pub trait ServiceExt<Req>: Service<Req> {
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
@@ -10,12 +17,12 @@ pub trait ServiceExt<Req>: Service<Req> {
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it, similar to the existing `map` methods in the
/// standard library.
fn map<F, R>(self, f: F) -> dev::Map<Self, F, Req, R>
fn map<F, R>(self, f: F) -> Map<Self, F, Req, R>
where
Self: Sized,
F: FnMut(Self::Response) -> R,
{
dev::Map::new(self, f)
Map::new(self, f)
}
/// Map this service's error to a different error, returning a new service.
@@ -26,17 +33,34 @@ pub trait ServiceExt<Req>: Service<Req> {
///
/// Note that this function consumes the receiving service and returns a
/// wrapped version of it.
fn map_err<F, E>(self, f: F) -> dev::MapErr<Self, Req, F, E>
fn map_err<F, E>(self, f: F) -> MapErr<Self, Req, F, E>
where
Self: Sized,
F: Fn(Self::Error) -> E,
{
dev::MapErr::new(self, f)
MapErr::new(self, f)
}
/// Call another service after call to this one has resolved successfully.
///
/// This function can be used to chain two services together and ensure that the second service
/// isn't called until the call to the first service has finished. The result of the call to the first
/// service is used as an input parameter for the second service's call.
///
/// Note that this function consumes the receiving service and returns a wrapped version of it.
fn and_then<I, S1>(self, service: I) -> AndThenService<Self, S1, Req>
where
Self: Sized,
I: IntoService<S1, Self::Response>,
S1: Service<Self::Response, Error = Self::Error>,
{
AndThenService::new(self, service.into_service())
}
}
impl<S, Req> ServiceExt<Req> for S where S: Service<Req> {}
/// An extension trait for [`ServiceFactory`]s that provides a variety of convenient adapters.
pub trait ServiceFactoryExt<Req>: ServiceFactory<Req> {
/// Map this service's output to a different type, returning a new service
/// of the resulting type.
@@ -65,6 +89,36 @@ pub trait ServiceFactoryExt<Req>: ServiceFactory<Req> {
{
crate::map_init_err::MapInitErr::new(self, f)
}
/// Call another service after call to this one has resolved successfully.
fn and_then<I, SF1>(self, factory: I) -> AndThenServiceFactory<Self, SF1, Req>
where
Self: Sized,
Self::Config: Clone,
I: IntoServiceFactory<SF1, Self::Response>,
SF1: ServiceFactory<
Self::Response,
Config = Self::Config,
Error = Self::Error,
InitError = Self::InitError,
>,
{
AndThenServiceFactory::new(self, factory.into_factory())
}
}
impl<S, Req> ServiceFactoryExt<Req> for S where S: ServiceFactory<Req> {}
impl<SF, Req> ServiceFactoryExt<Req> for SF where SF: ServiceFactory<Req> {}
/// An extension trait for [`Transform`]s that provides a variety of convenient adapters.
pub trait TransformExt<S, Req>: Transform<S, Req> {
/// Return a new `Transform` whose init error is mapped to a different type.
fn map_init_err<F, E>(self, f: F) -> TransformMapInitErr<Self, S, Req, F, E>
where
Self: Sized,
F: Fn(Self::InitError) -> E + Clone,
{
TransformMapInitErr::new(self, f)
}
}
impl<T, Req> TransformExt<T, Req> for T where T: Transform<T, Req> {}
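
To see the new combinators together, here is a small sketch (not part of the diff, assuming actix-service 2.x with `fn_service`): two services are chained with `and_then` and the final response is adapted with `map`.

```rust
use actix_service::{fn_service, Service, ServiceExt};

// Parse a number, then hand the parsed value to a second service; both stages
// share the same error type, as `and_then` requires (illustrative sketch).
async fn demo() -> Result<(), &'static str> {
    let parse = fn_service(|s: &'static str| async move {
        s.parse::<u32>().map_err(|_| "not a number")
    });
    let add_one = fn_service(|n: u32| async move { Ok::<_, &'static str>(n + 1) });

    let svc = parse.and_then(add_one).map(|n| n.to_string());

    assert_eq!(svc.call("41").await?, "42");
    Ok(())
}
```

`ServiceFactoryExt::and_then` mirrors this at the factory level, producing a factory whose built services are already chained.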


@@ -1,4 +1,4 @@
use core::{future::Future, marker::PhantomData, task::Poll};
use core::{future::Future, marker::PhantomData};
use crate::{ok, IntoService, IntoServiceFactory, Ready, Service, ServiceFactory};
@@ -7,7 +7,7 @@ pub fn fn_service<F, Fut, Req, Res, Err, Cfg>(
f: F,
) -> FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: FnMut(Req) -> Fut + Clone,
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
FnServiceFactory::new(f)
@@ -15,9 +15,8 @@ where
/// Create `ServiceFactory` for function that can produce services
///
/// # Example
///
/// ```rust
/// # Examples
/// ```
/// use std::io;
/// use actix_service::{fn_factory, fn_service, Service, ServiceFactory};
/// use futures_util::future::ok;
@@ -39,7 +38,7 @@ where
/// });
///
/// // construct new service
/// let mut srv = factory.new_service(()).await?;
/// let srv = factory.new_service(()).await?;
///
/// // now we can use `div` service
/// let result = srv.call((10, 20)).await?;
@@ -53,21 +52,20 @@ pub fn fn_factory<F, Cfg, Srv, Req, Fut, Err>(
f: F,
) -> FnServiceNoConfig<F, Cfg, Srv, Req, Fut, Err>
where
Srv: Service<Req>,
F: Fn() -> Fut,
Fut: Future<Output = Result<Srv, Err>>,
Srv: Service<Req>,
{
FnServiceNoConfig::new(f)
}
/// Create `ServiceFactory` for function that accepts config argument and can produce services
///
/// Any function that has following form `Fn(Config) -> Future<Output = Service>` could
/// act as a `ServiceFactory`.
/// Any function that has following form `Fn(Config) -> Future<Output = Service>` could act as
/// a `ServiceFactory`.
///
/// # Example
///
/// ```rust
/// # Examples
/// ```
/// use std::io;
/// use actix_service::{fn_factory_with_config, fn_service, Service, ServiceFactory};
/// use futures_util::future::ok;
@@ -81,7 +79,7 @@ where
/// });
///
/// // construct new service with config argument
/// let mut srv = factory.new_service(10).await?;
/// let srv = factory.new_service(10).await?;
///
/// let result = srv.call(10).await?;
/// assert_eq!(result, 100);
@@ -132,7 +130,7 @@ where
impl<F, Fut, Req, Res, Err> Service<Req> for FnService<F, Fut, Req, Res, Err>
where
F: FnMut(Req) -> Fut,
F: Fn(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
@@ -141,14 +139,14 @@ where
crate::always_ready!();
fn call(&mut self, req: Req) -> Self::Future {
fn call(&self, req: Req) -> Self::Future {
(self.f)(req)
}
}
impl<F, Fut, Req, Res, Err> IntoService<FnService<F, Fut, Req, Res, Err>, Req> for F
where
F: FnMut(Req) -> Fut,
F: Fn(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
fn into_service(self) -> FnService<F, Fut, Req, Res, Err> {
@@ -158,7 +156,7 @@ where
pub struct FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: FnMut(Req) -> Fut,
F: Fn(Req) -> Fut,
Fut: Future<Output = Result<Res, Err>>,
{
f: F,
@@ -167,7 +165,7 @@ where
impl<F, Fut, Req, Res, Err, Cfg> FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: FnMut(Req) -> Fut + Clone,
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn new(f: F) -> Self {
@@ -177,7 +175,7 @@ where
impl<F, Fut, Req, Res, Err, Cfg> Clone for FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: FnMut(Req) -> Fut + Clone,
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
fn clone(&self) -> Self {
@@ -187,7 +185,7 @@ where
impl<F, Fut, Req, Res, Err> Service<Req> for FnServiceFactory<F, Fut, Req, Res, Err, ()>
where
F: FnMut(Req) -> Fut + Clone,
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
@@ -196,7 +194,7 @@ where
crate::always_ready!();
fn call(&mut self, req: Req) -> Self::Future {
fn call(&self, req: Req) -> Self::Future {
(self.f)(req)
}
}
@@ -204,7 +202,7 @@ where
impl<F, Fut, Req, Res, Err, Cfg> ServiceFactory<Req>
for FnServiceFactory<F, Fut, Req, Res, Err, Cfg>
where
F: FnMut(Req) -> Fut + Clone,
F: Fn(Req) -> Fut + Clone,
Fut: Future<Output = Result<Res, Err>>,
{
type Response = Res;
@@ -318,8 +316,8 @@ where
{
type Response = Srv::Response;
type Error = Srv::Error;
type Service = Srv;
type Config = Cfg;
type Service = Srv;
type InitError = Err;
type Future = Fut;
@@ -364,7 +362,7 @@ mod tests {
async fn test_fn_service() {
let new_srv = fn_service(|()| ok::<_, ()>("srv"));
let mut srv = new_srv.new_service(()).await.unwrap();
let srv = new_srv.new_service(()).await.unwrap();
let res = srv.call(()).await;
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
assert!(res.is_ok());
@@ -373,7 +371,7 @@ mod tests {
#[actix_rt::test]
async fn test_fn_service_service() {
let mut srv = fn_service(|()| ok::<_, ()>("srv"));
let srv = fn_service(|()| ok::<_, ()>("srv"));
let res = srv.call(()).await;
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
@@ -387,7 +385,7 @@ mod tests {
ok::<_, ()>(fn_service(move |()| ok::<_, ()>(("srv", cfg))))
});
let mut srv = new_srv.new_service(1).await.unwrap();
let srv = new_srv.new_service(1).await.unwrap();
let res = srv.call(()).await;
assert_eq!(lazy(|cx| srv.poll_ready(cx)).await, Poll::Ready(Ok(())));
assert!(res.is_ok());
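
One consequence of the `Fn` (rather than `FnMut`) bound introduced above is that a handler closure can no longer mutate captured state directly. A small sketch (not from the diff, assuming a single-threaded context) of the interior-mutability workaround:

```rust
use std::{cell::Cell, rc::Rc};

use actix_service::{fn_service, Service};

// The closure only takes shared access to its captures, so it satisfies `Fn`;
// the shared `Cell` carries the mutable counter (names are illustrative).
async fn counted() {
    let hits = Rc::new(Cell::new(0u32));
    let hits_in_svc = hits.clone();

    let svc = fn_service(move |req: &'static str| {
        hits_in_svc.set(hits_in_svc.get() + 1);
        async move { Ok::<_, ()>(req.len()) }
    });

    let _ = svc.call("abc").await;
    let _ = svc.call("hello").await;
    assert_eq!(hits.get(), 2);
}
```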


@@ -1,7 +1,8 @@
//! See [`Service`] docs for information on this crate's foundational trait.
#![no_std]
#![deny(rust_2018_idioms, nonstandard_style)]
#![deny(rust_2018_idioms, nonstandard_style, future_incompatible)]
#![warn(missing_docs)]
#![allow(clippy::type_complexity)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
@@ -21,6 +22,7 @@ mod apply_cfg;
pub mod boxed;
mod ext;
mod fn_service;
mod macros;
mod map;
mod map_config;
mod map_err;
@@ -33,11 +35,10 @@ mod transform_err;
pub use self::apply::{apply_fn, apply_fn_factory};
pub use self::apply_cfg::{apply_cfg, apply_cfg_factory};
pub use self::ext::{ServiceExt, ServiceFactoryExt};
pub use self::ext::{ServiceExt, ServiceFactoryExt, TransformExt};
pub use self::fn_service::{fn_factory, fn_factory_with_config, fn_service};
pub use self::map_config::{map_config, unit_config};
pub use self::pipeline::{pipeline, pipeline_factory, Pipeline, PipelineFactory};
pub use self::transform::{apply, Transform};
pub use self::transform::{apply, ApplyTransform, Transform};
#[allow(unused_imports)]
use self::ready::{err, ok, ready, Ready};
@@ -48,38 +49,46 @@ use self::ready::{err, ok, ready, Ready};
/// replies. You can think about a service as a function with one argument that returns some result
/// asynchronously. Conceptually, the operation looks like this:
///
/// ```rust,ignore
/// ```ignore
/// async fn(Request) -> Result<Response, Err>
/// ```
///
/// The `Service` trait just generalizes this form where each parameter is described as an
/// associated type on the trait. Services can also have mutable state that influence computation.
/// The `Service` trait just generalizes this form. Requests are defined as a generic type parameter
/// and responses and other details are defined as associated types on the trait impl. Notice that
/// this design means that services can receive many request types and converge them to a single
/// response type.
///
/// Services can also have mutable state that influences computation by using a `Cell`, `RefCell`
/// or `Mutex`. Services intentionally do not take `&mut self` to reduce overhead in the
/// common cases.
///
/// `Service` provides a symmetric and uniform API; the same abstractions can be used to represent
/// both clients and servers. Services describe only _transformation_ operations which encourage
/// simple API surfaces. This leads to simpler design of each service, improves test-ability and
/// makes composition easier.
///
/// ```rust,ignore
/// ```ignore
/// struct MyService;
///
/// impl Service for MyService {
/// type Request = u8;
/// impl Service<u8> for MyService {
/// type Response = u64;
/// type Error = MyError;
/// type Future = Pin<Box<Future<Output=Result<Self::Response, Self::Error>>>>;
/// type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>;
///
/// fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ... }
/// fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ... }
///
/// fn call(&mut self, req: Self::Request) -> Self::Future { ... }
/// fn call(&self, req: u8) -> Self::Future { ... }
/// }
/// ```
///
/// Sometimes it is not necessary to implement the Service trait. For example, the above service
/// could be rewritten as a simple function and passed to [fn_service](fn_service()).
/// could be rewritten as a simple function and passed to [`fn_service`](fn_service()).
///
/// ```rust,ignore
/// ```ignore
/// async fn my_service(req: u8) -> Result<u64, MyError>;
///
/// let svc = fn_service(my_service)
/// svc.call(123)
/// ```
pub trait Service<Req> {
/// Responses given by the service.
@@ -93,40 +102,40 @@ pub trait Service<Req> {
/// Returns `Ready` when the service is able to process requests.
///
/// If the service is at capacity, then `Pending` is returned and the task
/// is notified when the service becomes ready again. This function is
/// expected to be called while on a task.
/// If the service is at capacity, then `Pending` is returned and the task is notified when the
/// service becomes ready again. This function is expected to be called while on a task.
///
/// This is a **best effort** implementation. False positives are permitted.
/// It is permitted for the service to return `Ready` from a `poll_ready`
/// call and the next invocation of `call` results in an error.
/// This is a best effort implementation. False positives are permitted. It is permitted for
/// the service to return `Ready` from a `poll_ready` call and the next invocation of `call`
/// results in an error.
///
/// # Notes
/// 1. `.poll_ready()` might be called on different task from actual service call.
/// 1. In case of chained services, `.poll_ready()` get called for all services at once.
fn poll_ready(&mut self, ctx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
/// 1. `poll_ready` might be called on a different task to `call`.
/// 1. In cases of chained services, `.poll_ready()` is called for all services at once.
fn poll_ready(&self, ctx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
/// Process the request and return the response asynchronously.
///
/// This function is expected to be callable off task. As such,
/// implementations should take care to not call `poll_ready`. If the
/// service is at capacity and the request is unable to be handled, the
/// returned `Future` should resolve to an error.
/// This function is expected to be callable off-task. As such, implementations of `call` should
/// take care to not call `poll_ready`. If the service is at capacity and the request is unable
/// to be handled, the returned `Future` should resolve to an error.
///
/// Calling `call` without calling `poll_ready` is permitted. The
/// implementation must be resilient to this fact.
fn call(&mut self, req: Req) -> Self::Future;
/// Invoking `call` without first invoking `poll_ready` is permitted. Implementations must be
/// resilient to this fact.
fn call(&self, req: Req) -> Self::Future;
}
/// Factory for creating `Service`s.
///
/// Acts as a service factory. This is useful for cases where new `Service`s
/// must be produced. One case is a TCP server listener. The listener
/// accepts new TCP streams, obtains a new `Service` using the
/// `ServiceFactory` trait, and uses the new `Service` to process inbound
/// requests on that new TCP stream.
/// This is useful for cases where new `Service`s must be produced. One case is a TCP
/// server listener: a listener accepts new connections, constructs a new `Service` for each using
/// the `ServiceFactory` trait, and uses the new `Service` to process inbound requests on that new
/// connection.
///
/// `Config` is a service factory configuration type.
///
/// Simple factories may be able to use [`fn_factory`] or [`fn_factory_with_config`] to
/// reduce boilerplate.
pub trait ServiceFactory<Req> {
/// Responses given by the created services.
type Response;
@@ -143,13 +152,14 @@ pub trait ServiceFactory<Req> {
/// Errors potentially raised while building a service.
type InitError;
/// The future of the `Service` instance.
type Future: Future<Output = Result<Self::Service, Self::InitError>>;
/// Create and return a new service asynchronously.
fn new_service(&self, cfg: Self::Config) -> Self::Future;
}
// TODO: remove the impl for `&mut` references.
impl<'a, S, Req> Service<Req> for &'a mut S
where
S: Service<Req> + 'a,
@@ -158,11 +168,28 @@ where
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
(**self).poll_ready(ctx)
}
fn call(&mut self, request: Req) -> S::Future {
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
impl<'a, S, Req> Service<Req> for &'a S
where
S: Service<Req> + 'a,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
@@ -175,15 +202,33 @@ where
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
(**self).poll_ready(ctx)
}
fn call(&mut self, request: Req) -> S::Future {
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
impl<S, Req> Service<Req> for Rc<S>
where
S: Service<Req> + ?Sized,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
(**self).poll_ready(ctx)
}
fn call(&self, request: Req) -> S::Future {
(**self).call(request)
}
}
/// This impl is deprecated since v2 because the `Service` trait now receives a shared reference.
impl<S, Req> Service<Req> for RefCell<S>
where
S: Service<Req>,
@@ -192,29 +237,12 @@ where
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.borrow_mut().poll_ready(ctx)
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.borrow().poll_ready(ctx)
}
fn call(&mut self, request: Req) -> S::Future {
self.borrow_mut().call(request)
}
}
impl<S, Req> Service<Req> for Rc<RefCell<S>>
where
S: Service<Req>,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.borrow_mut().poll_ready(ctx)
}
fn call(&mut self, request: Req) -> S::Future {
(&mut (**self).borrow_mut()).call(request)
fn call(&self, request: Req) -> S::Future {
self.borrow().call(request)
}
}
@@ -294,40 +322,3 @@ where
{
tp.into_service()
}
pub mod dev {
pub use crate::apply::{Apply, ApplyFactory};
pub use crate::fn_service::{
FnService, FnServiceConfig, FnServiceFactory, FnServiceNoConfig,
};
pub use crate::map::{Map, MapServiceFactory};
pub use crate::map_config::{MapConfig, UnitConfig};
pub use crate::map_err::{MapErr, MapErrServiceFactory};
pub use crate::map_init_err::MapInitErr;
pub use crate::transform::ApplyTransform;
pub use crate::transform_err::TransformMapInitErr;
}
#[macro_export]
macro_rules! always_ready {
() => {
fn poll_ready(
&mut self,
_: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
};
}
#[macro_export]
macro_rules! forward_ready {
($field:ident) => {
fn poll_ready(
&mut self,
cx: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
self.$field.poll_ready(cx)
}
};
}
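
Pulling the reworked trait together, a hand-written implementation under the new `&self` receivers might look like the following sketch (not part of the diff; `futures-util` is assumed only for its ready-made `Ready` future). It also shows the `Cell`-based state pattern the docs above mention.

```rust
use std::{
    cell::Cell,
    convert::Infallible,
    task::{Context, Poll},
};

use actix_service::Service;
use futures_util::future::{ready, Ready};

// Per-service mutable state sits behind interior mutability now that both
// `poll_ready` and `call` take `&self` (sketch; names are illustrative).
struct Counter {
    hits: Cell<u64>,
}

impl Service<()> for Counter {
    type Response = u64;
    type Error = Infallible;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&self, _req: ()) -> Self::Future {
        self.hits.set(self.hits.get() + 1);
        ready(Ok(self.hits.get()))
    }
}
```

Because `Service` is now also implemented for `&S` and `Rc<S>`, such a service can be shared and called from multiple owners without wrapping it in `RefCell`.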

actix-service/src/macros.rs (new file, 185 lines)

@@ -0,0 +1,185 @@
/// An implementation of [`poll_ready`]() that always signals readiness.
///
/// This should only be used for basic leaf services that have no concept of un-readiness.
/// For wrapper or other service types, use [`forward_ready!`] for simple cases or write a bespoke
/// `poll_ready` implementation.
///
/// [`poll_ready`]: crate::Service::poll_ready
///
/// # Examples
/// ```no_run
/// use actix_service::Service;
/// use futures_util::future::{ready, Ready};
///
/// struct IdentityService;
///
/// impl Service<u32> for IdentityService {
/// type Response = u32;
/// type Error = ();
/// type Future = Ready<Result<Self::Response, Self::Error>>;
///
/// actix_service::always_ready!();
///
/// fn call(&self, req: u32) -> Self::Future {
/// ready(Ok(req))
/// }
/// }
/// ```
#[macro_export]
macro_rules! always_ready {
() => {
#[inline]
fn poll_ready(
&self,
_: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
::core::task::Poll::Ready(Ok(()))
}
};
}
/// An implementation of [`poll_ready`] that forwards readiness checks to a
/// named struct field.
///
/// Tuple structs are not supported.
///
/// [`poll_ready`]: crate::Service::poll_ready
///
/// # Examples
/// ```no_run
/// use actix_service::Service;
/// use futures_util::future::{ready, Ready};
///
/// struct WrapperService<S> {
/// inner: S,
/// }
///
/// impl<S> Service<()> for WrapperService<S>
/// where
/// S: Service<()>,
/// {
/// type Response = S::Response;
/// type Error = S::Error;
/// type Future = S::Future;
///
/// actix_service::forward_ready!(inner);
///
/// fn call(&self, req: ()) -> Self::Future {
/// self.inner.call(req)
/// }
/// }
/// ```
#[macro_export]
macro_rules! forward_ready {
($field:ident) => {
#[inline]
fn poll_ready(
&self,
cx: &mut ::core::task::Context<'_>,
) -> ::core::task::Poll<Result<(), Self::Error>> {
self.$field
.poll_ready(cx)
.map_err(::core::convert::Into::into)
}
};
}
#[cfg(test)]
mod tests {
use core::{
cell::Cell,
convert::Infallible,
task::{self, Context, Poll},
};
use futures_util::{
future::{ready, Ready},
task::noop_waker,
};
use crate::Service;
struct IdentityService;
impl Service<u32> for IdentityService {
type Response = u32;
type Error = Infallible;
type Future = Ready<Result<Self::Response, Self::Error>>;
always_ready!();
fn call(&self, req: u32) -> Self::Future {
ready(Ok(req))
}
}
struct CountdownService(Cell<u32>);
impl Service<()> for CountdownService {
type Response = ();
type Error = Infallible;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let count = self.0.get();
if count == 0 {
Poll::Ready(Ok(()))
} else {
self.0.set(count - 1);
cx.waker().wake_by_ref();
Poll::Pending
}
}
fn call(&self, _: ()) -> Self::Future {
ready(Ok(()))
}
}
struct WrapperService<S> {
inner: S,
}
impl<S> Service<()> for WrapperService<S>
where
S: Service<()>,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
forward_ready!(inner);
fn call(&self, _: ()) -> Self::Future {
self.inner.call(())
}
}
#[test]
fn test_always_ready_macro() {
let waker = noop_waker();
let mut cx = task::Context::from_waker(&waker);
let svc = IdentityService;
assert!(svc.poll_ready(&mut cx).is_ready());
assert!(svc.poll_ready(&mut cx).is_ready());
assert!(svc.poll_ready(&mut cx).is_ready());
}
#[test]
fn test_forward_ready_macro() {
let waker = noop_waker();
let mut cx = task::Context::from_waker(&waker);
let svc = WrapperService {
inner: CountdownService(Cell::new(3)),
};
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_pending());
assert!(svc.poll_ready(&mut cx).is_ready());
}
}
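
Unlike the macro it replaces in `lib.rs`, this `forward_ready!` maps the inner readiness error through `Into`, so a wrapper may expose a wider error type than the service it wraps, provided `Self::Error: From<S::Error>`. A sketch (not part of the diff; `AppError` and the boxed future are illustrative choices):

```rust
use std::io;

use actix_service::{forward_ready, Service};
use futures_util::future::LocalBoxFuture;

// Error type owned by the wrapper; anything convertible from the inner error works.
#[derive(Debug)]
struct AppError(io::Error);

impl From<io::Error> for AppError {
    fn from(err: io::Error) -> Self {
        AppError(err)
    }
}

struct Wrap<S> {
    inner: S,
}

impl<S> Service<()> for Wrap<S>
where
    S: Service<(), Error = io::Error>,
    S::Future: 'static,
    S::Response: 'static,
{
    type Response = S::Response;
    type Error = AppError;
    type Future = LocalBoxFuture<'static, Result<S::Response, AppError>>;

    // Readiness errors are converted `io::Error -> AppError` by the macro's
    // `map_err(Into::into)`.
    forward_ready!(inner);

    fn call(&self, req: ()) -> Self::Future {
        let fut = self.inner.call(req);
        Box::pin(async move { fut.await.map_err(AppError::from) })
    }
}
```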


@@ -58,7 +58,7 @@ where
crate::forward_ready!(service);
fn call(&mut self, req: Req) -> Self::Future {
fn call(&self, req: Req) -> Self::Future {
MapFuture::new(self.service.call(req), self.f.clone())
}
}
@@ -215,21 +215,21 @@ mod tests {
crate::always_ready!();
fn call(&mut self, _: ()) -> Self::Future {
fn call(&self, _: ()) -> Self::Future {
ok(())
}
}
#[actix_rt::test]
async fn test_poll_ready() {
let mut srv = Srv.map(|_| "ok");
let srv = Srv.map(|_| "ok");
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Ok(())));
}
#[actix_rt::test]
async fn test_call() {
let mut srv = Srv.map(|_| "ok");
let srv = Srv.map(|_| "ok");
let res = srv.call(()).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), "ok");
@@ -238,7 +238,7 @@ mod tests {
#[actix_rt::test]
async fn test_new_service() {
let new_srv = (|| ok::<_, ()>(Srv)).into_factory().map(|_| "ok");
let mut srv = new_srv.new_service(&()).await.unwrap();
let srv = new_srv.new_service(&()).await.unwrap();
let res = srv.call(()).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("ok"));


@@ -9,26 +9,25 @@ use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `map_err` combinator, changing the type of a service's
/// error.
/// Service for the `map_err` combinator, changing the type of a service's error.
///
/// This is created by the `ServiceExt::map_err` method.
pub struct MapErr<S, Req, F, E> {
service: S,
f: F,
mapper: F,
_t: PhantomData<(E, Req)>,
}
impl<S, Req, F, E> MapErr<S, Req, F, E> {
/// Create new `MapErr` combinator
pub(crate) fn new(service: S, f: F) -> Self
pub(crate) fn new(service: S, mapper: F) -> Self
where
S: Service<Req>,
F: Fn(S::Error) -> E,
{
Self {
service,
f,
mapper,
_t: PhantomData,
}
}
@@ -42,7 +41,7 @@ where
fn clone(&self) -> Self {
MapErr {
service: self.service.clone(),
f: self.f.clone(),
mapper: self.mapper.clone(),
_t: PhantomData,
}
}
@@ -57,12 +56,12 @@ where
type Error = E;
type Future = MapErrFuture<A, Req, F, E>;
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(&self.f)
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(&self.mapper)
}
fn call(&mut self, req: Req) -> Self::Future {
MapErrFuture::new(self.service.call(req), self.f.clone())
fn call(&self, req: Req) -> Self::Future {
MapErrFuture::new(self.service.call(req), self.mapper.clone())
}
}
@@ -105,23 +104,23 @@ where
/// service's error.
///
/// This is created by the `NewServiceExt::map_err` method.
pub struct MapErrServiceFactory<A, Req, F, E>
pub struct MapErrServiceFactory<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E + Clone,
{
a: A,
a: SF,
f: F,
e: PhantomData<(E, Req)>,
}
impl<A, Req, F, E> MapErrServiceFactory<A, Req, F, E>
impl<SF, Req, F, E> MapErrServiceFactory<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E + Clone,
{
/// Create new `MapErr` new service instance
pub(crate) fn new(a: A, f: F) -> Self {
pub(crate) fn new(a: SF, f: F) -> Self {
Self {
a,
f,
@@ -130,10 +129,10 @@ where
}
}
impl<A, Req, F, E> Clone for MapErrServiceFactory<A, Req, F, E>
impl<SF, Req, F, E> Clone for MapErrServiceFactory<SF, Req, F, E>
where
A: ServiceFactory<Req> + Clone,
F: Fn(A::Error) -> E + Clone,
SF: ServiceFactory<Req> + Clone,
F: Fn(SF::Error) -> E + Clone,
{
fn clone(&self) -> Self {
Self {
@@ -144,57 +143,57 @@ where
}
}
impl<A, Req, F, E> ServiceFactory<Req> for MapErrServiceFactory<A, Req, F, E>
impl<SF, Req, F, E> ServiceFactory<Req> for MapErrServiceFactory<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E + Clone,
{
type Response = A::Response;
type Response = SF::Response;
type Error = E;
type Config = A::Config;
type Service = MapErr<A::Service, Req, F, E>;
type InitError = A::InitError;
type Future = MapErrServiceFuture<A, Req, F, E>;
type Config = SF::Config;
type Service = MapErr<SF::Service, Req, F, E>;
type InitError = SF::InitError;
type Future = MapErrServiceFuture<SF, Req, F, E>;
fn new_service(&self, cfg: A::Config) -> Self::Future {
fn new_service(&self, cfg: SF::Config) -> Self::Future {
MapErrServiceFuture::new(self.a.new_service(cfg), self.f.clone())
}
}
pin_project! {
pub struct MapErrServiceFuture<A, Req, F, E>
pub struct MapErrServiceFuture<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E,
{
#[pin]
fut: A::Future,
f: F,
fut: SF::Future,
mapper: F,
}
}
impl<A, Req, F, E> MapErrServiceFuture<A, Req, F, E>
impl<SF, Req, F, E> MapErrServiceFuture<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E,
{
fn new(fut: A::Future, f: F) -> Self {
MapErrServiceFuture { f, fut }
fn new(fut: SF::Future, mapper: F) -> Self {
MapErrServiceFuture { fut, mapper }
}
}
impl<A, Req, F, E> Future for MapErrServiceFuture<A, Req, F, E>
impl<SF, Req, F, E> Future for MapErrServiceFuture<SF, Req, F, E>
where
A: ServiceFactory<Req>,
F: Fn(A::Error) -> E + Clone,
SF: ServiceFactory<Req>,
F: Fn(SF::Error) -> E + Clone,
{
type Output = Result<MapErr<A::Service, Req, F, E>, A::InitError>;
type Output = Result<MapErr<SF::Service, Req, F, E>, SF::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if let Poll::Ready(svc) = this.fut.poll(cx)? {
Poll::Ready(Ok(MapErr::new(svc, this.f.clone())))
Poll::Ready(Ok(MapErr::new(svc, this.mapper.clone())))
} else {
Poll::Pending
}
@@ -218,25 +217,25 @@ mod tests {
type Error = ();
type Future = Ready<Result<(), ()>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Err(()))
}
fn call(&mut self, _: ()) -> Self::Future {
fn call(&self, _: ()) -> Self::Future {
err(())
}
}
#[actix_rt::test]
async fn test_poll_ready() {
let mut srv = Srv.map_err(|_| "error");
let srv = Srv.map_err(|_| "error");
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Err("error")));
}
#[actix_rt::test]
async fn test_call() {
let mut srv = Srv.map_err(|_| "error");
let srv = Srv.map_err(|_| "error");
let res = srv.call(()).await;
assert!(res.is_err());
assert_eq!(res.err().unwrap(), "error");
@@ -245,7 +244,7 @@ mod tests {
#[actix_rt::test]
async fn test_new_service() {
let new_srv = (|| ok::<_, ()>(Srv)).into_factory().map_err(|_| "error");
let mut srv = new_srv.new_service(&()).await.unwrap();
let srv = new_srv.new_service(&()).await.unwrap();
let res = srv.call(()).await;
assert!(res.is_err());
assert_eq!(res.err().unwrap(), "error");


@@ -1,3 +1,6 @@
// TODO: see if pipeline is necessary
#![allow(dead_code)]
use core::{
marker::PhantomData,
task::{Context, Poll},
@@ -11,7 +14,7 @@ use crate::then::{ThenService, ThenServiceFactory};
use crate::{IntoService, IntoServiceFactory, Service, ServiceFactory};
/// Construct new pipeline with one service in pipeline chain.
pub fn pipeline<I, S, Req>(service: I) -> Pipeline<S, Req>
pub(crate) fn pipeline<I, S, Req>(service: I) -> Pipeline<S, Req>
where
I: IntoService<S, Req>,
S: Service<Req>,
@@ -23,7 +26,7 @@ where
}
/// Construct new pipeline factory with one service factory.
pub fn pipeline_factory<I, SF, Req>(factory: I) -> PipelineFactory<SF, Req>
pub(crate) fn pipeline_factory<I, SF, Req>(factory: I) -> PipelineFactory<SF, Req>
where
I: IntoServiceFactory<SF, Req>,
SF: ServiceFactory<Req>,
@@ -35,7 +38,7 @@ where
}
/// Pipeline service - pipeline allows composing multiple services into one service.
pub struct Pipeline<S, Req> {
pub(crate) struct Pipeline<S, Req> {
service: S,
_phantom: PhantomData<Req>,
}
@@ -146,18 +149,18 @@ impl<S: Service<Req>, Req> Service<Req> for Pipeline<S, Req> {
type Future = S::Future;
#[inline]
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
self.service.poll_ready(ctx)
}
#[inline]
fn call(&mut self, req: Req) -> Self::Future {
fn call(&self, req: Req) -> Self::Future {
self.service.call(req)
}
}
/// Pipeline factory
pub struct PipelineFactory<SF, Req> {
pub(crate) struct PipelineFactory<SF, Req> {
factory: SF,
_phantom: PhantomData<Req>,
}


@@ -1,12 +1,12 @@
use alloc::rc::Rc;
use core::{
cell::RefCell,
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
@@ -15,7 +15,7 @@ use super::{Service, ServiceFactory};
/// another service.
///
/// This is created by the `Pipeline::then` method.
pub(crate) struct ThenService<A, B, Req>(Rc<RefCell<(A, B)>>, PhantomData<Req>);
pub(crate) struct ThenService<A, B, Req>(Rc<(A, B)>, PhantomData<Req>);
impl<A, B, Req> ThenService<A, B, Req> {
/// Create new `.then()` combinator
@@ -24,7 +24,7 @@ impl<A, B, Req> ThenService<A, B, Req> {
A: Service<Req>,
B: Service<Result<A::Response, A::Error>, Error = A::Error>,
{
Self(Rc::new(RefCell::new((a, b))), PhantomData)
Self(Rc::new((a, b)), PhantomData)
}
}
@@ -43,20 +43,20 @@ where
type Error = B::Error;
type Future = ThenServiceResponse<A, B, Req>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let mut srv = self.0.borrow_mut();
let not_ready = !srv.0.poll_ready(cx)?.is_ready();
if !srv.1.poll_ready(cx)?.is_ready() || not_ready {
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let (a, b) = &*self.0;
let not_ready = !a.poll_ready(cx)?.is_ready();
if !b.poll_ready(cx)?.is_ready() || not_ready {
Poll::Pending
} else {
Poll::Ready(Ok(()))
}
}
fn call(&mut self, req: Req) -> Self::Future {
fn call(&self, req: Req) -> Self::Future {
ThenServiceResponse {
state: State::A {
fut: self.0.borrow_mut().0.call(req),
fut: self.0 .0.call(req),
b: Some(self.0.clone()),
},
}
@@ -81,9 +81,8 @@ pin_project! {
A: Service<Req>,
B: Service<Result<A::Response, A::Error>>,
{
A { #[pin] fut: A::Future, b: Option<Rc<RefCell<(A, B)>>> },
A { #[pin] fut: A::Future, b: Option<Rc<(A, B)>> },
B { #[pin] fut: B::Future },
Empty,
}
}
@@ -98,23 +97,14 @@ where
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
StateProj::A { fut, b } => match fut.poll(cx) {
Poll::Ready(res) => {
let b = b.take().unwrap();
this.state.set(State::Empty); // drop fut A
let fut = b.borrow_mut().1.call(res);
this.state.set(State::B { fut });
self.poll(cx)
}
Poll::Pending => Poll::Pending,
},
StateProj::B { fut } => fut.poll(cx).map(|r| {
this.state.set(State::Empty);
r
}),
StateProj::Empty => {
panic!("future must not be polled after it returned `Poll::Ready`")
StateProj::A { fut, b } => {
let res = ready!(fut.poll(cx));
let b = b.take().unwrap();
let fut = b.1.call(res);
this.state.set(State::B { fut });
self.poll(cx)
}
StateProj::B { fut } => fut.poll(cx),
}
}
}
@@ -256,7 +246,11 @@ mod tests {
use futures_util::future::lazy;
use crate::{err, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory};
use crate::{
err, ok,
pipeline::{pipeline, pipeline_factory},
ready, Ready, Service, ServiceFactory,
};
#[derive(Clone)]
struct Srv1(Rc<Cell<usize>>);
@@ -266,12 +260,12 @@ mod tests {
type Error = ();
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Result<&'static str, &'static str>) -> Self::Future {
fn call(&self, req: Result<&'static str, &'static str>) -> Self::Future {
match req {
Ok(msg) => ok(msg),
Err(_) => err(()),
@@ -286,12 +280,12 @@ mod tests {
type Error = ();
type Future = Ready<Result<Self::Response, ()>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.set(self.0.get() + 1);
Poll::Ready(Err(()))
}
fn call(&mut self, req: Result<&'static str, ()>) -> Self::Future {
fn call(&self, req: Result<&'static str, ()>) -> Self::Future {
match req {
Ok(msg) => ok((msg, "ok")),
Err(()) => ok(("srv2", "err")),
@@ -302,7 +296,7 @@ mod tests {
#[actix_rt::test]
async fn test_poll_ready() {
let cnt = Rc::new(Cell::new(0));
let mut srv = pipeline(Srv1(cnt.clone())).then(Srv2(cnt.clone()));
let srv = pipeline(Srv1(cnt.clone())).then(Srv2(cnt.clone()));
let res = lazy(|cx| srv.poll_ready(cx)).await;
assert_eq!(res, Poll::Ready(Err(())));
assert_eq!(cnt.get(), 2);
@@ -311,7 +305,7 @@ mod tests {
#[actix_rt::test]
async fn test_call() {
let cnt = Rc::new(Cell::new(0));
let mut srv = pipeline(Srv1(cnt.clone())).then(Srv2(cnt));
let srv = pipeline(Srv1(cnt.clone())).then(Srv2(cnt));
let res = srv.call(Ok("srv1")).await;
assert!(res.is_ok());
@@ -328,7 +322,7 @@ mod tests {
let cnt2 = cnt.clone();
let blank = move || ready(Ok::<_, ()>(Srv1(cnt2.clone())));
let factory = pipeline_factory(blank).then(move || ready(Ok(Srv2(cnt.clone()))));
let mut srv = factory.new_service(&()).await.unwrap();
let srv = factory.new_service(&()).await.unwrap();
let res = srv.call(Ok("srv1")).await;
assert!(res.is_ok());
assert_eq!(res.unwrap(), ("srv1", "ok"));


@@ -6,12 +6,12 @@ use core::{
task::{Context, Poll},
};
use futures_core::ready;
use pin_project_lite::pin_project;
use crate::transform_err::TransformMapInitErr;
use crate::{IntoServiceFactory, Service, ServiceFactory};
/// Apply transform to a service.
/// Apply a [`Transform`] to a [`Service`].
pub fn apply<T, S, I, Req>(t: T, factory: I) -> ApplyTransform<T, S, Req>
where
I: IntoServiceFactory<S, Req>,
@@ -21,64 +21,51 @@ where
ApplyTransform::new(t, factory.into_factory())
}
/// The `Transform` trait defines the interface of a service factory that wraps inner service
/// during construction.
/// Defines the interface of a service factory that wraps inner service during construction.
///
/// Transform(middleware) wraps inner service and runs during
/// inbound and/or outbound processing in the request/response lifecycle.
/// It may modify request and/or response.
/// Transformers wrap an inner service and run during inbound and/or outbound processing in the
/// service lifecycle. They may modify the request and/or response.
///
/// For example, timeout transform:
/// For example, a timeout service wrapper:
///
/// ```rust,ignore
/// ```ignore
/// pub struct Timeout<S> {
/// service: S,
/// timeout: Duration,
/// }
///
/// impl<S> Service for Timeout<S>
/// where
/// S: Service,
/// {
/// type Request = S::Request;
/// impl<S: Service<Req>, Req> Service<Req> for Timeout<S> {
/// type Response = S::Response;
/// type Error = TimeoutError<S::Error>;
/// type Future = TimeoutServiceResponse<S>;
///
/// fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
/// ready!(self.service.poll_ready(cx)).map_err(TimeoutError::Service)
/// }
/// actix_service::forward_ready!(service);
///
/// fn call(&mut self, req: S::Request) -> Self::Future {
/// fn call(&self, req: Req) -> Self::Future {
/// TimeoutServiceResponse {
/// fut: self.service.call(req),
/// sleep: Delay::new(clock::now() + self.timeout),
/// sleep: Sleep::new(clock::now() + self.timeout),
/// }
/// }
/// }
/// ```
///
/// Timeout service in above example is decoupled from underlying service implementation
/// and could be applied to any service.
/// This wrapper service is decoupled from the underlying service implementation and could be
/// applied to any service.
///
/// The `Transform` trait defines the interface of a Service factory. `Transform`
/// is often implemented for middleware, defining how to construct a
/// middleware Service. A Service that is constructed by the factory takes
/// the Service that follows it during execution as a parameter, assuming
/// ownership of the next Service.
/// The `Transform` trait defines the interface of a service wrapper. `Transform` is often
/// implemented for middleware, defining how to construct a middleware Service. A Service that is
/// constructed by the factory takes the Service that follows it during execution as a parameter,
/// assuming ownership of the next Service.
///
/// Factory for `Timeout` middleware from the above example could look like this:
/// A transform for the `Timeout` middleware could look like this:
///
/// ```rust,,ignore
/// ```ignore
/// pub struct TimeoutTransform {
/// timeout: Duration,
/// }
///
/// impl<S> Transform<S> for TimeoutTransform
/// where
/// S: Service,
/// {
/// type Request = S::Request;
/// impl<S: Service<Req>, Req> Transform<S, Req> for TimeoutTransform {
/// type Response = S::Response;
/// type Error = TimeoutError<S::Error>;
/// type InitError = S::Error;
@@ -86,15 +73,15 @@ where
/// type Future = Ready<Result<Self::Transform, Self::InitError>>;
///
/// fn new_transform(&self, service: S) -> Self::Future {
/// ok(TimeoutService {
/// ready(Ok(Timeout {
/// service,
/// timeout: self.timeout,
/// })
/// }))
/// }
/// }
/// ```
pub trait Transform<S, Req> {
/// Responses given by the service.
/// Responses produced by the service.
type Response;
/// Errors produced by the service.
@@ -111,16 +98,6 @@ pub trait Transform<S, Req> {
/// Creates and returns a new Transform component, asynchronously
fn new_transform(&self, service: S) -> Self::Future;
/// Map this transform's factory error to a different error,
/// returning a new transform service factory.
fn map_init_err<F, E>(self, f: F) -> TransformMapInitErr<Self, S, Req, F, E>
where
Self: Sized,
F: Fn(Self::InitError) -> E + Clone,
{
TransformMapInitErr::new(self, f)
}
}
impl<T, S, Req> Transform<S, Req> for Rc<T>
@@ -129,8 +106,8 @@ where
{
type Response = T::Response;
type Error = T::Error;
type InitError = T::InitError;
type Transform = T::Transform;
type InitError = T::InitError;
type Future = T::Future;
fn new_transform(&self, service: S) -> T::Future {
@@ -144,8 +121,8 @@ where
{
type Response = T::Response;
type Error = T::Error;
type InitError = T::InitError;
type Transform = T::Transform;
type InitError = T::InitError;
type Future = T::Future;
fn new_transform(&self, service: S) -> T::Future {
@@ -153,7 +130,7 @@ where
}
}
/// `Apply` transform to new service
/// Apply a [`Transform`] to a [`Service`].
pub struct ApplyTransform<T, S, Req>(Rc<(T, S)>, PhantomData<Req>);
impl<T, S, Req> ApplyTransform<T, S, Req>
@@ -231,15 +208,63 @@ where
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ApplyTransformFutureStateProj::A { fut } => match fut.poll(cx)? {
Poll::Ready(srv) => {
let fut = this.store.0.new_transform(srv);
this.state.set(ApplyTransformFutureState::B { fut });
self.poll(cx)
}
Poll::Pending => Poll::Pending,
},
ApplyTransformFutureStateProj::A { fut } => {
let srv = ready!(fut.poll(cx))?;
let fut = this.store.0.new_transform(srv);
this.state.set(ApplyTransformFutureState::B { fut });
self.poll(cx)
}
ApplyTransformFutureStateProj::B { fut } => fut.poll(cx),
}
}
}
#[cfg(test)]
mod tests {
use core::time::Duration;
use actix_utils::future::{ready, Ready};
use super::*;
use crate::Service;
// pseudo-doctest for Transform trait
pub struct TimeoutTransform {
timeout: Duration,
}
// pseudo-doctest for Transform trait
impl<S: Service<Req>, Req> Transform<S, Req> for TimeoutTransform {
type Response = S::Response;
type Error = S::Error;
type InitError = S::Error;
type Transform = Timeout<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(Timeout {
service,
_timeout: self.timeout,
}))
}
}
// pseudo-doctest for Transform trait
pub struct Timeout<S> {
service: S,
_timeout: Duration,
}
// pseudo-doctest for Transform trait
impl<S: Service<Req>, Req> Service<Req> for Timeout<S> {
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
crate::forward_ready!(service);
fn call(&self, req: Req) -> Self::Future {
self.service.call(req)
}
}
}


@@ -9,10 +9,8 @@ use pin_project_lite::pin_project;
use super::Transform;
/// Transform for the `map_init_err` combinator, changing the type of a new
/// transform's init error.
///
/// This is created by the `Transform::map_init_err` method.
/// Transform for the [`TransformExt::map_init_err`] combinator, changing the type of a new
/// [`Transform`]'s initialization error.
pub struct TransformMapInitErr<T, S, Req, F, E> {
transform: T,
mapper: F,


@@ -1,49 +0,0 @@
# Changes
## [0.3.3] - 2020-07-14
### Changed
* Update parking_lot to 0.11
## [0.3.2] - 2020-05-20
## Added
* Implement `std::error::Error` for `BlockingError` [#120]
[#120]: https://github.com/actix/actix-net/pull/120
## [0.3.1] - 2019-12-12
### Changed
* Update parking_lot to 0.10
## [0.3.0] - 2019-12-02
### Changed
* Expect `Result` type as a function return type
## [0.2.0] - 2019-11-21
### Changed
* Migrate to `std::future`
## [0.1.2] - 2019-08-05
### Changed
* Update `derive_more` to 0.15
* Update `parking_lot` to 0.9
## [0.1.1] - 2019-06-05
* Update parking_lot
## [0.1.0] - 2019-03-28
* Move threadpool to separate crate


@@ -1,27 +0,0 @@
[package]
name = "actix-threadpool"
version = "0.3.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix thread pool for sync code"
keywords = ["actix", "network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-threadpool/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018"
workspace = ".."
[lib]
name = "actix_threadpool"
path = "src/lib.rs"
[dependencies]
derive_more = "0.99.2"
futures-channel = "0.3.7"
parking_lot = "0.11"
lazy_static = "1.3"
log = "0.4"
num_cpus = "1.10"
threadpool = "1.7"


@@ -1 +0,0 @@
../LICENSE-APACHE

Some files were not shown because too many files have changed in this diff.