Mirror of https://github.com/fafhrd91/actix-net (synced 2025-03-14 16:46:26 +01:00)

Compare commits: connect-v0 ... master (983 commits)

@@ -1,41 +0,0 @@
environment:
  global:
    PROJECT_NAME: actix-net
  matrix:
    # Stable channel
    - TARGET: i686-pc-windows-msvc
      CHANNEL: stable
    - TARGET: x86_64-pc-windows-gnu
      CHANNEL: stable
    - TARGET: x86_64-pc-windows-msvc
      CHANNEL: stable
    # Nightly channel
    - TARGET: i686-pc-windows-msvc
      CHANNEL: nightly
    - TARGET: x86_64-pc-windows-gnu
      CHANNEL: nightly
    - TARGET: x86_64-pc-windows-msvc
      CHANNEL: nightly

# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
  - ps: >-
      If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
        $Env:PATH += ';C:\msys64\mingw64\bin'
      } ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
        $Env:PATH += ';C:\MinGW\bin'
      }
  - curl -sSf -o rustup-init.exe https://win.rustup.rs
  - rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
  - rustc -Vv
  - cargo -V

# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false

# Equivalent to Travis' `script` phase
test_script:
  - cargo clean
  - cargo test

.cargo/config.toml (new file, 15 lines)
@@ -0,0 +1,15 @@
[alias]
lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo"
lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"

# just check the library (without dev deps)
ci-check-min = "hack --workspace check --no-default-features"
ci-check-lib = "hack --workspace --feature-powerset --depth=2 --exclude-features=io-uring check"
ci-check-lib-linux = "hack --workspace --feature-powerset --depth=2 check"

# check everything
ci-check = "hack --workspace --feature-powerset --depth=2 --exclude-features=io-uring check --tests --examples"
ci-check-linux = "hack --workspace --feature-powerset --depth=2 check --tests --examples"

# tests avoiding io-uring feature
ci-test = "hack --feature-powerset --depth=2 --exclude-features=io-uring test --lib --tests --no-fail-fast -- --nocapture"

.github/PULL_REQUEST_TEMPLATE.md (new file, 25 lines)
@@ -0,0 +1,25 @@
## PR Type

<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->

INSERT_PR_TYPE

## PR Checklist

Check your PR fulfills the following:

<!-- For draft PRs check the boxes as you complete them. -->

- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt

## Overview

<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->

<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->

.github/dependabot.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
version: 2
updates:
  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: weekly
  - package-ecosystem: cargo
    directory: /
    schedule:
      interval: weekly

.github/workflows/ci-post-merge.yml (new file, 129 lines)
@@ -0,0 +1,129 @@
name: CI (post-merge)

on:
  push:
    branches: [master]

permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build_and_test_nightly:
    strategy:
      fail-fast: false
      matrix:
        # prettier-ignore
        target:
          - { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
          - { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
          - { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
          - { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
        version:
          - nightly

    name: ${{ matrix.target.name }} / ${{ matrix.version }}
    runs-on: ${{ matrix.target.os }}

    env: {}

    steps:
      - name: Setup Routing
        if: matrix.target.os == 'macos-latest'
        run: sudo ifconfig lo0 alias 127.0.0.3

      - uses: actions/checkout@v4

      - name: Free Disk Space
        if: matrix.target.os == 'ubuntu-latest'
        run: ./scripts/free-disk-space.sh

      - name: Setup mold linker
        if: matrix.target.os == 'ubuntu-latest'
        uses: rui314/setup-mold@v1

      - name: Install nasm
        if: matrix.target.os == 'windows-latest'
        uses: ilammy/setup-nasm@v1.5.2

      - name: Install OpenSSL
        if: matrix.target.os == 'windows-latest'
        shell: bash
        run: |
          set -e
          choco install openssl --version=1.1.1.2100 -y --no-progress
          echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
          echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV

      - name: Install Rust (${{ matrix.version }})
        uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
        with:
          toolchain: ${{ matrix.version }}

      - name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
        uses: taiki-e/install-action@v2.49.19
        with:
          tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean

      - name: check lib
        if: >
          matrix.target.os != 'ubuntu-latest'
          && matrix.target.triple != 'x86_64-pc-windows-gnu'
        run: cargo ci-check-lib
      - name: check lib
        if: matrix.target.os == 'ubuntu-latest'
        run: cargo ci-check-lib-linux
      - name: check lib
        if: matrix.target.triple == 'x86_64-pc-windows-gnu'
        run: cargo ci-check-min

      - name: check full
        # TODO: compile OpenSSL and run tests on MinGW
        if: >
          matrix.target.os != 'ubuntu-latest'
          && matrix.target.triple != 'x86_64-pc-windows-gnu'
        run: cargo ci-check
      - name: check all
        if: matrix.target.os == 'ubuntu-latest'
        run: cargo ci-check-linux

      - name: tests
        run: just test

      # TODO: re-instate some io-uring tests PRs
      # - name: tests
      #   if: matrix.target.os == 'ubuntu-latest'
      #   run: >-
      #     sudo bash -c "
      #     ulimit -Sl 512
      #     && ulimit -Hl 512
      #     && PATH=$PATH:/usr/share/rust/.cargo/bin
      #     && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-rustls-020
      #     && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-rustls-021
      #     && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo ci-test-linux
      #     "

      - name: CI cache clean
        run: cargo-ci-cache-clean

  minimal-versions:
    name: minimal versions
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install Rust (nightly)
        uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
        with:
          toolchain: nightly

      - name: Install cargo-hack & cargo-minimal-versions
        uses: taiki-e/install-action@v2.49.19
        with:
          tool: cargo-hack,cargo-minimal-versions

      - name: Check With Minimal Versions
        run: cargo minimal-versions check

.github/workflows/ci.yml (new file, 133 lines)
@@ -0,0 +1,133 @@
name: CI

on:
  pull_request: {}
  merge_group: { types: [checks_requested] }
  push: { branches: [master] }

permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  read_msrv:
    name: Read MSRV
    uses: actions-rust-lang/msrv/.github/workflows/msrv.yml@v0.1.0

  build_and_test:
    needs:
      - read_msrv

    strategy:
      fail-fast: false
      matrix:
        # prettier-ignore
        target:
          - { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
          - { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
          - { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc }
          - { name: Windows (MinGW), os: windows-latest, triple: x86_64-pc-windows-gnu }
        version:
          - { name: msrv, version: "${{ needs.read_msrv.outputs.msrv }}" }
          - { name: stable, version: stable }

    name: ${{ matrix.target.name }} / ${{ matrix.version.name }}
    runs-on: ${{ matrix.target.os }}

    env: {}

    steps:
      - name: Setup Routing
        if: matrix.target.os == 'macos-latest'
        run: sudo ifconfig lo0 alias 127.0.0.3

      - uses: actions/checkout@v4

      - name: Free Disk Space
        if: matrix.target.os == 'ubuntu-latest'
        run: ./scripts/free-disk-space.sh

      - name: Setup mold linker
        if: matrix.target.os == 'ubuntu-latest'
        uses: rui314/setup-mold@v1

      - name: Install nasm
        if: matrix.target.os == 'windows-latest'
        uses: ilammy/setup-nasm@v1.5.2

      - name: Install OpenSSL
        if: matrix.target.os == 'windows-latest'
        shell: bash
        run: |
          set -e
          choco install openssl --version=1.1.1.2100 -y --no-progress
          echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV
          echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV

      - name: Install Rust (${{ matrix.version.name }})
        uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
        with:
          toolchain: ${{ matrix.version.version }}

      - name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean
        uses: taiki-e/install-action@v2.49.19
        with:
          tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean

      - name: Generate Cargo.lock
        run: cargo generate-lockfile

      - name: workaround MSRV issues
        if: matrix.version.name == 'msrv'
        run: just downgrade-for-msrv

      - name: check lib
        if: >
          matrix.target.os != 'ubuntu-latest'
          && matrix.target.triple != 'x86_64-pc-windows-gnu'
        run: cargo ci-check-lib
      - name: check lib
        if: matrix.target.os == 'ubuntu-latest'
        run: cargo ci-check-lib-linux
      - name: check lib
        if: matrix.target.triple != 'x86_64-pc-windows-gnu'
        run: cargo ci-check-min

      - name: check full
        # TODO: compile OpenSSL and run tests on MinGW
        if: >
          matrix.target.os != 'ubuntu-latest'
          && matrix.target.triple != 'x86_64-pc-windows-gnu'
        run: cargo ci-check
      - name: check all
        if: matrix.target.os == 'ubuntu-latest'
        run: cargo ci-check-linux

      - name: tests
        run: just test

      - name: CI cache clean
        run: cargo-ci-cache-clean

  docs:
    name: Documentation
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Install Rust (nightly)
        uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
        with:
          toolchain: nightly

      - name: Install just
        uses: taiki-e/install-action@v2.49.19
        with:
          tool: just

      - name: doc tests
        run: just test-docs

.github/workflows/coverage.yml (new file, 39 lines)
@@ -0,0 +1,39 @@
name: Coverage

on:
  push:
    branches: [master]

permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  coverage:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install Rust
        uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
        with:
          components: llvm-tools-preview

      - name: Install cargo-llvm-cov
        uses: taiki-e/install-action@v2.49.19
        with:
          tool: cargo-llvm-cov

      - name: Generate code coverage
        run: cargo llvm-cov --workspace --all-features --codecov --output-path codecov.json

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v5.4.0
        with:
          files: codecov.json
          fail_ci_if_error: true
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

.github/workflows/lint.yml (new file, 69 lines)
@@ -0,0 +1,69 @@
name: Lint

on:
  pull_request: {}
  merge_group: { types: [checks_requested] }

permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  fmt:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
        with:
          toolchain: nightly
          components: rustfmt

      - name: Rustfmt Check
        run: cargo fmt --all -- --check

  clippy:
    permissions:
      contents: write

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
        with: { components: clippy }

      - uses: giraffate/clippy-action@v1.0.1
        with:
          reporter: "github-pr-check"
          github_token: ${{ secrets.GITHUB_TOKEN }}
          clippy_flags: --workspace --all-features --tests --examples --bins -- -Dclippy::todo -Aunknown_lints

  check-external-types:
    if: false # rustdoc mismatch currently
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install Rust (${{ vars.RUST_VERSION_EXTERNAL_TYPES }})
        uses: actions-rust-lang/setup-rust-toolchain@v1.11.0
        with:
          toolchain: ${{ vars.RUST_VERSION_EXTERNAL_TYPES }}

      - name: Install just
        uses: taiki-e/install-action@v2.49.19
        with:
          tool: just

      - name: Install cargo-check-external-types
        uses: taiki-e/cache-cargo-install-action@v2.1.1
        with:
          tool: cargo-check-external-types

      - name: check external types
        run: just check-external-types-all +${{ vars.RUST_VERSION_EXTERNAL_TYPES }}

.gitignore (7 lines changed)
@@ -1,4 +1,3 @@
Cargo.lock
target/
guide/build/
/gh-pages
@@ -12,3 +11,9 @@ guide/build/

# These are backup files generated by rustfmt
**/*.rs.bk

# IDEs
.idea

# direnv
/.direnv

.rustfmt.toml (new file, 3 lines)
@@ -0,0 +1,3 @@
group_imports = "StdExternalCrate"
imports_granularity = "Crate"
use_field_init_shorthand = true

.taplo.toml (new file, 29 lines)
@@ -0,0 +1,29 @@
exclude = ["target/*"]
include = ["**/*.toml"]

[formatting]
column_width = 110

[[rule]]
include = ["**/Cargo.toml"]
keys = [
  "dependencies",
  "*-dependencies",
  "workspace.dependencies",
  "workspace.*-dependencies",
  "target.*.dependencies",
  "target.*.*-dependencies",
]
formatting.reorder_keys = true

[[rule]]
include = ["**/Cargo.toml"]
keys = [
  "dependencies.*",
  "*-dependencies.*",
  "workspace.dependencies.*",
  "workspace.*-dependencies.*",
  "target.*.dependencies",
  "target.*.*-dependencies",
]
formatting.reorder_keys = false

.travis.yml (deleted, 72 lines)
@@ -1,72 +0,0 @@
language: rust
sudo: required
dist: trusty

cache:
  cargo: true
  apt: true

matrix:
  include:
    - rust: stable
    - rust: beta
    - rust: nightly-2019-03-02
  allow_failures:
    - rust: nightly-2019-03-02

env:
  global:
    - RUSTFLAGS="-C link-dead-code"
    - OPENSSL_VERSION=openssl-1.0.2

before_install:
  - sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
  - sudo apt-get update -qq
  - sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev

before_cache: |
  if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-03-02" ]]; then
    RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install cargo-tarpaulin
  fi

# Add clippy
before_script:
  - export PATH=$PATH:~/.cargo/bin

script:
  - |
    if [[ "$TRAVIS_RUST_VERSION" != "nightly-2019-03-02" ]]; then
      cargo clean
      cargo test --features="ssl,tls,rust-tls" -- --nocapture
      cd actix-codec && cargo test && cd ..
      cd actix-service && cargo test && cd ..
      cd actix-server && cargo test --all-features -- --nocapture && cd ..
      cd actix-rt && cargo test && cd ..
      cd actix-connect && cargo test && cd ..
      cd actix-utils && cargo test && cd ..
      cd router && cargo test && cd ..
    fi

after_success:
  - |
    if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-03-02" ]]; then
      #cd actix-service && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
      #cd actix-rt && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
      #cd actix-connect && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
      #cd actix-codec && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
      #cd actix-server && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
      #cd actix-utils && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..
      #cd router && cargo tarpaulin --out Xml && bash <(curl -s https://codecov.io/bash) && cd ..

      cd actix-service && cargo tarpaulin --out Xml && cd ..
      #cd actix-rt && cargo tarpaulin --out Xml && cd ..
      cd actix-connect && cargo tarpaulin --out Xml && cd ..
      #cd actix-codec && cargo tarpaulin --out Xml && cd ..
      #cd actix-server && cargo tarpaulin --out Xml && cd ..
      cd actix-utils && cargo tarpaulin --out Xml && cd ..
      cd router && cargo tarpaulin --out Xml && cd ..

      # cargo tarpaulin --all --all-features --out Xml
      echo "Uploaded code coverage"
      bash <(curl -s https://codecov.io/bash)
    fi

CHANGES.md (deleted, 68 lines)
@@ -1,68 +0,0 @@
# Changes

## [0.3.0] - xxx

* Split `Service` trait to separate crate

* Use new `Service<Request>` trait

## [0.2.4] - 2018-11-21

### Added

* Allow to skip name resolution stage in Connector

## [0.2.3] - 2018-11-17

### Added

* Framed::is_write_buf_empty() checks if write buffer is flushed

## [0.2.2] - 2018-11-14

### Added

* Add low/high caps to Framed

### Changed

* Refactor Connector and Resolver services

### Fixed

* Fix wrong service to socket binding

## [0.2.0] - 2018-11-08

### Added

* Timeout service

* Added ServiceConfig and ServiceRuntime for server service configuration

### Changed

* Connector has been refactored

* timer and LowResTimer renamed to time and LowResTime

* Refactored `Server::configure()` method

## [0.1.1] - 2018-10-10

### Changed

- Set actix min version - 0.7.5

- Set trust-dns min version

## [0.1.0] - 2018-10-08

* Initial impl

@@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
- The use of sexualized language or imagery and unwelcome sexual attention or advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

@@ -34,10 +34,13 @@ This Code of Conduct applies both within project spaces and in public spaces whe

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

[@robjtede]: https://github.com/robjtede
[@johntitor]: https://github.com/JohnTitor

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

Cargo.lock (generated, new file, 2914 lines)
File diff suppressed because it is too large.

Cargo.toml (66 lines changed)
@@ -1,38 +1,44 @@
[package]
name = "actix-net"
version = "0.3.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix net - framework for the compisible network services for Rust"
readme = "README.md"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-net/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018"

[workspace]
resolver = "2"
members = [
  "actix-codec",
  "actix-connect",
  "actix-macros",
  "actix-rt",
  "actix-service",
  "actix-server",
  "actix-server-config",
  "actix-test-server",
  "actix-service",
  "actix-tls",
  "actix-tracing",
  "actix-utils",
  "router",
  "bytestring",
  "local-channel",
  "local-waker",
]

[dev-dependencies]
actix-service = "0.3.3"
actix-codec = "0.1.1"
actix-rt = "0.2.0"
actix-server = { path="actix-server", features=["ssl"] }
env_logger = "0.6"
futures = "0.1.25"
openssl = "0.10"
tokio-tcp = "0.1"
tokio-openssl = "0.3"
[workspace.package]
license = "MIT OR Apache-2.0"
edition = "2021"
rust-version = "1.71.1"

[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-macros = { path = "actix-macros" }
actix-rt = { path = "actix-rt" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }
local-channel = { path = "local-channel" }
local-waker = { path = "local-waker" }

[profile.release]
lto = true
opt-level = 3
codegen-units = 1

[workspace.lints.rust]
rust_2018_idioms = "deny"
nonstandard-style = "deny"
future_incompatible = "deny"
missing_docs = { level = "warn", priority = -1 }

@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2017-NOW Nikolay Kim
Copyright 2017-NOW Actix Team

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
Copyright (c) 2017 Nikolay Kim
Copyright (c) 2017-NOW Actix Team

Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated

README.md (69 lines changed)
@@ -1,69 +1,30 @@
# Actix net [](https://travis-ci.org/actix/actix-net) [](https://codecov.io/gh/actix/actix-net) [](https://crates.io/crates/actix-net) [](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Actix Net

Actix net - framework for composable network services
> A collection of lower-level libraries for composable network services.

## Documentation & community resources
[](https://github.com/actix/actix-net/actions/workflows/ci.yml)
[](https://codecov.io/gh/actix/actix-net)
[](https://discord.gg/NWpN5mmg3x)
[](https://deps.rs/repo/github/actix/actix-net)

* [API Documentation (Development)](https://actix.rs/actix-net/actix_net/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-net](https://crates.io/crates/actix-net)
* Minimum supported Rust version: 1.32 or later
## Examples

## Example
See example folders for [`actix-server`](./actix-server/examples) and [`actix-tls`](./actix-tls/examples).

```rust
fn main() -> io::Result<()> {
    // load ssl keys
    let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    builder.set_private_key_file("./examples/key.pem", SslFiletype::PEM).unwrap();
    builder.set_certificate_chain_file("./examples/cert.pem").unwrap();
    let acceptor = builder.build();
## MSRV

    let num = Arc::new(AtomicUsize::new(0));

    // bind socket address and start workers. By default server uses number of
    // available logical cpu as threads count. actix net start separate
    // instances of service pipeline in each worker.
    Server::build()
        .bind(
            // configure service pipeline
            "basic", "0.0.0.0:8443",
            move || {
                let num = num.clone();
                let acceptor = acceptor.clone();

                // service for converting incoming TcpStream to a SslStream<TcpStream>
                fn_service(move |stream: Io<tokio_tcp::TcpStream>| {
                    SslAcceptorExt::accept_async(&acceptor, stream.into_parts().0)
                        .map_err(|e| println!("Openssl error: {}", e))
                })
                // .and_then() combinator uses other service to convert incoming `Request` to a
                // `Response` and then uses that response as an input for next
                // service. in this case, on success we use `logger` service
                .and_then(fn_service(logger))
                // Next service counts number of connections
                .and_then(move |_| {
                    let num = num.fetch_add(1, Ordering::Relaxed);
                    println!("got ssl connection {:?}", num);
                    future::ok(())
                })
            },
        )?
        .run()
}
```
Crates in this repo currently have a Minimum Supported Rust Version (MSRV) of 1.65. As a policy, we permit MSRV increases in non-breaking releases.

## License

This project is licensed under either of
The crates in repo are licensed under either of:

* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))

at your option.

## Code of Conduct

Contribution to the actix-net crate is organized under the terms of the
Contributor Covenant, the maintainer of actix-net, @fafhrd91, promises to
intervene to uphold that code of conduct.
Contribution to the actix-net repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.
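
The old inline OpenSSL example above was dropped in favor of pointing at the per-crate example folders. For orientation, a minimal sketch of a plain TCP service in the spirit of those `actix-server` examples; it assumes the post-rewrite APIs (actix-rt 2.x, actix-server 2.x, actix-service 2.x) and is illustrative rather than a copy of any example in the repo:

```rust
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        // Bind a named service; the closure is called per worker to build a service factory.
        .bind("echo", ("127.0.0.1", 8080), || {
            fn_service(|_stream: TcpStream| async move {
                // Handle the accepted connection here.
                Ok::<_, std::io::Error>(())
            })
        })?
        .run()
        .await
}
```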

@@ -1,10 +1,85 @@
# Changes

## [0.1.0] - 2019-03-06
## Unreleased

* Added `FramedParts::with_read_buffer()` method.
- Minimum supported Rust version (MSRV) is now 1.71.

## 0.5.2

## [0.1.0] - 2018-12-09
- Minimum supported Rust version (MSRV) is now 1.65.

* Move codec to separate crate
## 0.5.1

- Logs emitted now use the `tracing` crate with `log` compatibility.
- Minimum supported Rust version (MSRV) is now 1.49.

## 0.5.0

- Updated `tokio-util` dependency to `0.7.0`.

## 0.4.2

- No significant changes since `0.4.1`.

## 0.4.1

- Added `LinesCodec`.
- `Framed::poll_ready` flushes when the buffer is full.

## 0.4.0

- No significant changes since v0.4.0-beta.1.

## 0.4.0-beta.1

- Replace `pin-project` with `pin-project-lite`.
- Upgrade `tokio` dependency to `1`.
- Upgrade `tokio-util` dependency to `0.6`.
- Upgrade `bytes` dependency to `1`.

## 0.3.0

- No changes from beta 2.

## 0.3.0-beta.2

- Remove unused type parameter from `Framed::replace_codec`.

## 0.3.0-beta.1

- Use `.advance()` instead of `.split_to()`.
- Upgrade `tokio-util` to `0.3`.
- Improve `BytesCodec::encode()` performance.
- Simplify `BytesCodec::decode()`.
- Rename methods on `Framed` to better describe their use.
- Add method on `Framed` to get a pinned reference to the underlying I/O.
- Add method on `Framed` check emptiness of read buffer.

## 0.2.0

- Use specific futures dependencies.

## 0.2.0-alpha.4

- Fix buffer remaining capacity calculation.

## 0.2.0-alpha.3

- Use tokio 0.2.
- Fix low/high watermark for write/read buffers.

## 0.2.0-alpha.2

- Migrated to `std::future`.

## 0.1.2

- Added `Framed::map_io()` method.

## 0.1.1

- Added `FramedParts::with_read_buffer()` method.

## 0.1.0

- Move codec to separate crate.

@@ -1,25 +1,36 @@
[package]
name = "actix-codec"
version = "0.1.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Utilities for encoding and decoding frames"
version = "0.5.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>", "Rob Ede <robjtede@icloud.com>"]
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec/"
repository = "https://github.com/actix/actix-net"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018"
workspace = "../"
license = "MIT OR Apache-2.0"
edition.workspace = true
rust-version.workspace = true

[lib]
name = "actix_codec"
path = "src/lib.rs"
[package.metadata.cargo_check_external_types]
allowed_external_types = ["bytes::*", "futures_core::*", "futures_sink::*", "tokio::*", "tokio_util::*"]

[dependencies]
bytes = "0.4"
futures = "0.1.24"
tokio-io = "0.1"
tokio-codec = "0.1"
log = "0.4"
bitflags = "2"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
memchr = "2.3"
pin-project-lite = "0.2"
tokio = "1.23.1"
tokio-util = { version = "0.7", features = ["codec", "io"] }
tracing = { version = "0.1.30", default-features = false, features = ["log"] }

[dev-dependencies]
criterion = { version = "0.5", features = ["html_reports"] }
tokio-test = "0.4.2"

[[bench]]
name = "lines"
harness = false

[lints]
workspace = true

actix-codec/LICENSE-APACHE (new symbolic link)
@@ -0,0 +1 @@
../LICENSE-APACHE

actix-codec/LICENSE-MIT (new symbolic link)
@@ -0,0 +1 @@
../LICENSE-MIT

actix-codec/benches/lines.rs (new file, 59 lines)
@@ -0,0 +1,59 @@
#![allow(missing_docs)]

use bytes::BytesMut;
use criterion::{criterion_group, criterion_main, Criterion};

const INPUT: &[u8] = include_bytes!("./lorem.txt");

fn bench_lines_codec(c: &mut Criterion) {
    let mut decode_group = c.benchmark_group("lines decode");

    decode_group.bench_function("actix", |b| {
        b.iter(|| {
            use actix_codec::Decoder as _;

            let mut codec = actix_codec::LinesCodec::default();
            let mut buf = BytesMut::from(INPUT);
            while let Ok(Some(_bytes)) = codec.decode_eof(&mut buf) {}
        });
    });

    decode_group.bench_function("tokio", |b| {
        b.iter(|| {
            use tokio_util::codec::Decoder as _;

            let mut codec = tokio_util::codec::LinesCodec::new();
            let mut buf = BytesMut::from(INPUT);
            while let Ok(Some(_bytes)) = codec.decode_eof(&mut buf) {}
        });
    });

    decode_group.finish();

    let mut encode_group = c.benchmark_group("lines encode");

    encode_group.bench_function("actix", |b| {
        b.iter(|| {
            use actix_codec::Encoder as _;

            let mut codec = actix_codec::LinesCodec::default();
            let mut buf = BytesMut::new();
            codec.encode("123", &mut buf).unwrap();
        });
    });

    encode_group.bench_function("tokio", |b| {
        b.iter(|| {
            use tokio_util::codec::Encoder as _;

            let mut codec = tokio_util::codec::LinesCodec::new();
            let mut buf = BytesMut::new();
            codec.encode("123", &mut buf).unwrap();
        });
    });

    encode_group.finish();
}

criterion_group!(benches, bench_lines_codec);
criterion_main!(benches);
5
actix-codec/benches/lorem.txt
Normal file
5
actix-codec/benches/lorem.txt
Normal file
@ -0,0 +1,5 @@
|
||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. In tortor quam, pulvinar sit amet vestibulum eget, tincidunt non urna. Sed eu sem in felis malesuada venenatis. Suspendisse volutpat aliquet nisi, in condimentum nibh convallis id. Quisque gravida felis scelerisque ipsum aliquam consequat. Praesent libero odio, malesuada vitae odio quis, aliquam aliquet enim. In fringilla ut turpis nec pharetra. Duis eu posuere metus. Sed a aliquet massa. Mauris non tempus mi, quis mattis libero. Vivamus ornare ex at semper cursus. Vestibulum sed facilisis erat, aliquet mollis est. In interdum, magna iaculis ultricies elementum, mi ante vestibulum mauris, nec viverra turpis lorem quis ante. Proin in auctor erat. Vivamus dictum congue massa, fermentum bibendum leo pretium quis. Integer dapibus sodales ligula, sit amet imperdiet felis suscipit eu. Phasellus non ornare enim.
|
||||
Nam feugiat neque sit amet hendrerit rhoncus. Nunc suscipit molestie vehicula. Aenean vulputate porttitor augue, sit amet molestie dolor volutpat vitae. Nulla vitae condimentum eros. Aliquam tristique purus at metus lacinia egestas. Cras euismod lorem eu orci lobortis, sed tincidunt nisl laoreet. Ut suscipit fermentum mi, et euismod tortor. Pellentesque vitae tempor quam, sed dignissim mi. Suspendisse luctus lacus vitae ligula blandit vehicula. Quisque interdum iaculis tincidunt. Nunc elementum mi vitae tempor placerat. Suspendisse potenti. Donec blandit laoreet ipsum, quis rhoncus velit vulputate sed.
Aliquam suscipit lectus eros, at maximus dolor efficitur quis. Integer blandit tortor orci, nec mattis nunc eleifend ac. Mauris pharetra vel quam quis lacinia. Duis lobortis condimentum nunc ut facilisis. Praesent arcu nisi, porta sit amet viverra sit amet, pellentesque ut nisi. Nunc gravida tortor eu ligula tempus, in interdum magna pretium. Fusce eu ornare sapien. Nullam pellentesque cursus eros. Nam orci massa, faucibus eget leo eget, elementum vulputate erat. Fusce vehicula augue et dui hendrerit vulputate. Mauris neque lacus, porttitor ut condimentum id, efficitur ac neque. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Donec accumsan, lectus fermentum elementum tristique, ipsum tortor mollis ante, non lacinia nibh ex quis sapien.
Donec pharetra, elit eget rutrum luctus, urna ligula facilisis lorem, sit amet rhoncus ante est eu mi. Vestibulum vestibulum ultricies interdum. Nulla tincidunt ante non hendrerit venenatis. Curabitur vestibulum turpis erat, id efficitur quam venenatis eu. Fusce nulla sem, dapibus vel quam feugiat, ornare fermentum ligula. Praesent tempus tincidunt mauris, non pellentesque felis varius in. Aenean eu arcu ligula. Morbi dapibus maximus nulla a pharetra. Fusce leo metus, luctus ut cursus non, sollicitudin non lectus. Integer pellentesque eleifend erat, vel gravida purus tempus a. Mauris id vestibulum quam. Nunc vitae ullamcorper metus, pharetra placerat enim. Fusce in ultrices nisl. Curabitur justo mauris, dignissim in aliquam sit amet, sollicitudin ut risus. Cras tempor rutrum justo, non tincidunt est maximus at.
Aliquam ac velit tincidunt, ullamcorper velit sit amet, pulvinar nisi. Nullam rhoncus rhoncus egestas. Cras ac luctus nisi. Mauris sit amet risus at magna volutpat ultrices quis ac dui. Aliquam condimentum tellus purus, vel sagittis odio vulputate at. Sed ut finibus tellus. Aliquam tincidunt vehicula diam.
@ -1,20 +1,19 @@
use std::io;

use bytes::{Bytes, BytesMut};
use tokio_codec::{Decoder, Encoder};
use bytes::{Buf, Bytes, BytesMut};

/// Bytes codec.
///
/// Reads/Writes chunks of bytes from a stream.
use super::{Decoder, Encoder};

/// Bytes codec. Reads/writes chunks of bytes from a stream.
#[derive(Debug, Copy, Clone)]
pub struct BytesCodec;

impl Encoder for BytesCodec {
    type Item = Bytes;
impl Encoder<Bytes> for BytesCodec {
    type Error = io::Error;

    #[inline]
    fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> Result<(), Self::Error> {
        dst.extend_from_slice(&item[..]);
        dst.extend_from_slice(item.chunk());
        Ok(())
    }
}

@ -27,7 +26,7 @@ impl Decoder for BytesCodec {
        if src.is_empty() {
            Ok(None)
        } else {
            Ok(Some(src.take()))
            Ok(Some(src.split()))
        }
    }
}
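
A minimal usage sketch of the migrated `BytesCodec` API (not part of the diff; it assumes the `Encoder<Bytes>`/`Decoder` trait shapes shown above):

use actix_codec::{BytesCodec, Decoder as _, Encoder as _};
use bytes::{Bytes, BytesMut};

fn main() -> std::io::Result<()> {
    let mut codec = BytesCodec;
    let mut buf = BytesMut::new();

    // The frame type is now a parameter of `Encoder` rather than an associated `Item` type.
    codec.encode(Bytes::from_static(b"ping"), &mut buf)?;

    // `decode` drains the whole buffer via `split()`, the bytes 0.5+ name for the old `take()`.
    let frame = codec.decode(&mut buf)?;
    assert_eq!(frame.as_deref(), Some(&b"ping"[..]));

    Ok(())
}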
@ -1,207 +1,281 @@
|
||||
#![allow(deprecated)]
|
||||
use std::{
|
||||
fmt, io,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use std::fmt;
|
||||
use std::io::{self, Read, Write};
|
||||
use bitflags::bitflags;
|
||||
use bytes::{Buf, BytesMut};
|
||||
use futures_core::{ready, Stream};
|
||||
use futures_sink::Sink;
|
||||
use pin_project_lite::pin_project;
|
||||
|
||||
use bytes::BytesMut;
|
||||
use futures::{Poll, Sink, StartSend, Stream};
|
||||
use tokio_codec::{Decoder, Encoder};
|
||||
use tokio_io::{AsyncRead, AsyncWrite};
|
||||
|
||||
use super::framed_read::{framed_read2, framed_read2_with_buffer, FramedRead2};
|
||||
use super::framed_write::{framed_write2, framed_write2_with_buffer, FramedWrite2};
|
||||
use crate::{AsyncRead, AsyncWrite, Decoder, Encoder};
|
||||
|
||||
/// Low-water mark
|
||||
const LW: usize = 1024;
|
||||
/// High-water mark
|
||||
const HW: usize = 8 * 1024;
|
||||
|
||||
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
|
||||
/// the `Encoder` and `Decoder` traits to encode and decode frames.
|
||||
///
|
||||
/// You can create a `Framed` instance by using the `AsyncRead::framed` adapter.
|
||||
pub struct Framed<T, U> {
|
||||
inner: FramedRead2<FramedWrite2<Fuse<T, U>>>,
|
||||
bitflags! {
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
struct Flags: u8 {
|
||||
const EOF = 0b0001;
|
||||
const READABLE = 0b0010;
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Fuse<T, U>(pub T, pub U);
|
||||
pin_project! {
|
||||
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using the `Encoder` and
|
||||
/// `Decoder` traits to encode and decode frames.
|
||||
///
|
||||
/// Raw I/O objects work with byte sequences, but higher-level code usually wants to batch these
|
||||
/// into meaningful chunks, called "frames". This method layers framing on top of an I/O object,
|
||||
/// by using the `Encoder`/`Decoder` traits to handle encoding and decoding of message frames.
|
||||
/// Note that the incoming and outgoing frame types may be distinct.
|
||||
pub struct Framed<T, U> {
|
||||
#[pin]
|
||||
io: T,
|
||||
codec: U,
|
||||
flags: Flags,
|
||||
read_buf: BytesMut,
|
||||
write_buf: BytesMut,
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Framed<T, U>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite,
|
||||
U: Decoder + Encoder,
|
||||
U: Decoder,
|
||||
{
|
||||
/// Provides a `Stream` and `Sink` interface for reading and writing to this
|
||||
/// `Io` object, using `Decode` and `Encode` to read and write the raw data.
|
||||
///
|
||||
/// Raw I/O objects work with byte sequences, but higher-level code usually
|
||||
/// wants to batch these into meaningful chunks, called "frames". This
|
||||
/// method layers framing on top of an I/O object, by using the `Codec`
|
||||
/// traits to handle encoding and decoding of message frames. Note that
|
||||
/// the incoming and outgoing frame types may be distinct.
|
||||
///
|
||||
/// This function returns a *single* object that is both `Stream` and
|
||||
/// `Sink`; grouping this into a single object is often useful for layering
|
||||
/// things like gzip or TLS, which require both read and write access to the
|
||||
/// underlying object.
|
||||
///
|
||||
/// If you want to work more directly with the streams and sink, consider
|
||||
/// calling `split` on the `Framed` returned by this method, which will
|
||||
/// break them into separate objects, allowing them to interact more easily.
|
||||
pub fn new(inner: T, codec: U) -> Framed<T, U> {
|
||||
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
|
||||
/// a single object is often useful for layering things like gzip or TLS, which require both
|
||||
/// read and write access to the underlying object.
|
||||
pub fn new(io: T, codec: U) -> Framed<T, U> {
|
||||
Framed {
|
||||
inner: framed_read2(framed_write2(Fuse(inner, codec), LW, HW)),
|
||||
io,
|
||||
codec,
|
||||
flags: Flags::empty(),
|
||||
read_buf: BytesMut::with_capacity(HW),
|
||||
write_buf: BytesMut::with_capacity(HW),
|
||||
}
|
||||
}
|
||||
|
||||
/// Same as `Framed::new()` with ability to specify write buffer low/high capacity watermarks.
|
||||
pub fn new_with_caps(inner: T, codec: U, lw: usize, hw: usize) -> Framed<T, U> {
|
||||
debug_assert!((lw < hw) && hw != 0);
|
||||
Framed {
|
||||
inner: framed_read2(framed_write2(Fuse(inner, codec), lw, hw)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Force send item
|
||||
pub fn force_send(
|
||||
&mut self,
|
||||
item: <U as Encoder>::Item,
|
||||
) -> Result<(), <U as Encoder>::Error> {
|
||||
self.inner.get_mut().force_send(item)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Framed<T, U> {
|
||||
/// Provides a `Stream` and `Sink` interface for reading and writing to this
|
||||
/// `Io` object, using `Decode` and `Encode` to read and write the raw data.
|
||||
///
|
||||
/// Raw I/O objects work with byte sequences, but higher-level code usually
|
||||
/// wants to batch these into meaningful chunks, called "frames". This
|
||||
/// method layers framing on top of an I/O object, by using the `Codec`
|
||||
/// traits to handle encoding and decoding of message frames. Note that
|
||||
/// the incoming and outgoing frame types may be distinct.
|
||||
///
|
||||
/// This function returns a *single* object that is both `Stream` and
|
||||
/// `Sink`; grouping this into a single object is often useful for layering
|
||||
/// things like gzip or TLS, which require both read and write access to the
|
||||
/// underlying object.
|
||||
///
|
||||
/// This object takes a stream, a read buffer and a write buffer. These
|
||||
/// fields can be obtained from an existing `Framed` with the
|
||||
/// `into_parts` method.
|
||||
///
|
||||
/// If you want to work more directly with the streams and sink, consider
|
||||
/// calling `split` on the `Framed` returned by this method, which will
|
||||
/// break them into separate objects, allowing them to interact more easily.
|
||||
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
|
||||
Framed {
|
||||
inner: framed_read2_with_buffer(
|
||||
framed_write2_with_buffer(
|
||||
Fuse(parts.io, parts.codec),
|
||||
parts.write_buf,
|
||||
parts.write_buf_lw,
|
||||
parts.write_buf_hw,
|
||||
),
|
||||
parts.read_buf,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a reference to the underlying codec.
|
||||
pub fn get_codec(&self) -> &U {
|
||||
&self.inner.get_ref().get_ref().1
|
||||
pub fn codec_ref(&self) -> &U {
|
||||
&self.codec
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying codec.
|
||||
pub fn get_codec_mut(&mut self) -> &mut U {
|
||||
&mut self.inner.get_mut().get_mut().1
|
||||
pub fn codec_mut(&mut self) -> &mut U {
|
||||
&mut self.codec
|
||||
}
|
||||
|
||||
/// Returns a reference to the underlying I/O stream wrapped by
|
||||
/// `Frame`.
|
||||
/// Returns a reference to the underlying I/O stream wrapped by `Frame`.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
pub fn get_ref(&self) -> &T {
|
||||
&self.inner.get_ref().get_ref().0
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn io_ref(&self) -> &T {
|
||||
&self.io
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying I/O stream wrapped by
|
||||
/// `Frame`.
|
||||
/// Returns a mutable reference to the underlying I/O stream.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
pub fn get_mut(&mut self) -> &mut T {
|
||||
&mut self.inner.get_mut().get_mut().0
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn io_mut(&mut self) -> &mut T {
|
||||
&mut self.io
|
||||
}
|
||||
|
||||
/// Returns a `Pin` of a mutable reference to the underlying I/O stream.
|
||||
pub fn io_pin(self: Pin<&mut Self>) -> Pin<&mut T> {
|
||||
self.project().io
|
||||
}
|
||||
|
||||
/// Check if read buffer is empty.
|
||||
pub fn is_read_buf_empty(&self) -> bool {
|
||||
self.read_buf.is_empty()
|
||||
}
|
||||
|
||||
/// Check if write buffer is empty.
|
||||
pub fn is_write_buf_empty(&self) -> bool {
|
||||
self.inner.get_ref().is_empty()
|
||||
self.write_buf.is_empty()
|
||||
}
|
||||
|
||||
/// Check if write buffer is full.
|
||||
pub fn is_write_buf_full(&self) -> bool {
|
||||
self.inner.get_ref().is_full()
|
||||
self.write_buf.len() >= HW
|
||||
}
|
||||
|
||||
/// Consumes the `Frame`, returning its underlying I/O stream.
|
||||
/// Check if framed is able to write more data.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
pub fn into_inner(self) -> T {
|
||||
self.inner.into_inner().into_inner().0
|
||||
/// The `Framed` object is considered ready when there is free space in its write buffer.
|
||||
pub fn is_write_ready(&self) -> bool {
|
||||
self.write_buf.len() < HW
|
||||
}
|
||||
|
||||
/// Consume the `Frame`, returning `Frame` with different codec.
|
||||
pub fn into_framed<U2>(self, codec: U2) -> Framed<T, U2> {
|
||||
let (inner, read_buf) = self.inner.into_parts();
|
||||
let (inner, write_buf, lw, hw) = inner.into_parts();
|
||||
|
||||
pub fn replace_codec<U2>(self, codec: U2) -> Framed<T, U2> {
|
||||
Framed {
|
||||
inner: framed_read2_with_buffer(
|
||||
framed_write2_with_buffer(Fuse(inner.0, codec), write_buf, lw, hw),
|
||||
read_buf,
|
||||
),
|
||||
codec,
|
||||
io: self.io,
|
||||
flags: self.flags,
|
||||
read_buf: self.read_buf,
|
||||
write_buf: self.write_buf,
|
||||
}
|
||||
}
|
||||
|
||||
/// Consume the `Frame`, returning `Frame` with different io.
|
||||
pub fn into_map_io<F, T2>(self, f: F) -> Framed<T2, U>
|
||||
where
|
||||
F: Fn(T) -> T2,
|
||||
{
|
||||
Framed {
|
||||
io: f(self.io),
|
||||
codec: self.codec,
|
||||
flags: self.flags,
|
||||
read_buf: self.read_buf,
|
||||
write_buf: self.write_buf,
|
||||
}
|
||||
}
|
||||
|
||||
/// Consume the `Frame`, returning `Frame` with different codec.
|
||||
pub fn map_codec<F, U2>(self, f: F) -> Framed<T, U2>
|
||||
pub fn into_map_codec<F, U2>(self, f: F) -> Framed<T, U2>
|
||||
where
|
||||
F: Fn(U) -> U2,
|
||||
{
|
||||
let (inner, read_buf) = self.inner.into_parts();
|
||||
let (inner, write_buf, lw, hw) = inner.into_parts();
|
||||
|
||||
Framed {
|
||||
inner: framed_read2_with_buffer(
|
||||
framed_write2_with_buffer(Fuse(inner.0, f(inner.1)), write_buf, lw, hw),
|
||||
read_buf,
|
||||
),
|
||||
io: self.io,
|
||||
codec: f(self.codec),
|
||||
flags: self.flags,
|
||||
read_buf: self.read_buf,
|
||||
write_buf: self.write_buf,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Framed<T, U> {
|
||||
/// Serialize item and write to the inner buffer
|
||||
pub fn write<I>(mut self: Pin<&mut Self>, item: I) -> Result<(), <U as Encoder<I>>::Error>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
U: Encoder<I>,
|
||||
{
|
||||
let this = self.as_mut().project();
|
||||
let remaining = this.write_buf.capacity() - this.write_buf.len();
|
||||
if remaining < LW {
|
||||
this.write_buf.reserve(HW - remaining);
|
||||
}
|
||||
|
||||
this.codec.encode(item, this.write_buf)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Try to read underlying I/O stream and decode item.
|
||||
pub fn next_item(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<<U as Decoder>::Item, U::Error>>>
|
||||
where
|
||||
T: AsyncRead,
|
||||
U: Decoder,
|
||||
{
|
||||
loop {
|
||||
let this = self.as_mut().project();
|
||||
// Repeatedly call `decode` or `decode_eof` as long as it is "readable". Readable is
|
||||
// defined as not having returned `None`. If the upstream has returned EOF, and the
|
||||
// decoder is no longer readable, it can be assumed that the decoder will never become
|
||||
// readable again, at which point the stream is terminated.
|
||||
|
||||
if this.flags.contains(Flags::READABLE) {
|
||||
if this.flags.contains(Flags::EOF) {
|
||||
match this.codec.decode_eof(this.read_buf) {
|
||||
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
|
||||
Ok(None) => return Poll::Ready(None),
|
||||
Err(err) => return Poll::Ready(Some(Err(err))),
|
||||
}
|
||||
}
|
||||
|
||||
tracing::trace!("attempting to decode a frame");
|
||||
|
||||
match this.codec.decode(this.read_buf) {
|
||||
Ok(Some(frame)) => {
|
||||
tracing::trace!("frame decoded from buffer");
|
||||
return Poll::Ready(Some(Ok(frame)));
|
||||
}
|
||||
Err(err) => return Poll::Ready(Some(Err(err))),
|
||||
_ => (), // Need more data
|
||||
}
|
||||
|
||||
this.flags.remove(Flags::READABLE);
|
||||
}
|
||||
|
||||
debug_assert!(!this.flags.contains(Flags::EOF));
|
||||
|
||||
// Otherwise, try to read more data and try again. Make sure we've got room.
|
||||
let remaining = this.read_buf.capacity() - this.read_buf.len();
|
||||
if remaining < LW {
|
||||
this.read_buf.reserve(HW - remaining)
|
||||
}
|
||||
|
||||
let cnt = match tokio_util::io::poll_read_buf(this.io, cx, this.read_buf) {
|
||||
Poll::Pending => return Poll::Pending,
|
||||
Poll::Ready(Err(err)) => return Poll::Ready(Some(Err(err.into()))),
|
||||
Poll::Ready(Ok(cnt)) => cnt,
|
||||
};
|
||||
|
||||
if cnt == 0 {
|
||||
this.flags.insert(Flags::EOF);
|
||||
}
|
||||
this.flags.insert(Flags::READABLE);
|
||||
}
|
||||
}
|
||||
|
||||
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer
|
||||
/// with unprocessed data, and the codec.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
pub fn into_parts(self) -> FramedParts<T, U> {
|
||||
let (inner, read_buf) = self.inner.into_parts();
|
||||
let (inner, write_buf, write_buf_lw, write_buf_hw) = inner.into_parts();
|
||||
/// Flush write buffer to underlying I/O stream.
|
||||
pub fn flush<I>(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), U::Error>>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
U: Encoder<I>,
|
||||
{
|
||||
let mut this = self.as_mut().project();
|
||||
tracing::trace!("flushing framed transport");
|
||||
|
||||
FramedParts {
|
||||
io: inner.0,
|
||||
codec: inner.1,
|
||||
read_buf,
|
||||
write_buf,
|
||||
write_buf_lw,
|
||||
write_buf_hw,
|
||||
_priv: (),
|
||||
while !this.write_buf.is_empty() {
|
||||
tracing::trace!("writing; remaining={}", this.write_buf.len());
|
||||
|
||||
let n = ready!(this.io.as_mut().poll_write(cx, this.write_buf))?;
|
||||
|
||||
if n == 0 {
|
||||
return Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::WriteZero,
|
||||
"failed to write frame to transport",
|
||||
)
|
||||
.into()));
|
||||
}
|
||||
|
||||
// remove written data
|
||||
this.write_buf.advance(n);
|
||||
}
|
||||
|
||||
// Try flushing the underlying IO
|
||||
ready!(this.io.poll_flush(cx))?;
|
||||
|
||||
tracing::trace!("framed transport flushed");
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
/// Flush write buffer and shutdown underlying I/O stream.
|
||||
pub fn close<I>(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), U::Error>>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
U: Encoder<I>,
|
||||
{
|
||||
let mut this = self.as_mut().project();
|
||||
ready!(this.io.as_mut().poll_flush(cx))?;
|
||||
ready!(this.io.as_mut().poll_shutdown(cx))?;
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
}
|
||||
|
||||
@ -210,36 +284,39 @@ where
|
||||
T: AsyncRead,
|
||||
U: Decoder,
|
||||
{
|
||||
type Item = U::Item;
|
||||
type Error = U::Error;
|
||||
type Item = Result<U::Item, U::Error>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
self.inner.poll()
|
||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
self.next_item(cx)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Sink for Framed<T, U>
|
||||
impl<T, U, I> Sink<I> for Framed<T, U>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
U: Encoder,
|
||||
U: Encoder<I>,
|
||||
U::Error: From<io::Error>,
|
||||
{
|
||||
type SinkItem = U::Item;
|
||||
type SinkError = U::Error;
|
||||
type Error = U::Error;
|
||||
|
||||
fn start_send(
|
||||
&mut self,
|
||||
item: Self::SinkItem,
|
||||
) -> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
self.inner.get_mut().start_send(item)
|
||||
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
if self.is_write_ready() {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
self.flush(cx)
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.inner.get_mut().poll_complete()
|
||||
fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> {
|
||||
self.write(item)
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.inner.get_mut().close()
|
||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.flush(cx)
|
||||
}
|
||||
|
||||
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.close(cx)
|
||||
}
|
||||
}
|
||||
|
||||
@ -248,75 +325,57 @@ where
|
||||
T: fmt::Debug,
|
||||
U: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Framed")
|
||||
.field("io", &self.inner.get_ref().get_ref().0)
|
||||
.field("codec", &self.inner.get_ref().get_ref().1)
|
||||
.field("io", &self.io)
|
||||
.field("codec", &self.codec)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
// ===== impl Fuse =====
|
||||
|
||||
impl<T: Read, U> Read for Fuse<T, U> {
|
||||
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
|
||||
self.0.read(dst)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead, U> AsyncRead for Fuse<T, U> {
|
||||
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
|
||||
self.0.prepare_uninitialized_buffer(buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Write, U> Write for Fuse<T, U> {
|
||||
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
|
||||
self.0.write(src)
|
||||
impl<T, U> Framed<T, U> {
|
||||
/// This function returns a *single* object that is both `Stream` and `Sink`; grouping this into
|
||||
/// a single object is often useful for layering things like gzip or TLS, which require both
|
||||
/// read and write access to the underlying object.
|
||||
///
|
||||
/// These objects take a stream, a read buffer and a write buffer. These fields can be obtained
|
||||
/// from an existing `Framed` with the `into_parts` method.
|
||||
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
|
||||
Framed {
|
||||
io: parts.io,
|
||||
codec: parts.codec,
|
||||
flags: parts.flags,
|
||||
write_buf: parts.write_buf,
|
||||
read_buf: parts.read_buf,
|
||||
}
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
self.0.flush()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncWrite, U> AsyncWrite for Fuse<T, U> {
|
||||
fn shutdown(&mut self) -> Poll<(), io::Error> {
|
||||
self.0.shutdown()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U: Decoder> Decoder for Fuse<T, U> {
|
||||
type Item = U::Item;
|
||||
type Error = U::Error;
|
||||
|
||||
fn decode(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
self.1.decode(buffer)
|
||||
}
|
||||
|
||||
fn decode_eof(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
self.1.decode_eof(buffer)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U: Encoder> Encoder for Fuse<T, U> {
|
||||
type Item = U::Item;
|
||||
type Error = U::Error;
|
||||
|
||||
fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
self.1.encode(item, dst)
|
||||
/// Consumes the `Frame`, returning its underlying I/O stream, the buffer with unprocessed data,
|
||||
/// and the codec.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream of data coming in as
|
||||
/// it may corrupt the stream of frames otherwise being worked with.
|
||||
pub fn into_parts(self) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io: self.io,
|
||||
codec: self.codec,
|
||||
flags: self.flags,
|
||||
read_buf: self.read_buf,
|
||||
write_buf: self.write_buf,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// `FramedParts` contains an export of the data of a Framed transport.
|
||||
/// It can be used to construct a new `Framed` with a different codec.
|
||||
/// It contains all current buffers and the inner transport.
|
||||
///
|
||||
/// It can be used to construct a new `Framed` with a different codec. It contains all current
|
||||
/// buffers and the inner transport.
|
||||
#[derive(Debug)]
|
||||
pub struct FramedParts<T, U> {
|
||||
/// The inner transport used to read bytes to and write bytes to
|
||||
/// The inner transport used to read bytes from and write bytes to.
|
||||
pub io: T,
|
||||
|
||||
/// The codec
|
||||
/// The codec object.
|
||||
pub codec: U,
|
||||
|
||||
/// The buffer with read but unprocessed data.
|
||||
@ -325,41 +384,29 @@ pub struct FramedParts<T, U> {
|
||||
/// A buffer with unprocessed data that has not been written yet.
|
||||
pub write_buf: BytesMut,
|
||||
|
||||
/// A buffer low watermark capacity
|
||||
pub write_buf_lw: usize,
|
||||
|
||||
/// A buffer high watermark capacity
|
||||
pub write_buf_hw: usize,
|
||||
|
||||
/// This private field allows us to add additional fields in the future in a
|
||||
/// backwards compatible way.
|
||||
_priv: (),
|
||||
flags: Flags,
|
||||
}
|
||||
|
||||
impl<T, U> FramedParts<T, U> {
|
||||
/// Create a new, default, `FramedParts`
|
||||
/// Creates a new default `FramedParts`.
|
||||
pub fn new(io: T, codec: U) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io,
|
||||
codec,
|
||||
flags: Flags::empty(),
|
||||
read_buf: BytesMut::new(),
|
||||
write_buf: BytesMut::new(),
|
||||
write_buf_lw: LW,
|
||||
write_buf_hw: HW,
|
||||
_priv: (),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `FramedParts` with read buffer
|
||||
/// Creates a new `FramedParts` with read buffer.
|
||||
pub fn with_read_buf(io: T, codec: U, read_buf: BytesMut) -> FramedParts<T, U> {
|
||||
FramedParts {
|
||||
io,
|
||||
codec,
|
||||
read_buf,
|
||||
flags: Flags::empty(),
|
||||
write_buf: BytesMut::new(),
|
||||
write_buf_lw: LW,
|
||||
write_buf_hw: HW,
|
||||
_priv: (),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
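
To make the new `Stream`/`Sink` surface above concrete, here is a hedged usage sketch (not part of the diff). It assumes a tokio `TcpStream`, the `LinesCodec` added later in this diff, and the `futures-util` crate for the `SinkExt`/`StreamExt` combinators:

use actix_codec::{Framed, LinesCodec};
use futures_util::{SinkExt as _, StreamExt as _};
use tokio::net::TcpStream;

async fn echo_lines(addr: &str) -> std::io::Result<()> {
    let stream = TcpStream::connect(addr).await?;

    // `Framed` pairs the raw I/O object with a codec, yielding a combined
    // `Stream` of decoded frames and `Sink` of encoded frames.
    let mut framed = Framed::new(stream, LinesCodec::default());

    // `Sink<&str>` comes from `LinesCodec: Encoder<T: AsRef<str>>`.
    framed.send("hello").await?;

    // Stream items are now `Result<String, io::Error>` instead of a separate
    // error associated type.
    while let Some(line) = framed.next().await {
        println!("got: {}", line?);
    }

    Ok(())
}

If the connection later needs a different codec, `replace_codec`, `into_parts`, and `from_parts` shown above carry the read and write buffers across, so no buffered data is lost.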
@ -1,218 +0,0 @@
|
||||
use std::fmt;
|
||||
|
||||
use bytes::BytesMut;
|
||||
use futures::{try_ready, Async, Poll, Sink, StartSend, Stream};
|
||||
use log::trace;
|
||||
use tokio_codec::Decoder;
|
||||
use tokio_io::AsyncRead;
|
||||
|
||||
use super::framed::Fuse;
|
||||
|
||||
/// A `Stream` of messages decoded from an `AsyncRead`.
|
||||
pub struct FramedRead<T, D> {
|
||||
inner: FramedRead2<Fuse<T, D>>,
|
||||
}
|
||||
|
||||
pub struct FramedRead2<T> {
|
||||
inner: T,
|
||||
eof: bool,
|
||||
is_readable: bool,
|
||||
buffer: BytesMut,
|
||||
}
|
||||
|
||||
const INITIAL_CAPACITY: usize = 8 * 1024;
|
||||
|
||||
// ===== impl FramedRead =====
|
||||
|
||||
impl<T, D> FramedRead<T, D>
|
||||
where
|
||||
T: AsyncRead,
|
||||
D: Decoder,
|
||||
{
|
||||
/// Creates a new `FramedRead` with the given `decoder`.
|
||||
pub fn new(inner: T, decoder: D) -> FramedRead<T, D> {
|
||||
FramedRead {
|
||||
inner: framed_read2(Fuse(inner, decoder)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, D> FramedRead<T, D> {
|
||||
/// Returns a reference to the underlying I/O stream wrapped by
|
||||
/// `FramedRead`.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
pub fn get_ref(&self) -> &T {
|
||||
&self.inner.inner.0
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying I/O stream wrapped by
|
||||
/// `FramedRead`.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
pub fn get_mut(&mut self) -> &mut T {
|
||||
&mut self.inner.inner.0
|
||||
}
|
||||
|
||||
/// Consumes the `FramedRead`, returning its underlying I/O stream.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
pub fn into_inner(self) -> T {
|
||||
self.inner.inner.0
|
||||
}
|
||||
|
||||
/// Returns a reference to the underlying decoder.
|
||||
pub fn decoder(&self) -> &D {
|
||||
&self.inner.inner.1
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying decoder.
|
||||
pub fn decoder_mut(&mut self) -> &mut D {
|
||||
&mut self.inner.inner.1
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, D> Stream for FramedRead<T, D>
|
||||
where
|
||||
T: AsyncRead,
|
||||
D: Decoder,
|
||||
{
|
||||
type Item = D::Item;
|
||||
type Error = D::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
self.inner.poll()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, D> Sink for FramedRead<T, D>
|
||||
where
|
||||
T: Sink,
|
||||
{
|
||||
type SinkItem = T::SinkItem;
|
||||
type SinkError = T::SinkError;
|
||||
|
||||
fn start_send(
|
||||
&mut self,
|
||||
item: Self::SinkItem,
|
||||
) -> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
self.inner.inner.0.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.inner.inner.0.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.inner.inner.0.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, D> fmt::Debug for FramedRead<T, D>
|
||||
where
|
||||
T: fmt::Debug,
|
||||
D: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("FramedRead")
|
||||
.field("inner", &self.inner.inner.0)
|
||||
.field("decoder", &self.inner.inner.1)
|
||||
.field("eof", &self.inner.eof)
|
||||
.field("is_readable", &self.inner.is_readable)
|
||||
.field("buffer", &self.inner.buffer)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
// ===== impl FramedRead2 =====
|
||||
|
||||
pub fn framed_read2<T>(inner: T) -> FramedRead2<T> {
|
||||
FramedRead2 {
|
||||
inner,
|
||||
eof: false,
|
||||
is_readable: false,
|
||||
buffer: BytesMut::with_capacity(INITIAL_CAPACITY),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn framed_read2_with_buffer<T>(inner: T, mut buf: BytesMut) -> FramedRead2<T> {
|
||||
if buf.capacity() < INITIAL_CAPACITY {
|
||||
let bytes_to_reserve = INITIAL_CAPACITY - buf.capacity();
|
||||
buf.reserve(bytes_to_reserve);
|
||||
}
|
||||
FramedRead2 {
|
||||
inner,
|
||||
eof: false,
|
||||
is_readable: !buf.is_empty(),
|
||||
buffer: buf,
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> FramedRead2<T> {
|
||||
pub fn get_ref(&self) -> &T {
|
||||
&self.inner
|
||||
}
|
||||
|
||||
pub fn into_inner(self) -> T {
|
||||
self.inner
|
||||
}
|
||||
|
||||
pub fn into_parts(self) -> (T, BytesMut) {
|
||||
(self.inner, self.buffer)
|
||||
}
|
||||
|
||||
pub fn get_mut(&mut self) -> &mut T {
|
||||
&mut self.inner
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Stream for FramedRead2<T>
|
||||
where
|
||||
T: AsyncRead + Decoder,
|
||||
{
|
||||
type Item = T::Item;
|
||||
type Error = T::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
loop {
|
||||
// Repeatedly call `decode` or `decode_eof` as long as it is
|
||||
// "readable". Readable is defined as not having returned `None`. If
|
||||
// the upstream has returned EOF, and the decoder is no longer
|
||||
// readable, it can be assumed that the decoder will never become
|
||||
// readable again, at which point the stream is terminated.
|
||||
if self.is_readable {
|
||||
if self.eof {
|
||||
let frame = self.inner.decode_eof(&mut self.buffer)?;
|
||||
return Ok(Async::Ready(frame));
|
||||
}
|
||||
|
||||
trace!("attempting to decode a frame");
|
||||
|
||||
if let Some(frame) = self.inner.decode(&mut self.buffer)? {
|
||||
trace!("frame decoded from buffer");
|
||||
return Ok(Async::Ready(Some(frame)));
|
||||
}
|
||||
|
||||
self.is_readable = false;
|
||||
}
|
||||
|
||||
assert!(!self.eof);
|
||||
|
||||
// Otherwise, try to read more data and try again. Make sure we've
|
||||
// got room for at least one byte to read to ensure that we don't
|
||||
// get a spurious 0 that looks like EOF
|
||||
self.buffer.reserve(1);
|
||||
if 0 == try_ready!(self.inner.read_buf(&mut self.buffer)) {
|
||||
self.eof = true;
|
||||
}
|
||||
|
||||
self.is_readable = true;
|
||||
}
|
||||
}
|
||||
}
|
@ -1,303 +0,0 @@
|
||||
use std::fmt;
|
||||
use std::io::{self, Read};
|
||||
|
||||
use bytes::BytesMut;
|
||||
use futures::{try_ready, Async, AsyncSink, Poll, Sink, StartSend, Stream};
|
||||
use log::trace;
|
||||
use tokio_codec::{Decoder, Encoder};
|
||||
use tokio_io::{AsyncRead, AsyncWrite};
|
||||
|
||||
use super::framed::Fuse;
|
||||
|
||||
/// A `Sink` of frames encoded to an `AsyncWrite`.
|
||||
pub struct FramedWrite<T, E> {
|
||||
inner: FramedWrite2<Fuse<T, E>>,
|
||||
}
|
||||
|
||||
pub struct FramedWrite2<T> {
|
||||
inner: T,
|
||||
buffer: BytesMut,
|
||||
low_watermark: usize,
|
||||
high_watermark: usize,
|
||||
}
|
||||
|
||||
impl<T, E> FramedWrite<T, E>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
E: Encoder,
|
||||
{
|
||||
/// Creates a new `FramedWrite` with the given `encoder`.
|
||||
pub fn new(inner: T, encoder: E, lw: usize, hw: usize) -> FramedWrite<T, E> {
|
||||
FramedWrite {
|
||||
inner: framed_write2(Fuse(inner, encoder), lw, hw),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> FramedWrite<T, E> {
|
||||
/// Returns a reference to the underlying I/O stream wrapped by
|
||||
/// `FramedWrite`.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
pub fn get_ref(&self) -> &T {
|
||||
&self.inner.inner.0
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying I/O stream wrapped by
|
||||
/// `FramedWrite`.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
pub fn get_mut(&mut self) -> &mut T {
|
||||
&mut self.inner.inner.0
|
||||
}
|
||||
|
||||
/// Consumes the `FramedWrite`, returning its underlying I/O stream.
|
||||
///
|
||||
/// Note that care should be taken to not tamper with the underlying stream
|
||||
/// of data coming in as it may corrupt the stream of frames otherwise
|
||||
/// being worked with.
|
||||
pub fn into_inner(self) -> T {
|
||||
self.inner.inner.0
|
||||
}
|
||||
|
||||
/// Returns a reference to the underlying decoder.
|
||||
pub fn encoder(&self) -> &E {
|
||||
&self.inner.inner.1
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying decoder.
|
||||
pub fn encoder_mut(&mut self) -> &mut E {
|
||||
&mut self.inner.inner.1
|
||||
}
|
||||
|
||||
/// Check if write buffer is full
|
||||
pub fn is_full(&self) -> bool {
|
||||
self.inner.is_full()
|
||||
}
|
||||
|
||||
/// Check if write buffer is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.inner.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> FramedWrite<T, E>
|
||||
where
|
||||
E: Encoder,
|
||||
{
|
||||
/// Force send item
|
||||
pub fn force_send(&mut self, item: E::Item) -> Result<(), E::Error> {
|
||||
self.inner.force_send(item)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> Sink for FramedWrite<T, E>
|
||||
where
|
||||
T: AsyncWrite,
|
||||
E: Encoder,
|
||||
{
|
||||
type SinkItem = E::Item;
|
||||
type SinkError = E::Error;
|
||||
|
||||
fn start_send(&mut self, item: E::Item) -> StartSend<E::Item, E::Error> {
|
||||
self.inner.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.inner.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
Ok(self.inner.close()?)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, D> Stream for FramedWrite<T, D>
|
||||
where
|
||||
T: Stream,
|
||||
{
|
||||
type Item = T::Item;
|
||||
type Error = T::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
self.inner.inner.0.poll()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> fmt::Debug for FramedWrite<T, U>
|
||||
where
|
||||
T: fmt::Debug,
|
||||
U: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("FramedWrite")
|
||||
.field("inner", &self.inner.get_ref().0)
|
||||
.field("encoder", &self.inner.get_ref().1)
|
||||
.field("buffer", &self.inner.buffer)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
// ===== impl FramedWrite2 =====
|
||||
|
||||
pub fn framed_write2<T>(
|
||||
inner: T,
|
||||
low_watermark: usize,
|
||||
high_watermark: usize,
|
||||
) -> FramedWrite2<T> {
|
||||
FramedWrite2 {
|
||||
inner,
|
||||
low_watermark,
|
||||
high_watermark,
|
||||
buffer: BytesMut::with_capacity(high_watermark),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn framed_write2_with_buffer<T>(
|
||||
inner: T,
|
||||
mut buffer: BytesMut,
|
||||
low_watermark: usize,
|
||||
high_watermark: usize,
|
||||
) -> FramedWrite2<T> {
|
||||
if buffer.capacity() < high_watermark {
|
||||
let bytes_to_reserve = high_watermark - buffer.capacity();
|
||||
buffer.reserve(bytes_to_reserve);
|
||||
}
|
||||
FramedWrite2 {
|
||||
inner,
|
||||
buffer,
|
||||
low_watermark,
|
||||
high_watermark,
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> FramedWrite2<T> {
|
||||
pub fn get_ref(&self) -> &T {
|
||||
&self.inner
|
||||
}
|
||||
|
||||
pub fn into_inner(self) -> T {
|
||||
self.inner
|
||||
}
|
||||
|
||||
pub fn into_parts(self) -> (T, BytesMut, usize, usize) {
|
||||
(
|
||||
self.inner,
|
||||
self.buffer,
|
||||
self.low_watermark,
|
||||
self.high_watermark,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_mut(&mut self) -> &mut T {
|
||||
&mut self.inner
|
||||
}
|
||||
|
||||
pub fn is_full(&self) -> bool {
|
||||
self.buffer.len() >= self.high_watermark
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.buffer.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> FramedWrite2<T>
|
||||
where
|
||||
T: Encoder,
|
||||
{
|
||||
pub fn force_send(&mut self, item: T::Item) -> Result<(), T::Error> {
|
||||
let len = self.buffer.len();
|
||||
if len < self.low_watermark {
|
||||
self.buffer.reserve(self.high_watermark - len)
|
||||
}
|
||||
self.inner.encode(item, &mut self.buffer)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Sink for FramedWrite2<T>
|
||||
where
|
||||
T: AsyncWrite + Encoder,
|
||||
{
|
||||
type SinkItem = T::Item;
|
||||
type SinkError = T::Error;
|
||||
|
||||
fn start_send(&mut self, item: T::Item) -> StartSend<T::Item, T::Error> {
|
||||
// Check the buffer capacity
|
||||
let len = self.buffer.len();
|
||||
if len >= self.high_watermark {
|
||||
return Ok(AsyncSink::NotReady(item));
|
||||
}
|
||||
if len < self.low_watermark {
|
||||
self.buffer.reserve(self.high_watermark - len)
|
||||
}
|
||||
|
||||
self.inner.encode(item, &mut self.buffer)?;
|
||||
|
||||
Ok(AsyncSink::Ready)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
trace!("flushing framed transport");
|
||||
|
||||
while !self.buffer.is_empty() {
|
||||
trace!("writing; remaining={}", self.buffer.len());
|
||||
|
||||
let n = try_ready!(self.inner.poll_write(&self.buffer));
|
||||
|
||||
if n == 0 {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::WriteZero,
|
||||
"failed to \
|
||||
write frame to transport",
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
// TODO: Add a way to `bytes` to do this w/o returning the drained
|
||||
// data.
|
||||
let _ = self.buffer.split_to(n);
|
||||
}
|
||||
|
||||
// Try flushing the underlying IO
|
||||
try_ready!(self.inner.poll_flush());
|
||||
|
||||
trace!("framed transport flushed");
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
try_ready!(self.poll_complete());
|
||||
Ok(self.inner.shutdown()?)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Decoder> Decoder for FramedWrite2<T> {
|
||||
type Item = T::Item;
|
||||
type Error = T::Error;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<T::Item>, T::Error> {
|
||||
self.inner.decode(src)
|
||||
}
|
||||
|
||||
fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<T::Item>, T::Error> {
|
||||
self.inner.decode_eof(src)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Read> Read for FramedWrite2<T> {
|
||||
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
|
||||
self.inner.read(dst)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead> AsyncRead for FramedWrite2<T> {
|
||||
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
|
||||
self.inner.prepare_uninitialized_buffer(buf)
|
||||
}
|
||||
}
|
@ -1,24 +1,26 @@
//! Utilities for encoding and decoding frames.
//! Codec utilities for working with framed protocols.
//!
//! Contains adapters to go from streams of bytes, [`AsyncRead`] and
//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`].
//! Framed streams are also known as [transports].
//! Contains adapters to go from streams of bytes, [`AsyncRead`] and [`AsyncWrite`], to framed
//! streams implementing [`Sink`] and [`Stream`]. Framed streams are also known as `transports`.
//!
//! [`AsyncRead`]: #
//! [`AsyncWrite`]: #
//! [`Sink`]: #
//! [`Stream`]: #
//! [transports]: #
//! [`Sink`]: futures_sink::Sink
//! [`Stream`]: futures_core::Stream

#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]

pub use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
pub use tokio_util::{
    codec::{Decoder, Encoder},
    io::poll_read_buf,
};

mod bcodec;
mod framed;
mod framed_read;
mod framed_write;
mod lines;

pub use self::bcodec::BytesCodec;
pub use self::framed::{Framed, FramedParts};
pub use self::framed_read::FramedRead;
pub use self::framed_write::FramedWrite;

pub use tokio_codec::{Decoder, Encoder};
pub use tokio_io::{AsyncRead, AsyncWrite};
pub use self::{
    bcodec::BytesCodec,
    framed::{Framed, FramedParts},
    lines::LinesCodec,
};
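
As a sketch of what the re-exported `Decoder`/`Encoder` traits look like for a downstream crate, here is a small big-endian `u32` codec, essentially the `U32` codec used in the sink test later in this diff. It is illustrative only, not part of the diff:

use std::io;

use actix_codec::{Decoder, Encoder};
use bytes::{Buf as _, BufMut as _, BytesMut};

struct U32Codec;

impl Encoder<u32> for U32Codec {
    type Error = io::Error;

    fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> {
        // Reserve space for one big-endian u32 and append it.
        dst.reserve(4);
        dst.put_u32(item);
        Ok(())
    }
}

impl Decoder for U32Codec {
    type Item = u32;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> io::Result<Option<u32>> {
        if src.len() < 4 {
            // Not enough bytes buffered yet; `Framed` will read more and retry.
            return Ok(None);
        }

        Ok(Some(src.split_to(4).get_u32()))
    }
}

Wiring it up is then just `Framed::new(io, U32Codec)`, as the sink test below does with its own `U32` codec.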
158
actix-codec/src/lines.rs
Normal file
@ -0,0 +1,158 @@
use std::io;

use bytes::{Buf, BufMut, Bytes, BytesMut};
use memchr::memchr;

use super::{Decoder, Encoder};

/// Lines codec. Reads/writes line delimited strings.
///
/// Will split input up by LF or CRLF delimiters. Carriage return characters at the end of lines are
/// not preserved.
#[derive(Debug, Copy, Clone, Default)]
#[non_exhaustive]
pub struct LinesCodec;

impl<T: AsRef<str>> Encoder<T> for LinesCodec {
    type Error = io::Error;

    #[inline]
    fn encode(&mut self, item: T, dst: &mut BytesMut) -> Result<(), Self::Error> {
        let item = item.as_ref();
        dst.reserve(item.len() + 1);
        dst.put_slice(item.as_bytes());
        dst.put_u8(b'\n');
        Ok(())
    }
}

impl Decoder for LinesCodec {
    type Item = String;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        if src.is_empty() {
            return Ok(None);
        }

        let len = match memchr(b'\n', src) {
            Some(n) => n,
            None => {
                return Ok(None);
            }
        };

        // split up to new line char
        let mut buf = src.split_to(len);
        debug_assert_eq!(len, buf.len());

        // remove new line char from source
        src.advance(1);

        match buf.last() {
            // remove carriage returns at the end of buf
            Some(b'\r') => buf.truncate(len - 1),

            // line is empty
            None => return Ok(Some(String::new())),

            _ => {}
        }

        try_into_utf8(buf.freeze())
    }

    fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        match self.decode(src)? {
            Some(frame) => Ok(Some(frame)),
            None if src.is_empty() => Ok(None),
            None => {
                let buf = match src.last() {
                    // if last line ends in a CR then take everything up to it
                    Some(b'\r') => src.split_to(src.len() - 1),

                    // take all bytes from source
                    _ => src.split(),
                };

                if buf.is_empty() {
                    return Ok(None);
                }

                try_into_utf8(buf.freeze())
            }
        }
    }
}

// Attempts to convert bytes into a `String`.
fn try_into_utf8(buf: Bytes) -> io::Result<Option<String>> {
    String::from_utf8(buf.to_vec())
        .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
        .map(Some)
}

#[cfg(test)]
mod tests {
    use bytes::BufMut as _;

    use super::*;

    #[test]
    fn lines_decoder() {
        let mut codec = LinesCodec::default();
        let mut buf = BytesMut::from("\nline 1\nline 2\r\nline 3\n\r\n\r");

        assert_eq!("", codec.decode(&mut buf).unwrap().unwrap());
        assert_eq!("line 1", codec.decode(&mut buf).unwrap().unwrap());
        assert_eq!("line 2", codec.decode(&mut buf).unwrap().unwrap());
        assert_eq!("line 3", codec.decode(&mut buf).unwrap().unwrap());
        assert_eq!("", codec.decode(&mut buf).unwrap().unwrap());
        assert!(codec.decode(&mut buf).unwrap().is_none());
        assert!(codec.decode_eof(&mut buf).unwrap().is_none());

        buf.put_slice(b"k");
        assert!(codec.decode(&mut buf).unwrap().is_none());
        assert_eq!("\rk", codec.decode_eof(&mut buf).unwrap().unwrap());

        assert!(codec.decode(&mut buf).unwrap().is_none());
        assert!(codec.decode_eof(&mut buf).unwrap().is_none());
    }

    #[test]
    fn lines_encoder() {
        let mut codec = LinesCodec::default();

        let mut buf = BytesMut::new();

        codec.encode("", &mut buf).unwrap();
        assert_eq!(&buf[..], b"\n");

        codec.encode("test", &mut buf).unwrap();
        assert_eq!(&buf[..], b"\ntest\n");

        codec.encode("a\nb", &mut buf).unwrap();
        assert_eq!(&buf[..], b"\ntest\na\nb\n");
    }

    #[test]
    fn lines_encoder_no_overflow() {
        let mut codec = LinesCodec::default();

        let mut buf = BytesMut::new();
        codec.encode("1234567", &mut buf).unwrap();
        assert_eq!(&buf[..], b"1234567\n");

        let mut buf = BytesMut::new();
        codec.encode("12345678", &mut buf).unwrap();
        assert_eq!(&buf[..], b"12345678\n");

        let mut buf = BytesMut::new();
        codec.encode("123456789111213", &mut buf).unwrap();
        assert_eq!(&buf[..], b"123456789111213\n");

        let mut buf = BytesMut::new();
        codec.encode("1234567891112131", &mut buf).unwrap();
        assert_eq!(&buf[..], b"1234567891112131\n");
    }
}
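
The practical difference between `decode` and `decode_eof` above is that a trailing line with no terminating newline is only yielded once the stream is known to be finished. A small sketch of that behaviour (not part of the diff):

use actix_codec::{Decoder as _, LinesCodec};
use bytes::BytesMut;

fn main() -> std::io::Result<()> {
    let mut codec = LinesCodec::default();
    let mut buf = BytesMut::from("one\r\ntwo");

    // "one" is complete (the CR before the LF is stripped); "two" is still partial.
    assert_eq!(codec.decode(&mut buf)?, Some("one".to_owned()));
    assert_eq!(codec.decode(&mut buf)?, None);

    // At EOF the remaining bytes are treated as the final line.
    assert_eq!(codec.decode_eof(&mut buf)?, Some("two".to_owned()));
    assert_eq!(codec.decode_eof(&mut buf)?, None);

    Ok(())
}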
224
actix-codec/tests/test_framed_sink.rs
Normal file
@ -0,0 +1,224 @@
|
||||
#![allow(missing_docs)]
|
||||
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
io::{self, Write},
|
||||
pin::Pin,
|
||||
task::{
|
||||
Context,
|
||||
Poll::{self, Pending, Ready},
|
||||
},
|
||||
};
|
||||
|
||||
use actix_codec::*;
|
||||
use bytes::{Buf as _, BufMut as _, BytesMut};
|
||||
use futures_sink::Sink;
|
||||
use tokio_test::{assert_ready, task};
|
||||
|
||||
macro_rules! bilateral {
|
||||
($($x:expr,)*) => {{
|
||||
let mut v = VecDeque::new();
|
||||
v.extend(vec![$($x),*]);
|
||||
Bilateral { calls: v }
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! assert_ready {
|
||||
($e:expr) => {{
|
||||
use core::task::Poll::*;
|
||||
match $e {
|
||||
Ready(v) => v,
|
||||
Pending => panic!("pending"),
|
||||
}
|
||||
}};
|
||||
($e:expr, $($msg:tt),+) => {{
|
||||
use core::task::Poll::*;
|
||||
match $e {
|
||||
Ready(v) => v,
|
||||
Pending => {
|
||||
let msg = format_args!($($msg),+);
|
||||
panic!("pending; {}", msg)
|
||||
}
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Bilateral {
|
||||
pub calls: VecDeque<io::Result<Vec<u8>>>,
|
||||
}
|
||||
|
||||
impl Write for Bilateral {
|
||||
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
|
||||
match self.calls.pop_front() {
|
||||
Some(Ok(data)) => {
|
||||
assert!(src.len() >= data.len());
|
||||
assert_eq!(&data[..], &src[..data.len()]);
|
||||
Ok(data.len())
|
||||
}
|
||||
Some(Err(err)) => Err(err),
|
||||
None => panic!("unexpected write; {:?}", src),
|
||||
}
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncWrite for Bilateral {
|
||||
fn poll_write(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<Result<usize, io::Error>> {
|
||||
match Pin::get_mut(self).write(buf) {
|
||||
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Pending,
|
||||
other => Ready(other),
|
||||
}
|
||||
}
|
||||
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
|
||||
match Pin::get_mut(self).flush() {
|
||||
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => Pending,
|
||||
other => Ready(other),
|
||||
}
|
||||
}
|
||||
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncRead for Bilateral {
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<Result<(), std::io::Error>> {
|
||||
use io::ErrorKind::WouldBlock;
|
||||
|
||||
match self.calls.pop_front() {
|
||||
Some(Ok(data)) => {
|
||||
debug_assert!(buf.remaining() >= data.len());
|
||||
buf.put_slice(&data);
|
||||
Ready(Ok(()))
|
||||
}
|
||||
Some(Err(ref err)) if err.kind() == WouldBlock => Pending,
|
||||
Some(Err(err)) => Ready(Err(err)),
|
||||
None => Ready(Ok(())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct U32;
|
||||
|
||||
impl Encoder<u32> for U32 {
|
||||
type Error = io::Error;
|
||||
|
||||
fn encode(&mut self, item: u32, dst: &mut BytesMut) -> io::Result<()> {
|
||||
// Reserve space
|
||||
dst.reserve(4);
|
||||
dst.put_u32(item);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for U32 {
|
||||
type Item = u32;
|
||||
type Error = io::Error;
|
||||
|
||||
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<u32>> {
|
||||
if buf.len() < 4 {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let n = buf.split_to(4).get_u32();
|
||||
Ok(Some(n))
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_write_hits_highwater_mark() {
|
||||
// see here for what this test is based on:
|
||||
// https://github.com/tokio-rs/tokio/blob/75c07770bfbfea4e5fd914af819c741ed9c3fc36/tokio-util/tests/framed_write.rs#L69
|
||||
|
||||
const ITER: usize = 2 * 1024;
|
||||
|
||||
let mut bi = bilateral! {
|
||||
Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")),
|
||||
Ok(b"".to_vec()),
|
||||
};
|
||||
|
||||
for i in 0..=ITER {
|
||||
let mut b = BytesMut::with_capacity(4);
|
||||
b.put_u32(i as u32);
|
||||
|
||||
// Append to the end
|
||||
match bi.calls.back_mut().unwrap() {
|
||||
Ok(ref mut data) => {
|
||||
// Write in 2kb chunks
|
||||
if data.len() < ITER {
|
||||
data.extend_from_slice(&b[..]);
|
||||
continue;
|
||||
} // else fall through and create a new buffer
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
// Push a new chunk
|
||||
bi.calls.push_back(Ok(b[..].to_vec()));
|
||||
}
|
||||
|
||||
assert_eq!(bi.calls.len(), 6);
|
||||
let mut framed = Framed::new(bi, U32);
|
||||
// Send 8KB. This fills up the `Framed` write buffer
|
||||
let mut task = task::spawn(());
|
||||
task.enter(|cx, _| {
|
||||
// Send 8KB. This fills up Framed buffer
|
||||
for i in 0..ITER {
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
|
||||
}
|
||||
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// write the buffer
|
||||
assert!(framed.start_send(i as u32).is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
|
||||
// Now we poll_ready which forces a flush. The bilateral pops the front message
|
||||
// and decides to block.
|
||||
assert!(framed.poll_ready(cx).is_pending());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// We poll again, forcing another flush, which this time succeeds
|
||||
// The whole 8KB buffer is flushed
|
||||
assert!(assert_ready!(framed.poll_ready(cx)).is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// Send more data. This matches the final message expected by the bilateral
|
||||
assert!(framed.start_send(ITER as u32).is_ok());
|
||||
}
|
||||
|
||||
{
|
||||
#[allow(unused_mut)]
|
||||
let mut framed = Pin::new(&mut framed);
|
||||
// Flush the rest of the buffer
|
||||
assert!(assert_ready!(framed.poll_flush(cx)).is_ok());
|
||||
}
|
||||
|
||||
// Ensure the mock is empty
|
||||
assert_eq!(0, Pin::new(&framed).get_ref().io_ref().calls.len());
|
||||
});
|
||||
}
|
@ -1,14 +0,0 @@
|
||||
# Changes
|
||||
|
||||
## [0.1.1] - 2019-03-15
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fix error handling for single address
|
||||
|
||||
|
||||
## [0.1.0] - 2019-03-14
|
||||
|
||||
* Refactor resolver and connector services
|
||||
|
||||
* Rename crate
|
@ -1,55 +0,0 @@
|
||||
[package]
|
||||
name = "actix-connect"
|
||||
version = "0.1.1"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "Actix Connector - tcp connector service"
|
||||
keywords = ["network", "framework", "async", "futures"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-connect/"
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
license = "MIT/Apache-2.0"
|
||||
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
|
||||
edition = "2018"
|
||||
workspace = ".."
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
features = ["ssl", "uri"]
|
||||
|
||||
[lib]
|
||||
name = "actix_connect"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[features]
|
||||
default = ["uri"]
|
||||
|
||||
# openssl
|
||||
ssl = ["openssl", "tokio-openssl"]
|
||||
|
||||
# support http::Uri as connect address
|
||||
uri = ["http"]
|
||||
|
||||
[dependencies]
|
||||
actix-service = "0.3.4"
|
||||
actix-codec = "0.1.1"
|
||||
actix-utils = "0.3.4"
|
||||
derive_more = "0.14.0"
|
||||
either = "1.5.1"
|
||||
futures = "0.1.25"
|
||||
http = { version = "0.1.16", optional = true }
|
||||
log = "0.4"
|
||||
tokio-tcp = "0.1.3"
|
||||
tokio-current-thread = "0.1.5"
|
||||
trust-dns-resolver = { version="0.11.0-alpha.2", default-features = false }
|
||||
|
||||
# openssl
|
||||
openssl = { version="0.10", optional = true }
|
||||
tokio-openssl = { version="0.3", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
bytes = "0.4"
|
||||
actix-connect = { path=".", features=["ssl"] }
|
||||
actix-test-server = { version="0.2.0", features=["ssl"] }
|
||||
actix-server-config = "0.1.0"
|
||||
actix-utils = "0.3.4"
|
||||
tokio-tcp = "0.1"
|
@ -1,172 +0,0 @@
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use either::Either;
|
||||
|
||||
/// Connect request
|
||||
pub trait Address {
|
||||
/// Host name of the request
|
||||
fn host(&self) -> &str;
|
||||
|
||||
/// Port of the request
|
||||
fn port(&self) -> Option<u16>;
|
||||
}
|
||||
|
||||
impl Address for String {
|
||||
fn host(&self) -> &str {
|
||||
&self
|
||||
}
|
||||
|
||||
fn port(&self) -> Option<u16> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl Address for &'static str {
|
||||
fn host(&self) -> &str {
|
||||
self
|
||||
}
|
||||
|
||||
fn port(&self) -> Option<u16> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Connect request
|
||||
#[derive(Eq, PartialEq, Debug, Hash)]
|
||||
pub struct Connect<T> {
|
||||
pub(crate) req: T,
|
||||
pub(crate) port: u16,
|
||||
pub(crate) addr: Option<Either<SocketAddr, VecDeque<SocketAddr>>>,
|
||||
}
|
||||
|
||||
impl<T: Address> Connect<T> {
|
||||
/// Create a `Connect` instance by splitting the string on ':' and converting the second part to a u16
|
||||
pub fn new(req: T) -> Connect<T> {
|
||||
let (_, port) = parse(req.host());
|
||||
Connect {
|
||||
req,
|
||||
port: port.unwrap_or(0),
|
||||
addr: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create new `Connect` instance from host and address. Connector skips name resolution stage for such connect messages.
|
||||
pub fn with(req: T, addr: SocketAddr) -> Connect<T> {
|
||||
Connect {
|
||||
req,
|
||||
port: 0,
|
||||
addr: Some(Either::Left(addr)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Use port if address does not provide one.
|
||||
///
|
||||
/// By default it is set to 0
|
||||
pub fn set_port(mut self, port: u16) -> Self {
|
||||
self.port = port;
|
||||
self
|
||||
}
|
||||
|
||||
/// Host name
|
||||
pub fn host(&self) -> &str {
|
||||
self.req.host()
|
||||
}
|
||||
|
||||
/// Port of the request
|
||||
pub fn port(&self) -> u16 {
|
||||
self.req.port().unwrap_or(self.port)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> From<T> for Connect<T> {
|
||||
fn from(addr: T) -> Self {
|
||||
Connect::new(addr)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> fmt::Display for Connect<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{}:{}", self.host(), self.port())
|
||||
}
|
||||
}
|
||||
|
||||
fn parse(host: &str) -> (&str, Option<u16>) {
|
||||
let mut parts_iter = host.splitn(2, ':');
|
||||
if let Some(host) = parts_iter.next() {
|
||||
let port_str = parts_iter.next().unwrap_or("");
|
||||
if let Ok(port) = port_str.parse::<u16>() {
|
||||
(host, Some(port))
|
||||
} else {
|
||||
(host, None)
|
||||
}
|
||||
} else {
|
||||
(host, None)
|
||||
}
|
||||
}
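As a quick illustration of the behaviour implemented above (a hypothetical usage sketch, not part of this diff): an explicit `host:port` string wins over the fallback set via `set_port`.

// Hypothetical usage sketch for the `Connect` API defined above.
use actix_connect::Connect;

fn connect_port_example() {
    // The port is parsed out of the request string by `parse`.
    let req = Connect::new("example.com:8080".to_string());
    assert_eq!(req.port(), 8080);

    // No port in the string, so the fallback set via `set_port` is used.
    let req = Connect::new("example.com".to_string()).set_port(80);
    assert_eq!(req.host(), "example.com");
    assert_eq!(req.port(), 80);

    // `Display` renders "host:port".
    assert_eq!(format!("{}", req), "example.com:80");
}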
|
||||
|
||||
pub struct Connection<T, U> {
|
||||
io: U,
|
||||
req: T,
|
||||
}
|
||||
|
||||
impl<T, U> Connection<T, U> {
|
||||
pub fn new(io: U, req: T) -> Self {
|
||||
Self { io, req }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Connection<T, U> {
|
||||
/// Reconstruct from parts.
|
||||
pub fn from_parts(io: U, req: T) -> Self {
|
||||
Self { io, req }
|
||||
}
|
||||
|
||||
/// Deconstruct into parts.
|
||||
pub fn into_parts(self) -> (U, T) {
|
||||
(self.io, self.req)
|
||||
}
|
||||
|
||||
/// Replace the enclosed I/O object, returning the old object and the new `Connection`
|
||||
pub fn replace<Y>(self, io: Y) -> (U, Connection<T, Y>) {
|
||||
(self.io, Connection { io, req: self.req })
|
||||
}
|
||||
|
||||
/// Returns a shared reference to the underlying stream.
|
||||
pub fn get_ref(&self) -> &U {
|
||||
&self.io
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying stream.
|
||||
pub fn get_mut(&mut self) -> &mut U {
|
||||
&mut self.io
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address, U> Connection<T, U> {
|
||||
/// Get request
|
||||
pub fn host(&self) -> &str {
|
||||
&self.req.host()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> std::ops::Deref for Connection<T, U> {
|
||||
type Target = U;
|
||||
|
||||
fn deref(&self) -> &U {
|
||||
&self.io
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> std::ops::DerefMut for Connection<T, U> {
|
||||
fn deref_mut(&mut self) -> &mut U {
|
||||
&mut self.io
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U: fmt::Debug> fmt::Debug for Connection<T, U> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "Stream {{{:?}}}", self.io)
|
||||
}
|
||||
}
|
@ -1,156 +0,0 @@
|
||||
use std::collections::VecDeque;
|
||||
use std::marker::PhantomData;
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use actix_service::{NewService, Service};
|
||||
use futures::future::{err, ok, Either, FutureResult};
|
||||
use futures::{Async, Future, Poll};
|
||||
use tokio_tcp::{ConnectFuture, TcpStream};
|
||||
|
||||
use super::connect::{Address, Connect, Connection};
|
||||
use super::error::ConnectError;
|
||||
|
||||
/// Tcp connector service factory
|
||||
#[derive(Debug)]
|
||||
pub struct ConnectorFactory<T>(PhantomData<T>);
|
||||
|
||||
impl<T> ConnectorFactory<T> {
|
||||
pub fn new() -> Self {
|
||||
ConnectorFactory(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for ConnectorFactory<T> {
|
||||
fn clone(&self) -> Self {
|
||||
ConnectorFactory(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> NewService for ConnectorFactory<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = Connection<T, TcpStream>;
|
||||
type Error = ConnectError;
|
||||
type Service = Connector<T>;
|
||||
type InitError = ();
|
||||
type Future = FutureResult<Self::Service, Self::InitError>;
|
||||
|
||||
fn new_service(&self, _: &()) -> Self::Future {
|
||||
ok(Connector(PhantomData))
|
||||
}
|
||||
}
|
||||
|
||||
/// Tcp connector service
|
||||
#[derive(Debug)]
|
||||
pub struct Connector<T>(PhantomData<T>);
|
||||
|
||||
impl<T> Connector<T> {
|
||||
pub fn new() -> Self {
|
||||
Connector(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for Connector<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Connector(PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> Service for Connector<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = Connection<T, TcpStream>;
|
||||
type Error = ConnectError;
|
||||
type Future = Either<ConnectorResponse<T>, FutureResult<Self::Response, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Connect<T>) -> Self::Future {
|
||||
let port = req.port();
|
||||
let Connect { req, addr, .. } = req;
|
||||
|
||||
if let Some(addr) = addr {
|
||||
Either::A(ConnectorResponse::new(req, port, addr))
|
||||
} else {
|
||||
error!("TCP connector: got unresolved address");
|
||||
Either::B(err(ConnectError::Unresolved))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
/// Tcp stream connector response future
|
||||
pub struct ConnectorResponse<T> {
|
||||
req: Option<T>,
|
||||
port: u16,
|
||||
addrs: Option<VecDeque<SocketAddr>>,
|
||||
stream: Option<ConnectFuture>,
|
||||
}
|
||||
|
||||
impl<T: Address> ConnectorResponse<T> {
|
||||
pub fn new(
|
||||
req: T,
|
||||
port: u16,
|
||||
addr: either::Either<SocketAddr, VecDeque<SocketAddr>>,
|
||||
) -> ConnectorResponse<T> {
|
||||
trace!(
|
||||
"TCP connector - connecting to {:?} port:{}",
|
||||
req.host(),
|
||||
port
|
||||
);
|
||||
|
||||
match addr {
|
||||
either::Either::Left(addr) => ConnectorResponse {
|
||||
req: Some(req),
|
||||
port,
|
||||
addrs: None,
|
||||
stream: Some(TcpStream::connect(&addr)),
|
||||
},
|
||||
either::Either::Right(addrs) => ConnectorResponse {
|
||||
req: Some(req),
|
||||
port,
|
||||
addrs: Some(addrs),
|
||||
stream: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> Future for ConnectorResponse<T> {
|
||||
type Item = Connection<T, TcpStream>;
|
||||
type Error = ConnectError;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
// connect
|
||||
loop {
|
||||
if let Some(new) = self.stream.as_mut() {
|
||||
match new.poll() {
|
||||
Ok(Async::Ready(sock)) => {
|
||||
let req = self.req.take().unwrap();
|
||||
trace!(
|
||||
"TCP connector - successfully connected to connecting to {:?} - {:?}",
|
||||
req.host(), sock.peer_addr()
|
||||
);
|
||||
return Ok(Async::Ready(Connection::new(sock, req)));
|
||||
}
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Err(err) => {
|
||||
trace!(
|
||||
"TCP connector - failed to connect to connecting to {:?} port: {}",
|
||||
self.req.as_ref().unwrap().host(),
|
||||
self.port,
|
||||
);
|
||||
if self.addrs.is_none() || self.addrs.as_ref().unwrap().is_empty() {
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// try to connect
|
||||
self.stream = Some(TcpStream::connect(
|
||||
&self.addrs.as_mut().unwrap().pop_front().unwrap(),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
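For completeness, here is a hypothetical sketch (futures 0.1 era, not part of this diff) of the resolution-skipping path handled by `Connector::call` above: a `Connect` built with `Connect::with` already carries a `SocketAddr`, so it goes straight to `TcpStream::connect` without touching the resolver. It assumes the returned future is driven on a current-thread Tokio runtime.

// Illustrative only; the host string and function name are placeholders.
use std::net::SocketAddr;

use actix_connect::{Connect, Connector};
use actix_service::Service;
use futures::Future;

fn connect_with_known_addr(addr: SocketAddr) -> impl Future<Item = (), Error = ()> {
    let mut connector = Connector::new();
    connector
        .call(Connect::with("example.com", addr))
        .map(|conn| println!("connected to {}", conn.host()))
        .map_err(|e| eprintln!("connect error: {}", e))
}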
|
@ -1,26 +0,0 @@
|
||||
use std::io;
|
||||
|
||||
use derive_more::{Display, From};
|
||||
use trust_dns_resolver::error::ResolveError;
|
||||
|
||||
#[derive(Debug, From, Display)]
|
||||
pub enum ConnectError {
|
||||
/// Failed to resolve the hostname
|
||||
#[display(fmt = "Failed resolving hostname: {}", _0)]
|
||||
Resolver(ResolveError),
|
||||
|
||||
/// No dns records
|
||||
#[display(fmt = "No dns records found for the input")]
|
||||
NoRecords,
|
||||
|
||||
/// Invalid input
|
||||
InvalidInput,
|
||||
|
||||
/// Unresolved host name
|
||||
#[display(fmt = "Connector received `Connect` method with unresolved host")]
|
||||
Unresolved,
|
||||
|
||||
/// Connection io error
|
||||
#[display(fmt = "{}", _0)]
|
||||
Io(io::Error),
|
||||
}
|
@ -1,85 +0,0 @@
|
||||
//! Actix connect - tcp connector service
|
||||
//!
|
||||
//! ## Package feature
|
||||
//!
|
||||
//! * `ssl` - enables ssl support via `openssl` crate
|
||||
//! * `rust-tls` - enables ssl support via `rustls` crate
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
mod connect;
|
||||
mod connector;
|
||||
mod error;
|
||||
mod resolver;
|
||||
pub mod ssl;
|
||||
|
||||
#[cfg(feature = "uri")]
|
||||
mod uri;
|
||||
|
||||
pub use trust_dns_resolver::{error::ResolveError, AsyncResolver};
|
||||
|
||||
pub use self::connect::{Address, Connect, Connection};
|
||||
pub use self::connector::{Connector, ConnectorFactory};
|
||||
pub use self::error::ConnectError;
|
||||
pub use self::resolver::{Resolver, ResolverFactory};
|
||||
|
||||
use actix_service::{NewService, Service, ServiceExt};
|
||||
use tokio_tcp::TcpStream;
|
||||
use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
|
||||
use trust_dns_resolver::system_conf::read_system_conf;
|
||||
|
||||
pub fn start_resolver(cfg: ResolverConfig, opts: ResolverOpts) -> AsyncResolver {
|
||||
let (resolver, bg) = AsyncResolver::new(cfg, opts);
|
||||
tokio_current_thread::spawn(bg);
|
||||
resolver
|
||||
}
|
||||
|
||||
pub fn start_default_resolver() -> AsyncResolver {
|
||||
let (cfg, opts) = if let Ok((cfg, opts)) = read_system_conf() {
|
||||
(cfg, opts)
|
||||
} else {
|
||||
(ResolverConfig::default(), ResolverOpts::default())
|
||||
};
|
||||
|
||||
let (resolver, bg) = AsyncResolver::new(cfg, opts);
|
||||
tokio_current_thread::spawn(bg);
|
||||
resolver
|
||||
}
|
||||
|
||||
/// Create tcp connector service
|
||||
pub fn new_connector<T: Address>(
|
||||
resolver: AsyncResolver,
|
||||
) -> impl Service<Request = Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError>
|
||||
+ Clone {
|
||||
Resolver::new(resolver).and_then(Connector::new())
|
||||
}
|
||||
|
||||
/// Create tcp connector service factory
|
||||
pub fn new_connector_factory<T: Address>(
|
||||
resolver: AsyncResolver,
|
||||
) -> impl NewService<
|
||||
Request = Connect<T>,
|
||||
Response = Connection<T, TcpStream>,
|
||||
Error = ConnectError,
|
||||
InitError = (),
|
||||
> + Clone {
|
||||
ResolverFactory::new(resolver).and_then(ConnectorFactory::new())
|
||||
}
|
||||
|
||||
/// Create connector service with default parameters
|
||||
pub fn default_connector<T: Address>(
|
||||
) -> impl Service<Request = Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError>
|
||||
+ Clone {
|
||||
Resolver::new(start_default_resolver()).and_then(Connector::new())
|
||||
}
|
||||
|
||||
/// Create connector service factory with default parameters
|
||||
pub fn default_connector_factory<T: Address>() -> impl NewService<
|
||||
Request = Connect<T>,
|
||||
Response = Connection<T, TcpStream>,
|
||||
Error = ConnectError,
|
||||
InitError = (),
|
||||
> + Clone {
|
||||
ResolverFactory::new(start_default_resolver()).and_then(ConnectorFactory::new())
|
||||
}
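As a rough usage sketch of the helpers above (hypothetical, futures 0.1 era): the connector has to be built and driven on a current-thread Tokio runtime so the background resolver task spawned by `start_default_resolver` has an executor, as the tests further down in this diff do via `actix-test-server`.

// Hypothetical sketch only; the host and function names are placeholders.
use actix_connect::{default_connector, Connect};
use actix_service::Service;
use futures::Future;

fn connect_example() -> impl Future<Item = (), Error = ()> {
    let mut connector = default_connector::<String>();
    connector
        .call(Connect::new("example.com:80".to_string()))
        .map(|conn| {
            // `Connection` derefs to the underlying `TcpStream`.
            println!("connected, peer addr: {:?}", conn.peer_addr());
        })
        .map_err(|e| eprintln!("connect error: {}", e))
}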
|
@ -1,168 +0,0 @@
|
||||
use std::collections::VecDeque;
|
||||
use std::marker::PhantomData;
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use actix_service::{NewService, Service};
|
||||
use futures::future::{ok, Either, FutureResult};
|
||||
use futures::{Async, Future, Poll};
|
||||
use trust_dns_resolver::lookup_ip::LookupIpFuture;
|
||||
use trust_dns_resolver::{AsyncResolver, Background};
|
||||
|
||||
use crate::connect::{Address, Connect};
|
||||
use crate::error::ConnectError;
|
||||
|
||||
/// DNS Resolver Service factory
|
||||
pub struct ResolverFactory<T> {
|
||||
resolver: AsyncResolver,
|
||||
_t: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> ResolverFactory<T> {
|
||||
/// Create new resolver instance with custom configuration and options.
|
||||
pub fn new(resolver: AsyncResolver) -> Self {
|
||||
ResolverFactory {
|
||||
resolver,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resolver(&self) -> &AsyncResolver {
|
||||
&self.resolver
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for ResolverFactory<T> {
|
||||
fn clone(&self) -> Self {
|
||||
ResolverFactory {
|
||||
resolver: self.resolver.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> NewService for ResolverFactory<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = Connect<T>;
|
||||
type Error = ConnectError;
|
||||
type Service = Resolver<T>;
|
||||
type InitError = ();
|
||||
type Future = FutureResult<Self::Service, Self::InitError>;
|
||||
|
||||
fn new_service(&self, _: &()) -> Self::Future {
|
||||
ok(Resolver {
|
||||
resolver: self.resolver.clone(),
|
||||
_t: PhantomData,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// DNS Resolver Service
|
||||
pub struct Resolver<T> {
|
||||
resolver: AsyncResolver,
|
||||
_t: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> Resolver<T> {
|
||||
/// Create new resolver instance with custom configuration and options.
|
||||
pub fn new(resolver: AsyncResolver) -> Self {
|
||||
Resolver {
|
||||
resolver,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for Resolver<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Resolver {
|
||||
resolver: self.resolver.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> Service for Resolver<T> {
|
||||
type Request = Connect<T>;
|
||||
type Response = Connect<T>;
|
||||
type Error = ConnectError;
|
||||
type Future = Either<ResolverFuture<T>, FutureResult<Connect<T>, Self::Error>>;
|
||||
|
||||
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, mut req: Connect<T>) -> Self::Future {
|
||||
if req.addr.is_some() {
|
||||
Either::B(ok(req))
|
||||
} else {
|
||||
if let Ok(ip) = req.host().parse() {
|
||||
req.addr = Some(either::Either::Left(SocketAddr::new(ip, req.port())));
|
||||
Either::B(ok(req))
|
||||
} else {
|
||||
trace!("DNS resolver: resolving host {:?}", req.host());
|
||||
Either::A(ResolverFuture::new(req, &self.resolver))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
/// Resolver future
|
||||
pub struct ResolverFuture<T: Address> {
|
||||
req: Option<Connect<T>>,
|
||||
lookup: Background<LookupIpFuture>,
|
||||
}
|
||||
|
||||
impl<T: Address> ResolverFuture<T> {
|
||||
pub fn new(req: Connect<T>, resolver: &AsyncResolver) -> Self {
|
||||
let lookup = if let Some(host) = req.host().splitn(2, ':').next() {
|
||||
resolver.lookup_ip(host)
|
||||
} else {
|
||||
resolver.lookup_ip(req.host())
|
||||
};
|
||||
|
||||
ResolverFuture {
|
||||
lookup,
|
||||
req: Some(req),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address> Future for ResolverFuture<T> {
|
||||
type Item = Connect<T>;
|
||||
type Error = ConnectError;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match self.lookup.poll().map_err(|e| {
|
||||
trace!(
|
||||
"DNS resolver: failed to resolve host {:?} err: {}",
|
||||
self.req.as_ref().unwrap().host(),
|
||||
e
|
||||
);
|
||||
e
|
||||
})? {
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
Async::Ready(ips) => {
|
||||
let mut req = self.req.take().unwrap();
|
||||
let mut addrs: VecDeque<_> = ips
|
||||
.iter()
|
||||
.map(|ip| SocketAddr::new(ip, req.port()))
|
||||
.collect();
|
||||
trace!(
|
||||
"DNS resolver: host {:?} resolved to {:?}",
|
||||
req.host(),
|
||||
addrs
|
||||
);
|
||||
if addrs.is_empty() {
|
||||
Err(ConnectError::NoRecords)
|
||||
} else if addrs.len() == 1 {
|
||||
req.addr = Some(either::Either::Left(addrs.pop_front().unwrap()));
|
||||
Ok(Async::Ready(req))
|
||||
} else {
|
||||
req.addr = Some(either::Either::Right(addrs));
|
||||
Ok(Async::Ready(req))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,6 +0,0 @@
|
||||
//! SSL Services
|
||||
|
||||
#[cfg(feature = "ssl")]
|
||||
mod openssl;
|
||||
#[cfg(feature = "ssl")]
|
||||
pub use self::openssl::OpensslConnector;
|
@ -1,127 +0,0 @@
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_service::{NewService, Service};
|
||||
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
|
||||
use openssl::ssl::{HandshakeError, SslConnector};
|
||||
use tokio_openssl::{ConnectAsync, SslConnectorExt, SslStream};
|
||||
|
||||
use crate::{Address, Connection};
|
||||
|
||||
/// Openssl connector factory
|
||||
pub struct OpensslConnector<T, U> {
|
||||
connector: SslConnector,
|
||||
_t: PhantomData<(T, U)>,
|
||||
}
|
||||
|
||||
impl<T, U> OpensslConnector<T, U> {
|
||||
pub fn new(connector: SslConnector) -> Self {
|
||||
OpensslConnector {
|
||||
connector,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> OpensslConnector<T, U>
|
||||
where
|
||||
T: Address,
|
||||
U: AsyncRead + AsyncWrite + fmt::Debug,
|
||||
{
|
||||
pub fn service(
|
||||
connector: SslConnector,
|
||||
) -> impl Service<
|
||||
Request = Connection<T, U>,
|
||||
Response = Connection<T, SslStream<U>>,
|
||||
Error = HandshakeError<U>,
|
||||
> {
|
||||
OpensslConnectorService {
|
||||
connector: connector,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Clone for OpensslConnector<T, U> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
connector: self.connector.clone(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Address, U> NewService<()> for OpensslConnector<T, U>
|
||||
where
|
||||
U: AsyncRead + AsyncWrite + fmt::Debug,
|
||||
{
|
||||
type Request = Connection<T, U>;
|
||||
type Response = Connection<T, SslStream<U>>;
|
||||
type Error = HandshakeError<U>;
|
||||
type Service = OpensslConnectorService<T, U>;
|
||||
type InitError = ();
|
||||
type Future = FutureResult<Self::Service, Self::InitError>;
|
||||
|
||||
fn new_service(&self, _: &()) -> Self::Future {
|
||||
ok(OpensslConnectorService {
|
||||
connector: self.connector.clone(),
|
||||
_t: PhantomData,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct OpensslConnectorService<T, U> {
|
||||
connector: SslConnector,
|
||||
_t: PhantomData<(T, U)>,
|
||||
}
|
||||
|
||||
impl<T: Address, U> Service for OpensslConnectorService<T, U>
|
||||
where
|
||||
U: AsyncRead + AsyncWrite + fmt::Debug,
|
||||
{
|
||||
type Request = Connection<T, U>;
|
||||
type Response = Connection<T, SslStream<U>>;
|
||||
type Error = HandshakeError<U>;
|
||||
type Future = ConnectAsyncExt<T, U>;
|
||||
|
||||
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, stream: Connection<T, U>) -> Self::Future {
|
||||
trace!("SSL Handshake start for: {:?}", stream.host());
|
||||
let (io, stream) = stream.replace(());
|
||||
ConnectAsyncExt {
|
||||
fut: SslConnectorExt::connect_async(&self.connector, stream.host(), io),
|
||||
stream: Some(stream),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ConnectAsyncExt<T, U> {
|
||||
fut: ConnectAsync<U>,
|
||||
stream: Option<Connection<T, ()>>,
|
||||
}
|
||||
|
||||
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
|
||||
where
|
||||
U: AsyncRead + AsyncWrite + fmt::Debug,
|
||||
{
|
||||
type Item = Connection<T, SslStream<U>>;
|
||||
type Error = HandshakeError<U>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match self.fut.poll().map_err(|e| {
|
||||
trace!("SSL Handshake error: {:?}", e);
|
||||
e
|
||||
})? {
|
||||
Async::Ready(stream) => {
|
||||
let s = self.stream.take().unwrap();
|
||||
trace!("SSL Handshake success: {:?}", s.host());
|
||||
Ok(Async::Ready(s.replace(stream).1))
|
||||
}
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
|
@ -1,37 +0,0 @@
|
||||
use http::Uri;
|
||||
|
||||
use crate::Address;
|
||||
|
||||
impl Address for Uri {
|
||||
fn host(&self) -> &str {
|
||||
self.host().unwrap_or("")
|
||||
}
|
||||
|
||||
fn port(&self) -> Option<u16> {
|
||||
if let Some(port) = self.port_u16() {
|
||||
Some(port)
|
||||
} else {
|
||||
port(self.scheme_str())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: load data from file
|
||||
fn port(scheme: Option<&str>) -> Option<u16> {
|
||||
if let Some(scheme) = scheme {
|
||||
match scheme {
|
||||
"http" => Some(80),
|
||||
"https" => Some(443),
|
||||
"ws" => Some(80),
|
||||
"wss" => Some(443),
|
||||
"amqp" => Some(5672),
|
||||
"amqps" => Some(5671),
|
||||
"sb" => Some(5671),
|
||||
"mqtt" => Some(1883),
|
||||
"mqtts" => Some(8883),
|
||||
_ => None,
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
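A hypothetical illustration of the fallback above: when a `Connect` is built from an `http::Uri` with no explicit port, the scheme's default port is used.

// Illustrative only; relies on the crate's `uri` feature shown in this diff.
use actix_connect::Connect;
use http::{HttpTryFrom, Uri};

fn uri_default_port_example() {
    let uri = Uri::try_from("https://example.com").unwrap();
    let req = Connect::new(uri);

    assert_eq!(req.host(), "example.com");
    // No explicit port in the URI, so the "https" default from `port()` applies.
    assert_eq!(req.port(), 443);
}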
|
@ -1,111 +0,0 @@
|
||||
use actix_codec::{BytesCodec, Framed};
|
||||
use actix_server_config::Io;
|
||||
use actix_service::{fn_service, NewService, Service};
|
||||
use actix_test_server::TestServer;
|
||||
use bytes::Bytes;
|
||||
use futures::{future::lazy, Future, Sink};
|
||||
use http::{HttpTryFrom, Uri};
|
||||
use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
|
||||
|
||||
use actix_connect::{default_connector, Connect};
|
||||
|
||||
#[test]
|
||||
fn test_string() {
|
||||
let mut srv = TestServer::with(|| {
|
||||
fn_service(|io: Io<tokio_tcp::TcpStream>| {
|
||||
Framed::new(io.into_parts().0, BytesCodec)
|
||||
.send(Bytes::from_static(b"test"))
|
||||
.then(|_| Ok::<_, ()>(()))
|
||||
})
|
||||
});
|
||||
|
||||
let mut conn = srv
|
||||
.block_on(lazy(|| Ok::<_, ()>(default_connector())))
|
||||
.unwrap();
|
||||
let addr = format!("localhost:{}", srv.port());
|
||||
let con = srv.block_on(conn.call(addr.into())).unwrap();
|
||||
assert_eq!(con.peer_addr().unwrap(), srv.addr());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_static_str() {
|
||||
let mut srv = TestServer::with(|| {
|
||||
fn_service(|io: Io<tokio_tcp::TcpStream>| {
|
||||
Framed::new(io.into_parts().0, BytesCodec)
|
||||
.send(Bytes::from_static(b"test"))
|
||||
.then(|_| Ok::<_, ()>(()))
|
||||
})
|
||||
});
|
||||
|
||||
let resolver = srv
|
||||
.block_on(lazy(
|
||||
|| Ok::<_, ()>(actix_connect::start_default_resolver()),
|
||||
))
|
||||
.unwrap();
|
||||
let mut conn = srv
|
||||
.block_on(lazy(|| {
|
||||
Ok::<_, ()>(actix_connect::new_connector(resolver.clone()))
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let con = srv
|
||||
.block_on(conn.call(Connect::with("10", srv.addr())))
|
||||
.unwrap();
|
||||
assert_eq!(con.peer_addr().unwrap(), srv.addr());
|
||||
|
||||
let connect = Connect::new(srv.host().to_owned());
|
||||
let mut conn = srv
|
||||
.block_on(lazy(|| Ok::<_, ()>(actix_connect::new_connector(resolver))))
|
||||
.unwrap();
|
||||
let con = srv.block_on(conn.call(connect));
|
||||
assert!(con.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_new_service() {
|
||||
let mut srv = TestServer::with(|| {
|
||||
fn_service(|io: Io<tokio_tcp::TcpStream>| {
|
||||
Framed::new(io.into_parts().0, BytesCodec)
|
||||
.send(Bytes::from_static(b"test"))
|
||||
.then(|_| Ok::<_, ()>(()))
|
||||
})
|
||||
});
|
||||
|
||||
let resolver = srv
|
||||
.block_on(lazy(|| {
|
||||
Ok::<_, ()>(actix_connect::start_resolver(
|
||||
ResolverConfig::default(),
|
||||
ResolverOpts::default(),
|
||||
))
|
||||
}))
|
||||
.unwrap();
|
||||
let factory = srv
|
||||
.block_on(lazy(|| {
|
||||
Ok::<_, ()>(actix_connect::new_connector_factory(resolver))
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let mut conn = srv.block_on(factory.new_service(&())).unwrap();
|
||||
let con = srv
|
||||
.block_on(conn.call(Connect::with("10", srv.addr())))
|
||||
.unwrap();
|
||||
assert_eq!(con.peer_addr().unwrap(), srv.addr());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_uri() {
|
||||
let mut srv = TestServer::with(|| {
|
||||
fn_service(|io: Io<tokio_tcp::TcpStream>| {
|
||||
Framed::new(io.into_parts().0, BytesCodec)
|
||||
.send(Bytes::from_static(b"test"))
|
||||
.then(|_| Ok::<_, ()>(()))
|
||||
})
|
||||
});
|
||||
|
||||
let mut conn = srv
|
||||
.block_on(lazy(|| Ok::<_, ()>(default_connector())))
|
||||
.unwrap();
|
||||
let addr = Uri::try_from(format!("https://localhost:{}", srv.port())).unwrap();
|
||||
let con = srv.block_on(conn.call(addr.into())).unwrap();
|
||||
assert_eq!(con.peer_addr().unwrap(), srv.addr());
|
||||
}
|
1
actix-macros/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
/wip
|
53
actix-macros/CHANGES.md
Normal file
@ -0,0 +1,53 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased
|
||||
|
||||
- Minimum supported Rust version (MSRV) is now 1.71.
|
||||
|
||||
## 0.2.4
|
||||
|
||||
- Update `syn` dependency to `2`.
|
||||
- Minimum supported Rust version (MSRV) is now 1.65.
|
||||
|
||||
## 0.2.3
|
||||
|
||||
- Fix test macro in presence of other imports named "test". [#399]
|
||||
|
||||
[#399]: https://github.com/actix/actix-net/pull/399
|
||||
|
||||
## 0.2.2
|
||||
|
||||
- Improve error recovery potential when macro input is invalid. [#391]
|
||||
- Allow custom `System`s on test macro. [#391]
|
||||
|
||||
[#391]: https://github.com/actix/actix-net/pull/391
|
||||
|
||||
## 0.2.1
|
||||
|
||||
- Add optional argument `system` to `main` macro which can be used to specify the path to `actix_rt::System` (useful for re-exports). [#363]
|
||||
|
||||
[#363]: https://github.com/actix/actix-net/pull/363
|
||||
|
||||
## 0.2.0
|
||||
|
||||
- Update to latest `actix_rt::System::new` signature. [#261]
|
||||
|
||||
[#261]: https://github.com/actix/actix-net/pull/261
|
||||
|
||||
## 0.2.0-beta.1
|
||||
|
||||
- Remove `actix-reexport` feature. [#218]
|
||||
|
||||
[#218]: https://github.com/actix/actix-net/pull/218
|
||||
|
||||
## 0.1.3
|
||||
|
||||
- Add `actix-reexport` feature. [#218]
|
||||
|
||||
[#218]: https://github.com/actix/actix-net/pull/218
|
||||
|
||||
## 0.1.2
|
||||
|
||||
- Forward actix_rt::test arguments to test function [#127]
|
||||
|
||||
[#127]: https://github.com/actix/actix-net/pull/127
|
39
actix-macros/Cargo.toml
Normal file
@ -0,0 +1,39 @@
|
||||
[package]
|
||||
name = "actix-macros"
|
||||
version = "0.2.4"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"Ibraheem Ahmed <ibrah1440@gmail.com>",
|
||||
"Rob Ede <robjtede@icloud.com>",
|
||||
]
|
||||
description = "Macros for Actix system and runtime"
|
||||
repository = "https://github.com/actix/actix-net"
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
license.workspace = true
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[package.metadata.cargo-machete]
|
||||
ignored = [
|
||||
"proc_macro2", # specified for minimal versions compat
|
||||
]
|
||||
|
||||
[lib]
|
||||
proc-macro = true
|
||||
|
||||
[dependencies]
|
||||
quote = "1"
|
||||
syn = { version = "2", features = ["full"] }
|
||||
|
||||
# minimal versions compat
|
||||
[target.'cfg(any())'.dependencies]
|
||||
proc-macro2 = "1.0.60"
|
||||
|
||||
[dev-dependencies]
|
||||
actix-rt = "2"
|
||||
futures-util = { version = "0.3.17", default-features = false }
|
||||
rustversion-msrv = "0.100"
|
||||
trybuild = "1"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
1
actix-macros/LICENSE-APACHE
Symbolic link
@ -0,0 +1 @@
|
||||
../LICENSE-APACHE
|
1
actix-macros/LICENSE-MIT
Symbolic link
@ -0,0 +1 @@
|
||||
../LICENSE-MIT
|
219
actix-macros/src/lib.rs
Normal file
@ -0,0 +1,219 @@
|
||||
//! Macros for Actix system and runtime.
|
||||
//!
|
||||
//! The [`actix-rt`](https://docs.rs/actix-rt) crate must be available for macro output to compile.
|
||||
//!
|
||||
//! # Entry-point
|
||||
//! See docs for the [`#[main]`](macro@main) macro.
|
||||
//!
|
||||
//! # Tests
|
||||
//! See docs for the [`#[test]`](macro@test) macro.
|
||||
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
use proc_macro::TokenStream;
|
||||
use quote::quote;
|
||||
use syn::parse::Parser as _;
|
||||
|
||||
type AttributeArgs = syn::punctuated::Punctuated<syn::Meta, syn::Token![,]>;
|
||||
|
||||
/// Marks async entry-point function to be executed by Actix system.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// #[actix_rt::main]
|
||||
/// async fn main() {
|
||||
/// println!("Hello world");
|
||||
/// }
|
||||
/// ```
|
||||
#[proc_macro_attribute]
|
||||
pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
|
||||
Ok(input) => input,
|
||||
// on parse err, make IDEs happy; see fn docs
|
||||
Err(err) => return input_and_compile_error(item, err),
|
||||
};
|
||||
|
||||
let parser = AttributeArgs::parse_terminated;
|
||||
let args = match parser.parse(args.clone()) {
|
||||
Ok(args) => args,
|
||||
Err(err) => return input_and_compile_error(args, err),
|
||||
};
|
||||
|
||||
let attrs = &input.attrs;
|
||||
let vis = &input.vis;
|
||||
let sig = &mut input.sig;
|
||||
let body = &input.block;
|
||||
|
||||
if sig.asyncness.is_none() {
|
||||
return syn::Error::new_spanned(
|
||||
sig.fn_token,
|
||||
"the async keyword is missing from the function declaration",
|
||||
)
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
|
||||
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
|
||||
|
||||
for arg in &args {
|
||||
match arg {
|
||||
syn::Meta::NameValue(syn::MetaNameValue {
|
||||
path,
|
||||
value:
|
||||
syn::Expr::Lit(syn::ExprLit {
|
||||
lit: syn::Lit::Str(lit),
|
||||
..
|
||||
}),
|
||||
..
|
||||
}) => match path
|
||||
.get_ident()
|
||||
.map(|i| i.to_string().to_lowercase())
|
||||
.as_deref()
|
||||
{
|
||||
Some("system") => match lit.parse() {
|
||||
Ok(path) => system = path,
|
||||
Err(_) => {
|
||||
return syn::Error::new_spanned(lit, "Expected path")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sig.asyncness = None;
|
||||
|
||||
(quote! {
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
<#system>::new().block_on(async move { #body })
|
||||
}
|
||||
})
|
||||
.into()
|
||||
}
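For orientation, the `quote!` block above expands an annotated entry point roughly as follows (an illustrative sketch of the generated code, not literal macro output):

// Input:
//
//     #[actix_rt::main]
//     async fn main() {
//         println!("Hello world");
//     }
//
// Approximate expansion: the `async` keyword is stripped and the body is moved
// into `System::block_on`; `::actix_rt::System` is the default system path
// unless overridden with the `system = "..."` argument.
fn main() {
    <::actix_rt::System>::new().block_on(async move {
        println!("Hello world");
    })
}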
|
||||
|
||||
/// Marks async test function to be executed in an Actix system.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// #[actix_rt::test]
|
||||
/// async fn my_test() {
|
||||
/// assert!(true);
|
||||
/// }
|
||||
/// ```
|
||||
#[proc_macro_attribute]
|
||||
pub fn test(args: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let mut input = match syn::parse::<syn::ItemFn>(item.clone()) {
|
||||
Ok(input) => input,
|
||||
// on parse err, make IDEs happy; see fn docs
|
||||
Err(err) => return input_and_compile_error(item, err),
|
||||
};
|
||||
|
||||
let parser = AttributeArgs::parse_terminated;
|
||||
let args = match parser.parse(args.clone()) {
|
||||
Ok(args) => args,
|
||||
Err(err) => return input_and_compile_error(args, err),
|
||||
};
|
||||
|
||||
let attrs = &input.attrs;
|
||||
let vis = &input.vis;
|
||||
let sig = &mut input.sig;
|
||||
let body = &input.block;
|
||||
let mut has_test_attr = false;
|
||||
|
||||
for attr in attrs {
|
||||
if attr.path().is_ident("test") {
|
||||
has_test_attr = true;
|
||||
}
|
||||
}
|
||||
|
||||
if sig.asyncness.is_none() {
|
||||
return syn::Error::new_spanned(
|
||||
input.sig.fn_token,
|
||||
"the async keyword is missing from the function declaration",
|
||||
)
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
|
||||
sig.asyncness = None;
|
||||
|
||||
let missing_test_attr = if has_test_attr {
|
||||
quote! {}
|
||||
} else {
|
||||
quote! { #[::core::prelude::v1::test] }
|
||||
};
|
||||
|
||||
let mut system = syn::parse_str::<syn::Path>("::actix_rt::System").unwrap();
|
||||
|
||||
for arg in &args {
|
||||
match arg {
|
||||
syn::Meta::NameValue(syn::MetaNameValue {
|
||||
path,
|
||||
value:
|
||||
syn::Expr::Lit(syn::ExprLit {
|
||||
lit: syn::Lit::Str(lit),
|
||||
..
|
||||
}),
|
||||
..
|
||||
}) => match path
|
||||
.get_ident()
|
||||
.map(|i| i.to_string().to_lowercase())
|
||||
.as_deref()
|
||||
{
|
||||
Some("system") => match lit.parse() {
|
||||
Ok(path) => system = path,
|
||||
Err(_) => {
|
||||
return syn::Error::new_spanned(lit, "Expected path")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
return syn::Error::new_spanned(arg, "Unknown attribute specified")
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
(quote! {
|
||||
#missing_test_attr
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
<#system>::new().block_on(async { #body })
|
||||
}
|
||||
})
|
||||
.into()
|
||||
}
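Similarly, the test macro's output is approximately the following (illustrative sketch only):

// Input:
//
//     #[actix_rt::test]
//     async fn my_test() {
//         assert!(true);
//     }
//
// Approximate expansion: a `#[test]` attribute is injected unless one is
// already present, and the body runs inside `System::block_on`.
#[::core::prelude::v1::test]
fn my_test() {
    <::actix_rt::System>::new().block_on(async {
        assert!(true);
    })
}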
|
||||
|
||||
/// Converts the error to a token stream and appends it to the original input.
|
||||
///
|
||||
/// Returning the original input in addition to the error is good for IDEs which can gracefully
|
||||
/// recover and show more precise errors within the macro body.
|
||||
///
|
||||
/// See <https://github.com/rust-analyzer/rust-analyzer/issues/10468> for more info.
|
||||
fn input_and_compile_error(mut item: TokenStream, err: syn::Error) -> TokenStream {
|
||||
let compile_err = TokenStream::from(err.to_compile_error());
|
||||
item.extend(compile_err);
|
||||
item
|
||||
}
|
21
actix-macros/tests/trybuild.rs
Normal file
@ -0,0 +1,21 @@
|
||||
#![allow(missing_docs)]
|
||||
|
||||
#[rustversion_msrv::msrv]
|
||||
#[test]
|
||||
fn compile_macros() {
|
||||
let t = trybuild::TestCases::new();
|
||||
|
||||
t.pass("tests/trybuild/main-01-basic.rs");
|
||||
t.compile_fail("tests/trybuild/main-02-only-async.rs");
|
||||
t.pass("tests/trybuild/main-03-fn-params.rs");
|
||||
t.pass("tests/trybuild/main-04-system-path.rs");
|
||||
t.compile_fail("tests/trybuild/main-05-system-expect-path.rs");
|
||||
t.compile_fail("tests/trybuild/main-06-unknown-attr.rs");
|
||||
|
||||
t.pass("tests/trybuild/test-01-basic.rs");
|
||||
t.pass("tests/trybuild/test-02-keep-attrs.rs");
|
||||
t.compile_fail("tests/trybuild/test-03-only-async.rs");
|
||||
t.pass("tests/trybuild/test-04-system-path.rs");
|
||||
t.compile_fail("tests/trybuild/test-05-system-expect-path.rs");
|
||||
t.compile_fail("tests/trybuild/test-06-unknown-attr.rs");
|
||||
}
|
4
actix-macros/tests/trybuild/main-01-basic.rs
Normal file
@ -0,0 +1,4 @@
|
||||
#[actix_rt::main]
|
||||
async fn main() {
|
||||
println!("Hello world");
|
||||
}
|
4
actix-macros/tests/trybuild/main-02-only-async.rs
Normal file
@ -0,0 +1,4 @@
|
||||
#[actix_rt::main]
|
||||
fn main() {
|
||||
futures_util::future::ready(()).await
|
||||
}
|
11
actix-macros/tests/trybuild/main-02-only-async.stderr
Normal file
@ -0,0 +1,11 @@
|
||||
error: the async keyword is missing from the function declaration
|
||||
--> tests/trybuild/main-02-only-async.rs:2:1
|
||||
|
|
||||
2 | fn main() {
|
||||
| ^^
|
||||
|
||||
error[E0601]: `main` function not found in crate `$CRATE`
|
||||
--> tests/trybuild/main-02-only-async.rs:4:2
|
||||
|
|
||||
4 | }
|
||||
| ^ consider adding a `main` function to `$DIR/tests/trybuild/main-02-only-async.rs`
|
6
actix-macros/tests/trybuild/main-03-fn-params.rs
Normal file
@ -0,0 +1,6 @@
|
||||
#[actix_rt::main]
|
||||
async fn main2(_param: bool) {
|
||||
futures_util::future::ready(()).await
|
||||
}
|
||||
|
||||
fn main() {}
|
8
actix-macros/tests/trybuild/main-04-system-path.rs
Normal file
@ -0,0 +1,8 @@
|
||||
mod system {
|
||||
pub use actix_rt::System as MySystem;
|
||||
}
|
||||
|
||||
#[actix_rt::main(system = "system::MySystem")]
|
||||
async fn main() {
|
||||
futures_util::future::ready(()).await
|
||||
}
|
@ -0,0 +1,4 @@
|
||||
#[actix_rt::main(system = "!@#*&")]
|
||||
async fn main2() {}
|
||||
|
||||
fn main() {}
|
@ -0,0 +1,5 @@
|
||||
error: Expected path
|
||||
--> $DIR/main-05-system-expect-path.rs:1:27
|
||||
|
|
||||
1 | #[actix_rt::main(system = "!@#*&")]
|
||||
| ^^^^^^^
|
7
actix-macros/tests/trybuild/main-06-unknown-attr.rs
Normal file
@ -0,0 +1,7 @@
|
||||
#[actix_rt::main(foo = "bar")]
|
||||
async fn async_main() {}
|
||||
|
||||
#[actix_rt::main(bar::baz)]
|
||||
async fn async_main2() {}
|
||||
|
||||
fn main() {}
|
11
actix-macros/tests/trybuild/main-06-unknown-attr.stderr
Normal file
@ -0,0 +1,11 @@
|
||||
error: Unknown attribute specified
|
||||
--> $DIR/main-06-unknown-attr.rs:1:18
|
||||
|
|
||||
1 | #[actix_rt::main(foo = "bar")]
|
||||
| ^^^^^^^^^^^
|
||||
|
||||
error: Unknown attribute specified
|
||||
--> $DIR/main-06-unknown-attr.rs:4:18
|
||||
|
|
||||
4 | #[actix_rt::main(bar::baz)]
|
||||
| ^^^^^^^^
|
6
actix-macros/tests/trybuild/test-01-basic.rs
Normal file
@ -0,0 +1,6 @@
|
||||
#[actix_rt::test]
|
||||
async fn my_test() {
|
||||
assert!(true);
|
||||
}
|
||||
|
||||
fn main() {}
|
7
actix-macros/tests/trybuild/test-02-keep-attrs.rs
Normal file
@ -0,0 +1,7 @@
|
||||
#[actix_rt::test]
|
||||
#[should_panic]
|
||||
async fn my_test() {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn main() {}
|
6
actix-macros/tests/trybuild/test-03-only-async.rs
Normal file
@ -0,0 +1,6 @@
|
||||
#[actix_rt::test]
|
||||
fn my_test() {
|
||||
futures_util::future::ready(()).await
|
||||
}
|
||||
|
||||
fn main() {}
|
5
actix-macros/tests/trybuild/test-03-only-async.stderr
Normal file
@ -0,0 +1,5 @@
|
||||
error: the async keyword is missing from the function declaration
|
||||
--> $DIR/test-03-only-async.rs:2:1
|
||||
|
|
||||
2 | fn my_test() {
|
||||
| ^^
|
10
actix-macros/tests/trybuild/test-04-system-path.rs
Normal file
@ -0,0 +1,10 @@
|
||||
mod system {
|
||||
pub use actix_rt::System as MySystem;
|
||||
}
|
||||
|
||||
#[actix_rt::test(system = "system::MySystem")]
|
||||
async fn my_test() {
|
||||
futures_util::future::ready(()).await
|
||||
}
|
||||
|
||||
fn main() {}
|
@ -0,0 +1,4 @@
|
||||
#[actix_rt::test(system = "!@#*&")]
|
||||
async fn my_test() {}
|
||||
|
||||
fn main() {}
|
@ -0,0 +1,5 @@
|
||||
error: Expected path
|
||||
--> $DIR/test-05-system-expect-path.rs:1:27
|
||||
|
|
||||
1 | #[actix_rt::test(system = "!@#*&")]
|
||||
| ^^^^^^^
|
7
actix-macros/tests/trybuild/test-06-unknown-attr.rs
Normal file
@ -0,0 +1,7 @@
|
||||
#[actix_rt::test(foo = "bar")]
|
||||
async fn my_test_1() {}
|
||||
|
||||
#[actix_rt::test(bar::baz)]
|
||||
async fn my_test_2() {}
|
||||
|
||||
fn main() {}
|
11
actix-macros/tests/trybuild/test-06-unknown-attr.stderr
Normal file
@ -0,0 +1,11 @@
|
||||
error: Unknown attribute specified
|
||||
--> $DIR/test-06-unknown-attr.rs:1:18
|
||||
|
|
||||
1 | #[actix_rt::test(foo = "bar")]
|
||||
| ^^^^^^^^^^^
|
||||
|
||||
error: Unknown attribute specified
|
||||
--> $DIR/test-06-unknown-attr.rs:4:18
|
||||
|
|
||||
4 | #[actix_rt::test(bar::baz)]
|
||||
| ^^^^^^^^
|
@ -1,21 +1,174 @@
|
||||
# Changes
|
||||
|
||||
## [0.2.1] - 2019-03-11
|
||||
## Unreleased
|
||||
|
||||
### Added
|
||||
- Minimum supported Rust version (MSRV) is now 1.71.
|
||||
|
||||
* Added `blocking` module
|
||||
## 2.10.0
|
||||
|
||||
* Arbiter::exec_fn - execute fn on the arbiter's thread
|
||||
- Relax `F`'s bound (`Fn => FnOnce`) on `{Arbiter, System}::with_tokio_rt()` functions.
|
||||
- Update `tokio-uring` dependency to `0.5`.
|
||||
- Minimum supported Rust version (MSRV) is now 1.70.
|
||||
|
||||
* Arbiter::exec - execute fn on the arbiter's thread and wait result
|
||||
## 2.9.0
|
||||
|
||||
## [0.2.0] - 2019-03-06
|
||||
- Add `actix_rt::System::runtime()` method to retrieve the underlying `actix_rt::Runtime` runtime.
|
||||
- Add `actix_rt::Runtime::tokio_runtime()` method to retrieve the underlying Tokio runtime.
|
||||
- Minimum supported Rust version (MSRV) is now 1.65.
|
||||
|
||||
* `run` method returns `io::Result<()>`
|
||||
## 2.8.0
|
||||
|
||||
* Removed `Handle`
|
||||
- Add `#[track_caller]` attribute to `spawn` functions and methods.
|
||||
- Update `tokio-uring` dependency to `0.4`.
|
||||
- Minimum supported Rust version (MSRV) is now 1.59.
|
||||
|
||||
## [0.1.0] - 2018-12-09
|
||||
## 2.7.0
|
||||
|
||||
* Initial release
|
||||
- Update `tokio-uring` dependency to `0.3`.
|
||||
- Minimum supported Rust version (MSRV) is now 1.49.
|
||||
|
||||
## 2.6.0
|
||||
|
||||
- Update `tokio-uring` dependency to `0.2`.
|
||||
|
||||
## 2.5.1
|
||||
|
||||
- Expose `System::with_tokio_rt` and `Arbiter::with_tokio_rt`.
|
||||
|
||||
## 2.5.0
|
||||
|
||||
- Add `System::run_with_code` to allow retrieving the exit code on stop.
|
||||
|
||||
## 2.4.0
|
||||
|
||||
- Add `Arbiter::try_current` for situations where thread may or may not have Arbiter context.
|
||||
- Start io-uring with `System::new` when feature is enabled.
|
||||
|
||||
## 2.3.0
|
||||
|
||||
- The `spawn` method can now resolve with non-unit outputs.
|
||||
- Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux.
|
||||
|
||||
## 2.2.0
|
||||
|
||||
- **BREAKING** `ActixStream::{poll_read_ready, poll_write_ready}` methods now return `Ready` object in ok variant.
|
||||
- Breakage is acceptable since `ActixStream` was not intended to be public.
|
||||
|
||||
## 2.1.0
|
||||
|
||||
- Add `ActixStream` extension trait to include readiness methods.
|
||||
- Re-export `tokio::net::TcpSocket` in `net` module
|
||||
|
||||
## 2.0.2
|
||||
|
||||
- Add `Arbiter::handle` to get a handle of an owned Arbiter.
|
||||
- Add `System::try_current` for situations where actix may or may not be running a System.
|
||||
|
||||
## 2.0.1
|
||||
|
||||
- Expose `JoinError` from Tokio.
|
||||
|
||||
## 2.0.0
|
||||
|
||||
- Remove all Arbiter-local storage methods.
|
||||
- Re-export `tokio::pin`.
|
||||
|
||||
## 2.0.0-beta.3
|
||||
|
||||
- Remove `run_in_tokio`, `attach_to_tokio` and `AsyncSystemRunner`.
|
||||
- Return `JoinHandle` from `actix_rt::spawn`.
|
||||
- Remove old `Arbiter::spawn`. Implementation is now inlined into `actix_rt::spawn`.
|
||||
- Rename `Arbiter::{send => spawn}` and `Arbiter::{exec_fn => spawn_fn}`.
|
||||
- Remove `Arbiter::exec`.
|
||||
- Remove deprecated `Arbiter::local_join` and `Arbiter::is_running`.
|
||||
- `Arbiter::spawn` now accepts !Unpin futures.
|
||||
- `System::new` no longer takes arguments.
|
||||
- Remove `System::with_current`.
|
||||
- Remove `Builder`.
|
||||
- Add `System::with_init` as replacement for `Builder::run`.
|
||||
- Rename `System::{is_set => is_registered}`.
|
||||
- Add `ArbiterHandle` for sending messages to non-current-thread arbiters.
|
||||
- `System::arbiter` now returns an `&ArbiterHandle`.
|
||||
- `Arbiter::current` now returns an `ArbiterHandle` instead.
|
||||
- `Arbiter::join` now takes self by value.
|
||||
|
||||
## 2.0.0-beta.2
|
||||
|
||||
- Add `task` mod with re-export of `tokio::task::{spawn_blocking, yield_now, JoinHandle}`
|
||||
- Add default "macros" feature to allow faster compile times when using `default-features=false`.
|
||||
|
||||
## 2.0.0-beta.1
|
||||
|
||||
- Add `System::attach_to_tokio` method.
|
||||
- Update `tokio` dependency to `1.0`.
|
||||
- Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep` to stay aligned with Tokio's naming.
|
||||
- Remove `'static` lifetime requirement for `Runtime::block_on` and `SystemRunner::block_on`.
|
||||
- These methods now accept `&self` when calling.
|
||||
- Remove `'static` lifetime requirement for `System::run` and `Builder::run`.
|
||||
- `Arbiter::spawn` now panics when `System` is not in scope.
|
||||
- Fix work load issue by removing `PENDING` thread local.
|
||||
|
||||
## 1.1.1
|
||||
|
||||
- Fix memory leak due to
|
||||
|
||||
## 1.1.0 _(YANKED)_
|
||||
|
||||
- Expose `System::is_set` to check if the current system has been started
|
||||
- Add `Arbiter::is_running` to check if event loop is running
|
||||
- Add `Arbiter::local_join` associated function to be able to `await` spawned futures
|
||||
|
||||
## 1.0.0
|
||||
|
||||
- Update dependencies
|
||||
|
||||
## 1.0.0-alpha.3
|
||||
|
||||
- Migrate to tokio 0.2
|
||||
- Fix compilation on non-unix platforms
|
||||
|
||||
## 1.0.0-alpha.2
|
||||
|
||||
- Export `main` and `test` attribute macros
|
||||
- Export `time` module (re-export of tokio-timer)
|
||||
- Export `net` module (re-export of tokio-net)
|
||||
|
||||
## 1.0.0-alpha.1
|
||||
|
||||
- Migrate to std::future and tokio 0.2
|
||||
|
||||
## 0.2.6
|
||||
|
||||
- Allow joining the arbiter's thread. #60
|
||||
- Fix arbiter's thread panic message.
|
||||
|
||||
## 0.2.5
|
||||
|
||||
- Add arbiter specific storage
|
||||
|
||||
## 0.2.4
|
||||
|
||||
- Avoid a copy of the Future when initializing the Box. #29
|
||||
|
||||
## 0.2.3
|
||||
|
||||
- Allow starting System using an existing CurrentThread Handle. #22
|
||||
|
||||
## 0.2.2
|
||||
|
||||
- Moved `blocking` module to `actix-threadpool` crate
|
||||
|
||||
## 0.2.1
|
||||
|
||||
- Added `blocking` module
|
||||
- Added `Arbiter::exec_fn` - execute fn on the arbiter's thread
|
||||
- Added `Arbiter::exec` - execute fn on the arbiter's thread and wait result
|
||||
|
||||
## 0.2.0
|
||||
|
||||
- `run` method returns `io::Result<()>`
|
||||
- Removed `Handle`
|
||||
|
||||
## 0.1.0
|
||||
|
||||
- Initial release
|
||||
|
@ -1,32 +1,36 @@
|
||||
[package]
|
||||
name = "actix-rt"
|
||||
version = "0.2.1"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "Actix runtime"
|
||||
keywords = ["network", "framework", "async", "futures"]
|
||||
version = "2.10.0"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>", "Rob Ede <robjtede@icloud.com>"]
|
||||
description = "Tokio-based single-threaded async runtime for the Actix ecosystem"
|
||||
keywords = ["async", "futures", "io", "runtime"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-rt/"
|
||||
repository = "https://github.com/actix/actix-net"
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
license = "MIT/Apache-2.0"
|
||||
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
|
||||
edition = "2018"
|
||||
workspace = "../"
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[lib]
|
||||
name = "actix_rt"
|
||||
path = "src/lib.rs"
|
||||
[package.metadata.cargo_check_external_types]
|
||||
allowed_external_types = ["actix_macros::*", "tokio::*"]
|
||||
|
||||
[features]
|
||||
default = ["macros"]
|
||||
macros = ["actix-macros"]
|
||||
io-uring = ["tokio-uring"]
|
||||
|
||||
[dependencies]
|
||||
bytes = "0.4"
|
||||
derive_more = "0.14"
|
||||
futures = "0.1.25"
|
||||
parking_lot = "0.7"
|
||||
lazy_static = "1.2"
|
||||
log = "0.4"
|
||||
num_cpus = "1.10"
|
||||
threadpool = "1.7"
|
||||
tokio-current-thread = "0.1"
|
||||
tokio-executor = "0.1.5"
|
||||
tokio-reactor = "0.1.7"
|
||||
tokio-timer = "0.2.8"
|
||||
actix-macros = { version = "0.2.3", optional = true }
|
||||
|
||||
futures-core = { version = "0.3", default-features = false }
|
||||
tokio = { version = "1.23.1", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }
|
||||
|
||||
# runtime for `io-uring` feature
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
tokio-uring = { version = "0.5", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "1.23.1", features = ["full"] }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
1
actix-rt/LICENSE-APACHE
Symbolic link
@ -0,0 +1 @@
|
||||
../LICENSE-APACHE
|
1
actix-rt/LICENSE-MIT
Symbolic link
@ -0,0 +1 @@
|
||||
../LICENSE-MIT
|
14
actix-rt/README.md
Normal file
@ -0,0 +1,14 @@
|
||||
# actix-rt
|
||||
|
||||
> Tokio-based single-threaded async runtime for the Actix ecosystem.
|
||||
|
||||
[](https://crates.io/crates/actix-rt)
|
||||
[](https://docs.rs/actix-rt/2.10.0)
|
||||
[](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
|
||||

|
||||
<br />
|
||||
[](https://deps.rs/crate/actix-rt/2.10.0)
|
||||

|
||||
[](https://discord.gg/WghFtEH6Hb)
|
||||
|
||||
See crate documentation for more: https://docs.rs/actix-rt.
|
60
actix-rt/examples/multi_thread_system.rs
Normal file
@ -0,0 +1,60 @@
|
||||
//! An example of how to build a multi-thread Tokio runtime for an Actix System.
|
||||
//! It then spawns async tasks that can make use of the Tokio runtime's work stealing.
|
||||
|
||||
use actix_rt::System;
|
||||
|
||||
fn main() {
|
||||
System::with_tokio_rt(|| {
|
||||
// build system with a multi-thread tokio runtime.
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.worker_threads(2)
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap()
|
||||
})
|
||||
.block_on(async_main());
|
||||
}
|
||||
|
||||
// async main function that acts like #[actix_web::main] or #[tokio::main]
|
||||
async fn async_main() {
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
|
||||
// get a handle to system arbiter and spawn async task on it
|
||||
System::current().arbiter().spawn(async {
|
||||
// use tokio::spawn to get inside the context of multi thread tokio runtime
|
||||
let h1 = tokio::spawn(async {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
});
|
||||
|
||||
// work stealing occurs for this task spawn
|
||||
let h2 = tokio::spawn(async {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
});
|
||||
|
||||
h1.await.unwrap();
|
||||
h2.await.unwrap();
|
||||
let _ = tx.send(());
|
||||
});
|
||||
|
||||
rx.await.unwrap();
|
||||
|
||||
let (tx, rx) = tokio::sync::oneshot::channel();
|
||||
let now = std::time::Instant::now();
|
||||
|
||||
// without additional tokio::spawn, all spawned tasks run on single thread
|
||||
System::current().arbiter().spawn(async {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
std::thread::sleep(std::time::Duration::from_secs(2));
|
||||
let _ = tx.send(());
|
||||
});
|
||||
|
||||
// previous spawn task has blocked the system arbiter thread
|
||||
// so this task will wait for 2 seconds until it can be run
|
||||
System::current().arbiter().spawn(async move {
|
||||
println!("thread id is {:?}", std::thread::current().id());
|
||||
assert!(now.elapsed() > std::time::Duration::from_secs(2));
|
||||
});
|
||||
|
||||
rx.await.unwrap();
|
||||
}
|
@ -1,317 +1,317 @@
|
||||
use std::cell::{Cell, RefCell};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::{fmt, thread};
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
fmt,
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
sync::atomic::{AtomicUsize, Ordering},
|
||||
task::{Context, Poll},
|
||||
thread,
|
||||
};
|
||||
|
||||
use futures::sync::mpsc::{unbounded, UnboundedReceiver, UnboundedSender};
|
||||
use futures::sync::oneshot::{channel, Canceled, Sender};
|
||||
use futures::{future, Async, Future, IntoFuture, Poll, Stream};
|
||||
use tokio_current_thread::spawn;
|
||||
use futures_core::ready;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::builder::Builder;
|
||||
use crate::system::System;
|
||||
|
||||
thread_local!(
|
||||
static ADDR: RefCell<Option<Arbiter>> = RefCell::new(None);
|
||||
static RUNNING: Cell<bool> = Cell::new(false);
|
||||
static Q: RefCell<Vec<Box<Future<Item = (), Error = ()>>>> = RefCell::new(Vec::new());
|
||||
);
|
||||
use crate::system::{System, SystemCommand};
|
||||
|
||||
pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
thread_local!(
|
||||
static HANDLE: RefCell<Option<ArbiterHandle>> = const { RefCell::new(None) };
|
||||
);
|
||||
|
||||
pub(crate) enum ArbiterCommand {
|
||||
Stop,
|
||||
Execute(Box<Future<Item = (), Error = ()> + Send>),
|
||||
ExecuteFn(Box<FnExec>),
|
||||
Execute(Pin<Box<dyn Future<Output = ()> + Send>>),
|
||||
}
|
||||
|
||||
impl fmt::Debug for ArbiterCommand {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
ArbiterCommand::Stop => write!(f, "ArbiterCommand::Stop"),
|
||||
ArbiterCommand::Execute(_) => write!(f, "ArbiterCommand::Execute"),
|
||||
ArbiterCommand::ExecuteFn(_) => write!(f, "ArbiterCommand::ExecuteFn"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A handle for sending spawn and stop messages to an [Arbiter].
|
||||
#[derive(Debug, Clone)]
|
||||
/// Arbiters provide an asynchronous execution environment for actors, functions
|
||||
/// and futures. When an Arbiter is created, it spawns a new OS thread and
|
||||
/// hosts an event loop. Some Arbiter functions execute on the current thread.
|
||||
pub struct Arbiter(UnboundedSender<ArbiterCommand>);
|
||||
|
||||
impl Default for Arbiter {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
pub struct ArbiterHandle {
|
||||
tx: mpsc::UnboundedSender<ArbiterCommand>,
|
||||
}
|
||||
|
||||
impl Arbiter {
|
||||
pub(crate) fn new_system() -> Self {
|
||||
let (tx, rx) = unbounded();
|
||||
|
||||
let arb = Arbiter(tx);
|
||||
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
|
||||
RUNNING.with(|cell| cell.set(false));
|
||||
Arbiter::spawn(ArbiterController { stop: None, rx });
|
||||
|
||||
arb
|
||||
impl ArbiterHandle {
|
||||
pub(crate) fn new(tx: mpsc::UnboundedSender<ArbiterCommand>) -> Self {
|
||||
Self { tx }
|
||||
}
|
||||
|
||||
/// Returns the current thread's arbiter's address. If no Arbiter is present, then this
|
||||
/// function will panic!
|
||||
pub fn current() -> Arbiter {
|
||||
ADDR.with(|cell| match *cell.borrow() {
|
||||
Some(ref addr) => addr.clone(),
|
||||
None => panic!("Arbiter is not running"),
|
||||
})
|
||||
}
|
||||
|
||||
/// Stop the arbiter from continuing its event loop.
|
||||
pub fn stop(&self) {
|
||||
let _ = self.0.unbounded_send(ArbiterCommand::Stop);
|
||||
}
|
||||
|
||||
/// Spawn new thread and run event loop in spawned thread.
|
||||
/// Returns address of newly created arbiter.
|
||||
pub fn new() -> Arbiter {
|
||||
let id = COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
let name = format!("actix-rt:worker:{}", id);
|
||||
let sys = System::current();
|
||||
let (arb_tx, arb_rx) = unbounded();
|
||||
let arb_tx2 = arb_tx.clone();
|
||||
|
||||
let _ = thread::Builder::new().name(name.clone()).spawn(move || {
|
||||
let mut rt = Builder::new().build_rt().expect("Can not create Runtime");
|
||||
let arb = Arbiter(arb_tx);
|
||||
|
||||
let (stop, stop_rx) = channel();
|
||||
RUNNING.with(|cell| cell.set(true));
|
||||
|
||||
System::set_current(sys);
|
||||
|
||||
// start arbiter controller
|
||||
rt.spawn(ArbiterController {
|
||||
stop: Some(stop),
|
||||
rx: arb_rx,
|
||||
});
|
||||
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
|
||||
|
||||
// register arbiter
|
||||
let _ = System::current()
|
||||
.sys()
|
||||
.unbounded_send(SystemCommand::RegisterArbiter(id, arb.clone()));
|
||||
|
||||
// run loop
|
||||
let _ = match rt.block_on(stop_rx) {
|
||||
Ok(code) => code,
|
||||
Err(_) => 1,
|
||||
};
|
||||
|
||||
// unregister arbiter
|
||||
let _ = System::current()
|
||||
.sys()
|
||||
.unbounded_send(SystemCommand::UnregisterArbiter(id));
|
||||
});
|
||||
|
||||
Arbiter(arb_tx2)
|
||||
}
|
||||
|
||||
pub(crate) fn run_system() {
|
||||
RUNNING.with(|cell| cell.set(true));
|
||||
Q.with(|cell| {
|
||||
let mut v = cell.borrow_mut();
|
||||
for fut in v.drain(..) {
|
||||
spawn(fut);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
pub(crate) fn stop_system() {
|
||||
RUNNING.with(|cell| cell.set(false));
|
||||
}
|
||||
|
||||
/// Spawn a future on the current thread. This does not create a new Arbiter
|
||||
/// or Arbiter address; it is simply a helper for spawning futures on the current
|
||||
/// thread.
|
||||
pub fn spawn<F>(future: F)
|
||||
/// Send a future to the [Arbiter]'s thread and spawn it.
|
||||
///
|
||||
/// If you require a result, include a response channel in the future.
|
||||
///
|
||||
/// Returns true if future was sent successfully and false if the [Arbiter] has died.
|
||||
pub fn spawn<Fut>(&self, future: Fut) -> bool
|
||||
where
|
||||
F: Future<Item = (), Error = ()> + 'static,
|
||||
Fut: Future<Output = ()> + Send + 'static,
|
||||
{
|
||||
RUNNING.with(move |cell| {
|
||||
if cell.get() {
|
||||
spawn(Box::new(future));
|
||||
} else {
|
||||
Q.with(move |cell| cell.borrow_mut().push(Box::new(future)));
|
||||
}
|
||||
});
|
||||
self.tx
|
||||
.send(ArbiterCommand::Execute(Box::pin(future)))
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
/// Executes a future on the current thread. This does not create a new Arbiter
|
||||
/// or Arbiter address; it is simply a helper for executing futures on the current
|
||||
/// thread.
|
||||
pub fn spawn_fn<F, R>(f: F)
|
||||
where
|
||||
F: FnOnce() -> R + 'static,
|
||||
R: IntoFuture<Item = (), Error = ()> + 'static,
|
||||
{
|
||||
Arbiter::spawn(future::lazy(f))
|
||||
}
|
||||
|
||||
/// Send a future to the Arbiter's thread, and spawn it.
|
||||
pub fn send<F>(&self, future: F)
|
||||
where
|
||||
F: Future<Item = (), Error = ()> + Send + 'static,
|
||||
{
|
||||
let _ = self
|
||||
.0
|
||||
.unbounded_send(ArbiterCommand::Execute(Box::new(future)));
|
||||
}
|
||||
|
||||
/// Send a function to the Arbiter's thread, and execute it. Any result from the function
|
||||
/// is discarded.
|
||||
pub fn exec_fn<F>(&self, f: F)
|
||||
/// Send a function to the [Arbiter]'s thread and execute it.
|
||||
///
|
||||
/// Any result from the function is discarded. If you require a result, include a response
|
||||
/// channel in the function.
|
||||
///
|
||||
/// Returns true if function was sent successfully and false if the [Arbiter] has died.
|
||||
pub fn spawn_fn<F>(&self, f: F) -> bool
|
||||
where
|
||||
F: FnOnce() + Send + 'static,
|
||||
{
|
||||
let _ = self
|
||||
.0
|
||||
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
|
||||
let _ = f();
|
||||
})));
|
||||
self.spawn(async { f() })
|
||||
}
|
||||
|
||||
/// Send a function to the Arbiter's thread. This function will be executed asynchronously.
|
||||
/// A future is created, and when resolved will contain the result of the function sent
|
||||
/// to the Arbiter's thread.
|
||||
pub fn exec<F, R>(&self, f: F) -> impl Future<Item = R, Error = Canceled>
|
||||
/// Instruct the [Arbiter] to stop processing its event loop.
|
||||
///
|
||||
/// Returns true if stop message was sent successfully and false if the [Arbiter] has
|
||||
/// been dropped.
|
||||
pub fn stop(&self) -> bool {
|
||||
self.tx.send(ArbiterCommand::Stop).is_ok()
|
||||
}
|
||||
}
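The docs above suggest pairing `ArbiterHandle::spawn` with a response channel when a result is needed. A minimal sketch of that pattern, assuming `tokio::sync::oneshot` is available as a dependency; `compute_on` is a hypothetical helper, not part of the crate:

use actix_rt::ArbiterHandle;
use tokio::sync::oneshot;

// Run a small computation on the Arbiter and hand back a receiver for the result.
fn compute_on(handle: &ArbiterHandle) -> oneshot::Receiver<u32> {
    let (tx, rx) = oneshot::channel();
    // `spawn` returns false if the Arbiter has already died.
    let sent = handle.spawn(async move {
        let _ = tx.send(40 + 2);
    });
    assert!(sent);
    rx
}

Awaiting the returned receiver elsewhere yields the computed value.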
|
||||
|
||||
/// An Arbiter represents a thread that provides an asynchronous execution environment for futures
|
||||
/// and functions.
|
||||
///
|
||||
/// When an arbiter is created, it spawns a new [OS thread](thread), and hosts an event loop.
|
||||
#[derive(Debug)]
|
||||
pub struct Arbiter {
|
||||
tx: mpsc::UnboundedSender<ArbiterCommand>,
|
||||
thread_handle: thread::JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl Arbiter {
|
||||
/// Spawn a new Arbiter thread and start its event loop.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if a [System] is not registered on the current thread.
|
||||
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Arbiter {
|
||||
Self::with_tokio_rt(|| {
|
||||
crate::runtime::default_tokio_runtime().expect("Cannot create new Arbiter's Runtime.")
|
||||
})
|
||||
}
|
||||
|
||||
/// Spawn a new Arbiter using the [Tokio Runtime](tokio-runtime) returned from a closure.
|
||||
///
|
||||
/// [tokio-runtime]: tokio::runtime::Runtime
|
||||
#[cfg(not(all(target_os = "linux", feature = "io-uring")))]
|
||||
pub fn with_tokio_rt<F>(runtime_factory: F) -> Arbiter
|
||||
where
|
||||
F: FnOnce() -> R + Send + 'static,
|
||||
R: Send + 'static,
|
||||
F: FnOnce() -> tokio::runtime::Runtime + Send + 'static,
|
||||
{
|
||||
let (tx, rx) = channel();
|
||||
let _ = self
|
||||
.0
|
||||
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
|
||||
if !tx.is_canceled() {
|
||||
let _ = tx.send(f());
|
||||
let sys = System::current();
|
||||
let system_id = sys.id();
|
||||
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
|
||||
|
||||
let thread_handle = thread::Builder::new()
|
||||
.name(name.clone())
|
||||
.spawn({
|
||||
let tx = tx.clone();
|
||||
move || {
|
||||
let rt = crate::runtime::Runtime::from(runtime_factory());
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
System::set_current(sys);
|
||||
|
||||
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
|
||||
|
||||
// register arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
|
||||
|
||||
ready_tx.send(()).unwrap();
|
||||
|
||||
// run arbiter event processing loop
|
||||
rt.block_on(ArbiterRunner { rx });
|
||||
|
||||
// deregister arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::DeregisterArbiter(arb_id));
|
||||
}
|
||||
})));
|
||||
rx
|
||||
})
|
||||
.unwrap_or_else(|err| panic!("Cannot spawn Arbiter's thread: {name:?}: {err:?}"));
|
||||
|
||||
ready_rx.recv().unwrap();
|
||||
|
||||
Arbiter { tx, thread_handle }
|
||||
}
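A hedged usage sketch of `with_tokio_rt` with a hand-built current-thread Tokio runtime; it assumes a System is already registered on the calling thread:

let arbiter = actix_rt::Arbiter::with_tokio_rt(|| {
    tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .expect("failed to build Tokio runtime")
});

arbiter.spawn(async { println!("running on the custom runtime"); });
arbiter.stop();
arbiter.join().unwrap();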
|
||||
|
||||
/// Spawn a new Arbiter thread and start its event loop with the `tokio-uring` runtime.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if a [System] is not registered on the current thread.
|
||||
#[cfg(all(target_os = "linux", feature = "io-uring"))]
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Arbiter {
|
||||
let sys = System::current();
|
||||
let system_id = sys.id();
|
||||
let arb_id = COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
let name = format!("actix-rt|system:{}|arbiter:{}", system_id, arb_id);
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
let (ready_tx, ready_rx) = std::sync::mpsc::channel::<()>();
|
||||
|
||||
let thread_handle = thread::Builder::new()
|
||||
.name(name.clone())
|
||||
.spawn({
|
||||
let tx = tx.clone();
|
||||
move || {
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
System::set_current(sys);
|
||||
|
||||
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
|
||||
|
||||
// register arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(arb_id, hnd));
|
||||
|
||||
ready_tx.send(()).unwrap();
|
||||
|
||||
// run arbiter event processing loop
|
||||
tokio_uring::start(ArbiterRunner { rx });
|
||||
|
||||
// deregister arbiter
|
||||
let _ = System::current()
|
||||
.tx()
|
||||
.send(SystemCommand::DeregisterArbiter(arb_id));
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|err| panic!("Cannot spawn Arbiter's thread: {name:?}: {err:?}"));
|
||||
|
||||
ready_rx.recv().unwrap();
|
||||
|
||||
Arbiter { tx, thread_handle }
|
||||
}
|
||||
|
||||
/// Sets up an Arbiter runner in a new System using the environment's local set.
|
||||
pub(crate) fn in_new_system() -> ArbiterHandle {
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
|
||||
let hnd = ArbiterHandle::new(tx);
|
||||
|
||||
HANDLE.with(|cell| *cell.borrow_mut() = Some(hnd.clone()));
|
||||
|
||||
crate::spawn(ArbiterRunner { rx });
|
||||
|
||||
hnd
|
||||
}
|
||||
|
||||
/// Return a handle to this Arbiter's message sender.
|
||||
pub fn handle(&self) -> ArbiterHandle {
|
||||
ArbiterHandle::new(self.tx.clone())
|
||||
}
|
||||
|
||||
/// Return a handle to the current thread's Arbiter's message sender.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if no Arbiter is running on the current thread.
|
||||
pub fn current() -> ArbiterHandle {
|
||||
HANDLE.with(|cell| match *cell.borrow() {
|
||||
Some(ref hnd) => hnd.clone(),
|
||||
None => panic!("Arbiter is not running."),
|
||||
})
|
||||
}
|
||||
|
||||
/// Try to get current running arbiter handle.
|
||||
///
|
||||
/// Returns `None` if no Arbiter has been started.
|
||||
///
|
||||
/// Unlike [`current`](Self::current), this never panics.
|
||||
pub fn try_current() -> Option<ArbiterHandle> {
|
||||
HANDLE.with(|cell| cell.borrow().clone())
|
||||
}
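A small sketch contrasting the two accessors; prefer the non-panicking one when code may run outside an Arbiter thread:

match actix_rt::Arbiter::try_current() {
    Some(handle) => {
        handle.spawn(async { /* runs on the current Arbiter */ });
    }
    None => eprintln!("no Arbiter is running on this thread"),
}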
|
||||
|
||||
/// Stop the Arbiter from continuing its event loop.
|
||||
///
|
||||
/// Returns true if stop message was sent successfully and false if the Arbiter has been dropped.
|
||||
pub fn stop(&self) -> bool {
|
||||
self.tx.send(ArbiterCommand::Stop).is_ok()
|
||||
}
|
||||
|
||||
/// Send a future to the Arbiter's thread and spawn it.
|
||||
///
|
||||
/// If you require a result, include a response channel in the future.
|
||||
///
|
||||
/// Returns true if future was sent successfully and false if the Arbiter has died.
|
||||
#[track_caller]
|
||||
pub fn spawn<Fut>(&self, future: Fut) -> bool
|
||||
where
|
||||
Fut: Future<Output = ()> + Send + 'static,
|
||||
{
|
||||
self.tx
|
||||
.send(ArbiterCommand::Execute(Box::pin(future)))
|
||||
.is_ok()
|
||||
}
|
||||
|
||||
/// Send a function to the Arbiter's thread and execute it.
|
||||
///
|
||||
/// Any result from the function is discarded. If you require a result, include a response
|
||||
/// channel in the function.
|
||||
///
|
||||
/// Returns true if function was sent successfully and false if the Arbiter has died.
|
||||
#[track_caller]
|
||||
pub fn spawn_fn<F>(&self, f: F) -> bool
|
||||
where
|
||||
F: FnOnce() + Send + 'static,
|
||||
{
|
||||
self.spawn(async { f() })
|
||||
}
|
||||
|
||||
/// Wait for Arbiter's event loop to complete.
|
||||
///
|
||||
/// Joins the underlying OS thread handle. See [`JoinHandle::join`](thread::JoinHandle::join).
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.thread_handle.join()
|
||||
}
|
||||
}
|
||||
|
||||
struct ArbiterController {
|
||||
stop: Option<Sender<i32>>,
|
||||
rx: UnboundedReceiver<ArbiterCommand>,
|
||||
/// A persistent future that processes [Arbiter] commands.
|
||||
struct ArbiterRunner {
|
||||
rx: mpsc::UnboundedReceiver<ArbiterCommand>,
|
||||
}
|
||||
|
||||
impl Drop for ArbiterController {
|
||||
fn drop(&mut self) {
|
||||
if thread::panicking() {
|
||||
eprintln!("Panic in Arbiter thread, shutting down system.");
|
||||
if System::current().stop_on_panic() {
|
||||
System::current().stop_with_code(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
impl Future for ArbiterRunner {
|
||||
type Output = ();
|
||||
|
||||
impl Future for ArbiterController {
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
// process all items currently buffered in channel
|
||||
loop {
|
||||
match self.rx.poll() {
|
||||
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
|
||||
Ok(Async::Ready(Some(item))) => match item {
|
||||
match ready!(self.rx.poll_recv(cx)) {
|
||||
// channel closed; no more messages can be received
|
||||
None => return Poll::Ready(()),
|
||||
|
||||
// process arbiter command
|
||||
Some(item) => match item {
|
||||
ArbiterCommand::Stop => {
|
||||
if let Some(stop) = self.stop.take() {
|
||||
let _ = stop.send(0);
|
||||
};
|
||||
return Ok(Async::Ready(()));
|
||||
return Poll::Ready(());
|
||||
}
|
||||
ArbiterCommand::Execute(fut) => {
|
||||
spawn(fut);
|
||||
}
|
||||
ArbiterCommand::ExecuteFn(f) => {
|
||||
f.call_box();
|
||||
ArbiterCommand::Execute(task_fut) => {
|
||||
tokio::task::spawn_local(task_fut);
|
||||
}
|
||||
},
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum SystemCommand {
|
||||
Exit(i32),
|
||||
RegisterArbiter(usize, Arbiter),
|
||||
UnregisterArbiter(usize),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct SystemArbiter {
|
||||
stop: Option<Sender<i32>>,
|
||||
commands: UnboundedReceiver<SystemCommand>,
|
||||
arbiters: HashMap<usize, Arbiter>,
|
||||
}
|
||||
|
||||
impl SystemArbiter {
|
||||
pub(crate) fn new(stop: Sender<i32>, commands: UnboundedReceiver<SystemCommand>) -> Self {
|
||||
SystemArbiter {
|
||||
commands,
|
||||
stop: Some(stop),
|
||||
arbiters: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for SystemArbiter {
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
loop {
|
||||
match self.commands.poll() {
|
||||
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
|
||||
Ok(Async::Ready(Some(cmd))) => match cmd {
|
||||
SystemCommand::Exit(code) => {
|
||||
// stop arbiters
|
||||
for arb in self.arbiters.values() {
|
||||
arb.stop();
|
||||
}
|
||||
// stop event loop
|
||||
if let Some(stop) = self.stop.take() {
|
||||
let _ = stop.send(code);
|
||||
}
|
||||
}
|
||||
SystemCommand::RegisterArbiter(name, hnd) => {
|
||||
self.arbiters.insert(name, hnd);
|
||||
}
|
||||
SystemCommand::UnregisterArbiter(name) => {
|
||||
self.arbiters.remove(&name);
|
||||
}
|
||||
},
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait FnExec: Send + 'static {
|
||||
fn call_box(self: Box<Self>);
|
||||
}
|
||||
|
||||
impl<F> FnExec for F
|
||||
where
|
||||
F: FnOnce() + Send + 'static,
|
||||
{
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(boxed_local))]
|
||||
fn call_box(self: Box<Self>) {
|
||||
(*self)()
|
||||
}
|
||||
}
|
||||
|
@ -1,88 +0,0 @@
|
||||
//! Thread pool for blocking operations
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use derive_more::Display;
|
||||
use futures::sync::oneshot;
|
||||
use futures::{Async, Future, Poll};
|
||||
use parking_lot::Mutex;
|
||||
use threadpool::ThreadPool;
|
||||
|
||||
/// Env variable for default cpu pool size
|
||||
const ENV_CPU_POOL_VAR: &str = "ACTIX_CPU_POOL";
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
pub(crate) static ref DEFAULT_POOL: Mutex<ThreadPool> = {
|
||||
let default = match std::env::var(ENV_CPU_POOL_VAR) {
|
||||
Ok(val) => {
|
||||
if let Ok(val) = val.parse() {
|
||||
val
|
||||
} else {
|
||||
log::error!("Can not parse ACTIX_CPU_POOL value");
|
||||
num_cpus::get() * 5
|
||||
}
|
||||
}
|
||||
Err(_) => num_cpus::get() * 5,
|
||||
};
|
||||
Mutex::new(
|
||||
threadpool::Builder::new()
|
||||
.thread_name("actix-web".to_owned())
|
||||
.num_threads(default)
|
||||
.build(),
|
||||
)
|
||||
};
|
||||
}
|
||||
|
||||
thread_local! {
|
||||
static POOL: ThreadPool = {
|
||||
DEFAULT_POOL.lock().clone()
|
||||
};
|
||||
}
|
||||
|
||||
/// Blocking operation execution error
|
||||
#[derive(Debug, Display)]
|
||||
pub enum BlockingError<E: fmt::Debug> {
|
||||
#[display(fmt = "{:?}", _0)]
|
||||
Error(E),
|
||||
#[display(fmt = "Thread pool is gone")]
|
||||
Canceled,
|
||||
}
|
||||
|
||||
/// Execute a blocking function on a thread pool; returns a future that resolves
|
||||
/// to the result of the function execution.
|
||||
pub fn run<F, I, E>(f: F) -> CpuFuture<I, E>
|
||||
where
|
||||
F: FnOnce() -> Result<I, E> + Send + 'static,
|
||||
I: Send + 'static,
|
||||
E: Send + fmt::Debug + 'static,
|
||||
{
|
||||
let (tx, rx) = oneshot::channel();
|
||||
POOL.with(|pool| {
|
||||
pool.execute(move || {
|
||||
if !tx.is_canceled() {
|
||||
let _ = tx.send(f());
|
||||
}
|
||||
})
|
||||
});
|
||||
|
||||
CpuFuture { rx }
|
||||
}
|
||||
|
||||
/// Blocking operation completion future. It resolves with the result
|
||||
/// of the blocking function's execution.
|
||||
pub struct CpuFuture<I, E> {
|
||||
rx: oneshot::Receiver<Result<I, E>>,
|
||||
}
|
||||
|
||||
impl<I, E: fmt::Debug> Future for CpuFuture<I, E> {
|
||||
type Item = I;
|
||||
type Error = BlockingError<E>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let res = futures::try_ready!(self.rx.poll().map_err(|_| BlockingError::Canceled));
|
||||
match res {
|
||||
Ok(val) => Ok(Async::Ready(val)),
|
||||
Err(err) => Err(BlockingError::Error(err)),
|
||||
}
|
||||
}
|
||||
}
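This blocking-pool file is deleted in the diff. For reference, a hedged sketch of how the old futures 0.1 API was typically used; `heavy_work` and `offload_example` are stand-ins, not part of the crate:

use futures::Future;

use crate::arbiter::Arbiter;

fn heavy_work() -> Result<u64, ()> {
    Ok((0..1_000u64).sum())
}

fn offload_example() {
    // Offload the synchronous work, then log its result on the current Arbiter.
    let fut = run(heavy_work)
        .map(|n| println!("blocking result: {}", n))
        .map_err(|_| ());
    Arbiter::spawn(fut);
}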
|
@ -1,184 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
use std::io;
|
||||
|
||||
use futures::future::{lazy, Future};
|
||||
use futures::sync::mpsc::unbounded;
|
||||
use futures::sync::oneshot::{channel, Receiver};
|
||||
|
||||
use tokio_current_thread::CurrentThread;
|
||||
use tokio_reactor::Reactor;
|
||||
use tokio_timer::clock::Clock;
|
||||
use tokio_timer::timer::Timer;
|
||||
|
||||
use crate::arbiter::{Arbiter, SystemArbiter};
|
||||
use crate::runtime::Runtime;
|
||||
use crate::system::System;
|
||||
|
||||
/// Builder struct for an Actix runtime.
|
||||
///
|
||||
/// Either use `Builder::build` to create a system and start actors.
|
||||
/// Alternatively, use `Builder::run` to start the tokio runtime and
|
||||
/// run a function in its context.
|
||||
pub struct Builder {
|
||||
/// Name of the System. Defaults to "actix" if unset.
|
||||
name: Cow<'static, str>,
|
||||
|
||||
/// The clock to use
|
||||
clock: Clock,
|
||||
|
||||
/// Whether the Arbiter will stop the whole System on uncaught panic. Defaults to false.
|
||||
stop_on_panic: bool,
|
||||
}
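A sketch of the legacy builder flow this file implemented (it is replaced later in the diff by `System::new` / `System::with_tokio_rt`); the name and options are illustrative:

fn run_legacy_system() -> std::io::Result<()> {
    let runner = actix_rt::System::builder()
        .name("my-app")
        .stop_on_panic(true)
        .build();

    // Blocks until `System::current().stop()` is called somewhere.
    runner.run()
}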
|
||||
|
||||
impl Builder {
|
||||
pub(crate) fn new() -> Self {
|
||||
Builder {
|
||||
name: Cow::Borrowed("actix"),
|
||||
clock: Clock::new(),
|
||||
stop_on_panic: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the name of the System.
|
||||
pub fn name<T: Into<String>>(mut self, name: T) -> Self {
|
||||
self.name = Cow::Owned(name.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the Clock instance that will be used by this System.
|
||||
///
|
||||
/// Defaults to the system clock.
|
||||
pub fn clock(mut self, clock: Clock) -> Self {
|
||||
self.clock = clock;
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the option 'stop_on_panic' which controls whether the System is stopped when an
|
||||
/// uncaught panic is thrown from a worker thread.
|
||||
///
|
||||
/// Defaults to false.
|
||||
pub fn stop_on_panic(mut self, stop_on_panic: bool) -> Self {
|
||||
self.stop_on_panic = stop_on_panic;
|
||||
self
|
||||
}
|
||||
|
||||
/// Create new System.
|
||||
///
|
||||
/// This method panics if it cannot create the Tokio runtime.
|
||||
pub fn build(self) -> SystemRunner {
|
||||
self.create_runtime(|| {})
|
||||
}
|
||||
|
||||
/// This function will start the Tokio runtime and will finish once
|
||||
/// `System::stop()` is called.
|
||||
/// The function `f` is called within the Tokio runtime context.
|
||||
pub fn run<F>(self, f: F) -> io::Result<()>
|
||||
where
|
||||
F: FnOnce() + 'static,
|
||||
{
|
||||
self.create_runtime(f).run()
|
||||
}
|
||||
|
||||
fn create_runtime<F>(self, f: F) -> SystemRunner
|
||||
where
|
||||
F: FnOnce() + 'static,
|
||||
{
|
||||
let (stop_tx, stop) = channel();
|
||||
let (sys_sender, sys_receiver) = unbounded();
|
||||
|
||||
let arbiter = Arbiter::new_system();
|
||||
let system = System::construct(sys_sender, arbiter.clone(), self.stop_on_panic);
|
||||
|
||||
// system arbiter
|
||||
let arb = SystemArbiter::new(stop_tx, sys_receiver);
|
||||
|
||||
let mut rt = self.build_rt().unwrap();
|
||||
rt.spawn(arb);
|
||||
|
||||
// init system arbiter and run configuration method
|
||||
let _ = rt.block_on(lazy(move || {
|
||||
f();
|
||||
Ok::<_, ()>(())
|
||||
}));
|
||||
|
||||
SystemRunner { rt, stop, system }
|
||||
}
|
||||
|
||||
pub(crate) fn build_rt(&self) -> io::Result<Runtime> {
|
||||
// We need a reactor to receive events about IO objects from kernel
|
||||
let reactor = Reactor::new()?;
|
||||
let reactor_handle = reactor.handle();
|
||||
|
||||
// Place a timer wheel on top of the reactor. If there are no timeouts to fire, it'll let the
|
||||
// reactor pick up some new external events.
|
||||
let timer = Timer::new_with_now(reactor, self.clock.clone());
|
||||
let timer_handle = timer.handle();
|
||||
|
||||
// And now put a single-threaded executor on top of the timer. When there are no futures ready
|
||||
// to do something, it'll let the timer or the reactor to generate some new stimuli for the
|
||||
// futures to continue in their life.
|
||||
let executor = CurrentThread::new_with_park(timer);
|
||||
|
||||
Ok(Runtime::new2(
|
||||
reactor_handle,
|
||||
timer_handle,
|
||||
self.clock.clone(),
|
||||
executor,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper object that runs System's event loop
|
||||
#[must_use = "SystemRunner must be run"]
|
||||
#[derive(Debug)]
|
||||
pub struct SystemRunner {
|
||||
rt: Runtime,
|
||||
stop: Receiver<i32>,
|
||||
system: System,
|
||||
}
|
||||
|
||||
impl SystemRunner {
|
||||
/// This function will start the event loop and will finish once the
|
||||
/// `System::stop()` function is called.
|
||||
pub fn run(self) -> io::Result<()> {
|
||||
let SystemRunner { mut rt, stop, .. } = self;
|
||||
|
||||
// run loop
|
||||
let _ = rt.block_on(lazy(move || {
|
||||
Arbiter::run_system();
|
||||
Ok::<_, ()>(())
|
||||
}));
|
||||
let result = match rt.block_on(stop) {
|
||||
Ok(code) => {
|
||||
if code != 0 {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("Non-zero exit code: {}", code),
|
||||
))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
|
||||
};
|
||||
Arbiter::stop_system();
|
||||
result
|
||||
}
|
||||
|
||||
/// Execute a future and wait for result.
|
||||
pub fn block_on<F, I, E>(&mut self, fut: F) -> Result<I, E>
|
||||
where
|
||||
F: Future<Item = I, Error = E>,
|
||||
{
|
||||
let _ = self.rt.block_on(lazy(move || {
|
||||
Arbiter::run_system();
|
||||
Ok::<_, ()>(())
|
||||
}));
|
||||
let res = self.rt.block_on(fut);
|
||||
let _ = self.rt.block_on(lazy(move || {
|
||||
Arbiter::stop_system();
|
||||
Ok::<_, ()>(())
|
||||
}));
|
||||
res
|
||||
}
|
||||
}
|
@ -1,28 +1,207 @@
|
||||
//! A runtime implementation that runs everything on the current thread.
|
||||
//! Tokio-based single-threaded async runtime for the Actix ecosystem.
|
||||
//!
|
||||
//! In most parts of the Actix ecosystem, it has been chosen to use !Send futures. For this
|
||||
//! reason, a single-threaded runtime is appropriate since it is guaranteed that futures will not
|
||||
//! be moved between threads. This can result in small performance improvements over cases where
|
||||
//! atomics would otherwise be needed.
|
||||
//!
|
||||
//! To achieve similar performance to multi-threaded, work-stealing runtimes, applications
|
||||
//! using `actix-rt` will create multiple, mostly disconnected, single-threaded runtimes.
|
||||
//! This approach has good performance characteristics for workloads where the majority of tasks
|
||||
//! have similar runtime expense.
|
||||
//!
|
||||
//! The disadvantage is that idle threads will not steal work from very busy, stuck or otherwise
|
||||
//! backlogged threads. Tasks that are disproportionately expensive should be offloaded to the
|
||||
//! blocking task thread-pool using [`task::spawn_blocking`].
|
||||
//!
|
||||
//! # Examples
|
||||
//! ```no_run
|
||||
//! use std::sync::mpsc;
|
||||
//! use actix_rt::{Arbiter, System};
|
||||
//!
|
||||
//! let _ = System::new();
|
||||
//!
|
||||
//! let (tx, rx) = mpsc::channel::<u32>();
|
||||
//!
|
||||
//! let arbiter = Arbiter::new();
|
||||
//! arbiter.spawn_fn(move || tx.send(42).unwrap());
|
||||
//!
|
||||
//! let num = rx.recv().unwrap();
|
||||
//! assert_eq!(num, 42);
|
||||
//!
|
||||
//! arbiter.stop();
|
||||
//! arbiter.join().unwrap();
|
||||
//! ```
|
||||
//!
|
||||
//! # `io-uring` Support
|
||||
//!
|
||||
//! There is experimental support for using io-uring with this crate by enabling the
|
||||
//! `io-uring` feature. For now, it is semver exempt.
|
||||
//!
|
||||
//! Note that there are currently some unimplemented parts of using `actix-rt` with `io-uring`.
|
||||
//! In particular, when running a `System`, only `System::block_on` is supported.
|
||||
|
||||
#![allow(clippy::type_complexity)]
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
#[cfg(all(not(target_os = "linux"), feature = "io-uring"))]
|
||||
compile_error!("io_uring is a linux only feature.");
|
||||
|
||||
use std::future::Future;
|
||||
|
||||
// Cannot define a main macro when compiled into test harness.
|
||||
// Workaround for https://github.com/rust-lang/rust/issues/62127.
|
||||
#[cfg(all(feature = "macros", not(test)))]
|
||||
pub use actix_macros::main;
|
||||
#[cfg(feature = "macros")]
|
||||
pub use actix_macros::test;
|
||||
|
||||
mod arbiter;
|
||||
pub mod blocking;
|
||||
mod builder;
|
||||
mod runtime;
|
||||
mod system;
|
||||
|
||||
pub use self::arbiter::Arbiter;
|
||||
pub use self::builder::{Builder, SystemRunner};
|
||||
pub use self::runtime::Runtime;
|
||||
pub use self::system::System;
|
||||
pub use tokio::pin;
|
||||
use tokio::task::JoinHandle;
|
||||
|
||||
/// Spawns a future on the current arbiter.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if actix system is not running.
|
||||
pub fn spawn<F>(f: F)
|
||||
where
|
||||
F: futures::Future<Item = (), Error = ()> + 'static,
|
||||
{
|
||||
if !System::is_set() {
|
||||
panic!("System is not running");
|
||||
pub use self::{
|
||||
arbiter::{Arbiter, ArbiterHandle},
|
||||
runtime::Runtime,
|
||||
system::{System, SystemRunner},
|
||||
};
|
||||
|
||||
pub mod signal {
|
||||
//! Asynchronous signal handling (Tokio re-exports).
|
||||
|
||||
#[cfg(unix)]
|
||||
pub mod unix {
|
||||
//! Unix specific signals (Tokio re-exports).
|
||||
pub use tokio::signal::unix::*;
|
||||
}
|
||||
pub use tokio::signal::ctrl_c;
|
||||
}
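A short usage sketch of the re-exported signal helpers:

fn main() {
    actix_rt::System::new().block_on(async {
        actix_rt::signal::ctrl_c()
            .await
            .expect("failed to listen for ctrl-c");
        println!("received interrupt, shutting down");
    });
}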
|
||||
|
||||
pub mod net {
|
||||
//! TCP/UDP/Unix bindings (mostly Tokio re-exports).
|
||||
|
||||
use std::{
|
||||
future::Future,
|
||||
io,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use tokio::io::{AsyncRead, AsyncWrite, Interest};
|
||||
#[cfg(unix)]
|
||||
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
|
||||
pub use tokio::{
|
||||
io::Ready,
|
||||
net::{TcpListener, TcpSocket, TcpStream, UdpSocket},
|
||||
};
|
||||
|
||||
/// Extension trait over async read+write types that can also signal readiness.
|
||||
#[doc(hidden)]
|
||||
pub trait ActixStream: AsyncRead + AsyncWrite + Unpin {
|
||||
/// Poll stream and check read readiness of Self.
|
||||
///
|
||||
/// See [tokio::net::TcpStream::poll_read_ready] for detail on intended use.
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
|
||||
|
||||
/// Poll stream and check write readiness of Self.
|
||||
///
|
||||
/// See [tokio::net::TcpStream::poll_write_ready] for detail on intended use.
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>>;
|
||||
}
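A hedged sketch of driving these poll methods from async code via `std::future::poll_fn` (stable since Rust 1.64); `wait_until_readable` is illustrative only:

use std::future::poll_fn;

async fn wait_until_readable<Io: ActixStream>(io: &Io) -> io::Result<()> {
    // Poll the trait method until the stream reports read readiness.
    let ready = poll_fn(|cx| io.poll_read_ready(cx)).await?;
    println!("read readiness: {:?}", ready);
    Ok(())
}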
|
||||
|
||||
Arbiter::spawn(f);
|
||||
impl ActixStream for TcpStream {
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
let ready = self.ready(Interest::READABLE);
|
||||
tokio::pin!(ready);
|
||||
ready.poll(cx)
|
||||
}
|
||||
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
let ready = self.ready(Interest::WRITABLE);
|
||||
tokio::pin!(ready);
|
||||
ready.poll(cx)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
impl ActixStream for UnixStream {
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
let ready = self.ready(Interest::READABLE);
|
||||
tokio::pin!(ready);
|
||||
ready.poll(cx)
|
||||
}
|
||||
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
let ready = self.ready(Interest::WRITABLE);
|
||||
tokio::pin!(ready);
|
||||
ready.poll(cx)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io: ActixStream + ?Sized> ActixStream for Box<Io> {
|
||||
fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
(**self).poll_read_ready(cx)
|
||||
}
|
||||
|
||||
fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<Ready>> {
|
||||
(**self).poll_write_ready(cx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub mod time {
|
||||
//! Utilities for tracking time (Tokio re-exports).
|
||||
|
||||
pub use tokio::time::{
|
||||
interval, interval_at, sleep, sleep_until, timeout, Instant, Interval, Sleep, Timeout,
|
||||
};
|
||||
}
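A brief sketch of the re-exported timer utilities:

fn main() {
    actix_rt::System::new().block_on(async {
        actix_rt::time::sleep(std::time::Duration::from_millis(10)).await;

        let slow = actix_rt::time::sleep(std::time::Duration::from_secs(5));
        let res =
            actix_rt::time::timeout(std::time::Duration::from_millis(1), slow).await;
        assert!(res.is_err()); // the inner sleep did not finish within the timeout
    });
}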
|
||||
|
||||
pub mod task {
|
||||
//! Task management (Tokio re-exports).
|
||||
|
||||
pub use tokio::task::{spawn_blocking, yield_now, JoinError, JoinHandle};
|
||||
}
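As the crate-level docs recommend, disproportionately expensive work should be offloaded with `spawn_blocking`; a minimal sketch:

fn main() {
    actix_rt::System::new().block_on(async {
        // Runs on Tokio's blocking thread pool instead of the single-threaded loop.
        let sum = actix_rt::task::spawn_blocking(|| (0..1_000_000u64).sum::<u64>())
            .await
            .expect("blocking task panicked");
        assert_eq!(sum, 499_999_500_000);
    });
}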
|
||||
|
||||
/// Spawns a future on the current thread as a new task.
|
||||
///
|
||||
/// If not immediately awaited, the task can be cancelled using [`JoinHandle::abort`].
|
||||
///
|
||||
/// The provided future is spawned as a new task; therefore, panics are caught.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if Actix system is not running.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// # use std::time::Duration;
|
||||
/// # actix_rt::Runtime::new().unwrap().block_on(async {
|
||||
/// // task resolves successfully
|
||||
/// assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
|
||||
///
|
||||
/// // task panics
|
||||
/// assert!(actix_rt::spawn(async {
|
||||
/// panic!("panic is caught at task boundary");
|
||||
/// })
|
||||
/// .await
|
||||
/// .unwrap_err()
|
||||
/// .is_panic());
|
||||
///
|
||||
/// // task is cancelled before completion
|
||||
/// let handle = actix_rt::spawn(actix_rt::time::sleep(Duration::from_secs(100)));
|
||||
/// handle.abort();
|
||||
/// assert!(handle.await.unwrap_err().is_cancelled());
|
||||
/// # });
|
||||
/// ```
|
||||
#[track_caller]
|
||||
#[inline]
|
||||
pub fn spawn<Fut>(f: Fut) -> JoinHandle<Fut::Output>
|
||||
where
|
||||
Fut: Future + 'static,
|
||||
Fut::Output: 'static,
|
||||
{
|
||||
tokio::task::spawn_local(f)
|
||||
}
|
||||
|
@ -1,92 +0,0 @@
|
||||
//! A runtime implementation that runs everything on the current thread.
|
||||
//!
|
||||
//! [`current_thread::Runtime`][rt] is similar to the primary
|
||||
//! [`Runtime`][concurrent-rt] except that it runs all components on the current
|
||||
//! thread instead of using a thread pool. This means that it is able to spawn
|
||||
//! futures that do not implement `Send`.
|
||||
//!
|
||||
//! Same as the default [`Runtime`][concurrent-rt], the
|
||||
//! [`current_thread::Runtime`][rt] includes:
|
||||
//!
|
||||
//! * A [reactor] to drive I/O resources.
|
||||
//! * An [executor] to execute tasks that use these I/O resources.
|
||||
//! * A [timer] for scheduling work to run after a set period of time.
|
||||
//!
|
||||
//! Note that [`current_thread::Runtime`][rt] does not implement `Send` itself
|
||||
//! and cannot be safely moved to other threads.
|
||||
//!
|
||||
//! # Spawning from other threads
|
||||
//!
|
||||
//! While [`current_thread::Runtime`][rt] does not implement `Send` and cannot
|
||||
//! safely be moved to other threads, it provides a `Handle` that can be sent
|
||||
//! to other threads and allows spawning new tasks from there.
|
||||
//!
|
||||
//! For example:
|
||||
//!
|
||||
//! ```
|
||||
//! # extern crate tokio;
|
||||
//! # extern crate futures;
|
||||
//! use tokio::runtime::current_thread::Runtime;
|
||||
//! use tokio::prelude::*;
|
||||
//! use std::thread;
|
||||
//!
|
||||
//! # fn main() {
|
||||
//! let mut runtime = Runtime::new().unwrap();
|
||||
//! let handle = runtime.handle();
|
||||
//!
|
||||
//! thread::spawn(move || {
|
||||
//! handle.spawn(future::ok(()));
|
||||
//! }).join().unwrap();
|
||||
//!
|
||||
//! # /*
|
||||
//! runtime.run().unwrap();
|
||||
//! # */
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! Creating a new `Runtime` and running a future `f` until its completion and
|
||||
//! returning its result.
|
||||
//!
|
||||
//! ```
|
||||
//! use tokio::runtime::current_thread::Runtime;
|
||||
//! use tokio::prelude::*;
|
||||
//!
|
||||
//! let mut runtime = Runtime::new().unwrap();
|
||||
//!
|
||||
//! // Use the runtime...
|
||||
//! // runtime.block_on(f); // where f is a future
|
||||
//! ```
|
||||
//!
|
||||
//! [rt]: struct.Runtime.html
|
||||
//! [concurrent-rt]: ../struct.Runtime.html
|
||||
//! [chan]: https://docs.rs/futures/0.1/futures/sync/mpsc/fn.channel.html
|
||||
//! [reactor]: ../../reactor/struct.Reactor.html
|
||||
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
|
||||
//! [timer]: ../../timer/index.html
|
||||
|
||||
mod builder;
|
||||
mod runtime;
|
||||
|
||||
pub use self::builder::Builder;
|
||||
pub use self::runtime::{Runtime, Handle};
|
||||
pub use tokio_current_thread::spawn;
|
||||
pub use tokio_current_thread::TaskExecutor;
|
||||
|
||||
use futures::Future;
|
||||
|
||||
/// Run the provided future to completion using a runtime running on the current thread.
|
||||
///
|
||||
/// This first creates a new [`Runtime`], and calls [`Runtime::block_on`] with the provided future,
|
||||
/// which blocks the current thread until the provided future completes. It then calls
|
||||
/// [`Runtime::run`] to wait for any other spawned futures to resolve.
|
||||
pub fn block_on_all<F>(future: F) -> Result<F::Item, F::Error>
|
||||
where
|
||||
F: Future,
|
||||
{
|
||||
let mut r = Runtime::new().expect("failed to start runtime on current thread");
|
||||
let v = r.block_on(future)?;
|
||||
r.run().expect("failed to resolve remaining futures");
|
||||
Ok(v)
|
||||
}
|
@ -1,174 +1,149 @@
|
||||
use std::error::Error;
|
||||
use std::{fmt, io};
|
||||
use std::{future::Future, io};
|
||||
|
||||
use futures::Future;
|
||||
use tokio_current_thread::{self as current_thread, CurrentThread};
|
||||
use tokio_executor;
|
||||
use tokio_reactor::{self, Reactor};
|
||||
use tokio_timer::clock::{self, Clock};
|
||||
use tokio_timer::timer::{self, Timer};
|
||||
use tokio::task::{JoinHandle, LocalSet};
|
||||
|
||||
use crate::builder::Builder;
|
||||
|
||||
/// Single-threaded runtime provides a way to start reactor
|
||||
/// and executor on the current thread.
|
||||
/// A Tokio-based runtime proxy.
|
||||
///
|
||||
/// See [module level][mod] documentation for more details.
|
||||
///
|
||||
/// [mod]: index.html
|
||||
/// All spawned futures will be executed on the current thread. Therefore, there is no `Send` bound
|
||||
/// on submitted futures.
|
||||
#[derive(Debug)]
|
||||
pub struct Runtime {
|
||||
reactor_handle: tokio_reactor::Handle,
|
||||
timer_handle: timer::Handle,
|
||||
clock: Clock,
|
||||
executor: CurrentThread<Timer<Reactor>>,
|
||||
local: LocalSet,
|
||||
rt: tokio::runtime::Runtime,
|
||||
}
|
||||
|
||||
/// Error returned by the `run` function.
|
||||
#[derive(Debug)]
|
||||
pub struct RunError {
|
||||
inner: current_thread::RunError,
|
||||
}
|
||||
|
||||
impl fmt::Display for RunError {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "{}", self.inner)
|
||||
}
|
||||
}
|
||||
|
||||
impl Error for RunError {
|
||||
fn description(&self) -> &str {
|
||||
self.inner.description()
|
||||
}
|
||||
fn cause(&self) -> Option<&Error> {
|
||||
self.inner.source()
|
||||
}
|
||||
pub(crate) fn default_tokio_runtime() -> io::Result<tokio::runtime::Runtime> {
|
||||
tokio::runtime::Builder::new_current_thread()
|
||||
.enable_io()
|
||||
.enable_time()
|
||||
.build()
|
||||
}
|
||||
|
||||
impl Runtime {
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
/// Returns a new runtime initialized with default configuration values.
|
||||
pub fn new() -> io::Result<Runtime> {
|
||||
Builder::new().build_rt()
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new() -> io::Result<Self> {
|
||||
let rt = default_tokio_runtime()?;
|
||||
|
||||
Ok(Runtime {
|
||||
rt,
|
||||
local: LocalSet::new(),
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) fn new2(
|
||||
reactor_handle: tokio_reactor::Handle,
|
||||
timer_handle: timer::Handle,
|
||||
clock: Clock,
|
||||
executor: CurrentThread<Timer<Reactor>>,
|
||||
) -> Runtime {
|
||||
Runtime {
|
||||
reactor_handle,
|
||||
timer_handle,
|
||||
clock,
|
||||
executor,
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn a future onto the single-threaded Tokio runtime.
|
||||
/// Offload a future onto the single-threaded runtime.
|
||||
///
|
||||
/// See [module level][mod] documentation for more details.
|
||||
/// The returned join handle can be used to await the future's result.
|
||||
///
|
||||
/// [mod]: index.html
|
||||
/// See [crate root][crate] documentation for more details.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// # use futures::{future, Future, Stream};
|
||||
/// use actix_rt::Runtime;
|
||||
///
|
||||
/// # fn dox() {
|
||||
/// // Create the runtime
|
||||
/// let mut rt = Runtime::new().unwrap();
|
||||
/// ```
|
||||
/// let rt = actix_rt::Runtime::new().unwrap();
|
||||
///
|
||||
/// // Spawn a future onto the runtime
|
||||
/// rt.spawn(future::lazy(|| {
|
||||
/// let handle = rt.spawn(async {
|
||||
/// println!("running on the runtime");
|
||||
/// Ok(())
|
||||
/// }));
|
||||
/// # }
|
||||
/// # pub fn main() {}
|
||||
/// 42
|
||||
/// });
|
||||
///
|
||||
/// assert_eq!(rt.block_on(handle).unwrap(), 42);
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if the spawn fails. Failure occurs if the executor
|
||||
/// is currently at capacity and is unable to spawn a new future.
|
||||
pub fn spawn<F>(&mut self, future: F) -> &mut Self
|
||||
/// This function panics if the spawn fails. Failure occurs if the executor is currently at
|
||||
/// capacity and is unable to spawn a new future.
|
||||
#[track_caller]
|
||||
pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
|
||||
where
|
||||
F: Future<Item = (), Error = ()> + 'static,
|
||||
F: Future + 'static,
|
||||
{
|
||||
self.executor.spawn(future);
|
||||
self
|
||||
self.local.spawn_local(future)
|
||||
}
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future
|
||||
/// completes.
|
||||
/// Retrieves a reference to the underlying Tokio runtime associated with this instance.
|
||||
///
|
||||
/// This function can be used to synchronously block the current thread
|
||||
/// until the provided `future` has resolved either successfully or with an
|
||||
/// error. The result of the future is then returned from this function
|
||||
/// call.
|
||||
/// The Tokio runtime is responsible for executing asynchronous tasks and managing
|
||||
/// the event loop for an asynchronous Rust program. This method allows accessing
|
||||
/// the runtime to interact with its features directly.
|
||||
///
|
||||
/// Note that this function will **also** execute any spawned futures on the
|
||||
/// current thread, but will **not** block until these other spawned futures
|
||||
/// have completed. Once the function returns, any uncompleted futures
|
||||
/// remain pending in the `Runtime` instance. These futures will not run
|
||||
/// In a typical use case, you might need to share the same runtime between different
|
||||
/// modules of your project. For example, a module might require a `tokio::runtime::Handle`
|
||||
/// to spawn tasks on the same runtime, or the runtime itself to configure more complex
|
||||
/// behaviours.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use actix_rt::Runtime;
|
||||
///
|
||||
/// mod module_a {
|
||||
/// pub fn do_something(handle: tokio::runtime::Handle) {
|
||||
/// handle.spawn(async {
|
||||
/// // Some asynchronous task here
|
||||
/// });
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// mod module_b {
|
||||
/// pub fn do_something_else(rt: &tokio::runtime::Runtime) {
|
||||
/// rt.spawn(async {
|
||||
/// // Another asynchronous task here
|
||||
/// });
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// let actix_runtime = actix_rt::Runtime::new().unwrap();
|
||||
/// let tokio_runtime = actix_runtime.tokio_runtime();
|
||||
///
|
||||
/// let handle = tokio_runtime.handle().clone();
|
||||
///
|
||||
/// module_a::do_something(handle);
|
||||
/// module_b::do_something_else(tokio_runtime);
|
||||
/// ```
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// An immutable reference to the `tokio::runtime::Runtime` instance associated with this
|
||||
/// `Runtime` instance.
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// While this method provides an immutable reference to the Tokio runtime, which is safe to share across threads,
|
||||
/// be aware that spawning blocking tasks on the Tokio runtime could potentially impact the execution
|
||||
/// of the Actix runtime. This is because Tokio is responsible for driving the Actix system,
|
||||
/// and blocking tasks could delay or deadlock other tasks in the run loop.
|
||||
pub fn tokio_runtime(&self) -> &tokio::runtime::Runtime {
|
||||
&self.rt
|
||||
}
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future completes.
|
||||
///
|
||||
/// This function can be used to synchronously block the current thread until the provided
|
||||
/// `future` has resolved either successfully or with an error. The result of the future is
|
||||
/// then returned from this function call.
|
||||
///
|
||||
/// Note that this function will also execute any spawned futures on the current thread, but
|
||||
/// will not block until these other spawned futures have completed. Once the function returns,
|
||||
/// any uncompleted futures remain pending in the `Runtime` instance. These futures will not run
|
||||
/// until `block_on` or `run` is called again.
|
||||
///
|
||||
/// The caller is responsible for ensuring that other spawned futures
|
||||
/// complete execution by calling `block_on` or `run`.
|
||||
pub fn block_on<F>(&mut self, f: F) -> Result<F::Item, F::Error>
|
||||
/// The caller is responsible for ensuring that other spawned futures complete execution by
|
||||
/// calling `block_on` or `run`.
|
||||
#[track_caller]
|
||||
pub fn block_on<F>(&self, f: F) -> F::Output
|
||||
where
|
||||
F: Future,
|
||||
{
|
||||
self.enter(|executor| {
|
||||
// Run the provided future
|
||||
let ret = executor.block_on(f);
|
||||
ret.map_err(|e| e.into_inner().expect("unexpected execution error"))
|
||||
})
|
||||
}
|
||||
|
||||
/// Run the executor to completion, blocking the thread until **all**
|
||||
/// spawned futures have completed.
|
||||
pub fn run(&mut self) -> Result<(), RunError> {
|
||||
self.enter(|executor| executor.run())
|
||||
.map_err(|e| RunError { inner: e })
|
||||
}
|
||||
|
||||
fn enter<F, R>(&mut self, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&mut current_thread::Entered<Timer<Reactor>>) -> R,
|
||||
{
|
||||
let Runtime {
|
||||
ref reactor_handle,
|
||||
ref timer_handle,
|
||||
ref clock,
|
||||
ref mut executor,
|
||||
..
|
||||
} = *self;
|
||||
|
||||
// Binds an executor to this thread
|
||||
let mut enter = tokio_executor::enter().expect("Multiple executors at once");
|
||||
|
||||
// This will set the default handle and timer to use inside the closure
|
||||
// and run the future.
|
||||
tokio_reactor::with_default(&reactor_handle, &mut enter, |enter| {
|
||||
clock::with_default(clock, enter, |enter| {
|
||||
timer::with_default(&timer_handle, enter, |enter| {
|
||||
// The TaskExecutor is a fake executor that looks into the
|
||||
// current single-threaded executor when used. This is a trick,
|
||||
// because we need two mutable references to the executor (one
|
||||
// to run the provided future, another to install as the default
|
||||
// one). We use the fake one here as the default one.
|
||||
let mut default_executor = current_thread::TaskExecutor::current();
|
||||
tokio_executor::with_default(&mut default_executor, enter, |enter| {
|
||||
let mut executor = executor.enter(enter);
|
||||
f(&mut executor)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
self.local.block_on(&self.rt, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<tokio::runtime::Runtime> for Runtime {
|
||||
fn from(rt: tokio::runtime::Runtime) -> Self {
|
||||
Self {
|
||||
local: LocalSet::new(),
|
||||
rt,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,61 +1,119 @@
|
||||
use std::cell::RefCell;
|
||||
use std::io;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
collections::HashMap,
|
||||
future::Future,
|
||||
io,
|
||||
pin::Pin,
|
||||
sync::atomic::{AtomicUsize, Ordering},
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::sync::mpsc::UnboundedSender;
|
||||
use futures_core::ready;
|
||||
use tokio::sync::{mpsc, oneshot};
|
||||
|
||||
use crate::arbiter::{Arbiter, SystemCommand};
|
||||
use crate::builder::{Builder, SystemRunner};
|
||||
use crate::{arbiter::ArbiterHandle, Arbiter};
|
||||
|
||||
static SYSTEM_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
/// System is a runtime manager.
|
||||
thread_local!(
|
||||
static CURRENT: RefCell<Option<System>> = const { RefCell::new(None) };
|
||||
);
|
||||
|
||||
/// A manager for a per-thread distributed async runtime.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct System {
|
||||
id: usize,
|
||||
sys: UnboundedSender<SystemCommand>,
|
||||
arbiter: Arbiter,
|
||||
stop_on_panic: bool,
|
||||
sys_tx: mpsc::UnboundedSender<SystemCommand>,
|
||||
|
||||
/// Handle to the first [Arbiter] that is created with the System.
|
||||
arbiter_handle: ArbiterHandle,
|
||||
}
|
||||
|
||||
thread_local!(
|
||||
static CURRENT: RefCell<Option<System>> = RefCell::new(None);
|
||||
);
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
impl System {
|
||||
/// Create a new system.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the underlying Tokio runtime cannot be created.
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new() -> SystemRunner {
|
||||
Self::with_tokio_rt(|| {
|
||||
crate::runtime::default_tokio_runtime()
|
||||
.expect("Default Actix (Tokio) runtime could not be created.")
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
|
||||
///
|
||||
/// [tokio-runtime]: tokio::runtime::Runtime
|
||||
pub fn with_tokio_rt<F>(runtime_factory: F) -> SystemRunner
|
||||
where
|
||||
F: FnOnce() -> tokio::runtime::Runtime,
|
||||
{
|
||||
let (stop_tx, stop_rx) = oneshot::channel();
|
||||
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
|
||||
|
||||
let rt = crate::runtime::Runtime::from(runtime_factory());
|
||||
let sys_arbiter = rt.block_on(async { Arbiter::in_new_system() });
|
||||
let system = System::construct(sys_tx, sys_arbiter.clone());
|
||||
|
||||
system
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
|
||||
.unwrap();
|
||||
|
||||
// init background system arbiter
|
||||
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
|
||||
rt.spawn(sys_ctrl);
|
||||
|
||||
SystemRunner { rt, stop_rx }
|
||||
}
|
||||
}
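A usage sketch of the non-io-uring path above, supplying a hand-built current-thread Tokio runtime:

fn main() {
    let runner = actix_rt::System::with_tokio_rt(|| {
        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("failed to build Tokio runtime")
    });

    runner.block_on(async {
        // async work on the custom runtime
    });
}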
|
||||
|
||||
#[cfg(feature = "io-uring")]
|
||||
impl System {
|
||||
/// Create a new system.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the underlying Tokio runtime cannot be created.
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new() -> SystemRunner {
|
||||
SystemRunner
|
||||
}
|
||||
|
||||
/// Create a new System using the [Tokio Runtime](tokio-runtime) returned from a closure.
|
||||
///
|
||||
/// [tokio-runtime]: tokio::runtime::Runtime
|
||||
#[doc(hidden)]
|
||||
pub fn with_tokio_rt<F>(_: F) -> SystemRunner
|
||||
where
|
||||
F: FnOnce() -> tokio::runtime::Runtime,
|
||||
{
|
||||
unimplemented!("System::with_tokio_rt is not implemented for io-uring feature yet")
|
||||
}
|
||||
}
|
||||
|
||||
impl System {
|
||||
/// Constructs new system and sets it as current
|
||||
/// Constructs new system and registers it on the current thread.
|
||||
pub(crate) fn construct(
|
||||
sys: UnboundedSender<SystemCommand>,
|
||||
arbiter: Arbiter,
|
||||
stop_on_panic: bool,
|
||||
sys_tx: mpsc::UnboundedSender<SystemCommand>,
|
||||
arbiter_handle: ArbiterHandle,
|
||||
) -> Self {
|
||||
let sys = System {
|
||||
sys,
|
||||
arbiter,
|
||||
stop_on_panic,
|
||||
sys_tx,
|
||||
arbiter_handle,
|
||||
id: SYSTEM_COUNT.fetch_add(1, Ordering::SeqCst),
|
||||
};
|
||||
|
||||
System::set_current(sys.clone());
|
||||
|
||||
sys
|
||||
}
|
||||
|
||||
/// Build a new system with a customized tokio runtime.
|
||||
///
|
||||
/// This allows customizing the runtime. See the struct-level docs on
|
||||
/// `Builder` for more information.
|
||||
pub fn builder() -> Builder {
|
||||
Builder::new()
|
||||
}
|
||||
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
/// Create new system.
|
||||
///
|
||||
/// This method panics if it can not create tokio runtime
|
||||
pub fn new<T: Into<String>>(name: T) -> SystemRunner {
|
||||
Self::builder().name(name).build()
|
||||
}
|
||||
|
||||
/// Get current running system.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if no system is registered on the current thread.
|
||||
pub fn current() -> System {
|
||||
CURRENT.with(|cell| match *cell.borrow() {
|
||||
Some(ref sys) => sys.clone(),
|
||||
@ -63,67 +121,237 @@ impl System {
|
||||
})
|
||||
}
|
||||
|
||||
/// Set current running system.
|
||||
pub(crate) fn is_set() -> bool {
|
||||
CURRENT.with(|cell| cell.borrow().is_some())
|
||||
/// Try to get current running system.
|
||||
///
|
||||
/// Returns `None` if no System has been started.
|
||||
///
|
||||
/// Unlike [`current`](Self::current), this never panics.
|
||||
pub fn try_current() -> Option<System> {
|
||||
CURRENT.with(|cell| cell.borrow().clone())
|
||||
}
|
||||
|
||||
/// Set current running system.
|
||||
/// Get a handle to the System's initial [Arbiter].
|
||||
pub fn arbiter(&self) -> &ArbiterHandle {
|
||||
&self.arbiter_handle
|
||||
}
|
||||
|
||||
/// Check if there is a System registered on the current thread.
|
||||
pub fn is_registered() -> bool {
|
||||
CURRENT.with(|sys| sys.borrow().is_some())
|
||||
}
|
||||
|
||||
/// Register given system on current thread.
|
||||
#[doc(hidden)]
|
||||
pub fn set_current(sys: System) {
|
||||
CURRENT.with(|s| {
|
||||
*s.borrow_mut() = Some(sys);
|
||||
CURRENT.with(|cell| {
|
||||
*cell.borrow_mut() = Some(sys);
|
||||
})
|
||||
}
|
||||
|
||||
/// Execute function with system reference.
|
||||
pub fn with_current<F, R>(f: F) -> R
|
||||
where
|
||||
F: FnOnce(&System) -> R,
|
||||
{
|
||||
CURRENT.with(|cell| match *cell.borrow() {
|
||||
Some(ref sys) => f(sys),
|
||||
None => panic!("System is not running"),
|
||||
})
|
||||
}
|
||||
|
||||
/// System id
|
||||
/// Numeric system identifier.
|
||||
///
|
||||
/// Useful when using multiple Systems.
|
||||
pub fn id(&self) -> usize {
|
||||
self.id
|
||||
}
|
||||
|
||||
/// Stop the system
|
||||
/// Stop the system (with code 0).
|
||||
pub fn stop(&self) {
|
||||
self.stop_with_code(0)
|
||||
}
|
||||
|
||||
/// Stop the system with a particular exit code.
|
||||
/// Stop the system with a given exit code.
|
||||
pub fn stop_with_code(&self, code: i32) {
|
||||
let _ = self.sys.unbounded_send(SystemCommand::Exit(code));
|
||||
let _ = self.sys_tx.send(SystemCommand::Exit(code));
|
||||
}
|
||||
|
||||
pub(crate) fn sys(&self) -> &UnboundedSender<SystemCommand> {
|
||||
&self.sys
|
||||
}
|
||||
|
||||
/// Return status of 'stop_on_panic' option which controls whether the System is stopped when an
|
||||
/// uncaught panic is thrown from a worker thread.
|
||||
pub fn stop_on_panic(&self) -> bool {
|
||||
self.stop_on_panic
|
||||
}
|
||||
|
||||
/// System arbiter
|
||||
pub fn arbiter(&self) -> &Arbiter {
|
||||
&self.arbiter
|
||||
}
|
||||
|
||||
/// This function will start the Tokio runtime and will finish once
|
||||
/// `System::stop()` is called.
|
||||
/// The function `f` is called within the Tokio runtime context.
|
||||
pub fn run<F>(f: F) -> io::Result<()>
|
||||
where
|
||||
F: FnOnce() + 'static,
|
||||
{
|
||||
Self::builder().run(f)
|
||||
pub(crate) fn tx(&self) -> &mpsc::UnboundedSender<SystemCommand> {
|
||||
&self.sys_tx
|
||||
}
|
||||
}
|
||||
|
||||
/// Runner that keeps a [System]'s event loop alive until a stop message is received.
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[must_use = "A SystemRunner does nothing unless `run` is called."]
|
||||
#[derive(Debug)]
|
||||
pub struct SystemRunner {
|
||||
rt: crate::runtime::Runtime,
|
||||
stop_rx: oneshot::Receiver<i32>,
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
impl SystemRunner {
|
||||
/// Starts event loop and will return once [System] is [stopped](System::stop).
|
||||
pub fn run(self) -> io::Result<()> {
|
||||
let exit_code = self.run_with_code()?;
|
||||
|
||||
match exit_code {
|
||||
0 => Ok(()),
|
||||
nonzero => Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("Non-zero exit code: {}", nonzero),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Runs the event loop until [stopped](System::stop_with_code), returning the exit code.
|
||||
pub fn run_with_code(self) -> io::Result<i32> {
|
||||
let SystemRunner { rt, stop_rx, .. } = self;
|
||||
|
||||
// run loop
|
||||
rt.block_on(stop_rx)
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
|
||||
}
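A sketch tying `stop_with_code` to `run_with_code`: the exit code travels through the stop channel that `run_with_code` blocks on.

fn main() {
    let runner = actix_rt::System::new();

    // Ask the system to stop with code 3 as soon as its event loop starts.
    actix_rt::System::current().arbiter().spawn(async {
        actix_rt::System::current().stop_with_code(3);
    });

    let code = runner.run_with_code().expect("system did not run");
    assert_eq!(code, 3);
}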
|
||||
|
||||
/// Retrieves a reference to the underlying [Actix runtime](crate::Runtime) associated with this
|
||||
/// `SystemRunner` instance.
|
||||
///
|
||||
/// The Actix runtime is responsible for managing the event loop for an Actix system and
|
||||
/// executing asynchronous tasks. This method provides access to the runtime, allowing direct
|
||||
/// interaction with its features.
|
||||
///
|
||||
/// In a typical use case, you might need to share the same runtime between different
|
||||
/// parts of your project. For example, some components might require a [`Runtime`] to spawn
|
||||
/// tasks on the same runtime.
|
||||
///
|
||||
/// Read more in the documentation for [`Runtime`].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// let system_runner = actix_rt::System::new();
|
||||
/// let actix_runtime = system_runner.runtime();
|
||||
///
|
||||
/// // Use the runtime to spawn an async task or perform other operations
|
||||
/// ```
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// While this method provides an immutable reference to the Actix runtime, which is safe to
|
||||
/// share across threads, be aware that spawning blocking tasks on the Actix runtime could
|
||||
/// potentially impact system performance. This is because the Actix runtime is responsible for
|
||||
/// driving the system, and blocking tasks could delay other tasks in the run loop.
|
||||
///
|
||||
/// [`Runtime`]: crate::Runtime
|
||||
pub fn runtime(&self) -> &crate::runtime::Runtime {
|
||||
&self.rt
|
||||
}
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future completes.
|
||||
#[track_caller]
|
||||
#[inline]
|
||||
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
|
||||
self.rt.block_on(fut)
|
||||
}
|
||||
}
|
||||
|
||||
/// Runner that keeps a [System]'s event loop alive until a stop message is received.
|
||||
#[cfg(feature = "io-uring")]
|
||||
#[must_use = "A SystemRunner does nothing unless `run` is called."]
|
||||
#[derive(Debug)]
|
||||
pub struct SystemRunner;
|
||||
|
||||
#[cfg(feature = "io-uring")]
|
||||
impl SystemRunner {
|
||||
/// Starts event loop and will return once [System] is [stopped](System::stop).
|
||||
pub fn run(self) -> io::Result<()> {
|
||||
unimplemented!("SystemRunner::run is not implemented for io-uring feature yet");
|
||||
}
|
||||
|
||||
/// Runs the event loop until [stopped](System::stop_with_code), returning the exit code.
|
||||
pub fn run_with_code(self) -> io::Result<i32> {
|
||||
unimplemented!("SystemRunner::run_with_code is not implemented for io-uring feature yet");
|
||||
}
|
||||
|
||||
/// Runs the provided future, blocking the current thread until the future completes.
|
||||
#[inline]
|
||||
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
|
||||
tokio_uring::start(async move {
|
||||
let (stop_tx, stop_rx) = oneshot::channel();
|
||||
let (sys_tx, sys_rx) = mpsc::unbounded_channel();
|
||||
|
||||
let sys_arbiter = Arbiter::in_new_system();
|
||||
let system = System::construct(sys_tx, sys_arbiter.clone());
|
||||
|
||||
system
|
||||
.tx()
|
||||
.send(SystemCommand::RegisterArbiter(usize::MAX, sys_arbiter))
|
||||
.unwrap();
|
||||
|
||||
// init background system arbiter
|
||||
let sys_ctrl = SystemController::new(sys_rx, stop_tx);
|
||||
tokio_uring::spawn(sys_ctrl);
|
||||
|
||||
let res = fut.await;
|
||||
drop(stop_rx);
|
||||
res
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum SystemCommand {
|
||||
Exit(i32),
|
||||
RegisterArbiter(usize, ArbiterHandle),
|
||||
DeregisterArbiter(usize),
|
||||
}
|
||||
|
||||
/// There is one `SystemController` per [System]. It runs in the background, keeping track of
|
||||
/// [Arbiter]s and is able to distribute a system-wide stop command.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct SystemController {
|
||||
stop_tx: Option<oneshot::Sender<i32>>,
|
||||
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
|
||||
arbiters: HashMap<usize, ArbiterHandle>,
|
||||
}
|
||||
|
||||
impl SystemController {
|
||||
pub(crate) fn new(
|
||||
cmd_rx: mpsc::UnboundedReceiver<SystemCommand>,
|
||||
stop_tx: oneshot::Sender<i32>,
|
||||
) -> Self {
|
||||
SystemController {
|
||||
cmd_rx,
|
||||
stop_tx: Some(stop_tx),
|
||||
arbiters: HashMap::with_capacity(4),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for SystemController {
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
// process all items currently buffered in channel
|
||||
loop {
|
||||
match ready!(self.cmd_rx.poll_recv(cx)) {
|
||||
// channel closed; no more messages can be received
|
||||
None => return Poll::Ready(()),
|
||||
|
||||
// process system command
|
||||
Some(cmd) => match cmd {
|
||||
SystemCommand::Exit(code) => {
|
||||
// stop all arbiters
|
||||
for arb in self.arbiters.values() {
|
||||
arb.stop();
|
||||
}
|
||||
|
||||
// stop event loop
|
||||
// will only fire once
|
||||
if let Some(stop_tx) = self.stop_tx.take() {
|
||||
let _ = stop_tx.send(code);
|
||||
}
|
||||
}
|
||||
|
||||
SystemCommand::RegisterArbiter(id, arb) => {
|
||||
self.arbiters.insert(id, arb);
|
||||
}
|
||||
|
||||
SystemCommand::DeregisterArbiter(id) => {
|
||||
self.arbiters.remove(&id);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
actix-rt/tests/test-macro-import-conflict.rs (new file)
@@ -0,0 +1,17 @@
//! Checks that test macro does not cause problems in the presence of imports named "test" that
//! could be either a module with test items or the "test with runtime" macro itself.
//!
//! Before actix/actix-net#399 was implemented, this macro was running twice. The first run output
//! a `#[test]` attribute and, since an import named `test` was in scope, the item got expanded
//! and run again.
//!
//! Prevented by using the fully-qualified test marker (`#[::core::prelude::v1::test]`).

#![cfg(feature = "macros")]

use actix_rt::time as test;

#[actix_rt::test]
async fn test_naming_conflict() {
    use test as time;
    time::sleep(std::time::Duration::from_millis(2)).await;
}
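For context, a rough sketch of the shape the test macro expands to; this is not the literal macro output, only an illustration of why the fully-qualified marker avoids the double expansion:

```rust
// Hypothetical expansion sketch of the #[actix_rt::test] item above.
// The key detail is the fully-qualified test attribute instead of a bare #[test],
// which could otherwise resolve against an in-scope import named `test`
// (for example the actix-rt test macro itself) and expand the item again.
#[::core::prelude::v1::test]
fn test_naming_conflict() {
    actix_rt::System::new().block_on(async {
        use test as time;
        time::sleep(std::time::Duration::from_millis(2)).await;
    })
}
```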
actix-rt/tests/tests.rs (new file)
@@ -0,0 +1,378 @@
#![allow(missing_docs)]
|
||||
|
||||
use std::{
|
||||
future::Future,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use actix_rt::{task::JoinError, Arbiter, System};
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
use {
|
||||
std::{sync::mpsc::channel, thread},
|
||||
tokio::sync::oneshot,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn await_for_timer() {
|
||||
let time = Duration::from_secs(1);
|
||||
let instant = Instant::now();
|
||||
System::new().block_on(async move {
|
||||
tokio::time::sleep(time).await;
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() >= time,
|
||||
"Block on should poll awaited future to completion"
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn run_with_code() {
|
||||
let sys = System::new();
|
||||
System::current().stop_with_code(42);
|
||||
let exit_code = sys.run_with_code().expect("system stop should not error");
|
||||
assert_eq!(exit_code, 42);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn join_another_arbiter() {
|
||||
let time = Duration::from_secs(1);
|
||||
let instant = Instant::now();
|
||||
System::new().block_on(async move {
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn(Box::pin(async move {
|
||||
tokio::time::sleep(time).await;
|
||||
Arbiter::current().stop();
|
||||
}));
|
||||
arbiter.join().unwrap();
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() >= time,
|
||||
"Join on another arbiter should complete only when it calls stop"
|
||||
);
|
||||
|
||||
let instant = Instant::now();
|
||||
System::new().block_on(async move {
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn_fn(move || {
|
||||
actix_rt::spawn(async move {
|
||||
tokio::time::sleep(time).await;
|
||||
Arbiter::current().stop();
|
||||
});
|
||||
});
|
||||
arbiter.join().unwrap();
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() >= time,
|
||||
"Join on an arbiter that has used actix_rt::spawn should wait for said future"
|
||||
);
|
||||
|
||||
let instant = Instant::now();
|
||||
System::new().block_on(async move {
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn(Box::pin(async move {
|
||||
tokio::time::sleep(time).await;
|
||||
Arbiter::current().stop();
|
||||
}));
|
||||
arbiter.stop();
|
||||
arbiter.join().unwrap();
|
||||
});
|
||||
assert!(
|
||||
instant.elapsed() < time,
"Premature stop of arbiter should conclude regardless of its current state"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_static_block_on() {
|
||||
let string = String::from("test_str");
|
||||
let string = string.as_str();
|
||||
|
||||
let sys = System::new();
|
||||
|
||||
sys.block_on(async {
|
||||
actix_rt::time::sleep(Duration::from_millis(1)).await;
|
||||
assert_eq!("test_str", string);
|
||||
});
|
||||
|
||||
let rt = actix_rt::Runtime::new().unwrap();
|
||||
|
||||
rt.block_on(async {
|
||||
actix_rt::time::sleep(Duration::from_millis(1)).await;
|
||||
assert_eq!("test_str", string);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wait_for_spawns() {
|
||||
let rt = actix_rt::Runtime::new().unwrap();
|
||||
|
||||
let handle = rt.spawn(async {
|
||||
println!("running on the runtime");
|
||||
// panic is caught at task boundary
|
||||
panic!("intentional test panic");
|
||||
});
|
||||
|
||||
assert!(rt.block_on(handle).is_err());
|
||||
}
|
||||
|
||||
// Temporary disabled tests for io-uring feature.
|
||||
// They should be enabled when possible.
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_spawn_fn_runs() {
|
||||
let _ = System::new();
|
||||
|
||||
let (tx, rx) = channel::<u32>();
|
||||
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn_fn(move || tx.send(42).unwrap());
|
||||
|
||||
let num = rx.recv().unwrap();
|
||||
assert_eq!(num, 42);
|
||||
|
||||
arbiter.stop();
|
||||
arbiter.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_handle_spawn_fn_runs() {
|
||||
let sys = System::new();
|
||||
|
||||
let (tx, rx) = channel::<u32>();
|
||||
|
||||
let arbiter = Arbiter::new();
|
||||
let handle = arbiter.handle();
|
||||
drop(arbiter);
|
||||
|
||||
handle.spawn_fn(move || {
|
||||
tx.send(42).unwrap();
|
||||
System::current().stop()
|
||||
});
|
||||
|
||||
let num = rx.recv_timeout(Duration::from_secs(2)).unwrap();
|
||||
assert_eq!(num, 42);
|
||||
|
||||
handle.stop();
|
||||
sys.run().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_drop_no_panic_fn() {
|
||||
let _ = System::new();
|
||||
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn_fn(|| panic!("test"));
|
||||
|
||||
arbiter.stop();
|
||||
arbiter.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn arbiter_drop_no_panic_fut() {
|
||||
let _ = System::new();
|
||||
|
||||
let arbiter = Arbiter::new();
|
||||
arbiter.spawn(async { panic!("test") });
|
||||
|
||||
arbiter.stop();
|
||||
arbiter.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn system_arbiter_spawn() {
|
||||
let runner = System::new();
|
||||
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let sys = System::current();
|
||||
|
||||
thread::spawn(|| {
|
||||
// this thread will have no arbiter in its thread local so the call will panic
|
||||
Arbiter::current();
|
||||
})
|
||||
.join()
|
||||
.unwrap_err();
|
||||
|
||||
let thread = thread::spawn(|| {
|
||||
// this thread will have no arbiter in its thread local so use the system handle instead
|
||||
System::set_current(sys);
|
||||
let sys = System::current();
|
||||
|
||||
let arb = sys.arbiter();
|
||||
arb.spawn(async move {
|
||||
tx.send(42u32).unwrap();
|
||||
System::current().stop();
|
||||
});
|
||||
});
|
||||
|
||||
assert_eq!(runner.block_on(rx).unwrap(), 42);
|
||||
thread.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn system_stop_stops_arbiters() {
|
||||
let sys = System::new();
|
||||
let arb = Arbiter::new();
|
||||
|
||||
// arbiter should be alive to receive spawn msg
|
||||
assert!(Arbiter::current().spawn_fn(|| {}));
|
||||
assert!(arb.spawn_fn(|| {}));
|
||||
|
||||
System::current().stop();
|
||||
sys.run().unwrap();
|
||||
|
||||
// account for slightly slow thread de-spawns
|
||||
thread::sleep(Duration::from_millis(500));
|
||||
|
||||
// arbiter should be dead and return false
|
||||
assert!(!Arbiter::current().spawn_fn(|| {}));
|
||||
assert!(!arb.spawn_fn(|| {}));
|
||||
|
||||
arb.join().unwrap();
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn new_system_with_tokio() {
|
||||
let (tx, rx) = channel();
|
||||
|
||||
let res = System::with_tokio_rt(move || {
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_io()
|
||||
.enable_time()
|
||||
.thread_keep_alive(Duration::from_millis(1000))
|
||||
.worker_threads(2)
|
||||
.max_blocking_threads(2)
|
||||
.on_thread_start(|| {})
|
||||
.on_thread_stop(|| {})
|
||||
.build()
|
||||
.unwrap()
|
||||
})
|
||||
.block_on(async {
|
||||
actix_rt::time::sleep(Duration::from_millis(1)).await;
|
||||
|
||||
tokio::task::spawn(async move {
|
||||
tx.send(42).unwrap();
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
123usize
|
||||
});
|
||||
|
||||
assert_eq!(res, 123);
|
||||
assert_eq!(rx.recv().unwrap(), 42);
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "io-uring"))]
|
||||
#[test]
|
||||
fn new_arbiter_with_tokio() {
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
};
|
||||
|
||||
let _ = System::new();
|
||||
|
||||
let arb = Arbiter::with_tokio_rt(|| {
|
||||
tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
let counter = Arc::new(AtomicBool::new(true));
|
||||
|
||||
let counter1 = counter.clone();
|
||||
let did_spawn = arb.spawn(async move {
|
||||
actix_rt::time::sleep(Duration::from_millis(1)).await;
|
||||
counter1.store(false, Ordering::SeqCst);
|
||||
Arbiter::current().stop();
|
||||
});
|
||||
|
||||
assert!(did_spawn);
|
||||
|
||||
arb.join().unwrap();
|
||||
|
||||
assert!(!counter.load(Ordering::SeqCst));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn no_system_current_panic() {
|
||||
System::current();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn no_system_arbiter_new_panic() {
|
||||
Arbiter::new();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn try_current_no_system() {
|
||||
assert!(System::try_current().is_none())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn try_current_with_system() {
|
||||
System::new().block_on(async { assert!(System::try_current().is_some()) });
|
||||
}
|
||||
|
||||
#[allow(clippy::unit_cmp)]
|
||||
#[test]
|
||||
fn spawn_local() {
|
||||
System::new().block_on(async {
|
||||
// demonstrate that spawn -> R is strictly more capable than spawn -> ()
|
||||
|
||||
assert_eq!(actix_rt::spawn(async {}).await.unwrap(), ());
|
||||
assert_eq!(actix_rt::spawn(async { 1 }).await.unwrap(), 1);
|
||||
assert!(actix_rt::spawn(async { panic!("") }).await.is_err());
|
||||
|
||||
actix_rt::spawn(async { tokio::time::sleep(Duration::from_millis(50)).await })
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
fn g<F: Future<Output = Result<(), JoinError>>>(_f: F) {}
|
||||
g(actix_rt::spawn(async {}));
|
||||
// g(actix_rt::spawn(async { 1 })); // compile err
|
||||
|
||||
fn h<F: Future<Output = Result<R, JoinError>>, R>(_f: F) {}
|
||||
h(actix_rt::spawn(async {}));
|
||||
h(actix_rt::spawn(async { 1 }));
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(all(target_os = "linux", feature = "io-uring"))]
|
||||
#[test]
|
||||
fn tokio_uring_arbiter() {
|
||||
System::new().block_on(async {
|
||||
let (tx, rx) = std::sync::mpsc::channel();
|
||||
|
||||
Arbiter::new().spawn(async move {
|
||||
let handle = actix_rt::spawn(async move {
|
||||
let f = tokio_uring::fs::File::create("test.txt").await.unwrap();
|
||||
let buf = b"Hello World!";
|
||||
|
||||
let (res, _) = f.write_all_at(&buf[..], 0).await;
|
||||
assert!(res.is_ok());
|
||||
|
||||
f.sync_all().await.unwrap();
|
||||
f.close().await.unwrap();
|
||||
|
||||
std::fs::remove_file("test.txt").unwrap();
|
||||
});
|
||||
|
||||
handle.await.unwrap();
|
||||
tx.send(true).unwrap();
|
||||
});
|
||||
|
||||
assert!(rx.recv().unwrap());
|
||||
})
|
||||
}
|
actix-server-config/Cargo.toml (deleted file)
@@ -1,17 +0,0 @@
[package]
name = "actix-server-config"
version = "0.1.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix server config utils"
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."

[lib]
name = "actix_server_config"
path = "src/lib.rs"

[dependencies]
futures = "0.1.25"
actix-server-config/src/lib.rs (deleted file)
@@ -1,132 +0,0 @@
use std::cell::Cell;
|
||||
use std::fmt;
|
||||
use std::net::SocketAddr;
|
||||
use std::rc::Rc;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ServerConfig {
|
||||
addr: SocketAddr,
|
||||
secure: Rc<Cell<bool>>,
|
||||
}
|
||||
|
||||
impl ServerConfig {
|
||||
pub fn new(addr: SocketAddr) -> Self {
|
||||
ServerConfig {
|
||||
addr,
|
||||
secure: Rc::new(Cell::new(false)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the address of the local half of this TCP server socket
|
||||
pub fn local_addr(&self) -> SocketAddr {
|
||||
self.addr
|
||||
}
|
||||
|
||||
/// Returns true if connection is secure (tls enabled)
|
||||
pub fn secure(&self) -> bool {
|
||||
self.secure.as_ref().get()
|
||||
}
|
||||
|
||||
/// Set secure flag
|
||||
pub fn set_secure(&self) {
|
||||
self.secure.as_ref().set(true)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub enum Protocol {
|
||||
Unknown,
|
||||
Http10,
|
||||
Http11,
|
||||
Http2,
|
||||
Proto1,
|
||||
Proto2,
|
||||
Proto3,
|
||||
Proto4,
|
||||
Proto5,
|
||||
Proto6,
|
||||
}
|
||||
|
||||
pub struct Io<T, P = ()> {
|
||||
io: T,
|
||||
proto: Protocol,
|
||||
params: P,
|
||||
}
|
||||
|
||||
impl<T> Io<T, ()> {
|
||||
pub fn new(io: T) -> Self {
|
||||
Self {
|
||||
io,
|
||||
proto: Protocol::Unknown,
|
||||
params: (),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, P> Io<T, P> {
|
||||
/// Reconstruct from parts.
|
||||
pub fn from_parts(io: T, params: P, proto: Protocol) -> Self {
|
||||
Self { io, params, proto }
|
||||
}
|
||||
|
||||
/// Deconstruct into parts.
|
||||
pub fn into_parts(self) -> (T, P, Protocol) {
|
||||
(self.io, self.params, self.proto)
|
||||
}
|
||||
|
||||
/// Returns a shared reference to the underlying stream.
|
||||
pub fn get_ref(&self) -> &T {
|
||||
&self.io
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the underlying stream.
|
||||
pub fn get_mut(&mut self) -> &mut T {
|
||||
&mut self.io
|
||||
}
|
||||
|
||||
/// Get selected protocol
|
||||
pub fn protocol(&self) -> Protocol {
|
||||
self.proto
|
||||
}
|
||||
|
||||
/// Return new Io object with new parameter.
|
||||
pub fn set<U>(self, params: U) -> Io<T, U> {
|
||||
Io {
|
||||
io: self.io,
|
||||
proto: self.proto,
|
||||
params: params,
|
||||
}
|
||||
}
|
||||
|
||||
/// Maps an Io<_, P> to Io<_, U> by applying a function to a contained value.
|
||||
pub fn map<U, F>(self, op: F) -> Io<T, U>
|
||||
where
|
||||
F: FnOnce(P) -> U,
|
||||
{
|
||||
Io {
|
||||
io: self.io,
|
||||
proto: self.proto,
|
||||
params: op(self.params),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, P> std::ops::Deref for Io<T, P> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
&self.io
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, P> std::ops::DerefMut for Io<T, P> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
&mut self.io
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: fmt::Debug, P> fmt::Debug for Io<T, P> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "Io {{{:?}}}", self.io)
|
||||
}
|
||||
}
|
actix-server/CHANGES.md
@@ -1,86 +1,232 @@
# Changes
|
||||
|
||||
## [0.4.1] - 2019-03-14
|
||||
## Unreleased
|
||||
|
||||
### Added
|
||||
## 2.5.1
|
||||
|
||||
* `SystemRuntime::on_start()` - allow to run future before server service initialization
|
||||
- Fix panic in test server.
|
||||
- Minimum supported Rust version (MSRV) is now 1.71.
|
||||
|
||||
## 2.5.0
|
||||
|
||||
## [0.4.0] - 2019-03-12
|
||||
- Update `mio` dependency to `1`.
|
||||
|
||||
### Changed
|
||||
## 2.4.0
|
||||
|
||||
* Use `ServerConfig` for service factory
|
||||
- Update `tokio-uring` dependency to `0.5`.
|
||||
- Minimum supported Rust version (MSRV) is now 1.70.
|
||||
|
||||
* Wrap tcp socket to `Io` type
|
||||
## 2.3.0
|
||||
|
||||
* Upgrade actix-service
|
||||
- Add support for MultiPath TCP (MPTCP) with `MpTcp` enum and `ServerBuilder::mptcp()` method.
|
||||
- Minimum supported Rust version (MSRV) is now 1.65.
|
||||
|
||||
## 2.2.0
|
||||
|
||||
## [0.3.1] - 2019-03-04
|
||||
- Minimum supported Rust version (MSRV) is now 1.59.
|
||||
- Update `tokio-uring` dependency to `0.4`.
|
||||
|
||||
### Added
|
||||
## 2.1.1
|
||||
|
||||
* Add `ServerBuilder::maxconnrate` sets the maximum per-worker number of concurrent connections
|
||||
- No significant changes since `2.1.0`.
|
||||
|
||||
* Add helper ssl error `SslError`
|
||||
## 2.1.0
|
||||
|
||||
- Update `tokio-uring` dependency to `0.3`.
|
||||
- Logs emitted now use the `tracing` crate with `log` compatibility.
|
||||
- Wait for accept thread to stop before sending completion signal.
|
||||
|
||||
### Changed
|
||||
## 2.0.0
|
||||
|
||||
* Rename `StreamServiceFactory` to `ServiceFactory`
|
||||
- No significant changes since `2.0.0-rc.4`.
|
||||
|
||||
* Deprecate `StreamServiceFactory`
|
||||
## 2.0.0-rc.4
|
||||
|
||||
- Update `tokio-uring` dependency to `0.2`.
|
||||
|
||||
## [0.3.0] - 2019-03-02
|
||||
## 2.0.0-rc.3
|
||||
|
||||
### Changed
|
||||
- No significant changes since `2.0.0-rc.2`.
|
||||
|
||||
* Use new `NewService` trait
|
||||
## 2.0.0-rc.2
|
||||
|
||||
- Simplify `TestServer`.
|
||||
|
||||
## [0.2.1] - 2019-02-09
|
||||
## 2.0.0-rc.1
|
||||
|
||||
### Changed
|
||||
- Hide implementation details of `Server`.
|
||||
- `Server` now runs only after awaiting it.
|
||||
|
||||
* Drop service response
|
||||
## 2.0.0-beta.9
|
||||
|
||||
- Restore `Arbiter` support lost in `beta.8`.
|
||||
|
||||
## [0.2.0] - 2019-02-01
|
||||
## 2.0.0-beta.8
|
||||
|
||||
### Changed
|
||||
- Fix non-unix signal handler.
|
||||
|
||||
* Migrate to actix-service 0.2
|
||||
## 2.0.0-beta.7
|
||||
|
||||
* Updated rustls dependency
|
||||
- Server can be started in regular Tokio runtime.
|
||||
- Expose new `Server` type whose `Future` impl resolves when server stops.
|
||||
- Rename `Server` to `ServerHandle`.
|
||||
- Add `Server::handle` to obtain handle to server.
|
||||
- Rename `ServerBuilder::{maxconn => max_concurrent_connections}`.
|
||||
- Deprecate crate-level `new` shortcut for server builder.
|
||||
- Minimum supported Rust version (MSRV) is now 1.52.
|
||||
|
||||
## 2.0.0-beta.6
|
||||
|
||||
## [0.1.3] - 2018-12-21
|
||||
- Add experimental (semver-exempt) `io-uring` feature for enabling async file I/O on linux.
|
||||
- Server no longer listens to `SIGHUP` signal. Previously, the received signal was not used but did block subsequent exit signals from working.
|
||||
- Remove `config` module. `ServiceConfig`, `ServiceRuntime` public types are removed due to this change.
|
||||
- Remove `ServerBuilder::configure`.
|
||||
|
||||
### Fixed
|
||||
## 2.0.0-beta.5
|
||||
|
||||
* Fix max concurrent connections handling
|
||||
- Server shutdown notifies all workers to exit regardless if shutdown is graceful. This causes all workers to shutdown immediately in force shutdown case.
|
||||
|
||||
## 2.0.0-beta.4
|
||||
|
||||
## [0.1.2] - 2018-12-12
|
||||
- Prevent panic when `shutdown_timeout` is very large. [f9262db]
|
||||
|
||||
### Changed
|
||||
## 2.0.0-beta.3
|
||||
|
||||
* rename ServiceConfig::rt() to ServiceConfig::apply()
|
||||
- Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`.
|
||||
- Add retry for EINTR signal (`io::Interrupted`) in `Accept`'s poll loop.
|
||||
- Add `ServerBuilder::worker_max_blocking_threads` to customize blocking thread pool size.
|
||||
- Update `actix-rt` to `2.0.0`.
|
||||
|
||||
## 2.0.0-beta.2
|
||||
|
||||
### Fixed
|
||||
- Merge `actix-testing` to `actix-server` as `test_server` mod.
|
||||
|
||||
* Fix back-pressure for concurrent ssl handshakes
|
||||
## 2.0.0-beta.1
|
||||
|
||||
- Added explicit info log message on accept queue pause.
|
||||
- Prevent double registration of sockets when back-pressure is resolved.
|
||||
- Update `mio` dependency to `0.7.3`.
|
||||
- Remove `socket2` dependency.
|
||||
- `ServerBuilder::backlog` now accepts `u32` instead of `i32`.
|
||||
- Remove `AcceptNotify` type and pass `WakerQueue` to `Worker` to wake up `Accept`'s `Poll`.
|
||||
- Convert `mio::net::TcpStream` to `actix_rt::net::TcpStream`(`UnixStream` for uds) using `FromRawFd` and `IntoRawFd`(`FromRawSocket` and `IntoRawSocket` on windows).
|
||||
- Remove `AsyncRead` and `AsyncWrite` trait bound for `socket::FromStream` trait.
|
||||
|
||||
## [0.1.1] - 2018-12-11
|
||||
## 1.0.4
|
||||
|
||||
* Fix signal handling on windows
|
||||
- Update actix-codec to 0.3.0.
|
||||
- Workers must be greater than 0.
|
||||
|
||||
## 1.0.3
|
||||
|
||||
## [0.1.0] - 2018-12-09
|
||||
- Replace deprecated `net2` crate with `socket2`.
|
||||
|
||||
* Move server to separate crate
|
||||
## 1.0.2
|
||||
|
||||
- Avoid error by calling `reregister()` on Windows.
|
||||
|
||||
## 1.0.1
|
||||
|
||||
- Rename `.start()` method to `.run()`
|
||||
|
||||
## 1.0.0
|
||||
|
||||
- Use actix-net releases
|
||||
|
||||
## 1.0.0-alpha.4
|
||||
|
||||
- Use actix-service 1.0.0-alpha.4
|
||||
|
||||
## 1.0.0-alpha.3
|
||||
|
||||
- Migrate to tokio 0.2
|
||||
- Fix compilation on non-unix platforms
|
||||
- Better handling server configuration
|
||||
|
||||
## 1.0.0-alpha.2
|
||||
|
||||
- Simplify server service (remove actix-server-config)
|
||||
- Allow to wait on `Server` until server stops
|
||||
|
||||
## 0.8.0-alpha.1
|
||||
|
||||
- Migrate to `std::future`
|
||||
|
||||
## 0.7.0
|
||||
|
||||
- Update `rustls` to 0.16
|
||||
- Minimum required Rust version upped to 1.37.0
|
||||
|
||||
## 0.6.1
|
||||
|
||||
- Add UDS listening support to `ServerBuilder`
|
||||
|
||||
## 0.6.0
|
||||
|
||||
- Support Unix domain sockets #3
|
||||
|
||||
## 0.5.1
|
||||
|
||||
- ServerBuilder::shutdown_timeout() accepts u64
|
||||
|
||||
## 0.5.0
|
||||
|
||||
- Add `Debug` impl for `SslError`
|
||||
- Derive debug for `Server` and `ServerCommand`
|
||||
- Upgrade to actix-service 0.4
|
||||
|
||||
## 0.4.3
|
||||
|
||||
- Re-export `IoStream` trait
|
||||
- Depend on `ssl` and `rust-tls` features from actix-server-config
|
||||
|
||||
## 0.4.2
|
||||
|
||||
- Fix SIGINT force shutdown
|
||||
|
||||
## 0.4.1
|
||||
|
||||
- `SystemRuntime::on_start()` - allow to run future before server service initialization
|
||||
|
||||
## 0.4.0
|
||||
|
||||
- Use `ServerConfig` for service factory
|
||||
- Wrap tcp socket to `Io` type
|
||||
- Upgrade actix-service
|
||||
|
||||
## 0.3.1
|
||||
|
||||
- Add `ServerBuilder::maxconnrate` sets the maximum per-worker number of concurrent connections
|
||||
- Add helper ssl error `SslError`
|
||||
- Rename `StreamServiceFactory` to `ServiceFactory`
|
||||
- Deprecate `StreamServiceFactory`
|
||||
|
||||
## 0.3.0
|
||||
|
||||
- Use new `NewService` trait
|
||||
|
||||
## 0.2.1
|
||||
|
||||
- Drop service response
|
||||
|
||||
## 0.2.0
|
||||
|
||||
- Migrate to actix-service 0.2
|
||||
- Updated rustls dependency
|
||||
|
||||
## 0.1.3
|
||||
|
||||
- Fix max concurrent connections handling
|
||||
|
||||
## 0.1.2
|
||||
|
||||
- rename ServiceConfig::rt() to ServiceConfig::apply()
|
||||
- Fix back-pressure for concurrent ssl handshakes
|
||||
|
||||
## 0.1.1
|
||||
|
||||
- Fix signal handling on windows
|
||||
|
||||
## 0.1.0
|
||||
|
||||
- Move server to separate crate
|
||||
|
actix-server/Cargo.toml
@@ -1,69 +1,50 @@
[package]
|
||||
name = "actix-server"
|
||||
version = "0.4.1"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "Actix server - General purpose tcp server"
|
||||
keywords = ["network", "framework", "async", "futures"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net.git"
|
||||
documentation = "https://docs.rs/actix-server/"
|
||||
version = "2.5.1"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"Rob Ede <robjtede@icloud.com>",
|
||||
"Ali MJ Al-Nasrawy <alimjalnasrawy@gmail.com>",
|
||||
]
|
||||
description = "General purpose TCP server built for the Actix ecosystem"
|
||||
keywords = ["network", "tcp", "server", "framework", "async"]
|
||||
categories = ["network-programming", "asynchronous"]
|
||||
license = "MIT/Apache-2.0"
|
||||
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
|
||||
edition = "2018"
|
||||
workspace = ".."
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-net/tree/master/actix-server"
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
features = ["ssl", "tls", "rust-tls"]
|
||||
|
||||
[lib]
|
||||
name = "actix_server"
|
||||
path = "src/lib.rs"
|
||||
[package.metadata.cargo_check_external_types]
|
||||
allowed_external_types = ["tokio::*"]
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
# tls
|
||||
tls = ["native-tls"]
|
||||
|
||||
# openssl
|
||||
ssl = ["openssl", "tokio-openssl"]
|
||||
|
||||
# rustls
|
||||
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"]
|
||||
io-uring = ["tokio-uring", "actix-rt/io-uring"]
|
||||
|
||||
[dependencies]
|
||||
actix-rt = "0.2.1"
|
||||
actix-service = "0.3.4"
|
||||
actix-server-config = "0.1.0"
|
||||
actix-rt = { version = "2.10", default-features = false }
|
||||
actix-service = "2"
|
||||
actix-utils = "3"
|
||||
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] }
|
||||
futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] }
|
||||
mio = { version = "1", features = ["os-poll", "net"] }
|
||||
socket2 = "0.5"
|
||||
tokio = { version = "1.23.1", features = ["sync"] }
|
||||
tracing = { version = "0.1.30", default-features = false, features = ["log"] }
|
||||
|
||||
log = "0.4"
|
||||
num_cpus = "1.0"
|
||||
|
||||
mio = "^0.6.13"
|
||||
net2 = "0.2"
|
||||
futures = "0.1"
|
||||
slab = "0.4"
|
||||
tokio-io = "0.1"
|
||||
tokio-tcp = "0.1"
|
||||
tokio-timer = "0.2.8"
|
||||
tokio-reactor = "0.1"
|
||||
tokio-signal = "0.2"
|
||||
|
||||
# native-tls
|
||||
native-tls = { version="0.2", optional = true }
|
||||
|
||||
# openssl
|
||||
openssl = { version="0.10", optional = true }
|
||||
tokio-openssl = { version="0.3", optional = true }
|
||||
|
||||
#rustls
|
||||
rustls = { version = "^0.15", optional = true }
|
||||
tokio-rustls = { version = "^0.9", optional = true }
|
||||
webpki = { version = "0.19", optional = true }
|
||||
webpki-roots = { version = "0.16", optional = true }
|
||||
# runtime for `io-uring` feature
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
tokio-uring = { version = "0.5", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
bytes = "0.4"
|
||||
actix-codec = "0.1.0"
|
||||
env_logger = "0.6"
|
||||
actix-codec = "0.5"
|
||||
actix-rt = "2.8"
|
||||
|
||||
bytes = "1"
|
||||
futures-util = { version = "0.3.17", default-features = false, features = ["sink", "async-await-macro"] }
|
||||
pretty_env_logger = "0.5"
|
||||
tokio = { version = "1.23.1", features = ["io-util", "rt-multi-thread", "macros", "fs"] }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
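As an aside on the `io-uring` feature declared above: a downstream crate would opt in through its own manifest, roughly like the following (the version requirement is illustrative, not taken from this diff):

```toml
# Hypothetical consumer manifest enabling the optional io-uring support.
# The feature is Linux-only and marked experimental (semver-exempt) in the changelog,
# and it forwards to actix-rt's io-uring feature as declared in the Cargo.toml above.
[dependencies]
actix-server = { version = "2", features = ["io-uring"] }
```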
actix-server/LICENSE-APACHE (new symbolic link)
@@ -0,0 +1 @@
../LICENSE-APACHE

actix-server/LICENSE-MIT (new symbolic link)
@@ -0,0 +1 @@
../LICENSE-MIT

actix-server/README.md (new file)
@@ -0,0 +1,21 @@
# actix-server

> General purpose TCP server built for the Actix ecosystem.

<!-- prettier-ignore-start -->

[](https://crates.io/crates/actix-server)
[](https://docs.rs/actix-server/2.5.1)
[](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
<br />
[](https://deps.rs/crate/actix-server/2.5.1)
[](https://discord.gg/NWpN5mmg3x)

<!-- prettier-ignore-end -->

## Resources

- [Library Documentation](https://docs.rs/actix-server)
- [Examples](/actix-server/examples)
actix-server/examples/file-reader.rs (new file)
@@ -0,0 +1,98 @@
//! Simple file-reader TCP server with framed stream.
|
||||
//!
|
||||
//! Using the following command:
|
||||
//!
|
||||
//! ```sh
|
||||
//! nc 127.0.0.1 8080
|
||||
//! ```
|
||||
//!
|
||||
//! Follow the prompt and enter a file path, relative or absolute.
|
||||
|
||||
#![allow(missing_docs)]
|
||||
|
||||
use std::io;
|
||||
|
||||
use actix_codec::{Framed, LinesCodec};
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_server::Server;
|
||||
use actix_service::{fn_service, ServiceFactoryExt as _};
|
||||
use futures_util::{SinkExt as _, StreamExt as _};
|
||||
use tokio::{fs::File, io::AsyncReadExt as _};
|
||||
|
||||
async fn run() -> io::Result<()> {
|
||||
pretty_env_logger::formatted_timed_builder()
|
||||
.parse_env(pretty_env_logger::env_logger::Env::default().default_filter_or("info"));
|
||||
|
||||
let addr = ("127.0.0.1", 8080);
|
||||
tracing::info!("starting server on port: {}", &addr.0);
|
||||
|
||||
// Bind socket address and start worker(s). By default, the server uses the number of physical
|
||||
// CPU cores as the worker count. For this reason, the closure passed to bind needs to return
|
||||
// a service *factory*; so it can be created once per worker.
|
||||
Server::build()
|
||||
.bind("file-reader", addr, move || {
|
||||
fn_service(move |stream: TcpStream| async move {
|
||||
// set up codec to use with I/O resource
|
||||
let mut framed = Framed::new(stream, LinesCodec::default());
|
||||
|
||||
loop {
|
||||
// prompt for file name
|
||||
framed.send("Type file name to return:").await?;
|
||||
|
||||
// wait for next line
|
||||
match framed.next().await {
|
||||
Some(Ok(line)) => {
|
||||
match File::open(&line).await {
|
||||
Ok(mut file) => {
|
||||
tracing::info!("reading file: {}", &line);
|
||||
|
||||
// read file into String buffer
|
||||
let mut buf = String::new();
|
||||
file.read_to_string(&mut buf).await?;
|
||||
|
||||
// send String into framed object
|
||||
framed.send(buf).await?;
|
||||
|
||||
// break out of loop and close connection
|
||||
break;
|
||||
}
|
||||
Err(err) => {
|
||||
tracing::error!("{}", err);
|
||||
framed
|
||||
.send("File not found or not readable. Try again.")
|
||||
.await?;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// not being able to read a line from the stream is unrecoverable
|
||||
Some(Err(err)) => return Err(err),
|
||||
|
||||
// This EOF won't be hit.
|
||||
None => continue,
|
||||
}
|
||||
}
|
||||
|
||||
// close connection after file has been copied to TCP stream
|
||||
Ok(())
|
||||
})
|
||||
.map_err(|err| tracing::error!("service error: {:?}", err))
|
||||
})?
|
||||
.workers(2)
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
run().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// alternatively:
|
||||
// #[actix_rt::main]
|
||||
// async fn main() -> io::Result<()> {
|
||||
// run().await?;
|
||||
// Ok(())
|
||||
// }
|
actix-server/examples/tcp-echo.rs (new file)
@@ -0,0 +1,101 @@
//! Simple composite-service TCP echo server.
|
||||
//!
|
||||
//! Using the following command:
|
||||
//!
|
||||
//! ```sh
|
||||
//! nc 127.0.0.1 8080
|
||||
//! ```
|
||||
//!
|
||||
//! Start typing. When you press enter the typed line will be echoed back. The server will log
|
||||
//! the length of each line it echoes and the total size of data sent when the connection is closed.
|
||||
|
||||
use std::{
|
||||
io,
|
||||
sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_server::Server;
|
||||
use actix_service::{fn_service, ServiceFactoryExt as _};
|
||||
use bytes::BytesMut;
|
||||
use futures_util::future::ok;
|
||||
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
|
||||
|
||||
async fn run() -> io::Result<()> {
|
||||
pretty_env_logger::formatted_timed_builder()
|
||||
.parse_env(pretty_env_logger::env_logger::Env::default().default_filter_or("info"));
|
||||
|
||||
let count = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
let addr = ("127.0.0.1", 8080);
|
||||
tracing::info!("starting server on port: {}", &addr.0);
|
||||
|
||||
// Bind socket address and start worker(s). By default, the server uses the number of physical
|
||||
// CPU cores as the worker count. For this reason, the closure passed to bind needs to return
|
||||
// a service *factory*; so it can be created once per worker.
|
||||
Server::build()
|
||||
.bind("echo", addr, move || {
|
||||
let count = Arc::clone(&count);
|
||||
let num2 = Arc::clone(&count);
|
||||
|
||||
fn_service(move |mut stream: TcpStream| {
|
||||
let count = Arc::clone(&count);
|
||||
|
||||
async move {
|
||||
let num = count.fetch_add(1, Ordering::SeqCst);
|
||||
let num = num + 1;
|
||||
|
||||
let mut size = 0;
|
||||
let mut buf = BytesMut::new();
|
||||
|
||||
loop {
|
||||
match stream.read_buf(&mut buf).await {
|
||||
// end of stream; bail from loop
|
||||
Ok(0) => break,
|
||||
|
||||
// more bytes to process
|
||||
Ok(bytes_read) => {
|
||||
tracing::info!("[{}] read {} bytes", num, bytes_read);
|
||||
stream.write_all(&buf[size..]).await.unwrap();
|
||||
size += bytes_read;
|
||||
}
|
||||
|
||||
// stream error; bail from loop with error
|
||||
Err(err) => {
|
||||
tracing::error!("stream error: {:?}", err);
|
||||
return Err(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// send data down service pipeline
|
||||
Ok((buf.freeze(), size))
|
||||
}
|
||||
})
|
||||
.map_err(|err| tracing::error!("service error: {:?}", err))
|
||||
.and_then(move |(_, size)| {
|
||||
let num = num2.load(Ordering::SeqCst);
|
||||
tracing::info!("[{}] total bytes read: {}", num, size);
|
||||
ok(size)
|
||||
})
|
||||
})?
|
||||
.workers(2)
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
run().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// alternatively:
|
||||
// #[actix_rt::main]
|
||||
// async fn main() -> io::Result<()> {
|
||||
// run().await?;
|
||||
// Ok(())
|
||||
// }
|
actix-server/src/accept.rs
@@ -1,461 +1,462 @@
use std::sync::mpsc as sync_mpsc;
|
||||
use std::time::{Duration, Instant};
|
||||
use std::{io, net, thread};
|
||||
use std::{io, thread, time::Duration};
|
||||
|
||||
use actix_rt::System;
|
||||
use futures::future::{lazy, Future};
|
||||
use log::{error, info};
|
||||
use mio;
|
||||
use slab::Slab;
|
||||
use tokio_timer::Delay;
|
||||
use actix_rt::time::Instant;
|
||||
use mio::{Interest, Poll, Token as MioToken};
|
||||
use tracing::{debug, error, info};
|
||||
|
||||
use super::server::Server;
|
||||
use super::worker::{Conn, WorkerClient};
|
||||
use super::Token;
|
||||
use crate::{
|
||||
availability::Availability,
|
||||
socket::MioListener,
|
||||
waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN},
|
||||
worker::{Conn, ServerWorker, WorkerHandleAccept, WorkerHandleServer},
|
||||
ServerBuilder, ServerHandle,
|
||||
};
|
||||
|
||||
pub(crate) enum Command {
|
||||
Pause,
|
||||
Resume,
|
||||
Stop,
|
||||
Worker(WorkerClient),
|
||||
}
|
||||
const TIMEOUT_DURATION_ON_ERROR: Duration = Duration::from_millis(510);
|
||||
|
||||
struct ServerSocketInfo {
|
||||
addr: net::SocketAddr,
|
||||
token: Token,
|
||||
sock: mio::net::TcpListener,
|
||||
timeout: Option<Instant>,
|
||||
token: usize,
|
||||
|
||||
lst: MioListener,
|
||||
|
||||
/// Timeout is used to mark the deadline when this socket's listener should be registered again
|
||||
/// after an error.
|
||||
timeout: Option<actix_rt::time::Instant>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct AcceptNotify(mio::SetReadiness);
|
||||
|
||||
impl AcceptNotify {
|
||||
pub(crate) fn new(ready: mio::SetReadiness) -> Self {
|
||||
AcceptNotify(ready)
|
||||
}
|
||||
|
||||
pub(crate) fn notify(&self) {
|
||||
let _ = self.0.set_readiness(mio::Ready::readable());
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for AcceptNotify {
|
||||
fn default() -> Self {
|
||||
AcceptNotify::new(mio::Registration::new2().1)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct AcceptLoop {
|
||||
cmd_reg: Option<mio::Registration>,
|
||||
cmd_ready: mio::SetReadiness,
|
||||
notify_reg: Option<mio::Registration>,
|
||||
notify_ready: mio::SetReadiness,
|
||||
tx: sync_mpsc::Sender<Command>,
|
||||
rx: Option<sync_mpsc::Receiver<Command>>,
|
||||
srv: Option<Server>,
|
||||
}
|
||||
|
||||
impl AcceptLoop {
|
||||
pub fn new(srv: Server) -> AcceptLoop {
|
||||
let (tx, rx) = sync_mpsc::channel();
|
||||
let (cmd_reg, cmd_ready) = mio::Registration::new2();
|
||||
let (notify_reg, notify_ready) = mio::Registration::new2();
|
||||
|
||||
AcceptLoop {
|
||||
tx,
|
||||
cmd_ready,
|
||||
cmd_reg: Some(cmd_reg),
|
||||
notify_ready,
|
||||
notify_reg: Some(notify_reg),
|
||||
rx: Some(rx),
|
||||
srv: Some(srv),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send(&self, msg: Command) {
|
||||
let _ = self.tx.send(msg);
|
||||
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
|
||||
}
|
||||
|
||||
pub fn get_notify(&self) -> AcceptNotify {
|
||||
AcceptNotify::new(self.notify_ready.clone())
|
||||
}
|
||||
|
||||
pub(crate) fn start(
|
||||
&mut self,
|
||||
socks: Vec<(Token, net::TcpListener)>,
|
||||
workers: Vec<WorkerClient>,
|
||||
) {
|
||||
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
|
||||
|
||||
Accept::start(
|
||||
self.rx.take().expect("Can not re-use AcceptInfo"),
|
||||
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
|
||||
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
|
||||
socks,
|
||||
srv,
|
||||
workers,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
struct Accept {
|
||||
poll: mio::Poll,
|
||||
rx: sync_mpsc::Receiver<Command>,
|
||||
sockets: Slab<ServerSocketInfo>,
|
||||
workers: Vec<WorkerClient>,
|
||||
srv: Server,
|
||||
timer: (mio::Registration, mio::SetReadiness),
|
||||
/// Poll instance of the server.
|
||||
pub(crate) struct Accept {
|
||||
poll: Poll,
|
||||
waker_queue: WakerQueue,
|
||||
handles: Vec<WorkerHandleAccept>,
|
||||
srv: ServerHandle,
|
||||
next: usize,
|
||||
backpressure: bool,
|
||||
}
|
||||
|
||||
const DELTA: usize = 100;
|
||||
const CMD: mio::Token = mio::Token(0);
|
||||
const TIMER: mio::Token = mio::Token(1);
|
||||
const NOTIFY: mio::Token = mio::Token(2);
|
||||
|
||||
/// This function defines errors that are per-connection; if we get one of these errors from
/// the `accept()` system call, it means the next connection might still be ready to be
/// accepted.
|
||||
///
|
||||
/// All other errors will incur a timeout before the next `accept()` is performed.
/// The timeout is useful to handle resource exhaustion errors like ENFILE
/// and EMFILE; otherwise, the accept loop could spin in a tight loop.
|
||||
fn connection_error(e: &io::Error) -> bool {
|
||||
e.kind() == io::ErrorKind::ConnectionRefused
|
||||
|| e.kind() == io::ErrorKind::ConnectionAborted
|
||||
|| e.kind() == io::ErrorKind::ConnectionReset
|
||||
avail: Availability,
|
||||
/// use the smallest duration from sockets timeout.
|
||||
timeout: Option<Duration>,
|
||||
paused: bool,
|
||||
}
|
||||
|
||||
impl Accept {
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn start(
|
||||
rx: sync_mpsc::Receiver<Command>,
|
||||
cmd_reg: mio::Registration,
|
||||
notify_reg: mio::Registration,
|
||||
socks: Vec<(Token, net::TcpListener)>,
|
||||
srv: Server,
|
||||
workers: Vec<WorkerClient>,
|
||||
) {
|
||||
let sys = System::current();
|
||||
sockets: Vec<(usize, MioListener)>,
|
||||
builder: &ServerBuilder,
|
||||
) -> io::Result<(WakerQueue, Vec<WorkerHandleServer>, thread::JoinHandle<()>)> {
|
||||
let handle_server = ServerHandle::new(builder.cmd_tx.clone());
|
||||
|
||||
// start accept thread
|
||||
let _ = thread::Builder::new()
|
||||
.name("actix-server accept loop".to_owned())
|
||||
.spawn(move || {
|
||||
System::set_current(sys);
|
||||
let mut accept = Accept::new(rx, socks, workers, srv);
|
||||
// construct poll instance and its waker
|
||||
let poll = Poll::new()?;
|
||||
let waker_queue = WakerQueue::new(poll.registry())?;
|
||||
|
||||
// Start listening for incoming commands
|
||||
if let Err(err) = accept.poll.register(
|
||||
&cmd_reg,
|
||||
CMD,
|
||||
mio::Ready::readable(),
|
||||
mio::PollOpt::edge(),
|
||||
) {
|
||||
panic!("Can not register Registration: {}", err);
|
||||
}
|
||||
// start workers and collect handles
|
||||
let (handles_accept, handles_server) = (0..builder.threads)
|
||||
.map(|idx| {
|
||||
// clone service factories
|
||||
let factories = builder
|
||||
.factories
|
||||
.iter()
|
||||
.map(|f| f.clone_factory())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Start listening for notify updates
|
||||
if let Err(err) = accept.poll.register(
|
||||
¬ify_reg,
|
||||
NOTIFY,
|
||||
mio::Ready::readable(),
|
||||
mio::PollOpt::edge(),
|
||||
) {
|
||||
panic!("Can not register Registration: {}", err);
|
||||
}
|
||||
// start worker using service factories
|
||||
ServerWorker::start(idx, factories, waker_queue.clone(), builder.worker_config)
|
||||
})
|
||||
.collect::<io::Result<Vec<_>>>()?
|
||||
.into_iter()
|
||||
.unzip();
|
||||
|
||||
accept.poll();
|
||||
});
|
||||
let (mut accept, mut sockets) = Accept::new_with_sockets(
|
||||
poll,
|
||||
waker_queue.clone(),
|
||||
sockets,
|
||||
handles_accept,
|
||||
handle_server,
|
||||
)?;
|
||||
|
||||
let accept_handle = thread::Builder::new()
|
||||
.name("actix-server acceptor".to_owned())
|
||||
.spawn(move || accept.poll_with(&mut sockets))
|
||||
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
|
||||
|
||||
Ok((waker_queue, handles_server, accept_handle))
|
||||
}
|
||||
|
||||
fn new(
|
||||
rx: sync_mpsc::Receiver<Command>,
|
||||
socks: Vec<(Token, net::TcpListener)>,
|
||||
workers: Vec<WorkerClient>,
|
||||
srv: Server,
|
||||
) -> Accept {
|
||||
// Create a poll instance
|
||||
let poll = match mio::Poll::new() {
|
||||
Ok(poll) => poll,
|
||||
Err(err) => panic!("Can not create mio::Poll: {}", err),
|
||||
fn new_with_sockets(
|
||||
poll: Poll,
|
||||
waker_queue: WakerQueue,
|
||||
sockets: Vec<(usize, MioListener)>,
|
||||
accept_handles: Vec<WorkerHandleAccept>,
|
||||
server_handle: ServerHandle,
|
||||
) -> io::Result<(Accept, Box<[ServerSocketInfo]>)> {
|
||||
let sockets = sockets
|
||||
.into_iter()
|
||||
.map(|(token, mut lst)| {
|
||||
// Start listening for incoming connections
|
||||
poll.registry()
|
||||
.register(&mut lst, MioToken(token), Interest::READABLE)?;
|
||||
|
||||
Ok(ServerSocketInfo {
|
||||
token,
|
||||
lst,
|
||||
timeout: None,
|
||||
})
|
||||
})
|
||||
.collect::<io::Result<_>>()?;
|
||||
|
||||
let mut avail = Availability::default();
|
||||
|
||||
// Assume all handles are avail at construct time.
|
||||
avail.set_available_all(&accept_handles);
|
||||
|
||||
let accept = Accept {
|
||||
poll,
|
||||
waker_queue,
|
||||
handles: accept_handles,
|
||||
srv: server_handle,
|
||||
next: 0,
|
||||
avail,
|
||||
timeout: None,
|
||||
paused: false,
|
||||
};
|
||||
|
||||
// Start accept
|
||||
let mut sockets = Slab::new();
|
||||
for (hnd_token, lst) in socks.into_iter() {
|
||||
let addr = lst.local_addr().unwrap();
|
||||
let server = mio::net::TcpListener::from_std(lst)
|
||||
.expect("Can not create mio::net::TcpListener");
|
||||
|
||||
let entry = sockets.vacant_entry();
|
||||
let token = entry.key();
|
||||
|
||||
// Start listening for incoming connections
|
||||
if let Err(err) = poll.register(
|
||||
&server,
|
||||
mio::Token(token + DELTA),
|
||||
mio::Ready::readable(),
|
||||
mio::PollOpt::edge(),
|
||||
) {
|
||||
panic!("Can not register io: {}", err);
|
||||
}
|
||||
|
||||
entry.insert(ServerSocketInfo {
|
||||
addr,
|
||||
token: hnd_token,
|
||||
sock: server,
|
||||
timeout: None,
|
||||
});
|
||||
}
|
||||
|
||||
// Timer
|
||||
let (tm, tmr) = mio::Registration::new2();
|
||||
if let Err(err) =
|
||||
poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
|
||||
{
|
||||
panic!("Can not register Registration: {}", err);
|
||||
}
|
||||
|
||||
Accept {
|
||||
poll,
|
||||
rx,
|
||||
sockets,
|
||||
workers,
|
||||
srv,
|
||||
next: 0,
|
||||
timer: (tm, tmr),
|
||||
backpressure: false,
|
||||
}
|
||||
Ok((accept, sockets))
|
||||
}
|
||||
|
||||
fn poll(&mut self) {
|
||||
// Create storage for events
|
||||
let mut events = mio::Events::with_capacity(128);
|
||||
/// blocking wait for readiness events triggered by mio
|
||||
fn poll_with(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
let mut events = mio::Events::with_capacity(256);
|
||||
|
||||
loop {
|
||||
if let Err(err) = self.poll.poll(&mut events, None) {
|
||||
panic!("Poll error: {}", err);
|
||||
if let Err(err) = self.poll.poll(&mut events, self.timeout) {
|
||||
match err.kind() {
|
||||
io::ErrorKind::Interrupted => {}
|
||||
_ => panic!("Poll error: {}", err),
|
||||
}
|
||||
}
|
||||
|
||||
for event in events.iter() {
|
||||
let token = event.token();
|
||||
match token {
|
||||
CMD => {
|
||||
if !self.process_cmd() {
|
||||
WAKER_TOKEN => {
|
||||
let exit = self.handle_waker(sockets);
|
||||
if exit {
|
||||
info!("accept thread stopped");
|
||||
return;
|
||||
}
|
||||
}
|
||||
TIMER => self.process_timer(),
|
||||
NOTIFY => self.backpressure(false),
|
||||
_ => {
|
||||
let token = usize::from(token);
|
||||
if token < DELTA {
|
||||
continue;
|
||||
}
|
||||
self.accept(token - DELTA);
|
||||
self.accept(sockets, token);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check for timeout and re-register sockets
|
||||
self.process_timeout(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
fn process_timer(&mut self) {
|
||||
let now = Instant::now();
|
||||
for (token, info) in self.sockets.iter_mut() {
|
||||
if let Some(inst) = info.timeout.take() {
|
||||
if now > inst {
|
||||
if let Err(err) = self.poll.register(
|
||||
&info.sock,
|
||||
mio::Token(token + DELTA),
|
||||
mio::Ready::readable(),
|
||||
mio::PollOpt::edge(),
|
||||
) {
|
||||
error!("Can not register server socket {}", err);
|
||||
} else {
|
||||
info!("Resume accepting connections on {}", info.addr);
|
||||
}
|
||||
} else {
|
||||
info.timeout = Some(inst);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_cmd(&mut self) -> bool {
|
||||
fn handle_waker(&mut self, sockets: &mut [ServerSocketInfo]) -> bool {
|
||||
// This is a loop because the command handling in the previous version was a loop that
// would try to drain the command channel. It's yet unknown whether it's necessary/good
// practice to actively drain the waker queue.
|
||||
loop {
|
||||
match self.rx.try_recv() {
|
||||
Ok(cmd) => match cmd {
|
||||
Command::Pause => {
|
||||
for (_, info) in self.sockets.iter_mut() {
|
||||
if let Err(err) = self.poll.deregister(&info.sock) {
|
||||
error!("Can not deregister server socket {}", err);
|
||||
} else {
|
||||
info!("Paused accepting connections on {}", info.addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
Command::Resume => {
|
||||
for (token, info) in self.sockets.iter() {
|
||||
if let Err(err) = self.poll.register(
|
||||
&info.sock,
|
||||
mio::Token(token + DELTA),
|
||||
mio::Ready::readable(),
|
||||
mio::PollOpt::edge(),
|
||||
) {
|
||||
error!("Can not resume socket accept process: {}", err);
|
||||
} else {
|
||||
info!(
|
||||
"Accepting connections on {} has been resumed",
|
||||
info.addr
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
Command::Stop => {
|
||||
for (_, info) in self.sockets.iter() {
|
||||
let _ = self.poll.deregister(&info.sock);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
Command::Worker(worker) => {
|
||||
self.backpressure(false);
|
||||
self.workers.push(worker);
|
||||
}
|
||||
},
|
||||
Err(err) => match err {
|
||||
sync_mpsc::TryRecvError::Empty => break,
|
||||
sync_mpsc::TryRecvError::Disconnected => {
|
||||
for (_, info) in self.sockets.iter() {
|
||||
let _ = self.poll.deregister(&info.sock);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
// Take guard with every iteration so no new interests can be added until the current
|
||||
// task is done. Take care not to take the guard again inside this loop.
|
||||
let mut guard = self.waker_queue.guard();
|
||||
|
||||
fn backpressure(&mut self, on: bool) {
|
||||
if self.backpressure {
|
||||
if !on {
|
||||
self.backpressure = false;
|
||||
for (token, info) in self.sockets.iter() {
|
||||
if let Err(err) = self.poll.register(
|
||||
&info.sock,
|
||||
mio::Token(token + DELTA),
|
||||
mio::Ready::readable(),
|
||||
mio::PollOpt::edge(),
|
||||
) {
|
||||
error!("Can not resume socket accept process: {}", err);
|
||||
} else {
|
||||
info!("Accepting connections on {} has been resumed", info.addr);
|
||||
#[allow(clippy::significant_drop_in_scrutinee)]
|
||||
match guard.pop_front() {
|
||||
// Worker notified it became available.
|
||||
Some(WakerInterest::WorkerAvailable(idx)) => {
|
||||
drop(guard);
|
||||
|
||||
self.avail.set_available(idx, true);
|
||||
|
||||
if !self.paused {
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if on {
|
||||
self.backpressure = true;
|
||||
for (_, info) in self.sockets.iter() {
|
||||
let _ = self.poll.deregister(&info.sock);
|
||||
|
||||
// A new worker thread has been created so store its handle.
|
||||
Some(WakerInterest::Worker(handle)) => {
|
||||
drop(guard);
|
||||
|
||||
self.avail.set_available(handle.idx(), true);
|
||||
self.handles.push(handle);
|
||||
|
||||
if !self.paused {
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
Some(WakerInterest::Pause) => {
|
||||
drop(guard);
|
||||
|
||||
if !self.paused {
|
||||
self.paused = true;
|
||||
|
||||
self.deregister_all(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
Some(WakerInterest::Resume) => {
|
||||
drop(guard);
|
||||
|
||||
if self.paused {
|
||||
self.paused = false;
|
||||
|
||||
sockets.iter_mut().for_each(|info| {
|
||||
self.register_logged(info);
|
||||
});
|
||||
|
||||
self.accept_all(sockets);
|
||||
}
|
||||
}
|
||||
|
||||
Some(WakerInterest::Stop) => {
|
||||
if !self.paused {
|
||||
self.deregister_all(sockets);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// waker queue is drained
|
||||
None => {
|
||||
// Reset the WakerQueue before break so it does not grow infinitely
|
||||
WakerQueue::reset(&mut guard);
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn accept_one(&mut self, mut msg: Conn) {
|
||||
if self.backpressure {
|
||||
while !self.workers.is_empty() {
|
||||
match self.workers[self.next].send(msg) {
|
||||
Ok(_) => (),
|
||||
Err(tmp) => {
|
||||
self.srv.worker_died(self.workers[self.next].idx);
|
||||
msg = tmp;
|
||||
self.workers.swap_remove(self.next);
|
||||
if self.workers.is_empty() {
|
||||
error!("No workers");
|
||||
return;
|
||||
} else if self.workers.len() <= self.next {
|
||||
self.next = 0;
|
||||
}
|
||||
continue;
|
||||
fn process_timeout(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
// always remove old timeouts
|
||||
if self.timeout.take().is_some() {
|
||||
let now = Instant::now();
|
||||
|
||||
sockets
|
||||
.iter_mut()
|
||||
// Only sockets that had an associated timeout were deregistered.
|
||||
.filter(|info| info.timeout.is_some())
|
||||
.for_each(|info| {
|
||||
let inst = info.timeout.take().unwrap();
|
||||
|
||||
if now < inst {
|
||||
// still timed out; try to set new timeout
|
||||
info.timeout = Some(inst);
|
||||
self.set_timeout(inst - now);
|
||||
} else if !self.paused {
|
||||
// timeout expired; register socket again
|
||||
self.register_logged(info);
|
||||
}
|
||||
}
|
||||
self.next = (self.next + 1) % self.workers.len();
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
let mut idx = 0;
|
||||
while idx < self.workers.len() {
|
||||
idx += 1;
|
||||
if self.workers[self.next].available() {
|
||||
match self.workers[self.next].send(msg) {
|
||||
Ok(_) => {
|
||||
self.next = (self.next + 1) % self.workers.len();
|
||||
return;
|
||||
}
|
||||
Err(tmp) => {
|
||||
self.srv.worker_died(self.workers[self.next].idx);
|
||||
msg = tmp;
|
||||
self.workers.swap_remove(self.next);
|
||||
if self.workers.is_empty() {
|
||||
error!("No workers");
|
||||
self.backpressure(true);
|
||||
return;
|
||||
} else if self.workers.len() <= self.next {
|
||||
self.next = 0;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
self.next = (self.next + 1) % self.workers.len();
|
||||
}
|
||||
// enable backpressure
|
||||
self.backpressure(true);
|
||||
self.accept_one(msg);
|
||||
|
||||
// Drop the timeout if server is paused and socket timeout is expired.
|
||||
// When server recovers from pause it will register all sockets without
|
||||
// a timeout value so this socket register will be delayed till then.
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn accept(&mut self, token: usize) {
|
||||
/// Update accept timeout with `duration` if it is shorter than current timeout.
|
||||
fn set_timeout(&mut self, duration: Duration) {
|
||||
match self.timeout {
|
||||
Some(ref mut timeout) => {
|
||||
if *timeout > duration {
|
||||
*timeout = duration;
|
||||
}
|
||||
}
|
||||
None => self.timeout = Some(duration),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
let token = MioToken(info.token);
|
||||
self.poll
|
||||
.registry()
|
||||
.register(&mut info.lst, token, Interest::READABLE)
|
||||
}
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
fn register(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
|
||||
// On windows, calling register without deregister cause an error.
|
||||
// See https://github.com/actix/actix-web/issues/905
|
||||
// Calling reregister seems to fix the issue.
|
||||
let token = MioToken(info.token);
|
||||
self.poll
|
||||
.registry()
|
||||
.register(&mut info.lst, token, Interest::READABLE)
|
||||
.or_else(|_| {
|
||||
self.poll
|
||||
.registry()
|
||||
.reregister(&mut info.lst, token, Interest::READABLE)
|
||||
})
|
||||
}
|
||||
|
||||
fn register_logged(&self, info: &mut ServerSocketInfo) {
|
||||
match self.register(info) {
|
||||
Ok(_) => debug!("resume accepting connections on {}", info.lst.local_addr()),
|
||||
Err(err) => error!("can not register server socket {}", err),
|
||||
}
|
||||
}
|
||||
|
||||
fn deregister_logged(&self, info: &mut ServerSocketInfo) {
|
||||
match self.poll.registry().deregister(&mut info.lst) {
|
||||
Ok(_) => debug!("paused accepting connections on {}", info.lst.local_addr()),
|
||||
Err(err) => {
|
||||
error!("can not deregister server socket {}", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn deregister_all(&self, sockets: &mut [ServerSocketInfo]) {
|
||||
// This is a best effort implementation with the following limitation:
|
||||
//
|
||||
// Every ServerSocketInfo with an associated timeout will be skipped and its timeout is
|
||||
// removed in the process.
|
||||
//
|
||||
// Therefore WakerInterest::Pause followed by WakerInterest::Resume in a very short gap
|
||||
// (less than 500ms) would cause all timing-out ServerSocketInfos to be re-registered before
|
||||
// expected timing.
|
||||
sockets
|
||||
.iter_mut()
|
||||
// Take all timeouts.
|
||||
// This prevents the Accept::process_timer method from re-registering a socket afterwards.
|
||||
.map(|info| (info.timeout.take(), info))
|
||||
// Socket info with a timeout is already deregistered, so skip it.
|
||||
.filter(|(timeout, _)| timeout.is_none())
|
||||
.for_each(|(_, info)| self.deregister_logged(info));
|
||||
}
|
||||
|
||||
// Send connection to worker and handle error.
|
||||
fn send_connection(&mut self, conn: Conn) -> Result<(), Conn> {
|
||||
let next = self.next();
|
||||
match next.send(conn) {
|
||||
Ok(_) => {
|
||||
// Increment counter of WorkerHandle.
|
||||
// Set worker to unavailable when the counter hits max (inc_counter returns false).
|
||||
if !next.inc_counter() {
|
||||
let idx = next.idx();
|
||||
self.avail.set_available(idx, false);
|
||||
}
|
||||
self.set_next();
|
||||
Ok(())
|
||||
}
|
||||
Err(conn) => {
|
||||
// Worker thread errored and may be gone.
|
||||
// Remove worker handle and notify `ServerBuilder`.
|
||||
self.remove_next();
|
||||
|
||||
if self.handles.is_empty() {
|
||||
error!("no workers");
|
||||
// All workers are gone and Conn is nowhere to be sent.
|
||||
// Treat this situation as Ok and drop Conn.
|
||||
return Ok(());
|
||||
} else if self.handles.len() <= self.next {
|
||||
self.next = 0;
|
||||
}
|
||||
|
||||
Err(conn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn accept_one(&mut self, mut conn: Conn) {
|
||||
loop {
|
||||
let msg = if let Some(info) = self.sockets.get_mut(token) {
|
||||
match info.sock.accept_std() {
|
||||
Ok((io, addr)) => Conn {
|
||||
io,
|
||||
token: info.token,
|
||||
peer: Some(addr),
|
||||
},
|
||||
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
|
||||
Err(ref e) if connection_error(e) => continue,
|
||||
Err(e) => {
|
||||
error!("Error accepting connection: {}", e);
|
||||
if let Err(err) = self.poll.deregister(&info.sock) {
|
||||
error!("Can not deregister server socket {}", err);
|
||||
}
|
||||
let next = self.next();
|
||||
let idx = next.idx();
|
||||
|
||||
// sleep after error
|
||||
info.timeout = Some(Instant::now() + Duration::from_millis(500));
|
||||
|
||||
let r = self.timer.1.clone();
|
||||
System::current().arbiter().send(lazy(move || {
|
||||
Delay::new(Instant::now() + Duration::from_millis(510))
|
||||
.map_err(|_| ())
|
||||
.and_then(move |_| {
|
||||
let _ = r.set_readiness(mio::Ready::readable());
|
||||
Ok(())
|
||||
})
|
||||
}));
|
||||
return;
|
||||
}
|
||||
if self.avail.get_available(idx) {
|
||||
match self.send_connection(conn) {
|
||||
Ok(_) => return,
|
||||
Err(c) => conn = c,
|
||||
}
|
||||
} else {
|
||||
return;
|
||||
};
|
||||
self.avail.set_available(idx, false);
|
||||
self.set_next();
|
||||
|
||||
self.accept_one(msg);
|
||||
if !self.avail.available() {
|
||||
while let Err(c) = self.send_connection(conn) {
|
||||
conn = c;
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn accept(&mut self, sockets: &mut [ServerSocketInfo], token: usize) {
|
||||
while self.avail.available() {
|
||||
let info = &mut sockets[token];
|
||||
|
||||
match info.lst.accept() {
|
||||
Ok(io) => {
|
||||
let conn = Conn { io, token };
|
||||
self.accept_one(conn);
|
||||
}
|
||||
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => return,
|
||||
Err(ref err) if connection_error(err) => continue,
|
||||
Err(err) => {
|
||||
error!("error accepting connection: {}", err);
|
||||
|
||||
// deregister listener temporarily
|
||||
self.deregister_logged(info);
|
||||
|
||||
// sleep after error; write the timeout to the socket info so that the poll
// loop later knows which socket's listener should be re-registered and when
|
||||
info.timeout = Some(Instant::now() + Duration::from_millis(500));
|
||||
self.set_timeout(TIMEOUT_DURATION_ON_ERROR);
|
||||
|
||||
return;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
fn accept_all(&mut self, sockets: &mut [ServerSocketInfo]) {
|
||||
sockets
|
||||
.iter_mut()
|
||||
.map(|info| info.token)
|
||||
.collect::<Vec<_>>()
|
||||
.into_iter()
|
||||
.for_each(|idx| self.accept(sockets, idx))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn next(&self) -> &WorkerHandleAccept {
|
||||
&self.handles[self.next]
|
||||
}
|
||||
|
||||
/// Set next worker handle that would accept connection.
|
||||
#[inline(always)]
|
||||
fn set_next(&mut self) {
|
||||
self.next = (self.next + 1) % self.handles.len();
|
||||
}
|
||||
|
||||
/// Remove the next worker handle that failed to accept a connection.
|
||||
fn remove_next(&mut self) {
|
||||
let handle = self.handles.swap_remove(self.next);
|
||||
let idx = handle.idx();
|
||||
// A message is sent to the `ServerBuilder` future to notify it that a new worker
|
||||
// should be made.
|
||||
self.srv.worker_faulted(idx);
|
||||
self.avail.set_available(idx, false);
|
||||
}
|
||||
}
|
||||
|
||||
/// This function defines errors that are per-connection; if we get this error from the `accept()`
|
||||
/// system call it means the next connection might be ready to be accepted.
|
||||
///
|
||||
/// All other errors will incur a timeout before the next `accept()` call is attempted. The timeout is
|
||||
/// useful to handle resource exhaustion errors like `ENFILE` and `EMFILE`. Otherwise, it could
|
||||
/// enter into a temporary spin loop.
|
||||
fn connection_error(e: &io::Error) -> bool {
|
||||
e.kind() == io::ErrorKind::ConnectionRefused
|
||||
|| e.kind() == io::ErrorKind::ConnectionAborted
|
||||
|| e.kind() == io::ErrorKind::ConnectionReset
|
||||
}
|
||||
|
actix-server/src/availability.rs (new file)
@@ -0,0 +1,121 @@
|
||||
use crate::worker::WorkerHandleAccept;
|
||||
|
||||
/// Array of `u128`s where each bit marks the availability of one worker handle.
|
||||
#[derive(Debug, Default)]
|
||||
pub(crate) struct Availability([u128; 4]);
|
||||
|
||||
impl Availability {
|
||||
/// Check if any worker handle is available
|
||||
#[inline(always)]
|
||||
pub(crate) fn available(&self) -> bool {
|
||||
self.0.iter().any(|a| *a != 0)
|
||||
}
|
||||
|
||||
/// Check if worker handle is available by index
|
||||
#[inline(always)]
|
||||
pub(crate) fn get_available(&self, idx: usize) -> bool {
|
||||
let (offset, idx) = Self::offset(idx);
|
||||
|
||||
self.0[offset] & (1 << idx as u128) != 0
|
||||
}
|
||||
|
||||
/// Set worker handle available state by index.
|
||||
pub(crate) fn set_available(&mut self, idx: usize, avail: bool) {
|
||||
let (offset, idx) = Self::offset(idx);
|
||||
|
||||
let off = 1 << idx as u128;
|
||||
if avail {
|
||||
self.0[offset] |= off;
|
||||
} else {
|
||||
self.0[offset] &= !off
|
||||
}
|
||||
}
|
||||
|
||||
/// Set all worker handles to available state.
|
||||
/// This would result in a re-check on all workers' availability.
|
||||
pub(crate) fn set_available_all(&mut self, handles: &[WorkerHandleAccept]) {
|
||||
handles.iter().for_each(|handle| {
|
||||
self.set_available(handle.idx(), true);
|
||||
})
|
||||
}
|
||||
|
||||
/// Get offset and adjusted index of given worker handle index.
|
||||
pub(crate) fn offset(idx: usize) -> (usize, usize) {
|
||||
if idx < 128 {
|
||||
(0, idx)
|
||||
} else if idx < 128 * 2 {
|
||||
(1, idx - 128)
|
||||
} else if idx < 128 * 3 {
|
||||
(2, idx - 128 * 2)
|
||||
} else if idx < 128 * 4 {
|
||||
(3, idx - 128 * 3)
|
||||
} else {
|
||||
panic!("Max WorkerHandle count is 512")
|
||||
}
|
||||
}
|
||||
}
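// Hedged usage sketch (not part of the diff above) of how the accept loop is expected to
// drive `Availability`: set a worker's bit while it has spare capacity, clear it when the
// per-worker connection counter saturates, and consult `available()` before dispatching.
// The worker indices below are arbitrary examples.
fn availability_demo() {
    let mut avail = Availability::default();

    // Workers 0 and 130 come online; 130 lands in the second u128 (offset 1, bit 2).
    avail.set_available(0, true);
    avail.set_available(130, true);
    assert!(avail.available());
    assert!(avail.get_available(130));

    // Worker 130 hits its connection limit; worker 0 is still usable.
    avail.set_available(130, false);
    assert!(avail.get_available(0));
}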
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn single(aval: &mut Availability, idx: usize) {
|
||||
aval.set_available(idx, true);
|
||||
assert!(aval.available());
|
||||
|
||||
aval.set_available(idx, true);
|
||||
|
||||
aval.set_available(idx, false);
|
||||
assert!(!aval.available());
|
||||
|
||||
aval.set_available(idx, false);
|
||||
assert!(!aval.available());
|
||||
}
|
||||
|
||||
fn multi(aval: &mut Availability, mut idx: Vec<usize>) {
|
||||
idx.iter().for_each(|idx| aval.set_available(*idx, true));
|
||||
|
||||
assert!(aval.available());
|
||||
|
||||
while let Some(idx) = idx.pop() {
|
||||
assert!(aval.available());
|
||||
aval.set_available(idx, false);
|
||||
}
|
||||
|
||||
assert!(!aval.available());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn availability() {
|
||||
let mut aval = Availability::default();
|
||||
|
||||
single(&mut aval, 1);
|
||||
single(&mut aval, 128);
|
||||
single(&mut aval, 256);
|
||||
single(&mut aval, 511);
|
||||
|
||||
let idx = (0..511).filter(|i| i % 3 == 0 && i % 5 == 0).collect();
|
||||
|
||||
multi(&mut aval, idx);
|
||||
|
||||
multi(&mut aval, (0..511).collect())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn overflow() {
|
||||
let mut aval = Availability::default();
|
||||
single(&mut aval, 512);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pin_point() {
|
||||
let mut aval = Availability::default();
|
||||
|
||||
aval.set_available(438, true);
|
||||
|
||||
aval.set_available(479, true);
|
||||
|
||||
assert_eq!(aval.0[3], 1 << (438 - 384) | 1 << (479 - 384));
|
||||
}
|
||||
}
|
@@ -1,38 +1,47 @@
|
||||
use std::time::Duration;
|
||||
use std::{io, mem, net};
|
||||
use std::{io, num::NonZeroUsize, time::Duration};
|
||||
|
||||
use actix_rt::{spawn, Arbiter, System};
|
||||
use futures::future::{lazy, ok};
|
||||
use futures::stream::futures_unordered;
|
||||
use futures::sync::mpsc::{unbounded, UnboundedReceiver};
|
||||
use futures::{Async, Future, Poll, Stream};
|
||||
use log::{error, info};
|
||||
use net2::TcpBuilder;
|
||||
use num_cpus;
|
||||
use tokio_timer::sleep;
|
||||
use actix_rt::net::TcpStream;
|
||||
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
|
||||
|
||||
use crate::accept::{AcceptLoop, AcceptNotify, Command};
|
||||
use crate::config::{ConfiguredService, ServiceConfig};
|
||||
use crate::server::{Server, ServerCommand};
|
||||
use crate::services::{InternalServiceFactory, ServiceFactory, StreamNewService};
|
||||
use crate::signals::{Signal, Signals};
|
||||
use crate::worker::{self, Worker, WorkerAvailability, WorkerClient};
|
||||
use crate::{ssl, Token};
|
||||
use crate::{
|
||||
server::ServerCommand,
|
||||
service::{InternalServiceFactory, ServerServiceFactory, StreamNewService},
|
||||
socket::{create_mio_tcp_listener, MioListener, MioTcpListener, StdTcpListener, ToSocketAddrs},
|
||||
worker::ServerWorkerConfig,
|
||||
Server,
|
||||
};
|
||||
|
||||
/// Server builder
|
||||
/// Multipath TCP (MPTCP) preference.
|
||||
///
|
||||
/// Currently only useful on Linux.
|
||||
///
|
||||
#[cfg_attr(target_os = "linux", doc = "Also see [`ServerBuilder::mptcp()`].")]
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum MpTcp {
|
||||
/// MPTCP will not be used when binding sockets.
|
||||
Disabled,
|
||||
|
||||
/// MPTCP will be attempted when binding sockets. If errors occur, regular TCP will be
|
||||
/// attempted, too.
|
||||
TcpFallback,
|
||||
|
||||
/// MPTCP will be used when binding sockets (with no fallback).
|
||||
NoFallback,
|
||||
}
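// Hedged usage sketch (not part of the diff): the MPTCP preference is applied through
// `ServerBuilder::mptcp()`, shown later in this diff; the method is Linux-only.
#[cfg(target_os = "linux")]
fn prefer_mptcp(builder: ServerBuilder) -> ServerBuilder {
    // Try MPTCP first and fall back to plain TCP if the kernel refuses it.
    builder.mptcp(MpTcp::TcpFallback)
}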
|
||||
|
||||
/// [Server] builder.
|
||||
pub struct ServerBuilder {
|
||||
threads: usize,
|
||||
token: Token,
|
||||
backlog: i32,
|
||||
workers: Vec<(usize, WorkerClient)>,
|
||||
services: Vec<Box<InternalServiceFactory>>,
|
||||
sockets: Vec<(Token, net::TcpListener)>,
|
||||
accept: AcceptLoop,
|
||||
exit: bool,
|
||||
shutdown_timeout: Duration,
|
||||
no_signals: bool,
|
||||
cmd: UnboundedReceiver<ServerCommand>,
|
||||
server: Server,
|
||||
pub(crate) threads: usize,
|
||||
pub(crate) token: usize,
|
||||
pub(crate) backlog: u32,
|
||||
pub(crate) factories: Vec<Box<dyn InternalServiceFactory>>,
|
||||
pub(crate) sockets: Vec<(usize, String, MioListener)>,
|
||||
pub(crate) mptcp: MpTcp,
|
||||
pub(crate) exit: bool,
|
||||
pub(crate) listen_os_signals: bool,
|
||||
pub(crate) cmd_tx: UnboundedSender<ServerCommand>,
|
||||
pub(crate) cmd_rx: UnboundedReceiver<ServerCommand>,
|
||||
pub(crate) worker_config: ServerWorkerConfig,
|
||||
}
|
||||
|
||||
impl Default for ServerBuilder {
|
||||
@@ -44,409 +53,326 @@ impl Default for ServerBuilder {
|
||||
impl ServerBuilder {
|
||||
/// Create new Server builder instance
|
||||
pub fn new() -> ServerBuilder {
|
||||
let (tx, rx) = unbounded();
|
||||
let server = Server::new(tx);
|
||||
let (cmd_tx, cmd_rx) = unbounded_channel();
|
||||
|
||||
ServerBuilder {
|
||||
threads: num_cpus::get(),
|
||||
token: Token(0),
|
||||
workers: Vec::new(),
|
||||
services: Vec::new(),
|
||||
threads: std::thread::available_parallelism().map_or(2, NonZeroUsize::get),
|
||||
token: 0,
|
||||
factories: Vec::new(),
|
||||
sockets: Vec::new(),
|
||||
accept: AcceptLoop::new(server.clone()),
|
||||
backlog: 2048,
|
||||
mptcp: MpTcp::Disabled,
|
||||
exit: false,
|
||||
shutdown_timeout: Duration::from_secs(30),
|
||||
no_signals: false,
|
||||
cmd: rx,
|
||||
server,
|
||||
listen_os_signals: true,
|
||||
cmd_tx,
|
||||
cmd_rx,
|
||||
worker_config: ServerWorkerConfig::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set number of workers to start.
|
||||
/// Sets number of workers to start.
|
||||
///
|
||||
/// By default the server uses the number of available logical CPUs as the worker
|
||||
/// count.
|
||||
/// See [`bind()`](Self::bind()) for more details on how worker count affects the number of
|
||||
/// server factory instantiations.
|
||||
///
|
||||
/// The default worker count is the determined by [`std::thread::available_parallelism()`]. See
|
||||
/// its documentation to determine what behavior you should expect when the server is run.
|
||||
///
|
||||
/// `num` must be greater than 0.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if `num` is 0.
|
||||
pub fn workers(mut self, num: usize) -> Self {
|
||||
assert_ne!(num, 0, "workers must be greater than 0");
|
||||
self.threads = num;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set max number of threads for each worker's blocking task thread pool.
|
||||
///
|
||||
/// One thread pool is set up **per worker**; not shared across workers.
|
||||
///
|
||||
/// # Examples:
|
||||
/// ```
|
||||
/// # use actix_server::ServerBuilder;
|
||||
/// let builder = ServerBuilder::new()
|
||||
///     .workers(4) // server has 4 worker threads.
|
||||
/// .worker_max_blocking_threads(4); // every worker has 4 max blocking threads.
|
||||
/// ```
|
||||
///
|
||||
/// See [tokio::runtime::Builder::max_blocking_threads] for behavior reference.
|
||||
pub fn worker_max_blocking_threads(mut self, num: usize) -> Self {
|
||||
self.worker_config.max_blocking_threads(num);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the maximum number of pending connections.
|
||||
///
|
||||
/// This refers to the number of clients that can be waiting to be served.
|
||||
/// Exceeding this number results in the client getting an error when
|
||||
/// attempting to connect. It should only affect servers under significant
|
||||
/// load.
|
||||
/// This refers to the number of clients that can be waiting to be served. Exceeding this number
|
||||
/// results in the client getting an error when attempting to connect. It should only affect
|
||||
/// servers under significant load.
|
||||
///
|
||||
/// Generally set in the 64-2048 range. Default value is 2048.
|
||||
///
|
||||
/// This method should be called before `bind()` is called.
|
||||
pub fn backlog(mut self, num: i32) -> Self {
|
||||
pub fn backlog(mut self, num: u32) -> Self {
|
||||
self.backlog = num;
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets MultiPath TCP (MPTCP) preference on bound sockets.
|
||||
///
|
||||
/// Multipath TCP (MPTCP) builds on top of TCP to improve connection redundancy and performance
|
||||
/// by sharing a network data stream across multiple underlying TCP sessions. See [mptcp.dev]
|
||||
/// for more info about MPTCP itself.
|
||||
///
|
||||
/// MPTCP is available on Linux kernel version 5.6 and higher. In addition, you'll also need to
|
||||
/// ensure the kernel option is enabled using `sysctl net.mptcp.enabled=1`.
|
||||
///
|
||||
/// This method will have no effect if called after a `bind()`.
|
||||
///
|
||||
/// [mptcp.dev]: https://www.mptcp.dev
|
||||
#[cfg(target_os = "linux")]
|
||||
pub fn mptcp(mut self, mptcp_enabled: MpTcp) -> Self {
|
||||
self.mptcp = mptcp_enabled;
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the maximum per-worker number of concurrent connections.
|
||||
///
|
||||
/// All socket listeners will stop accepting connections when this limit is
|
||||
/// reached for each worker.
|
||||
/// All socket listeners will stop accepting connections when this limit is reached for
|
||||
/// each worker.
|
||||
///
|
||||
/// By default, max connections is set to 25k per worker.
|
||||
pub fn max_concurrent_connections(mut self, num: usize) -> Self {
|
||||
self.worker_config.max_concurrent_connections(num);
|
||||
self
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "2.0.0", note = "Renamed to `max_concurrent_connections`.")]
|
||||
pub fn maxconn(self, num: usize) -> Self {
|
||||
worker::max_concurrent_connections(num);
|
||||
self
|
||||
self.max_concurrent_connections(num)
|
||||
}
|
||||
|
||||
/// Sets the maximum per-worker concurrent connection establish process.
|
||||
/// Sets flag to stop Actix `System` after server shutdown.
|
||||
///
|
||||
/// All listeners will stop accepting connections when this limit is reached. It
|
||||
/// can be used to limit the global SSL CPU usage.
|
||||
///
|
||||
/// By default, the limit is set to 256.
|
||||
pub fn maxconnrate(self, num: usize) -> Self {
|
||||
ssl::max_concurrent_ssl_connect(num);
|
||||
self
|
||||
}
|
||||
|
||||
/// Stop actix system.
|
||||
/// This has no effect when server is running in a Tokio-only runtime.
|
||||
pub fn system_exit(mut self) -> Self {
|
||||
self.exit = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Disable signal handling
|
||||
/// Disables OS signal handling.
|
||||
pub fn disable_signals(mut self) -> Self {
|
||||
self.no_signals = true;
|
||||
self.listen_os_signals = false;
|
||||
self
|
||||
}
|
||||
|
||||
/// Timeout for graceful worker shutdown, in seconds.
|
||||
///
|
||||
/// After receiving a stop signal, workers have this much time to finish
|
||||
/// serving requests. Workers still alive after the timeout are force
|
||||
/// dropped.
|
||||
/// After receiving a stop signal, workers have this much time to finish serving requests.
|
||||
/// Workers still alive after the timeout are force dropped.
|
||||
///
|
||||
/// By default, the shutdown timeout is set to 30 seconds.
|
||||
pub fn shutdown_timeout(mut self, sec: u16) -> Self {
|
||||
self.shutdown_timeout = Duration::from_secs(u64::from(sec));
|
||||
pub fn shutdown_timeout(mut self, sec: u64) -> Self {
|
||||
self.worker_config
|
||||
.shutdown_timeout(Duration::from_secs(sec));
|
||||
self
|
||||
}
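// Hedged sketch (not part of the diff) chaining the configuration methods above;
// `Server::build()` is assumed to be the public entry point and the values are arbitrary
// examples, not recommendations.
fn configure_builder() -> ServerBuilder {
    Server::build()
        .workers(4)
        .backlog(1024)
        .max_concurrent_connections(10_000)
        .shutdown_timeout(60)
        .disable_signals()
}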
|
||||
|
||||
/// Execute external configuration as part of the server building
|
||||
/// process.
|
||||
/// Adds new service to the server.
|
||||
///
|
||||
/// This function is useful for moving parts of configuration to a
|
||||
/// different module or even library.
|
||||
pub fn configure<F>(mut self, f: F) -> io::Result<ServerBuilder>
|
||||
/// Note that, if a DNS lookup is required, resolving hostnames is a blocking operation.
|
||||
///
|
||||
/// # Worker Count
|
||||
///
|
||||
/// The `factory` will be instantiated multiple times in most scenarios. The number of
|
||||
/// instantiations is number of [`workers`](Self::workers()) × number of sockets resolved by
|
||||
/// `addrs`.
|
||||
///
|
||||
/// For example, if you've manually set [`workers`](Self::workers()) to 2, and use `127.0.0.1`
|
||||
/// as the bind `addrs`, then `factory` will be instantiated twice. However, using `localhost`
|
||||
/// as the bind `addrs` can often resolve to both `127.0.0.1` (IPv4) _and_ `::1` (IPv6), causing
|
||||
/// the `factory` to be instantiated 4 times (2 workers × 2 bind addresses).
|
||||
///
|
||||
/// Using a bind address of `0.0.0.0`, which signals to use all interfaces, may also multiply
|
||||
/// the number of instantiations in a similar way.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an `io::Error` if:
|
||||
/// - `addrs` cannot be resolved into one or more socket addresses;
|
||||
/// - all the resolved socket addresses are already bound.
|
||||
pub fn bind<F, U, N>(mut self, name: N, addrs: U, factory: F) -> io::Result<Self>
|
||||
where
|
||||
F: Fn(&mut ServiceConfig) -> io::Result<()>,
|
||||
F: ServerServiceFactory<TcpStream>,
|
||||
U: ToSocketAddrs,
|
||||
N: AsRef<str>,
|
||||
{
|
||||
let mut cfg = ServiceConfig::new(self.threads, self.backlog);
|
||||
let sockets = bind_addr(addrs, self.backlog, &self.mptcp)?;
|
||||
|
||||
f(&mut cfg)?;
|
||||
|
||||
if let Some(apply) = cfg.apply {
|
||||
let mut srv = ConfiguredService::new(apply);
|
||||
for (name, lst) in cfg.services {
|
||||
let token = self.token.next();
|
||||
srv.stream(token, name, lst.local_addr()?);
|
||||
self.sockets.push((token, lst));
|
||||
}
|
||||
self.services.push(Box::new(srv));
|
||||
}
|
||||
self.threads = cfg.threads;
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add new service to the server.
|
||||
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
|
||||
where
|
||||
F: ServiceFactory,
|
||||
U: net::ToSocketAddrs,
|
||||
{
|
||||
let sockets = bind_addr(addr, self.backlog)?;
|
||||
tracing::trace!("binding server to: {sockets:?}");
|
||||
|
||||
for lst in sockets {
|
||||
let token = self.token.next();
|
||||
self.services.push(StreamNewService::create(
|
||||
let token = self.next_token();
|
||||
|
||||
self.factories.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory.clone(),
|
||||
lst.local_addr()?,
|
||||
));
|
||||
self.sockets.push((token, lst));
|
||||
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
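// Hedged end-to-end sketch (not part of the diff) of `bind()` with a closure-based
// factory; `Server::build()` and `actix_service::fn_service` are assumed to be the public
// entry points, and the address and worker count are arbitrary examples.
async fn run_example_server() -> io::Result<()> {
    Server::build()
        .bind("example", ("127.0.0.1", 8080), || {
            actix_service::fn_service(|_stream: TcpStream| async move {
                // A real service would read from / write to the stream here.
                Ok::<_, io::Error>(())
            })
        })?
        .workers(2)
        .run()
        .await
}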
|
||||
|
||||
/// Add new service to the server.
|
||||
/// Adds service to the server using a socket listener already bound.
|
||||
///
|
||||
/// # Worker Count
|
||||
///
|
||||
/// The `factory` will be instantiated multiple times in most scenarios. The number of
|
||||
/// instantiations is: number of [`workers`](Self::workers()).
|
||||
pub fn listen<F, N: AsRef<str>>(
|
||||
mut self,
|
||||
name: N,
|
||||
lst: net::TcpListener,
|
||||
lst: StdTcpListener,
|
||||
factory: F,
|
||||
) -> io::Result<Self>
|
||||
where
|
||||
F: ServiceFactory,
|
||||
F: ServerServiceFactory<TcpStream>,
|
||||
{
|
||||
let token = self.token.next();
|
||||
self.services.push(StreamNewService::create(
|
||||
lst.set_nonblocking(true)?;
|
||||
let addr = lst.local_addr()?;
|
||||
|
||||
let token = self.next_token();
|
||||
self.factories.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory,
|
||||
lst.local_addr()?,
|
||||
addr,
|
||||
));
|
||||
self.sockets.push((token, lst));
|
||||
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
|
||||
|
||||
Ok(self)
|
||||
}
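// Hedged sketch (not part of the diff): `listen()` takes a listener bound elsewhere, for
// example by the standard library or inherited from a supervisor; the factory shape is
// the same as for `bind()` and the names here are arbitrary.
fn listen_on_prebound_socket() -> io::Result<Server> {
    let lst = StdTcpListener::bind(("127.0.0.1", 0))?;

    let srv = Server::build()
        .listen("prebound", lst, || {
            actix_service::fn_service(|_stream: TcpStream| async move { Ok::<_, io::Error>(()) })
        })?
        .run();

    Ok(srv)
}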
|
||||
|
||||
/// Spawn new thread and start listening for incoming connections.
|
||||
///
|
||||
/// This method spawns a new thread and starts a new actix system. Other than
|
||||
/// that it is similar to `start()` method. This method blocks.
|
||||
///
|
||||
/// This method panics if no socket addresses get bound.
|
||||
///
|
||||
/// ```rust,ignore
|
||||
/// use actix_web::*;
|
||||
///
|
||||
/// fn main() -> std::io::Result<()> {
|
||||
/// Server::new()
///     .service(
///         HttpServer::new(|| App::new().service(web::service("/").to(|| HttpResponse::Ok())))
///             .bind("127.0.0.1:0")?,
///     )
///     .run()
/// }
|
||||
/// ```
|
||||
pub fn run(self) -> io::Result<()> {
|
||||
let sys = System::new("http-server");
|
||||
self.start();
|
||||
sys.run()
|
||||
}
|
||||
|
||||
/// Starts processing incoming connections and returns the server controller.
|
||||
pub fn start(mut self) -> Server {
|
||||
pub fn run(self) -> Server {
|
||||
if self.sockets.is_empty() {
|
||||
panic!("Server should have at least one bound socket");
|
||||
} else {
|
||||
info!("Starting {} workers", self.threads);
|
||||
|
||||
// start workers
|
||||
let mut workers = Vec::new();
|
||||
for idx in 0..self.threads {
|
||||
let worker = self.start_worker(idx, self.accept.get_notify());
|
||||
workers.push(worker.clone());
|
||||
self.workers.push((idx, worker));
|
||||
}
|
||||
|
||||
// start accept thread
|
||||
for sock in &self.sockets {
|
||||
info!("Starting server on {}", sock.1.local_addr().ok().unwrap());
|
||||
}
|
||||
self.accept
|
||||
.start(mem::replace(&mut self.sockets, Vec::new()), workers);
|
||||
|
||||
// handle signals
|
||||
if !self.no_signals {
|
||||
Signals::start(self.server.clone());
|
||||
}
|
||||
|
||||
// start http server actor
|
||||
let server = self.server.clone();
|
||||
spawn(self);
|
||||
server
|
||||
tracing::info!("starting {} workers", self.threads);
|
||||
Server::new(self)
|
||||
}
|
||||
}
|
||||
|
||||
fn start_worker(&self, idx: usize, notify: AcceptNotify) -> WorkerClient {
|
||||
let (tx1, rx1) = unbounded();
|
||||
let (tx2, rx2) = unbounded();
|
||||
let timeout = self.shutdown_timeout;
|
||||
let avail = WorkerAvailability::new(notify);
|
||||
let worker = WorkerClient::new(idx, tx1, tx2, avail.clone());
|
||||
let services: Vec<Box<InternalServiceFactory>> =
|
||||
self.services.iter().map(|v| v.clone_factory()).collect();
|
||||
|
||||
Arbiter::new().send(lazy(move || {
|
||||
Worker::start(rx1, rx2, services, avail, timeout);
|
||||
Ok::<_, ()>(())
|
||||
}));
|
||||
|
||||
worker
|
||||
}
|
||||
|
||||
fn handle_cmd(&mut self, item: ServerCommand) {
|
||||
match item {
|
||||
ServerCommand::Pause(tx) => {
|
||||
self.accept.send(Command::Pause);
|
||||
let _ = tx.send(());
|
||||
}
|
||||
ServerCommand::Resume(tx) => {
|
||||
self.accept.send(Command::Resume);
|
||||
let _ = tx.send(());
|
||||
}
|
||||
ServerCommand::Signal(sig) => {
|
||||
// Signals support
|
||||
// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
|
||||
match sig {
|
||||
Signal::Int => {
|
||||
info!("SIGINT received, exiting");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
Signal::Term => {
|
||||
info!("SIGTERM received, stopping");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: true,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
Signal::Quit => {
|
||||
info!("SIGQUIT received, exiting");
|
||||
self.exit = true;
|
||||
self.handle_cmd(ServerCommand::Stop {
|
||||
graceful: false,
|
||||
completion: None,
|
||||
})
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
ServerCommand::Stop {
|
||||
graceful,
|
||||
completion,
|
||||
} => {
|
||||
let exit = self.exit;
|
||||
|
||||
// stop accept thread
|
||||
self.accept.send(Command::Stop);
|
||||
|
||||
// stop workers
|
||||
if !self.workers.is_empty() {
|
||||
spawn(
|
||||
futures_unordered(
|
||||
self.workers
|
||||
.iter()
|
||||
.map(move |worker| worker.1.stop(graceful)),
|
||||
)
|
||||
.collect()
|
||||
.then(move |_| {
|
||||
if let Some(tx) = completion {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
if exit {
|
||||
spawn(sleep(Duration::from_millis(300)).then(|_| {
|
||||
System::current().stop();
|
||||
ok(())
|
||||
}));
|
||||
}
|
||||
ok(())
|
||||
}),
|
||||
)
|
||||
} else {
|
||||
// we need to stop system if server was spawned
|
||||
if self.exit {
|
||||
spawn(sleep(Duration::from_millis(300)).then(|_| {
|
||||
System::current().stop();
|
||||
ok(())
|
||||
}));
|
||||
}
|
||||
if let Some(tx) = completion {
|
||||
let _ = tx.send(());
|
||||
}
|
||||
}
|
||||
}
|
||||
ServerCommand::WorkerDied(idx) => {
|
||||
let mut found = false;
|
||||
for i in 0..self.workers.len() {
|
||||
if self.workers[i].0 == idx {
|
||||
self.workers.swap_remove(i);
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
error!("Worker has died {:?}, restarting", idx);
|
||||
|
||||
let mut new_idx = self.workers.len();
|
||||
'found: loop {
|
||||
for i in 0..self.workers.len() {
|
||||
if self.workers[i].0 == new_idx {
|
||||
new_idx += 1;
|
||||
continue 'found;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
let worker = self.start_worker(new_idx, self.accept.get_notify());
|
||||
self.workers.push((new_idx, worker.clone()));
|
||||
self.accept.send(Command::Worker(worker));
|
||||
}
|
||||
}
|
||||
}
|
||||
fn next_token(&mut self) -> usize {
|
||||
let token = self.token;
|
||||
self.token += 1;
|
||||
token
|
||||
}
|
||||
}
|
||||
|
||||
impl Future for ServerBuilder {
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
loop {
|
||||
match self.cmd.poll() {
|
||||
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Ok(Async::Ready(Some(item))) => self.handle_cmd(item),
|
||||
#[cfg(unix)]
|
||||
impl ServerBuilder {
|
||||
/// Adds new service to the server using a UDS (unix domain socket) address.
|
||||
///
|
||||
/// # Worker Count
|
||||
///
|
||||
/// The `factory` will be instantiated multiple times in most scenarios. The number of
|
||||
/// instantiations is: number of [`workers`](Self::workers()).
|
||||
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
|
||||
where
|
||||
F: ServerServiceFactory<actix_rt::net::UnixStream>,
|
||||
N: AsRef<str>,
|
||||
U: AsRef<std::path::Path>,
|
||||
{
|
||||
// The path must not exist when we try to bind.
|
||||
// Try to remove it to avoid bind error.
|
||||
if let Err(err) = std::fs::remove_file(addr.as_ref()) {
|
||||
// NotFound is expected and not an issue. Anything else is.
|
||||
if err.kind() != std::io::ErrorKind::NotFound {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
|
||||
let lst = crate::socket::StdUnixListener::bind(addr)?;
|
||||
self.listen_uds(name, lst, factory)
|
||||
}
|
||||
|
||||
/// Adds new service to the server using a UDS (unix domain socket) listener already bound.
|
||||
///
|
||||
/// Useful when running as a systemd service and a socket FD is acquired externally.
|
||||
///
|
||||
/// # Worker Count
|
||||
///
|
||||
/// The `factory` will be instantiated multiple times in most scenarios. The number of
|
||||
/// instantiations is: number of [`workers`](Self::workers()).
|
||||
pub fn listen_uds<F, N: AsRef<str>>(
|
||||
mut self,
|
||||
name: N,
|
||||
lst: crate::socket::StdUnixListener,
|
||||
factory: F,
|
||||
) -> io::Result<Self>
|
||||
where
|
||||
F: ServerServiceFactory<actix_rt::net::UnixStream>,
|
||||
{
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
|
||||
lst.set_nonblocking(true)?;
|
||||
|
||||
let token = self.next_token();
|
||||
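// UDS listeners have no TCP socket address; a placeholder loopback address is stored so
// the service factory API (which expects a `SocketAddr`) can still be satisfied.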
let addr = crate::socket::StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
|
||||
|
||||
self.factories.push(StreamNewService::create(
|
||||
name.as_ref().to_string(),
|
||||
token,
|
||||
factory,
|
||||
addr,
|
||||
));
|
||||
|
||||
self.sockets
|
||||
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
}
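// Hedged sketch (not part of the diff, Unix-only): binding a service on a UDS path; the
// socket path is an arbitrary example.
#[cfg(unix)]
fn bind_unix_service() -> io::Result<ServerBuilder> {
    Server::build().bind_uds("uds-example", "/tmp/actix-example.sock", || {
        actix_service::fn_service(|_stream: actix_rt::net::UnixStream| async move {
            Ok::<_, io::Error>(())
        })
    })
}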
|
||||
|
||||
pub(super) fn bind_addr<S: net::ToSocketAddrs>(
|
||||
pub(super) fn bind_addr<S: ToSocketAddrs>(
|
||||
addr: S,
|
||||
backlog: i32,
|
||||
) -> io::Result<Vec<net::TcpListener>> {
|
||||
let mut err = None;
|
||||
let mut succ = false;
|
||||
backlog: u32,
|
||||
mptcp: &MpTcp,
|
||||
) -> io::Result<Vec<MioTcpListener>> {
|
||||
let mut opt_err = None;
|
||||
let mut success = false;
|
||||
let mut sockets = Vec::new();
|
||||
|
||||
for addr in addr.to_socket_addrs()? {
|
||||
match create_tcp_listener(addr, backlog) {
|
||||
match create_mio_tcp_listener(addr, backlog, mptcp) {
|
||||
Ok(lst) => {
|
||||
succ = true;
|
||||
success = true;
|
||||
sockets.push(lst);
|
||||
}
|
||||
Err(e) => err = Some(e),
|
||||
Err(err) => opt_err = Some(err),
|
||||
}
|
||||
}
|
||||
|
||||
if !succ {
|
||||
if let Some(e) = err.take() {
|
||||
Err(e)
|
||||
} else {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Can not bind to address.",
|
||||
))
|
||||
}
|
||||
} else {
|
||||
if success {
|
||||
Ok(sockets)
|
||||
} else if let Some(err) = opt_err.take() {
|
||||
Err(err)
|
||||
} else {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Can not bind to address.",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn create_tcp_listener(addr: net::SocketAddr, backlog: i32) -> io::Result<net::TcpListener> {
|
||||
let builder = match addr {
|
||||
net::SocketAddr::V4(_) => TcpBuilder::new_v4()?,
|
||||
net::SocketAddr::V6(_) => TcpBuilder::new_v6()?,
|
||||
};
|
||||
builder.reuse_address(true)?;
|
||||
builder.bind(addr)?;
|
||||
Ok(builder.listen(backlog)?)
|
||||
}
|
||||
|
@@ -1,275 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::{fmt, io, net};
|
||||
|
||||
use actix_server_config::{Io, ServerConfig};
|
||||
use actix_service::{IntoNewService, NewService};
|
||||
use futures::future::{join_all, Future};
|
||||
use log::error;
|
||||
use tokio_tcp::TcpStream;
|
||||
|
||||
use crate::counter::CounterGuard;
|
||||
|
||||
use super::builder::bind_addr;
|
||||
use super::services::{
|
||||
BoxedServerService, InternalServiceFactory, ServerMessage, StreamService,
|
||||
};
|
||||
use super::Token;
|
||||
|
||||
pub struct ServiceConfig {
|
||||
pub(crate) services: Vec<(String, net::TcpListener)>,
|
||||
pub(crate) apply: Option<Box<ServiceRuntimeConfiguration>>,
|
||||
pub(crate) threads: usize,
|
||||
pub(crate) backlog: i32,
|
||||
}
|
||||
|
||||
impl ServiceConfig {
|
||||
pub(super) fn new(threads: usize, backlog: i32) -> ServiceConfig {
|
||||
ServiceConfig {
|
||||
threads,
|
||||
backlog,
|
||||
services: Vec::new(),
|
||||
apply: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set number of workers to start.
|
||||
///
|
||||
/// By default the server uses the number of available logical CPUs as the worker
|
||||
/// count.
|
||||
pub fn workers(&mut self, num: usize) {
|
||||
self.threads = num;
|
||||
}
|
||||
|
||||
/// Add new service to server
|
||||
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
|
||||
where
|
||||
U: net::ToSocketAddrs,
|
||||
{
|
||||
let sockets = bind_addr(addr, self.backlog)?;
|
||||
|
||||
for lst in sockets {
|
||||
self.listen(name.as_ref(), lst);
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Add new service to server
|
||||
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: net::TcpListener) -> &mut Self {
|
||||
if self.apply.is_none() {
|
||||
self.apply = Some(Box::new(not_configured));
|
||||
}
|
||||
self.services.push((name.as_ref().to_string(), lst));
|
||||
self
|
||||
}
|
||||
|
||||
/// Register service configuration function. This function gets called
|
||||
/// during worker runtime configuration. It is executed in the worker thread.
|
||||
pub fn apply<F>(&mut self, f: F) -> io::Result<()>
|
||||
where
|
||||
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
|
||||
{
|
||||
self.apply = Some(Box::new(f));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) struct ConfiguredService {
|
||||
rt: Box<ServiceRuntimeConfiguration>,
|
||||
names: HashMap<Token, (String, net::SocketAddr)>,
|
||||
services: HashMap<String, Token>,
|
||||
}
|
||||
|
||||
impl ConfiguredService {
|
||||
pub(super) fn new(rt: Box<ServiceRuntimeConfiguration>) -> Self {
|
||||
ConfiguredService {
|
||||
rt,
|
||||
names: HashMap::new(),
|
||||
services: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn stream(&mut self, token: Token, name: String, addr: net::SocketAddr) {
|
||||
self.names.insert(token, (name.clone(), addr));
|
||||
self.services.insert(name, token);
|
||||
}
|
||||
}
|
||||
|
||||
impl InternalServiceFactory for ConfiguredService {
|
||||
fn name(&self, token: Token) -> &str {
|
||||
&self.names[&token].0
|
||||
}
|
||||
|
||||
fn clone_factory(&self) -> Box<InternalServiceFactory> {
|
||||
Box::new(Self {
|
||||
rt: self.rt.clone(),
|
||||
names: self.names.clone(),
|
||||
services: self.services.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
fn create(&self) -> Box<Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
|
||||
// configure services
|
||||
let mut rt = ServiceRuntime::new(self.services.clone());
|
||||
self.rt.configure(&mut rt);
|
||||
rt.validate();
|
||||
|
||||
let services = rt.services;
|
||||
|
||||
// on start futures
|
||||
if rt.onstart.is_empty() {
|
||||
// construct services
|
||||
let mut fut = Vec::new();
|
||||
for (token, ns) in services {
|
||||
let config = ServerConfig::new(self.names[&token].1);
|
||||
fut.push(ns.new_service(&config).map(move |service| (token, service)));
|
||||
}
|
||||
|
||||
Box::new(join_all(fut).map_err(|e| {
|
||||
error!("Can not construct service: {:?}", e);
|
||||
}))
|
||||
} else {
|
||||
let names = self.names.clone();
|
||||
|
||||
// run onstart future and then construct services
|
||||
Box::new(
|
||||
join_all(rt.onstart)
|
||||
.map_err(|e| {
|
||||
error!("Can not construct service: {:?}", e);
|
||||
})
|
||||
.and_then(move |_| {
|
||||
// construct services
|
||||
let mut fut = Vec::new();
|
||||
for (token, ns) in services {
|
||||
let config = ServerConfig::new(names[&token].1);
|
||||
fut.push(
|
||||
ns.new_service(&config).map(move |service| (token, service)),
|
||||
);
|
||||
}
|
||||
join_all(fut).map_err(|e| {
|
||||
error!("Can not construct service: {:?}", e);
|
||||
})
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) trait ServiceRuntimeConfiguration: Send {
|
||||
fn clone(&self) -> Box<ServiceRuntimeConfiguration>;
|
||||
|
||||
fn configure(&self, rt: &mut ServiceRuntime);
|
||||
}
|
||||
|
||||
impl<F> ServiceRuntimeConfiguration for F
|
||||
where
|
||||
F: Fn(&mut ServiceRuntime) + Send + Clone + 'static,
|
||||
{
|
||||
fn clone(&self) -> Box<ServiceRuntimeConfiguration> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
|
||||
fn configure(&self, rt: &mut ServiceRuntime) {
|
||||
(self)(rt)
|
||||
}
|
||||
}
|
||||
|
||||
fn not_configured(_: &mut ServiceRuntime) {
|
||||
error!("Service is not configured");
|
||||
}
|
||||
|
||||
pub struct ServiceRuntime {
|
||||
names: HashMap<String, Token>,
|
||||
services: HashMap<Token, BoxedNewService>,
|
||||
onstart: Vec<Box<Future<Item = (), Error = ()>>>,
|
||||
}
|
||||
|
||||
impl ServiceRuntime {
|
||||
fn new(names: HashMap<String, Token>) -> Self {
|
||||
ServiceRuntime {
|
||||
names,
|
||||
services: HashMap::new(),
|
||||
onstart: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn validate(&self) {
|
||||
for (name, token) in &self.names {
|
||||
if !self.services.contains_key(&token) {
|
||||
error!("Service {:?} is not configured", name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Register service.
|
||||
///
|
||||
/// Name of the service must be registered during configuration stage with
|
||||
/// *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
|
||||
pub fn service<T, F>(&mut self, name: &str, service: F)
|
||||
where
|
||||
F: IntoNewService<T, ServerConfig>,
|
||||
T: NewService<ServerConfig, Request = Io<TcpStream>> + 'static,
|
||||
T::Future: 'static,
|
||||
T::Service: 'static,
|
||||
T::InitError: fmt::Debug,
|
||||
{
|
||||
// let name = name.to_owned();
|
||||
if let Some(token) = self.names.get(name) {
|
||||
self.services.insert(
|
||||
token.clone(),
|
||||
Box::new(ServiceFactory {
|
||||
inner: service.into_new_service(),
|
||||
}),
|
||||
);
|
||||
} else {
|
||||
panic!("Unknown service: {:?}", name);
|
||||
}
|
||||
}
|
||||
|
||||
/// Execute future before services initialization.
|
||||
pub fn on_start<F>(&mut self, fut: F)
|
||||
where
|
||||
F: Future<Item = (), Error = ()> + 'static,
|
||||
{
|
||||
self.onstart.push(Box::new(fut))
|
||||
}
|
||||
}
|
||||
|
||||
type BoxedNewService = Box<
|
||||
NewService<
|
||||
ServerConfig,
|
||||
Request = (Option<CounterGuard>, ServerMessage),
|
||||
Response = (),
|
||||
Error = (),
|
||||
InitError = (),
|
||||
Service = BoxedServerService,
|
||||
Future = Box<Future<Item = BoxedServerService, Error = ()>>,
|
||||
>,
|
||||
>;
|
||||
|
||||
struct ServiceFactory<T> {
|
||||
inner: T,
|
||||
}
|
||||
|
||||
impl<T> NewService<ServerConfig> for ServiceFactory<T>
|
||||
where
|
||||
T: NewService<ServerConfig, Request = Io<TcpStream>>,
|
||||
T::Future: 'static,
|
||||
T::Service: 'static,
|
||||
T::Error: 'static,
|
||||
T::InitError: fmt::Debug + 'static,
|
||||
{
|
||||
type Request = (Option<CounterGuard>, ServerMessage);
|
||||
type Response = ();
|
||||
type Error = ();
|
||||
type InitError = ();
|
||||
type Service = BoxedServerService;
|
||||
type Future = Box<Future<Item = BoxedServerService, Error = ()>>;
|
||||
|
||||
fn new_service(&self, cfg: &ServerConfig) -> Self::Future {
|
||||
Box::new(self.inner.new_service(cfg).map_err(|_| ()).map(|s| {
|
||||
let service: BoxedServerService = Box::new(StreamService::new(s));
|
||||
service
|
||||
}))
|
||||
}
|
||||
}
|
@@ -1,80 +0,0 @@
|
||||
use std::cell::Cell;
|
||||
use std::rc::Rc;
|
||||
|
||||
use futures::task::AtomicTask;
|
||||
|
||||
#[derive(Clone)]
|
||||
/// Simple counter with the ability to notify a task on reaching a specific number
|
||||
///
|
||||
/// Counter can be cloned; the total count is shared across all clones.
|
||||
pub struct Counter(Rc<CounterInner>);
|
||||
|
||||
#[derive(Debug)]
|
||||
struct CounterInner {
|
||||
count: Cell<usize>,
|
||||
capacity: usize,
|
||||
task: AtomicTask,
|
||||
}
|
||||
|
||||
impl Counter {
|
||||
/// Create `Counter` instance and set max value.
|
||||
pub fn new(capacity: usize) -> Self {
|
||||
Counter(Rc::new(CounterInner {
|
||||
capacity,
|
||||
count: Cell::new(0),
|
||||
task: AtomicTask::new(),
|
||||
}))
|
||||
}
|
||||
|
||||
pub fn get(&self) -> CounterGuard {
|
||||
CounterGuard::new(self.0.clone())
|
||||
}
|
||||
|
||||
/// Check if counter is not at capacity
|
||||
pub fn available(&self) -> bool {
|
||||
self.0.available()
|
||||
}
|
||||
|
||||
/// Get total number of acquired counts
|
||||
pub fn total(&self) -> usize {
|
||||
self.0.count.get()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct CounterGuard(Rc<CounterInner>);
|
||||
|
||||
impl CounterGuard {
|
||||
fn new(inner: Rc<CounterInner>) -> Self {
|
||||
inner.inc();
|
||||
CounterGuard(inner)
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for CounterGuard {
|
||||
fn drop(&mut self) {
|
||||
self.0.dec();
|
||||
}
|
||||
}
|
||||
|
||||
impl CounterInner {
|
||||
fn inc(&self) {
|
||||
self.count.set(self.count.get() + 1);
|
||||
}
|
||||
|
||||
fn dec(&self) {
|
||||
let num = self.count.get();
|
||||
self.count.set(num - 1);
|
||||
if num == self.capacity {
|
||||
self.task.notify();
|
||||
}
|
||||
}
|
||||
|
||||
fn available(&self) -> bool {
|
||||
let avail = self.count.get() < self.capacity;
|
||||
if !avail {
|
||||
self.task.register();
|
||||
}
|
||||
avail
|
||||
}
|
||||
}
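// Hedged sketch (not part of the diff): capacity is acquired through guards and released
// automatically on drop. Note that `available()` registers the current futures 0.1 task
// when the counter is full, so that path must run inside a task context; this demo only
// exercises the non-blocking path.
fn counter_guard_demo() {
    let counter = Counter::new(2);
    assert!(counter.available());

    let guard = counter.get();
    assert_eq!(counter.total(), 1);

    drop(guard);
    assert_eq!(counter.total(), 0);
}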
|
actix-server/src/handle.rs (new file)
@@ -0,0 +1,56 @@
|
||||
use std::future::Future;
|
||||
|
||||
use tokio::sync::{mpsc::UnboundedSender, oneshot};
|
||||
|
||||
use crate::server::ServerCommand;
|
||||
|
||||
/// Server handle.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ServerHandle {
|
||||
cmd_tx: UnboundedSender<ServerCommand>,
|
||||
}
|
||||
|
||||
impl ServerHandle {
|
||||
pub(crate) fn new(cmd_tx: UnboundedSender<ServerCommand>) -> Self {
|
||||
ServerHandle { cmd_tx }
|
||||
}
|
||||
|
||||
pub(crate) fn worker_faulted(&self, idx: usize) {
|
||||
let _ = self.cmd_tx.send(ServerCommand::WorkerFaulted(idx));
|
||||
}
|
||||
|
||||
/// Pause accepting incoming connections.
|
||||
///
|
||||
/// May drop pending connections on the socket. All open connections remain active.
|
||||
pub fn pause(&self) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.cmd_tx.send(ServerCommand::Pause(tx));
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Resume accepting incoming connections.
|
||||
pub fn resume(&self) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let _ = self.cmd_tx.send(ServerCommand::Resume(tx));
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop incoming connection processing, stop all workers and exit.
|
||||
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
let _ = self.cmd_tx.send(ServerCommand::Stop {
|
||||
graceful,
|
||||
completion: Some(tx),
|
||||
force_system_stop: false,
|
||||
});
|
||||
|
||||
async {
|
||||
let _ = rx.await;
|
||||
}
|
||||
}
|
||||
}
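// Hedged usage sketch (not part of the diff): a `ServerHandle` is typically obtained from
// the running server (a `Server::handle()` accessor is assumed here) and can be awaited
// from any task.
async fn shutdown(srv: &crate::Server) {
    let handle = srv.handle();

    // Temporarily stop accepting new connections, then resume.
    handle.pause().await;
    handle.resume().await;

    // Finally, gracefully stop all workers.
    handle.stop(true).await;
}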
|
actix-server/src/join_all.rs (new file)
@@ -0,0 +1,78 @@
|
||||
use std::{
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures_core::future::BoxFuture;
|
||||
|
||||
// A poor man's join future. The joined future is only used when starting/stopping the server.
|
||||
// pin_project and pinned futures are overkill for this task.
|
||||
pub(crate) struct JoinAll<T> {
|
||||
fut: Vec<JoinFuture<T>>,
|
||||
}
|
||||
|
||||
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + Send + 'static>) -> JoinAll<T> {
|
||||
let fut = fut
|
||||
.into_iter()
|
||||
.map(|f| JoinFuture::Future(Box::pin(f)))
|
||||
.collect();
|
||||
|
||||
JoinAll { fut }
|
||||
}
|
||||
|
||||
enum JoinFuture<T> {
|
||||
Future(BoxFuture<'static, T>),
|
||||
Result(Option<T>),
|
||||
}
|
||||
|
||||
impl<T> Unpin for JoinAll<T> {}
|
||||
|
||||
impl<T> Future for JoinAll<T> {
|
||||
type Output = Vec<T>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let mut ready = true;
|
||||
|
||||
let this = self.get_mut();
|
||||
for fut in this.fut.iter_mut() {
|
||||
if let JoinFuture::Future(f) = fut {
|
||||
match f.as_mut().poll(cx) {
|
||||
Poll::Ready(t) => {
|
||||
*fut = JoinFuture::Result(Some(t));
|
||||
}
|
||||
Poll::Pending => ready = false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ready {
|
||||
let mut res = Vec::new();
|
||||
for fut in this.fut.iter_mut() {
|
||||
if let JoinFuture::Result(f) = fut {
|
||||
res.push(f.take().unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Ready(res)
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use actix_utils::future::ready;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_join_all() {
|
||||
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
|
||||
let mut res = join_all(futs).await.into_iter();
|
||||
assert_eq!(Ok(1), res.next().unwrap());
|
||||
assert_eq!(Err(3), res.next().unwrap());
|
||||
assert_eq!(Ok(9), res.next().unwrap());
|
||||
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff.