Mirror of https://github.com/fafhrd91/actix-web (synced 2025-08-19 20:35:36 +02:00)

Compare commits: web-v3.0.0 ... awc-v3.0.0 (592 commits)
[Commit list: all 592 commits in this comparison are shown only as abbreviated SHA-1 hashes (76684a786e through 3892a95c11 as listed); the author, date, and commit-message columns were not captured in this mirror view.]
`.cargo/config.toml` (new file, +14)

```diff
@@ -0,0 +1,14 @@
+[alias]
+lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo"
+lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"
+
+# lib checking
+ci-check-min = "hack --workspace check --no-default-features"
+ci-check-default = "hack --workspace check"
+ci-check-default-tests = "check --workspace --tests"
+ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,io-uring check"
+ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --skip=__compress check"
+
+# testing
+ci-doctest-default = "test --workspace --doc --no-fail-fast -- --nocapture"
+ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"
```
`.github/FUNDING.yml` (vendored, new file, +3)

```diff
@@ -0,0 +1,3 @@
+# These are supported funding model platforms
+
+github: [robjtede]
```
`.github/ISSUE_TEMPLATE/bug_report.md` (vendored, 4 changes)

```diff
@@ -33,5 +33,5 @@ Please search on the [Actix Web issue tracker](https://github.com/actix/actix-we
 ## Your Environment
 <!--- Include as many relevant details about the environment you experienced the bug in -->
 
-* Rust Version (I.e, output of `rustc -V`):
-* Actix Web Version:
+- Rust Version (I.e, output of `rustc -V`):
+- Actix Web Version:
```
`.github/ISSUE_TEMPLATE/config.yml` (vendored, 12 changes)

```diff
@@ -1,8 +1,8 @@
 blank_issues_enabled: true
 contact_links:
-  - name: Gitter channel (actix-web)
-    url: https://gitter.im/actix/actix-web
-    about: Please ask and answer questions about the actix-web here.
-  - name: Gitter channel (actix)
-    url: https://gitter.im/actix/actix
-    about: Please ask and answer questions about the actix here.
+  - name: Actix Discord
+    url: https://discord.gg/NWpN5mmg3x
+    about: Actix developer discussion and community chat
+  - name: GitHub Discussions
+    url: https://github.com/actix/actix-web/discussions
+    about: Actix Web Q&A
```
`.github/PULL_REQUEST_TEMPLATE.md` (vendored, 10 changes)

```diff
@@ -1,21 +1,21 @@
 <!-- Thanks for considering contributing actix! -->
-<!-- Please fill out the following to make our reviews easy. -->
+<!-- Please fill out the following to get your PR reviewed quicker. -->
 
 ## PR Type
 <!-- What kind of change does this PR make? -->
 <!-- Bug Fix / Feature / Refactor / Code Style / Other -->
-INSERT_PR_TYPE
+PR_TYPE
 
 
 ## PR Checklist
-Check your PR fulfills the following:
 
+<!-- Check your PR fulfills the following items. -->
+<!-- For draft PRs check the boxes as you complete them. -->
 
 - [ ] Tests for the changes have been added / updated.
 - [ ] Documentation comments have been added / updated.
 - [ ] A changelog entry has been made for the appropriate packages.
-- [ ] Format code with the latest stable rustfmt
+- [ ] Format code with the latest stable rustfmt.
+- [ ] (Team) Label with affected crates and semver status.
 
 
 ## Overview
```
`.github/workflows/bench.yml` (vendored, 2 changes)

```diff
@@ -1,4 +1,4 @@
-name: Benchmark (Linux)
+name: Benchmark
 
 on:
   pull_request:
```
`.github/workflows/ci-master.yml` (vendored, new file, +66)

```diff
@@ -0,0 +1,66 @@
+name: CI (master only)
+
+on:
+  push:
+    branches: [master]
+
+jobs:
+  ci_feature_powerset_check:
+    name: Verify Feature Combinations
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Install stable
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable-x86_64-unknown-linux-gnu
+          profile: minimal
+          override: true
+
+      - name: Generate Cargo.lock
+        uses: actions-rs/cargo@v1
+        with: { command: generate-lockfile }
+      - name: Cache Dependencies
+        uses: Swatinem/rust-cache@v1.2.0
+
+      - name: Install cargo-hack
+        uses: actions-rs/cargo@v1
+        with:
+          command: install
+          args: cargo-hack
+
+      - name: check feature combinations
+        uses: actions-rs/cargo@v1
+        with: { command: ci-check-all-feature-powerset }
+
+      - name: check feature combinations
+        uses: actions-rs/cargo@v1
+        with: { command: ci-check-all-feature-powerset-linux }
+
+  coverage:
+    name: coverage
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Install stable
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable-x86_64-unknown-linux-gnu
+          profile: minimal
+          override: true
+
+      - name: Generate Cargo.lock
+        uses: actions-rs/cargo@v1
+        with: { command: generate-lockfile }
+      - name: Cache Dependencies
+        uses: Swatinem/rust-cache@v1.2.0
+
+      - name: Generate coverage file
+        run: |
+          cargo install cargo-tarpaulin --vers "^0.13"
+          cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
+      - name: Upload to Codecov
+        uses: codecov/codecov-action@v1
+        with: { file: cobertura.xml }
```
`.github/workflows/ci.yml` (vendored, new file, +121)

```diff
@@ -0,0 +1,121 @@
+name: CI
+
+on:
+  pull_request:
+    types: [opened, synchronize, reopened]
+  push:
+    branches: [master]
+
+jobs:
+  build_and_test:
+    strategy:
+      fail-fast: false
+      matrix:
+        target:
+          - { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
+          - { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
+          - { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
+        version:
+          - 1.52.0 # MSRV
+          - stable
+          - nightly
+
+    name: ${{ matrix.target.name }} / ${{ matrix.version }}
+    runs-on: ${{ matrix.target.os }}
+
+    env:
+      CI: 1
+      CARGO_INCREMENTAL: 0
+      VCPKGRS_DYNAMIC: 1
+
+    steps:
+      - uses: actions/checkout@v2
+
+      # install OpenSSL on Windows
+      # TODO: GitHub actions docs state that OpenSSL is
+      # already installed on these Windows machines somewhere
+      - name: Set vcpkg root
+        if: matrix.target.triple == 'x86_64-pc-windows-msvc'
+        run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
+      - name: Install OpenSSL
+        if: matrix.target.triple == 'x86_64-pc-windows-msvc'
+        run: vcpkg install openssl:x64-windows
+
+      - name: Install ${{ matrix.version }}
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
+          profile: minimal
+          override: true
+
+      - name: Generate Cargo.lock
+        uses: actions-rs/cargo@v1
+        with: { command: generate-lockfile }
+      - name: Cache Dependencies
+        uses: Swatinem/rust-cache@v1.2.0
+
+      - name: Install cargo-hack
+        uses: actions-rs/cargo@v1
+        with:
+          command: install
+          args: cargo-hack
+
+      - name: check minimal
+        uses: actions-rs/cargo@v1
+        with: { command: ci-check-min }
+
+      - name: check default
+        uses: actions-rs/cargo@v1
+        with: { command: ci-check-default }
+
+      - name: tests
+        timeout-minutes: 60
+        run: |
+          cargo test --lib --tests -p=actix-router --all-features
+          cargo test --lib --tests -p=actix-http --all-features
+          cargo test --lib --tests -p=actix-web --features=rustls,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls
+          cargo test --lib --tests -p=actix-web-codegen --all-features
+          cargo test --lib --tests -p=awc --all-features
+          cargo test --lib --tests -p=actix-http-test --all-features
+          cargo test --lib --tests -p=actix-test --all-features
+          cargo test --lib --tests -p=actix-files
+          cargo test --lib --tests -p=actix-multipart --all-features
+          cargo test --lib --tests -p=actix-web-actors --all-features
+
+      - name: tests (io-uring)
+        if: matrix.target.os == 'ubuntu-latest'
+        timeout-minutes: 60
+        run: >
+          sudo bash -c "ulimit -Sl 512
+          && ulimit -Hl 512
+          && PATH=$PATH:/usr/share/rust/.cargo/bin
+          && RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
+
+      - name: Clear the cargo caches
+        run: |
+          cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
+          cargo-cache
+
+  rustdoc:
+    name: doc tests
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Install Rust (nightly)
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: nightly-x86_64-unknown-linux-gnu
+          profile: minimal
+          override: true
+
+      - name: Generate Cargo.lock
+        uses: actions-rs/cargo@v1
+        with: { command: generate-lockfile }
+      - name: Cache Dependencies
+        uses: Swatinem/rust-cache@v1.3.0
+
+      - name: doc tests
+        uses: actions-rs/cargo@v1
+        timeout-minutes: 60
+        with: { command: ci-doctest }
```
`.github/workflows/clippy-fmt.yml` (vendored, 21 changes)

```diff
@@ -1,32 +1,39 @@
+name: Lint
+
 on:
   pull_request:
     types: [opened, synchronize, reopened]
 
-name: Clippy and rustfmt Check
 jobs:
-  clippy_check:
+  fmt:
     runs-on: ubuntu-latest
     steps:
      - uses: actions/checkout@v2
 
-      - uses: actions-rs/toolchain@v1
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          components: rustfmt
          override: true
      - name: Check with rustfmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check
 
-      - uses: actions-rs/toolchain@v1
+  clippy:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
        with:
-          toolchain: nightly
+          toolchain: stable
          components: clippy
          override: true
      - name: Check with Clippy
        uses: actions-rs/clippy-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
-          args: --all-features --all --tests
+          args: --workspace --all-features --tests
```
`.github/workflows/linux.yml` (vendored, file deleted, -69)

```diff
@@ -1,69 +0,0 @@
-name: CI (Linux)
-
-on:
-  pull_request:
-    types: [opened, synchronize, reopened]
-  push:
-    branches:
-      - master
-
-jobs:
-  build_and_test:
-    strategy:
-      fail-fast: false
-      matrix:
-        version:
-          - 1.42.0 # MSRV
-          - stable
-          - nightly
-
-    name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Install ${{ matrix.version }}
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
-          profile: minimal
-          override: true
-
-      - name: check build
-        uses: actions-rs/cargo@v1
-        with:
-          command: check
-          args: --all --bins --examples --tests
-
-      - name: tests
-        uses: actions-rs/cargo@v1
-        timeout-minutes: 40
-        with:
-          command: test
-          args: --all --all-features --no-fail-fast -- --nocapture
-
-      - name: tests (actix-http)
-        uses: actions-rs/cargo@v1
-        timeout-minutes: 40
-        with:
-          command: test
-          args: --package=actix-http --no-default-features --features=rustls -- --nocapture
-
-      - name: tests (awc)
-        uses: actions-rs/cargo@v1
-        timeout-minutes: 40
-        with:
-          command: test
-          args: --package=awc --no-default-features --features=rustls -- --nocapture
-
-      - name: Generate coverage file
-        if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
-        run: |
-          cargo install cargo-tarpaulin --vers "^0.13"
-          cargo tarpaulin --out Xml
-      - name: Upload to Codecov
-        if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
-        uses: codecov/codecov-action@v1
-        with:
-          file: cobertura.xml
```
`.github/workflows/macos.yml` (vendored, file deleted, -44)

```diff
@@ -1,44 +0,0 @@
-name: CI (macOS)
-
-on:
-  pull_request:
-    types: [opened, synchronize, reopened]
-  push:
-    branches:
-      - master
-
-jobs:
-  build_and_test:
-    strategy:
-      fail-fast: false
-      matrix:
-        version:
-          - stable
-          - nightly
-
-    name: ${{ matrix.version }} - x86_64-apple-darwin
-    runs-on: macOS-latest
-
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Install ${{ matrix.version }}
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ matrix.version }}-x86_64-apple-darwin
-          profile: minimal
-          override: true
-
-      - name: check build
-        uses: actions-rs/cargo@v1
-        with:
-          command: check
-          args: --all --bins --examples --tests
-
-      - name: tests
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --all --all-features --no-fail-fast -- --nocapture
-            --skip=test_h2_content_length
-            --skip=test_reading_deflate_encoding_large_random_rustls
```
`.github/workflows/upload-doc.yml` (vendored, 16 changes)

```diff
@@ -1,14 +1,12 @@
-name: Upload documentation
+name: Upload Documentation
 
 on:
   push:
-    branches:
-      - master
+    branches: [master]
 
 jobs:
   build:
     runs-on: ubuntu-latest
     if: github.repository == 'actix/actix-web'
 
     steps:
       - uses: actions/checkout@v2
@@ -16,21 +14,21 @@ jobs:
       - name: Install Rust
         uses: actions-rs/toolchain@v1
         with:
-          toolchain: stable-x86_64-unknown-linux-gnu
+          toolchain: nightly-x86_64-unknown-linux-gnu
           profile: minimal
           override: true
 
-      - name: check build
+      - name: Build Docs
         uses: actions-rs/cargo@v1
         with:
           command: doc
-          args: --no-deps --workspace --all-features
+          args: --workspace --all-features --no-deps
 
       - name: Tweak HTML
-        run: echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html
+        run: echo '<meta http-equiv="refresh" content="0;url=actix_web/index.html">' > target/doc/index.html
 
       - name: Deploy to GitHub Pages
-        uses: JamesIves/github-pages-deploy-action@3.5.8
+        uses: JamesIves/github-pages-deploy-action@3.7.1
         with:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           BRANCH: gh-pages
```
`.github/workflows/windows.yml` (vendored, file deleted, -64)

```diff
@@ -1,64 +0,0 @@
-name: CI (Windows)
-
-on:
-  pull_request:
-    types: [opened, synchronize, reopened]
-  push:
-    branches:
-      - master
-
-env:
-  VCPKGRS_DYNAMIC: 1
-
-jobs:
-  build_and_test:
-    strategy:
-      fail-fast: false
-      matrix:
-        version:
-          - stable
-          - nightly
-
-    name: ${{ matrix.version }} - x86_64-pc-windows-msvc
-    runs-on: windows-latest
-
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Install ${{ matrix.version }}
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: ${{ matrix.version }}-x86_64-pc-windows-msvc
-          profile: minimal
-          override: true
-
-      - name: Install OpenSSL
-        run: |
-          vcpkg integrate install
-          vcpkg install openssl:x64-windows
-          Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
-          Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
-          Get-ChildItem C:\vcpkg\installed\x64-windows\bin
-          Get-ChildItem C:\vcpkg\installed\x64-windows\lib
-
-      - name: check build
-        uses: actions-rs/cargo@v1
-        with:
-          command: check
-          args: --all --bins --examples --tests
-
-      - name: tests
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --all --all-features --no-fail-fast -- --nocapture
-            --skip=test_h2_content_length
-            --skip=test_reading_deflate_encoding_large_random_rustls
-            --skip=test_params
-            --skip=test_simple
-            --skip=test_expect_continue
-            --skip=test_http10_keepalive
-            --skip=test_slow_request
-            --skip=test_connection_force_close
-            --skip=test_connection_server_close
-            --skip=test_connection_wait_queue_force_close
```
`.gitignore` (vendored, 6 changes)

```diff
@@ -13,3 +13,9 @@ guide/build/
 
 # These are backup files generated by rustfmt
 **/*.rs.bk
+
+# Configuration directory generated by CLion
+.idea
+
+# Configuration directory generated by VSCode
+.vscode
```
`CHANGES.md` (715 changes): file diff suppressed because it is too large.
(file name not captured: project code of conduct)

```diff
@@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo
 
 Examples of behavior that contributes to creating a positive environment include:
 
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
 
 Examples of unacceptable behavior by participants include:
 
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
+- The use of sexualized language or imagery and unwelcome sexual attention or advances
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or electronic address, without explicit permission
+- Other conduct which could reasonably be considered inappropriate in a professional setting
 
 ## Our Responsibilities
 
@@ -34,10 +34,13 @@ This Code of Conduct applies both within project spaces and in public spaces whe
 
 ## Enforcement
 
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
 
 Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
 
+[@robjtede]: https://github.com/robjtede
+[@JohnTitor]: https://github.com/JohnTitor
+
 ## Attribution
 
 This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
```
`Cargo.toml` (195 changes)

```diff
@@ -1,114 +1,129 @@
 [package]
 name = "actix-web"
-version = "3.0.0-beta.3"
+version = "4.0.0-beta.16"
 authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
-description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
-readme = "README.md"
+description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
 keywords = ["actix", "http", "web", "framework", "async"]
+categories = [
+  "network-programming",
+  "asynchronous",
+  "web-programming::http-server",
+  "web-programming::websocket"
+]
 homepage = "https://actix.rs"
 repository = "https://github.com/actix/actix-web.git"
-documentation = "https://docs.rs/actix-web/"
-categories = ["network-programming", "asynchronous",
-              "web-programming::http-server",
-              "web-programming::websocket"]
 license = "MIT OR Apache-2.0"
 edition = "2018"
 
 [package.metadata.docs.rs]
-features = ["openssl", "rustls", "compress", "secure-cookies"]
-
-[badges]
-travis-ci = { repository = "actix/actix-web", branch = "master" }
-codecov = { repository = "actix/actix-web", branch = "master", service = "github" }
+# features that docs.rs will build with
+features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd", "cookies", "secure-cookies"]
+rustdoc-args = ["--cfg", "docsrs"]
 
 [lib]
 name = "actix_web"
 path = "src/lib.rs"
 
 [workspace]
+resolver = "2"
 members = [
-  ".",
-  "awc",
-  "actix-http",
-  "actix-files",
-  "actix-multipart",
-  "actix-web-actors",
-  "actix-web-codegen",
-  "test-server",
+    ".",
+    "awc",
+    "actix-http",
+    "actix-files",
+    "actix-multipart",
+    "actix-web-actors",
+    "actix-web-codegen",
+    "actix-http-test",
+    "actix-test",
+    "actix-router",
 ]
 
 [features]
-default = ["compress"]
+default = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
 
-# content-encoding support
-compress = ["actix-http/compress", "awc/compress"]
+# Brotli algorithm content-encoding support
+compress-brotli = ["actix-http/compress-brotli", "__compress"]
+# Gzip and deflate algorithms content-encoding support
+compress-gzip = ["actix-http/compress-gzip", "__compress"]
+# Zstd algorithm content-encoding support
+compress-zstd = ["actix-http/compress-zstd", "__compress"]
 
-# sessions feature
-secure-cookies = ["actix-http/secure-cookies"]
+# support for cookies
+cookies = ["cookie"]
+
+# secure cookies feature
+secure-cookies = ["cookie/secure"]
 
 # openssl
-openssl = ["actix-tls/openssl", "awc/openssl", "open-ssl"]
+openssl = ["actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"]
 
 # rustls
-rustls = ["actix-tls/rustls", "awc/rustls", "rust-tls"]
+rustls = ["actix-http/rustls", "actix-tls/accept", "actix-tls/rustls"]
 
-[[example]]
-name = "basic"
-required-features = ["compress"]
+# Internal (PRIVATE!) features used to aid testing and checking feature status.
+# Don't rely on these whatsoever. They may disappear at anytime.
+__compress = []
 
-[[example]]
-name = "uds"
-required-features = ["compress"]
-
-[[test]]
-name = "test_server"
-required-features = ["compress"]
+# io-uring feature only avaiable for Linux OSes.
+experimental-io-uring = ["actix-server/io-uring"]
 
 [dependencies]
-actix-codec = "0.2.0"
-actix-service = "1.0.2"
-actix-utils = "1.0.6"
-actix-router = "0.2.4"
-actix-rt = "1.1.1"
-actix-server = "1.0.0"
-actix-testing = "1.0.0"
-actix-macros = "0.1.0"
-actix-threadpool = "0.3.1"
-actix-tls = "2.0.0-alpha.2"
+actix-codec = "0.4.1"
+actix-macros = "0.2.3"
+actix-rt = "2.3"
+actix-server = "2.0.0-rc.2"
+actix-service = "2.0.0"
+actix-utils = "3.0.0"
+actix-tls = { version = "3.0.0", default-features = false, optional = true }
 
-actix-web-codegen = "0.3.0-beta.1"
-actix-http = "2.0.0-beta.3"
-awc = { version = "2.0.0-beta.3", default-features = false }
+actix-http = "3.0.0-beta.17"
+actix-router = "0.5.0-beta.3"
+actix-web-codegen = "0.5.0-beta.6"
 
-bytes = "0.5.3"
-derive_more = "0.99.2"
+ahash = "0.7"
+bytes = "1"
+cfg-if = "1"
+cookie = { version = "0.15", features = ["percent-encode"], optional = true }
+derive_more = "0.99.5"
 encoding_rs = "0.8"
-futures-channel = { version = "0.3.5", default-features = false }
-futures-core = { version = "0.3.5", default-features = false }
-futures-util = { version = "0.3.5", default-features = false }
-fxhash = "0.2.1"
+futures-core = { version = "0.3.7", default-features = false }
+futures-util = { version = "0.3.7", default-features = false }
+itoa = "1"
+language-tags = "0.3"
+once_cell = "1.5"
 log = "0.4"
 mime = "0.3"
-socket2 = "0.3"
-pin-project = "0.4.17"
-regex = "1.3"
-serde = { version = "1.0", features=["derive"] }
+paste = "1"
+pin-project-lite = "0.2.7"
+regex = "1.4"
+serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-serde_urlencoded = "0.6.1"
-time = { version = "0.2.7", default-features = false, features = ["std"] }
+serde_urlencoded = "0.7"
+smallvec = "1.6.1"
+socket2 = "0.4.0"
+time = { version = "0.3", default-features = false, features = ["formatting"] }
 url = "2.1"
-open-ssl = { package = "openssl", version = "0.10", optional = true }
-rust-tls = { package = "rustls", version = "0.18.0", optional = true }
-tinyvec = { version = "0.3", features = ["alloc"] }
 
 [dev-dependencies]
-actix = "0.10.0-alpha.1"
-rand = "0.7"
-env_logger = "0.7"
-serde_derive = "1.0"
+actix-test = { version = "0.1.0-beta.10", features = ["openssl", "rustls"] }
+awc = { version = "3.0.0-beta.15", features = ["openssl"] }
+
 brotli2 = "0.3.2"
+criterion = { version = "0.3", features = ["html_reports"] }
+env_logger = "0.9"
 flate2 = "1.0.13"
-criterion = "0.3"
+futures-util = { version = "0.3.7", default-features = false, features = ["std"] }
+rand = "0.8"
+rcgen = "0.8"
+rustls-pemfile = "0.2"
+tls-openssl = { package = "openssl", version = "0.10.9" }
+tls-rustls = { package = "rustls", version = "0.20.0" }
+zstd = "0.9"
+
+[profile.dev]
+# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much.
+debug = 0
 
 [profile.release]
 lto = true
@@ -116,17 +131,41 @@ opt-level = 3
 codegen-units = 1
 
 [patch.crates-io]
-actix-web = { path = "." }
-actix-http = { path = "actix-http" }
-actix-http-test = { path = "test-server" }
-actix-web-codegen = { path = "actix-web-codegen" }
 actix-files = { path = "actix-files" }
+actix-http = { path = "actix-http" }
+actix-http-test = { path = "actix-http-test" }
+actix-multipart = { path = "actix-multipart" }
+actix-router = { path = "actix-router" }
+actix-test = { path = "actix-test" }
+actix-web = { path = "." }
+actix-web-actors = { path = "actix-web-actors" }
+actix-web-codegen = { path = "actix-web-codegen" }
+awc = { path = "awc" }
 
+# uncomment for quick testing against local actix-net repo
+# actix-service = { path = "../actix-net/actix-service" }
+# actix-macros = { path = "../actix-net/actix-macros" }
+# actix-rt = { path = "../actix-net/actix-rt" }
+# actix-codec = { path = "../actix-net/actix-codec" }
+# actix-utils = { path = "../actix-net/actix-utils" }
+# actix-tls = { path = "../actix-net/actix-tls" }
+# actix-server = { path = "../actix-net/actix-server" }
+
+[[test]]
+name = "test_server"
+required-features = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
 
 [[example]]
-name = "client"
-required-features = ["rustls"]
+name = "basic"
+required-features = ["compress-gzip"]
 
+[[example]]
+name = "uds"
+required-features = ["compress-gzip"]
+
+[[example]]
+name = "on_connect"
+required-features = []
 
 [[bench]]
 name = "server"
@@ -135,3 +174,7 @@ harness = false
 [[bench]]
 name = "service"
 harness = false
+
+[[bench]]
+name = "responder"
+harness = false
```
(file name not captured: Apache-2.0 license text)

```diff
@@ -186,7 +186,7 @@
 same "printed page" as the copyright notice for easier
 identification within third-party archives.
 
-Copyright 2017-NOW Nikolay Kim
+Copyright 2017-NOW Actix Team
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
```
(file name not captured: MIT license text)

```diff
@@ -1,4 +1,4 @@
-Copyright (c) 2017 Nikolay Kim
+Copyright (c) 2017-NOW Actix Team
 
 Permission is hereby granted, free of charge, to any
 person obtaining a copy of this software and associated
```
`MIGRATION.md` (186 changes)

````diff
@@ -1,18 +1,57 @@
 ## Unreleased
 
-* Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
+- The default `NormalizePath` behavior now strips trailing slashes by default. This was
+  previously documented to be the case in v3 but the behavior now matches. The effect is that
+  routes defined with trailing slashes will become inaccessible when
+  using `NormalizePath::default()`. As such, calling `NormalizePath::default()` will log a warning.
+  It is advised that the `new` method be used instead.
+
+  Before: `#[get("/test/")]`
+  After: `#[get("/test")]`
+
+  Alternatively, explicitly require trailing slashes: `NormalizePath::new(TrailingSlash::Always)`.
+
+- The `type Config` of `FromRequest` was removed.
+
+- Feature flag `compress` has been split into its supported algorithm (brotli, gzip, zstd).
+  By default all compression algorithms are enabled.
+  To select algorithm you want to include with `middleware::Compress` use following flags:
+  - `compress-brotli`
+  - `compress-gzip`
+  - `compress-zstd`
+
+  If you have set in your `Cargo.toml` dedicated `actix-web` features and you still want
+  to have compression enabled. Please change features selection like bellow:
+
+  Before: `"compress"`
+  After: `"compress-brotli", "compress-gzip", "compress-zstd"`
+
+
+## 3.0.0
+
+- The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
+  simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
+
+- Cookie handling has been offloaded to the `cookie` crate:
+  * `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
+  * Some types now require lifetime parameters.
+
+- The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects
+  any `actix-web` method previously expecting a time v0.1 input.
+
+- Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
   result in `SameSite=None` being sent with the response Set-Cookie header.
   To create a cookie without a SameSite attribute, remove any calls setting same_site.
 
-* actix-http support for Actors messages was moved to actix-http crate and is enabled
+- actix-http support for Actors messages was moved to actix-http crate and is enabled
   with feature `actors`
-* content_length function is removed from actix-http.
+
+- content_length function is removed from actix-http.
   You can set Content-Length by normally setting the response body or calling no_chunking function.
 
-* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
+- `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
   `u64` instead of a `usize`.
 
-* Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
+- Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
   destructuring or `.into_inner()`. For example:
 
   ```rust
````
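The body of the `web::Path` example referenced above falls between the two captured hunks and is not shown here. A minimal hypothetical sketch of the same migration (the handler, route, and types are illustrative, not taken from the original file):

```rust
use actix_web::{web, App, HttpServer};

// actix-web 3+: tuple `Path` elements are no longer reachable as `path.0`;
// destructure them via `into_inner()` (or directly in the signature) instead.
async fn user_item(path: web::Path<(String, u32)>) -> String {
    let (user, item_id) = path.into_inner();
    format!("user {} requested item {}", user, item_id)
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().route("/users/{user}/items/{id}", web::get().to(user_item))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```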
````diff
@@ -32,26 +71,35 @@
 }
 ```
 
+- `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
+  It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
+  or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
+
+- `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
+
+- `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
+
+
 ## 2.0.0
 
-* `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to
+- `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to
   `.await` on `run` method result, in that case it awaits server exit.
 
-* `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
+- `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
   Stored data is available via `HttpRequest::app_data()` method at runtime.
 
-* Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
+- Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
 
-* Sync handlers has been removed. `.to_async()` method has been renamed to `.to()`
+- Sync handlers has been removed. `.to_async()` method has been renamed to `.to()`
   replace `fn` with `async fn` to convert sync handler to async
 
-* `actix_http_test::TestServer` moved to `actix_web::test` module. To start
+- `actix_http_test::TestServer` moved to `actix_web::test` module. To start
   test server use `test::start()` or `test_start_with_config()` methods
 
-* `ResponseError` trait has been reafctored. `ResponseError::error_response()` renders
+- `ResponseError` trait has been reafctored. `ResponseError::error_response()` renders
   http response.
 
-* Feature `rust-tls` renamed to `rustls`
+- Feature `rust-tls` renamed to `rustls`
 
   instead of
 
````
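A minimal hypothetical sketch tying together two of the migration notes above, awaiting `HttpServer::run()` (2.0) and constructing `NormalizePath` with an explicit `TrailingSlash` mode (3.0/4.0). The import paths follow the 4.x betas, and the bind address and route are illustrative:

```rust
use actix_web::middleware::{NormalizePath, TrailingSlash};
use actix_web::{web, App, HttpResponse, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // Pick the trailing-slash policy explicitly rather than relying on
            // `NormalizePath::default()`, whose default behavior changed.
            .wrap(NormalizePath::new(TrailingSlash::MergeOnly))
            .route("/test", web::get().to(|| async { HttpResponse::Ok().finish() }))
    })
    .bind(("127.0.0.1", 8080))?
    // `HttpServer::start()` became `run()`; awaiting the result waits for server exit.
    .run()
    .await
}
```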
@@ -65,7 +113,7 @@
|
||||
actix-web = { version = "2.0.0", features = ["rustls"] }
|
||||
```
|
||||
|
||||
* Feature `ssl` renamed to `openssl`
|
||||
- Feature `ssl` renamed to `openssl`
|
||||
|
||||
instead of
|
||||
|
||||
@@ -78,11 +126,11 @@
|
||||
```rust
|
||||
actix-web = { version = "2.0.0", features = ["openssl"] }
|
||||
```
|
||||
* `Cors` builder now requires that you call `.finish()` to construct the middleware
|
||||
- `Cors` builder now requires that you call `.finish()` to construct the middleware
|
||||
|
||||
## 1.0.1
|
||||
|
||||
* Cors middleware has been moved to `actix-cors` crate
|
||||
- Cors middleware has been moved to `actix-cors` crate
|
||||
|
||||
instead of
|
||||
|
||||
@@ -96,7 +144,7 @@
|
||||
use actix_cors::Cors;
|
||||
```
|
||||
|
||||
* Identity middleware has been moved to `actix-identity` crate
|
||||
- Identity middleware has been moved to `actix-identity` crate
|
||||
|
||||
instead of
|
||||
|
||||
@@ -113,7 +161,7 @@
|
||||
|
||||
## 1.0.0
|
||||
|
||||
* Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
|
||||
- Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
|
||||
|
||||
instead of
|
||||
|
||||
@@ -171,7 +219,7 @@
|
||||
)
|
||||
```
|
||||
|
||||
* Resource registration. 1.0 version uses generalized resource
|
||||
- Resource registration. 1.0 version uses generalized resource
|
||||
registration via `.service()` method.
|
||||
|
||||
instead of
|
||||
@@ -191,7 +239,7 @@
|
||||
.route(web::post().to(post_handler))
|
||||
```
|
||||
|
||||
* Scope registration.
|
||||
- Scope registration.
|
||||
|
||||
instead of
|
||||
|
||||
@@ -215,7 +263,7 @@
|
||||
);
|
||||
```
|
||||
|
||||
* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
|
||||
- `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
|
||||
|
||||
instead of
|
||||
|
||||
@@ -229,7 +277,7 @@
|
||||
App.new().service(web::resource("/welcome").to(welcome))
|
||||
```
|
||||
|
||||
* Passing arguments to handler with extractors, multiple arguments are allowed
|
||||
- Passing arguments to handler with extractors, multiple arguments are allowed
|
||||
|
||||
instead of
|
||||
|
||||
@@ -247,7 +295,7 @@
|
||||
}
|
||||
```
|
||||
|
||||
* `.f()`, `.a()` and `.h()` handler registration methods have been removed.
|
||||
- `.f()`, `.a()` and `.h()` handler registration methods have been removed.
|
||||
Use `.to()` for handlers and `.to_async()` for async handlers. Handler function
|
||||
must use extractors.
|
||||
|
||||
@@ -263,7 +311,7 @@
|
||||
App.new().service(web::resource("/welcome").to(welcome))
|
||||
```
|
||||
|
||||
* `HttpRequest` does not provide access to request's payload stream.
|
||||
- `HttpRequest` does not provide access to request's payload stream.
|
||||
|
||||
instead of
|
||||
|
||||
@@ -293,7 +341,7 @@
|
||||
}
|
||||
```
|
||||
|
||||
* `State` is now `Data`. You register Data during the App initialization process
|
||||
- `State` is now `Data`. You register Data during the App initialization process
|
||||
and then access it from handlers either using a Data extractor or using
|
||||
HttpRequest's api.
|
||||
|
||||
@@ -329,7 +377,7 @@
|
||||
```
|
||||
|
||||
|
||||
* AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
|
||||
- AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
|
||||
|
||||
instead of
|
||||
|
||||
@@ -345,7 +393,7 @@
|
||||
.. simply omit AsyncResponder and the corresponding responder() finish method
|
||||
|
||||
|
||||
* Middleware
|
||||
- Middleware
|
||||
|
||||
instead of
|
||||
|
||||
@@ -362,7 +410,7 @@
|
||||
.route("/index.html", web::get().to(index));
|
||||
```
|
||||
|
||||
* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
|
||||
- `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
|
||||
method have been removed. Use `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
|
||||
|
||||
instead of
|
||||
@@ -384,9 +432,9 @@
|
||||
}
|
||||
```
|
||||
|
||||
* `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
|
||||
- `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
|
||||
|
||||
* StaticFiles and NamedFile have been moved to a separate crate.
|
||||
- StaticFiles and NamedFile have been moved to a separate crate.
|
||||
|
||||
instead of `use actix_web::fs::StaticFile`
|
||||
|
||||
@@ -396,20 +444,20 @@
|
||||
|
||||
use `use actix_files::NamedFile`
|
||||
|
||||
* Multipart has been moved to a separate crate.
|
||||
- Multipart has been moved to a separate crate.
|
||||
|
||||
instead of `use actix_web::multipart::Multipart`
|
||||
|
||||
use `use actix_multipart::Multipart`
|
||||
|
||||
* Response compression is not enabled by default.
|
||||
- Response compression is not enabled by default.
|
||||
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
|
||||
|
||||
* Session middleware moved to actix-session crate
|
||||
- Session middleware moved to actix-session crate
|
||||
|
||||
* Actors support have been moved to `actix-web-actors` crate
|
||||
- Actors support have been moved to `actix-web-actors` crate
|
||||
|
||||
* Custom Error
|
||||
- Custom Error
|
||||
|
||||
Instead of error_response method alone, ResponseError now provides two methods: error_response and render_response respectively. Where, error_response creates the error response and render_response returns the error response to the caller.
|
||||
|
||||
@@ -423,7 +471,7 @@
|
||||
|
||||
## 0.7.15
|
||||
|
||||
* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
|
||||
- The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
|
||||
your routes, you should use `%20`.
|
||||
|
||||
instead of
|
||||
@@ -448,18 +496,18 @@
|
||||
}
|
||||
```
|
||||
|
||||
* If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
|
||||
- If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
|
||||
|
||||
|
||||
## 0.7.4
|
||||
|
||||
* `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple
|
||||
- `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple
|
||||
even for handler with one parameter.
|
||||
|
||||
|
||||
## 0.7
|
||||
|
||||
* `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
|
||||
- `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
|
||||
use `HttpMessage::payload()` method.
|
||||
|
||||
instead of
|
||||
@@ -485,10 +533,10 @@
|
||||
}
|
||||
```
|
||||
|
||||
* [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
|
||||
- [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
|
||||
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
|
||||
|
||||
* Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead.
|
||||
- Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead.
|
||||
|
||||
instead of
|
||||
|
||||
@@ -502,17 +550,17 @@
|
||||
fn index((query, json): (Query<..>, Json<MyStruct)) -> impl Responder {}
|
||||
```
|
||||
|
||||
- `Handler::handle()` uses `&self` instead of `&mut self`.

- `Handler::handle()` accepts a reference to `HttpRequest<_>` instead of a value.

- Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.

- Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver`.

- `Route::with()` does not return `ExtractorConfig`; to configure the
extractor use `Route::with_config()`.

instead of

@@ -541,26 +589,26 @@

- `Route::with_async()` does not return `ExtractorConfig`; to configure the
extractor use `Route::with_async_config()`.

## 0.6

- The `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`.

- `ws::Message::Close` now includes an optional close reason.
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.

- `HttpServer::threads()` renamed to `HttpServer::workers()`.

- `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.

- `HttpRequest::extensions()` returns a read-only reference to the request's extensions;
`HttpRequest::extensions_mut()` returns a mutable reference.

- Instead of

`use actix_web::middleware::{
CookieSessionBackend, CookieSessionError, RequestSession,

@@ -571,15 +619,15 @@

`use actix_web::middleware::session::{CookieSessionBackend, CookieSessionError,
RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`

- `FromRequest::from_request()` accepts a mutable reference to a request.

- `FromRequest::Result` has to implement `Into<Reply<Self>>`.

- [`Responder::respond_to()`](
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
is generic over `S`.

- Use the `Query` extractor instead of `HttpRequest::query()`.

```rust
fn index(q: Query<HashMap<String, String>>) -> Result<..> {
```

@@ -593,37 +641,37 @@

```rust
let q = Query::<HashMap<String, String>>::extract(req);
```

- Websocket operations are implemented as the `WsWriter` trait;
you need to `use actix_web::ws::WsWriter`.

## 0.5

- `HttpResponseBuilder::body()`, `.finish()`, and `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>`.

- `actix_web::Method`, `actix_web::StatusCode`, and `actix_web::Version`
moved to the `actix_web::http` module.

- `actix_web::header` moved to `actix_web::http::header`.

- `NormalizePath` moved to the `actix_web::http` module.

- `HttpServer` moved to `actix_web::server`; added a new `actix_web::server::new()` function,
a shortcut for `actix_web::server::HttpServer::new()`.

- `DefaultHeaders` middleware does not use a separate builder; all builder methods moved to the type itself.

- `StaticFiles::new()`'s `show_index` parameter removed; use the `show_files_listing()` method instead.

- `CookieSessionBackendBuilder` removed; all methods moved to the `CookieSessionBackend` type.

- The `actix_web::httpcodes` module is deprecated; `HttpResponse::Ok()`, `HttpResponse::Found()`, and other `HttpResponse::XXX()`
functions should be used instead.

- `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
instead of `Result<_, http::Error>`.

- `Application` renamed to `App`.

- `actix_web::Reply` and `actix_web::Resource` moved to `actix_web::dev`.

README.md
@@ -1,53 +1,48 @@
<div align="center">
<h1>Actix web</h1>
<h1>Actix Web</h1>
<p>
<strong>Actix web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
</p>
<p>

[](https://crates.io/crates/actix-web)
[](https://docs.rs/actix-web)
[](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
[](https://crates.io/crates/actix-web)
[](https://docs.rs/actix-web/4.0.0-beta.16)
[](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
[](https://deps.rs/crate/actix-web/4.0.0-beta.16)
<br />
[](https://travis-ci.org/actix/actix-web)
[](https://github.com/actix/actix-web/actions)
[](https://codecov.io/gh/actix/actix-web)
[](https://crates.io/crates/actix-web)
[](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://discord.gg/NWpN5mmg3x)

</p>
</div>

## Features

* Supports *HTTP/1.x* and *HTTP/2*
* Streaming and pipelining
* Keep-alive and slow requests handling
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate)
* Powerful [request routing](https://actix.rs/docs/url-dispatch/)
* Multipart streams
* Static assets
* SSL support using OpenSSL or Rustls
* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
* Includes an async [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Supports [Actix actor framework](https://github.com/actix/actix)
* Runs on stable Rust 1.42+
- Supports *HTTP/1.x* and *HTTP/2*
- Streaming and pipelining
- Keep-alive and slow requests handling
- Client/server [WebSockets](https://actix.rs/docs/websockets/) support
- Transparent content compression/decompression (br, gzip, deflate, zstd)
- Powerful [request routing](https://actix.rs/docs/url-dispatch/)
- Multipart streams
- Static assets
- SSL support using OpenSSL or Rustls
- Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
- Includes an async [HTTP client](https://docs.rs/awc/)
- Runs on stable Rust 1.52+

## Documentation

* [Website & User Guide](https://actix.rs)
* [Examples Repository](https://actix.rs/actix-web/actix_web)
* [API Documentation](https://docs.rs/actix-web)
* [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
- [Website & User Guide](https://actix.rs)
- [Examples Repository](https://github.com/actix/examples)
- [API Documentation](https://docs.rs/actix-web)
- [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)

## Example

<h2>
WARNING: This example is for the master branch which is currently in beta stages for v3. For
Actix web v2 see the <a href="https://actix.rs/docs/getting-started/">getting started guide</a>.
</h2>

Dependencies:

@@ -76,18 +71,18 @@ async fn main() -> std::io::Result<()> {

### More examples

* [Basic Setup](https://github.com/actix/examples/tree/master/basics/)
* [Application State](https://github.com/actix/examples/tree/master/state/)
* [JSON Handling](https://github.com/actix/examples/tree/master/json/)
* [Multipart Streams](https://github.com/actix/examples/tree/master/multipart/)
* [Diesel Integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2 Integration](https://github.com/actix/examples/tree/master/r2d2/)
* [Simple WebSocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera Templates](https://github.com/actix/examples/tree/master/template_tera/)
* [Askama Templates](https://github.com/actix/examples/tree/master/template_askama/)
* [HTTPS using Rustls](https://github.com/actix/examples/tree/master/rustls/)
* [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/openssl/)
* [WebSocket Chat](https://github.com/actix/examples/tree/master/websocket-chat/)
- [Basic Setup](https://github.com/actix/examples/tree/master/basics/basics/)
- [Application State](https://github.com/actix/examples/tree/master/basics/state/)
- [JSON Handling](https://github.com/actix/examples/tree/master/json/json/)
- [Multipart Streams](https://github.com/actix/examples/tree/master/forms/multipart/)
- [Diesel Integration](https://github.com/actix/examples/tree/master/database_interactions/diesel/)
- [r2d2 Integration](https://github.com/actix/examples/tree/master/database_interactions/r2d2/)
- [Simple WebSocket](https://github.com/actix/examples/tree/master/websockets/websocket/)
- [Tera Templates](https://github.com/actix/examples/tree/master/template_engines/tera/)
- [Askama Templates](https://github.com/actix/examples/tree/master/template_engines/askama/)
- [HTTPS using Rustls](https://github.com/actix/examples/tree/master/security/rustls/)
- [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/security/openssl/)
- [WebSocket Chat](https://github.com/actix/examples/tree/master/websockets/chat/)

You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples.

@@ -95,20 +90,20 @@ You may consider checking out

## Benchmarks

One of the fastest web frameworks available according to the
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r19).
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r20&test=composite).

## License

This project is licensed under either of

* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
[http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
[http://www.apache.org/licenses/LICENSE-2.0])
- MIT license ([LICENSE-MIT](LICENSE-MIT) or
[http://opensource.org/licenses/MIT])

at your option.

## Code of Conduct

Contribution to the actix-web crate is organized under the terms of the Contributor Covenant, the
maintainers of Actix web, promises to intervene to uphold that code of conduct.
Contribution to the actix-web repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.

@@ -1,11 +0,0 @@

# Cors Middleware for actix web framework [](https://travis-ci.org/actix/actix-web) [](https://codecov.io/gh/actix/actix-web) [](https://crates.io/crates/actix-cors) [](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

**This crate moved to https://github.com/actix/actix-extras.**

## Documentation & community resources

* [User Guide](https://actix.rs/docs/)
* [API Documentation](https://docs.rs/actix-cors/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-cors](https://crates.io/crates/actix-cors)
* Minimum supported Rust version: 1.34 or later

@@ -1,90 +1,181 @@

# Changes

## Unreleased - 2021-xx-xx


## 0.6.0-beta.11 - 2021-12-27
- No significant changes since `0.6.0-beta.10`.


## 0.6.0-beta.10 - 2021-12-11
- No significant changes since `0.6.0-beta.9`.


## 0.6.0-beta.9 - 2021-11-22
- Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408]
- Add `NamedFile::open_async` (see the sketch below). [#2408]
- Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453]
- The `Responder` impl for `NamedFile` now has a boxed future associated type. [#2408]
- The `Service` impl for `NamedFileService` now has a boxed future associated type. [#2408]
- Add `impl Clone` for `FilesService`. [#2408]

[#2408]: https://github.com/actix/actix-web/pull/2408
[#2453]: https://github.com/actix/actix-web/pull/2453

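The `NamedFile::open_async` constructor added above can be returned straight from a handler, mirroring the crate docs elsewhere in this diff; a minimal sketch:

```rust
// Hypothetical sketch: serving a file with the new async constructor.
use actix_files::NamedFile;
use actix_web::{get, Responder};

#[get("/")]
async fn index() -> impl Responder {
    // With the `experimental-io-uring` feature this does async file I/O;
    // otherwise it behaves like the blocking `NamedFile::open`.
    NamedFile::open_async("./static/index.html").await
}
```
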
## 0.6.0-beta.8 - 2021-10-20
- Minimum supported Rust version (MSRV) is now 1.52.


## 0.6.0-beta.7 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51.


## 0.6.0-beta.6 - 2021-06-26
- Added `Files::path_filter()`. [#2274]
- `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228]

[#2274]: https://github.com/actix/actix-web/pull/2274
[#2228]: https://github.com/actix/actix-web/pull/2228


## 0.6.0-beta.5 - 2021-06-17
- `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135]
- For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156]
- `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225]
- `application/{javascript, json, wasm}` MIME types now have `inline` disposition by default. [#2257]

[#2135]: https://github.com/actix/actix-web/pull/2135
[#2156]: https://github.com/actix/actix-web/pull/2156
[#2225]: https://github.com/actix/actix-web/pull/2225
[#2257]: https://github.com/actix/actix-web/pull/2257


## 0.6.0-beta.4 - 2021-04-02
- Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046]

[#2046]: https://github.com/actix/actix-web/pull/2046


## 0.6.0-beta.3 - 2021-03-09
- No notable changes.


## 0.6.0-beta.2 - 2021-02-10
- Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
- Replace `v_htmlescape` with `askama_escape`. [#1953]

[#1887]: https://github.com/actix/actix-web/pull/1887
[#1953]: https://github.com/actix/actix-web/pull/1953


## 0.6.0-beta.1 - 2021-01-07
- `HttpRange::parse` now has its own error type.
- Update `bytes` to `1.0`. [#1813]

[#1813]: https://github.com/actix/actix-web/pull/1813


## 0.5.0 - 2020-12-26
- Optionally support hidden files/directories. [#1811]

[#1811]: https://github.com/actix/actix-web/pull/1811


## 0.4.1 - 2020-11-24
- Clarify order of parameters in `Files::new` and improve docs.


## 0.4.0 - 2020-10-06
- Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]

[#1714]: https://github.com/actix/actix-web/pull/1714


## 0.3.0 - 2020-09-11
- No significant changes from 0.3.0-beta.1.


## 0.3.0-beta.1 - 2020-07-15
- Update `v_htmlescape` to 0.10
- Update `actix-web` and `actix-http` dependencies to beta.1


## 0.3.0-alpha.1 - 2020-05-23
- Update `actix-web` and `actix-http` dependencies to alpha
- Fix some typos in the docs
- Bump minimum supported Rust version to 1.40
- Support sending Content-Length when Content-Range is specified [#1384]

[#1384]: https://github.com/actix/actix-web/pull/1384


## 0.2.1 - 2019-12-22
- Use the same format for file URLs regardless of platforms


## 0.2.0 - 2019-12-20
- Fix BodyEncoding trait import #1220


## 0.2.0-alpha.1 - 2019-12-07
- Migrate to `std::future`


## 0.1.7 - 2019-11-06
- Add an additional `filename*` param in the `Content-Disposition` header of
  `actix_files::NamedFile` to be more compatible. (#1151)


## 0.1.6 - 2019-10-14
- Add option to redirect to a slash-ended path `Files` #1132


## 0.1.5 - 2019-10-08
- Bump up `mime_guess` crate version to 2.0.1
- Bump up `percent-encoding` crate version to 2.1
- Allow user defined request guards for `Files` #1113


## 0.1.4 - 2019-07-20
- Allow to disable `Content-Disposition` header #686


## 0.1.3 - 2019-06-28
- Do not set `Content-Length` header, let actix-http set it #930


## 0.1.2 - 2019-06-13
- Content-Length is 0 for NamedFile HEAD request #914
- Fix ring dependency from actix-web default features for #741


## 0.1.1 - 2019-06-01
- Static files are incorrectly served as both chunked and with length #812


## 0.1.0 - 2019-05-25
- NamedFile last-modified check always fails due to nano-seconds in file modified date #820


## 0.1.0-beta.4 - 2019-05-12
- Update actix-web to beta.4


## 0.1.0-beta.1 - 2019-04-20
- Update actix-web to beta.1


## 0.1.0-alpha.6 - 2019-04-14
- Update actix-web to alpha6


## 0.1.0-alpha.4 - 2019-04-08
- Update actix-web to alpha4


## 0.1.0-alpha.2 - 2019-04-02
- Add default handler support


## 0.1.0-alpha.1 - 2019-03-28
- Initial impl

@@ -1,13 +1,15 @@

[package]
name = "actix-files"
version = "0.3.0-beta.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Static files support for actix web."
readme = "README.md"
version = "0.6.0-beta.11"
authors = [
    "Nikolay Kim <fafhrd91@gmail.com>",
    "fakeshadow <24548779@qq.com>",
    "Rob Ede <robjtede@icloud.com>",
]
description = "Static file serving for Actix Web"
keywords = ["actix", "http", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-files/"
repository = "https://github.com/actix/actix-web"
categories = ["asynchronous", "web-programming::http-server"]
license = "MIT OR Apache-2.0"
edition = "2018"

@@ -16,21 +18,30 @@ edition = "2018"
name = "actix_files"
path = "src/lib.rs"

[features]
experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"]

[dependencies]
actix-web = { version = "3.0.0-beta.3", default-features = false }
actix-http = "2.0.0-beta.3"
actix-service = "1.0.1"
actix-http = "3.0.0-beta.17"
actix-service = "2"
actix-utils = "3"
actix-web = { version = "4.0.0-beta.16", default-features = false }

askama_escape = "0.10"
bitflags = "1"
bytes = "0.5.3"
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
derive_more = "0.99.2"
bytes = "1"
derive_more = "0.99.5"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
http-range = "0.1.4"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.1"
percent-encoding = "2.1"
v_htmlescape = "0.10"
pin-project-lite = "0.2.7"

tokio-uring = { version = "0.1", optional = true }

[dev-dependencies]
actix-rt = "1.0.0"
actix-web = { version = "3.0.0-beta.3", features = ["openssl"] }
actix-rt = "2.2"
actix-test = "0.1.0-beta.10"
actix-web = "4.0.0-beta.16"

|
||||
# Static files support for actix web [](https://travis-ci.org/actix/actix-web) [](https://codecov.io/gh/actix/actix-web) [](https://crates.io/crates/actix-files) [](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
# actix-files
|
||||
|
||||
## Documentation & community resources
|
||||
> Static file serving for Actix Web
|
||||
|
||||
* [User Guide](https://actix.rs/docs/)
|
||||
* [API Documentation](https://docs.rs/actix-files/)
|
||||
* [Chat on gitter](https://gitter.im/actix/actix)
|
||||
* Cargo package: [actix-files](https://crates.io/crates/actix-files)
|
||||
* Minimum supported Rust version: 1.40 or later
|
||||
[](https://crates.io/crates/actix-files)
|
||||
[](https://docs.rs/actix-files/0.6.0-beta.11)
|
||||
[](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
|
||||

|
||||
<br />
|
||||
[](https://deps.rs/crate/actix-files/0.6.0-beta.11)
|
||||
[](https://crates.io/crates/actix-files)
|
||||
[](https://discord.gg/NWpN5mmg3x)
|
||||
|
||||
## Documentation & Resources
|
||||
|
||||
- [API Documentation](https://docs.rs/actix-files/)
|
||||
- [Example Project](https://github.com/actix/examples/tree/master/basics/static_index)
|
||||
- Minimum Supported Rust Version (MSRV): 1.52
|
||||
|
||||
actix-files/src/chunked.rs (new file, 277 lines)
@@ -0,0 +1,277 @@
|
||||
use std::{
|
||||
cmp, fmt,
|
||||
future::Future,
|
||||
io,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use actix_web::{error::Error, web::Bytes};
|
||||
use futures_core::{ready, Stream};
|
||||
use pin_project_lite::pin_project;
|
||||
|
||||
use super::named::File;
|
||||
|
||||
pin_project! {
|
||||
/// Adapter to read a `std::fs::File` in chunks.
|
||||
#[doc(hidden)]
|
||||
pub struct ChunkedReadFile<F, Fut> {
|
||||
size: u64,
|
||||
offset: u64,
|
||||
#[pin]
|
||||
state: ChunkedReadFileState<Fut>,
|
||||
counter: u64,
|
||||
callback: F,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "experimental-io-uring"))]
|
||||
pin_project! {
|
||||
#[project = ChunkedReadFileStateProj]
|
||||
#[project_replace = ChunkedReadFileStateProjReplace]
|
||||
enum ChunkedReadFileState<Fut> {
|
||||
File { file: Option<File>, },
|
||||
Future { #[pin] fut: Fut },
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "experimental-io-uring")]
|
||||
pin_project! {
|
||||
#[project = ChunkedReadFileStateProj]
|
||||
#[project_replace = ChunkedReadFileStateProjReplace]
|
||||
enum ChunkedReadFileState<Fut> {
|
||||
File { file: Option<(File, BytesMut)> },
|
||||
Future { #[pin] fut: Fut },
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, Fut> fmt::Debug for ChunkedReadFile<F, Fut> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("ChunkedReadFile")
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn new_chunked_read(
|
||||
size: u64,
|
||||
offset: u64,
|
||||
file: File,
|
||||
) -> impl Stream<Item = Result<Bytes, Error>> {
|
||||
ChunkedReadFile {
|
||||
size,
|
||||
offset,
|
||||
#[cfg(not(feature = "experimental-io-uring"))]
|
||||
state: ChunkedReadFileState::File { file: Some(file) },
|
||||
#[cfg(feature = "experimental-io-uring")]
|
||||
state: ChunkedReadFileState::File {
|
||||
file: Some((file, BytesMut::new())),
|
||||
},
|
||||
counter: 0,
|
||||
callback: chunked_read_file_callback,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "experimental-io-uring"))]
|
||||
async fn chunked_read_file_callback(
|
||||
mut file: File,
|
||||
offset: u64,
|
||||
max_bytes: usize,
|
||||
) -> Result<(File, Bytes), Error> {
|
||||
use io::{Read as _, Seek as _};
|
||||
|
||||
let res = actix_web::rt::task::spawn_blocking(move || {
|
||||
let mut buf = Vec::with_capacity(max_bytes);
|
||||
|
||||
file.seek(io::SeekFrom::Start(offset))?;
|
||||
|
||||
let n_bytes = file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
|
||||
|
||||
if n_bytes == 0 {
|
||||
Err(io::Error::from(io::ErrorKind::UnexpectedEof))
|
||||
} else {
|
||||
Ok((file, Bytes::from(buf)))
|
||||
}
|
||||
})
|
||||
.await
|
||||
.map_err(|_| actix_web::error::BlockingError)??;
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
#[cfg(feature = "experimental-io-uring")]
|
||||
async fn chunked_read_file_callback(
|
||||
file: File,
|
||||
offset: u64,
|
||||
max_bytes: usize,
|
||||
mut bytes_mut: BytesMut,
|
||||
) -> io::Result<(File, Bytes, BytesMut)> {
|
||||
bytes_mut.reserve(max_bytes);
|
||||
|
||||
let (res, mut bytes_mut) = file.read_at(bytes_mut, offset).await;
|
||||
let n_bytes = res?;
|
||||
|
||||
if n_bytes == 0 {
|
||||
return Err(io::ErrorKind::UnexpectedEof.into());
|
||||
}
|
||||
|
||||
let bytes = bytes_mut.split_to(n_bytes).freeze();
|
||||
|
||||
Ok((file, bytes, bytes_mut))
|
||||
}
|
||||
|
||||
#[cfg(feature = "experimental-io-uring")]
|
||||
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
|
||||
where
|
||||
F: Fn(File, u64, usize, BytesMut) -> Fut,
|
||||
Fut: Future<Output = io::Result<(File, Bytes, BytesMut)>>,
|
||||
{
|
||||
type Item = Result<Bytes, Error>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let mut this = self.as_mut().project();
|
||||
match this.state.as_mut().project() {
|
||||
ChunkedReadFileStateProj::File { file } => {
|
||||
let size = *this.size;
|
||||
let offset = *this.offset;
|
||||
let counter = *this.counter;
|
||||
|
||||
if size == counter {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
|
||||
|
||||
let (file, bytes_mut) = file
|
||||
.take()
|
||||
.expect("ChunkedReadFile polled after completion");
|
||||
|
||||
let fut = (this.callback)(file, offset, max_bytes, bytes_mut);
|
||||
|
||||
this.state
|
||||
.project_replace(ChunkedReadFileState::Future { fut });
|
||||
|
||||
self.poll_next(cx)
|
||||
}
|
||||
}
|
||||
ChunkedReadFileStateProj::Future { fut } => {
|
||||
let (file, bytes, bytes_mut) = ready!(fut.poll(cx))?;
|
||||
|
||||
this.state.project_replace(ChunkedReadFileState::File {
|
||||
file: Some((file, bytes_mut)),
|
||||
});
|
||||
|
||||
*this.offset += bytes.len() as u64;
|
||||
*this.counter += bytes.len() as u64;
|
||||
|
||||
Poll::Ready(Some(Ok(bytes)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "experimental-io-uring"))]
|
||||
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
|
||||
where
|
||||
F: Fn(File, u64, usize) -> Fut,
|
||||
Fut: Future<Output = Result<(File, Bytes), Error>>,
|
||||
{
|
||||
type Item = Result<Bytes, Error>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let mut this = self.as_mut().project();
|
||||
match this.state.as_mut().project() {
|
||||
ChunkedReadFileStateProj::File { file } => {
|
||||
let size = *this.size;
|
||||
let offset = *this.offset;
|
||||
let counter = *this.counter;
|
||||
|
||||
if size == counter {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
|
||||
|
||||
let file = file
|
||||
.take()
|
||||
.expect("ChunkedReadFile polled after completion");
|
||||
|
||||
let fut = (this.callback)(file, offset, max_bytes);
|
||||
|
||||
this.state
|
||||
.project_replace(ChunkedReadFileState::Future { fut });
|
||||
|
||||
self.poll_next(cx)
|
||||
}
|
||||
}
|
||||
ChunkedReadFileStateProj::Future { fut } => {
|
||||
let (file, bytes) = ready!(fut.poll(cx))?;
|
||||
|
||||
this.state
|
||||
.project_replace(ChunkedReadFileState::File { file: Some(file) });
|
||||
|
||||
*this.offset += bytes.len() as u64;
|
||||
*this.counter += bytes.len() as u64;
|
||||
|
||||
Poll::Ready(Some(Ok(bytes)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "experimental-io-uring")]
|
||||
use bytes_mut::BytesMut;
|
||||
|
||||
// TODO: remove new type and use bytes::BytesMut directly
|
||||
#[doc(hidden)]
|
||||
#[cfg(feature = "experimental-io-uring")]
|
||||
mod bytes_mut {
|
||||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
use tokio_uring::buf::{IoBuf, IoBufMut};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct BytesMut(bytes::BytesMut);
|
||||
|
||||
impl BytesMut {
|
||||
pub(super) fn new() -> Self {
|
||||
Self(bytes::BytesMut::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for BytesMut {
|
||||
type Target = bytes::BytesMut;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for BytesMut {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl IoBuf for BytesMut {
|
||||
fn stable_ptr(&self) -> *const u8 {
|
||||
self.0.as_ptr()
|
||||
}
|
||||
|
||||
fn bytes_init(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
fn bytes_total(&self) -> usize {
|
||||
self.0.capacity()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl IoBufMut for BytesMut {
|
||||
fn stable_mut_ptr(&mut self) -> *mut u8 {
|
||||
self.0.as_mut_ptr()
|
||||
}
|
||||
|
||||
unsafe fn set_init(&mut self, init_len: usize) {
|
||||
if self.len() < init_len {
|
||||
self.0.set_len(init_len);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
actix-files/src/directory.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
|
||||
|
||||
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
|
||||
use askama_escape::{escape as escape_html_entity, Html};
|
||||
use percent_encoding::{utf8_percent_encode, CONTROLS};
|
||||
|
||||
/// A directory; responds with the generated directory listing.
|
||||
#[derive(Debug)]
|
||||
pub struct Directory {
|
||||
/// Base directory.
|
||||
pub base: PathBuf,
|
||||
|
||||
/// Path of subdirectory to generate listing for.
|
||||
pub path: PathBuf,
|
||||
}
|
||||
|
||||
impl Directory {
|
||||
/// Create a new directory
|
||||
pub fn new(base: PathBuf, path: PathBuf) -> Directory {
|
||||
Directory { base, path }
|
||||
}
|
||||
|
||||
/// Is this entry visible from this directory?
|
||||
pub fn is_visible(&self, entry: &io::Result<DirEntry>) -> bool {
|
||||
if let Ok(ref entry) = *entry {
|
||||
if let Some(name) = entry.file_name().to_str() {
|
||||
if name.starts_with('.') {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if let Ok(ref md) = entry.metadata() {
|
||||
let ft = md.file_type();
|
||||
return ft.is_dir() || ft.is_file() || ft.is_symlink();
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) type DirectoryRenderer =
|
||||
dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>;
|
||||
|
||||
// show file url as relative to static path
|
||||
macro_rules! encode_file_url {
|
||||
($path:ident) => {
|
||||
utf8_percent_encode(&$path, CONTROLS)
|
||||
};
|
||||
}
|
||||
|
||||
// " -- &quot;  & -- &amp;  ' -- &#x27;  < -- &lt;  > -- &gt;  / -- &#x2f;
|
||||
macro_rules! encode_file_name {
|
||||
($entry:ident) => {
|
||||
escape_html_entity(&$entry.file_name().to_string_lossy(), Html)
|
||||
};
|
||||
}
|
||||
|
||||
pub(crate) fn directory_listing(
|
||||
dir: &Directory,
|
||||
req: &HttpRequest,
|
||||
) -> Result<ServiceResponse, io::Error> {
|
||||
let index_of = format!("Index of {}", req.path());
|
||||
let mut body = String::new();
|
||||
let base = Path::new(req.path());
|
||||
|
||||
for entry in dir.path.read_dir()? {
|
||||
if dir.is_visible(&entry) {
|
||||
let entry = entry.unwrap();
|
||||
let p = match entry.path().strip_prefix(&dir.path) {
|
||||
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace("\\", "/"),
|
||||
Ok(p) => base.join(p).to_string_lossy().into_owned(),
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
// if file is a directory, add '/' to the end of the name
|
||||
if let Ok(metadata) = entry.metadata() {
|
||||
if metadata.is_dir() {
|
||||
let _ = write!(
|
||||
body,
|
||||
"<li><a href=\"{}\">{}/</a></li>",
|
||||
encode_file_url!(p),
|
||||
encode_file_name!(entry),
|
||||
);
|
||||
} else {
|
||||
let _ = write!(
|
||||
body,
|
||||
"<li><a href=\"{}\">{}</a></li>",
|
||||
encode_file_url!(p),
|
||||
encode_file_name!(entry),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let html = format!(
|
||||
"<html>\
|
||||
<head><title>{}</title></head>\
|
||||
<body><h1>{}</h1>\
|
||||
<ul>\
|
||||
{}\
|
||||
</ul></body>\n</html>",
|
||||
index_of, index_of, body
|
||||
);
|
||||
Ok(ServiceResponse::new(
|
||||
req.clone(),
|
||||
HttpResponse::Ok()
|
||||
.content_type("text/html; charset=utf-8")
|
||||
.body(html),
|
||||
))
|
||||
}
|
||||
actix-files/src/encoding.rs (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
use mime::Mime;
|
||||
|
||||
/// Transforms MIME `text/*` types into their UTF-8 equivalent, if supported.
|
||||
///
|
||||
/// MIME types that are converted
|
||||
/// - application/javascript
|
||||
/// - text/html
|
||||
/// - text/css
|
||||
/// - text/plain
|
||||
/// - text/csv
|
||||
/// - text/tab-separated-values
|
||||
pub(crate) fn equiv_utf8_text(ct: Mime) -> Mime {
|
||||
// use (roughly) order of file-type popularity for a web server
|
||||
|
||||
if ct == mime::APPLICATION_JAVASCRIPT {
|
||||
return mime::APPLICATION_JAVASCRIPT_UTF_8;
|
||||
}
|
||||
|
||||
if ct == mime::TEXT_HTML {
|
||||
return mime::TEXT_HTML_UTF_8;
|
||||
}
|
||||
|
||||
if ct == mime::TEXT_CSS {
|
||||
return mime::TEXT_CSS_UTF_8;
|
||||
}
|
||||
|
||||
if ct == mime::TEXT_PLAIN {
|
||||
return mime::TEXT_PLAIN_UTF_8;
|
||||
}
|
||||
|
||||
if ct == mime::TEXT_CSV {
|
||||
return mime::TEXT_CSV_UTF_8;
|
||||
}
|
||||
|
||||
if ct == mime::TEXT_TAB_SEPARATED_VALUES {
|
||||
return mime::TEXT_TAB_SEPARATED_VALUES_UTF_8;
|
||||
}
|
||||
|
||||
ct
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_equiv_utf8_text() {
|
||||
assert_eq!(equiv_utf8_text(mime::TEXT_PLAIN), mime::TEXT_PLAIN_UTF_8);
|
||||
assert_eq!(equiv_utf8_text(mime::TEXT_XML), mime::TEXT_XML);
|
||||
assert_eq!(equiv_utf8_text(mime::IMAGE_PNG), mime::IMAGE_PNG);
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@

use actix_web::{http::StatusCode, HttpResponse, ResponseError};
use actix_web::{http::StatusCode, ResponseError};
use derive_more::Display;

/// Errors which can occur when serving static files.

@@ -16,11 +16,12 @@ pub enum FilesError {

/// Return `NotFound` for `FilesError`
impl ResponseError for FilesError {
    fn error_response(&self) -> HttpResponse {
        HttpResponse::new(StatusCode::NOT_FOUND)
    fn status_code(&self) -> StatusCode {
        StatusCode::NOT_FOUND
    }
}

#[allow(clippy::enum_variant_names)]
#[derive(Display, Debug, PartialEq)]
pub enum UriSegmentError {
    /// The segment started with the wrapped invalid character.

actix-files/src/files.rs (new file, 391 lines)
@@ -0,0 +1,391 @@
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
fmt, io,
|
||||
path::{Path, PathBuf},
|
||||
rc::Rc,
|
||||
};
|
||||
|
||||
use actix_service::{boxed, IntoServiceFactory, ServiceFactory, ServiceFactoryExt};
|
||||
use actix_web::{
|
||||
dev::{
|
||||
AppService, HttpServiceFactory, RequestHead, ResourceDef, ServiceRequest,
|
||||
ServiceResponse,
|
||||
},
|
||||
error::Error,
|
||||
guard::Guard,
|
||||
http::header::DispositionType,
|
||||
HttpRequest,
|
||||
};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
|
||||
use crate::{
|
||||
directory_listing, named,
|
||||
service::{FilesService, FilesServiceInner},
|
||||
Directory, DirectoryRenderer, HttpNewService, MimeOverride, PathFilter,
|
||||
};
|
||||
|
||||
/// Static files handling service.
|
||||
///
|
||||
/// `Files` service must be registered with `App::service()` method.
|
||||
///
|
||||
/// ```
|
||||
/// use actix_web::App;
|
||||
/// use actix_files::Files;
|
||||
///
|
||||
/// let app = App::new()
|
||||
/// .service(Files::new("/static", "."));
|
||||
/// ```
|
||||
pub struct Files {
|
||||
path: String,
|
||||
directory: PathBuf,
|
||||
index: Option<String>,
|
||||
show_index: bool,
|
||||
redirect_to_slash: bool,
|
||||
default: Rc<RefCell<Option<Rc<HttpNewService>>>>,
|
||||
renderer: Rc<DirectoryRenderer>,
|
||||
mime_override: Option<Rc<MimeOverride>>,
|
||||
path_filter: Option<Rc<PathFilter>>,
|
||||
file_flags: named::Flags,
|
||||
use_guards: Option<Rc<dyn Guard>>,
|
||||
guards: Vec<Rc<dyn Guard>>,
|
||||
hidden_files: bool,
|
||||
}
|
||||
|
||||
impl fmt::Debug for Files {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("Files")
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Files {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
directory: self.directory.clone(),
|
||||
index: self.index.clone(),
|
||||
show_index: self.show_index,
|
||||
redirect_to_slash: self.redirect_to_slash,
|
||||
default: self.default.clone(),
|
||||
renderer: self.renderer.clone(),
|
||||
file_flags: self.file_flags,
|
||||
path: self.path.clone(),
|
||||
mime_override: self.mime_override.clone(),
|
||||
path_filter: self.path_filter.clone(),
|
||||
use_guards: self.use_guards.clone(),
|
||||
guards: self.guards.clone(),
|
||||
hidden_files: self.hidden_files,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Files {
|
||||
/// Create new `Files` instance for a specified base directory.
|
||||
///
|
||||
/// # Argument Order
|
||||
/// The first argument (`mount_path`) is the root URL at which the static files are served.
|
||||
/// For example, `/assets` will serve files at `example.com/assets/...`.
|
||||
///
|
||||
/// The second argument (`serve_from`) is the location on disk at which files are loaded.
|
||||
/// This can be a relative path. For example, `./` would serve files from the current
|
||||
/// working directory.
|
||||
///
|
||||
/// # Implementation Notes
|
||||
/// If the mount path is set as the root path `/`, services registered after this one will
|
||||
/// be inaccessible. Register more specific handlers and services first.
|
||||
///
|
||||
/// `Files` utilizes the existing Tokio thread-pool for blocking filesystem operations.
|
||||
/// The number of running threads is adjusted over time as needed, up to a maximum of 512 times
|
||||
/// the number of server [workers](actix_web::HttpServer::workers), by default.
|
||||
pub fn new<T: Into<PathBuf>>(mount_path: &str, serve_from: T) -> Files {
|
||||
let orig_dir = serve_from.into();
|
||||
let dir = match orig_dir.canonicalize() {
|
||||
Ok(canon_dir) => canon_dir,
|
||||
Err(_) => {
|
||||
log::error!("Specified path is not a directory: {:?}", orig_dir);
|
||||
PathBuf::new()
|
||||
}
|
||||
};
|
||||
|
||||
Files {
|
||||
path: mount_path.trim_end_matches('/').to_owned(),
|
||||
directory: dir,
|
||||
index: None,
|
||||
show_index: false,
|
||||
redirect_to_slash: false,
|
||||
default: Rc::new(RefCell::new(None)),
|
||||
renderer: Rc::new(directory_listing),
|
||||
mime_override: None,
|
||||
path_filter: None,
|
||||
file_flags: named::Flags::default(),
|
||||
use_guards: None,
|
||||
guards: Vec::new(),
|
||||
hidden_files: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Show files listing for directories.
|
||||
///
|
||||
/// By default show files listing is disabled.
|
||||
///
|
||||
/// When used with [`Files::index_file()`], files listing is shown as a fallback
|
||||
/// when the index file is not found.
|
||||
pub fn show_files_listing(mut self) -> Self {
|
||||
self.show_index = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Redirects to a slash-ended path when browsing a directory.
|
||||
///
|
||||
/// By default never redirect.
|
||||
pub fn redirect_to_slash_directory(mut self) -> Self {
|
||||
self.redirect_to_slash = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set custom directory renderer
|
||||
pub fn files_listing_renderer<F>(mut self, f: F) -> Self
|
||||
where
|
||||
for<'r, 's> F:
|
||||
Fn(&'r Directory, &'s HttpRequest) -> Result<ServiceResponse, io::Error> + 'static,
|
||||
{
|
||||
self.renderer = Rc::new(f);
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies mime override callback
|
||||
pub fn mime_override<F>(mut self, f: F) -> Self
|
||||
where
|
||||
F: Fn(&mime::Name<'_>) -> DispositionType + 'static,
|
||||
{
|
||||
self.mime_override = Some(Rc::new(f));
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets path filtering closure.
|
||||
///
|
||||
/// The path provided to the closure is relative to `serve_from` path.
|
||||
/// You can safely join this path with the `serve_from` path to get the real path.
|
||||
/// However, the real path may not exist since the filter is called before checking path existence.
|
||||
///
|
||||
/// When a path doesn't pass the filter, [`Files::default_handler`] is called if set, otherwise,
|
||||
/// `404 Not Found` is returned.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use std::path::Path;
|
||||
/// use actix_files::Files;
|
||||
///
|
||||
/// // prevent searching subdirectories and following symlinks
|
||||
/// let files_service = Files::new("/", "./static").path_filter(|path, _| {
|
||||
/// path.components().count() == 1
|
||||
/// && Path::new("./static")
|
||||
/// .join(path)
|
||||
/// .symlink_metadata()
|
||||
/// .map(|m| !m.file_type().is_symlink())
|
||||
/// .unwrap_or(false)
|
||||
/// });
|
||||
/// ```
|
||||
pub fn path_filter<F>(mut self, f: F) -> Self
|
||||
where
|
||||
F: Fn(&Path, &RequestHead) -> bool + 'static,
|
||||
{
|
||||
self.path_filter = Some(Rc::new(f));
|
||||
self
|
||||
}
|
||||
|
||||
/// Set index file
|
||||
///
|
||||
/// Shows specific index file for directories instead of
|
||||
/// showing files listing.
|
||||
///
|
||||
/// If the index file is not found, files listing is shown as a fallback if
|
||||
/// [`Files::show_files_listing()`] is set.
|
||||
pub fn index_file<T: Into<String>>(mut self, index: T) -> Self {
|
||||
self.index = Some(index.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies whether to use ETag or not.
|
||||
///
|
||||
/// Default is true.
|
||||
pub fn use_etag(mut self, value: bool) -> Self {
|
||||
self.file_flags.set(named::Flags::ETAG, value);
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies whether to use Last-Modified or not.
|
||||
///
|
||||
/// Default is true.
|
||||
pub fn use_last_modified(mut self, value: bool) -> Self {
|
||||
self.file_flags.set(named::Flags::LAST_MD, value);
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies whether text responses should signal a UTF-8 encoding.
|
||||
///
|
||||
/// Default is false (but will default to true in a future version).
|
||||
pub fn prefer_utf8(mut self, value: bool) -> Self {
|
||||
self.file_flags.set(named::Flags::PREFER_UTF8, value);
|
||||
self
|
||||
}
|
||||
|
||||
/// Adds a routing guard.
|
||||
///
|
||||
/// Use this to allow multiple chained file services that respond to strictly different
|
||||
/// properties of a request. Due to the way routing works, if a guard check returns true and the
|
||||
/// request starts being handled by the file service, it will not be able to back-out and try
|
||||
/// the next service, you will simply get a 404 (or 405) error response.
|
||||
///
|
||||
/// To allow `POST` requests to retrieve files, see [`Files::use_guards`].
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use actix_web::{guard::Header, App};
|
||||
/// use actix_files::Files;
|
||||
///
|
||||
/// App::new().service(
|
||||
/// Files::new("/","/my/site/files")
|
||||
/// .guard(Header("Host", "example.com"))
|
||||
/// );
|
||||
/// ```
|
||||
pub fn guard<G: Guard + 'static>(mut self, guard: G) -> Self {
|
||||
self.guards.push(Rc::new(guard));
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies guard to check before fetching directory listings or files.
|
||||
///
|
||||
/// Note that this guard has no effect on routing; its main use is to guard on the request's
|
||||
/// method just before serving the file, only allowing `GET` and `HEAD` requests by default.
|
||||
/// See [`Files::guard`] for routing guards.
|
||||
pub fn method_guard<G: Guard + 'static>(mut self, guard: G) -> Self {
|
||||
self.use_guards = Some(Rc::new(guard));
|
||||
self
|
||||
}
|
||||
|
||||
/// See [`Files::method_guard`].
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.6.0", note = "Renamed to `method_guard`.")]
|
||||
pub fn use_guards<G: Guard + 'static>(self, guard: G) -> Self {
|
||||
self.method_guard(guard)
|
||||
}
|
||||
|
||||
/// Disable `Content-Disposition` header.
|
||||
///
|
||||
/// By default, the `Content-Disposition` header is enabled.
|
||||
pub fn disable_content_disposition(mut self) -> Self {
|
||||
self.file_flags.remove(named::Flags::CONTENT_DISPOSITION);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets default handler which is used when no matched file could be found.
|
||||
///
|
||||
/// # Examples
|
||||
/// Setting a fallback static file handler:
|
||||
/// ```
|
||||
/// use actix_files::{Files, NamedFile};
|
||||
/// use actix_web::dev::{ServiceRequest, ServiceResponse, fn_service};
|
||||
///
|
||||
/// # fn run() -> Result<(), actix_web::Error> {
|
||||
/// let files = Files::new("/", "./static")
|
||||
/// .index_file("index.html")
|
||||
/// .default_handler(fn_service(|req: ServiceRequest| async {
|
||||
/// let (req, _) = req.into_parts();
|
||||
/// let file = NamedFile::open_async("./static/404.html").await?;
|
||||
/// let res = file.into_response(&req);
|
||||
/// Ok(ServiceResponse::new(req, res))
|
||||
/// }));
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn default_handler<F, U>(mut self, f: F) -> Self
|
||||
where
|
||||
F: IntoServiceFactory<U, ServiceRequest>,
|
||||
U: ServiceFactory<
|
||||
ServiceRequest,
|
||||
Config = (),
|
||||
Response = ServiceResponse,
|
||||
Error = Error,
|
||||
> + 'static,
|
||||
{
|
||||
// create and configure default resource
|
||||
self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::factory(
|
||||
f.into_factory().map_init_err(|_| ()),
|
||||
)))));
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
/// Enables serving hidden files and directories, allowing leading dots in URL fragments.
|
||||
pub fn use_hidden_files(mut self) -> Self {
|
||||
self.hidden_files = true;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl HttpServiceFactory for Files {
|
||||
fn register(mut self, config: &mut AppService) {
|
||||
let guards = if self.guards.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let guards = std::mem::take(&mut self.guards);
|
||||
Some(
|
||||
guards
|
||||
.into_iter()
|
||||
.map(|guard| -> Box<dyn Guard> { Box::new(guard) })
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
};
|
||||
|
||||
if self.default.borrow().is_none() {
|
||||
*self.default.borrow_mut() = Some(config.default_service());
|
||||
}
|
||||
|
||||
let rdef = if config.is_root() {
|
||||
ResourceDef::root_prefix(&self.path)
|
||||
} else {
|
||||
ResourceDef::prefix(&self.path)
|
||||
};
|
||||
|
||||
config.register_service(rdef, guards, self, None)
|
||||
}
|
||||
}
|
||||
|
||||
impl ServiceFactory<ServiceRequest> for Files {
|
||||
type Response = ServiceResponse;
|
||||
type Error = Error;
|
||||
type Config = ();
|
||||
type Service = FilesService;
|
||||
type InitError = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
let mut inner = FilesServiceInner {
|
||||
directory: self.directory.clone(),
|
||||
index: self.index.clone(),
|
||||
show_index: self.show_index,
|
||||
redirect_to_slash: self.redirect_to_slash,
|
||||
default: None,
|
||||
renderer: self.renderer.clone(),
|
||||
mime_override: self.mime_override.clone(),
|
||||
path_filter: self.path_filter.clone(),
|
||||
file_flags: self.file_flags,
|
||||
guards: self.use_guards.clone(),
|
||||
hidden_files: self.hidden_files,
|
||||
};
|
||||
|
||||
if let Some(ref default) = *self.default.borrow() {
|
||||
let fut = default.new_service(());
|
||||
Box::pin(async {
|
||||
match fut.await {
|
||||
Ok(default) => {
|
||||
inner.default = Some(default);
|
||||
Ok(FilesService(Rc::new(inner)))
|
||||
}
|
||||
Err(_) => Err(()),
|
||||
}
|
||||
})
|
||||
} else {
|
||||
Box::pin(async move { Ok(FilesService(Rc::new(inner))) })
|
||||
}
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,45 +1,78 @@
|
||||
use std::fs::{File, Metadata};
|
||||
use std::io;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
#[cfg(unix)]
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
use std::{
|
||||
fmt,
|
||||
fs::Metadata,
|
||||
io,
|
||||
path::{Path, PathBuf},
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
|
||||
use actix_service::{Service, ServiceFactory};
|
||||
use actix_web::{
|
||||
body::{self, BoxBody, SizedStream},
|
||||
dev::{
|
||||
AppService, BodyEncoding, HttpServiceFactory, ResourceDef, ServiceRequest,
|
||||
ServiceResponse,
|
||||
},
|
||||
http::{
|
||||
header::{
|
||||
self, Charset, ContentDisposition, ContentEncoding, DispositionParam,
|
||||
DispositionType, ExtendedValue,
|
||||
},
|
||||
StatusCode,
|
||||
},
|
||||
Error, HttpMessage, HttpRequest, HttpResponse, Responder,
|
||||
};
|
||||
use bitflags::bitflags;
|
||||
use derive_more::{Deref, DerefMut};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use mime_guess::from_path;
|
||||
|
||||
use actix_http::body::SizedStream;
|
||||
use actix_web::dev::BodyEncoding;
|
||||
use actix_web::http::header::{
|
||||
self, Charset, ContentDisposition, DispositionParam, DispositionType, ExtendedValue,
|
||||
};
|
||||
use actix_web::http::{ContentEncoding, StatusCode};
|
||||
use actix_web::{Error, HttpMessage, HttpRequest, HttpResponse, Responder};
|
||||
use futures_util::future::{ready, Ready};
|
||||
|
||||
use crate::range::HttpRange;
|
||||
use crate::ChunkedReadFile;
|
||||
use crate::{encoding::equiv_utf8_text, range::HttpRange};
|
||||
|
||||
bitflags! {
|
||||
pub(crate) struct Flags: u8 {
|
||||
const ETAG = 0b0000_0001;
|
||||
const LAST_MD = 0b0000_0010;
|
||||
const ETAG = 0b0000_0001;
|
||||
const LAST_MD = 0b0000_0010;
|
||||
const CONTENT_DISPOSITION = 0b0000_0100;
|
||||
const PREFER_UTF8 = 0b0000_1000;
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Flags {
|
||||
fn default() -> Self {
|
||||
Flags::all()
|
||||
Flags::from_bits_truncate(0b0000_0111)
|
||||
}
|
||||
}
|
||||
|
||||
/// A file with an associated name.
|
||||
#[derive(Debug)]
|
||||
///
|
||||
/// `NamedFile` can be registered as services:
|
||||
/// ```
|
||||
/// use actix_web::App;
|
||||
/// use actix_files::NamedFile;
|
||||
///
|
||||
/// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
|
||||
/// let file = NamedFile::open_async("./static/index.html").await?;
|
||||
/// let app = App::new().service(file);
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
///
|
||||
/// They can also be returned from handlers:
|
||||
/// ```
|
||||
/// use actix_web::{Responder, get};
|
||||
/// use actix_files::NamedFile;
|
||||
///
|
||||
/// #[get("/")]
|
||||
/// async fn index() -> impl Responder {
|
||||
/// NamedFile::open_async("./static/index.html").await
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Deref, DerefMut)]
|
||||
pub struct NamedFile {
|
||||
path: PathBuf,
|
||||
#[deref]
|
||||
#[deref_mut]
|
||||
file: File,
|
||||
modified: Option<SystemTime>,
|
||||
pub(crate) md: Metadata,
|
||||
@@ -50,6 +83,39 @@ pub struct NamedFile {
|
||||
pub(crate) encoding: Option<ContentEncoding>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for NamedFile {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("NamedFile")
|
||||
.field("path", &self.path)
|
||||
.field(
|
||||
"file",
|
||||
#[cfg(feature = "experimental-io-uring")]
|
||||
{
|
||||
&"tokio_uring::File"
|
||||
},
|
||||
#[cfg(not(feature = "experimental-io-uring"))]
|
||||
{
|
||||
&self.file
|
||||
},
|
||||
)
|
||||
.field("modified", &self.modified)
|
||||
.field("md", &self.md)
|
||||
.field("flags", &self.flags)
|
||||
.field("status_code", &self.status_code)
|
||||
.field("content_type", &self.content_type)
|
||||
.field("content_disposition", &self.content_disposition)
|
||||
.field("encoding", &self.encoding)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "experimental-io-uring"))]
|
||||
pub(crate) use std::fs::File;
|
||||
#[cfg(feature = "experimental-io-uring")]
|
||||
pub(crate) use tokio_uring::fs::File;
|
||||
|
||||
use super::chunked;
|
||||
|
||||
impl NamedFile {
|
||||
/// Creates an instance from a previously opened file.
|
||||
///
|
||||
@@ -57,8 +123,7 @@ impl NamedFile {
|
||||
/// `ContentDisposition` headers.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// ```ignore
|
||||
/// use actix_files::NamedFile;
|
||||
/// use std::io::{self, Write};
|
||||
/// use std::env;
|
||||
@@ -89,12 +154,20 @@ impl NamedFile {
|
||||
};
|
||||
|
||||
let ct = from_path(&path).first_or_octet_stream();
|
||||
|
||||
let disposition = match ct.type_() {
|
||||
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
|
||||
mime::APPLICATION => match ct.subtype() {
|
||||
mime::JAVASCRIPT | mime::JSON => DispositionType::Inline,
|
||||
name if name == "wasm" => DispositionType::Inline,
|
||||
_ => DispositionType::Attachment,
|
||||
},
|
||||
_ => DispositionType::Attachment,
|
||||
};
|
||||
|
||||
let mut parameters =
|
||||
vec![DispositionParam::Filename(String::from(filename.as_ref()))];
|
||||
|
||||
if !filename.is_ascii() {
|
||||
parameters.push(DispositionParam::FilenameExt(ExtendedValue {
|
||||
charset: Charset::Ext(String::from("UTF-8")),
|
||||
@@ -102,16 +175,42 @@ impl NamedFile {
|
||||
value: filename.into_owned().into_bytes(),
|
||||
}))
|
||||
}
|
||||
|
||||
let cd = ContentDisposition {
|
||||
disposition,
|
||||
parameters,
|
||||
};
|
||||
|
||||
(ct, cd)
|
||||
};
|
||||
|
||||
let md = file.metadata()?;
|
||||
let md = {
|
||||
#[cfg(not(feature = "experimental-io-uring"))]
|
||||
{
|
||||
file.metadata()?
|
||||
}
|
||||
|
||||
#[cfg(feature = "experimental-io-uring")]
|
||||
{
|
||||
use std::os::unix::prelude::{AsRawFd, FromRawFd};
|
||||
|
||||
let fd = file.as_raw_fd();
|
||||
|
||||
// SAFETY: fd is borrowed and lives longer than the unsafe block
|
||||
unsafe {
|
||||
let file = std::fs::File::from_raw_fd(fd);
|
||||
let md = file.metadata();
|
||||
// SAFETY: forget the fd before exiting block in success or error case but don't
|
||||
// run destructor (that would close file handle)
|
||||
std::mem::forget(file);
|
||||
md?
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let modified = md.modified().ok();
|
||||
let encoding = None;
|
||||
|
||||
Ok(NamedFile {
|
||||
path,
|
||||
file,
|
||||
@@ -125,17 +224,45 @@ impl NamedFile {
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "experimental-io-uring"))]
|
||||
/// Attempts to open a file in read-only mode.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// ```
|
||||
/// use actix_files::NamedFile;
|
||||
///
|
||||
/// let file = NamedFile::open("foo.txt");
|
||||
/// ```
|
||||
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
|
||||
Self::from_file(File::open(&path)?, path)
|
||||
let file = File::open(&path)?;
|
||||
Self::from_file(file, path)
|
||||
}
|
||||
|
||||
/// Attempts to open a file asynchronously in read-only mode.
|
||||
///
|
||||
/// When the `experimental-io-uring` crate feature is enabled, this will be async.
|
||||
/// Otherwise, it will be just like [`open`][Self::open].
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use actix_files::NamedFile;
|
||||
/// # async fn open() {
|
||||
/// let file = NamedFile::open_async("foo.txt").await.unwrap();
|
||||
/// # }
|
||||
/// ```
|
||||
pub async fn open_async<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
|
||||
let file = {
|
||||
#[cfg(not(feature = "experimental-io-uring"))]
|
||||
{
|
||||
File::open(&path)?
|
||||
}
|
||||
|
||||
#[cfg(feature = "experimental-io-uring")]
|
||||
{
|
||||
File::open(&path).await?
|
||||
}
|
||||
};
|
||||
|
||||
Self::from_file(file, path)
|
||||
}
|
||||
|
||||
/// Returns reference to the underlying `File` object.
|
||||
@@ -147,13 +274,12 @@ impl NamedFile {
|
||||
/// Retrieve the path of this file.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// ```
|
||||
/// # use std::io;
|
||||
/// use actix_files::NamedFile;
|
||||
///
|
||||
/// # fn path() -> io::Result<()> {
|
||||
/// let file = NamedFile::open("test.txt")?;
|
||||
/// # async fn path() -> io::Result<()> {
|
||||
/// let file = NamedFile::open_async("test.txt").await?;
|
||||
/// assert_eq!(file.path().as_os_str(), "foo.txt");
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
@@ -179,11 +305,13 @@ impl NamedFile {
|
||||
|
||||
/// Set the Content-Disposition for serving this file. This allows
|
||||
/// changing the inline/attachment disposition as well as the filename
|
||||
/// sent to the peer. By default the disposition is `inline` for text,
|
||||
/// image, and video content types, and `attachment` otherwise, and
|
||||
/// the filename is taken from the path provided in the `open` method
|
||||
/// sent to the peer.
|
||||
///
|
||||
/// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and
|
||||
/// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise,
|
||||
/// and the filename is taken from the path provided in the `open` method
|
||||
/// after converting it to UTF-8 using.
|
||||
/// [to_string_lossy](https://doc.rust-lang.org/std/ffi/struct.OsStr.html#method.to_string_lossy).
|
||||
/// [`std::ffi::OsStr::to_string_lossy`]
|
||||
#[inline]
|
||||
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
|
||||
self.content_disposition = cd;
|
||||
@@ -201,38 +329,53 @@ impl NamedFile {
|
||||
}
|
||||
|
||||
/// Set content encoding for serving this file
|
||||
///
|
||||
/// Must be used with [`actix_web::middleware::Compress`] to take effect.
|
||||
#[inline]
|
||||
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
|
||||
self.encoding = Some(enc);
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
///Specifies whether to use ETag or not.
|
||||
/// Specifies whether to use ETag or not.
|
||||
///
|
||||
///Default is true.
|
||||
/// Default is true.
|
||||
#[inline]
|
||||
pub fn use_etag(mut self, value: bool) -> Self {
|
||||
self.flags.set(Flags::ETAG, value);
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
///Specifies whether to use Last-Modified or not.
|
||||
/// Specifies whether to use Last-Modified or not.
|
||||
///
|
||||
///Default is true.
|
||||
/// Default is true.
|
||||
#[inline]
|
||||
pub fn use_last_modified(mut self, value: bool) -> Self {
|
||||
self.flags.set(Flags::LAST_MD, value);
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies whether text responses should signal a UTF-8 encoding.
|
||||
///
|
||||
/// Default is false (but will default to true in a future version).
|
||||
#[inline]
|
||||
pub fn prefer_utf8(mut self, value: bool) -> Self {
|
||||
self.flags.set(Flags::PREFER_UTF8, value);
|
||||
self
|
||||
}
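Taken together, these builder methods control the conditional-request headers and charset/encoding behaviour of a single file. A minimal usage sketch (the path is illustrative, not part of the diff):

    use actix_files::NamedFile;
    use actix_web::http::header::ContentEncoding;

    async fn configured_file() -> std::io::Result<NamedFile> {
        Ok(NamedFile::open_async("./static/index.html")
            .await?
            .use_etag(true)
            .use_last_modified(true)
            .prefer_utf8(true)
            // only meaningful together with the Compress middleware
            .set_content_encoding(ContentEncoding::Identity))
    }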
|
||||
|
||||
/// Creates an ETag in a format similar to Apache's.
|
||||
pub(crate) fn etag(&self) -> Option<header::EntityTag> {
|
||||
// This etag format is similar to Apache's.
|
||||
self.modified.as_ref().map(|mtime| {
|
||||
let ino = {
|
||||
#[cfg(unix)]
|
||||
{
|
||||
#[cfg(unix)]
|
||||
use std::os::unix::fs::MetadataExt as _;
|
||||
|
||||
self.md.ino()
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
0
|
||||
@@ -242,6 +385,7 @@ impl NamedFile {
|
||||
let dur = mtime
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("modification time must be after epoch");
|
||||
|
||||
header::EntityTag::strong(format!(
|
||||
"{:x}:{:x}:{:x}:{:x}",
|
||||
ino,
|
||||
@@ -256,27 +400,32 @@ impl NamedFile {
|
||||
self.modified.map(|mtime| mtime.into())
|
||||
}
|
||||
|
||||
pub fn into_response(self, req: &HttpRequest) -> Result<HttpResponse, Error> {
|
||||
/// Creates an `HttpResponse` with the file as a streaming body.
|
||||
pub fn into_response(self, req: &HttpRequest) -> HttpResponse<BoxBody> {
|
||||
if self.status_code != StatusCode::OK {
|
||||
let mut resp = HttpResponse::build(self.status_code);
|
||||
resp.set(header::ContentType(self.content_type.clone()))
|
||||
.if_true(self.flags.contains(Flags::CONTENT_DISPOSITION), |res| {
|
||||
res.header(
|
||||
header::CONTENT_DISPOSITION,
|
||||
self.content_disposition.to_string(),
|
||||
);
|
||||
});
|
||||
if let Some(current_encoding) = self.encoding {
|
||||
resp.encoding(current_encoding);
|
||||
let mut res = HttpResponse::build(self.status_code);
|
||||
|
||||
if self.flags.contains(Flags::PREFER_UTF8) {
|
||||
let ct = equiv_utf8_text(self.content_type.clone());
|
||||
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
|
||||
} else {
|
||||
res.insert_header((header::CONTENT_TYPE, self.content_type.to_string()));
|
||||
}
|
||||
let reader = ChunkedReadFile {
|
||||
size: self.md.len(),
|
||||
offset: 0,
|
||||
file: Some(self.file),
|
||||
fut: None,
|
||||
counter: 0,
|
||||
};
|
||||
return Ok(resp.streaming(reader));
|
||||
|
||||
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
|
||||
res.insert_header((
|
||||
header::CONTENT_DISPOSITION,
|
||||
self.content_disposition.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if let Some(current_encoding) = self.encoding {
|
||||
res.encoding(current_encoding);
|
||||
}
|
||||
|
||||
let reader = chunked::new_chunked_read(self.md.len(), 0, self.file);
|
||||
|
||||
return res.streaming(reader);
|
||||
}
|
||||
|
||||
let etag = if self.flags.contains(Flags::ETAG) {
|
||||
@@ -284,6 +433,7 @@ impl NamedFile {
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let last_modified = if self.flags.contains(Flags::LAST_MD) {
|
||||
self.last_modified()
|
||||
} else {
|
||||
@@ -296,10 +446,11 @@ impl NamedFile {
|
||||
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
|
||||
(last_modified, req.get_header())
|
||||
{
|
||||
let t1: SystemTime = m.clone().into();
|
||||
let t2: SystemTime = since.clone().into();
|
||||
let t1: SystemTime = (*m).into();
|
||||
let t2: SystemTime = (*since).into();
|
||||
|
||||
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
|
||||
(Ok(t1), Ok(t2)) => t1 > t2,
|
||||
(Ok(t1), Ok(t2)) => t1.as_secs() > t2.as_secs(),
|
||||
_ => false,
|
||||
}
|
||||
} else {
|
||||
@@ -309,104 +460,93 @@ impl NamedFile {
|
||||
// check last modified
|
||||
let not_modified = if !none_match(etag.as_ref(), req) {
|
||||
true
|
||||
} else if req.headers().contains_key(&header::IF_NONE_MATCH) {
|
||||
} else if req.headers().contains_key(header::IF_NONE_MATCH) {
|
||||
false
|
||||
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
|
||||
(last_modified, req.get_header())
|
||||
{
|
||||
let t1: SystemTime = m.clone().into();
|
||||
let t2: SystemTime = since.clone().into();
|
||||
let t1: SystemTime = (*m).into();
|
||||
let t2: SystemTime = (*since).into();
|
||||
|
||||
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
|
||||
(Ok(t1), Ok(t2)) => t1 <= t2,
|
||||
(Ok(t1), Ok(t2)) => t1.as_secs() <= t2.as_secs(),
|
||||
_ => false,
|
||||
}
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
let mut resp = HttpResponse::build(self.status_code);
|
||||
resp.set(header::ContentType(self.content_type.clone()))
|
||||
.if_true(self.flags.contains(Flags::CONTENT_DISPOSITION), |res| {
|
||||
res.header(
|
||||
header::CONTENT_DISPOSITION,
|
||||
self.content_disposition.to_string(),
|
||||
);
|
||||
});
|
||||
// default compressing
|
||||
if let Some(current_encoding) = self.encoding {
|
||||
resp.encoding(current_encoding);
|
||||
let mut res = HttpResponse::build(self.status_code);
|
||||
|
||||
if self.flags.contains(Flags::PREFER_UTF8) {
|
||||
let ct = equiv_utf8_text(self.content_type.clone());
|
||||
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
|
||||
} else {
|
||||
res.insert_header((header::CONTENT_TYPE, self.content_type.to_string()));
|
||||
}
|
||||
|
||||
resp.if_some(last_modified, |lm, resp| {
|
||||
resp.set(header::LastModified(lm));
|
||||
})
|
||||
.if_some(etag, |etag, resp| {
|
||||
resp.set(header::ETag(etag));
|
||||
});
|
||||
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
|
||||
res.insert_header((
|
||||
header::CONTENT_DISPOSITION,
|
||||
self.content_disposition.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
resp.header(header::ACCEPT_RANGES, "bytes");
|
||||
// default compressing
|
||||
if let Some(current_encoding) = self.encoding {
|
||||
res.encoding(current_encoding);
|
||||
}
|
||||
|
||||
if let Some(lm) = last_modified {
|
||||
res.insert_header((header::LAST_MODIFIED, lm.to_string()));
|
||||
}
|
||||
|
||||
if let Some(etag) = etag {
|
||||
res.insert_header((header::ETAG, etag.to_string()));
|
||||
}
|
||||
|
||||
res.insert_header((header::ACCEPT_RANGES, "bytes"));
|
||||
|
||||
let mut length = self.md.len();
|
||||
let mut offset = 0;
|
||||
|
||||
// check for range header
|
||||
if let Some(ranges) = req.headers().get(&header::RANGE) {
|
||||
if let Ok(rangesheader) = ranges.to_str() {
|
||||
if let Ok(rangesvec) = HttpRange::parse(rangesheader, length) {
|
||||
length = rangesvec[0].length;
|
||||
offset = rangesvec[0].start;
|
||||
resp.encoding(ContentEncoding::Identity);
|
||||
resp.header(
|
||||
if let Some(ranges) = req.headers().get(header::RANGE) {
|
||||
if let Ok(ranges_header) = ranges.to_str() {
|
||||
if let Ok(ranges) = HttpRange::parse(ranges_header, length) {
|
||||
length = ranges[0].length;
|
||||
offset = ranges[0].start;
|
||||
|
||||
res.encoding(ContentEncoding::Identity);
|
||||
res.insert_header((
|
||||
header::CONTENT_RANGE,
|
||||
format!(
|
||||
"bytes {}-{}/{}",
|
||||
offset,
|
||||
offset + length - 1,
|
||||
self.md.len()
|
||||
),
|
||||
);
|
||||
format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()),
|
||||
));
|
||||
} else {
|
||||
resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
|
||||
return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
|
||||
res.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length)));
|
||||
return res.status(StatusCode::RANGE_NOT_SATISFIABLE).finish();
|
||||
};
|
||||
} else {
|
||||
return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
|
||||
return res.status(StatusCode::BAD_REQUEST).finish();
|
||||
};
|
||||
};
|
||||
|
||||
if precondition_failed {
|
||||
return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
|
||||
return res.status(StatusCode::PRECONDITION_FAILED).finish();
|
||||
} else if not_modified {
|
||||
return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
|
||||
return res
|
||||
.status(StatusCode::NOT_MODIFIED)
|
||||
.body(body::None::new())
|
||||
.map_into_boxed_body();
|
||||
}
|
||||
|
||||
let reader = ChunkedReadFile {
|
||||
offset,
|
||||
size: length,
|
||||
file: Some(self.file),
|
||||
fut: None,
|
||||
counter: 0,
|
||||
};
|
||||
let reader = chunked::new_chunked_read(length, offset, self.file);
|
||||
|
||||
if offset != 0 || length != self.md.len() {
|
||||
resp.status(StatusCode::PARTIAL_CONTENT);
|
||||
res.status(StatusCode::PARTIAL_CONTENT);
|
||||
}
|
||||
|
||||
Ok(resp.body(SizedStream::new(length, reader)))
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for NamedFile {
|
||||
type Target = File;
|
||||
|
||||
fn deref(&self) -> &File {
|
||||
&self.file
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for NamedFile {
|
||||
fn deref_mut(&mut self) -> &mut File {
|
||||
&mut self.file
|
||||
res.body(SizedStream::new(length, reader))
|
||||
}
|
||||
}
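`into_response` is also usable directly from a handler that wants the request in scope, which is exactly how `NamedFileService` and `FilesService` below consume it. A hedged sketch (route and file path are illustrative):

    use actix_files::NamedFile;
    use actix_web::{get, HttpRequest, HttpResponse};

    #[get("/report")]
    async fn report(req: HttpRequest) -> actix_web::Result<HttpResponse> {
        let file = NamedFile::open_async("./static/report.pdf").await?;
        // consumes the NamedFile; honours Range, ETag, and Last-Modified headers
        Ok(file.into_response(&req))
    }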
|
||||
|
||||
@@ -414,6 +554,7 @@ impl DerefMut for NamedFile {
|
||||
fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||
match req.get_header::<header::IfMatch>() {
|
||||
None | Some(header::IfMatch::Any) => true,
|
||||
|
||||
Some(header::IfMatch::Items(ref items)) => {
|
||||
if let Some(some_etag) = etag {
|
||||
for item in items {
|
||||
@@ -422,6 +563,7 @@ fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
}
|
||||
@@ -431,6 +573,7 @@ fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||
fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||
match req.get_header::<header::IfNoneMatch>() {
|
||||
Some(header::IfNoneMatch::Any) => false,
|
||||
|
||||
Some(header::IfNoneMatch::Items(ref items)) => {
|
||||
if let Some(some_etag) = etag {
|
||||
for item in items {
|
||||
@@ -439,17 +582,71 @@ fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
impl Responder for NamedFile {
|
||||
type Error = Error;
|
||||
type Future = Ready<Result<HttpResponse, Error>>;
|
||||
type Body = BoxBody;
|
||||
|
||||
fn respond_to(self, req: &HttpRequest) -> Self::Future {
|
||||
ready(self.into_response(req))
|
||||
fn respond_to(self, req: &HttpRequest) -> HttpResponse<Self::Body> {
|
||||
self.into_response(req)
|
||||
}
|
||||
}
|
||||
|
||||
impl ServiceFactory<ServiceRequest> for NamedFile {
|
||||
type Response = ServiceResponse;
|
||||
type Error = Error;
|
||||
type Config = ();
|
||||
type Service = NamedFileService;
|
||||
type InitError = ();
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
|
||||
|
||||
fn new_service(&self, _: ()) -> Self::Future {
|
||||
let service = NamedFileService {
|
||||
path: self.path.clone(),
|
||||
};
|
||||
|
||||
Box::pin(async move { Ok(service) })
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[derive(Debug)]
|
||||
pub struct NamedFileService {
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
impl Service<ServiceRequest> for NamedFileService {
|
||||
type Response = ServiceResponse;
|
||||
type Error = Error;
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
actix_service::always_ready!();
|
||||
|
||||
fn call(&self, req: ServiceRequest) -> Self::Future {
|
||||
let (req, _) = req.into_parts();
|
||||
|
||||
let path = self.path.clone();
|
||||
Box::pin(async move {
|
||||
let file = NamedFile::open_async(path).await?;
|
||||
let res = file.into_response(&req);
|
||||
Ok(ServiceResponse::new(req, res))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl HttpServiceFactory for NamedFile {
|
||||
fn register(self, config: &mut AppService) {
|
||||
config.register_service(
|
||||
ResourceDef::root_prefix(self.path.to_string_lossy().as_ref()),
|
||||
None,
|
||||
self,
|
||||
None,
|
||||
)
|
||||
}
|
||||
}
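With `Responder`, `Service`/`ServiceFactory`, and `HttpServiceFactory` all implemented, a `NamedFile` can also act as the fallback for a directory service. A hedged sketch of that pattern, assuming `fn_service` is available from `actix_web::dev` and that a `./static/404.html` exists (both assumptions, not part of the diff):

    use actix_files::{Files, NamedFile};
    use actix_web::dev::{fn_service, ServiceRequest, ServiceResponse};

    fn files_with_fallback() -> Files {
        Files::new("/", "./static")
            .index_file("index.html")
            .default_handler(fn_service(|req: ServiceRequest| async {
                // anything the directory lookup misses gets a fixed page instead
                let (req, _) = req.into_parts();
                let file = NamedFile::open_async("./static/404.html").await?;
                let res = file.into_response(&req);
                Ok(ServiceResponse::new(req, res))
            }))
    }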
|
||||
|
||||
140
actix-files/src/path_buf.rs
Normal file
@@ -0,0 +1,140 @@
|
||||
use std::{
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
use actix_utils::future::{ready, Ready};
|
||||
use actix_web::{dev::Payload, FromRequest, HttpRequest};
|
||||
|
||||
use crate::error::UriSegmentError;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub(crate) struct PathBufWrap(PathBuf);
|
||||
|
||||
impl FromStr for PathBufWrap {
|
||||
type Err = UriSegmentError;
|
||||
|
||||
fn from_str(path: &str) -> Result<Self, Self::Err> {
|
||||
Self::parse_path(path, false)
|
||||
}
|
||||
}
|
||||
|
||||
impl PathBufWrap {
|
||||
/// Parse a path, giving the choice of allowing hidden files to be considered valid segments.
|
||||
///
|
||||
/// Path traversal is guarded by this method.
|
||||
pub fn parse_path(path: &str, hidden_files: bool) -> Result<Self, UriSegmentError> {
|
||||
let mut buf = PathBuf::new();
|
||||
|
||||
for segment in path.split('/') {
|
||||
if segment == ".." {
|
||||
buf.pop();
|
||||
} else if !hidden_files && segment.starts_with('.') {
|
||||
return Err(UriSegmentError::BadStart('.'));
|
||||
} else if segment.starts_with('*') {
|
||||
return Err(UriSegmentError::BadStart('*'));
|
||||
} else if segment.ends_with(':') {
|
||||
return Err(UriSegmentError::BadEnd(':'));
|
||||
} else if segment.ends_with('>') {
|
||||
return Err(UriSegmentError::BadEnd('>'));
|
||||
} else if segment.ends_with('<') {
|
||||
return Err(UriSegmentError::BadEnd('<'));
|
||||
} else if segment.is_empty() {
|
||||
continue;
|
||||
} else if cfg!(windows) && segment.contains('\\') {
|
||||
return Err(UriSegmentError::BadChar('\\'));
|
||||
} else {
|
||||
buf.push(segment)
|
||||
}
|
||||
}
|
||||
|
||||
Ok(PathBufWrap(buf))
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<Path> for PathBufWrap {
|
||||
fn as_ref(&self) -> &Path {
|
||||
self.0.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl FromRequest for PathBufWrap {
|
||||
type Error = UriSegmentError;
|
||||
type Future = Ready<Result<Self, Self::Error>>;
|
||||
|
||||
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
|
||||
ready(req.match_info().path().parse())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::iter::FromIterator;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_path_buf() {
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/test/.tt").map(|t| t.0),
|
||||
Err(UriSegmentError::BadStart('.'))
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/test/*tt").map(|t| t.0),
|
||||
Err(UriSegmentError::BadStart('*'))
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/test/tt:").map(|t| t.0),
|
||||
Err(UriSegmentError::BadEnd(':'))
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/test/tt<").map(|t| t.0),
|
||||
Err(UriSegmentError::BadEnd('<'))
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/test/tt>").map(|t| t.0),
|
||||
Err(UriSegmentError::BadEnd('>'))
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/seg1/seg2/").unwrap().0,
|
||||
PathBuf::from_iter(vec!["seg1", "seg2"])
|
||||
);
|
||||
assert_eq!(
|
||||
PathBufWrap::from_str("/seg1/../seg2/").unwrap().0,
|
||||
PathBuf::from_iter(vec!["seg2"])
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_path() {
|
||||
assert_eq!(
|
||||
PathBufWrap::parse_path("/test/.tt", false).map(|t| t.0),
|
||||
Err(UriSegmentError::BadStart('.'))
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
PathBufWrap::parse_path("/test/.tt", true).unwrap().0,
|
||||
PathBuf::from_iter(vec!["test", ".tt"])
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn path_traversal() {
|
||||
assert_eq!(
|
||||
PathBufWrap::parse_path("/../README.md", false).unwrap().0,
|
||||
PathBuf::from_iter(vec!["README.md"])
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
PathBufWrap::parse_path("/../README.md", true).unwrap().0,
|
||||
PathBuf::from_iter(vec!["README.md"])
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
PathBufWrap::parse_path("/../../../../../../../../../../etc/passwd", false)
|
||||
.unwrap()
|
||||
.0,
|
||||
PathBuf::from_iter(vec!["etc/passwd"])
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,95 +1,35 @@
|
||||
use derive_more::{Display, Error};
|
||||
|
||||
/// HTTP Range header representation.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct HttpRange {
|
||||
/// Start of range.
|
||||
pub start: u64,
|
||||
|
||||
/// Length of range.
|
||||
pub length: u64,
|
||||
}
|
||||
|
||||
static PREFIX: &str = "bytes=";
|
||||
const PREFIX_LEN: usize = 6;
|
||||
#[derive(Debug, Clone, Display, Error)]
|
||||
#[display(fmt = "Parse HTTP Range failed")]
|
||||
pub struct ParseRangeErr(#[error(not(source))] ());
|
||||
|
||||
impl HttpRange {
|
||||
/// Parses Range HTTP header string as per RFC 2616.
|
||||
///
|
||||
/// `header` is the HTTP Range header (e.g. `bytes=0-9`).
/// `size` is the full size of the response (file).
|
||||
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ()> {
|
||||
if header.is_empty() {
|
||||
return Ok(Vec::new());
|
||||
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ParseRangeErr> {
|
||||
match http_range::HttpRange::parse(header, size) {
|
||||
Ok(ranges) => Ok(ranges
|
||||
.iter()
|
||||
.map(|range| HttpRange {
|
||||
start: range.start,
|
||||
length: range.length,
|
||||
})
|
||||
.collect()),
|
||||
Err(_) => Err(ParseRangeErr(())),
|
||||
}
|
||||
if !header.starts_with(PREFIX) {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
let size_sig = size as i64;
|
||||
let mut no_overlap = false;
|
||||
|
||||
let all_ranges: Vec<Option<HttpRange>> = header[PREFIX_LEN..]
|
||||
.split(',')
|
||||
.map(|x| x.trim())
|
||||
.filter(|x| !x.is_empty())
|
||||
.map(|ra| {
|
||||
let mut start_end_iter = ra.split('-');
|
||||
|
||||
let start_str = start_end_iter.next().ok_or(())?.trim();
|
||||
let end_str = start_end_iter.next().ok_or(())?.trim();
|
||||
|
||||
if start_str.is_empty() {
|
||||
// If no start is specified, end specifies the
|
||||
// range start relative to the end of the file.
|
||||
let mut length: i64 = end_str.parse().map_err(|_| ())?;
|
||||
|
||||
if length > size_sig {
|
||||
length = size_sig;
|
||||
}
|
||||
|
||||
Ok(Some(HttpRange {
|
||||
start: (size_sig - length) as u64,
|
||||
length: length as u64,
|
||||
}))
|
||||
} else {
|
||||
let start: i64 = start_str.parse().map_err(|_| ())?;
|
||||
|
||||
if start < 0 {
|
||||
return Err(());
|
||||
}
|
||||
if start >= size_sig {
|
||||
no_overlap = true;
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let length = if end_str.is_empty() {
|
||||
// If no end is specified, range extends to end of the file.
|
||||
size_sig - start
|
||||
} else {
|
||||
let mut end: i64 = end_str.parse().map_err(|_| ())?;
|
||||
|
||||
if start > end {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
if end >= size_sig {
|
||||
end = size_sig - 1;
|
||||
}
|
||||
|
||||
end - start + 1
|
||||
};
|
||||
|
||||
Ok(Some(HttpRange {
|
||||
start: start as u64,
|
||||
length: length as u64,
|
||||
}))
|
||||
}
|
||||
})
|
||||
.collect::<Result<_, _>>()?;
|
||||
|
||||
let ranges: Vec<HttpRange> = all_ranges.into_iter().filter_map(|x| x).collect();
|
||||
|
||||
if no_overlap && ranges.is_empty() {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
Ok(ranges)
|
||||
}
|
||||
}
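The rewritten `parse` delegates to the `http_range` crate and reports failures through the typed `ParseRangeErr` instead of `()`. A minimal usage sketch from inside the crate (values chosen for illustration):

    #[test]
    fn parse_basic_ranges() {
        // first five bytes, then bytes 10 through 14, of a 100-byte resource
        let ranges = HttpRange::parse("bytes=0-4,10-14", 100).unwrap();
        assert_eq!((ranges[0].start, ranges[0].length), (0, 5));
        assert_eq!((ranges[1].start, ranges[1].length), (10, 5));

        // malformed headers now surface as a ParseRangeErr
        assert!(HttpRange::parse("bytes=oops", 100).is_err());
    }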
|
||||
|
||||
@@ -330,8 +270,7 @@ mod tests {
|
||||
if expected.is_empty() {
|
||||
continue;
|
||||
} else {
|
||||
assert!(
|
||||
false,
|
||||
panic!(
|
||||
"parse({}, {}) returned error {:?}",
|
||||
header,
|
||||
size,
|
||||
@@ -343,28 +282,24 @@ mod tests {
|
||||
let got = res.unwrap();
|
||||
|
||||
if got.len() != expected.len() {
|
||||
assert!(
|
||||
false,
|
||||
panic!(
|
||||
"len(parseRange({}, {})) = {}, want {}",
|
||||
header,
|
||||
size,
|
||||
got.len(),
|
||||
expected.len()
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
for i in 0..expected.len() {
|
||||
if got[i].start != expected[i].start {
|
||||
assert!(
|
||||
false,
|
||||
panic!(
|
||||
"parseRange({}, {})[{}].start = {}, want {}",
|
||||
header, size, i, got[i].start, expected[i].start
|
||||
)
|
||||
}
|
||||
if got[i].length != expected[i].length {
|
||||
assert!(
|
||||
false,
|
||||
panic!(
|
||||
"parseRange({}, {})[{}].length = {}, want {}",
|
||||
header, size, i, got[i].length, expected[i].length
|
||||
)
|
||||
|
||||
195
actix-files/src/service.rs
Normal file
@@ -0,0 +1,195 @@
|
||||
use std::{fmt, io, ops::Deref, path::PathBuf, rc::Rc};
|
||||
|
||||
use actix_service::Service;
|
||||
use actix_web::{
|
||||
dev::{ServiceRequest, ServiceResponse},
|
||||
error::Error,
|
||||
guard::Guard,
|
||||
http::{header, Method},
|
||||
HttpResponse,
|
||||
};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
|
||||
use crate::{
|
||||
named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride, NamedFile,
|
||||
PathBufWrap, PathFilter,
|
||||
};
|
||||
|
||||
/// Assembled file serving service.
|
||||
#[derive(Clone)]
|
||||
pub struct FilesService(pub(crate) Rc<FilesServiceInner>);
|
||||
|
||||
impl Deref for FilesService {
|
||||
type Target = FilesServiceInner;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&*self.0
|
||||
}
|
||||
}
|
||||
|
||||
pub struct FilesServiceInner {
|
||||
pub(crate) directory: PathBuf,
|
||||
pub(crate) index: Option<String>,
|
||||
pub(crate) show_index: bool,
|
||||
pub(crate) redirect_to_slash: bool,
|
||||
pub(crate) default: Option<HttpService>,
|
||||
pub(crate) renderer: Rc<DirectoryRenderer>,
|
||||
pub(crate) mime_override: Option<Rc<MimeOverride>>,
|
||||
pub(crate) path_filter: Option<Rc<PathFilter>>,
|
||||
pub(crate) file_flags: named::Flags,
|
||||
pub(crate) guards: Option<Rc<dyn Guard>>,
|
||||
pub(crate) hidden_files: bool,
|
||||
}
|
||||
|
||||
impl fmt::Debug for FilesServiceInner {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("FilesServiceInner")
|
||||
}
|
||||
}
|
||||
|
||||
impl FilesService {
|
||||
async fn handle_err(
|
||||
&self,
|
||||
err: io::Error,
|
||||
req: ServiceRequest,
|
||||
) -> Result<ServiceResponse, Error> {
|
||||
log::debug!("error handling {}: {}", req.path(), err);
|
||||
|
||||
if let Some(ref default) = self.default {
|
||||
default.call(req).await
|
||||
} else {
|
||||
Ok(req.error_response(err))
|
||||
}
|
||||
}
|
||||
|
||||
fn serve_named_file(
|
||||
&self,
|
||||
req: ServiceRequest,
|
||||
mut named_file: NamedFile,
|
||||
) -> ServiceResponse {
|
||||
if let Some(ref mime_override) = self.mime_override {
|
||||
let new_disposition = mime_override(&named_file.content_type.type_());
|
||||
named_file.content_disposition.disposition = new_disposition;
|
||||
}
|
||||
named_file.flags = self.file_flags;
|
||||
|
||||
let (req, _) = req.into_parts();
|
||||
let res = named_file.into_response(&req);
|
||||
ServiceResponse::new(req, res)
|
||||
}
|
||||
|
||||
fn show_index(&self, req: ServiceRequest, path: PathBuf) -> ServiceResponse {
|
||||
let dir = Directory::new(self.directory.clone(), path);
|
||||
|
||||
let (req, _) = req.into_parts();
|
||||
|
||||
(self.renderer)(&dir, &req).unwrap_or_else(|e| ServiceResponse::from_err(e, req))
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for FilesService {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("FilesService")
|
||||
}
|
||||
}
|
||||
|
||||
impl Service<ServiceRequest> for FilesService {
|
||||
type Response = ServiceResponse;
|
||||
type Error = Error;
|
||||
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
|
||||
|
||||
actix_service::always_ready!();
|
||||
|
||||
fn call(&self, req: ServiceRequest) -> Self::Future {
|
||||
let is_method_valid = if let Some(guard) = &self.guards {
|
||||
// execute user defined guards
|
||||
(**guard).check(req.head())
|
||||
} else {
|
||||
// default behavior
|
||||
matches!(*req.method(), Method::HEAD | Method::GET)
|
||||
};
|
||||
|
||||
let this = self.clone();
|
||||
|
||||
Box::pin(async move {
|
||||
if !is_method_valid {
|
||||
return Ok(req.into_response(
|
||||
actix_web::HttpResponse::MethodNotAllowed()
|
||||
.insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8))
|
||||
.body("Request did not meet this resource's requirements."),
|
||||
));
|
||||
}
|
||||
|
||||
let real_path =
|
||||
match PathBufWrap::parse_path(req.match_info().path(), this.hidden_files) {
|
||||
Ok(item) => item,
|
||||
Err(e) => return Ok(req.error_response(e)),
|
||||
};
|
||||
|
||||
if let Some(filter) = &this.path_filter {
|
||||
if !filter(real_path.as_ref(), req.head()) {
|
||||
if let Some(ref default) = this.default {
|
||||
return default.call(req).await;
|
||||
} else {
|
||||
return Ok(
|
||||
req.into_response(actix_web::HttpResponse::NotFound().finish())
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// full file path
|
||||
let path = this.directory.join(&real_path);
|
||||
if let Err(err) = path.canonicalize() {
|
||||
return this.handle_err(err, req).await;
|
||||
}
|
||||
|
||||
if path.is_dir() {
|
||||
if this.redirect_to_slash
|
||||
&& !req.path().ends_with('/')
|
||||
&& (this.index.is_some() || this.show_index)
|
||||
{
|
||||
let redirect_to = format!("{}/", req.path());
|
||||
|
||||
return Ok(req.into_response(
|
||||
HttpResponse::Found()
|
||||
.insert_header((header::LOCATION, redirect_to))
|
||||
.finish(),
|
||||
));
|
||||
}
|
||||
|
||||
match this.index {
|
||||
Some(ref index) => {
|
||||
let named_path = path.join(index);
|
||||
match NamedFile::open_async(named_path).await {
|
||||
Ok(named_file) => Ok(this.serve_named_file(req, named_file)),
|
||||
Err(_) if this.show_index => Ok(this.show_index(req, path)),
|
||||
Err(err) => this.handle_err(err, req).await,
|
||||
}
|
||||
}
|
||||
None if this.show_index => Ok(this.show_index(req, path)),
|
||||
_ => Ok(ServiceResponse::from_err(
|
||||
FilesError::IsDirectory,
|
||||
req.into_parts().0,
|
||||
)),
|
||||
}
|
||||
} else {
|
||||
match NamedFile::open_async(&path).await {
|
||||
Ok(mut named_file) => {
|
||||
if let Some(ref mime_override) = this.mime_override {
|
||||
let new_disposition =
|
||||
mime_override(&named_file.content_type.type_());
|
||||
named_file.content_disposition.disposition = new_disposition;
|
||||
}
|
||||
named_file.flags = this.file_flags;
|
||||
|
||||
let (req, _) = req.into_parts();
|
||||
let res = named_file.into_response(&req);
|
||||
Ok(ServiceResponse::new(req, res))
|
||||
}
|
||||
Err(err) => this.handle_err(err, req).await,
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
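The guard and path-filter hooks consulted in `call` are configured on the `Files` builder. A hedged sketch of a filter that only exposes `.txt` files, assuming the corresponding `path_filter` builder method on `Files` (directory and extension are illustrative):

    use actix_files::Files;

    fn txt_only() -> Files {
        // requests whose resolved path is not a `.txt` file fall through to the
        // default handler, or to a 404 response if none is configured
        Files::new("/", "./static")
            .path_filter(|path, _head| path.extension().map_or(false, |ext| ext == "txt"))
    }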
|
||||
38
actix-files/tests/encoding.rs
Normal file
@@ -0,0 +1,38 @@
|
||||
use actix_files::Files;
|
||||
use actix_web::{
|
||||
http::{
|
||||
header::{self, HeaderValue},
|
||||
StatusCode,
|
||||
},
|
||||
test::{self, TestRequest},
|
||||
App,
|
||||
};
|
||||
|
||||
#[actix_web::test]
|
||||
async fn test_utf8_file_contents() {
|
||||
// use default ISO-8859-1 encoding
|
||||
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
|
||||
|
||||
let req = TestRequest::with_uri("/utf8.txt").to_request();
|
||||
let res = test::call_service(&srv, req).await;
|
||||
|
||||
assert_eq!(res.status(), StatusCode::OK);
|
||||
assert_eq!(
|
||||
res.headers().get(header::CONTENT_TYPE),
|
||||
Some(&HeaderValue::from_static("text/plain")),
|
||||
);
|
||||
|
||||
// prefer UTF-8 encoding
|
||||
let srv =
|
||||
test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(true)))
|
||||
.await;
|
||||
|
||||
let req = TestRequest::with_uri("/utf8.txt").to_request();
|
||||
let res = test::call_service(&srv, req).await;
|
||||
|
||||
assert_eq!(res.status(), StatusCode::OK);
|
||||
assert_eq!(
|
||||
res.headers().get(header::CONTENT_TYPE),
|
||||
Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
|
||||
);
|
||||
}
|
||||
1
actix-files/tests/fixtures/guards/first/index.txt
vendored
Normal file
@@ -0,0 +1 @@
|
||||
first
|
||||
1
actix-files/tests/fixtures/guards/second/index.txt
vendored
Normal file
@@ -0,0 +1 @@
|
||||
second
|
||||
36
actix-files/tests/guard.rs
Normal file
@@ -0,0 +1,36 @@
|
||||
use actix_files::Files;
|
||||
use actix_web::{
|
||||
guard::Host,
|
||||
http::StatusCode,
|
||||
test::{self, TestRequest},
|
||||
App,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
|
||||
#[actix_web::test]
|
||||
async fn test_guard_filter() {
|
||||
let srv = test::init_service(
|
||||
App::new()
|
||||
.service(Files::new("/", "./tests/fixtures/guards/first").guard(Host("first.com")))
|
||||
.service(
|
||||
Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com")),
|
||||
),
|
||||
)
|
||||
.await;
|
||||
|
||||
let req = TestRequest::with_uri("/index.txt")
|
||||
.append_header(("Host", "first.com"))
|
||||
.to_request();
|
||||
let res = test::call_service(&srv, req).await;
|
||||
|
||||
assert_eq!(res.status(), StatusCode::OK);
|
||||
assert_eq!(test::read_body(res).await, Bytes::from("first"));
|
||||
|
||||
let req = TestRequest::with_uri("/index.txt")
|
||||
.append_header(("Host", "second.com"))
|
||||
.to_request();
|
||||
let res = test::call_service(&srv, req).await;
|
||||
|
||||
assert_eq!(res.status(), StatusCode::OK);
|
||||
assert_eq!(test::read_body(res).await, Bytes::from("second"));
|
||||
}
|
||||
1
actix-files/tests/symlink-test.png
Symbolic link
@@ -0,0 +1 @@
|
||||
test.png
|
||||
1
actix-files/tests/test.js
Normal file
@@ -0,0 +1 @@
|
||||
// this file is empty.
|
||||
27
actix-files/tests/traversal.rs
Normal file
@@ -0,0 +1,27 @@
|
||||
use actix_files::Files;
|
||||
use actix_web::{
|
||||
http::StatusCode,
|
||||
test::{self, TestRequest},
|
||||
App,
|
||||
};
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_directory_traversal_prevention() {
|
||||
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
|
||||
|
||||
let req =
|
||||
TestRequest::with_uri("/../../../../../../../../../../../etc/passwd").to_request();
|
||||
let res = test::call_service(&srv, req).await;
|
||||
assert_eq!(res.status(), StatusCode::NOT_FOUND);
|
||||
|
||||
let req = TestRequest::with_uri(
|
||||
"/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/passwd",
|
||||
)
|
||||
.to_request();
|
||||
let res = test::call_service(&srv, req).await;
|
||||
assert_eq!(res.status(), StatusCode::NOT_FOUND);
|
||||
|
||||
let req = TestRequest::with_uri("/%00/etc/passwd%00").to_request();
|
||||
let res = test::call_service(&srv, req).await;
|
||||
assert_eq!(res.status(), StatusCode::NOT_FOUND);
|
||||
}
|
||||
3
actix-files/tests/utf8.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
中文内容显示正确。
|
||||
|
||||
English is OK.
|
||||
@@ -1,3 +0,0 @@
|
||||
# Framed app for actix web
|
||||
|
||||
**This crate has been deprecated and removed.**
|
||||
134
actix-http-test/CHANGES.md
Normal file
@@ -0,0 +1,134 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2021-xx-xx
|
||||
|
||||
|
||||
## 3.0.0-beta.10 - 2021-12-27
|
||||
- Update `actix-server` to `2.0.0-rc.2`. [#2550]
|
||||
|
||||
[#2550]: https://github.com/actix/actix-web/pull/2550
|
||||
|
||||
|
||||
## 3.0.0-beta.9 - 2021-12-11
|
||||
- No significant changes since `3.0.0-beta.8`.
|
||||
|
||||
|
||||
## 3.0.0-beta.8 - 2021-11-30
|
||||
- Update `actix-tls` to `3.0.0-rc.1`. [#2474]
|
||||
|
||||
[#2474]: https://github.com/actix/actix-web/pull/2474
|
||||
|
||||
|
||||
## 3.0.0-beta.7 - 2021-11-22
|
||||
- Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408]
|
||||
|
||||
[#2408]: https://github.com/actix/actix-web/pull/2408
|
||||
|
||||
|
||||
## 3.0.0-beta.6 - 2021-11-15
|
||||
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
|
||||
- Update `actix-server` to `2.0.0-beta.9`. [#2442]
|
||||
- Minimum supported Rust version (MSRV) is now 1.52.
|
||||
|
||||
[#2442]: https://github.com/actix/actix-web/pull/2442
|
||||
|
||||
|
||||
## 3.0.0-beta.5 - 2021-09-09
|
||||
- Minimum supported Rust version (MSRV) is now 1.51.
|
||||
|
||||
|
||||
## 3.0.0-beta.4 - 2021-04-02
|
||||
- Added `TestServer::client_headers` method. [#2097]
|
||||
|
||||
[#2097]: https://github.com/actix/actix-web/pull/2097
|
||||
|
||||
|
||||
## 3.0.0-beta.3 - 2021-03-09
|
||||
- No notable changes.
|
||||
|
||||
|
||||
## 3.0.0-beta.2 - 2021-02-10
|
||||
- No notable changes.
|
||||
|
||||
|
||||
## 3.0.0-beta.1 - 2021-01-07
|
||||
- Update `bytes` to `1.0`. [#1813]
|
||||
|
||||
[#1813]: https://github.com/actix/actix-web/pull/1813
|
||||
|
||||
|
||||
## 2.1.0 - 2020-11-25
|
||||
- Add ability to set address for `TestServer`. [#1645]
|
||||
- Upgrade `base64` to `0.13`.
|
||||
- Upgrade `serde_urlencoded` to `0.7`. [#1773]
|
||||
|
||||
[#1773]: https://github.com/actix/actix-web/pull/1773
|
||||
[#1645]: https://github.com/actix/actix-web/pull/1645
|
||||
|
||||
|
||||
## 2.0.0 - 2020-09-11
|
||||
- Update actix-codec and actix-utils dependencies.
|
||||
|
||||
|
||||
## 2.0.0-alpha.1 - 2020-05-23
|
||||
- Update the `time` dependency to 0.2.7
|
||||
- Update `actix-connect` dependency to 2.0.0-alpha.2
|
||||
- Make `test_server` `async` fn.
|
||||
- Bump minimum supported Rust version to 1.40
|
||||
- Replace deprecated `net2` crate with `socket2`
|
||||
- Update `base64` dependency to 0.12
|
||||
- Update `env_logger` dependency to 0.7
|
||||
|
||||
## 1.0.0 - 2019-12-13
|
||||
- Replaced `TestServer::start()` with `test_server()`
|
||||
|
||||
|
||||
## 1.0.0-alpha.3 - 2019-12-07
|
||||
- Migrate to `std::future`
|
||||
|
||||
|
||||
## 0.2.5 - 2019-09-17
|
||||
- Update serde_urlencoded to "0.6.1"
|
||||
- Increase TestServerRuntime timeouts from 500ms to 3000ms
|
||||
- Do not override current `System`
|
||||
|
||||
|
||||
## 0.2.4 - 2019-07-18
|
||||
- Update actix-server to 0.6
|
||||
|
||||
|
||||
## 0.2.3 - 2019-07-16
|
||||
- Add `delete`, `options`, `patch` methods to `TestServerRunner`
|
||||
|
||||
|
||||
## 0.2.2 - 2019-06-16
|
||||
- Add .put() and .sput() methods
|
||||
|
||||
|
||||
## 0.2.1 - 2019-06-05
|
||||
- Add license files
|
||||
|
||||
|
||||
## 0.2.0 - 2019-05-12
|
||||
- Update awc and actix-http deps
|
||||
|
||||
|
||||
## 0.1.1 - 2019-04-24
|
||||
- Always make new connection for http client
|
||||
|
||||
|
||||
## 0.1.0 - 2019-04-16
|
||||
- No changes
|
||||
|
||||
|
||||
## 0.1.0-alpha.3 - 2019-04-02
|
||||
- Request functions accept path #743
|
||||
|
||||
|
||||
## 0.1.0-alpha.2 - 2019-03-29
|
||||
- Added TestServerRuntime::load_body() method
|
||||
- Update actix-http and awc libraries
|
||||
|
||||
|
||||
## 0.1.0-alpha.1 - 2019-03-28
|
||||
- Initial impl
|
||||
55
actix-http-test/Cargo.toml
Normal file
@@ -0,0 +1,55 @@
|
||||
[package]
|
||||
name = "actix-http-test"
|
||||
version = "3.0.0-beta.10"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "Various helpers for Actix applications to use during testing"
|
||||
keywords = ["http", "web", "framework", "async", "futures"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-web.git"
|
||||
categories = [
|
||||
"network-programming",
|
||||
"asynchronous",
|
||||
"web-programming::http-server",
|
||||
"web-programming::websocket",
|
||||
]
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
features = []
|
||||
|
||||
[lib]
|
||||
name = "actix_http_test"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
# openssl
|
||||
openssl = ["tls-openssl", "awc/openssl"]
|
||||
|
||||
[dependencies]
|
||||
actix-service = "2.0.0"
|
||||
actix-codec = "0.4.1"
|
||||
actix-tls = "3.0.0"
|
||||
actix-utils = "3.0.0"
|
||||
actix-rt = "2.2"
|
||||
actix-server = "2.0.0-rc.2"
|
||||
awc = { version = "3.0.0-beta.15", default-features = false }
|
||||
|
||||
base64 = "0.13"
|
||||
bytes = "1"
|
||||
futures-core = { version = "0.3.7", default-features = false }
|
||||
http = "0.2.5"
|
||||
log = "0.4"
|
||||
socket2 = "0.4"
|
||||
serde = "1.0"
|
||||
serde_json = "1.0"
|
||||
slab = "0.4"
|
||||
serde_urlencoded = "0.7"
|
||||
tls-openssl = { version = "0.10.9", package = "openssl", optional = true }
|
||||
tokio = { version = "1.8", features = ["sync"] }
|
||||
|
||||
[dev-dependencies]
|
||||
actix-web = { version = "4.0.0-beta.16", default-features = false, features = ["cookies"] }
|
||||
actix-http = "3.0.0-beta.17"
|
||||
17
actix-http-test/README.md
Normal file
@@ -0,0 +1,17 @@
|
||||
# actix-http-test
|
||||
|
||||
> Various helpers for Actix applications to use during testing.
|
||||
|
||||
[](https://crates.io/crates/actix-http-test)
|
||||
[](https://docs.rs/actix-http-test/3.0.0-beta.10)
|
||||
[](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
|
||||

|
||||
<br>
|
||||
[](https://deps.rs/crate/actix-http-test/3.0.0-beta.10)
|
||||
[](https://crates.io/crates/actix-http-test)
|
||||
[](https://discord.gg/NWpN5mmg3x)
|
||||
|
||||
## Documentation & Resources
|
||||
|
||||
- [API Documentation](https://docs.rs/actix-http-test)
|
||||
- Minimum Supported Rust Version (MSRV): 1.52
|
||||
@@ -1,40 +1,48 @@
|
||||
//! Various helpers for Actix applications to use during testing.
|
||||
use std::sync::mpsc;
|
||||
use std::{net, thread, time};
|
||||
|
||||
#![deny(rust_2018_idioms, nonstandard_style)]
|
||||
#![warn(future_incompatible)]
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
extern crate tls_openssl as openssl;
|
||||
|
||||
use std::{net, thread, time::Duration};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite, Framed};
|
||||
use actix_rt::{net::TcpStream, System};
|
||||
use actix_server::{Server, ServiceFactory};
|
||||
use awc::{error::PayloadError, ws, Client, ClientRequest, ClientResponse, Connector};
|
||||
use actix_server::{Server, ServerServiceFactory};
|
||||
use awc::{
|
||||
error::PayloadError, http::header::HeaderMap, ws, Client, ClientRequest, ClientResponse,
|
||||
Connector,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use futures_core::stream::Stream;
|
||||
use http::Method;
|
||||
use socket2::{Domain, Protocol, Socket, Type};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
pub use actix_testing::*;
|
||||
|
||||
/// Start test server
|
||||
/// Start test server.
|
||||
///
|
||||
/// `TestServer` is very simple test server that simplify process of writing
|
||||
/// integration tests cases for actix web applications.
|
||||
/// `TestServer` is a very simple test server that simplifies the process of writing integration test cases
|
||||
/// for HTTP applications.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// ```no_run
|
||||
/// use actix_http::HttpService;
|
||||
/// use actix_http_test::TestServer;
|
||||
/// use actix_http_test::test_server;
|
||||
/// use actix_web::{web, App, HttpResponse, Error};
|
||||
///
|
||||
/// async fn my_handler() -> Result<HttpResponse, Error> {
|
||||
/// Ok(HttpResponse::Ok().into())
|
||||
/// }
|
||||
///
|
||||
/// #[actix_rt::test]
|
||||
/// #[actix_web::test]
|
||||
/// async fn test_example() {
|
||||
/// let mut srv = TestServer::start(
|
||||
/// || HttpService::new(
|
||||
/// App::new().service(
|
||||
/// web::resource("/").to(my_handler))
|
||||
/// let mut srv = TestServer::start(||
|
||||
/// HttpService::new(
|
||||
/// App::new().service(web::resource("/").to(my_handler))
|
||||
/// )
|
||||
/// );
|
||||
///
|
||||
@@ -43,80 +51,91 @@ pub use actix_testing::*;
|
||||
/// assert!(response.status().is_success());
|
||||
/// }
|
||||
/// ```
|
||||
pub async fn test_server<F: ServiceFactory<TcpStream>>(factory: F) -> TestServer {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
pub async fn test_server<F: ServerServiceFactory<TcpStream>>(factory: F) -> TestServer {
|
||||
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
|
||||
test_server_with_addr(tcp, factory).await
|
||||
}
|
||||
|
||||
/// Start [`test server`](test_server()) on an existing address binding.
|
||||
pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>(
|
||||
tcp: net::TcpListener,
|
||||
factory: F,
|
||||
) -> TestServer {
|
||||
let (started_tx, started_rx) = std::sync::mpsc::channel();
|
||||
let (thread_stop_tx, thread_stop_rx) = mpsc::channel(1);
|
||||
|
||||
// run server in separate thread
|
||||
thread::spawn(move || {
|
||||
let sys = System::new("actix-test-server");
|
||||
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
|
||||
let local_addr = tcp.local_addr().unwrap();
|
||||
System::new().block_on(async move {
|
||||
let local_addr = tcp.local_addr().unwrap();
|
||||
|
||||
Server::build()
|
||||
.listen("test", tcp, factory)?
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.start();
|
||||
let srv = Server::build()
|
||||
.workers(1)
|
||||
.disable_signals()
|
||||
.system_exit()
|
||||
.listen("test", tcp, factory)
|
||||
.expect("test server could not be created");
|
||||
|
||||
tx.send((System::current(), local_addr)).unwrap();
|
||||
sys.run()
|
||||
let srv = srv.run();
|
||||
started_tx
|
||||
.send((System::current(), srv.handle(), local_addr))
|
||||
.unwrap();
|
||||
|
||||
// drive server loop
|
||||
srv.await.unwrap();
|
||||
});
|
||||
|
||||
// notify TestServer that server and system have shut down
|
||||
// all thread managed resources should be dropped at this point
|
||||
let _ = thread_stop_tx.send(());
|
||||
});
|
||||
|
||||
let (system, addr) = rx.recv().unwrap();
|
||||
let (system, server, addr) = started_rx.recv().unwrap();
|
||||
|
||||
let client = {
|
||||
#[cfg(feature = "openssl")]
|
||||
let connector = {
|
||||
#[cfg(feature = "openssl")]
|
||||
{
|
||||
use open_ssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
|
||||
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
|
||||
|
||||
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
|
||||
builder.set_verify(SslVerifyMode::NONE);
|
||||
let _ = builder
|
||||
.set_alpn_protos(b"\x02h2\x08http/1.1")
|
||||
.map_err(|e| log::error!("Can not set alpn protocol: {:?}", e));
|
||||
Connector::new()
|
||||
.conn_lifetime(time::Duration::from_secs(0))
|
||||
.timeout(time::Duration::from_millis(30000))
|
||||
.ssl(builder.build())
|
||||
.finish()
|
||||
}
|
||||
#[cfg(not(feature = "openssl"))]
|
||||
{
|
||||
Connector::new()
|
||||
.conn_lifetime(time::Duration::from_secs(0))
|
||||
.timeout(time::Duration::from_millis(30000))
|
||||
.finish()
|
||||
}
|
||||
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
|
||||
|
||||
builder.set_verify(SslVerifyMode::NONE);
|
||||
let _ = builder
|
||||
.set_alpn_protos(b"\x02h2\x08http/1.1")
|
||||
.map_err(|e| log::error!("Can not set alpn protocol: {:?}", e));
|
||||
|
||||
Connector::new()
|
||||
.conn_lifetime(Duration::from_secs(0))
|
||||
.timeout(Duration::from_millis(30000))
|
||||
.openssl(builder.build())
|
||||
};
|
||||
|
||||
Client::build().connector(connector).finish()
|
||||
#[cfg(not(feature = "openssl"))]
|
||||
let connector = {
|
||||
Connector::new()
|
||||
.conn_lifetime(Duration::from_secs(0))
|
||||
.timeout(Duration::from_millis(30000))
|
||||
};
|
||||
|
||||
Client::builder().connector(connector).finish()
|
||||
};
|
||||
actix_connect::start_default_resolver().await.unwrap();
|
||||
|
||||
TestServer {
|
||||
addr,
|
||||
server,
|
||||
client,
|
||||
system,
|
||||
addr,
|
||||
thread_stop_rx,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get first available unused address
|
||||
pub fn unused_addr() -> net::SocketAddr {
|
||||
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
|
||||
let socket =
|
||||
Socket::new(Domain::ipv4(), Type::stream(), Some(Protocol::tcp())).unwrap();
|
||||
socket.bind(&addr.into()).unwrap();
|
||||
socket.set_reuse_address(true).unwrap();
|
||||
let tcp = socket.into_tcp_listener();
|
||||
tcp.local_addr().unwrap()
|
||||
}
|
||||
|
||||
/// Test server controller
|
||||
pub struct TestServer {
|
||||
server: actix_server::ServerHandle,
|
||||
client: awc::Client,
|
||||
system: actix_rt::System,
|
||||
addr: net::SocketAddr,
|
||||
client: Client,
|
||||
system: System,
|
||||
thread_stop_rx: mpsc::Receiver<()>,
|
||||
}
|
||||
|
||||
impl TestServer {
|
||||
@@ -134,7 +153,7 @@ impl TestServer {
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct test https server url
|
||||
/// Construct test HTTPS server URL.
|
||||
pub fn surl(&self, uri: &str) -> String {
|
||||
if uri.starts_with('/') {
|
||||
format!("https://localhost:{}{}", self.addr.port(), uri)
|
||||
@@ -148,7 +167,7 @@ impl TestServer {
|
||||
self.client.get(self.url(path.as_ref()).as_str())
|
||||
}
|
||||
|
||||
/// Create https `GET` request
|
||||
/// Create HTTPS `GET` request
|
||||
pub fn sget<S: AsRef<str>>(&self, path: S) -> ClientRequest {
|
||||
self.client.get(self.surl(path.as_ref()).as_str())
|
||||
}
|
||||
@@ -158,7 +177,7 @@ impl TestServer {
|
||||
self.client.post(self.url(path.as_ref()).as_str())
|
||||
}
|
||||
|
||||
/// Create https `POST` request
|
||||
/// Create HTTPS `POST` request
|
||||
pub fn spost<S: AsRef<str>>(&self, path: S) -> ClientRequest {
|
||||
self.client.post(self.surl(path.as_ref()).as_str())
|
||||
}
|
||||
@@ -168,7 +187,7 @@ impl TestServer {
|
||||
self.client.head(self.url(path.as_ref()).as_str())
|
||||
}
|
||||
|
||||
/// Create https `HEAD` request
|
||||
/// Create HTTPS `HEAD` request
|
||||
pub fn shead<S: AsRef<str>>(&self, path: S) -> ClientRequest {
|
||||
self.client.head(self.surl(path.as_ref()).as_str())
|
||||
}
|
||||
@@ -178,7 +197,7 @@ impl TestServer {
|
||||
self.client.put(self.url(path.as_ref()).as_str())
|
||||
}
|
||||
|
||||
/// Create https `PUT` request
|
||||
/// Create HTTPS `PUT` request
|
||||
pub fn sput<S: AsRef<str>>(&self, path: S) -> ClientRequest {
|
||||
self.client.put(self.surl(path.as_ref()).as_str())
|
||||
}
|
||||
@@ -188,7 +207,7 @@ impl TestServer {
|
||||
self.client.patch(self.url(path.as_ref()).as_str())
|
||||
}
|
||||
|
||||
/// Create https `PATCH` request
|
||||
/// Create HTTPS `PATCH` request
|
||||
pub fn spatch<S: AsRef<str>>(&self, path: S) -> ClientRequest {
|
||||
self.client.patch(self.surl(path.as_ref()).as_str())
|
||||
}
|
||||
@@ -198,7 +217,7 @@ impl TestServer {
|
||||
self.client.delete(self.url(path.as_ref()).as_str())
|
||||
}
|
||||
|
||||
/// Create https `DELETE` request
|
||||
/// Create HTTPS `DELETE` request
|
||||
pub fn sdelete<S: AsRef<str>>(&self, path: S) -> ClientRequest {
|
||||
self.client.delete(self.surl(path.as_ref()).as_str())
|
||||
}
|
||||
@@ -208,12 +227,12 @@ impl TestServer {
|
||||
self.client.options(self.url(path.as_ref()).as_str())
|
||||
}
|
||||
|
||||
/// Create https `OPTIONS` request
|
||||
/// Create HTTPS `OPTIONS` request
|
||||
pub fn soptions<S: AsRef<str>>(&self, path: S) -> ClientRequest {
|
||||
self.client.options(self.surl(path.as_ref()).as_str())
|
||||
}
|
||||
|
||||
/// Connect to test http server
|
||||
/// Connect to test HTTP server
|
||||
pub fn request<S: AsRef<str>>(&self, method: Method, path: S) -> ClientRequest {
|
||||
self.client.request(method, path.as_ref())
|
||||
}
|
||||
@@ -228,33 +247,66 @@ impl TestServer {
|
||||
response.body().limit(10_485_760).await
|
||||
}
|
||||
|
||||
/// Connect to websocket server at a given path
|
||||
/// Connect to WebSocket server at a given path.
|
||||
pub async fn ws_at(
|
||||
&mut self,
|
||||
path: &str,
|
||||
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError>
|
||||
{
|
||||
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError> {
|
||||
let url = self.url(path);
|
||||
let connect = self.client.ws(url).connect();
|
||||
connect.await.map(|(_, framed)| framed)
|
||||
}
|
||||
|
||||
/// Connect to a websocket server
|
||||
/// Connect to a WebSocket server.
|
||||
pub async fn ws(
|
||||
&mut self,
|
||||
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError>
|
||||
{
|
||||
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError> {
|
||||
self.ws_at("/").await
|
||||
}
|
||||
|
||||
/// Stop http server
|
||||
fn stop(&mut self) {
|
||||
/// Get the default `HeaderMap` of the `Client`.
///
/// Returns `Some(&mut HeaderMap)` when the `Client` object is unique
/// (i.e. no other clone of the client exists at the same time).
|
||||
pub fn client_headers(&mut self) -> Option<&mut HeaderMap> {
|
||||
self.client.headers()
|
||||
}
|
||||
|
||||
/// Stop HTTP server.
|
||||
///
|
||||
/// Waits for spawned `Server` and `System` to (force) shutdown.
|
||||
pub async fn stop(&mut self) {
|
||||
// signal server to stop
|
||||
self.server.stop(false).await;
|
||||
|
||||
// also signal system to stop
|
||||
// though this is handled by `ServerBuilder::exit_system` too
|
||||
self.system.stop();
|
||||
|
||||
// wait for thread to be stopped but don't care about result
|
||||
let _ = self.thread_stop_rx.recv().await;
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TestServer {
|
||||
fn drop(&mut self) {
|
||||
self.stop()
|
||||
// calls in this Drop impl should be enough to shut down the server, system, and thread
|
||||
// without needing to await anything
|
||||
|
||||
// signal server to stop
|
||||
let _ = self.server.stop(true);
|
||||
|
||||
// signal system to stop
|
||||
self.system.stop();
|
||||
}
|
||||
}
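For context, a minimal, hedged sketch of the signal-from-`Drop` pattern the comments above describe: a handle that only sends a stop signal when dropped, without awaiting or joining anything. The types and names below (`WorkerHandle`, `spawn_worker`) are illustrative and not part of the crate.

```rust
use std::{sync::mpsc, thread};

/// Illustrative stand-in (not the crate's type): a worker thread whose handle
/// signals shutdown from `Drop`, mirroring the signal-only pattern above.
struct WorkerHandle {
    stop_tx: mpsc::Sender<()>,
}

impl Drop for WorkerHandle {
    fn drop(&mut self) {
        // Only signal; as in the Drop impl above, nothing is awaited here.
        let _ = self.stop_tx.send(());
    }
}

fn spawn_worker() -> (WorkerHandle, thread::JoinHandle<()>) {
    let (stop_tx, stop_rx) = mpsc::channel();
    let join = thread::spawn(move || {
        // Block until a stop signal (or a disconnected sender) arrives.
        let _ = stop_rx.recv();
    });
    (WorkerHandle { stop_tx }, join)
}

fn main() {
    let (handle, join) = spawn_worker();
    drop(handle); // signals the worker thread to stop
    join.join().expect("worker thread exited cleanly");
}
```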
|
||||
|
||||
/// Get a localhost socket address with random, unused port.
|
||||
pub fn unused_addr() -> net::SocketAddr {
|
||||
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
|
||||
let socket = Socket::new(Domain::IPV4, Type::STREAM, Some(Protocol::TCP)).unwrap();
|
||||
socket.bind(&addr.into()).unwrap();
|
||||
socket.set_reuse_address(true).unwrap();
|
||||
let tcp = net::TcpListener::from(socket);
|
||||
tcp.local_addr().unwrap()
|
||||
}
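As a hedged, std-only sketch of the port-reservation idea used by `unused_addr` above: bind to port 0 so the OS picks a free port, record the address, and let the listener drop so the server under test can claim it. The helper name below is illustrative (it skips the `socket2` reuse-address step) and is not part of the crate.

```rust
use std::net;

/// Illustrative stand-in for `unused_addr`, using only the standard library.
fn unused_addr_sketch() -> net::SocketAddr {
    // Port 0 asks the OS for any free port; `local_addr` reveals which one.
    let listener = net::TcpListener::bind("127.0.0.1:0").expect("bind ephemeral port");
    let addr = listener.local_addr().expect("read local address");
    // Dropping the listener frees the port again for the test server to bind.
    drop(listener);
    addr
}

fn main() {
    println!("free port found at {}", unused_addr_sketch());
}
```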
|
||||
@@ -1,406 +1,727 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased
|
||||
## Unreleased - 2021-xx-xx
|
||||
|
||||
|
||||
## [2.0.0-beta.3] - 2020-08-14
|
||||
## 3.0.0-beta.17 - 2021-12-27
|
||||
### Changes
|
||||
- `HeaderMap::get_all` now returns a `std::slice::Iter`. [#2527]
|
||||
- `Payload` inner fields are now named. [#2545]
|
||||
- `impl Stream` for `Payload` no longer requires the `Stream` variant be `Unpin`. [#2545]
|
||||
- `impl Future` for `h1::SendResponse` no longer requires the body type be `Unpin`. [#2545]
|
||||
- `impl Stream` for `encoding::Decoder` no longer requires the stream type be `Unpin`. [#2545]
|
||||
- Rename `PayloadStream` to `BoxedPayloadStream`. [#2545]
|
||||
|
||||
### Removed
|
||||
- `h1::Payload::readany`. [#2545]
|
||||
|
||||
[#2527]: https://github.com/actix/actix-web/pull/2527
|
||||
[#2545]: https://github.com/actix/actix-web/pull/2545
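A minimal, hedged illustration of the `HeaderMap::get_all` change in the entry above (it now returns a `std::slice::Iter` over the stored values). Import paths and the accepted argument form are assumptions based on the changelog wording.

```rust
use actix_http::header::HeaderMap;

/// Print every value stored under `set-cookie`, in insertion order.
fn print_set_cookie(headers: &HeaderMap) {
    // `get_all` yields a slice iterator over all values for the given name.
    for value in headers.get_all("set-cookie") {
        println!("set-cookie: {:?}", value);
    }
}
```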
|
||||
|
||||
|
||||
## 3.0.0-beta.16 - 2021-12-17
|
||||
### Added
|
||||
- New method on `MessageBody` trait, `try_into_bytes`, with default implementation, for optimizations on body types that complete in exactly one poll. Replaces `is_complete_body` and `take_complete_body`. [#2522]
|
||||
|
||||
### Changed
|
||||
- Rename trait `IntoHeaderPair => TryIntoHeaderPair`. [#2510]
|
||||
- Rename `TryIntoHeaderPair::{try_into_header_pair => try_into_pair}`. [#2510]
|
||||
- Rename trait `IntoHeaderValue => TryIntoHeaderValue`. [#2510]
|
||||
|
||||
### Removed
|
||||
- `MessageBody::{is_complete_body,take_complete_body}`. [#2522]
|
||||
|
||||
[#2510]: https://github.com/actix/actix-web/pull/2510
|
||||
[#2522]: https://github.com/actix/actix-web/pull/2522
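A short, hedged sketch of the `try_into_bytes` idea described in the entry above: bodies that resolve in exactly one poll can be converted without polling, while streaming bodies hand themselves back unchanged. The exact signature is an assumption based on the changelog wording.

```rust
use actix_http::body::MessageBody;

fn inspect_body(body: impl MessageBody) {
    // Succeeds for bodies that complete in one poll (e.g. `Bytes`).
    match body.try_into_bytes() {
        Ok(bytes) => println!("complete body: {} bytes", bytes.len()),
        Err(_body) => println!("streaming body: must be polled chunk by chunk"),
    }
}
```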
|
||||
|
||||
|
||||
## 3.0.0-beta.15 - 2021-12-11
|
||||
### Added
|
||||
- Add timeout for canceling HTTP/2 server side connection handshake. Default to 5 seconds. [#2483]
|
||||
- HTTP/2 handshake timeout can be configured with `ServiceConfig::client_timeout`. [#2483]
|
||||
- `Response::map_into_boxed_body`. [#2468]
|
||||
- `body::EitherBody` enum. [#2468]
|
||||
- `body::None` struct. [#2468]
|
||||
- Impl `MessageBody` for `bytestring::ByteString`. [#2468]
|
||||
- `impl Clone for ws::HandshakeError`. [#2468]
|
||||
- `#[must_use]` for `ws::Codec` to prevent subtle bugs. [#1920]
|
||||
- `impl Default` for `ws::Codec`. [#1920]
|
||||
- `header::QualityItem::{max, min}`. [#2486]
|
||||
- `header::Quality::{MAX, MIN}`. [#2486]
|
||||
- `impl Display` for `header::Quality`. [#2486]
|
||||
- Connection data set through the `on_connect_ext` callbacks is now accessible only from the new `Request::conn_data()` method. [#2491]
|
||||
- `Request::take_conn_data()`. [#2491]
|
||||
- `Request::take_req_data()`. [#2487]
|
||||
- `impl Clone` for `RequestHead`. [#2487]
|
||||
- New methods on `MessageBody` trait, `is_complete_body` and `take_complete_body`, both with default implementations, for optimizations on body types that are done in exactly one poll/chunk. [#2497]
|
||||
- New `boxed` method on `MessageBody` trait for wrapping body type. [#2520]
|
||||
|
||||
### Changed
|
||||
- Rename `body::BoxBody::{from_body => new}`. [#2468]
|
||||
- Body type for `Responses` returned from `Response::{new, ok, etc...}` is now `BoxBody`. [#2468]
|
||||
- The `Error` associated type on `MessageBody` type now requires `impl Error` (or similar). [#2468]
|
||||
- Error types used in service builders now require `Into<Response<BoxBody>>`. [#2468]
|
||||
- `From` implementations on error types now return a `Response<BoxBody>`. [#2468]
|
||||
- `ResponseBuilder::body(B)` now returns `Response<EitherBody<B>>`. [#2468]
|
||||
- `ResponseBuilder::finish()` now returns `Response<EitherBody<()>>`. [#2468]
|
||||
|
||||
### Removed
|
||||
- `ResponseBuilder::streaming`. [#2468]
|
||||
- `impl Future` for `ResponseBuilder`. [#2468]
|
||||
- Remove unnecessary `MessageBody` bound on types passed to `body::AnyBody::new`. [#2468]
|
||||
- Move `body::AnyBody` to `awc`. Replaced with `EitherBody` and `BoxBody`. [#2468]
|
||||
- `impl Copy` for `ws::Codec`. [#1920]
|
||||
- `header::qitem` helper. Replaced with `header::QualityItem::max`. [#2486]
|
||||
- `impl TryFrom<u16>` for `header::Quality`. [#2486]
|
||||
- `http` module. Most everything it contained is exported at the crate root. [#2488]
|
||||
|
||||
[#2483]: https://github.com/actix/actix-web/pull/2483
|
||||
[#2468]: https://github.com/actix/actix-web/pull/2468
|
||||
[#1920]: https://github.com/actix/actix-web/pull/1920
|
||||
[#2486]: https://github.com/actix/actix-web/pull/2486
|
||||
[#2487]: https://github.com/actix/actix-web/pull/2487
|
||||
[#2488]: https://github.com/actix/actix-web/pull/2488
|
||||
[#2491]: https://github.com/actix/actix-web/pull/2491
|
||||
[#2497]: https://github.com/actix/actix-web/pull/2497
|
||||
[#2520]: https://github.com/actix/actix-web/pull/2520
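A hedged sketch of the `boxed` method and `BoxBody` type mentioned in the entries above, useful when one concrete body type must be returned regardless of the body a handler started with. Names follow the changelog; the exact trait bounds are assumptions.

```rust
use actix_http::body::{BoxBody, MessageBody};

/// Erase a concrete body type into the type-erased `BoxBody`.
fn erase_body(body: impl MessageBody + 'static) -> BoxBody {
    body.boxed()
}
```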
|
||||
|
||||
|
||||
## 3.0.0-beta.14 - 2021-11-30
|
||||
### Changed
|
||||
- Guarantee ordering of `header::GetAll` iterator to be same as insertion order. [#2467]
|
||||
- Expose `header::map` module. [#2467]
|
||||
- Implement `ExactSizeIterator` and `FusedIterator` for all `HeaderMap` iterators. [#2470]
|
||||
- Update `actix-tls` to `3.0.0-rc.1`. [#2474]
|
||||
|
||||
[#2467]: https://github.com/actix/actix-web/pull/2467
|
||||
[#2470]: https://github.com/actix/actix-web/pull/2470
|
||||
[#2474]: https://github.com/actix/actix-web/pull/2474
|
||||
|
||||
|
||||
## 3.0.0-beta.13 - 2021-11-22
|
||||
### Added
|
||||
- `body::AnyBody::empty` for quickly creating an empty body. [#2446]
|
||||
- `body::AnyBody::none` for quickly creating a "none" body. [#2456]
|
||||
- `impl Clone` for `body::AnyBody<S> where S: Clone`. [#2448]
|
||||
- `body::AnyBody::into_boxed` for quickly converting to a type-erased, boxed body type. [#2448]
|
||||
|
||||
### Changed
|
||||
- Rename `body::AnyBody::{Message => Body}`. [#2446]
|
||||
- Rename `body::AnyBody::{from_message => new_boxed}`. [#2448]
|
||||
- Rename `body::AnyBody::{from_slice => copy_from_slice}`. [#2448]
|
||||
- Rename `body::{BoxAnyBody => BoxBody}`. [#2448]
|
||||
- Change representation of `AnyBody` to include a type parameter in `Body` variant. Defaults to `BoxBody`. [#2448]
|
||||
- `Encoder::response` now returns `AnyBody<Encoder<B>>`. [#2448]
|
||||
|
||||
### Removed
|
||||
- `body::AnyBody::Empty`; an empty body can now only be represented as a zero-length `Bytes` variant. [#2446]
|
||||
- `body::BodySize::Empty`; an empty body can now only be represented as a `Sized(0)` variant. [#2446]
|
||||
- `EncoderError::Boxed`; it is no longer required. [#2446]
|
||||
- `body::ResponseBody`; its function is replaced by the new `body::AnyBody` enum. [#2446]
|
||||
|
||||
[#2446]: https://github.com/actix/actix-web/pull/2446
|
||||
[#2448]: https://github.com/actix/actix-web/pull/2448
|
||||
[#2456]: https://github.com/actix/actix-web/pull/2456
|
||||
|
||||
|
||||
## 3.0.0-beta.12 - 2021-11-15
|
||||
### Changed
|
||||
- Update `actix-server` to `2.0.0-beta.9`. [#2442]
|
||||
|
||||
### Removed
|
||||
- `client` module. [#2425]
|
||||
- `trust-dns` feature. [#2425]
|
||||
|
||||
[#2425]: https://github.com/actix/actix-web/pull/2425
|
||||
[#2442]: https://github.com/actix/actix-web/pull/2442
|
||||
|
||||
|
||||
## 3.0.0-beta.11 - 2021-10-20
|
||||
### Changed
|
||||
- Updated rustls to v0.20. [#2414]
|
||||
- Minimum supported Rust version (MSRV) is now 1.52.
|
||||
|
||||
[#2414]: https://github.com/actix/actix-web/pull/2414
|
||||
|
||||
|
||||
## 3.0.0-beta.10 - 2021-09-09
|
||||
### Changed
|
||||
- `ContentEncoding` is now marked `#[non_exhaustive]`. [#2377]
|
||||
- Minimum supported Rust version (MSRV) is now 1.51.
|
||||
|
||||
### Fixed
|
||||
* Memory leak of `client::pool::ConnectorPoolSupport`. [#1626]
|
||||
- Remove slice creation pointing to potential uninitialized data on h1 encoder. [#2364]
|
||||
- Remove `Into<Error>` bound on `Encoder` body types. [#2375]
|
||||
- Fix quality parse error in Accept-Encoding header. [#2344]
|
||||
|
||||
[#2364]: https://github.com/actix/actix-web/pull/2364
|
||||
[#2375]: https://github.com/actix/actix-web/pull/2375
|
||||
[#2344]: https://github.com/actix/actix-web/pull/2344
|
||||
[#2377]: https://github.com/actix/actix-web/pull/2377
|
||||
|
||||
|
||||
## 3.0.0-beta.9 - 2021-08-09
|
||||
### Fixed
|
||||
- Potential HTTP request smuggling vulnerabilities. [RUSTSEC-2021-0081](https://github.com/rustsec/advisory-db/pull/977)
|
||||
|
||||
|
||||
## 3.0.0-beta.8 - 2021-06-26
|
||||
### Changed
|
||||
- Change compression algorithm feature flags. [#2250]
|
||||
|
||||
### Removed
|
||||
- `downcast` and `downcast_get_type_id` macros. [#2291]
|
||||
|
||||
[#2291]: https://github.com/actix/actix-web/pull/2291
|
||||
[#2250]: https://github.com/actix/actix-web/pull/2250
|
||||
|
||||
|
||||
## 3.0.0-beta.7 - 2021-06-17
|
||||
### Added
|
||||
- Alias `body::Body` as `body::AnyBody`. [#2215]
|
||||
- `BoxAnyBody`: a boxed message body with boxed errors. [#2183]
|
||||
- Re-export `http` crate's `Error` type as `error::HttpError`. [#2171]
|
||||
- Re-export `StatusCode`, `Method`, `Version` and `Uri` at the crate root. [#2171]
|
||||
- Re-export `ContentEncoding` and `ConnectionType` at the crate root. [#2171]
|
||||
- `Response::into_body` that consumes response and returns body type. [#2201]
|
||||
- `impl Default` for `Response`. [#2201]
|
||||
- Add zstd support for `ContentEncoding`. [#2244]
|
||||
|
||||
### Changed
|
||||
- The `MessageBody` trait now has an associated `Error` type. [#2183]
|
||||
- All error trait bounds in server service builders have changed from `Into<Error>` to `Into<Response<AnyBody>>`. [#2253]
|
||||
- All error trait bounds in message body and stream impls changed from `Into<Error>` to `Into<Box<dyn std::error::Error>>`. [#2253]
|
||||
- Places in `Response` where `ResponseBody<B>` was received or returned now simply use `B`. [#2201]
|
||||
- `header` mod is now public. [#2171]
|
||||
- `uri` mod is now public. [#2171]
|
||||
- Update `language-tags` to `0.3`.
|
||||
- Reduce the level from `error` to `debug` for the log line that is emitted when a `500 Internal Server Error` is built using `HttpResponse::from_error`. [#2201]
|
||||
- `ResponseBuilder::message_body` now returns a `Result`. [#2201]
|
||||
- Remove `Unpin` bound on `ResponseBuilder::streaming`. [#2253]
|
||||
- `HttpServer::{listen_rustls(), bind_rustls()}` now honor the ALPN protocols in the configuration parameter. [#2226]
|
||||
|
||||
### Removed
|
||||
- Stop re-exporting `http` crate's `HeaderMap` types in addition to ours. [#2171]
|
||||
- Down-casting for `MessageBody` types. [#2183]
|
||||
- `error::Result` alias. [#2201]
|
||||
- Error field from `Response` and `Response::error`. [#2205]
|
||||
- `impl Future` for `Response`. [#2201]
|
||||
- `Response::take_body` and old `Response::into_body` method that casted body type. [#2201]
|
||||
- `InternalError` and all the error types it constructed. [#2215]
|
||||
- Conversion (`impl Into`) of `Response<Body>` and `ResponseBuilder` to `Error`. [#2215]
|
||||
|
||||
[#2171]: https://github.com/actix/actix-web/pull/2171
|
||||
[#2183]: https://github.com/actix/actix-web/pull/2183
|
||||
[#2196]: https://github.com/actix/actix-web/pull/2196
|
||||
[#2201]: https://github.com/actix/actix-web/pull/2201
|
||||
[#2205]: https://github.com/actix/actix-web/pull/2205
|
||||
[#2215]: https://github.com/actix/actix-web/pull/2215
|
||||
[#2253]: https://github.com/actix/actix-web/pull/2253
|
||||
[#2244]: https://github.com/actix/actix-web/pull/2244
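Since the entries above introduce an associated `Error` type on `MessageBody`, here is a hedged sketch of what a minimal implementor might look like under that design. The trait methods shown (`size`, `poll_next`) follow the changelog's description, and the `OneChunk` type is purely illustrative.

```rust
use std::{
    convert::Infallible,
    pin::Pin,
    task::{Context, Poll},
};

use actix_http::body::{BodySize, MessageBody};
use bytes::Bytes;

/// Illustrative one-shot body: yields a single pre-computed chunk, then ends.
struct OneChunk(Option<Bytes>);

impl MessageBody for OneChunk {
    // Producing the chunk cannot fail, so `Infallible` is a natural error type.
    type Error = Infallible;

    fn size(&self) -> BodySize {
        BodySize::Sized(self.0.as_ref().map_or(0, |b| b.len() as u64))
    }

    fn poll_next(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, Self::Error>>> {
        // Yield the chunk exactly once; `None` afterwards signals end of body.
        Poll::Ready(self.0.take().map(Ok))
    }
}
```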
|
||||
|
||||
|
||||
|
||||
## 3.0.0-beta.6 - 2021-04-17
|
||||
### Added
|
||||
- `impl<T: MessageBody> MessageBody for Pin<Box<T>>`. [#2152]
|
||||
- `Response::{ok, bad_request, not_found, internal_server_error}`. [#2159]
|
||||
- Helper `body::to_bytes` for async collecting message body into Bytes. [#2158]
|
||||
|
||||
### Changes
|
||||
- The type parameter of `Response` no longer has a default. [#2152]
|
||||
- The `Message` variant of `body::Body` is now `Pin<Box<dyn MessageBody>>`. [#2152]
|
||||
- `BodyStream` and `SizedStream` are no longer restricted to Unpin types. [#2152]
|
||||
- Error enum types are marked `#[non_exhaustive]`. [#2161]
|
||||
|
||||
### Removed
|
||||
- `cookies` feature flag. [#2065]
|
||||
- Top-level `cookies` mod (re-export). [#2065]
|
||||
- `HttpMessage` trait loses the `cookies` and `cookie` methods. [#2065]
|
||||
- `impl ResponseError for CookieParseError`. [#2065]
|
||||
- Deprecated methods on `ResponseBuilder`: `if_true`, `if_some`. [#2148]
|
||||
- `ResponseBuilder::json`. [#2148]
|
||||
- `ResponseBuilder::{set_header, header}`. [#2148]
|
||||
- `impl From<serde_json::Value> for Body`. [#2148]
|
||||
- `Response::build_from`. [#2159]
|
||||
- Most of the status code builders on `Response`. [#2159]
|
||||
|
||||
[#2065]: https://github.com/actix/actix-web/pull/2065
|
||||
[#2148]: https://github.com/actix/actix-web/pull/2148
|
||||
[#2152]: https://github.com/actix/actix-web/pull/2152
|
||||
[#2159]: https://github.com/actix/actix-web/pull/2159
|
||||
[#2158]: https://github.com/actix/actix-web/pull/2158
|
||||
[#2161]: https://github.com/actix/actix-web/pull/2161
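A brief, hedged sketch of the `body::to_bytes` helper added above, which asynchronously drains a `MessageBody` into a single `Bytes` buffer; the exact signature is an assumption based on the changelog entry.

```rust
use actix_http::body::{to_bytes, MessageBody};
use bytes::Bytes;

/// Collect an entire message body into memory (fine for tests and small bodies).
async fn collect_body<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
    to_bytes(body).await
}
```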
|
||||
|
||||
|
||||
## 3.0.0-beta.5 - 2021-04-02
|
||||
### Added
|
||||
- `client::Connector::handshake_timeout` method for customizing TLS connection handshake timeout. [#2081]
|
||||
- `client::ConnectorService` as `client::Connector::finish` method's return type [#2081]
|
||||
- `client::ConnectionIo` trait alias [#2081]
|
||||
|
||||
### Changed
|
||||
- `client::Connector` type now only has one generic type for `actix_service::Service`. [#2063]
|
||||
|
||||
### Removed
|
||||
- Common typed HTTP headers were moved to actix-web. [#2094]
|
||||
- `ResponseError` impl for `actix_utils::timeout::TimeoutError`. [#2127]
|
||||
|
||||
[#2063]: https://github.com/actix/actix-web/pull/2063
|
||||
[#2081]: https://github.com/actix/actix-web/pull/2081
|
||||
[#2094]: https://github.com/actix/actix-web/pull/2094
|
||||
[#2127]: https://github.com/actix/actix-web/pull/2127
|
||||
|
||||
|
||||
## 3.0.0-beta.4 - 2021-03-08
|
||||
### Changed
|
||||
- Feature `cookies` is now optional and disabled by default. [#1981]
|
||||
- `ws::hash_key` now returns an array. [#2035]
|
||||
- `ResponseBuilder::json` now takes `impl Serialize`. [#2052]
|
||||
|
||||
### Removed
|
||||
- Re-export of `futures_channel::oneshot::Canceled` is removed from `error` mod. [#1994]
|
||||
- `ResponseError` impl for `futures_channel::oneshot::Canceled` is removed. [#1994]
|
||||
|
||||
[#1981]: https://github.com/actix/actix-web/pull/1981
|
||||
[#1994]: https://github.com/actix/actix-web/pull/1994
|
||||
[#2035]: https://github.com/actix/actix-web/pull/2035
|
||||
[#2052]: https://github.com/actix/actix-web/pull/2052
|
||||
|
||||
|
||||
## 3.0.0-beta.3 - 2021-02-10
|
||||
- No notable changes.
|
||||
|
||||
|
||||
## 3.0.0-beta.2 - 2021-02-10
|
||||
### Added
|
||||
- `TryIntoHeaderPair` trait that allows using typed and untyped headers in the same methods. [#1869]
|
||||
- `ResponseBuilder::insert_header` method which allows using typed headers. [#1869]
|
||||
- `ResponseBuilder::append_header` method which allows using typed headers. [#1869]
|
||||
- `TestRequest::insert_header` method which allows using typed headers. [#1869]
|
||||
- `ContentEncoding` implements all necessary header traits. [#1912]
|
||||
- `HeaderMap::len_keys` has the behavior of the old `len` method. [#1964]
|
||||
- `HeaderMap::drain` as an efficient draining iterator. [#1964]
|
||||
- Implement `IntoIterator` for owned `HeaderMap`. [#1964]
|
||||
- `trust-dns` optional feature to enable `trust-dns-resolver` as client dns resolver. [#1969]
|
||||
|
||||
### Changed
|
||||
- `ResponseBuilder::content_type` now takes an `impl TryIntoHeaderValue` to support using typed
|
||||
`mime` types. [#1894]
|
||||
- Renamed `TryIntoHeaderValue::{try_into => try_into_value}` to avoid ambiguity with std
|
||||
`TryInto` trait. [#1894]
|
||||
- `Extensions::insert` returns Option of replaced item. [#1904]
|
||||
- Remove `HttpResponseBuilder::json2()`. [#1903]
|
||||
- Enable `HttpResponseBuilder::json()` to receive data by value and reference. [#1903]
|
||||
- `client::error::ConnectError` Resolver variant contains `Box<dyn std::error::Error>` type. [#1905]
|
||||
- `client::ConnectorConfig` default timeout changed to 5 seconds. [#1905]
|
||||
- Simplify `BlockingError` type to a unit struct. It's now only triggered when blocking thread pool
|
||||
is dead. [#1957]
|
||||
- `HeaderMap::len` now returns number of values instead of number of keys. [#1964]
|
||||
- `HeaderMap::insert` now returns iterator of removed values. [#1964]
|
||||
- `HeaderMap::remove` now returns iterator of removed values. [#1964]
|
||||
|
||||
### Removed
|
||||
- `ResponseBuilder::set`; use `ResponseBuilder::insert_header`. [#1869]
|
||||
- `ResponseBuilder::set_header`; use `ResponseBuilder::insert_header`. [#1869]
|
||||
- `ResponseBuilder::header`; use `ResponseBuilder::append_header`. [#1869]
|
||||
- `TestRequest::with_hdr`; use `TestRequest::default().insert_header()`. [#1869]
|
||||
- `TestRequest::with_header`; use `TestRequest::default().insert_header()`. [#1869]
|
||||
- `actors` optional feature. [#1969]
|
||||
- `ResponseError` impl for `actix::MailboxError`. [#1969]
|
||||
|
||||
### Documentation
|
||||
- Vastly improve docs and add examples for `HeaderMap`. [#1964]
|
||||
|
||||
[#1869]: https://github.com/actix/actix-web/pull/1869
|
||||
[#1894]: https://github.com/actix/actix-web/pull/1894
|
||||
[#1903]: https://github.com/actix/actix-web/pull/1903
|
||||
[#1904]: https://github.com/actix/actix-web/pull/1904
|
||||
[#1905]: https://github.com/actix/actix-web/pull/1905
|
||||
[#1912]: https://github.com/actix/actix-web/pull/1912
|
||||
[#1957]: https://github.com/actix/actix-web/pull/1957
|
||||
[#1964]: https://github.com/actix/actix-web/pull/1964
|
||||
[#1969]: https://github.com/actix/actix-web/pull/1969
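To illustrate the header-pair builder methods listed above, a hedged sketch using plain string tuples, which the header-pair trait also accepts per the entries; builder method names follow the changelog, and the response body type is left to inference.

```rust
use actix_http::{Response, StatusCode};

fn demo_response() {
    let res = Response::build(StatusCode::OK)
        // A ("name", "value") tuple is accepted by the header-pair trait.
        .insert_header(("content-type", "text/plain; charset=utf-8"))
        .append_header(("x-request-id", "abc123"))
        .body("hello");

    println!("built response with status {}", res.status());
}
```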
|
||||
|
||||
|
||||
## 3.0.0-beta.1 - 2021-01-07
|
||||
### Added
|
||||
- Add `Http3` to `Protocol` enum for future compatibility and also mark `#[non_exhaustive]`.
|
||||
|
||||
### Changed
|
||||
- Update `actix-*` dependencies to tokio `1.0` based versions. [#1813]
|
||||
- Bumped `rand` to `0.8`.
|
||||
- Update `bytes` to `1.0`. [#1813]
|
||||
- Update `h2` to `0.3`. [#1813]
|
||||
- The `ws::Message::Text` enum variant now contains a `bytestring::ByteString`. [#1864]
|
||||
|
||||
### Removed
|
||||
- Deprecated `on_connect` methods have been removed. Prefer the new
|
||||
`on_connect_ext` technique. [#1857]
|
||||
- Remove `ResponseError` impl for `actix::actors::resolver::ResolverError`
|
||||
due to deprecation of the resolver actor. [#1813]
|
||||
- Remove `ConnectError::SslHandshakeError` and re-export of `HandshakeError`
|
||||
due to the removal of this type from the `tokio-openssl` crate. OpenSSL handshake
|
||||
errors are now returned as `ConnectError::SslError`. [#1813]
|
||||
- Remove `actix-threadpool` dependency. Use `actix_rt::task::spawn_blocking`.
|
||||
Due to this change, the `actix_threadpool::BlockingError` type is moved into
|
||||
the `actix_http::error` module. [#1878]
|
||||
|
||||
[#1813]: https://github.com/actix/actix-web/pull/1813
|
||||
[#1857]: https://github.com/actix/actix-web/pull/1857
|
||||
[#1864]: https://github.com/actix/actix-web/pull/1864
|
||||
[#1878]: https://github.com/actix/actix-web/pull/1878
|
||||
|
||||
|
||||
## 2.2.1 - 2021-08-09
|
||||
### Fixed
|
||||
- Potential HTTP request smuggling vulnerabilities. [RUSTSEC-2021-0081](https://github.com/rustsec/advisory-db/pull/977)
|
||||
|
||||
|
||||
## 2.2.0 - 2020-11-25
|
||||
### Added
|
||||
- HttpResponse builders for 1xx status codes. [#1768]
|
||||
- `Accept::mime_precedence` and `Accept::mime_preference`. [#1793]
|
||||
- `TryFrom<u16>` and `TryFrom<f32>` for `http::header::Quality`. [#1797]
|
||||
|
||||
### Fixed
|
||||
- Started dropping `transfer-encoding: chunked` and `Content-Length` for 1XX and 204 responses. [#1767]
|
||||
|
||||
### Changed
|
||||
- Upgrade `serde_urlencoded` to `0.7`. [#1773]
|
||||
|
||||
[#1773]: https://github.com/actix/actix-web/pull/1773
|
||||
[#1767]: https://github.com/actix/actix-web/pull/1767
|
||||
[#1768]: https://github.com/actix/actix-web/pull/1768
|
||||
[#1793]: https://github.com/actix/actix-web/pull/1793
|
||||
[#1797]: https://github.com/actix/actix-web/pull/1797
|
||||
|
||||
|
||||
## 2.1.0 - 2020-10-30
|
||||
### Added
|
||||
- Added more flexible `on_connect_ext` methods for on-connect handling. [#1754]
|
||||
|
||||
### Changed
|
||||
- Upgrade `base64` to `0.13`. [#1744]
|
||||
- Upgrade `pin-project` to `1.0`. [#1733]
|
||||
- Deprecate `ResponseBuilder::{if_some, if_true}`. [#1760]
|
||||
|
||||
[#1760]: https://github.com/actix/actix-web/pull/1760
|
||||
[#1754]: https://github.com/actix/actix-web/pull/1754
|
||||
[#1733]: https://github.com/actix/actix-web/pull/1733
|
||||
[#1744]: https://github.com/actix/actix-web/pull/1744
|
||||
|
||||
|
||||
## 2.0.0 - 2020-09-11
|
||||
- No significant changes from `2.0.0-beta.4`.
|
||||
|
||||
|
||||
## 2.0.0-beta.4 - 2020-09-09
|
||||
### Changed
|
||||
- Update actix-codec and actix-utils dependencies.
|
||||
- Update actix-connect and actix-tls dependencies.
|
||||
|
||||
|
||||
## 2.0.0-beta.3 - 2020-08-14
|
||||
### Fixed
|
||||
- Memory leak of `client::pool::ConnectorPoolSupport`. [#1626]
|
||||
|
||||
[#1626]: https://github.com/actix/actix-web/pull/1626
|
||||
|
||||
|
||||
## [2.0.0-beta.2] - 2020-07-21
|
||||
## 2.0.0-beta.2 - 2020-07-21
|
||||
### Fixed
|
||||
* Potential UB in h1 decoder using uninitialized memory. [#1614]
|
||||
- Potential UB in h1 decoder using uninitialized memory. [#1614]
|
||||
|
||||
### Changed
|
||||
* Fix illegal chunked encoding. [#1615]
|
||||
- Fix illegal chunked encoding. [#1615]
|
||||
|
||||
[#1614]: https://github.com/actix/actix-web/pull/1614
|
||||
[#1615]: https://github.com/actix/actix-web/pull/1615
|
||||
|
||||
|
||||
## [2.0.0-beta.1] - 2020-07-11
|
||||
|
||||
## 2.0.0-beta.1 - 2020-07-11
|
||||
### Changed
|
||||
|
||||
* Migrate cookie handling to `cookie` crate. [#1558]
|
||||
* Update `sha-1` to 0.9. [#1586]
|
||||
* Fix leak in client pool. [#1580]
|
||||
* MSRV is now 1.41.1.
|
||||
- Migrate cookie handling to `cookie` crate. [#1558]
|
||||
- Update `sha-1` to 0.9. [#1586]
|
||||
- Fix leak in client pool. [#1580]
|
||||
- MSRV is now 1.41.1.
|
||||
|
||||
[#1558]: https://github.com/actix/actix-web/pull/1558
|
||||
[#1586]: https://github.com/actix/actix-web/pull/1586
|
||||
[#1580]: https://github.com/actix/actix-web/pull/1580
|
||||
|
||||
## [2.0.0-alpha.4] - 2020-05-21
|
||||
|
||||
## 2.0.0-alpha.4 - 2020-05-21
|
||||
### Changed
|
||||
|
||||
* Bump minimum supported Rust version to 1.40
|
||||
* content_length function is removed, and you can set Content-Length by calling no_chunking function [#1439]
|
||||
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
|
||||
- Bump minimum supported Rust version to 1.40
|
||||
- content_length function is removed, and you can set Content-Length by calling
|
||||
no_chunking function [#1439]
|
||||
- `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
|
||||
`u64` instead of a `usize`.
|
||||
* Update `base64` dependency to 0.12
|
||||
- Update `base64` dependency to 0.12
|
||||
|
||||
### Fixed
|
||||
|
||||
* Support parsing of `SameSite=None` [#1503]
|
||||
- Support parsing of `SameSite=None` [#1503]
|
||||
|
||||
[#1439]: https://github.com/actix/actix-web/pull/1439
|
||||
[#1503]: https://github.com/actix/actix-web/pull/1503
|
||||
|
||||
## [2.0.0-alpha.3] - 2020-05-08
|
||||
|
||||
## 2.0.0-alpha.3 - 2020-05-08
|
||||
### Fixed
|
||||
|
||||
* Correct spelling of ConnectError::Unresolved [#1487]
|
||||
* Fix a mistake in the encoding of websocket continuation messages wherein
|
||||
- Correct spelling of ConnectError::Unresolved [#1487]
|
||||
- Fix a mistake in the encoding of websocket continuation messages wherein
|
||||
Item::FirstText and Item::FirstBinary are each encoded as the other.
|
||||
|
||||
### Changed
|
||||
|
||||
* Implement `std::error::Error` for our custom errors [#1422]
|
||||
* Remove `failure` support for `ResponseError` since that crate
|
||||
- Implement `std::error::Error` for our custom errors [#1422]
|
||||
- Remove `failure` support for `ResponseError` since that crate
|
||||
will be deprecated in the near future.
|
||||
|
||||
[#1422]: https://github.com/actix/actix-web/pull/1422
|
||||
[#1487]: https://github.com/actix/actix-web/pull/1487
|
||||
|
||||
## [2.0.0-alpha.2] - 2020-03-07
|
||||
|
||||
## 2.0.0-alpha.2 - 2020-03-07
|
||||
### Changed
|
||||
|
||||
* Update `actix-connect` and `actix-tls` dependency to 2.0.0-alpha.1. [#1395]
|
||||
|
||||
* Change default initial window size and connection window size for HTTP2 to 2MB and 1MB respectively
|
||||
to improve download speed for awc when downloading large objects. [#1394]
|
||||
|
||||
* client::Connector accepts initial_window_size and initial_connection_window_size HTTP2 configuration. [#1394]
|
||||
|
||||
* client::Connector allowing to set max_http_version to limit HTTP version to be used. [#1394]
|
||||
- Update `actix-connect` and `actix-tls` dependency to 2.0.0-alpha.1. [#1395]
|
||||
- Change default initial window size and connection window size for HTTP2 to 2MB and 1MB
|
||||
respectively to improve download speed for awc when downloading large objects. [#1394]
|
||||
- client::Connector accepts initial_window_size and initial_connection_window_size
|
||||
HTTP2 configuration. [#1394]
|
||||
- client::Connector allowing to set max_http_version to limit HTTP version to be used. [#1394]
|
||||
|
||||
[#1394]: https://github.com/actix/actix-web/pull/1394
|
||||
[#1395]: https://github.com/actix/actix-web/pull/1395
|
||||
|
||||
## [2.0.0-alpha.1] - 2020-02-27
|
||||
|
||||
## 2.0.0-alpha.1 - 2020-02-27
|
||||
### Changed
|
||||
|
||||
* Update the `time` dependency to 0.2.7.
|
||||
* Moved actors messages support from actix crate, enabled with feature `actors`.
|
||||
* Breaking change: trait MessageBody requires Unpin and accepting Pin<&mut Self> instead of &mut self in the poll_next().
|
||||
* MessageBody is not implemented for &'static [u8] anymore.
|
||||
- Update the `time` dependency to 0.2.7.
|
||||
- Moved actors messages support from actix crate, enabled with feature `actors`.
|
||||
- Breaking change: trait MessageBody requires Unpin and accepting `Pin<&mut Self>` instead of
|
||||
`&mut self` in the poll_next().
|
||||
- MessageBody is not implemented for &'static [u8] anymore.
|
||||
|
||||
### Fixed
|
||||
- Allow `SameSite=None` cookies to be sent in a response.
|
||||
|
||||
* Allow `SameSite=None` cookies to be sent in a response.
|
||||
|
||||
## [1.0.1] - 2019-12-20
|
||||
|
||||
## 1.0.1 - 2019-12-20
|
||||
### Fixed
|
||||
- Poll upgrade service's readiness from HTTP service handlers
|
||||
- Replace brotli with brotli2 #1224
|
||||
|
||||
* Poll upgrade service's readiness from HTTP service handlers
|
||||
|
||||
* Replace brotli with brotli2 #1224
|
||||
|
||||
## [1.0.0] - 2019-12-13
|
||||
|
||||
## 1.0.0 - 2019-12-13
|
||||
### Added
|
||||
|
||||
* Add websockets continuation frame support
|
||||
- Add websockets continuation frame support
|
||||
|
||||
### Changed
|
||||
- Replace `flate2-xxx` features with `compress`
|
||||
|
||||
* Replace `flate2-xxx` features with `compress`
|
||||
|
||||
## [1.0.0-alpha.5] - 2019-12-09
|
||||
|
||||
## 1.0.0-alpha.5 - 2019-12-09
|
||||
### Fixed
|
||||
|
||||
* Check `Upgrade` service readiness before calling it
|
||||
|
||||
* Fix buffer remaining capacity calcualtion
|
||||
- Check `Upgrade` service readiness before calling it
|
||||
- Fix buffer remaining capacity calculation
|
||||
|
||||
### Changed
|
||||
- Websockets: Ping and Pong should have binary data #1049
|
||||
|
||||
* Websockets: Ping and Pong should have binary data #1049
|
||||
|
||||
## [1.0.0-alpha.4] - 2019-12-08
|
||||
|
||||
## 1.0.0-alpha.4 - 2019-12-08
|
||||
### Added
|
||||
|
||||
* Add impl ResponseBuilder for Error
|
||||
- Add impl ResponseBuilder for Error
|
||||
|
||||
### Changed
|
||||
- Use rust based brotli compression library
|
||||
|
||||
* Use rust based brotli compression library
|
||||
|
||||
## [1.0.0-alpha.3] - 2019-12-07
|
||||
|
||||
## 1.0.0-alpha.3 - 2019-12-07
|
||||
### Changed
|
||||
|
||||
* Migrate to tokio 0.2
|
||||
|
||||
* Migrate to `std::future`
|
||||
- Migrate to tokio 0.2
|
||||
- Migrate to `std::future`
|
||||
|
||||
|
||||
## [0.2.11] - 2019-11-06
|
||||
|
||||
## 0.2.11 - 2019-11-06
|
||||
### Added
|
||||
|
||||
* Add support for serde_json::Value to be passed as argument to ResponseBuilder.body()
|
||||
|
||||
* Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151)
|
||||
|
||||
* Allow to use `std::convert::Infallible` as `actix_http::error::Error`
|
||||
- Add support for serde_json::Value to be passed as argument to ResponseBuilder.body()
|
||||
- Add an additional `filename*` param in the `Content-Disposition` header of
|
||||
`actix_files::NamedFile` to be more compatible. (#1151)
|
||||
- Allow to use `std::convert::Infallible` as `actix_http::error::Error`
|
||||
|
||||
### Fixed
|
||||
- To be compatible with non-English error responses, `ResponseError` rendered with `text/plain;
|
||||
charset=utf-8` header [#1118]
|
||||
|
||||
* To be compatible with non-English error responses, `ResponseError` rendered with `text/plain; charset=utf-8` header #1118
|
||||
[#1118]: https://github.com/actix/actix-web/pull/1118
|
||||
|
||||
|
||||
## [0.2.10] - 2019-09-11
|
||||
|
||||
## 0.2.10 - 2019-09-11
|
||||
### Added
|
||||
|
||||
* Add support for sending HTTP requests with `Rc<RequestHead>` in addition to sending HTTP requests with `RequestHead`
|
||||
- Add support for sending HTTP requests with `Rc<RequestHead>` in addition to sending HTTP requests
|
||||
with `RequestHead`
|
||||
|
||||
### Fixed
|
||||
|
||||
* h2 will use error response #1080
|
||||
|
||||
* on_connect result isn't added to request extensions for http2 requests #1009
|
||||
- h2 will use error response #1080
|
||||
- on_connect result isn't added to request extensions for http2 requests #1009
|
||||
|
||||
|
||||
## [0.2.9] - 2019-08-13
|
||||
## 0.2.9 - 2019-08-13
|
||||
### Changed
|
||||
- Dropped the `byteorder`-dependency in favor of `stdlib`-implementation
|
||||
- Update percent-encoding to 2.1
|
||||
- Update serde_urlencoded to 0.6.1
|
||||
|
||||
### Fixed
|
||||
- Fixed a panic in the HTTP2 handshake in client HTTP requests (#1031)
|
||||
|
||||
|
||||
## 0.2.8 - 2019-08-01
|
||||
### Added
|
||||
- Add `rustls` support
|
||||
- Add `Clone` impl for `HeaderMap`
|
||||
|
||||
### Fixed
|
||||
- awc client panic #1016
|
||||
- Invalid response with compression middleware enabled, but compression-related features
|
||||
disabled #997
|
||||
|
||||
|
||||
## 0.2.7 - 2019-07-18
|
||||
### Added
|
||||
- Add support for downcasting response errors #986
|
||||
|
||||
|
||||
## 0.2.6 - 2019-07-17
|
||||
### Changed
|
||||
- Replace `ClonableService` with local copy
|
||||
- Upgrade `rand` dependency version to 0.7
|
||||
|
||||
|
||||
## 0.2.5 - 2019-06-28
|
||||
### Added
|
||||
- Add `on-connect` callback, `HttpServiceBuilder::on_connect()` #946
|
||||
|
||||
### Changed
|
||||
- Use `encoding_rs` crate instead of unmaintained `encoding` crate
|
||||
- Add `Copy` and `Clone` impls for `ws::Codec`
|
||||
|
||||
* Dropped the `byteorder`-dependency in favor of `stdlib`-implementation
|
||||
|
||||
* Update percent-encoding to 2.1
|
||||
|
||||
* Update serde_urlencoded to 0.6.1
|
||||
|
||||
## 0.2.4 - 2019-06-16
|
||||
### Fixed
|
||||
|
||||
* Fixed a panic in the HTTP2 handshake in client HTTP requests (#1031)
|
||||
- Do not compress NoContent (204) responses #918
|
||||
|
||||
|
||||
## [0.2.8] - 2019-08-01
|
||||
|
||||
## 0.2.3 - 2019-06-02
|
||||
### Added
|
||||
|
||||
* Add `rustls` support
|
||||
|
||||
* Add `Clone` impl for `HeaderMap`
|
||||
|
||||
### Fixed
|
||||
|
||||
* awc client panic #1016
|
||||
|
||||
* Invalid response with compression middleware enabled, but compression-related features disabled #997
|
||||
|
||||
|
||||
## [0.2.7] - 2019-07-18
|
||||
|
||||
### Added
|
||||
|
||||
* Add support for downcasting response errors #986
|
||||
|
||||
|
||||
## [0.2.6] - 2019-07-17
|
||||
- Debug impl for ResponseBuilder
|
||||
- From SizedStream and BodyStream for Body
|
||||
|
||||
### Changed
|
||||
|
||||
* Replace `ClonableService` with local copy
|
||||
|
||||
* Upgrade `rand` dependency version to 0.7
|
||||
- SizedStream uses u64
|
||||
|
||||
|
||||
## [0.2.5] - 2019-06-28
|
||||
|
||||
### Added
|
||||
|
||||
* Add `on-connect` callback, `HttpServiceBuilder::on_connect()` #946
|
||||
|
||||
### Changed
|
||||
|
||||
* Use `encoding_rs` crate instead of unmaintained `encoding` crate
|
||||
|
||||
* Add `Copy` and `Clone` impls for `ws::Codec`
|
||||
|
||||
|
||||
## [0.2.4] - 2019-06-16
|
||||
|
||||
## 0.2.2 - 2019-05-29
|
||||
### Fixed
|
||||
|
||||
* Do not compress NoContent (204) responses #918
|
||||
- Parse incoming stream before closing stream on disconnect #868
|
||||
|
||||
|
||||
## [0.2.3] - 2019-06-02
|
||||
|
||||
### Added
|
||||
|
||||
* Debug impl for ResponseBuilder
|
||||
|
||||
* From SizedStream and BodyStream for Body
|
||||
|
||||
### Changed
|
||||
|
||||
* SizedStream uses u64
|
||||
|
||||
|
||||
## [0.2.2] - 2019-05-29
|
||||
|
||||
## 0.2.1 - 2019-05-25
|
||||
### Fixed
|
||||
|
||||
* Parse incoming stream before closing stream on disconnect #868
|
||||
- Handle socket read disconnect
|
||||
|
||||
|
||||
## [0.2.1] - 2019-05-25
|
||||
|
||||
### Fixed
|
||||
|
||||
* Handle socket read disconnect
|
||||
|
||||
|
||||
## [0.2.0] - 2019-05-12
|
||||
|
||||
## 0.2.0 - 2019-05-12
|
||||
### Changed
|
||||
|
||||
* Update actix-service to 0.4
|
||||
|
||||
* Expect and upgrade services accept `ServerConfig` config.
|
||||
- Update actix-service to 0.4
|
||||
- Expect and upgrade services accept `ServerConfig` config.
|
||||
|
||||
### Deleted
|
||||
|
||||
* `OneRequest` service
|
||||
- `OneRequest` service
|
||||
|
||||
|
||||
## [0.1.5] - 2019-05-04
|
||||
## 0.1.5 - 2019-05-04
|
||||
### Fixed
|
||||
- Clean up response extensions in response pool #817
|
||||
|
||||
|
||||
## 0.1.4 - 2019-04-24
|
||||
### Added
|
||||
- Allow to render h1 request headers in `Camel-Case`
|
||||
|
||||
### Fixed
|
||||
|
||||
* Clean up response extensions in response pool #817
|
||||
- Read until eof for http/1.0 responses #771
|
||||
|
||||
|
||||
## [0.1.4] - 2019-04-24
|
||||
|
||||
### Added
|
||||
|
||||
* Allow to render h1 request headers in `Camel-Case`
|
||||
|
||||
## 0.1.3 - 2019-04-23
|
||||
### Fixed
|
||||
|
||||
* Read until eof for http/1.0 responses #771
|
||||
- Fix http client pool management
|
||||
- Fix http client wait queue management #794
|
||||
|
||||
|
||||
## [0.1.3] - 2019-04-23
|
||||
|
||||
## 0.1.2 - 2019-04-23
|
||||
### Fixed
|
||||
|
||||
* Fix http client pool management
|
||||
|
||||
* Fix http client wait queue management #794
|
||||
- Fix BorrowMutError panic in client connector #793
|
||||
|
||||
|
||||
## [0.1.2] - 2019-04-23
|
||||
|
||||
### Fixed
|
||||
|
||||
* Fix BorrowMutError panic in client connector #793
|
||||
|
||||
|
||||
## [0.1.1] - 2019-04-19
|
||||
|
||||
## 0.1.1 - 2019-04-19
|
||||
### Changed
|
||||
|
||||
* Cookie::max_age() accepts value in seconds
|
||||
|
||||
* Cookie::max_age_time() accepts value in time::Duration
|
||||
|
||||
* Allow to specify server address for client connector
|
||||
- Cookie::max_age() accepts value in seconds
|
||||
- Cookie::max_age_time() accepts value in time::Duration
|
||||
- Allow to specify server address for client connector
|
||||
|
||||
|
||||
## [0.1.0] - 2019-04-16
|
||||
|
||||
## 0.1.0 - 2019-04-16
|
||||
### Added
|
||||
|
||||
* Expose peer addr via `Request::peer_addr()` and `RequestHead::peer_addr`
|
||||
- Expose peer addr via `Request::peer_addr()` and `RequestHead::peer_addr`
|
||||
|
||||
### Changed
|
||||
|
||||
* `actix_http::encoding` always available
|
||||
|
||||
* use trust-dns-resolver 0.11.0
|
||||
- `actix_http::encoding` always available
|
||||
- use trust-dns-resolver 0.11.0
|
||||
|
||||
|
||||
## [0.1.0-alpha.5] - 2019-04-12
|
||||
|
||||
## 0.1.0-alpha.5 - 2019-04-12
|
||||
### Added
|
||||
|
||||
* Allow to use custom service for upgrade requests
|
||||
|
||||
* Added `h1::SendResponse` future.
|
||||
- Allow to use custom service for upgrade requests
|
||||
- Added `h1::SendResponse` future.
|
||||
|
||||
### Changed
|
||||
|
||||
* MessageBody::length() renamed to MessageBody::size() for consistency
|
||||
|
||||
* ws handshake verification functions take RequestHead instead of Request
|
||||
- MessageBody::length() renamed to MessageBody::size() for consistency
|
||||
- ws handshake verification functions take RequestHead instead of Request
|
||||
|
||||
|
||||
## [0.1.0-alpha.4] - 2019-04-08
|
||||
|
||||
## 0.1.0-alpha.4 - 2019-04-08
|
||||
### Added
|
||||
|
||||
* Allow to use custom `Expect` handler
|
||||
|
||||
* Add minimal `std::error::Error` impl for `Error`
|
||||
- Allow to use custom `Expect` handler
|
||||
- Add minimal `std::error::Error` impl for `Error`
|
||||
|
||||
### Changed
|
||||
|
||||
* Export IntoHeaderValue
|
||||
|
||||
* Render error and return as response body
|
||||
|
||||
* Use thread pool for response body comression
|
||||
- Export IntoHeaderValue
|
||||
- Render error and return as response body
|
||||
- Use thread pool for response body compression
|
||||
|
||||
### Deleted
|
||||
|
||||
* Removed PayloadBuffer
|
||||
- Removed PayloadBuffer
|
||||
|
||||
|
||||
## [0.1.0-alpha.3] - 2019-04-02
|
||||
|
||||
## 0.1.0-alpha.3 - 2019-04-02
|
||||
### Added
|
||||
|
||||
* Warn when an unsealed private cookie isn't valid UTF-8
|
||||
- Warn when an unsealed private cookie isn't valid UTF-8
|
||||
|
||||
### Fixed
|
||||
|
||||
* Rust 1.31.0 compatibility
|
||||
|
||||
* Preallocate read buffer for h1 codec
|
||||
|
||||
* Detect socket disconnection during protocol selection
|
||||
- Rust 1.31.0 compatibility
|
||||
- Preallocate read buffer for h1 codec
|
||||
- Detect socket disconnection during protocol selection
|
||||
|
||||
|
||||
## [0.1.0-alpha.2] - 2019-03-29
|
||||
|
||||
## 0.1.0-alpha.2 - 2019-03-29
|
||||
### Added
|
||||
|
||||
* Added ws::Message::Nop, no-op websockets message
|
||||
- Added ws::Message::Nop, no-op websockets message
|
||||
|
||||
### Changed
|
||||
|
||||
* Do not use thread pool for decomression if chunk size is smaller than 2048.
|
||||
- Do not use thread pool for decompression if chunk size is smaller than 2048.
|
||||
|
||||
|
||||
## [0.1.0-alpha.1] - 2019-03-28
|
||||
|
||||
* Initial impl
|
||||
## 0.1.0-alpha.1 - 2019-03-28
|
||||
- Initial impl
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
||||
@@ -1,21 +1,23 @@
|
||||
[package]
|
||||
name = "actix-http"
|
||||
version = "2.0.0-beta.3"
|
||||
version = "3.0.0-beta.17"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "Actix HTTP primitives"
|
||||
readme = "README.md"
|
||||
description = "HTTP primitives for the Actix ecosystem"
|
||||
keywords = ["actix", "http", "framework", "async", "futures"]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-web.git"
|
||||
documentation = "https://docs.rs/actix-http/"
|
||||
categories = ["network-programming", "asynchronous",
|
||||
"web-programming::http-server",
|
||||
"web-programming::websocket"]
|
||||
categories = [
|
||||
"network-programming",
|
||||
"asynchronous",
|
||||
"web-programming::http-server",
|
||||
"web-programming::websocket",
|
||||
]
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
features = ["openssl", "rustls", "compress", "secure-cookies", "actors"]
|
||||
# features that docs.rs will build with
|
||||
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"]
|
||||
|
||||
[lib]
|
||||
name = "actix_http"
|
||||
@@ -25,79 +27,83 @@ path = "src/lib.rs"
|
||||
default = []
|
||||
|
||||
# openssl
|
||||
openssl = ["actix-tls/openssl", "actix-connect/openssl"]
|
||||
openssl = ["actix-tls/accept", "actix-tls/openssl"]
|
||||
|
||||
# rustls support
|
||||
rustls = ["actix-tls/rustls", "actix-connect/rustls"]
|
||||
rustls = ["actix-tls/accept", "actix-tls/rustls"]
|
||||
|
||||
# enable compressison support
|
||||
compress = ["flate2", "brotli2"]
|
||||
# enable compression support
|
||||
compress-brotli = ["brotli2", "__compress"]
|
||||
compress-gzip = ["flate2", "__compress"]
|
||||
compress-zstd = ["zstd", "__compress"]
|
||||
|
||||
# support for secure cookies
|
||||
secure-cookies = ["cookie/secure"]
|
||||
|
||||
# support for actix Actor messages
|
||||
actors = ["actix"]
|
||||
# Internal (PRIVATE!) features used to aid testing and checking feature status.
|
||||
# Don't rely on these whatsoever. They may disappear at any time.
|
||||
__compress = []
|
||||
|
||||
[dependencies]
|
||||
actix-service = "1.0.5"
|
||||
actix-codec = "0.2.0"
|
||||
actix-connect = "2.0.0-alpha.4"
|
||||
actix-utils = "1.0.6"
|
||||
actix-rt = "1.0.0"
|
||||
actix-threadpool = "0.3.1"
|
||||
actix-tls = { version = "2.0.0-alpha.2", optional = true }
|
||||
actix = { version = "0.10.0-alpha.1", optional = true }
|
||||
actix-service = "2.0.0"
|
||||
actix-codec = "0.4.1"
|
||||
actix-utils = "3.0.0"
|
||||
actix-rt = { version = "2.2", default-features = false }
|
||||
|
||||
base64 = "0.12"
|
||||
ahash = "0.7"
|
||||
base64 = "0.13"
|
||||
bitflags = "1.2"
|
||||
bytes = "0.5.3"
|
||||
cookie = { version = "0.14.1", features = ["percent-encode"] }
|
||||
copyless = "0.1.4"
|
||||
derive_more = "0.99.2"
|
||||
either = "1.5.3"
|
||||
bytes = "1"
|
||||
bytestring = "1"
|
||||
derive_more = "0.99.5"
|
||||
encoding_rs = "0.8"
|
||||
futures-channel = { version = "0.3.5", default-features = false }
|
||||
futures-core = { version = "0.3.5", default-features = false }
|
||||
futures-util = { version = "0.3.5", default-features = false }
|
||||
fxhash = "0.2.1"
|
||||
h2 = "0.2.1"
|
||||
http = "0.2.0"
|
||||
httparse = "1.3"
|
||||
indexmap = "1.3"
|
||||
itoa = "0.4"
|
||||
lazy_static = "1.4"
|
||||
language-tags = "0.2"
|
||||
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
|
||||
h2 = "0.3.9"
|
||||
http = "0.2.5"
|
||||
httparse = "1.5.1"
|
||||
httpdate = "1.0.1"
|
||||
itoa = "1"
|
||||
language-tags = "0.3"
|
||||
local-channel = "0.1"
|
||||
log = "0.4"
|
||||
mime = "0.3"
|
||||
percent-encoding = "2.1"
|
||||
pin-project = "0.4.17"
|
||||
rand = "0.7"
|
||||
regex = "1.3"
|
||||
serde = "1.0"
|
||||
serde_json = "1.0"
|
||||
sha-1 = "0.9"
|
||||
slab = "0.4"
|
||||
serde_urlencoded = "0.6.1"
|
||||
time = { version = "0.2.7", default-features = false, features = ["std"] }
|
||||
pin-project-lite = "0.2"
|
||||
rand = "0.8"
|
||||
sha-1 = "0.10"
|
||||
smallvec = "1.6.1"
|
||||
|
||||
# tls
|
||||
actix-tls = { version = "3.0.0", default-features = false, optional = true }
|
||||
|
||||
# compression
|
||||
brotli2 = { version="0.3.2", optional = true }
|
||||
flate2 = { version = "1.0.13", optional = true }
|
||||
zstd = { version = "0.9", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
actix-server = "1.0.1"
|
||||
actix-connect = { version = "2.0.0-alpha.4", features = ["openssl"] }
|
||||
actix-http-test = { version = "2.0.0-alpha.1", features = ["openssl"] }
|
||||
actix-tls = { version = "2.0.0-alpha.2", features = ["openssl"] }
|
||||
criterion = "0.3"
|
||||
env_logger = "0.7"
|
||||
serde_derive = "1.0"
|
||||
open-ssl = { version="0.10", package = "openssl" }
|
||||
rust-tls = { version="0.18", package = "rustls" }
|
||||
actix-http-test = { version = "3.0.0-beta.10", features = ["openssl"] }
|
||||
actix-server = "2.0.0-rc.2"
|
||||
actix-tls = { version = "3.0.0", features = ["openssl"] }
|
||||
actix-web = "4.0.0-beta.16"
|
||||
|
||||
async-stream = "0.3"
|
||||
criterion = { version = "0.3", features = ["html_reports"] }
|
||||
env_logger = "0.9"
|
||||
futures-util = { version = "0.3.7", default-features = false, features = ["alloc"] }
|
||||
rcgen = "0.8"
|
||||
regex = "1.3"
|
||||
rustls-pemfile = "0.2"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
static_assertions = "1"
|
||||
tls-openssl = { package = "openssl", version = "0.10.9" }
|
||||
tls-rustls = { package = "rustls", version = "0.20.0" }
|
||||
tokio = { version = "1.8", features = ["net", "rt", "macros"] }
|
||||
|
||||
[[example]]
|
||||
name = "ws"
|
||||
required-features = ["rustls"]
|
||||
|
||||
[[bench]]
|
||||
name = "content-length"
|
||||
name = "write-camel-case"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
@@ -107,3 +113,7 @@ harness = false
|
||||
[[bench]]
|
||||
name = "uninit-headers"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "quality-value"
|
||||
harness = false
|
||||
|
||||
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2017-NOW Nikolay Kim

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
1
actix-http/LICENSE-APACHE
Symbolic link
@@ -0,0 +1 @@
../LICENSE-APACHE
@@ -1,25 +0,0 @@
Copyright (c) 2017 Nikolay Kim

Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
1
actix-http/LICENSE-MIT
Symbolic link
@@ -0,0 +1 @@
../LICENSE-MIT
@@ -1,24 +1,29 @@
# Actix http [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-http)](https://crates.io/crates/actix-http) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# actix-http

Actix http
> HTTP primitives for the Actix ecosystem.

## Documentation & community resources
[![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0-beta.17)](https://docs.rs/actix-http/3.0.0-beta.17)
[![Version](https://img.shields.io/badge/rustc-1.52+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.52.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-http/3.0.0-beta.17/status.svg)](https://deps.rs/crate/actix-http/3.0.0-beta.17)
[![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)

* [User Guide](https://actix.rs/docs/)
* [API Documentation](https://docs.rs/actix-http/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-http](https://crates.io/crates/actix-http)
* Minimum supported Rust version: 1.40 or later
## Documentation & Resources

- [API Documentation](https://docs.rs/actix-http)
- Minimum Supported Rust Version (MSRV): 1.52

## Example

```rust
// see examples/framed_hello.rs for complete list of used crates.
use std::{env, io};

use actix_http::{HttpService, Response};
use actix_server::Server;
use futures::future;
use futures_util::future;
use http::header::HeaderValue;
use log::info;

@@ -49,8 +54,8 @@ async fn main() -> io::Result<()> {

This project is licensed under either of

* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))

at your option.
@@ -1,291 +0,0 @@
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
use bytes::BytesMut;
|
||||
|
||||
// benchmark sending all requests at the same time
|
||||
fn bench_write_content_length(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("write_content_length");
|
||||
|
||||
let sizes = [
|
||||
0, 1, 11, 83, 101, 653, 1001, 6323, 10001, 56329, 100001, 123456, 98724245,
|
||||
4294967202,
|
||||
];
|
||||
|
||||
for i in sizes.iter() {
|
||||
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_original::write_content_length(i, &mut b)
|
||||
})
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_new::write_content_length(i, &mut b)
|
||||
})
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("itoa", i), i, |b, &i| {
|
||||
b.iter(|| {
|
||||
let mut b = BytesMut::with_capacity(35);
|
||||
_itoa::write_content_length(i, &mut b)
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_write_content_length);
|
||||
criterion_main!(benches);
|
||||
|
||||
mod _itoa {
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
|
||||
if n == 0 {
|
||||
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
|
||||
return;
|
||||
}
|
||||
|
||||
let mut buf = itoa::Buffer::new();
|
||||
|
||||
bytes.put_slice(b"\r\ncontent-length: ");
|
||||
bytes.put_slice(buf.format(n).as_bytes());
|
||||
bytes.put_slice(b"\r\n");
|
||||
}
|
||||
}
|
||||
|
||||
mod _new {
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
const DIGITS_START: u8 = b'0';
|
||||
|
||||
/// NOTE: bytes object has to contain enough space
|
||||
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
|
||||
if n == 0 {
|
||||
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
|
||||
return;
|
||||
}
|
||||
|
||||
bytes.put_slice(b"\r\ncontent-length: ");
|
||||
|
||||
if n < 10 {
|
||||
bytes.put_u8(DIGITS_START + (n as u8));
|
||||
} else if n < 100 {
|
||||
let n = n as u8;
|
||||
|
||||
let d10 = n / 10;
|
||||
let d1 = n % 10;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
} else if n < 1000 {
|
||||
let n = n as u16;
|
||||
|
||||
let d100 = (n / 100) as u8;
|
||||
let d10 = ((n / 10) % 10) as u8;
|
||||
let d1 = (n % 10) as u8;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d100);
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
} else if n < 10_000 {
|
||||
let n = n as u16;
|
||||
|
||||
let d1000 = (n / 1000) as u8;
|
||||
let d100 = ((n / 100) % 10) as u8;
|
||||
let d10 = ((n / 10) % 10) as u8;
|
||||
let d1 = (n % 10) as u8;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d1000);
|
||||
bytes.put_u8(DIGITS_START + d100);
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
} else if n < 100_000 {
|
||||
let n = n as u32;
|
||||
|
||||
let d10000 = (n / 10000) as u8;
|
||||
let d1000 = ((n / 1000) % 10) as u8;
|
||||
let d100 = ((n / 100) % 10) as u8;
|
||||
let d10 = ((n / 10) % 10) as u8;
|
||||
let d1 = (n % 10) as u8;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d10000);
|
||||
bytes.put_u8(DIGITS_START + d1000);
|
||||
bytes.put_u8(DIGITS_START + d100);
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
} else if n < 1_000_000 {
|
||||
let n = n as u32;
|
||||
|
||||
let d100000 = (n / 100000) as u8;
|
||||
let d10000 = ((n / 10000) % 10) as u8;
|
||||
let d1000 = ((n / 1000) % 10) as u8;
|
||||
let d100 = ((n / 100) % 10) as u8;
|
||||
let d10 = ((n / 10) % 10) as u8;
|
||||
let d1 = (n % 10) as u8;
|
||||
|
||||
bytes.put_u8(DIGITS_START + d100000);
|
||||
bytes.put_u8(DIGITS_START + d10000);
|
||||
bytes.put_u8(DIGITS_START + d1000);
|
||||
bytes.put_u8(DIGITS_START + d100);
|
||||
bytes.put_u8(DIGITS_START + d10);
|
||||
bytes.put_u8(DIGITS_START + d1);
|
||||
} else {
|
||||
write_usize(n, bytes);
|
||||
}
|
||||
|
||||
bytes.put_slice(b"\r\n");
|
||||
}
|
||||
|
||||
fn write_usize(n: usize, bytes: &mut BytesMut) {
|
||||
let mut n = n;
|
||||
|
||||
// 20 chars is max length of a usize (2^64)
|
||||
// digits will be added to the buffer from lsd to msd
|
||||
let mut buf = BytesMut::with_capacity(20);
|
||||
|
||||
while n > 9 {
|
||||
// "pop" the least-significant digit
|
||||
let lsd = (n % 10) as u8;
|
||||
|
||||
// remove the lsd from n
|
||||
n = n / 10;
|
||||
|
||||
buf.put_u8(DIGITS_START + lsd);
|
||||
}
|
||||
|
||||
// put msd to result buffer
|
||||
bytes.put_u8(DIGITS_START + (n as u8));
|
||||
|
||||
// put, in reverse (msd to lsd), remaining digits to buffer
|
||||
for i in (0..buf.len()).rev() {
|
||||
bytes.put_u8(buf[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
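Editor's note: the `_new` module above spells each digit position out by hand, and its general-case helper `write_usize` collects digits least-significant-first before replaying them in reverse. A minimal, std-only sketch of that idea follows; the function name and the use of `Vec<u8>` instead of `BytesMut` are illustrative only, not part of the benchmark.

```rust
// Illustrative sketch only: emit the content-length header the same
// most-significant-digit-first way as the safe writer above.
fn append_content_length(mut n: u64, out: &mut Vec<u8>) {
    out.extend_from_slice(b"\r\ncontent-length: ");

    // collect digits least-significant-first...
    let mut digits = [0u8; 20];
    let mut len = 0;
    loop {
        digits[len] = b'0' + (n % 10) as u8;
        len += 1;
        n /= 10;
        if n == 0 {
            break;
        }
    }

    // ...then write them out in reverse, i.e. most-significant-first
    out.extend(digits[..len].iter().rev());
    out.extend_from_slice(b"\r\n");
}

fn main() {
    let mut buf = Vec::new();
    append_content_length(1001, &mut buf);
    assert_eq!(&buf[..], &b"\r\ncontent-length: 1001\r\n"[..]);
}
```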
|
||||
mod _original {
|
||||
use std::{mem, ptr, slice};
|
||||
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
|
||||
2021222324252627282930313233343536373839\
|
||||
4041424344454647484950515253545556575859\
|
||||
6061626364656667686970717273747576777879\
|
||||
8081828384858687888990919293949596979899";
|
||||
|
||||
/// NOTE: bytes object has to contain enough space
|
||||
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
|
||||
if n < 10 {
|
||||
let mut buf: [u8; 21] = [
|
||||
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
|
||||
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
|
||||
];
|
||||
buf[18] = (n as u8) + b'0';
|
||||
bytes.put_slice(&buf);
|
||||
} else if n < 100 {
|
||||
let mut buf: [u8; 22] = [
|
||||
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
|
||||
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
|
||||
];
|
||||
let d1 = n << 1;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(
|
||||
DEC_DIGITS_LUT.as_ptr().add(d1),
|
||||
buf.as_mut_ptr().offset(18),
|
||||
2,
|
||||
);
|
||||
}
|
||||
bytes.put_slice(&buf);
|
||||
} else if n < 1000 {
|
||||
let mut buf: [u8; 23] = [
|
||||
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
|
||||
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r',
|
||||
b'\n',
|
||||
];
|
||||
// decode 2 more chars, if > 2 chars
|
||||
let d1 = (n % 100) << 1;
|
||||
n /= 100;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(
|
||||
DEC_DIGITS_LUT.as_ptr().add(d1),
|
||||
buf.as_mut_ptr().offset(19),
|
||||
2,
|
||||
)
|
||||
};
|
||||
|
||||
// decode last 1
|
||||
buf[18] = (n as u8) + b'0';
|
||||
|
||||
bytes.put_slice(&buf);
|
||||
} else {
|
||||
bytes.put_slice(b"\r\ncontent-length: ");
|
||||
convert_usize(n, bytes);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
|
||||
let mut curr: isize = 39;
|
||||
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
|
||||
buf[39] = b'\r';
|
||||
buf[40] = b'\n';
|
||||
let buf_ptr = buf.as_mut_ptr();
|
||||
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
|
||||
|
||||
// eagerly decode 4 characters at a time
|
||||
while n >= 10_000 {
|
||||
let rem = (n % 10_000) as isize;
|
||||
n /= 10_000;
|
||||
|
||||
let d1 = (rem / 100) << 1;
|
||||
let d2 = (rem % 100) << 1;
|
||||
curr -= 4;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
|
||||
ptr::copy_nonoverlapping(
|
||||
lut_ptr.offset(d2),
|
||||
buf_ptr.offset(curr + 2),
|
||||
2,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// if we reach here numbers are <= 9999, so at most 4 chars long
|
||||
let mut n = n as isize; // possibly reduce 64bit math
|
||||
|
||||
// decode 2 more chars, if > 2 chars
|
||||
if n >= 100 {
|
||||
let d1 = (n % 100) << 1;
|
||||
n /= 100;
|
||||
curr -= 2;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
|
||||
}
|
||||
}
|
||||
|
||||
// decode last 1 or 2 chars
|
||||
if n < 10 {
|
||||
curr -= 1;
|
||||
unsafe {
|
||||
*buf_ptr.offset(curr) = (n as u8) + b'0';
|
||||
}
|
||||
} else {
|
||||
let d1 = n << 1;
|
||||
curr -= 2;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
|
||||
}
|
||||
}
|
||||
|
||||
unsafe {
|
||||
bytes.extend_from_slice(slice::from_raw_parts(
|
||||
buf_ptr.offset(curr),
|
||||
41 - curr as usize,
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
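Editor's note: the `DEC_DIGITS_LUT` used by `_original` above packs every two-digit pair "00"–"99" into one table so a single lookup yields two ASCII digits at once. A safe, self-contained sketch of that lookup (illustrative only; the benchmarked code copies the pair with `ptr::copy_nonoverlapping` for speed):

```rust
// Illustrative sketch only: two ASCII digits per lookup, as in DEC_DIGITS_LUT,
// but with ordinary slice indexing instead of raw pointer copies.
const LUT: &[u8] = b"0001020304050607080910111213141516171819\
                     2021222324252627282930313233343536373839\
                     4041424344454647484950515253545556575859\
                     6061626364656667686970717273747576777879\
                     8081828384858687888990919293949596979899";

/// Returns the two ASCII digits for `n % 100`, zero-padded.
fn two_digits(n: usize) -> [u8; 2] {
    let i = (n % 100) * 2;
    [LUT[i], LUT[i + 1]]
}

fn main() {
    assert_eq!(&two_digits(7), b"07");
    assert_eq!(&two_digits(42), b"42");
    assert_eq!(&two_digits(1999), b"99");
}
```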
90
actix-http/benches/quality-value.rs
Normal file
@@ -0,0 +1,90 @@
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
const CODES: &[u16] = &[0, 1000, 201, 800, 550];
|
||||
|
||||
fn bench_quality_display_impls(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("quality value display impls");
|
||||
|
||||
for i in CODES.iter() {
|
||||
group.bench_with_input(BenchmarkId::new("New (fast?)", i), i, |b, &i| {
|
||||
b.iter(|| _new::Quality(i).to_string())
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
|
||||
b.iter(|| _naive::Quality(i).to_string())
|
||||
});
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_quality_display_impls);
|
||||
criterion_main!(benches);
|
||||
|
||||
mod _new {
|
||||
use std::fmt;
|
||||
|
||||
pub struct Quality(pub(crate) u16);
|
||||
|
||||
impl fmt::Display for Quality {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.0 {
|
||||
0 => f.write_str("0"),
|
||||
1000 => f.write_str("1"),
|
||||
|
||||
// some number in the range 1–999
|
||||
x => {
|
||||
f.write_str("0.")?;
|
||||
|
||||
// this implementation avoids string allocation otherwise required
|
||||
// for `.trim_end_matches('0')`
|
||||
|
||||
if x < 10 {
|
||||
f.write_str("00")?;
|
||||
// 0 is handled so it's not possible to have a trailing 0, we can just return
|
||||
itoa::fmt(f, x)
|
||||
} else if x < 100 {
|
||||
f.write_str("0")?;
|
||||
if x % 10 == 0 {
|
||||
// trailing 0, divide by 10 and write
|
||||
itoa::fmt(f, x / 10)
|
||||
} else {
|
||||
itoa::fmt(f, x)
|
||||
}
|
||||
} else {
|
||||
// x is in range 101–999
|
||||
|
||||
if x % 100 == 0 {
|
||||
// two trailing 0s, divide by 100 and write
|
||||
itoa::fmt(f, x / 100)
|
||||
} else if x % 10 == 0 {
|
||||
// one trailing 0, divide by 10 and write
|
||||
itoa::fmt(f, x / 10)
|
||||
} else {
|
||||
itoa::fmt(f, x)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mod _naive {
|
||||
use std::fmt;
|
||||
|
||||
pub struct Quality(pub(crate) u16);
|
||||
|
||||
impl fmt::Display for Quality {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.0 {
|
||||
0 => f.write_str("0"),
|
||||
1000 => f.write_str("1"),
|
||||
|
||||
x => {
|
||||
write!(f, "{}", format!("{:03}", x).trim_end_matches('0'))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
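Editor's note: both `Display` impls above target the same mapping from a 0–1000 quality integer to a q-value string; `_new` only avoids the intermediate allocation that `_naive` pays for. A compact, standalone illustration of the expected outputs (sketch only, not the benchmark code):

```rust
// Illustrative sketch only: the q-value strings both impls above produce.
fn format_quality(q: u16) -> String {
    match q {
        0 => "0".to_owned(),
        1000 => "1".to_owned(),
        // 1..=999 renders as "0." plus up to three digits, trailing zeros dropped
        x => format!("0.{}", format!("{:03}", x).trim_end_matches('0')),
    }
}

fn main() {
    assert_eq!(format_quality(1000), "1");
    assert_eq!(format_quality(800), "0.8");
    assert_eq!(format_quality(550), "0.55");
    assert_eq!(format_quality(201), "0.201");
    assert_eq!(format_quality(0), "0");
}
```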
@@ -176,7 +176,7 @@ mod _original {
|
||||
buf[5] = b'0';
|
||||
buf[7] = b'9';
|
||||
}
|
||||
_ => (),
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let mut curr: isize = 12;
|
||||
@@ -189,11 +189,7 @@ mod _original {
|
||||
n /= 100;
|
||||
curr -= 2;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(
|
||||
lut_ptr.offset(d1 as isize),
|
||||
buf_ptr.offset(curr),
|
||||
2,
|
||||
);
|
||||
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
|
||||
}
|
||||
|
||||
// decode last 1 or 2 chars
|
||||
@@ -206,11 +202,7 @@ mod _original {
|
||||
let d1 = n << 1;
|
||||
curr -= 2;
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(
|
||||
lut_ptr.offset(d1 as isize),
|
||||
buf_ptr.offset(curr),
|
||||
2,
|
||||
);
|
||||
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -54,15 +54,10 @@ const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
|
||||
value: (0, 0),
|
||||
};
|
||||
|
||||
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
|
||||
[EMPTY_HEADER_INDEX; MAX_HEADERS];
|
||||
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] = [EMPTY_HEADER_INDEX; MAX_HEADERS];
|
||||
|
||||
impl HeaderIndex {
|
||||
fn record(
|
||||
bytes: &[u8],
|
||||
headers: &[httparse::Header<'_>],
|
||||
indices: &mut [HeaderIndex],
|
||||
) {
|
||||
fn record(bytes: &[u8], headers: &[httparse::Header<'_>], indices: &mut [HeaderIndex]) {
|
||||
let bytes_ptr = bytes.as_ptr() as usize;
|
||||
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
|
||||
let name_start = header.name.as_ptr() as usize - bytes_ptr;
|
||||
@@ -78,12 +73,12 @@ impl HeaderIndex {
|
||||
// test cases taken from:
|
||||
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
|
||||
|
||||
const REQ_SHORT: &'static [u8] = b"\
|
||||
const REQ_SHORT: &[u8] = b"\
|
||||
GET / HTTP/1.0\r\n\
|
||||
Host: example.com\r\n\
|
||||
Cookie: session=60; user_id=1\r\n\r\n";
|
||||
|
||||
const REQ: &'static [u8] = b"\
|
||||
const REQ: &[u8] = b"\
|
||||
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
|
||||
Host: www.kittyhell.com\r\n\
|
||||
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
|
||||
@@ -119,6 +114,8 @@ mod _original {
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
pub fn parse_headers(src: &mut BytesMut) -> usize {
|
||||
#![allow(clippy::uninit_assumed_init)]
|
||||
|
||||
let mut headers: [HeaderIndex; MAX_HEADERS] =
|
||||
unsafe { MaybeUninit::uninit().assume_init() };
|
||||
|
||||
|
||||
93
actix-http/benches/write-camel-case.rs
Normal file
@@ -0,0 +1,93 @@
|
||||
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
fn bench_write_camel_case(c: &mut Criterion) {
|
||||
let mut group = c.benchmark_group("write_camel_case");
|
||||
|
||||
let names = ["connection", "Transfer-Encoding", "transfer-encoding"];
|
||||
|
||||
for &i in &names {
|
||||
let bts = i.as_bytes();
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("Original", i), bts, |b, bts| {
|
||||
b.iter(|| {
|
||||
let mut buf = black_box([0; 24]);
|
||||
_original::write_camel_case(black_box(bts), &mut buf)
|
||||
});
|
||||
});
|
||||
|
||||
group.bench_with_input(BenchmarkId::new("New", i), bts, |b, bts| {
|
||||
b.iter(|| {
|
||||
let mut buf = black_box([0; 24]);
|
||||
let len = black_box(bts.len());
|
||||
_new::write_camel_case(black_box(bts), buf.as_mut_ptr(), len)
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
group.finish();
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_write_camel_case);
|
||||
criterion_main!(benches);
|
||||
|
||||
mod _new {
|
||||
pub fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
|
||||
// first copy entire (potentially wrong) slice to output
|
||||
let buffer = unsafe {
|
||||
std::ptr::copy_nonoverlapping(value.as_ptr(), buf, len);
|
||||
std::slice::from_raw_parts_mut(buf, len)
|
||||
};
|
||||
|
||||
let mut iter = value.iter();
|
||||
|
||||
// first character should be uppercase
|
||||
if let Some(c @ b'a'..=b'z') = iter.next() {
|
||||
buffer[0] = c & 0b1101_1111;
|
||||
}
|
||||
|
||||
// track 1 ahead of the current position since that's the location being assigned to
|
||||
let mut index = 2;
|
||||
|
||||
// remaining characters after hyphens should also be uppercase
|
||||
while let Some(&c) = iter.next() {
|
||||
if c == b'-' {
|
||||
// advance iter by one and uppercase if needed
|
||||
if let Some(c @ b'a'..=b'z') = iter.next() {
|
||||
buffer[index] = c & 0b1101_1111;
|
||||
}
|
||||
}
|
||||
|
||||
index += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mod _original {
|
||||
pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
|
||||
let mut index = 0;
|
||||
let key = value;
|
||||
let mut key_iter = key.iter();
|
||||
|
||||
if let Some(c) = key_iter.next() {
|
||||
if *c >= b'a' && *c <= b'z' {
|
||||
buffer[index] = *c ^ b' ';
|
||||
index += 1;
|
||||
}
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
|
||||
while let Some(c) = key_iter.next() {
|
||||
buffer[index] = *c;
|
||||
index += 1;
|
||||
if *c == b'-' {
|
||||
if let Some(c) = key_iter.next() {
|
||||
if *c >= b'a' && *c <= b'z' {
|
||||
buffer[index] = *c ^ b' ';
|
||||
index += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
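Editor's note: both versions above upper-case the first byte and any byte that follows a hyphen while writing into a caller-provided buffer. For comparison, the same transformation in safe, allocating form (sketch only; these names are not from the crate):

```rust
// Illustrative sketch only: header-name camel-casing via an owned String
// instead of writing through a raw buffer.
fn camel_case(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    let mut upper_next = true;
    for ch in name.chars() {
        if upper_next && ch.is_ascii_lowercase() {
            out.push(ch.to_ascii_uppercase());
        } else {
            out.push(ch);
        }
        // the character after a hyphen should be upper-cased
        upper_next = ch == '-';
    }
    out
}

fn main() {
    assert_eq!(camel_case("connection"), "Connection");
    assert_eq!(camel_case("transfer-encoding"), "Transfer-Encoding");
}
```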
26
actix-http/examples/actix-web.rs
Normal file
@@ -0,0 +1,26 @@
use actix_http::HttpService;
use actix_server::Server;
use actix_service::map_config;
use actix_web::{dev::AppConfig, get, App};

#[get("/")]
async fn index() -> &'static str {
    "Hello, world. From Actix Web!"
}

#[tokio::main(flavor = "current_thread")]
async fn main() -> std::io::Result<()> {
    Server::build()
        .bind("hello-world", "127.0.0.1:8080", || {
            // construct actix-web app
            let app = App::new().service(index);

            HttpService::build()
                // pass the app to service builder
                // map_config is used to map App's configuration to ServiceBuilder
                .finish(map_config(app, |_| AppConfig::default()))
                .tcp()
        })?
        .run()
        .await
}
@@ -1,19 +1,17 @@
|
||||
use std::{env, io};
|
||||
use std::io;
|
||||
|
||||
use actix_http::{Error, HttpService, Request, Response};
|
||||
use actix_http::{Error, HttpService, Request, Response, StatusCode};
|
||||
use actix_server::Server;
|
||||
use bytes::BytesMut;
|
||||
use futures_util::StreamExt;
|
||||
use futures_util::StreamExt as _;
|
||||
use http::header::HeaderValue;
|
||||
use log::info;
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env::set_var("RUST_LOG", "echo=info");
|
||||
env_logger::init();
|
||||
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
|
||||
|
||||
Server::build()
|
||||
.bind("echo", "127.0.0.1:8080", || {
|
||||
.bind("echo", ("127.0.0.1", 8080), || {
|
||||
HttpService::build()
|
||||
.client_timeout(1000)
|
||||
.client_disconnect(1000)
|
||||
@@ -23,10 +21,11 @@ async fn main() -> io::Result<()> {
|
||||
body.extend_from_slice(&item?);
|
||||
}
|
||||
|
||||
info!("request body: {:?}", body);
|
||||
log::info!("request body: {:?}", body);
|
||||
|
||||
Ok::<_, Error>(
|
||||
Response::Ok()
|
||||
.header("x-head", HeaderValue::from_static("dummy value!"))
|
||||
Response::build(StatusCode::OK)
|
||||
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
|
||||
.body(body),
|
||||
)
|
||||
})
|
||||
|
||||
@@ -1,31 +1,31 @@
|
||||
use std::{env, io};
|
||||
use std::io;
|
||||
|
||||
use actix_http::http::HeaderValue;
|
||||
use actix_http::{Error, HttpService, Request, Response};
|
||||
use actix_http::{
|
||||
body::MessageBody, header::HeaderValue, Error, HttpService, Request, Response, StatusCode,
|
||||
};
|
||||
use actix_server::Server;
|
||||
use bytes::BytesMut;
|
||||
use futures_util::StreamExt;
|
||||
use log::info;
|
||||
use futures_util::StreamExt as _;
|
||||
|
||||
async fn handle_request(mut req: Request) -> Result<Response, Error> {
|
||||
async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> {
|
||||
let mut body = BytesMut::new();
|
||||
while let Some(item) = req.payload().next().await {
|
||||
body.extend_from_slice(&item?)
|
||||
}
|
||||
|
||||
info!("request body: {:?}", body);
|
||||
Ok(Response::Ok()
|
||||
.header("x-head", HeaderValue::from_static("dummy value!"))
|
||||
log::info!("request body: {:?}", body);
|
||||
|
||||
Ok(Response::build(StatusCode::OK)
|
||||
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
|
||||
.body(body))
|
||||
}
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env::set_var("RUST_LOG", "echo=info");
|
||||
env_logger::init();
|
||||
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
|
||||
|
||||
Server::build()
|
||||
.bind("echo", "127.0.0.1:8080", || {
|
||||
.bind("echo", ("127.0.0.1", 8080), || {
|
||||
HttpService::build().finish(handle_request).tcp()
|
||||
})?
|
||||
.run()
|
||||
|
||||
@@ -1,26 +1,35 @@
|
||||
use std::{env, io};
|
||||
use std::{convert::Infallible, io};
|
||||
|
||||
use actix_http::{HttpService, Response};
|
||||
use actix_http::{
|
||||
header::HeaderValue, HttpMessage, HttpService, Request, Response, StatusCode,
|
||||
};
|
||||
use actix_server::Server;
|
||||
use futures_util::future;
|
||||
use http::header::HeaderValue;
|
||||
use log::info;
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env::set_var("RUST_LOG", "hello_world=info");
|
||||
env_logger::init();
|
||||
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
|
||||
|
||||
Server::build()
|
||||
.bind("hello-world", "127.0.0.1:8080", || {
|
||||
.bind("hello-world", ("127.0.0.1", 8080), || {
|
||||
HttpService::build()
|
||||
.client_timeout(1000)
|
||||
.client_disconnect(1000)
|
||||
.finish(|_req| {
|
||||
info!("{:?}", _req);
|
||||
let mut res = Response::Ok();
|
||||
res.header("x-head", HeaderValue::from_static("dummy value!"));
|
||||
future::ok::<_, ()>(res.body("Hello world!"))
|
||||
.on_connect_ext(|_, ext| {
|
||||
ext.insert(42u32);
|
||||
})
|
||||
.finish(|req: Request| async move {
|
||||
log::info!("{:?}", req);
|
||||
|
||||
let mut res = Response::build(StatusCode::OK);
|
||||
res.insert_header(("x-head", HeaderValue::from_static("dummy value!")));
|
||||
|
||||
let forty_two = req.extensions().get::<u32>().unwrap().to_string();
|
||||
res.insert_header((
|
||||
"x-forty-two",
|
||||
HeaderValue::from_str(&forty_two).unwrap(),
|
||||
));
|
||||
|
||||
Ok::<_, Infallible>(res.body("Hello world!"))
|
||||
})
|
||||
.tcp()
|
||||
})?
|
||||
|
||||
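Editor's note: the hunk above stores a `u32` during connection setup (`on_connect_ext`) and reads it back in the handler via `req.extensions()`. The underlying idea is a by-type map; a minimal std-only sketch of that pattern (hypothetical names, not the actix-http types):

```rust
// Illustrative sketch only: a "typed extensions" map keyed by TypeId,
// roughly the shape of what on_connect_ext/req.extensions() rely on.
use std::any::{Any, TypeId};
use std::collections::HashMap;

#[derive(Default)]
struct Extensions(HashMap<TypeId, Box<dyn Any>>);

impl Extensions {
    fn insert<T: 'static>(&mut self, val: T) {
        self.0.insert(TypeId::of::<T>(), Box::new(val));
    }

    fn get<T: 'static>(&self) -> Option<&T> {
        self.0.get(&TypeId::of::<T>()).and_then(|b| b.downcast_ref::<T>())
    }
}

fn main() {
    let mut ext = Extensions::default();
    ext.insert(42u32);
    assert_eq!(ext.get::<u32>(), Some(&42));
    assert!(ext.get::<String>().is_none());
}
```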
40
actix-http/examples/streaming-error.rs
Normal file
@@ -0,0 +1,40 @@
//! Example showing response body (chunked) stream erroring.
//!
//! Test using `nc` or `curl`.
//! ```sh
//! $ curl -vN 127.0.0.1:8080
//! $ echo 'GET / HTTP/1.1\n\n' | nc 127.0.0.1 8080
//! ```

use std::{convert::Infallible, io, time::Duration};

use actix_http::{body::BodyStream, HttpService, Response};
use actix_server::Server;
use async_stream::stream;
use bytes::Bytes;

#[actix_rt::main]
async fn main() -> io::Result<()> {
    env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));

    Server::build()
        .bind("streaming-error", ("127.0.0.1", 8080), || {
            HttpService::build()
                .finish(|req| async move {
                    log::info!("{:?}", req);
                    let res = Response::ok();

                    Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! {
                        yield Ok(Bytes::from("123"));
                        yield Ok(Bytes::from("456"));

                        actix_rt::time::sleep(Duration::from_millis(1000)).await;

                        yield Err(io::Error::new(io::ErrorKind::Other, ""));
                    })))
                })
                .tcp()
        })?
        .run()
        .await
}
112
actix-http/examples/ws.rs
Normal file
@@ -0,0 +1,112 @@
|
||||
//! Sets up a WebSocket server over TCP and TLS.
|
||||
//! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames.
|
||||
|
||||
extern crate tls_rustls as rustls;
|
||||
|
||||
use std::{
|
||||
io,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use actix_codec::Encoder;
|
||||
use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response};
|
||||
use actix_rt::time::{interval, Interval};
|
||||
use actix_server::Server;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use bytestring::ByteString;
|
||||
use futures_core::{ready, Stream};
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
|
||||
|
||||
Server::build()
|
||||
.bind("tcp", ("127.0.0.1", 8080), || {
|
||||
HttpService::build().h1(handler).tcp()
|
||||
})?
|
||||
.bind("tls", ("127.0.0.1", 8443), || {
|
||||
HttpService::build().finish(handler).rustls(tls_config())
|
||||
})?
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
|
||||
async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> {
|
||||
log::info!("handshaking");
|
||||
let mut res = ws::handshake(req.head())?;
|
||||
|
||||
// handshake will always fail under HTTP/2
|
||||
|
||||
log::info!("responding");
|
||||
Ok(res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))?)
|
||||
}
|
||||
|
||||
struct Heartbeat {
|
||||
codec: ws::Codec,
|
||||
interval: Interval,
|
||||
}
|
||||
|
||||
impl Heartbeat {
|
||||
fn new(codec: ws::Codec) -> Self {
|
||||
Self {
|
||||
codec,
|
||||
interval: interval(Duration::from_secs(4)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for Heartbeat {
|
||||
type Item = Result<Bytes, Error>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
log::trace!("poll");
|
||||
|
||||
ready!(self.as_mut().interval.poll_tick(cx));
|
||||
|
||||
let mut buffer = BytesMut::new();
|
||||
|
||||
self.as_mut()
|
||||
.codec
|
||||
.encode(
|
||||
ws::Message::Text(ByteString::from_static("hello world")),
|
||||
&mut buffer,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
Poll::Ready(Some(Ok(buffer.freeze())))
|
||||
}
|
||||
}
|
||||
|
||||
fn tls_config() -> rustls::ServerConfig {
|
||||
use std::io::BufReader;
|
||||
|
||||
use rustls::{Certificate, PrivateKey};
|
||||
use rustls_pemfile::{certs, pkcs8_private_keys};
|
||||
|
||||
let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap();
|
||||
let cert_file = cert.serialize_pem().unwrap();
|
||||
let key_file = cert.serialize_private_key_pem();
|
||||
|
||||
let cert_file = &mut BufReader::new(cert_file.as_bytes());
|
||||
let key_file = &mut BufReader::new(key_file.as_bytes());
|
||||
|
||||
let cert_chain = certs(cert_file)
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.map(Certificate)
|
||||
.collect();
|
||||
let mut keys = pkcs8_private_keys(key_file).unwrap();
|
||||
|
||||
let mut config = rustls::ServerConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(cert_chain, PrivateKey(keys.remove(0)))
|
||||
.unwrap();
|
||||
|
||||
config.alpn_protocols.push(b"http/1.1".to_vec());
|
||||
config.alpn_protocols.push(b"h2".to_vec());
|
||||
|
||||
config
|
||||
}
|
||||
@@ -1,5 +0,0 @@
max_width = 89
reorder_imports = true
#wrap_comments = true
#fn_args_density = "Compressed"
#use_small_heuristics = false
@@ -1,723 +0,0 @@
|
||||
use std::marker::PhantomData;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{fmt, mem};
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures_core::Stream;
|
||||
use futures_util::ready;
|
||||
use pin_project::pin_project;
|
||||
|
||||
use crate::error::Error;
|
||||
|
||||
#[derive(Debug, PartialEq, Copy, Clone)]
|
||||
/// Body size hint
|
||||
pub enum BodySize {
|
||||
None,
|
||||
Empty,
|
||||
Sized(u64),
|
||||
Stream,
|
||||
}
|
||||
|
||||
impl BodySize {
|
||||
pub fn is_eof(&self) -> bool {
|
||||
matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0))
|
||||
}
|
||||
}
|
||||
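Editor's note: `BodySize` is the size hint the rest of the crate consults when serializing a response (roughly, `Sized` corresponds to a `Content-Length` header and `Stream` to streamed/chunked transfer), and `is_eof` flags the variants that carry no payload at all. A standalone copy, runnable on its own, for illustration:

```rust
// Illustrative standalone copy of the BodySize hint shown above.
#[derive(Debug, PartialEq, Copy, Clone)]
enum BodySize {
    None,
    Empty,
    Sized(u64),
    Stream,
}

impl BodySize {
    /// True when the body is known to contain no bytes.
    fn is_eof(&self) -> bool {
        matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0))
    }
}

fn main() {
    assert!(BodySize::Empty.is_eof());
    assert!(BodySize::Sized(0).is_eof());
    assert!(!BodySize::Sized(4).is_eof());
    assert!(!BodySize::Stream.is_eof());
}
```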
|
||||
/// Type that provides this trait can be streamed to a peer.
|
||||
pub trait MessageBody {
|
||||
fn size(&self) -> BodySize;
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>>;
|
||||
|
||||
downcast_get_type_id!();
|
||||
}
|
||||
|
||||
downcast!(MessageBody);
|
||||
|
||||
impl MessageBody for () {
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Empty
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
Poll::Ready(None)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: MessageBody + Unpin> MessageBody for Box<T> {
|
||||
fn size(&self) -> BodySize {
|
||||
self.as_ref().size()
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
Pin::new(self.get_mut().as_mut()).poll_next(cx)
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project(project = ResponseBodyProj)]
|
||||
pub enum ResponseBody<B> {
|
||||
Body(#[pin] B),
|
||||
Other(#[pin] Body),
|
||||
}
|
||||
|
||||
impl ResponseBody<Body> {
|
||||
pub fn into_body<B>(self) -> ResponseBody<B> {
|
||||
match self {
|
||||
ResponseBody::Body(b) => ResponseBody::Other(b),
|
||||
ResponseBody::Other(b) => ResponseBody::Other(b),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<B> ResponseBody<B> {
|
||||
pub fn take_body(&mut self) -> ResponseBody<B> {
|
||||
std::mem::replace(self, ResponseBody::Other(Body::None))
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: MessageBody> ResponseBody<B> {
|
||||
pub fn as_ref(&self) -> Option<&B> {
|
||||
if let ResponseBody::Body(ref b) = self {
|
||||
Some(b)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: MessageBody> MessageBody for ResponseBody<B> {
|
||||
fn size(&self) -> BodySize {
|
||||
match self {
|
||||
ResponseBody::Body(ref body) => body.size(),
|
||||
ResponseBody::Other(ref body) => body.size(),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
match self.project() {
|
||||
ResponseBodyProj::Body(body) => body.poll_next(cx),
|
||||
ResponseBodyProj::Other(body) => body.poll_next(cx),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: MessageBody> Stream for ResponseBody<B> {
|
||||
type Item = Result<Bytes, Error>;
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
match self.project() {
|
||||
ResponseBodyProj::Body(body) => body.poll_next(cx),
|
||||
ResponseBodyProj::Other(body) => body.poll_next(cx),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project(project = BodyProj)]
|
||||
/// Represents various types of http message body.
|
||||
pub enum Body {
|
||||
/// Empty response. `Content-Length` header is not set.
|
||||
None,
|
||||
/// Zero sized response body. `Content-Length` header is set to `0`.
|
||||
Empty,
|
||||
/// Specific response body.
|
||||
Bytes(Bytes),
|
||||
/// Generic message body.
|
||||
Message(Box<dyn MessageBody + Unpin>),
|
||||
}
|
||||
|
||||
impl Body {
|
||||
/// Create body from slice (copy)
|
||||
pub fn from_slice(s: &[u8]) -> Body {
|
||||
Body::Bytes(Bytes::copy_from_slice(s))
|
||||
}
|
||||
|
||||
/// Create body from generic message body.
|
||||
pub fn from_message<B: MessageBody + Unpin + 'static>(body: B) -> Body {
|
||||
Body::Message(Box::new(body))
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for Body {
|
||||
fn size(&self) -> BodySize {
|
||||
match self {
|
||||
Body::None => BodySize::None,
|
||||
Body::Empty => BodySize::Empty,
|
||||
Body::Bytes(ref bin) => BodySize::Sized(bin.len() as u64),
|
||||
Body::Message(ref body) => body.size(),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
match self.project() {
|
||||
BodyProj::None => Poll::Ready(None),
|
||||
BodyProj::Empty => Poll::Ready(None),
|
||||
BodyProj::Bytes(ref mut bin) => {
|
||||
let len = bin.len();
|
||||
if len == 0 {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(mem::take(bin))))
|
||||
}
|
||||
}
|
||||
BodyProj::Message(ref mut body) => Pin::new(body.as_mut()).poll_next(cx),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for Body {
|
||||
fn eq(&self, other: &Body) -> bool {
|
||||
match *self {
|
||||
Body::None => matches!(*other, Body::None),
|
||||
Body::Empty => matches!(*other, Body::Empty),
|
||||
Body::Bytes(ref b) => match *other {
|
||||
Body::Bytes(ref b2) => b == b2,
|
||||
_ => false,
|
||||
},
|
||||
Body::Message(_) => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Body {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match *self {
|
||||
Body::None => write!(f, "Body::None"),
|
||||
Body::Empty => write!(f, "Body::Empty"),
|
||||
Body::Bytes(ref b) => write!(f, "Body::Bytes({:?})", b),
|
||||
Body::Message(_) => write!(f, "Body::Message(_)"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&'static str> for Body {
|
||||
fn from(s: &'static str) -> Body {
|
||||
Body::Bytes(Bytes::from_static(s.as_ref()))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&'static [u8]> for Body {
|
||||
fn from(s: &'static [u8]) -> Body {
|
||||
Body::Bytes(Bytes::from_static(s))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Vec<u8>> for Body {
|
||||
fn from(vec: Vec<u8>) -> Body {
|
||||
Body::Bytes(Bytes::from(vec))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for Body {
|
||||
fn from(s: String) -> Body {
|
||||
s.into_bytes().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a String> for Body {
|
||||
fn from(s: &'a String) -> Body {
|
||||
Body::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(&s)))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Bytes> for Body {
|
||||
fn from(s: Bytes) -> Body {
|
||||
Body::Bytes(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BytesMut> for Body {
|
||||
fn from(s: BytesMut) -> Body {
|
||||
Body::Bytes(s.freeze())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Value> for Body {
|
||||
fn from(v: serde_json::Value) -> Body {
|
||||
Body::Bytes(v.to_string().into())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> From<SizedStream<S>> for Body
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, Error>> + Unpin + 'static,
|
||||
{
|
||||
fn from(s: SizedStream<S>) -> Body {
|
||||
Body::from_message(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, E> From<BodyStream<S, E>> for Body
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
|
||||
E: Into<Error> + 'static,
|
||||
{
|
||||
fn from(s: BodyStream<S, E>) -> Body {
|
||||
Body::from_message(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for Bytes {
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for BytesMut {
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for &'static str {
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(Bytes::from_static(
|
||||
mem::take(self.get_mut()).as_ref(),
|
||||
))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for Vec<u8> {
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(Bytes::from(mem::take(self.get_mut())))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for String {
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(Bytes::from(
|
||||
mem::take(self.get_mut()).into_bytes(),
|
||||
))))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Type represent streaming body.
|
||||
/// Response does not contain `content-length` header and appropriate transfer encoding is used.
|
||||
#[pin_project]
|
||||
pub struct BodyStream<S: Unpin, E> {
|
||||
#[pin]
|
||||
stream: S,
|
||||
_t: PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<S, E> BodyStream<S, E>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, E>> + Unpin,
|
||||
E: Into<Error>,
|
||||
{
|
||||
pub fn new(stream: S) -> Self {
|
||||
BodyStream {
|
||||
stream,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, E> MessageBody for BodyStream<S, E>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, E>> + Unpin,
|
||||
E: Into<Error>,
|
||||
{
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Stream
|
||||
}
|
||||
|
||||
/// Attempts to pull out the next value of the underlying [`Stream`].
|
||||
///
|
||||
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being
|
||||
/// ended on a zero-length chunk, but rather proceed until the underlying
|
||||
/// [`Stream`] ends.
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
let mut stream = self.project().stream;
|
||||
loop {
|
||||
let stream = stream.as_mut();
|
||||
return Poll::Ready(match ready!(stream.poll_next(cx)) {
|
||||
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
|
||||
opt => opt.map(|res| res.map_err(Into::into)),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Type represent streaming body. This body implementation should be used
|
||||
/// if total size of stream is known. Data get sent as is without using transfer encoding.
|
||||
#[pin_project]
|
||||
pub struct SizedStream<S: Unpin> {
|
||||
size: u64,
|
||||
#[pin]
|
||||
stream: S,
|
||||
}
|
||||
|
||||
impl<S> SizedStream<S>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, Error>> + Unpin,
|
||||
{
|
||||
pub fn new(size: u64, stream: S) -> Self {
|
||||
SizedStream { size, stream }
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> MessageBody for SizedStream<S>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, Error>> + Unpin,
|
||||
{
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.size as u64)
|
||||
}
|
||||
|
||||
/// Attempts to pull out the next value of the underlying [`Stream`].
|
||||
///
|
||||
/// Empty values are skipped to prevent [`SizedStream`]'s transmission being
|
||||
/// ended on a zero-length chunk, but rather proceed until the underlying
|
||||
/// [`Stream`] ends.
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
let mut stream: Pin<&mut S> = self.project().stream;
|
||||
loop {
|
||||
let stream = stream.as_mut();
|
||||
return Poll::Ready(match ready!(stream.poll_next(cx)) {
|
||||
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
|
||||
val => val,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use futures_util::future::poll_fn;
|
||||
use futures_util::pin_mut;
|
||||
use futures_util::stream;
|
||||
|
||||
impl Body {
|
||||
pub(crate) fn get_ref(&self) -> &[u8] {
|
||||
match *self {
|
||||
Body::Bytes(ref bin) => &bin,
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ResponseBody<Body> {
|
||||
pub(crate) fn get_ref(&self) -> &[u8] {
|
||||
match *self {
|
||||
ResponseBody::Body(ref b) => b.get_ref(),
|
||||
ResponseBody::Other(ref b) => b.get_ref(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_static_str() {
|
||||
assert_eq!(Body::from("").size(), BodySize::Sized(0));
|
||||
assert_eq!(Body::from("test").size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from("test").get_ref(), b"test");
|
||||
|
||||
assert_eq!("test".size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| Pin::new(&mut "test").poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_static_bytes() {
|
||||
assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test");
|
||||
assert_eq!(
|
||||
Body::from_slice(b"test".as_ref()).size(),
|
||||
BodySize::Sized(4)
|
||||
);
|
||||
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
|
||||
let sb = Bytes::from(&b"test"[..]);
|
||||
pin_mut!(sb);
|
||||
|
||||
assert_eq!(sb.size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_vec() {
|
||||
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
|
||||
let test_vec = Vec::from("test");
|
||||
pin_mut!(test_vec);
|
||||
|
||||
assert_eq!(test_vec.size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| test_vec.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_bytes() {
|
||||
let b = Bytes::from("test");
|
||||
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
|
||||
pin_mut!(b);
|
||||
|
||||
assert_eq!(b.size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_bytes_mut() {
|
||||
let b = BytesMut::from("test");
|
||||
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
|
||||
pin_mut!(b);
|
||||
|
||||
assert_eq!(b.size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_string() {
|
||||
let b = "test".to_owned();
|
||||
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
|
||||
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
|
||||
assert_eq!(Body::from(&b).get_ref(), b"test");
|
||||
pin_mut!(b);
|
||||
|
||||
assert_eq!(b.size(), BodySize::Sized(4));
|
||||
assert_eq!(
|
||||
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
|
||||
Some(Bytes::from("test"))
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_unit() {
|
||||
assert_eq!(().size(), BodySize::Empty);
|
||||
assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx))
|
||||
.await
|
||||
.is_none());
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_box() {
|
||||
let val = Box::new(());
|
||||
pin_mut!(val);
|
||||
assert_eq!(val.size(), BodySize::Empty);
|
||||
assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none());
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_body_eq() {
|
||||
assert!(
|
||||
Body::Bytes(Bytes::from_static(b"1"))
|
||||
== Body::Bytes(Bytes::from_static(b"1"))
|
||||
);
|
||||
assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_body_debug() {
|
||||
assert!(format!("{:?}", Body::None).contains("Body::None"));
|
||||
assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
|
||||
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains('1'));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_serde_json() {
|
||||
use serde_json::json;
|
||||
assert_eq!(
|
||||
Body::from(serde_json::Value::String("test".into())).size(),
|
||||
BodySize::Sized(6)
|
||||
);
|
||||
assert_eq!(
|
||||
Body::from(json!({"test-key":"test-value"})).size(),
|
||||
BodySize::Sized(25)
|
||||
);
|
||||
}
|
||||
|
||||
mod body_stream {
|
||||
use super::*;
|
||||
//use futures::task::noop_waker;
|
||||
//use futures::stream::once;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn skips_empty_chunks() {
|
||||
let body = BodyStream::new(stream::iter(
|
||||
["1", "", "2"]
|
||||
.iter()
|
||||
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
|
||||
));
|
||||
pin_mut!(body);
|
||||
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("1")),
|
||||
);
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("2")),
|
||||
);
|
||||
}
|
||||
|
||||
/* Now it does not compile as it should
|
||||
#[actix_rt::test]
|
||||
async fn move_pinned_pointer() {
|
||||
let (sender, receiver) = futures::channel::oneshot::channel();
|
||||
let mut body_stream = Ok(BodyStream::new(once(async {
|
||||
let x = Box::new(0i32);
|
||||
let y = &x;
|
||||
receiver.await.unwrap();
|
||||
let _z = **y;
|
||||
Ok::<_, ()>(Bytes::new())
|
||||
})));
|
||||
|
||||
let waker = noop_waker();
|
||||
let mut context = Context::from_waker(&waker);
|
||||
pin_mut!(body_stream);
|
||||
|
||||
let _ = body_stream.as_mut().unwrap().poll_next(&mut context);
|
||||
sender.send(()).unwrap();
|
||||
let _ = std::mem::replace(&mut body_stream, Err([0; 32])).unwrap().poll_next(&mut context);
|
||||
}*/
|
||||
}
|
||||
|
||||
mod sized_stream {
|
||||
use super::*;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn skips_empty_chunks() {
|
||||
let body = SizedStream::new(
|
||||
2,
|
||||
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
|
||||
);
|
||||
pin_mut!(body);
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("1")),
|
||||
);
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("2")),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_body_casting() {
|
||||
let mut body = String::from("hello cast");
|
||||
let resp_body: &mut dyn MessageBody = &mut body;
|
||||
let body = resp_body.downcast_ref::<String>().unwrap();
|
||||
assert_eq!(body, "hello cast");
|
||||
let body = &mut resp_body.downcast_mut::<String>().unwrap();
|
||||
body.push_str("!");
|
||||
let body = resp_body.downcast_ref::<String>().unwrap();
|
||||
assert_eq!(body, "hello cast!");
|
||||
let not_body = resp_body.downcast_ref::<()>();
|
||||
assert!(not_body.is_none());
|
||||
}
|
||||
}
|
||||
215
actix-http/src/body/body_stream.rs
Normal file
@@ -0,0 +1,215 @@
|
||||
use std::{
|
||||
error::Error as StdError,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures_core::{ready, Stream};
|
||||
use pin_project_lite::pin_project;
|
||||
|
||||
use super::{BodySize, MessageBody};
|
||||
|
||||
pin_project! {
|
||||
/// Streaming response wrapper.
|
||||
///
|
||||
/// Response does not contain `Content-Length` header and appropriate transfer encoding is used.
|
||||
pub struct BodyStream<S> {
|
||||
#[pin]
|
||||
stream: S,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: from_infallible method
|
||||
|
||||
impl<S, E> BodyStream<S>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, E>>,
|
||||
E: Into<Box<dyn StdError>> + 'static,
|
||||
{
|
||||
#[inline]
|
||||
pub fn new(stream: S) -> Self {
|
||||
BodyStream { stream }
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, E> MessageBody for BodyStream<S>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, E>>,
|
||||
E: Into<Box<dyn StdError>> + 'static,
|
||||
{
|
||||
type Error = E;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Stream
|
||||
}
|
||||
|
||||
/// Attempts to pull out the next value of the underlying [`Stream`].
|
||||
///
|
||||
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being
|
||||
/// ended on a zero-length chunk, but rather proceed until the underlying
|
||||
/// [`Stream`] ends.
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
loop {
|
||||
let stream = self.as_mut().project().stream;
|
||||
|
||||
let chunk = match ready!(stream.poll_next(cx)) {
|
||||
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
|
||||
opt => opt,
|
||||
};
|
||||
|
||||
return Poll::Ready(chunk);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{convert::Infallible, time::Duration};
|
||||
|
||||
use actix_rt::{
|
||||
pin,
|
||||
time::{sleep, Sleep},
|
||||
};
|
||||
use actix_utils::future::poll_fn;
|
||||
use derive_more::{Display, Error};
|
||||
use futures_core::ready;
|
||||
use futures_util::{stream, FutureExt as _};
|
||||
use pin_project_lite::pin_project;
|
||||
use static_assertions::{assert_impl_all, assert_not_impl_all};
|
||||
|
||||
use super::*;
|
||||
use crate::body::to_bytes;
|
||||
|
||||
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
|
||||
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
|
||||
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
|
||||
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
|
||||
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
|
||||
|
||||
assert_not_impl_all!(BodyStream<stream::Empty<Bytes>>: MessageBody);
|
||||
assert_not_impl_all!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
|
||||
// crate::Error is not Clone
|
||||
assert_not_impl_all!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn skips_empty_chunks() {
|
||||
let body = BodyStream::new(stream::iter(
|
||||
["1", "", "2"]
|
||||
.iter()
|
||||
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
|
||||
));
|
||||
pin!(body);
|
||||
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("1")),
|
||||
);
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("2")),
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn read_to_bytes() {
|
||||
let body = BodyStream::new(stream::iter(
|
||||
["1", "", "2"]
|
||||
.iter()
|
||||
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
|
||||
));
|
||||
|
||||
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
|
||||
}
|
||||
#[derive(Debug, Display, Error)]
|
||||
#[display(fmt = "stream error")]
|
||||
struct StreamErr;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn stream_immediate_error() {
|
||||
let body = BodyStream::new(stream::once(async { Err(StreamErr) }));
|
||||
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn stream_string_error() {
|
||||
// `&'static str` does not impl `Error`
|
||||
// but it does impl `Into<Box<dyn Error>>`
|
||||
|
||||
let body = BodyStream::new(stream::once(async { Err("stringy error") }));
|
||||
assert!(matches!(to_bytes(body).await, Err("stringy error")));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn stream_boxed_error() {
|
||||
// `Box<dyn Error>` does not impl `Error`
|
||||
// but it does impl `Into<Box<dyn Error>>`
|
||||
|
||||
let body = BodyStream::new(stream::once(async {
|
||||
Err(Box::<dyn StdError>::from("stringy error"))
|
||||
}));
|
||||
|
||||
assert_eq!(
|
||||
to_bytes(body).await.unwrap_err().to_string(),
|
||||
"stringy error"
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn stream_delayed_error() {
|
||||
let body = BodyStream::new(stream::iter(vec![Ok(Bytes::from("1")), Err(StreamErr)]));
|
||||
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
|
||||
|
||||
pin_project! {
|
||||
#[derive(Debug)]
|
||||
#[project = TimeDelayStreamProj]
|
||||
enum TimeDelayStream {
|
||||
Start,
|
||||
Sleep { delay: Pin<Box<Sleep>> },
|
||||
Done,
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for TimeDelayStream {
|
||||
type Item = Result<Bytes, StreamErr>;
|
||||
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
match self.as_mut().get_mut() {
|
||||
TimeDelayStream::Start => {
|
||||
let sleep = sleep(Duration::from_millis(1));
|
||||
self.as_mut().set(TimeDelayStream::Sleep {
|
||||
delay: Box::pin(sleep),
|
||||
});
|
||||
cx.waker().wake_by_ref();
|
||||
Poll::Pending
|
||||
}
|
||||
|
||||
TimeDelayStream::Sleep { ref mut delay } => {
|
||||
ready!(delay.poll_unpin(cx));
|
||||
self.set(TimeDelayStream::Done);
|
||||
cx.waker().wake_by_ref();
|
||||
Poll::Pending
|
||||
}
|
||||
|
||||
TimeDelayStream::Done => Poll::Ready(Some(Err(StreamErr))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let body = BodyStream::new(TimeDelayStream::Start);
|
||||
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
|
||||
}
|
||||
}
|
||||
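To ground the new `BodyStream` wrapper, here is a minimal usage sketch in the style of the tests above. The chunk values and test name are illustrative; `BodyStream`, `to_bytes`, and the crate paths are taken from this diff.

```rust
use std::convert::Infallible;

use actix_http::body::{to_bytes, BodyStream};
use bytes::Bytes;
use futures_util::stream;

#[actix_rt::test]
async fn collect_body_stream() {
    // Wrap a fallible stream of `Bytes` chunks; empty chunks are skipped by `poll_next`.
    let body = BodyStream::new(stream::iter(
        ["he", "", "llo"]
            .iter()
            .map(|&chunk| Ok::<_, Infallible>(Bytes::from(chunk))),
    ));

    // `to_bytes` drives the body to completion and concatenates the chunks.
    assert_eq!(to_bytes(body).await.unwrap(), Bytes::from("hello"));
}
```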
127 actix-http/src/body/boxed.rs Normal file
@@ -0,0 +1,127 @@
|
||||
use std::{
|
||||
error::Error as StdError,
|
||||
fmt,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use bytes::Bytes;
|
||||
|
||||
use super::{BodySize, MessageBody, MessageBodyMapErr};
|
||||
use crate::body;
|
||||
|
||||
/// A boxed message body with boxed errors.
|
||||
#[derive(Debug)]
|
||||
pub struct BoxBody(BoxBodyInner);
|
||||
|
||||
enum BoxBodyInner {
|
||||
None(body::None),
|
||||
Bytes(Bytes),
|
||||
Stream(Pin<Box<dyn MessageBody<Error = Box<dyn StdError>>>>),
|
||||
}
|
||||
|
||||
impl fmt::Debug for BoxBodyInner {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Self::None(arg0) => f.debug_tuple("None").field(arg0).finish(),
|
||||
Self::Bytes(arg0) => f.debug_tuple("Bytes").field(arg0).finish(),
|
||||
Self::Stream(_) => f.debug_tuple("Stream").field(&"dyn MessageBody").finish(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BoxBody {
|
||||
/// Same as `MessageBody::boxed`.
|
||||
///
|
||||
/// If the body type to wrap is unknown or generic it is better to use [`MessageBody::boxed`] to
|
||||
/// avoid double boxing.
|
||||
#[inline]
|
||||
pub fn new<B>(body: B) -> Self
|
||||
where
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
match body.size() {
|
||||
BodySize::None => Self(BoxBodyInner::None(body::None)),
|
||||
_ => match body.try_into_bytes() {
|
||||
Ok(bytes) => Self(BoxBodyInner::Bytes(bytes)),
|
||||
Err(body) => {
|
||||
let body = MessageBodyMapErr::new(body, Into::into);
|
||||
Self(BoxBodyInner::Stream(Box::pin(body)))
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a mutable pinned reference to the inner message body type.
|
||||
#[inline]
|
||||
pub fn as_pin_mut(&mut self) -> Pin<&mut Self> {
|
||||
Pin::new(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for BoxBody {
|
||||
type Error = Box<dyn StdError>;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
match &self.0 {
|
||||
BoxBodyInner::None(none) => none.size(),
|
||||
BoxBodyInner::Bytes(bytes) => bytes.size(),
|
||||
BoxBodyInner::Stream(stream) => stream.size(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
match &mut self.0 {
|
||||
BoxBodyInner::None(body) => {
|
||||
Pin::new(body).poll_next(cx).map_err(|err| match err {})
|
||||
}
|
||||
BoxBodyInner::Bytes(body) => {
|
||||
Pin::new(body).poll_next(cx).map_err(|err| match err {})
|
||||
}
|
||||
BoxBodyInner::Stream(body) => Pin::new(body).poll_next(cx),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
match self.0 {
|
||||
BoxBodyInner::None(body) => Ok(body.try_into_bytes().unwrap()),
|
||||
BoxBodyInner::Bytes(body) => Ok(body.try_into_bytes().unwrap()),
|
||||
_ => Err(self),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn boxed(self) -> BoxBody {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use static_assertions::{assert_impl_all, assert_not_impl_all};
|
||||
|
||||
use super::*;
|
||||
use crate::body::to_bytes;
|
||||
|
||||
assert_impl_all!(BoxBody: MessageBody, fmt::Debug, Unpin);
|
||||
|
||||
assert_not_impl_all!(BoxBody: Send, Sync, Unpin);
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn nested_boxed_body() {
|
||||
let body = Bytes::from_static(&[1, 2, 3]);
|
||||
let boxed_body = BoxBody::new(BoxBody::new(body));
|
||||
|
||||
assert_eq!(
|
||||
to_bytes(boxed_body).await.unwrap(),
|
||||
Bytes::from(vec![1, 2, 3]),
|
||||
);
|
||||
}
|
||||
}
|
||||
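A brief sketch of how `BoxBody` type erasure might be used, assuming the API exactly as added above; the test name is illustrative. Note that calling `boxed()` on an already boxed body is a no-op, per the `fn boxed` override above.

```rust
use actix_http::body::{BoxBody, MessageBody as _};
use bytes::Bytes;

#[actix_rt::test]
async fn box_body_type_erasure() {
    // Erase the concrete body type; complete bodies are stored as `Bytes` internally.
    let body: BoxBody = BoxBody::new(Bytes::from_static(b"payload"));

    // Boxing an already boxed body simply returns it, avoiding double boxing.
    let body = body.boxed();

    // The complete chunk can still be extracted without polling.
    assert_eq!(body.try_into_bytes().unwrap(), Bytes::from_static(b"payload"));
}
```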
108 actix-http/src/body/either.rs Normal file
@@ -0,0 +1,108 @@
|
||||
use std::{
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use bytes::Bytes;
|
||||
use pin_project_lite::pin_project;
|
||||
|
||||
use super::{BodySize, BoxBody, MessageBody};
|
||||
use crate::Error;
|
||||
|
||||
pin_project! {
|
||||
#[project = EitherBodyProj]
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum EitherBody<L, R = BoxBody> {
|
||||
/// A body of type `L`.
|
||||
Left { #[pin] body: L },
|
||||
|
||||
/// A body of type `R`.
|
||||
Right { #[pin] body: R },
|
||||
}
|
||||
}
|
||||
|
||||
impl<L> EitherBody<L, BoxBody> {
|
||||
/// Creates a new `EitherBody` from the left variant, with `BoxBody` as the right variant.
|
||||
#[inline]
|
||||
pub fn new(body: L) -> Self {
|
||||
Self::Left { body }
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R> EitherBody<L, R> {
|
||||
/// Creates a new `EitherBody` using the left variant.
|
||||
#[inline]
|
||||
pub fn left(body: L) -> Self {
|
||||
Self::Left { body }
|
||||
}
|
||||
|
||||
/// Creates a new `EitherBody` using the right variant.
|
||||
#[inline]
|
||||
pub fn right(body: R) -> Self {
|
||||
Self::Right { body }
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R> MessageBody for EitherBody<L, R>
|
||||
where
|
||||
L: MessageBody + 'static,
|
||||
R: MessageBody + 'static,
|
||||
{
|
||||
type Error = Error;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
match self {
|
||||
EitherBody::Left { body } => body.size(),
|
||||
EitherBody::Right { body } => body.size(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
match self.project() {
|
||||
EitherBodyProj::Left { body } => body
|
||||
.poll_next(cx)
|
||||
.map_err(|err| Error::new_body().with_cause(err)),
|
||||
EitherBodyProj::Right { body } => body
|
||||
.poll_next(cx)
|
||||
.map_err(|err| Error::new_body().with_cause(err)),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
match self {
|
||||
EitherBody::Left { body } => body
|
||||
.try_into_bytes()
|
||||
.map_err(|body| EitherBody::Left { body }),
|
||||
EitherBody::Right { body } => body
|
||||
.try_into_bytes()
|
||||
.map_err(|body| EitherBody::Right { body }),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn boxed(self) -> BoxBody {
|
||||
match self {
|
||||
EitherBody::Left { body } => body.boxed(),
|
||||
EitherBody::Right { body } => body.boxed(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn type_parameter_inference() {
|
||||
let _body: EitherBody<(), _> = EitherBody::new(());
|
||||
|
||||
let _body: EitherBody<_, ()> = EitherBody::left(());
|
||||
let _body: EitherBody<(), _> = EitherBody::right(());
|
||||
}
|
||||
}
|
||||
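A sketch of the typical use for `EitherBody`: choosing between two body types at runtime, with `BoxBody` as the default right variant. The `greeting` helper and its strings are invented for illustration; the types and constructors come from the code above.

```rust
use actix_http::body::{to_bytes, BoxBody, EitherBody};
use bytes::Bytes;

// Illustrative helper: pick one of two body types at runtime.
fn greeting(short: bool) -> EitherBody<&'static str> {
    if short {
        EitherBody::new("hi")
    } else {
        EitherBody::right(BoxBody::new(Bytes::from_static(b"hello, world")))
    }
}

#[actix_rt::test]
async fn either_body_variants() {
    assert_eq!(to_bytes(greeting(true)).await.unwrap(), Bytes::from("hi"));
    assert_eq!(
        to_bytes(greeting(false)).await.unwrap(),
        Bytes::from("hello, world")
    );
}
```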
554 actix-http/src/body/message_body.rs Normal file
@@ -0,0 +1,554 @@
|
||||
//! [`MessageBody`] trait and foreign implementations.
|
||||
|
||||
use std::{
|
||||
convert::Infallible,
|
||||
error::Error as StdError,
|
||||
mem,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures_core::ready;
|
||||
use pin_project_lite::pin_project;
|
||||
|
||||
use super::{BodySize, BoxBody};
|
||||
|
||||
/// An interface for types that can be converted to bytes and used as response bodies.
|
||||
// TODO: examples
|
||||
pub trait MessageBody {
|
||||
/// The type of error that will be returned if streaming body fails.
|
||||
///
|
||||
/// Since it is not appropriate to generate a response mid-stream, the error is only required
/// for internal use and logging.
|
||||
type Error: Into<Box<dyn StdError>>;
|
||||
|
||||
/// Body size hint.
|
||||
///
|
||||
/// If [`BodySize::None`] is returned, optimizations that skip reading the body are allowed.
|
||||
fn size(&self) -> BodySize;
|
||||
|
||||
/// Attempt to pull out the next chunk of body bytes.
|
||||
// TODO: expand documentation
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>>;
|
||||
|
||||
/// Try to convert into the complete chunk of body bytes.
|
||||
///
|
||||
/// Implement this method if the entire body can be trivially extracted. This is useful for
|
||||
/// optimizations where `poll_next` calls can be avoided.
|
||||
///
|
||||
/// Body types with [`BodySize::None`] are allowed to return an empty `Bytes`. That said, callers
/// of this method are recommended to check `size` first and return early.
|
||||
///
|
||||
/// # Errors
|
||||
/// The default implementation will error and return the original type back to the caller for
|
||||
/// further use.
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
Err(self)
|
||||
}
|
||||
|
||||
/// Converts this body into `BoxBody`.
|
||||
#[inline]
|
||||
fn boxed(self) -> BoxBody
|
||||
where
|
||||
Self: Sized + 'static,
|
||||
{
|
||||
BoxBody::new(self)
|
||||
}
|
||||
}
|
||||
|
||||
mod foreign_impls {
|
||||
use super::*;
|
||||
|
||||
impl MessageBody for Infallible {
|
||||
type Error = Infallible;
|
||||
|
||||
fn size(&self) -> BodySize {
|
||||
match *self {}
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
match *self {}
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for () {
|
||||
type Error = Infallible;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(0)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
Poll::Ready(None)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
Ok(Bytes::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl<B> MessageBody for Box<B>
|
||||
where
|
||||
B: MessageBody + Unpin + ?Sized,
|
||||
{
|
||||
type Error = B::Error;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
self.as_ref().size()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
Pin::new(self.get_mut().as_mut()).poll_next(cx)
|
||||
}
|
||||
}
|
||||
|
||||
impl<B> MessageBody for Pin<Box<B>>
|
||||
where
|
||||
B: MessageBody + ?Sized,
|
||||
{
|
||||
type Error = B::Error;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
self.as_ref().size()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
self.get_mut().as_mut().poll_next(cx)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for &'static [u8] {
|
||||
type Error = Infallible;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(Bytes::from_static(mem::take(self.get_mut())))))
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
Ok(Bytes::from_static(self))
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for Bytes {
|
||||
type Error = Infallible;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for BytesMut {
|
||||
type Error = Infallible;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
Ok(self.freeze())
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for Vec<u8> {
|
||||
type Error = Infallible;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(mem::take(self.get_mut()).into())))
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
Ok(Bytes::from(self))
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for &'static str {
|
||||
type Error = Infallible;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
let string = mem::take(self.get_mut());
|
||||
let bytes = Bytes::from_static(string.as_bytes());
|
||||
Poll::Ready(Some(Ok(bytes)))
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
Ok(Bytes::from_static(self.as_bytes()))
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for String {
|
||||
type Error = Infallible;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
if self.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
let string = mem::take(self.get_mut());
|
||||
Poll::Ready(Some(Ok(Bytes::from(string))))
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
Ok(Bytes::from(self))
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for bytestring::ByteString {
|
||||
type Error = Infallible;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.len() as u64)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
let string = mem::take(self.get_mut());
|
||||
Poll::Ready(Some(Ok(string.into_bytes())))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
Ok(self.into_bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pin_project! {
|
||||
pub(crate) struct MessageBodyMapErr<B, F> {
|
||||
#[pin]
|
||||
body: B,
|
||||
mapper: Option<F>,
|
||||
}
|
||||
}
|
||||
|
||||
impl<B, F, E> MessageBodyMapErr<B, F>
|
||||
where
|
||||
B: MessageBody,
|
||||
F: FnOnce(B::Error) -> E,
|
||||
{
|
||||
pub(crate) fn new(body: B, mapper: F) -> Self {
|
||||
Self {
|
||||
body,
|
||||
mapper: Some(mapper),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<B, F, E> MessageBody for MessageBodyMapErr<B, F>
|
||||
where
|
||||
B: MessageBody,
|
||||
F: FnOnce(B::Error) -> E,
|
||||
E: Into<Box<dyn StdError>>,
|
||||
{
|
||||
type Error = E;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
self.body.size()
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
let this = self.as_mut().project();
|
||||
|
||||
match ready!(this.body.poll_next(cx)) {
|
||||
Some(Err(err)) => {
|
||||
let f = self.as_mut().project().mapper.take().unwrap();
|
||||
let mapped_err = (f)(err);
|
||||
Poll::Ready(Some(Err(mapped_err)))
|
||||
}
|
||||
Some(Ok(val)) => Poll::Ready(Some(Ok(val))),
|
||||
None => Poll::Ready(None),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
let Self { body, mapper } = self;
|
||||
body.try_into_bytes().map_err(|body| Self { body, mapper })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use actix_rt::pin;
|
||||
use actix_utils::future::poll_fn;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
|
||||
use super::*;
|
||||
use crate::body::{self, EitherBody};
|
||||
|
||||
macro_rules! assert_poll_next {
|
||||
($pin:expr, $exp:expr) => {
|
||||
assert_eq!(
|
||||
poll_fn(|cx| $pin.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap() // unwrap option
|
||||
.unwrap(), // unwrap result
|
||||
$exp
|
||||
);
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! assert_poll_next_none {
|
||||
($pin:expr) => {
|
||||
assert!(poll_fn(|cx| $pin.as_mut().poll_next(cx)).await.is_none());
|
||||
};
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn boxing_equivalence() {
|
||||
assert_eq!(().size(), BodySize::Sized(0));
|
||||
assert_eq!(().size(), Box::new(()).size());
|
||||
assert_eq!(().size(), Box::pin(()).size());
|
||||
|
||||
let pl = Box::new(());
|
||||
pin!(pl);
|
||||
assert_poll_next_none!(pl);
|
||||
|
||||
let mut pl = Box::pin(());
|
||||
assert_poll_next_none!(pl);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_unit() {
|
||||
let pl = ();
|
||||
assert_eq!(pl.size(), BodySize::Sized(0));
|
||||
pin!(pl);
|
||||
assert_poll_next_none!(pl);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_static_str() {
|
||||
assert_eq!("".size(), BodySize::Sized(0));
|
||||
assert_eq!("test".size(), BodySize::Sized(4));
|
||||
|
||||
let pl = "test";
|
||||
pin!(pl);
|
||||
assert_poll_next!(pl, Bytes::from("test"));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_static_bytes() {
|
||||
assert_eq!(b"".as_ref().size(), BodySize::Sized(0));
|
||||
assert_eq!(b"test".as_ref().size(), BodySize::Sized(4));
|
||||
|
||||
let pl = b"test".as_ref();
|
||||
pin!(pl);
|
||||
assert_poll_next!(pl, Bytes::from("test"));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_vec() {
|
||||
assert_eq!(vec![0; 0].size(), BodySize::Sized(0));
|
||||
assert_eq!(Vec::from("test").size(), BodySize::Sized(4));
|
||||
|
||||
let pl = Vec::from("test");
|
||||
pin!(pl);
|
||||
assert_poll_next!(pl, Bytes::from("test"));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_bytes() {
|
||||
assert_eq!(Bytes::new().size(), BodySize::Sized(0));
|
||||
assert_eq!(Bytes::from_static(b"test").size(), BodySize::Sized(4));
|
||||
|
||||
let pl = Bytes::from_static(b"test");
|
||||
pin!(pl);
|
||||
assert_poll_next!(pl, Bytes::from("test"));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_bytes_mut() {
|
||||
assert_eq!(BytesMut::new().size(), BodySize::Sized(0));
|
||||
assert_eq!(BytesMut::from(b"test".as_ref()).size(), BodySize::Sized(4));
|
||||
|
||||
let pl = BytesMut::from("test");
|
||||
pin!(pl);
|
||||
assert_poll_next!(pl, Bytes::from("test"));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_string() {
|
||||
assert_eq!(String::new().size(), BodySize::Sized(0));
|
||||
assert_eq!("test".to_owned().size(), BodySize::Sized(4));
|
||||
|
||||
let pl = "test".to_owned();
|
||||
pin!(pl);
|
||||
assert_poll_next!(pl, Bytes::from("test"));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn complete_body_combinators() {
|
||||
let body = Bytes::from_static(b"test");
|
||||
let body = BoxBody::new(body);
|
||||
let body = EitherBody::<_, ()>::left(body);
|
||||
let body = EitherBody::<(), _>::right(body);
|
||||
// Do not support try_into_bytes:
|
||||
// let body = Box::new(body);
|
||||
// let body = Box::pin(body);
|
||||
|
||||
assert_eq!(body.try_into_bytes().unwrap(), Bytes::from("test"));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn complete_body_combinators_poll() {
|
||||
let body = Bytes::from_static(b"test");
|
||||
let body = BoxBody::new(body);
|
||||
let body = EitherBody::<_, ()>::left(body);
|
||||
let body = EitherBody::<(), _>::right(body);
|
||||
let mut body = body;
|
||||
|
||||
assert_eq!(body.size(), BodySize::Sized(4));
|
||||
assert_poll_next!(Pin::new(&mut body), Bytes::from("test"));
|
||||
assert_poll_next_none!(Pin::new(&mut body));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn none_body_combinators() {
|
||||
fn none_body() -> BoxBody {
|
||||
let body = body::None;
|
||||
let body = BoxBody::new(body);
|
||||
let body = EitherBody::<_, ()>::left(body);
|
||||
let body = EitherBody::<(), _>::right(body);
|
||||
body.boxed()
|
||||
}
|
||||
|
||||
assert_eq!(none_body().size(), BodySize::None);
|
||||
assert_eq!(none_body().try_into_bytes().unwrap(), Bytes::new());
|
||||
assert_poll_next_none!(Pin::new(&mut none_body()));
|
||||
}
|
||||
|
||||
// down-casting used to be done with a method on MessageBody trait
|
||||
// test is kept to demonstrate equivalence of Any trait
|
||||
#[actix_rt::test]
|
||||
async fn test_body_casting() {
|
||||
let mut body = String::from("hello cast");
|
||||
// let mut resp_body: &mut dyn MessageBody<Error = Error> = &mut body;
|
||||
let resp_body: &mut dyn std::any::Any = &mut body;
|
||||
let body = resp_body.downcast_ref::<String>().unwrap();
|
||||
assert_eq!(body, "hello cast");
|
||||
let body = &mut resp_body.downcast_mut::<String>().unwrap();
|
||||
body.push('!');
|
||||
let body = resp_body.downcast_ref::<String>().unwrap();
|
||||
assert_eq!(body, "hello cast!");
|
||||
let not_body = resp_body.downcast_ref::<()>();
|
||||
assert!(not_body.is_none());
|
||||
}
|
||||
}
|
||||
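To illustrate the trait surface defined above, here is a sketch of a custom `MessageBody` implementation. The `OneShot` type and test are invented for illustration and rely only on the trait as added in this diff.

```rust
use std::{
    convert::Infallible,
    pin::Pin,
    task::{Context, Poll},
};

use actix_http::body::{to_bytes, BodySize, MessageBody};
use bytes::Bytes;

/// A one-shot body that yields a fixed chunk exactly once.
struct OneShot(Option<Bytes>);

impl MessageBody for OneShot {
    type Error = Infallible;

    fn size(&self) -> BodySize {
        match &self.0 {
            Some(bytes) => BodySize::Sized(bytes.len() as u64),
            None => BodySize::Sized(0),
        }
    }

    fn poll_next(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, Self::Error>>> {
        // `OneShot` is `Unpin`, so it is safe to take a mutable reference out of the pin.
        Poll::Ready(self.get_mut().0.take().map(Ok))
    }
}

#[actix_rt::test]
async fn one_shot_body() {
    let body = OneShot(Some(Bytes::from_static(b"exactly once")));
    assert_eq!(body.size(), BodySize::Sized(12));
    assert_eq!(to_bytes(body).await.unwrap(), Bytes::from_static(b"exactly once"));
}
```

The default `try_into_bytes` and `boxed` methods are inherited unchanged, so `OneShot` can also be erased into a `BoxBody` without extra code.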
20 actix-http/src/body/mod.rs Normal file
@@ -0,0 +1,20 @@
|
||||
//! Traits and structures to aid consuming and writing HTTP payloads.
|
||||
|
||||
mod body_stream;
|
||||
mod boxed;
|
||||
mod either;
|
||||
mod message_body;
|
||||
mod none;
|
||||
mod size;
|
||||
mod sized_stream;
|
||||
mod utils;
|
||||
|
||||
pub use self::body_stream::BodyStream;
|
||||
pub use self::boxed::BoxBody;
|
||||
pub use self::either::EitherBody;
|
||||
pub use self::message_body::MessageBody;
|
||||
pub(crate) use self::message_body::MessageBodyMapErr;
|
||||
pub use self::none::None;
|
||||
pub use self::size::BodySize;
|
||||
pub use self::sized_stream::SizedStream;
|
||||
pub use self::utils::to_bytes;
|
||||
48 actix-http/src/body/none.rs Normal file
@@ -0,0 +1,48 @@
|
||||
use std::{
|
||||
convert::Infallible,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use bytes::Bytes;
|
||||
|
||||
use super::{BodySize, MessageBody};
|
||||
|
||||
/// Body type for responses that forbid payloads.
|
||||
///
|
||||
/// Distinct from an empty response, which would contain a `Content-Length` header.
|
||||
///
|
||||
/// For an "empty" body, use `()` or `Bytes::new()`.
|
||||
#[derive(Debug, Clone, Copy, Default)]
|
||||
#[non_exhaustive]
|
||||
pub struct None;
|
||||
|
||||
impl None {
|
||||
/// Constructs a new "none" body.
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageBody for None {
|
||||
type Error = Infallible;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::None
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
Poll::Ready(Option::None)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self> {
|
||||
Ok(Bytes::new())
|
||||
}
|
||||
}
|
||||
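A short sketch contrasting `body::None` with an empty sized body, following the doc comment above; the test name is illustrative.

```rust
use actix_http::body::{self, BodySize, MessageBody as _};
use bytes::Bytes;

#[test]
fn none_vs_empty() {
    // `body::None` signals that no payload exists at all (no Content-Length header).
    assert_eq!(body::None::new().size(), BodySize::None);

    // An empty `Bytes` is an "empty" body: zero length, but still sized.
    assert_eq!(Bytes::new().size(), BodySize::Sized(0));

    // Both report end-of-file from the size hint alone.
    assert!(body::None::new().size().is_eof());
    assert!(Bytes::new().size().is_eof());
}
```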
41 actix-http/src/body/size.rs Normal file
@@ -0,0 +1,41 @@
|
||||
/// Body size hint.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum BodySize {
|
||||
/// Implicitly empty body.
|
||||
///
|
||||
/// Will omit the Content-Length header. Used for responses to certain methods (e.g., `HEAD`) or
|
||||
/// with particular status codes (e.g., 204 No Content). Consumers that read this as a body size
|
||||
/// hint are allowed to make optimizations that skip reading or writing the payload.
|
||||
None,
|
||||
|
||||
/// Known size body.
|
||||
///
|
||||
/// Will write `Content-Length: N` header.
|
||||
Sized(u64),
|
||||
|
||||
/// Unknown size body.
|
||||
///
|
||||
/// Will not write Content-Length header. Can be used with chunked Transfer-Encoding.
|
||||
Stream,
|
||||
}
|
||||
|
||||
impl BodySize {
|
||||
/// Equivalent to `BodySize::Sized(0)`.
|
||||
pub const ZERO: Self = Self::Sized(0);
|
||||
|
||||
/// Returns true if size hint indicates omitted or empty body.
|
||||
///
|
||||
/// Streams will return false because their size cannot be known without reading the stream.
|
||||
///
|
||||
/// ```
|
||||
/// # use actix_http::body::BodySize;
|
||||
/// assert!(BodySize::None.is_eof());
|
||||
/// assert!(BodySize::Sized(0).is_eof());
|
||||
///
|
||||
/// assert!(!BodySize::Sized(64).is_eof());
|
||||
/// assert!(!BodySize::Stream.is_eof());
|
||||
/// ```
|
||||
pub fn is_eof(&self) -> bool {
|
||||
matches!(self, BodySize::None | BodySize::Sized(0))
|
||||
}
|
||||
}
|
||||
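A sketch of how a consumer might act on the size hint. The `framing` helper mirrors the doc comments on the variants and is purely illustrative; it is not actix-http's actual header-writing logic.

```rust
use actix_http::body::BodySize;

// Illustrative only: map a size hint to the framing a server might choose.
fn framing(size: BodySize) -> String {
    match size {
        BodySize::None => "no payload, omit Content-Length".to_owned(),
        BodySize::Sized(len) => format!("Content-Length: {}", len),
        BodySize::Stream => "Transfer-Encoding: chunked".to_owned(),
    }
}

#[test]
fn framing_from_size_hint() {
    assert_eq!(framing(BodySize::ZERO), "Content-Length: 0");
    assert_eq!(framing(BodySize::Sized(64)), "Content-Length: 64");
    assert_eq!(framing(BodySize::Stream), "Transfer-Encoding: chunked");
}
```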
171 actix-http/src/body/sized_stream.rs Normal file
@@ -0,0 +1,171 @@
|
||||
use std::{
|
||||
error::Error as StdError,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures_core::{ready, Stream};
|
||||
use pin_project_lite::pin_project;
|
||||
|
||||
use super::{BodySize, MessageBody};
|
||||
|
||||
pin_project! {
|
||||
/// Known sized streaming response wrapper.
|
||||
///
|
||||
/// This body implementation should be used if the total size of the stream is known. Data is
/// sent as-is, without chunked transfer encoding.
|
||||
pub struct SizedStream<S> {
|
||||
size: u64,
|
||||
#[pin]
|
||||
stream: S,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, E> SizedStream<S>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, E>>,
|
||||
E: Into<Box<dyn StdError>> + 'static,
|
||||
{
|
||||
#[inline]
|
||||
pub fn new(size: u64, stream: S) -> Self {
|
||||
SizedStream { size, stream }
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: from_infallible method
|
||||
|
||||
impl<S, E> MessageBody for SizedStream<S>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, E>>,
|
||||
E: Into<Box<dyn StdError>> + 'static,
|
||||
{
|
||||
type Error = E;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
BodySize::Sized(self.size as u64)
|
||||
}
|
||||
|
||||
/// Attempts to pull out the next value of the underlying [`Stream`].
|
||||
///
|
||||
/// Empty values are skipped to prevent [`SizedStream`]'s transmission from being
/// ended on a zero-length chunk; instead, polling proceeds until the underlying
/// [`Stream`] ends.
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
loop {
|
||||
let stream = self.as_mut().project().stream;
|
||||
|
||||
let chunk = match ready!(stream.poll_next(cx)) {
|
||||
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
|
||||
val => val,
|
||||
};
|
||||
|
||||
return Poll::Ready(chunk);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::convert::Infallible;
|
||||
|
||||
use actix_rt::pin;
|
||||
use actix_utils::future::poll_fn;
|
||||
use futures_util::stream;
|
||||
use static_assertions::{assert_impl_all, assert_not_impl_all};
|
||||
|
||||
use super::*;
|
||||
use crate::body::to_bytes;
|
||||
|
||||
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
|
||||
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
|
||||
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
|
||||
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
|
||||
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
|
||||
|
||||
assert_not_impl_all!(SizedStream<stream::Empty<Bytes>>: MessageBody);
|
||||
assert_not_impl_all!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
|
||||
// crate::Error is not Clone
|
||||
assert_not_impl_all!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn skips_empty_chunks() {
|
||||
let body = SizedStream::new(
|
||||
2,
|
||||
stream::iter(
|
||||
["1", "", "2"]
|
||||
.iter()
|
||||
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
|
||||
),
|
||||
);
|
||||
|
||||
pin!(body);
|
||||
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("1")),
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
poll_fn(|cx| body.as_mut().poll_next(cx))
|
||||
.await
|
||||
.unwrap()
|
||||
.ok(),
|
||||
Some(Bytes::from("2")),
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn read_to_bytes() {
|
||||
let body = SizedStream::new(
|
||||
2,
|
||||
stream::iter(
|
||||
["1", "", "2"]
|
||||
.iter()
|
||||
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
|
||||
),
|
||||
);
|
||||
|
||||
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn stream_string_error() {
|
||||
// `&'static str` does not impl `Error`
|
||||
// but it does impl `Into<Box<dyn Error>>`
|
||||
|
||||
let body = SizedStream::new(0, stream::once(async { Err("stringy error") }));
|
||||
assert_eq!(to_bytes(body).await, Ok(Bytes::new()));
|
||||
|
||||
let body = SizedStream::new(1, stream::once(async { Err("stringy error") }));
|
||||
assert!(matches!(to_bytes(body).await, Err("stringy error")));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn stream_boxed_error() {
|
||||
// `Box<dyn Error>` does not impl `Error`
|
||||
// but it does impl `Into<Box<dyn Error>>`
|
||||
|
||||
let body = SizedStream::new(
|
||||
0,
|
||||
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
|
||||
);
|
||||
assert_eq!(to_bytes(body).await.unwrap(), Bytes::new());
|
||||
|
||||
let body = SizedStream::new(
|
||||
1,
|
||||
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
|
||||
);
|
||||
assert_eq!(
|
||||
to_bytes(body).await.unwrap_err().to_string(),
|
||||
"stringy error"
|
||||
);
|
||||
}
|
||||
}
|
||||
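A minimal sketch of `SizedStream` usage, in the style of the tests above; the chunk values and test name are illustrative. The caller promises the total byte count up front, and that promise becomes the size hint instead of chunked transfer encoding.

```rust
use std::convert::Infallible;

use actix_http::body::{to_bytes, BodySize, MessageBody as _, SizedStream};
use bytes::Bytes;
use futures_util::stream;

#[actix_rt::test]
async fn sized_stream_usage() {
    let chunks = stream::iter(
        ["abc", "def"]
            .iter()
            .map(|&c| Ok::<_, Infallible>(Bytes::from(c))),
    );

    // The declared size (6 bytes) is reported as the body size hint.
    let body = SizedStream::new(6, chunks);
    assert_eq!(body.size(), BodySize::Sized(6));

    assert_eq!(to_bytes(body).await.unwrap(), Bytes::from("abcdef"));
}
```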
77 actix-http/src/body/utils.rs Normal file
@@ -0,0 +1,77 @@
|
||||
use std::task::Poll;
|
||||
|
||||
use actix_rt::pin;
|
||||
use actix_utils::future::poll_fn;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures_core::ready;
|
||||
|
||||
use super::{BodySize, MessageBody};
|
||||
|
||||
/// Collects the body produced by a `MessageBody` implementation into `Bytes`.
|
||||
///
|
||||
/// Any errors produced by the body stream are returned immediately.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// use actix_http::body::{self, to_bytes};
|
||||
/// use bytes::Bytes;
|
||||
///
|
||||
/// # async fn test_to_bytes() {
|
||||
/// let body = body::None::new();
|
||||
/// let bytes = to_bytes(body).await.unwrap();
|
||||
/// assert!(bytes.is_empty());
|
||||
///
|
||||
/// let body = Bytes::from_static(b"123");
|
||||
/// let bytes = to_bytes(body).await.unwrap();
|
||||
/// assert_eq!(bytes, b"123"[..]);
|
||||
/// # }
|
||||
/// ```
|
||||
pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
|
||||
let cap = match body.size() {
|
||||
BodySize::None | BodySize::Sized(0) => return Ok(Bytes::new()),
|
||||
BodySize::Sized(size) => size as usize,
|
||||
// good enough first guess for chunk size
|
||||
BodySize::Stream => 32_768,
|
||||
};
|
||||
|
||||
let mut buf = BytesMut::with_capacity(cap);
|
||||
|
||||
pin!(body);
|
||||
|
||||
poll_fn(|cx| loop {
|
||||
let body = body.as_mut();
|
||||
|
||||
match ready!(body.poll_next(cx)) {
|
||||
Some(Ok(bytes)) => buf.extend_from_slice(&*bytes),
|
||||
None => return Poll::Ready(Ok(())),
|
||||
Some(Err(err)) => return Poll::Ready(Err(err)),
|
||||
}
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(buf.freeze())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use futures_util::{stream, StreamExt as _};
|
||||
|
||||
use super::*;
|
||||
use crate::{body::BodyStream, Error};
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_to_bytes() {
|
||||
let bytes = to_bytes(()).await.unwrap();
|
||||
assert!(bytes.is_empty());
|
||||
|
||||
let body = Bytes::from_static(b"123");
|
||||
let bytes = to_bytes(body).await.unwrap();
|
||||
assert_eq!(bytes, b"123"[..]);
|
||||
|
||||
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
|
||||
.map(Ok::<_, Error>);
|
||||
let body = BodyStream::new(stream);
|
||||
let bytes = to_bytes(body).await.unwrap();
|
||||
assert_eq!(bytes, b"123abc"[..]);
|
||||
}
|
||||
}
|
||||
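Since `to_bytes` returns the first error produced by the body, a small sketch of the failure path may help; the error string and test name are illustrative, while the `&'static str` error type follows the tests earlier in this diff.

```rust
use actix_http::body::{to_bytes, BodyStream};
use bytes::Bytes;
use futures_util::stream;

#[actix_rt::test]
async fn to_bytes_propagates_errors() {
    // A body that yields one good chunk and then fails; `&'static str` works as the
    // error type because it converts into `Box<dyn Error>`.
    let body = BodyStream::new(stream::iter(vec![
        Ok(Bytes::from_static(b"partial")),
        Err("disconnected"),
    ]));

    // The first error aborts collection; the partial chunk is discarded.
    assert_eq!(to_bytes(body).await.unwrap_err(), "disconnected");
}
```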
@@ -1,25 +1,22 @@
|
||||
use std::marker::PhantomData;
|
||||
use std::rc::Rc;
|
||||
use std::{fmt, net};
|
||||
use std::{fmt, marker::PhantomData, net, rc::Rc};
|
||||
|
||||
use actix_codec::Framed;
|
||||
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
|
||||
|
||||
use crate::body::MessageBody;
|
||||
use crate::config::{KeepAlive, ServiceConfig};
|
||||
use crate::error::Error;
|
||||
use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
|
||||
use crate::h2::H2Service;
|
||||
use crate::helpers::{Data, DataFactory};
|
||||
use crate::request::Request;
|
||||
use crate::response::Response;
|
||||
use crate::service::HttpService;
|
||||
use crate::{
|
||||
body::{BoxBody, MessageBody},
|
||||
config::{KeepAlive, ServiceConfig},
|
||||
h1::{self, ExpectHandler, H1Service, UpgradeHandler},
|
||||
h2::H2Service,
|
||||
service::HttpService,
|
||||
ConnectCallback, Extensions, Request, Response,
|
||||
};
|
||||
|
||||
/// A http service builder
|
||||
/// A HTTP service builder
|
||||
///
|
||||
/// This type can be used to construct an instance of `http service` through a
|
||||
/// This type can be used to construct an instance of [`HttpService`] through a
|
||||
/// builder-like pattern.
|
||||
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
|
||||
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
|
||||
keep_alive: KeepAlive,
|
||||
client_timeout: u64,
|
||||
client_disconnect: u64,
|
||||
@@ -27,18 +24,19 @@ pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
|
||||
local_addr: Option<net::SocketAddr>,
|
||||
expect: X,
|
||||
upgrade: Option<U>,
|
||||
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
|
||||
_t: PhantomData<(T, S)>,
|
||||
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
|
||||
_phantom: PhantomData<S>,
|
||||
}
|
||||
|
||||
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler<T>>
|
||||
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
|
||||
where
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S: ServiceFactory<Request, Config = ()>,
|
||||
S::Error: Into<Response<BoxBody>> + 'static,
|
||||
S::InitError: fmt::Debug,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
<S::Service as Service<Request>>::Future: 'static,
|
||||
{
|
||||
/// Create instance of `ServiceConfigBuilder`
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> Self {
|
||||
HttpServiceBuilder {
|
||||
keep_alive: KeepAlive::Timeout(5),
|
||||
@@ -48,26 +46,24 @@ where
|
||||
local_addr: None,
|
||||
expect: ExpectHandler,
|
||||
upgrade: None,
|
||||
on_connect: None,
|
||||
_t: PhantomData,
|
||||
on_connect_ext: None,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
|
||||
where
|
||||
S: ServiceFactory<Config = (), Request = Request>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
S: ServiceFactory<Request, Config = ()>,
|
||||
S::Error: Into<Response<BoxBody>> + 'static,
|
||||
S::InitError: fmt::Debug,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
|
||||
X::Error: Into<Error>,
|
||||
<S::Service as Service<Request>>::Future: 'static,
|
||||
X: ServiceFactory<Request, Config = (), Response = Request>,
|
||||
X::Error: Into<Response<BoxBody>>,
|
||||
X::InitError: fmt::Debug,
|
||||
<X::Service as Service>::Future: 'static,
|
||||
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
|
||||
U: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
|
||||
U::Error: fmt::Display,
|
||||
U::InitError: fmt::Debug,
|
||||
<U::Service as Service>::Future: 'static,
|
||||
{
|
||||
/// Set server keep-alive setting.
|
||||
///
|
||||
@@ -123,11 +119,10 @@ where
|
||||
/// request will be forwarded to main service.
|
||||
pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U>
|
||||
where
|
||||
F: IntoServiceFactory<X1>,
|
||||
X1: ServiceFactory<Config = (), Request = Request, Response = Request>,
|
||||
X1::Error: Into<Error>,
|
||||
F: IntoServiceFactory<X1, Request>,
|
||||
X1: ServiceFactory<Request, Config = (), Response = Request>,
|
||||
X1::Error: Into<Response<BoxBody>>,
|
||||
X1::InitError: fmt::Debug,
|
||||
<X1::Service as Service>::Future: 'static,
|
||||
{
|
||||
HttpServiceBuilder {
|
||||
keep_alive: self.keep_alive,
|
||||
@@ -137,8 +132,8 @@ where
|
||||
local_addr: self.local_addr,
|
||||
expect: expect.into_factory(),
|
||||
upgrade: self.upgrade,
|
||||
on_connect: self.on_connect,
|
||||
_t: PhantomData,
|
||||
on_connect_ext: self.on_connect_ext,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -148,15 +143,10 @@ where
|
||||
/// and this service get called with original request and framed object.
|
||||
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
|
||||
where
|
||||
F: IntoServiceFactory<U1>,
|
||||
U1: ServiceFactory<
|
||||
Config = (),
|
||||
Request = (Request, Framed<T, Codec>),
|
||||
Response = (),
|
||||
>,
|
||||
F: IntoServiceFactory<U1, (Request, Framed<T, h1::Codec>)>,
|
||||
U1: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
|
||||
U1::Error: fmt::Display,
|
||||
U1::InitError: fmt::Debug,
|
||||
<U1::Service as Service>::Future: 'static,
|
||||
{
|
||||
HttpServiceBuilder {
|
||||
keep_alive: self.keep_alive,
|
||||
@@ -166,30 +156,30 @@ where
|
||||
local_addr: self.local_addr,
|
||||
expect: self.expect,
|
||||
upgrade: Some(upgrade.into_factory()),
|
||||
on_connect: self.on_connect,
|
||||
_t: PhantomData,
|
||||
on_connect_ext: self.on_connect_ext,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set on-connect callback.
|
||||
/// Sets the callback to be run on connection establishment.
|
||||
///
|
||||
/// It get called once per connection and result of the call
|
||||
/// get stored to the request's extensions.
|
||||
pub fn on_connect<F, I>(mut self, f: F) -> Self
|
||||
/// Has mutable access to a data container that will be merged into request extensions.
|
||||
/// This enables transport layer data (like client certificates) to be accessed in middleware
|
||||
/// and handlers.
|
||||
pub fn on_connect_ext<F>(mut self, f: F) -> Self
|
||||
where
|
||||
F: Fn(&T) -> I + 'static,
|
||||
I: Clone + 'static,
|
||||
F: Fn(&T, &mut Extensions) + 'static,
|
||||
{
|
||||
self.on_connect = Some(Rc::new(move |io| Box::new(Data(f(io)))));
|
||||
self.on_connect_ext = Some(Rc::new(f));
|
||||
self
|
||||
}
|
||||
|
||||
/// Finish service configuration and create *http service* for HTTP/1 protocol.
|
||||
/// Finish service configuration and create a HTTP Service for HTTP/1 protocol.
|
||||
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
|
||||
where
|
||||
B: MessageBody,
|
||||
F: IntoServiceFactory<S>,
|
||||
S::Error: Into<Error>,
|
||||
F: IntoServiceFactory<S, Request>,
|
||||
S::Error: Into<Response<BoxBody>>,
|
||||
S::InitError: fmt::Debug,
|
||||
S::Response: Into<Response<B>>,
|
||||
{
|
||||
@@ -200,21 +190,22 @@ where
|
||||
self.secure,
|
||||
self.local_addr,
|
||||
);
|
||||
|
||||
H1Service::with_config(cfg, service.into_factory())
|
||||
.expect(self.expect)
|
||||
.upgrade(self.upgrade)
|
||||
.on_connect(self.on_connect)
|
||||
.on_connect_ext(self.on_connect_ext)
|
||||
}
|
||||
|
||||
/// Finish service configuration and create *http service* for HTTP/2 protocol.
|
||||
/// Finish service configuration and create a HTTP service for HTTP/2 protocol.
|
||||
pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
|
||||
where
|
||||
B: MessageBody + 'static,
|
||||
F: IntoServiceFactory<S>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
F: IntoServiceFactory<S, Request>,
|
||||
S::Error: Into<Response<BoxBody>> + 'static,
|
||||
S::InitError: fmt::Debug,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
let cfg = ServiceConfig::new(
|
||||
self.keep_alive,
|
||||
@@ -223,18 +214,19 @@ where
|
||||
self.secure,
|
||||
self.local_addr,
|
||||
);
|
||||
H2Service::with_config(cfg, service.into_factory()).on_connect(self.on_connect)
|
||||
|
||||
H2Service::with_config(cfg, service.into_factory()).on_connect_ext(self.on_connect_ext)
|
||||
}
|
||||
|
||||
/// Finish service configuration and create `HttpService` instance.
|
||||
pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U>
|
||||
where
|
||||
B: MessageBody + 'static,
|
||||
F: IntoServiceFactory<S>,
|
||||
S::Error: Into<Error> + 'static,
|
||||
F: IntoServiceFactory<S, Request>,
|
||||
S::Error: Into<Response<BoxBody>> + 'static,
|
||||
S::InitError: fmt::Debug,
|
||||
S::Response: Into<Response<B>> + 'static,
|
||||
<S::Service as Service>::Future: 'static,
|
||||
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
let cfg = ServiceConfig::new(
|
||||
self.keep_alive,
|
||||
@@ -243,9 +235,10 @@ where
|
||||
self.secure,
|
||||
self.local_addr,
|
||||
);
|
||||
|
||||
HttpService::with_config(cfg, service.into_factory())
|
||||
.expect(self.expect)
|
||||
.upgrade(self.upgrade)
|
||||
.on_connect(self.on_connect)
|
||||
.on_connect_ext(self.on_connect_ext)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,287 +0,0 @@
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{fmt, io, mem, time};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite, Framed};
|
||||
use bytes::{Buf, Bytes};
|
||||
use futures_util::future::{err, Either, FutureExt, LocalBoxFuture, Ready};
|
||||
use h2::client::SendRequest;
|
||||
use pin_project::pin_project;
|
||||
|
||||
use crate::body::MessageBody;
|
||||
use crate::h1::ClientCodec;
|
||||
use crate::message::{RequestHeadType, ResponseHead};
|
||||
use crate::payload::Payload;
|
||||
|
||||
use super::error::SendRequestError;
|
||||
use super::pool::{Acquired, Protocol};
|
||||
use super::{h1proto, h2proto};
|
||||
|
||||
pub(crate) enum ConnectionType<Io> {
|
||||
H1(Io),
|
||||
H2(SendRequest<Bytes>),
|
||||
}
|
||||
|
||||
pub trait Connection {
|
||||
type Io: AsyncRead + AsyncWrite + Unpin;
|
||||
type Future: Future<Output = Result<(ResponseHead, Payload), SendRequestError>>;
|
||||
|
||||
fn protocol(&self) -> Protocol;
|
||||
|
||||
/// Send request and body
|
||||
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
|
||||
self,
|
||||
head: H,
|
||||
body: B,
|
||||
) -> Self::Future;
|
||||
|
||||
type TunnelFuture: Future<
|
||||
Output = Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
|
||||
>;
|
||||
|
||||
/// Send request, returns Response and Framed
|
||||
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture;
|
||||
}
|
||||
|
||||
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
|
||||
/// Close connection
|
||||
fn close(&mut self);
|
||||
|
||||
/// Release connection to the connection pool
|
||||
fn release(&mut self);
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
/// HTTP client connection
|
||||
pub struct IoConnection<T> {
|
||||
io: Option<ConnectionType<T>>,
|
||||
created: time::Instant,
|
||||
pool: Option<Acquired<T>>,
|
||||
}
|
||||
|
||||
impl<T> fmt::Debug for IoConnection<T>
|
||||
where
|
||||
T: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.io {
|
||||
Some(ConnectionType::H1(ref io)) => write!(f, "H1Connection({:?})", io),
|
||||
Some(ConnectionType::H2(_)) => write!(f, "H2Connection"),
|
||||
None => write!(f, "Connection(Empty)"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncWrite + Unpin> IoConnection<T> {
|
||||
pub(crate) fn new(
|
||||
io: ConnectionType<T>,
|
||||
created: time::Instant,
|
||||
pool: Option<Acquired<T>>,
|
||||
) -> Self {
|
||||
IoConnection {
|
||||
pool,
|
||||
created,
|
||||
io: Some(io),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn into_inner(self) -> (ConnectionType<T>, time::Instant) {
|
||||
(self.io.unwrap(), self.created)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Connection for IoConnection<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Io = T;
|
||||
type Future =
|
||||
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
|
||||
|
||||
fn protocol(&self) -> Protocol {
|
||||
match self.io {
|
||||
Some(ConnectionType::H1(_)) => Protocol::Http1,
|
||||
Some(ConnectionType::H2(_)) => Protocol::Http2,
|
||||
None => Protocol::Http1,
|
||||
}
|
||||
}
|
||||
|
||||
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
|
||||
mut self,
|
||||
head: H,
|
||||
body: B,
|
||||
) -> Self::Future {
|
||||
match self.io.take().unwrap() {
|
||||
ConnectionType::H1(io) => {
|
||||
h1proto::send_request(io, head.into(), body, self.created, self.pool)
|
||||
.boxed_local()
|
||||
}
|
||||
ConnectionType::H2(io) => {
|
||||
h2proto::send_request(io, head.into(), body, self.created, self.pool)
|
||||
.boxed_local()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type TunnelFuture = Either<
|
||||
LocalBoxFuture<
|
||||
'static,
|
||||
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
|
||||
>,
|
||||
Ready<Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>>,
|
||||
>;
|
||||
|
||||
/// Send request, returns Response and Framed
|
||||
fn open_tunnel<H: Into<RequestHeadType>>(mut self, head: H) -> Self::TunnelFuture {
|
||||
match self.io.take().unwrap() {
|
||||
ConnectionType::H1(io) => {
|
||||
Either::Left(h1proto::open_tunnel(io, head.into()).boxed_local())
|
||||
}
|
||||
ConnectionType::H2(io) => {
|
||||
if let Some(mut pool) = self.pool.take() {
|
||||
pool.release(IoConnection::new(
|
||||
ConnectionType::H2(io),
|
||||
self.created,
|
||||
None,
|
||||
));
|
||||
}
|
||||
Either::Right(err(SendRequestError::TunnelNotSupported))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) enum EitherConnection<A, B> {
|
||||
A(IoConnection<A>),
|
||||
B(IoConnection<B>),
|
||||
}
|
||||
|
||||
impl<A, B> Connection for EitherConnection<A, B>
|
||||
where
|
||||
A: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
B: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Io = EitherIo<A, B>;
|
||||
type Future =
|
||||
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
|
||||
|
||||
fn protocol(&self) -> Protocol {
|
||||
match self {
|
||||
EitherConnection::A(con) => con.protocol(),
|
||||
EitherConnection::B(con) => con.protocol(),
|
||||
}
|
||||
}
|
||||
|
||||
fn send_request<RB: MessageBody + 'static, H: Into<RequestHeadType>>(
|
||||
self,
|
||||
head: H,
|
||||
body: RB,
|
||||
) -> Self::Future {
|
||||
match self {
|
||||
EitherConnection::A(con) => con.send_request(head, body),
|
||||
EitherConnection::B(con) => con.send_request(head, body),
|
||||
}
|
||||
}
|
||||
|
||||
type TunnelFuture = LocalBoxFuture<
|
||||
'static,
|
||||
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
|
||||
>;
|
||||
|
||||
/// Send request, returns Response and Framed
|
||||
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture {
|
||||
match self {
|
||||
EitherConnection::A(con) => con
|
||||
.open_tunnel(head)
|
||||
.map(|res| res.map(|(head, framed)| (head, framed.map_io(EitherIo::A))))
|
||||
.boxed_local(),
|
||||
EitherConnection::B(con) => con
|
||||
.open_tunnel(head)
|
||||
.map(|res| res.map(|(head, framed)| (head, framed.map_io(EitherIo::B))))
|
||||
.boxed_local(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project(project = EitherIoProj)]
|
||||
pub enum EitherIo<A, B> {
|
||||
A(#[pin] A),
|
||||
B(#[pin] B),
|
||||
}
|
||||
|
||||
impl<A, B> AsyncRead for EitherIo<A, B>
|
||||
where
|
||||
A: AsyncRead,
|
||||
B: AsyncRead,
|
||||
{
|
||||
fn poll_read(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut [u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
match self.project() {
|
||||
EitherIoProj::A(val) => val.poll_read(cx, buf),
|
||||
EitherIoProj::B(val) => val.poll_read(cx, buf),
|
||||
}
|
||||
}
|
||||
|
||||
unsafe fn prepare_uninitialized_buffer(
|
||||
&self,
|
||||
buf: &mut [mem::MaybeUninit<u8>],
|
||||
) -> bool {
|
||||
match self {
|
||||
EitherIo::A(ref val) => val.prepare_uninitialized_buffer(buf),
|
||||
EitherIo::B(ref val) => val.prepare_uninitialized_buffer(buf),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, B> AsyncWrite for EitherIo<A, B>
|
||||
where
|
||||
A: AsyncWrite,
|
||||
B: AsyncWrite,
|
||||
{
|
||||
fn poll_write(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
match self.project() {
|
||||
EitherIoProj::A(val) => val.poll_write(cx, buf),
|
||||
EitherIoProj::B(val) => val.poll_write(cx, buf),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
match self.project() {
|
||||
EitherIoProj::A(val) => val.poll_flush(cx),
|
||||
EitherIoProj::B(val) => val.poll_flush(cx),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_shutdown(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
match self.project() {
|
||||
EitherIoProj::A(val) => val.poll_shutdown(cx),
|
||||
EitherIoProj::B(val) => val.poll_shutdown(cx),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_write_buf<U: Buf>(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut U,
|
||||
) -> Poll<Result<usize, io::Error>>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
match self.project() {
|
||||
EitherIoProj::A(val) => val.poll_write_buf(cx, buf),
|
||||
EitherIoProj::B(val) => val.poll_write_buf(cx, buf),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,547 +0,0 @@
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::time::Duration;
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_connect::{
|
||||
default_connector, Connect as TcpConnect, Connection as TcpConnection,
|
||||
};
|
||||
use actix_rt::net::TcpStream;
|
||||
use actix_service::{apply_fn, Service};
|
||||
use actix_utils::timeout::{TimeoutError, TimeoutService};
|
||||
use http::Uri;
|
||||
|
||||
use super::config::ConnectorConfig;
|
||||
use super::connection::Connection;
|
||||
use super::error::ConnectError;
|
||||
use super::pool::{ConnectionPool, Protocol};
|
||||
use super::Connect;
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
use actix_connect::ssl::openssl::SslConnector as OpensslConnector;
|
||||
|
||||
#[cfg(feature = "rustls")]
|
||||
use actix_connect::ssl::rustls::ClientConfig;
|
||||
#[cfg(feature = "rustls")]
|
||||
use std::sync::Arc;
|
||||
|
||||
#[cfg(any(feature = "openssl", feature = "rustls"))]
|
||||
enum SslConnector {
|
||||
#[cfg(feature = "openssl")]
|
||||
Openssl(OpensslConnector),
|
||||
#[cfg(feature = "rustls")]
|
||||
Rustls(Arc<ClientConfig>),
|
||||
}
|
||||
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
|
||||
type SslConnector = ();
|
||||
|
||||
/// Manages HTTP client network connectivity.
|
||||
/// The `Connector` type uses a builder-like combinator pattern for service
|
||||
/// construction that finishes by calling the `.finish()` method.
|
||||
///
|
||||
/// ```rust,ignore
|
||||
/// use std::time::Duration;
|
||||
/// use actix_http::client::Connector;
|
||||
///
|
||||
/// let connector = Connector::new()
|
||||
/// .timeout(Duration::from_secs(5))
|
||||
/// .finish();
|
||||
/// ```
|
||||
pub struct Connector<T, U> {
|
||||
connector: T,
|
||||
config: ConnectorConfig,
|
||||
#[allow(dead_code)]
|
||||
ssl: SslConnector,
|
||||
_t: PhantomData<U>,
|
||||
}
|
||||
|
||||
trait Io: AsyncRead + AsyncWrite + Unpin {}
|
||||
impl<T: AsyncRead + AsyncWrite + Unpin> Io for T {}
|
||||
|
||||
impl Connector<(), ()> {
|
||||
#[allow(clippy::new_ret_no_self, clippy::let_unit_value)]
|
||||
pub fn new() -> Connector<
|
||||
impl Service<
|
||||
Request = TcpConnect<Uri>,
|
||||
Response = TcpConnection<Uri, TcpStream>,
|
||||
Error = actix_connect::ConnectError,
|
||||
> + Clone,
|
||||
TcpStream,
|
||||
> {
|
||||
Connector {
|
||||
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
|
||||
connector: default_connector(),
|
||||
config: ConnectorConfig::default(),
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
// Build SSL connector with OpenSSL, based on the supplied ALPN protocols
|
||||
#[cfg(feature = "openssl")]
|
||||
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
|
||||
use actix_connect::ssl::openssl::SslMethod;
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
let mut alpn = BytesMut::with_capacity(20);
|
||||
for proto in protocols.iter() {
|
||||
alpn.put_u8(proto.len() as u8);
|
||||
alpn.put(proto.as_slice());
|
||||
}
|
||||
|
||||
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
|
||||
let _ = ssl
|
||||
.set_alpn_protos(&alpn)
|
||||
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
|
||||
SslConnector::Openssl(ssl.build())
|
||||
}
|
||||
|
||||
// Build SSL connector with rustls, based on the supplied ALPN protocols
|
||||
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
|
||||
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
|
||||
let mut config = ClientConfig::new();
|
||||
config.set_protocols(&protocols);
|
||||
config
|
||||
.root_store
|
||||
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
|
||||
SslConnector::Rustls(Arc::new(config))
|
||||
}
|
||||
|
||||
// SSL turned off; provide an empty SSL connector
|
||||
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
|
||||
fn build_ssl(_: Vec<Vec<u8>>) -> SslConnector {}
|
||||
}
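// A minimal, self-contained sketch of the ALPN wire format produced in `build_ssl`
// above: each protocol name is written with a one-byte length prefix. The module
// name and the expected byte string are illustrative only.
#[cfg(test)]
mod alpn_wire_format_sketch {
    use bytes::{BufMut, BytesMut};

    #[test]
    fn alpn_list_is_length_prefixed() {
        let protocols: Vec<Vec<u8>> = vec![b"h2".to_vec(), b"http/1.1".to_vec()];

        let mut alpn = BytesMut::with_capacity(20);
        for proto in protocols.iter() {
            // length byte followed by the protocol name, as in `build_ssl`
            alpn.put_u8(proto.len() as u8);
            alpn.put(proto.as_slice());
        }

        assert_eq!(&alpn[..], &b"\x02h2\x08http/1.1"[..]);
    }
}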
|
||||
|
||||
impl<T, U> Connector<T, U> {
|
||||
/// Use custom connector.
|
||||
pub fn connector<T1, U1>(self, connector: T1) -> Connector<T1, U1>
|
||||
where
|
||||
U1: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
|
||||
T1: Service<
|
||||
Request = TcpConnect<Uri>,
|
||||
Response = TcpConnection<Uri, U1>,
|
||||
Error = actix_connect::ConnectError,
|
||||
> + Clone,
|
||||
{
|
||||
Connector {
|
||||
connector,
|
||||
config: self.config,
|
||||
ssl: self.ssl,
|
||||
_t: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Connector<T, U>
|
||||
where
|
||||
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
|
||||
T: Service<
|
||||
Request = TcpConnect<Uri>,
|
||||
Response = TcpConnection<Uri, U>,
|
||||
Error = actix_connect::ConnectError,
|
||||
> + Clone
|
||||
+ 'static,
|
||||
{
|
||||
/// Connection timeout, i.e. max time to connect to remote host, including DNS name resolution.
|
||||
/// Set to 1 second by default.
|
||||
pub fn timeout(mut self, timeout: Duration) -> Self {
|
||||
self.config.timeout = timeout;
|
||||
self
|
||||
}
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
/// Use custom `SslConnector` instance.
|
||||
pub fn ssl(mut self, connector: OpensslConnector) -> Self {
|
||||
self.ssl = SslConnector::Openssl(connector);
|
||||
self
|
||||
}
|
||||
|
||||
#[cfg(feature = "rustls")]
|
||||
pub fn rustls(mut self, connector: Arc<ClientConfig>) -> Self {
|
||||
self.ssl = SslConnector::Rustls(connector);
|
||||
self
|
||||
}
|
||||
|
||||
/// Maximum supported HTTP major version.
|
||||
/// Supported versions: HTTP/1.1 and HTTP/2.
|
||||
pub fn max_http_version(mut self, val: http::Version) -> Self {
|
||||
let versions = match val {
|
||||
http::Version::HTTP_11 => vec![b"http/1.1".to_vec()],
|
||||
http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
|
||||
_ => {
|
||||
unimplemented!("actix-http:client: supported versions http/1.1, http/2")
|
||||
}
|
||||
};
|
||||
self.ssl = Connector::build_ssl(versions);
|
||||
self
|
||||
}
|
||||
|
||||
/// Indicates the initial window size (in octets) for
|
||||
/// HTTP2 stream-level flow control for received data.
|
||||
///
|
||||
/// The default value is 65,535 and is good for APIs, but not for big objects.
|
||||
pub fn initial_window_size(mut self, size: u32) -> Self {
|
||||
self.config.stream_window_size = size;
|
||||
self
|
||||
}
|
||||
|
||||
/// Indicates the initial window size (in octets) for
|
||||
/// HTTP2 connection-level flow control for received data.
|
||||
///
|
||||
/// The default value is 65,535 and is good for APIs, but not for big objects.
|
||||
pub fn initial_connection_window_size(mut self, size: u32) -> Self {
|
||||
self.config.conn_window_size = size;
|
||||
self
|
||||
}
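// A minimal sketch of how both windows are typically raised together for large
// response bodies (the 16 MiB value is an arbitrary example, not a recommendation):
//
//     let connector = Connector::new()
//         .initial_window_size(16 * 1024 * 1024)
//         .initial_connection_window_size(16 * 1024 * 1024)
//         .finish();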
|
||||
|
||||
/// Set total number of simultaneous connections per type of scheme.
|
||||
///
|
||||
/// If limit is 0, the connector has no limit.
|
||||
/// The default limit size is 100.
|
||||
pub fn limit(mut self, limit: usize) -> Self {
|
||||
self.config.limit = limit;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set keep-alive period for opened connection.
|
||||
///
|
||||
/// Keep-alive period is the period between connection usage. If
|
||||
/// the delay between repeated usages of the same connection
|
||||
/// exceeds this period, the connection is closed.
|
||||
/// Default keep-alive period is 15 seconds.
|
||||
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
|
||||
self.config.conn_keep_alive = dur;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set max lifetime period for connection.
|
||||
///
|
||||
/// Connection lifetime is max lifetime of any opened connection
|
||||
/// until it is closed regardless of keep-alive period.
|
||||
/// Default lifetime period is 75 seconds.
|
||||
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
|
||||
self.config.conn_lifetime = dur;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set server connection disconnect timeout.
|
||||
///
|
||||
/// Defines a timeout for the disconnect procedure. If the disconnect does not complete
|
||||
/// within this time, the socket gets dropped. This timeout affects only secure connections.
|
||||
///
|
||||
/// To disable timeout set value to 0.
|
||||
///
|
||||
/// By default disconnect timeout is set to 3000 milliseconds.
|
||||
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
|
||||
self.config.disconnect_timeout = Some(dur);
|
||||
self
|
||||
}
|
||||
|
||||
/// Finish configuration process and create connector service.
|
||||
/// The Connector builder always concludes by calling `finish()` last in
|
||||
/// its combinator chain.
|
||||
pub fn finish(
|
||||
self,
|
||||
) -> impl Service<Request = Connect, Response = impl Connection, Error = ConnectError>
|
||||
+ Clone {
|
||||
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
|
||||
{
|
||||
let connector = TimeoutService::new(
|
||||
self.config.timeout,
|
||||
apply_fn(self.connector, |msg: Connect, srv| {
|
||||
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
|
||||
})
|
||||
.map_err(ConnectError::from)
|
||||
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
|
||||
)
|
||||
.map_err(|e| match e {
|
||||
TimeoutError::Service(e) => e,
|
||||
TimeoutError::Timeout => ConnectError::Timeout,
|
||||
});
|
||||
|
||||
connect_impl::InnerConnector {
|
||||
tcp_pool: ConnectionPool::new(
|
||||
connector,
|
||||
self.config.no_disconnect_timeout(),
|
||||
),
|
||||
}
|
||||
}
|
||||
#[cfg(any(feature = "openssl", feature = "rustls"))]
|
||||
{
|
||||
const H2: &[u8] = b"h2";
|
||||
#[cfg(feature = "openssl")]
|
||||
use actix_connect::ssl::openssl::OpensslConnector;
|
||||
#[cfg(feature = "rustls")]
|
||||
use actix_connect::ssl::rustls::{RustlsConnector, Session};
|
||||
use actix_service::{boxed::service, pipeline};
|
||||
|
||||
let ssl_service = TimeoutService::new(
|
||||
self.config.timeout,
|
||||
pipeline(
|
||||
apply_fn(self.connector.clone(), |msg: Connect, srv| {
|
||||
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
|
||||
})
|
||||
.map_err(ConnectError::from),
|
||||
)
|
||||
.and_then(match self.ssl {
|
||||
#[cfg(feature = "openssl")]
|
||||
SslConnector::Openssl(ssl) => service(
|
||||
OpensslConnector::service(ssl)
|
||||
.map(|stream| {
|
||||
let sock = stream.into_parts().0;
|
||||
let h2 = sock
|
||||
.ssl()
|
||||
.selected_alpn_protocol()
|
||||
.map(|protos| protos.windows(2).any(|w| w == H2))
|
||||
.unwrap_or(false);
|
||||
if h2 {
|
||||
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
|
||||
} else {
|
||||
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
|
||||
}
|
||||
})
|
||||
.map_err(ConnectError::from),
|
||||
),
|
||||
#[cfg(feature = "rustls")]
|
||||
SslConnector::Rustls(ssl) => service(
|
||||
RustlsConnector::service(ssl)
|
||||
.map_err(ConnectError::from)
|
||||
.map(|stream| {
|
||||
let sock = stream.into_parts().0;
|
||||
let h2 = sock
|
||||
.get_ref()
|
||||
.1
|
||||
.get_alpn_protocol()
|
||||
.map(|protos| protos.windows(2).any(|w| w == H2))
|
||||
.unwrap_or(false);
|
||||
if h2 {
|
||||
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
|
||||
} else {
|
||||
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
|
||||
}
|
||||
}),
|
||||
),
|
||||
}),
|
||||
)
|
||||
.map_err(|e| match e {
|
||||
TimeoutError::Service(e) => e,
|
||||
TimeoutError::Timeout => ConnectError::Timeout,
|
||||
});
|
||||
|
||||
let tcp_service = TimeoutService::new(
|
||||
self.config.timeout,
|
||||
apply_fn(self.connector, |msg: Connect, srv| {
|
||||
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
|
||||
})
|
||||
.map_err(ConnectError::from)
|
||||
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
|
||||
)
|
||||
.map_err(|e| match e {
|
||||
TimeoutError::Service(e) => e,
|
||||
TimeoutError::Timeout => ConnectError::Timeout,
|
||||
});
|
||||
|
||||
connect_impl::InnerConnector {
|
||||
tcp_pool: ConnectionPool::new(
|
||||
tcp_service,
|
||||
self.config.no_disconnect_timeout(),
|
||||
),
|
||||
ssl_pool: ConnectionPool::new(ssl_service, self.config),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
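// A usage sketch for the finished connector service; the URI is hypothetical and the
// wrapper function exists only to illustrate the request and response types.
#[cfg(test)]
#[allow(dead_code)]
async fn connector_usage_sketch() -> Result<(), ConnectError> {
    let mut connector = Connector::new()
        .timeout(Duration::from_secs(5))
        .finish();

    // `finish()` yields a `Service` that resolves a `Connect` request into a pooled
    // connection implementing the `Connection` trait.
    let _connection = connector
        .call(Connect {
            uri: "http://example.com".parse().unwrap(),
            addr: None,
        })
        .await?;

    Ok(())
}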
|
||||
|
||||
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
|
||||
mod connect_impl {
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use futures_util::future::{err, Either, Ready};
|
||||
|
||||
use super::*;
|
||||
use crate::client::connection::IoConnection;
|
||||
|
||||
pub(crate) struct InnerConnector<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
pub(crate) tcp_pool: ConnectionPool<T, Io>,
|
||||
}
|
||||
|
||||
impl<T, Io> Clone for InnerConnector<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
InnerConnector {
|
||||
tcp_pool: self.tcp_pool.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, Io> Service for InnerConnector<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
type Request = Connect;
|
||||
type Response = IoConnection<Io>;
|
||||
type Error = ConnectError;
|
||||
type Future = Either<
|
||||
<ConnectionPool<T, Io> as Service>::Future,
|
||||
Ready<Result<IoConnection<Io>, ConnectError>>,
|
||||
>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.tcp_pool.poll_ready(cx)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Connect) -> Self::Future {
|
||||
match req.uri.scheme_str() {
|
||||
Some("https") | Some("wss") => {
|
||||
Either::Right(err(ConnectError::SslIsNotSupported))
|
||||
}
|
||||
_ => Either::Left(self.tcp_pool.call(req)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(feature = "openssl", feature = "rustls"))]
|
||||
mod connect_impl {
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use futures_core::ready;
|
||||
use futures_util::future::Either;
|
||||
|
||||
use super::*;
|
||||
use crate::client::connection::EitherConnection;
|
||||
|
||||
pub(crate) struct InnerConnector<T1, T2, Io1, Io2>
|
||||
where
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>,
|
||||
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>,
|
||||
{
|
||||
pub(crate) tcp_pool: ConnectionPool<T1, Io1>,
|
||||
pub(crate) ssl_pool: ConnectionPool<T2, Io2>,
|
||||
}
|
||||
|
||||
impl<T1, T2, Io1, Io2> Clone for InnerConnector<T1, T2, Io1, Io2>
|
||||
where
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
InnerConnector {
|
||||
tcp_pool: self.tcp_pool.clone(),
|
||||
ssl_pool: self.ssl_pool.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T1, T2, Io1, Io2> Service for InnerConnector<T1, T2, Io1, Io2>
|
||||
where
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
type Request = Connect;
|
||||
type Response = EitherConnection<Io1, Io2>;
|
||||
type Error = ConnectError;
|
||||
type Future = Either<
|
||||
InnerConnectorResponseA<T1, Io1, Io2>,
|
||||
InnerConnectorResponseB<T2, Io1, Io2>,
|
||||
>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.tcp_pool.poll_ready(cx)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Connect) -> Self::Future {
|
||||
match req.uri.scheme_str() {
|
||||
Some("https") | Some("wss") => Either::Right(InnerConnectorResponseB {
|
||||
fut: self.ssl_pool.call(req),
|
||||
_t: PhantomData,
|
||||
}),
|
||||
_ => Either::Left(InnerConnectorResponseA {
|
||||
fut: self.tcp_pool.call(req),
|
||||
_t: PhantomData,
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project::pin_project]
|
||||
pub(crate) struct InnerConnectorResponseA<T, Io1, Io2>
|
||||
where
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
#[pin]
|
||||
fut: <ConnectionPool<T, Io1> as Service>::Future,
|
||||
_t: PhantomData<Io2>,
|
||||
}
|
||||
|
||||
impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2>
|
||||
where
|
||||
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
Poll::Ready(
|
||||
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
|
||||
.map(EitherConnection::A),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project::pin_project]
|
||||
pub(crate) struct InnerConnectorResponseB<T, Io1, Io2>
|
||||
where
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
#[pin]
|
||||
fut: <ConnectionPool<T, Io2> as Service>::Future,
|
||||
_t: PhantomData<Io1>,
|
||||
}
|
||||
|
||||
impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2>
|
||||
where
|
||||
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
Poll::Ready(
|
||||
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
|
||||
.map(EitherConnection::B),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,293 +0,0 @@
|
||||
use std::io::Write;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{io, mem, time};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite, Framed};
|
||||
use bytes::buf::BufMutExt;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures_core::Stream;
|
||||
use futures_util::future::poll_fn;
|
||||
use futures_util::{pin_mut, SinkExt, StreamExt};
|
||||
|
||||
use crate::error::PayloadError;
|
||||
use crate::h1;
|
||||
use crate::header::HeaderMap;
|
||||
use crate::http::header::{IntoHeaderValue, HOST};
|
||||
use crate::message::{RequestHeadType, ResponseHead};
|
||||
use crate::payload::{Payload, PayloadStream};
|
||||
|
||||
use super::connection::{ConnectionLifetime, ConnectionType, IoConnection};
|
||||
use super::error::{ConnectError, SendRequestError};
|
||||
use super::pool::Acquired;
|
||||
use crate::body::{BodySize, MessageBody};
|
||||
|
||||
pub(crate) async fn send_request<T, B>(
|
||||
io: T,
|
||||
mut head: RequestHeadType,
|
||||
body: B,
|
||||
created: time::Instant,
|
||||
pool: Option<Acquired<T>>,
|
||||
) -> Result<(ResponseHead, Payload), SendRequestError>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
B: MessageBody,
|
||||
{
|
||||
// set request host header
|
||||
if !head.as_ref().headers.contains_key(HOST)
|
||||
&& !head.extra_headers().iter().any(|h| h.contains_key(HOST))
|
||||
{
|
||||
if let Some(host) = head.as_ref().uri.host() {
|
||||
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
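// Default ports are omitted: the Host value is `host` for 80/443 and `host:port` otherwise.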
|
||||
|
||||
let _ = match head.as_ref().uri.port_u16() {
|
||||
None | Some(80) | Some(443) => write!(wrt, "{}", host),
|
||||
Some(port) => write!(wrt, "{}:{}", host, port),
|
||||
};
|
||||
|
||||
match wrt.get_mut().split().freeze().try_into() {
|
||||
Ok(value) => match head {
|
||||
RequestHeadType::Owned(ref mut head) => {
|
||||
head.headers.insert(HOST, value)
|
||||
}
|
||||
RequestHeadType::Rc(_, ref mut extra_headers) => {
|
||||
let headers = extra_headers.get_or_insert(HeaderMap::new());
|
||||
headers.insert(HOST, value)
|
||||
}
|
||||
},
|
||||
Err(e) => log::error!("Can not set HOST header {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let io = H1Connection {
|
||||
created,
|
||||
pool,
|
||||
io: Some(io),
|
||||
};
|
||||
|
||||
// create Framed and send request
|
||||
let mut framed = Framed::new(io, h1::ClientCodec::default());
|
||||
framed.send((head, body.size()).into()).await?;
|
||||
|
||||
// send request body
|
||||
match body.size() {
|
||||
BodySize::None | BodySize::Empty | BodySize::Sized(0) => (),
|
||||
_ => send_body(body, &mut framed).await?,
|
||||
};
|
||||
|
||||
// read response and init read body
|
||||
let res = framed.into_future().await;
|
||||
let (head, framed) = if let (Some(result), framed) = res {
|
||||
let item = result.map_err(SendRequestError::from)?;
|
||||
(item, framed)
|
||||
} else {
|
||||
return Err(SendRequestError::from(ConnectError::Disconnected));
|
||||
};
|
||||
|
||||
match framed.get_codec().message_type() {
|
||||
h1::MessageType::None => {
|
||||
let force_close = !framed.get_codec().keepalive();
|
||||
release_connection(framed, force_close);
|
||||
Ok((head, Payload::None))
|
||||
}
|
||||
_ => {
|
||||
let pl: PayloadStream = PlStream::new(framed).boxed_local();
|
||||
Ok((head, pl.into()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn open_tunnel<T>(
|
||||
io: T,
|
||||
head: RequestHeadType,
|
||||
) -> Result<(ResponseHead, Framed<T, h1::ClientCodec>), SendRequestError>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
// create Framed and send request
|
||||
let mut framed = Framed::new(io, h1::ClientCodec::default());
|
||||
framed.send((head, BodySize::None).into()).await?;
|
||||
|
||||
// read response
|
||||
if let (Some(result), framed) = framed.into_future().await {
|
||||
let head = result.map_err(SendRequestError::from)?;
|
||||
Ok((head, framed))
|
||||
} else {
|
||||
Err(SendRequestError::from(ConnectError::Disconnected))
|
||||
}
|
||||
}
|
||||
|
||||
/// Send the request body to the peer
|
||||
pub(crate) async fn send_body<I, B>(
|
||||
body: B,
|
||||
framed: &mut Framed<I, h1::ClientCodec>,
|
||||
) -> Result<(), SendRequestError>
|
||||
where
|
||||
I: ConnectionLifetime,
|
||||
B: MessageBody,
|
||||
{
|
||||
let mut eof = false;
|
||||
pin_mut!(body);
|
||||
while !eof {
|
||||
while !eof && !framed.is_write_buf_full() {
|
||||
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
|
||||
Some(result) => {
|
||||
framed.write(h1::Message::Chunk(Some(result?)))?;
|
||||
}
|
||||
None => {
|
||||
eof = true;
|
||||
framed.write(h1::Message::Chunk(None))?;
|
||||
}
|
||||
}
|
||||
}
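// The write buffer is full (or the body has ended); flush it. The Pending arm below
// lets the outer loop resume as soon as enough buffer space has been freed, without
// waiting for the flush to complete entirely.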
|
||||
|
||||
if !framed.is_write_buf_empty() {
|
||||
poll_fn(|cx| match framed.flush(cx) {
|
||||
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
|
||||
Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
|
||||
Poll::Pending => {
|
||||
if !framed.is_write_buf_full() {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
})
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
SinkExt::flush(framed).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
/// HTTP client connection
|
||||
pub struct H1Connection<T> {
|
||||
io: Option<T>,
|
||||
created: time::Instant,
|
||||
pool: Option<Acquired<T>>,
|
||||
}
|
||||
|
||||
impl<T> ConnectionLifetime for H1Connection<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
/// Close connection
|
||||
fn close(&mut self) {
|
||||
if let Some(mut pool) = self.pool.take() {
|
||||
if let Some(io) = self.io.take() {
|
||||
pool.close(IoConnection::new(
|
||||
ConnectionType::H1(io),
|
||||
self.created,
|
||||
None,
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Release this connection to the connection pool
|
||||
fn release(&mut self) {
|
||||
if let Some(mut pool) = self.pool.take() {
|
||||
if let Some(io) = self.io.take() {
|
||||
pool.release(IoConnection::new(
|
||||
ConnectionType::H1(io),
|
||||
self.created,
|
||||
None,
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncRead for H1Connection<T> {
|
||||
unsafe fn prepare_uninitialized_buffer(
|
||||
&self,
|
||||
buf: &mut [mem::MaybeUninit<u8>],
|
||||
) -> bool {
|
||||
self.io.as_ref().unwrap().prepare_uninitialized_buffer(buf)
|
||||
}
|
||||
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut [u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
Pin::new(&mut self.io.as_mut().unwrap()).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncWrite for H1Connection<T> {
|
||||
fn poll_write(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
Pin::new(&mut self.io.as_mut().unwrap()).poll_write(cx, buf)
|
||||
}
|
||||
|
||||
fn poll_flush(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
Pin::new(self.io.as_mut().unwrap()).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn poll_shutdown(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Result<(), io::Error>> {
|
||||
Pin::new(self.io.as_mut().unwrap()).poll_shutdown(cx)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct PlStream<Io> {
|
||||
framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
|
||||
}
|
||||
|
||||
impl<Io: ConnectionLifetime> PlStream<Io> {
|
||||
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
|
||||
PlStream {
|
||||
framed: Some(framed.map_codec(|codec| codec.into_payload_codec())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
|
||||
type Item = Result<Bytes, PayloadError>;
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
let this = self.get_mut();
|
||||
|
||||
match this.framed.as_mut().unwrap().next_item(cx)? {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Some(chunk)) => {
|
||||
if let Some(chunk) = chunk {
|
||||
Poll::Ready(Some(Ok(chunk)))
|
||||
} else {
|
||||
let framed = this.framed.take().unwrap();
|
||||
let force_close = !framed.get_codec().keepalive();
|
||||
release_connection(framed, force_close);
|
||||
Poll::Ready(None)
|
||||
}
|
||||
}
|
||||
Poll::Ready(None) => Poll::Ready(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn release_connection<T, U>(framed: Framed<T, U>, force_close: bool)
|
||||
where
|
||||
T: ConnectionLifetime,
|
||||
{
|
||||
let mut parts = framed.into_parts();
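// The connection goes back to the pool only if it was not forced closed and both
// codec buffers are empty; leftover bytes mean the protocol state is unknown, so the
// connection is closed instead.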
|
||||
if !force_close && parts.read_buf.is_empty() && parts.write_buf.is_empty() {
|
||||
parts.io.release()
|
||||
} else {
|
||||
parts.io.close()
|
||||
}
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
//! HTTP client API.
|
||||
use http::Uri;
|
||||
|
||||
mod config;
|
||||
mod connection;
|
||||
mod connector;
|
||||
mod error;
|
||||
mod h1proto;
|
||||
mod h2proto;
|
||||
mod pool;
|
||||
|
||||
pub use self::connection::Connection;
|
||||
pub use self::connector::Connector;
|
||||
pub use self::error::{ConnectError, FreezeRequestError, InvalidUrl, SendRequestError};
|
||||
pub use self::pool::Protocol;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Connect {
|
||||
pub uri: Uri,
|
||||
pub addr: Option<std::net::SocketAddr>,
|
||||
}
|
||||
@@ -1,644 +0,0 @@
|
||||
use std::cell::RefCell;
|
||||
use std::collections::VecDeque;
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::rc::Rc;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_rt::time::{delay_for, Delay};
|
||||
use actix_service::Service;
|
||||
use actix_utils::{oneshot, task::LocalWaker};
|
||||
use bytes::Bytes;
|
||||
use futures_util::future::{poll_fn, FutureExt, LocalBoxFuture};
|
||||
use fxhash::FxHashMap;
|
||||
use h2::client::{Connection, SendRequest};
|
||||
use http::uri::Authority;
|
||||
use indexmap::IndexSet;
|
||||
use pin_project::pin_project;
|
||||
use slab::Slab;
|
||||
|
||||
use super::config::ConnectorConfig;
|
||||
use super::connection::{ConnectionType, IoConnection};
|
||||
use super::error::ConnectError;
|
||||
use super::h2proto::handshake;
|
||||
use super::Connect;
|
||||
|
||||
#[derive(Clone, Copy, PartialEq)]
|
||||
/// Protocol version
|
||||
pub enum Protocol {
|
||||
Http1,
|
||||
Http2,
|
||||
}
|
||||
|
||||
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
|
||||
pub(crate) struct Key {
|
||||
authority: Authority,
|
||||
}
|
||||
|
||||
impl From<Authority> for Key {
|
||||
fn from(authority: Authority) -> Key {
|
||||
Key { authority }
|
||||
}
|
||||
}
|
||||
|
||||
/// Connection pool
|
||||
pub(crate) struct ConnectionPool<T, Io: 'static>(Rc<RefCell<T>>, Rc<RefCell<Inner<Io>>>);
|
||||
|
||||
impl<T, Io> ConnectionPool<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self {
|
||||
let connector_rc = Rc::new(RefCell::new(connector));
|
||||
let inner_rc = Rc::new(RefCell::new(Inner {
|
||||
config,
|
||||
acquired: 0,
|
||||
waiters: Slab::new(),
|
||||
waiters_queue: IndexSet::new(),
|
||||
available: FxHashMap::default(),
|
||||
waker: LocalWaker::new(),
|
||||
}));
|
||||
|
||||
// start support future
|
||||
actix_rt::spawn(ConnectorPoolSupport {
|
||||
connector: Rc::clone(&connector_rc),
|
||||
inner: Rc::clone(&inner_rc),
|
||||
});
|
||||
|
||||
ConnectionPool(connector_rc, inner_rc)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, Io> Clone for ConnectionPool<T, Io>
|
||||
where
|
||||
Io: 'static,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
ConnectionPool(self.0.clone(), self.1.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, Io> Drop for ConnectionPool<T, Io> {
|
||||
fn drop(&mut self) {
|
||||
// wake up the ConnectorPoolSupport when dropping so it can exit properly.
|
||||
self.1.borrow().waker.wake();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, Io> Service for ConnectionPool<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
|
||||
+ 'static,
|
||||
{
|
||||
type Request = Connect;
|
||||
type Response = IoConnection<Io>;
|
||||
type Error = ConnectError;
|
||||
type Future = LocalBoxFuture<'static, Result<IoConnection<Io>, ConnectError>>;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.0.poll_ready(cx)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: Connect) -> Self::Future {
|
||||
let mut connector = self.0.clone();
|
||||
let inner = self.1.clone();
|
||||
|
||||
let fut = async move {
|
||||
let key = if let Some(authority) = req.uri.authority() {
|
||||
authority.clone().into()
|
||||
} else {
|
||||
return Err(ConnectError::Unresolved);
|
||||
};
|
||||
|
||||
// acquire connection
|
||||
match poll_fn(|cx| Poll::Ready(inner.borrow_mut().acquire(&key, cx))).await {
|
||||
Acquire::Acquired(io, created) => {
|
||||
// use existing connection
|
||||
return Ok(IoConnection::new(
|
||||
io,
|
||||
created,
|
||||
Some(Acquired(key, Some(inner))),
|
||||
));
|
||||
}
|
||||
Acquire::Available => {
|
||||
// open tcp connection
|
||||
let (io, proto) = connector.call(req).await?;
|
||||
|
||||
let config = inner.borrow().config.clone();
|
||||
|
||||
let guard = OpenGuard::new(key, inner);
|
||||
|
||||
if proto == Protocol::Http1 {
|
||||
Ok(IoConnection::new(
|
||||
ConnectionType::H1(io),
|
||||
Instant::now(),
|
||||
Some(guard.consume()),
|
||||
))
|
||||
} else {
|
||||
let (snd, connection) = handshake(io, &config).await?;
|
||||
actix_rt::spawn(connection.map(|_| ()));
|
||||
Ok(IoConnection::new(
|
||||
ConnectionType::H2(snd),
|
||||
Instant::now(),
|
||||
Some(guard.consume()),
|
||||
))
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
// connection is not available, wait
|
||||
let (rx, token) = inner.borrow_mut().wait_for(req);
|
||||
|
||||
let guard = WaiterGuard::new(key, token, inner);
|
||||
let res = match rx.await {
|
||||
Err(_) => Err(ConnectError::Disconnected),
|
||||
Ok(res) => res,
|
||||
};
|
||||
guard.consume();
|
||||
res
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
fut.boxed_local()
|
||||
}
|
||||
}
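// WaiterGuard and OpenGuard are drop guards: if the future holding them is dropped
// before completion, they release the waiter slot or the reserved connection count so
// the pool's accounting stays correct. `consume()` defuses the guard on the success path.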
|
||||
|
||||
struct WaiterGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
key: Key,
|
||||
token: usize,
|
||||
inner: Option<Rc<RefCell<Inner<Io>>>>,
|
||||
}
|
||||
|
||||
impl<Io> WaiterGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn new(key: Key, token: usize, inner: Rc<RefCell<Inner<Io>>>) -> Self {
|
||||
Self {
|
||||
key,
|
||||
token,
|
||||
inner: Some(inner),
|
||||
}
|
||||
}
|
||||
|
||||
fn consume(mut self) {
|
||||
let _ = self.inner.take();
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io> Drop for WaiterGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
if let Some(i) = self.inner.take() {
|
||||
let mut inner = i.as_ref().borrow_mut();
|
||||
inner.release_waiter(&self.key, self.token);
|
||||
inner.check_availability();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct OpenGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
key: Key,
|
||||
inner: Option<Rc<RefCell<Inner<Io>>>>,
|
||||
}
|
||||
|
||||
impl<Io> OpenGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>) -> Self {
|
||||
Self {
|
||||
key,
|
||||
inner: Some(inner),
|
||||
}
|
||||
}
|
||||
|
||||
fn consume(mut self) -> Acquired<Io> {
|
||||
Acquired(self.key.clone(), self.inner.take())
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io> Drop for OpenGuard<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
if let Some(i) = self.inner.take() {
|
||||
let mut inner = i.as_ref().borrow_mut();
|
||||
inner.release();
|
||||
inner.check_availability();
|
||||
}
|
||||
}
|
||||
}
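// Outcome of `Inner::acquire`: an idle pooled connection, permission to open a new
// one, or back-pressure because the connection limit has been reached.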
|
||||
|
||||
enum Acquire<T> {
|
||||
Acquired(ConnectionType<T>, Instant),
|
||||
Available,
|
||||
NotAvailable,
|
||||
}
|
||||
|
||||
struct AvailableConnection<Io> {
|
||||
io: ConnectionType<Io>,
|
||||
used: Instant,
|
||||
created: Instant,
|
||||
}
|
||||
|
||||
pub(crate) struct Inner<Io> {
|
||||
config: ConnectorConfig,
|
||||
acquired: usize,
|
||||
available: FxHashMap<Key, VecDeque<AvailableConnection<Io>>>,
|
||||
waiters: Slab<
|
||||
Option<(
|
||||
Connect,
|
||||
oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
|
||||
)>,
|
||||
>,
|
||||
waiters_queue: IndexSet<(Key, usize)>,
|
||||
waker: LocalWaker,
|
||||
}
|
||||
|
||||
impl<Io> Inner<Io> {
|
||||
fn reserve(&mut self) {
|
||||
self.acquired += 1;
|
||||
}
|
||||
|
||||
fn release(&mut self) {
|
||||
self.acquired -= 1;
|
||||
}
|
||||
|
||||
fn release_waiter(&mut self, key: &Key, token: usize) {
|
||||
self.waiters.remove(token);
|
||||
let _ = self.waiters_queue.shift_remove(&(key.clone(), token));
|
||||
}
|
||||
}
|
||||
|
||||
impl<Io> Inner<Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
/// Connection is not available; register a waiter and wait for a free slot.
|
||||
fn wait_for(
|
||||
&mut self,
|
||||
connect: Connect,
|
||||
) -> (
|
||||
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
|
||||
usize,
|
||||
) {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
|
||||
let key: Key = connect.uri.authority().unwrap().clone().into();
|
||||
let entry = self.waiters.vacant_entry();
|
||||
let token = entry.key();
|
||||
entry.insert(Some((connect, tx)));
|
||||
assert!(self.waiters_queue.insert((key, token)));
|
||||
|
||||
(rx, token)
|
||||
}
|
||||
|
||||
fn acquire(&mut self, key: &Key, cx: &mut Context<'_>) -> Acquire<Io> {
|
||||
// check limits
|
||||
if self.config.limit > 0 && self.acquired >= self.config.limit {
|
||||
return Acquire::NotAvailable;
|
||||
}
|
||||
|
||||
self.reserve();
|
||||
|
||||
// check if open connection is available
|
||||
// cleanup stale connections at the same time
|
||||
if let Some(ref mut connections) = self.available.get_mut(key) {
|
||||
let now = Instant::now();
|
||||
while let Some(conn) = connections.pop_back() {
|
||||
// check if it is still usable
|
||||
if (now - conn.used) > self.config.conn_keep_alive
|
||||
|| (now - conn.created) > self.config.conn_lifetime
|
||||
{
|
||||
if let Some(timeout) = self.config.disconnect_timeout {
|
||||
if let ConnectionType::H1(io) = conn.io {
|
||||
actix_rt::spawn(CloseConnection::new(io, timeout))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let mut io = conn.io;
|
||||
let mut buf = [0; 2];
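// Probe the idle H1 socket with a read: Pending means the connection is still alive
// and idle; readable data or EOF means the peer closed it or sent stray bytes, so
// the connection is discarded.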
|
||||
if let ConnectionType::H1(ref mut s) = io {
|
||||
match Pin::new(s).poll_read(cx, &mut buf) {
|
||||
Poll::Pending => (),
|
||||
Poll::Ready(Ok(n)) if n > 0 => {
|
||||
if let Some(timeout) = self.config.disconnect_timeout {
|
||||
if let ConnectionType::H1(io) = io {
|
||||
actix_rt::spawn(CloseConnection::new(
|
||||
io, timeout,
|
||||
))
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
_ => continue,
|
||||
}
|
||||
}
|
||||
return Acquire::Acquired(io, conn.created);
|
||||
}
|
||||
}
|
||||
}
|
||||
Acquire::Available
|
||||
}
|
||||
|
||||
fn release_conn(&mut self, key: &Key, io: ConnectionType<Io>, created: Instant) {
|
||||
self.acquired -= 1;
|
||||
self.available
|
||||
.entry(key.clone())
|
||||
.or_insert_with(VecDeque::new)
|
||||
.push_back(AvailableConnection {
|
||||
io,
|
||||
created,
|
||||
used: Instant::now(),
|
||||
});
|
||||
self.check_availability();
|
||||
}
|
||||
|
||||
fn release_close(&mut self, io: ConnectionType<Io>) {
|
||||
self.acquired -= 1;
|
||||
if let Some(timeout) = self.config.disconnect_timeout {
|
||||
if let ConnectionType::H1(io) = io {
|
||||
actix_rt::spawn(CloseConnection::new(io, timeout))
|
||||
}
|
||||
}
|
||||
self.check_availability();
|
||||
}
|
||||
|
||||
fn check_availability(&self) {
|
||||
if !self.waiters_queue.is_empty() && self.acquired < self.config.limit {
|
||||
self.waker.wake();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct CloseConnection<T> {
|
||||
io: T,
|
||||
timeout: Delay,
|
||||
}
|
||||
|
||||
impl<T> CloseConnection<T>
|
||||
where
|
||||
T: AsyncWrite + Unpin,
|
||||
{
|
||||
fn new(io: T, timeout: Duration) -> Self {
|
||||
CloseConnection {
|
||||
io,
|
||||
timeout: delay_for(timeout),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Future for CloseConnection<T>
|
||||
where
|
||||
T: AsyncWrite + Unpin,
|
||||
{
|
||||
type Output = ();
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
|
||||
let this = self.get_mut();
|
||||
|
||||
match Pin::new(&mut this.timeout).poll(cx) {
|
||||
Poll::Ready(_) => Poll::Ready(()),
|
||||
Poll::Pending => match Pin::new(&mut this.io).poll_shutdown(cx) {
|
||||
Poll::Ready(_) => Poll::Ready(()),
|
||||
Poll::Pending => Poll::Pending,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project]
|
||||
struct ConnectorPoolSupport<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
connector: T,
|
||||
inner: Rc<RefCell<Inner<Io>>>,
|
||||
}
|
||||
|
||||
impl<T, Io> Future for ConnectorPoolSupport<T, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>,
|
||||
T::Future: 'static,
|
||||
{
|
||||
type Output = ();
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.project();
|
||||
|
||||
if Rc::strong_count(this.inner) == 1 {
|
||||
// If we are the last copy of Inner<Io>, it means the ConnectionPool is already gone
|
||||
// and we are safe to exit.
|
||||
return Poll::Ready(());
|
||||
}
|
||||
|
||||
let mut inner = this.inner.borrow_mut();
|
||||
inner.waker.register(cx.waker());
|
||||
|
||||
// check waiters
|
||||
loop {
|
||||
let (key, token) = {
|
||||
if let Some((key, token)) = inner.waiters_queue.get_index(0) {
|
||||
(key.clone(), *token)
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
};
|
||||
if inner.waiters.get(token).unwrap().is_none() {
|
||||
continue;
|
||||
}
|
||||
|
||||
match inner.acquire(&key, cx) {
|
||||
Acquire::NotAvailable => break,
|
||||
Acquire::Acquired(io, created) => {
|
||||
let tx = inner.waiters.get_mut(token).unwrap().take().unwrap().1;
|
||||
if let Err(conn) = tx.send(Ok(IoConnection::new(
|
||||
io,
|
||||
created,
|
||||
Some(Acquired(key.clone(), Some(this.inner.clone()))),
|
||||
))) {
|
||||
let (io, created) = conn.unwrap().into_inner();
|
||||
inner.release_conn(&key, io, created);
|
||||
}
|
||||
}
|
||||
Acquire::Available => {
|
||||
let (connect, tx) =
|
||||
inner.waiters.get_mut(token).unwrap().take().unwrap();
|
||||
OpenWaitingConnection::spawn(
|
||||
key.clone(),
|
||||
tx,
|
||||
this.inner.clone(),
|
||||
this.connector.call(connect),
|
||||
inner.config.clone(),
|
||||
);
|
||||
}
|
||||
}
|
||||
let _ = inner.waiters_queue.swap_remove_index(0);
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project::pin_project(PinnedDrop)]
|
||||
struct OpenWaitingConnection<F, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
#[pin]
|
||||
fut: F,
|
||||
key: Key,
|
||||
h2: Option<
|
||||
LocalBoxFuture<
|
||||
'static,
|
||||
Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>,
|
||||
>,
|
||||
>,
|
||||
rx: Option<oneshot::Sender<Result<IoConnection<Io>, ConnectError>>>,
|
||||
inner: Option<Rc<RefCell<Inner<Io>>>>,
|
||||
config: ConnectorConfig,
|
||||
}
|
||||
|
||||
impl<F, Io> OpenWaitingConnection<F, Io>
|
||||
where
|
||||
F: Future<Output = Result<(Io, Protocol), ConnectError>> + 'static,
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn spawn(
|
||||
key: Key,
|
||||
rx: oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
|
||||
inner: Rc<RefCell<Inner<Io>>>,
|
||||
fut: F,
|
||||
config: ConnectorConfig,
|
||||
) {
|
||||
actix_rt::spawn(OpenWaitingConnection {
|
||||
key,
|
||||
fut,
|
||||
h2: None,
|
||||
rx: Some(rx),
|
||||
inner: Some(inner),
|
||||
config,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project::pinned_drop]
|
||||
impl<F, Io> PinnedDrop for OpenWaitingConnection<F, Io>
|
||||
where
|
||||
Io: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
fn drop(self: Pin<&mut Self>) {
|
||||
if let Some(inner) = self.project().inner.take() {
|
||||
let mut inner = inner.as_ref().borrow_mut();
|
||||
inner.release();
|
||||
inner.check_availability();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, Io> Future for OpenWaitingConnection<F, Io>
|
||||
where
|
||||
F: Future<Output = Result<(Io, Protocol), ConnectError>>,
|
||||
Io: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
type Output = ();
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let this = self.as_mut().project();
|
||||
|
||||
if let Some(ref mut h2) = this.h2 {
|
||||
return match Pin::new(h2).poll(cx) {
|
||||
Poll::Ready(Ok((snd, connection))) => {
|
||||
actix_rt::spawn(connection.map(|_| ()));
|
||||
let rx = this.rx.take().unwrap();
|
||||
let _ = rx.send(Ok(IoConnection::new(
|
||||
ConnectionType::H2(snd),
|
||||
Instant::now(),
|
||||
Some(Acquired(this.key.clone(), this.inner.take())),
|
||||
)));
|
||||
Poll::Ready(())
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Err(err)) => {
|
||||
let _ = this.inner.take();
|
||||
if let Some(rx) = this.rx.take() {
|
||||
let _ = rx.send(Err(ConnectError::H2(err)));
|
||||
}
|
||||
Poll::Ready(())
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
match this.fut.poll(cx) {
|
||||
Poll::Ready(Err(err)) => {
|
||||
let _ = this.inner.take();
|
||||
if let Some(rx) = this.rx.take() {
|
||||
let _ = rx.send(Err(err));
|
||||
}
|
||||
Poll::Ready(())
|
||||
}
|
||||
Poll::Ready(Ok((io, proto))) => {
|
||||
if proto == Protocol::Http1 {
|
||||
let rx = this.rx.take().unwrap();
|
||||
let _ = rx.send(Ok(IoConnection::new(
|
||||
ConnectionType::H1(io),
|
||||
Instant::now(),
|
||||
Some(Acquired(this.key.clone(), this.inner.take())),
|
||||
)));
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
*this.h2 = Some(handshake(io, this.config).boxed_local());
|
||||
self.poll(cx)
|
||||
}
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct Acquired<T>(Key, Option<Rc<RefCell<Inner<T>>>>);
|
||||
|
||||
impl<T> Acquired<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Unpin + 'static,
|
||||
{
|
||||
pub(crate) fn close(&mut self, conn: IoConnection<T>) {
|
||||
if let Some(inner) = self.1.take() {
|
||||
let (io, _) = conn.into_inner();
|
||||
inner.as_ref().borrow_mut().release_close(io);
|
||||
}
|
||||
}
|
||||
pub(crate) fn release(&mut self, conn: IoConnection<T>) {
|
||||
if let Some(inner) = self.1.take() {
|
||||
let (io, created) = conn.into_inner();
|
||||
inner
|
||||
.as_ref()
|
||||
.borrow_mut()
|
||||
.release_conn(&self.0, io, created);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Acquired<T> {
|
||||
fn drop(&mut self) {
|
||||
if let Some(inner) = self.1.take() {
|
||||
inner.as_ref().borrow_mut().release();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
use std::cell::RefCell;
|
||||
use std::rc::Rc;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use actix_service::Service;
|
||||
|
||||
#[doc(hidden)]
|
||||
/// Service that allows turning a non-`Clone` service into a service with a `Clone` impl.
|
||||
///
|
||||
/// # Panics
|
||||
/// CloneableService might panic with some creative use of thread local storage.
|
||||
/// See https://github.com/actix/actix-web/issues/1295 for an example.
|
||||
pub(crate) struct CloneableService<T: Service>(Rc<RefCell<T>>);
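// Note: the inner `Rc<RefCell<_>>` means all clones share a single service instance
// and are bound to one thread.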
|
||||
|
||||
impl<T: Service> CloneableService<T> {
|
||||
pub(crate) fn new(service: T) -> Self {
|
||||
Self(Rc::new(RefCell::new(service)))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Service> Clone for CloneableService<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Self(self.0.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Service> Service for CloneableService<T> {
|
||||
type Request = T::Request;
|
||||
type Response = T::Response;
|
||||
type Error = T::Error;
|
||||
type Future = T::Future;
|
||||
|
||||
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self.0.borrow_mut().poll_ready(cx)
|
||||
}
|
||||
|
||||
fn call(&mut self, req: T::Request) -> Self::Future {
|
||||
self.0.borrow_mut().call(req)
|
||||
}
|
||||
}
|
||||
@@ -1,24 +1,29 @@
|
||||
use std::cell::Cell;
|
||||
use std::fmt::Write;
|
||||
use std::rc::Rc;
|
||||
use std::time::Duration;
|
||||
use std::{fmt, net};
|
||||
use std::{
|
||||
cell::Cell,
|
||||
fmt::{self, Write},
|
||||
net,
|
||||
rc::Rc,
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use actix_rt::time::{delay_for, delay_until, Delay, Instant};
|
||||
use actix_rt::{
|
||||
task::JoinHandle,
|
||||
time::{interval, sleep_until, Instant, Sleep},
|
||||
};
|
||||
use bytes::BytesMut;
|
||||
use futures_util::{future, FutureExt};
|
||||
use time::OffsetDateTime;
|
||||
|
||||
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
|
||||
const DATE_VALUE_LENGTH: usize = 29;
|
||||
/// "Sun, 06 Nov 1994 08:49:37 GMT".len()
|
||||
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||
/// Server keep-alive setting
|
||||
pub enum KeepAlive {
|
||||
/// Keep alive in seconds
|
||||
Timeout(usize),
|
||||
/// Relay on OS to shutdown tcp connection
|
||||
|
||||
/// Rely on OS to shutdown tcp connection
|
||||
Os,
|
||||
|
||||
/// Disabled
|
||||
Disabled,
|
||||
}
|
||||
@@ -49,7 +54,7 @@ struct Inner {
|
||||
ka_enabled: bool,
|
||||
secure: bool,
|
||||
local_addr: Option<std::net::SocketAddr>,
|
||||
timer: DateService,
|
||||
date_service: DateService,
|
||||
}
|
||||
|
||||
impl Clone for ServiceConfig {
|
||||
@@ -91,42 +96,42 @@ impl ServiceConfig {
|
||||
client_disconnect,
|
||||
secure,
|
||||
local_addr,
|
||||
timer: DateService::new(),
|
||||
date_service: DateService::new(),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Returns true if connection is secure (HTTPS)
|
||||
#[inline]
|
||||
/// Returns true if connection is secure(https)
|
||||
pub fn secure(&self) -> bool {
|
||||
self.0.secure
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Returns the local address that this server is bound to.
|
||||
///
|
||||
/// Returns `None` for connections via UDS (Unix Domain Socket).
|
||||
#[inline]
|
||||
pub fn local_addr(&self) -> Option<net::SocketAddr> {
|
||||
self.0.local_addr
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Keep alive duration if configured.
|
||||
#[inline]
|
||||
pub fn keep_alive(&self) -> Option<Duration> {
|
||||
self.0.keep_alive
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Return state of connection keep-alive functionality
|
||||
#[inline]
|
||||
pub fn keep_alive_enabled(&self) -> bool {
|
||||
self.0.ka_enabled
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Client timeout for first request.
|
||||
pub fn client_timer(&self) -> Option<Delay> {
|
||||
#[inline]
|
||||
pub fn client_timer(&self) -> Option<Sleep> {
|
||||
let delay_time = self.0.client_timeout;
|
||||
if delay_time != 0 {
|
||||
Some(delay_until(
|
||||
self.0.timer.now() + Duration::from_millis(delay_time),
|
||||
))
|
||||
Some(sleep_until(self.now() + Duration::from_millis(delay_time)))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@@ -136,7 +141,7 @@ impl ServiceConfig {
|
||||
pub fn client_timer_expire(&self) -> Option<Instant> {
|
||||
let delay = self.0.client_timeout;
|
||||
if delay != 0 {
|
||||
Some(self.0.timer.now() + Duration::from_millis(delay))
|
||||
Some(self.now() + Duration::from_millis(delay))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@@ -146,34 +151,26 @@ impl ServiceConfig {
|
||||
pub fn client_disconnect_timer(&self) -> Option<Instant> {
|
||||
let delay = self.0.client_disconnect;
|
||||
if delay != 0 {
|
||||
Some(self.0.timer.now() + Duration::from_millis(delay))
|
||||
Some(self.now() + Duration::from_millis(delay))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// Return keep-alive timer if configured.
|
||||
pub fn keep_alive_timer(&self) -> Option<Delay> {
|
||||
if let Some(ka) = self.0.keep_alive {
|
||||
Some(delay_until(self.0.timer.now() + ka))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
#[inline]
|
||||
pub fn keep_alive_timer(&self) -> Option<Sleep> {
|
||||
self.keep_alive().map(|ka| sleep_until(self.now() + ka))
|
||||
}
|
||||
|
||||
/// Keep-alive expire time
|
||||
pub fn keep_alive_expire(&self) -> Option<Instant> {
|
||||
if let Some(ka) = self.0.keep_alive {
|
||||
Some(self.0.timer.now() + ka)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
self.keep_alive().map(|ka| self.now() + ka)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn now(&self) -> Instant {
|
||||
self.0.timer.now()
|
||||
self.0.date_service.now()
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
@@ -181,7 +178,7 @@ impl ServiceConfig {
|
||||
let mut buf: [u8; 39] = [0; 39];
|
||||
buf[..6].copy_from_slice(b"date: ");
|
||||
self.0
|
||||
.timer
|
||||
.date_service
|
||||
.set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
|
||||
buf[35..].copy_from_slice(b"\r\n\r\n");
|
||||
dst.extend_from_slice(&buf);
|
||||
@@ -189,7 +186,7 @@ impl ServiceConfig {
|
||||
|
||||
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
|
||||
self.0
|
||||
.timer
|
||||
.date_service
|
||||
.set_date(|date| dst.extend_from_slice(&date.bytes));
|
||||
}
|
||||
}
|
||||
@@ -209,14 +206,10 @@ impl Date {
|
||||
date.update();
|
||||
date
|
||||
}
|
||||
|
||||
fn update(&mut self) {
|
||||
self.pos = 0;
|
||||
write!(
|
||||
self,
|
||||
"{}",
|
||||
OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT")
|
||||
)
|
||||
.unwrap();
|
||||
write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap();
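// httpdate renders the fixed-width IMF-fixdate form, e.g. "Sun, 06 Nov 1994 08:49:37 GMT",
// so the output is always DATE_VALUE_LENGTH (29) bytes.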
|
||||
}
|
||||
}
|
||||
|
||||
@@ -229,57 +222,102 @@ impl fmt::Write for Date {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct DateService(Rc<DateServiceInner>);
|
||||
|
||||
struct DateServiceInner {
|
||||
current: Cell<Option<(Date, Instant)>>,
|
||||
/// Service that updates the Date and Instant periodically, at a 500 ms interval.
|
||||
struct DateService {
|
||||
current: Rc<Cell<(Date, Instant)>>,
|
||||
handle: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl DateServiceInner {
|
||||
fn new() -> Self {
|
||||
DateServiceInner {
|
||||
current: Cell::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn reset(&self) {
|
||||
self.current.take();
|
||||
}
|
||||
|
||||
fn update(&self) {
|
||||
let now = Instant::now();
|
||||
let date = Date::new();
|
||||
self.current.set(Some((date, now)));
|
||||
impl Drop for DateService {
|
||||
fn drop(&mut self) {
|
||||
// stop the timer update async task on drop.
|
||||
self.handle.abort();
|
||||
}
|
||||
}
|
||||
|
||||
impl DateService {
|
||||
fn new() -> Self {
|
||||
DateService(Rc::new(DateServiceInner::new()))
|
||||
}
|
||||
// shared date and timer for DateService and update async task.
|
||||
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
|
||||
let current_clone = Rc::clone(¤t);
|
||||
// spawn an async task that sleeps for 500 ms and updates the current date/timer in a loop.
|
||||
// handle is used to stop the task on DateService drop.
|
||||
let handle = actix_rt::spawn(async move {
|
||||
#[cfg(test)]
|
||||
let _notify = notify_on_drop::NotifyOnDrop::new();
|
||||
|
||||
fn check_date(&self) {
|
||||
if self.0.current.get().is_none() {
|
||||
self.0.update();
|
||||
let mut interval = interval(Duration::from_millis(500));
|
||||
loop {
|
||||
let now = interval.tick().await;
|
||||
let date = Date::new();
|
||||
current_clone.set((date, now));
|
||||
}
|
||||
});
|
||||
|
||||
// periodic date update
|
||||
let s = self.clone();
|
||||
actix_rt::spawn(delay_for(Duration::from_millis(500)).then(move |_| {
|
||||
s.0.reset();
|
||||
future::ready(())
|
||||
}));
|
||||
}
|
||||
DateService { current, handle }
|
||||
}
|
||||
|
||||
fn now(&self) -> Instant {
|
||||
self.check_date();
|
||||
self.0.current.get().unwrap().1
|
||||
self.current.get().1
|
||||
}
|
||||
|
||||
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
|
||||
self.check_date();
|
||||
f(&self.0.current.get().unwrap().0);
|
||||
f(&self.current.get().0);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: move to a util module for testing all spawn handle drop style tasks.
|
||||
/// Test Module for checking the drop state of certain async tasks that are spawned
|
||||
/// with `actix_rt::spawn`
|
||||
///
|
||||
/// The target task must explicitly construct a `NotifyOnDrop` when the task is spawned.
|
||||
#[cfg(test)]
|
||||
mod notify_on_drop {
|
||||
use std::cell::RefCell;
|
||||
|
||||
thread_local! {
|
||||
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
|
||||
}
|
||||
|
||||
/// Check if the spawned task is dropped.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics when there was no `NotifyOnDrop` instance on current thread.
|
||||
pub(crate) fn is_dropped() -> bool {
|
||||
NOTIFY_DROPPED.with(|bool| {
|
||||
bool.borrow()
|
||||
.expect("No NotifyOnDrop existed on current thread")
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) struct NotifyOnDrop;
|
||||
|
||||
impl NotifyOnDrop {
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics when constructing multiple instances on the same thread.
|
||||
pub(crate) fn new() -> Self {
|
||||
NOTIFY_DROPPED.with(|bool| {
|
||||
let mut bool = bool.borrow_mut();
|
||||
if bool.is_some() {
|
||||
panic!("NotifyOnDrop existed on current thread");
|
||||
} else {
|
||||
*bool = Some(false);
|
||||
}
|
||||
});
|
||||
|
||||
NotifyOnDrop
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for NotifyOnDrop {
|
||||
fn drop(&mut self) {
|
||||
NOTIFY_DROPPED.with(|bool| {
|
||||
if let Some(b) = bool.borrow_mut().as_mut() {
|
||||
*b = true;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -287,14 +325,67 @@ impl DateService {
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
// Test modifying the date from within the closure
|
||||
// passed to `set_date`
|
||||
#[test]
|
||||
fn test_evil_date() {
|
||||
let service = DateService::new();
|
||||
// Make sure that `check_date` doesn't try to spawn a task
|
||||
service.0.update();
|
||||
service.set_date(|_| service.0.reset());
|
||||
use actix_rt::{task::yield_now, time::sleep};
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_date_service_update() {
|
||||
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
|
||||
|
||||
yield_now().await;
|
||||
|
||||
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf1);
|
||||
let now1 = settings.now();
|
||||
|
||||
sleep_until(Instant::now() + Duration::from_secs(2)).await;
|
||||
yield_now().await;
|
||||
|
||||
let now2 = settings.now();
|
||||
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf2);
|
||||
|
||||
assert_ne!(now1, now2);
|
||||
|
||||
assert_ne!(buf1, buf2);
|
||||
|
||||
drop(settings);
|
||||
|
||||
// Ensure the task will drop eventually
|
||||
let mut times = 0;
|
||||
while !notify_on_drop::is_dropped() {
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
times += 1;
|
||||
assert!(times < 10, "Timeout waiting for task drop");
|
||||
}
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_date_service_drop() {
|
||||
let service = Rc::new(DateService::new());
|
||||
|
||||
// yield so the date service has a chance to register the spawned timer update task.
|
||||
yield_now().await;
|
||||
|
||||
let clone1 = service.clone();
|
||||
let clone2 = service.clone();
|
||||
let clone3 = service.clone();
|
||||
|
||||
drop(clone1);
|
||||
assert!(!notify_on_drop::is_dropped());
|
||||
drop(clone2);
|
||||
assert!(!notify_on_drop::is_dropped());
|
||||
drop(clone3);
|
||||
assert!(!notify_on_drop::is_dropped());
|
||||
|
||||
drop(service);
|
||||
|
||||
// Ensure the task will drop eventually
|
||||
let mut times = 0;
|
||||
while !notify_on_drop::is_dropped() {
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
times += 1;
|
||||
assert!(times < 10, "Timeout waiting for task drop");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -1,25 +1,41 @@
|
||||
use std::future::Future;
|
||||
use std::io::{self, Write};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
//! Stream decoders.
|
||||
|
||||
use actix_threadpool::{run, CpuFuture};
|
||||
use brotli2::write::BrotliDecoder;
|
||||
use std::{
|
||||
future::Future,
|
||||
io::{self, Write as _},
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use actix_rt::task::{spawn_blocking, JoinHandle};
|
||||
use bytes::Bytes;
|
||||
use flate2::write::{GzDecoder, ZlibDecoder};
|
||||
use futures_core::{ready, Stream};
|
||||
|
||||
use super::Writer;
|
||||
use crate::error::PayloadError;
|
||||
use crate::http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING};
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
use brotli2::write::BrotliDecoder;
|
||||
|
||||
const INPLACE: usize = 2049;
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
use flate2::write::{GzDecoder, ZlibDecoder};
|
||||
|
||||
pub struct Decoder<S> {
|
||||
decoder: Option<ContentDecoder>,
|
||||
stream: S,
|
||||
eof: bool,
|
||||
fut: Option<CpuFuture<(Option<Bytes>, ContentDecoder), io::Error>>,
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
use zstd::stream::write::Decoder as ZstdDecoder;
|
||||
|
||||
use crate::{
|
||||
encoding::Writer,
|
||||
error::{BlockingError, PayloadError},
|
||||
header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
|
||||
};
|
||||
|
||||
const MAX_CHUNK_SIZE_DECODE_IN_PLACE: usize = 2049;
|
||||
|
||||
pin_project_lite::pin_project! {
|
||||
pub struct Decoder<S> {
|
||||
decoder: Option<ContentDecoder>,
|
||||
#[pin]
|
||||
stream: S,
|
||||
eof: bool,
|
||||
fut: Option<JoinHandle<Result<(Option<Bytes>, ContentDecoder), io::Error>>>,
|
||||
}
|
||||
}
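
The `fut: Option<JoinHandle<...>>` field above replaces the old `CpuFuture` from `actix_threadpool`: chunks that are too large to decode in place are handed to `spawn_blocking`, and the stored handle is polled from `poll_next`, where a `JoinError` is mapped to `BlockingError`. A rough standalone sketch of that offloading pattern follows; the `offload` helper and its body are made up for illustration, only the `spawn_blocking`/`JoinHandle` API mirrors what the imports above actually use.

    use actix_rt::task::{spawn_blocking, JoinHandle};

    fn offload(mut value: Vec<u8>) -> JoinHandle<Result<Vec<u8>, std::io::Error>> {
        spawn_blocking(move || {
            // CPU-heavy work runs on the blocking thread pool, off the async executor
            value.reverse();
            Ok(value)
        })
    }

    #[actix_rt::main]
    async fn main() -> std::io::Result<()> {
        let handle = offload(vec![1, 2, 3]);
        // awaiting the handle yields Result<T, JoinError>; the decoder maps that error to BlockingError
        let out = handle.await.expect("blocking task panicked or was cancelled")?;
        assert_eq!(out, vec![3, 2, 1]);
        Ok(())
    }
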
|
||||
|
||||
impl<S> Decoder<S>
|
||||
@@ -30,17 +46,28 @@ where
|
||||
#[inline]
|
||||
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
|
||||
let decoder = match encoding {
|
||||
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(
|
||||
BrotliDecoder::new(Writer::new()),
|
||||
))),
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(BrotliDecoder::new(
|
||||
Writer::new(),
|
||||
)))),
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
|
||||
ZlibDecoder::new(Writer::new()),
|
||||
))),
|
||||
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(
|
||||
GzDecoder::new(Writer::new()),
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new(
|
||||
Writer::new(),
|
||||
)))),
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new(
|
||||
ZstdDecoder::new(Writer::new()).expect(
|
||||
"Failed to create zstd decoder. This is a bug. \
|
||||
Please report it to the actix-web repository.",
|
||||
),
|
||||
))),
|
||||
_ => None,
|
||||
};
|
||||
|
||||
Decoder {
|
||||
decoder,
|
||||
stream,
|
||||
@@ -53,15 +80,11 @@ where
|
||||
#[inline]
|
||||
pub fn from_headers(stream: S, headers: &HeaderMap) -> Decoder<S> {
|
||||
// check content-encoding
|
||||
let encoding = if let Some(enc) = headers.get(&CONTENT_ENCODING) {
|
||||
if let Ok(enc) = enc.to_str() {
|
||||
ContentEncoding::from(enc)
|
||||
} else {
|
||||
ContentEncoding::Identity
|
||||
}
|
||||
} else {
|
||||
ContentEncoding::Identity
|
||||
};
|
||||
let encoding = headers
|
||||
.get(&CONTENT_ENCODING)
|
||||
.and_then(|val| val.to_str().ok())
|
||||
.and_then(|x| x.parse().ok())
|
||||
.unwrap_or(ContentEncoding::Identity);
|
||||
|
||||
Self::new(stream, encoding)
|
||||
}
|
||||
@@ -69,55 +92,59 @@ where
|
||||
|
||||
impl<S> Stream for Decoder<S>
|
||||
where
|
||||
S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
|
||||
S: Stream<Item = Result<Bytes, PayloadError>>,
|
||||
{
|
||||
type Item = Result<Bytes, PayloadError>;
|
||||
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let mut this = self.project();
|
||||
|
||||
loop {
|
||||
if let Some(ref mut fut) = self.fut {
|
||||
let (chunk, decoder) = match ready!(Pin::new(fut).poll(cx)) {
|
||||
Ok(item) => item,
|
||||
Err(e) => return Poll::Ready(Some(Err(e.into()))),
|
||||
};
|
||||
self.decoder = Some(decoder);
|
||||
self.fut.take();
|
||||
if let Some(ref mut fut) = this.fut {
|
||||
let (chunk, decoder) =
|
||||
ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
|
||||
|
||||
*this.decoder = Some(decoder);
|
||||
this.fut.take();
|
||||
|
||||
if let Some(chunk) = chunk {
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
}
|
||||
|
||||
if self.eof {
|
||||
if *this.eof {
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
|
||||
match Pin::new(&mut self.stream).poll_next(cx) {
|
||||
Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))),
|
||||
Poll::Ready(Some(Ok(chunk))) => {
|
||||
if let Some(mut decoder) = self.decoder.take() {
|
||||
if chunk.len() < INPLACE {
|
||||
match ready!(this.stream.as_mut().poll_next(cx)) {
|
||||
Some(Err(err)) => return Poll::Ready(Some(Err(err))),
|
||||
|
||||
Some(Ok(chunk)) => {
|
||||
if let Some(mut decoder) = this.decoder.take() {
|
||||
if chunk.len() < MAX_CHUNK_SIZE_DECODE_IN_PLACE {
|
||||
let chunk = decoder.feed_data(chunk)?;
|
||||
self.decoder = Some(decoder);
|
||||
*this.decoder = Some(decoder);
|
||||
|
||||
if let Some(chunk) = chunk {
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
} else {
|
||||
self.fut = Some(run(move || {
|
||||
*this.fut = Some(spawn_blocking(move || {
|
||||
let chunk = decoder.feed_data(chunk)?;
|
||||
Ok((chunk, decoder))
|
||||
}));
|
||||
}
|
||||
|
||||
continue;
|
||||
} else {
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
}
|
||||
Poll::Ready(None) => {
|
||||
self.eof = true;
|
||||
return if let Some(mut decoder) = self.decoder.take() {
|
||||
|
||||
None => {
|
||||
*this.eof = true;
|
||||
|
||||
return if let Some(mut decoder) = this.decoder.take() {
|
||||
match decoder.feed_eof() {
|
||||
Ok(Some(res)) => Poll::Ready(Some(Ok(res))),
|
||||
Ok(None) => Poll::Ready(None),
|
||||
@@ -127,25 +154,32 @@ where
|
||||
Poll::Ready(None)
|
||||
};
|
||||
}
|
||||
Poll::Pending => break,
|
||||
}
|
||||
}
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
enum ContentDecoder {
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
Deflate(Box<ZlibDecoder<Writer>>),
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
Gzip(Box<GzDecoder<Writer>>),
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
Br(Box<BrotliDecoder<Writer>>),
|
||||
// We need an explicit 'static lifetime here because ZstdDecoder needs a lifetime
// argument, and we use `spawn_blocking` in `Decoder::poll_next`, which requires `FnOnce() -> R + Send + 'static`.
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
Zstd(Box<ZstdDecoder<'static, Writer>>),
|
||||
}
|
||||
|
||||
impl ContentDecoder {
|
||||
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
|
||||
match self {
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
|
||||
Ok(()) => {
|
||||
let b = decoder.get_mut().take();
|
||||
|
||||
if !b.is_empty() {
|
||||
Ok(Some(b))
|
||||
} else {
|
||||
@@ -154,7 +188,23 @@ impl ContentDecoder {
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
|
||||
Ok(_) => {
|
||||
let b = decoder.get_mut().take();
|
||||
|
||||
if !b.is_empty() {
|
||||
Ok(Some(b))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
|
||||
Ok(_) => {
|
||||
let b = decoder.get_mut().take();
|
||||
if !b.is_empty() {
|
||||
@@ -165,7 +215,9 @@ impl ContentDecoder {
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
|
||||
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
ContentDecoder::Zstd(ref mut decoder) => match decoder.flush() {
|
||||
Ok(_) => {
|
||||
let b = decoder.get_mut().take();
|
||||
if !b.is_empty() {
|
||||
@@ -181,10 +233,12 @@ impl ContentDecoder {
|
||||
|
||||
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
|
||||
match self {
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
|
||||
Ok(_) => {
|
||||
decoder.flush()?;
|
||||
let b = decoder.get_mut().take();
|
||||
|
||||
if !b.is_empty() {
|
||||
Ok(Some(b))
|
||||
} else {
|
||||
@@ -193,9 +247,27 @@ impl ContentDecoder {
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
|
||||
Ok(_) => {
|
||||
decoder.flush()?;
|
||||
let b = decoder.get_mut().take();
|
||||
|
||||
if !b.is_empty() {
|
||||
Ok(Some(b))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
|
||||
Ok(_) => {
|
||||
decoder.flush()?;
|
||||
|
||||
let b = decoder.get_mut().take();
|
||||
if !b.is_empty() {
|
||||
Ok(Some(b))
|
||||
@@ -205,9 +277,12 @@ impl ContentDecoder {
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
},
|
||||
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
|
||||
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
ContentDecoder::Zstd(ref mut decoder) => match decoder.write_all(&data) {
|
||||
Ok(_) => {
|
||||
decoder.flush()?;
|
||||
|
||||
let b = decoder.get_mut().take();
|
||||
if !b.is_empty() {
|
||||
Ok(Some(b))
|
||||
|
||||
@@ -1,133 +1,173 @@
|
||||
//! Stream encoder
|
||||
use std::future::Future;
|
||||
use std::io::{self, Write};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
//! Stream encoders.
|
||||
|
||||
use actix_threadpool::{run, CpuFuture};
|
||||
use brotli2::write::BrotliEncoder;
|
||||
use std::{
|
||||
error::Error as StdError,
|
||||
future::Future,
|
||||
io::{self, Write as _},
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use actix_rt::task::{spawn_blocking, JoinHandle};
|
||||
use bytes::Bytes;
|
||||
use flate2::write::{GzEncoder, ZlibEncoder};
|
||||
use derive_more::Display;
|
||||
use futures_core::ready;
|
||||
use pin_project::pin_project;
|
||||
use pin_project_lite::pin_project;
|
||||
|
||||
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
|
||||
use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
|
||||
use crate::http::{HeaderValue, StatusCode};
|
||||
use crate::{Error, ResponseHead};
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
use brotli2::write::BrotliEncoder;
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
use flate2::write::{GzEncoder, ZlibEncoder};
|
||||
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
use zstd::stream::write::Encoder as ZstdEncoder;
|
||||
|
||||
use super::Writer;
|
||||
use crate::{
|
||||
body::{self, BodySize, MessageBody},
|
||||
error::BlockingError,
|
||||
header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING},
|
||||
ResponseHead, StatusCode,
|
||||
};
|
||||
|
||||
const INPLACE: usize = 1024;
|
||||
const MAX_CHUNK_SIZE_ENCODE_IN_PLACE: usize = 1024;
|
||||
|
||||
#[pin_project]
|
||||
pub struct Encoder<B> {
|
||||
eof: bool,
|
||||
#[pin]
|
||||
body: EncoderBody<B>,
|
||||
encoder: Option<ContentEncoder>,
|
||||
fut: Option<CpuFuture<ContentEncoder, io::Error>>,
|
||||
pin_project! {
|
||||
pub struct Encoder<B> {
|
||||
#[pin]
|
||||
body: EncoderBody<B>,
|
||||
encoder: Option<ContentEncoder>,
|
||||
fut: Option<JoinHandle<Result<ContentEncoder, io::Error>>>,
|
||||
eof: bool,
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: MessageBody> Encoder<B> {
|
||||
pub fn response(
|
||||
encoding: ContentEncoding,
|
||||
head: &mut ResponseHead,
|
||||
body: ResponseBody<B>,
|
||||
) -> ResponseBody<Encoder<B>> {
|
||||
fn none() -> Self {
|
||||
Encoder {
|
||||
body: EncoderBody::None {
|
||||
body: body::None::new(),
|
||||
},
|
||||
encoder: None,
|
||||
fut: None,
|
||||
eof: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self {
|
||||
let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|
||||
|| head.status == StatusCode::SWITCHING_PROTOCOLS
|
||||
|| head.status == StatusCode::NO_CONTENT
|
||||
|| encoding == ContentEncoding::Identity
|
||||
|| encoding == ContentEncoding::Auto);
|
||||
|
||||
let body = match body {
|
||||
ResponseBody::Other(b) => match b {
|
||||
Body::None => return ResponseBody::Other(Body::None),
|
||||
Body::Empty => return ResponseBody::Other(Body::Empty),
|
||||
Body::Bytes(buf) => {
|
||||
if can_encode {
|
||||
EncoderBody::Bytes(buf)
|
||||
} else {
|
||||
return ResponseBody::Other(Body::Bytes(buf));
|
||||
}
|
||||
}
|
||||
Body::Message(stream) => EncoderBody::BoxedStream(stream),
|
||||
},
|
||||
ResponseBody::Body(stream) => EncoderBody::Stream(stream),
|
||||
// no need to compress an empty body
|
||||
if matches!(body.size(), BodySize::None) {
|
||||
return Self::none();
|
||||
}
|
||||
|
||||
let body = match body.try_into_bytes() {
|
||||
Ok(body) => EncoderBody::Full { body },
|
||||
Err(body) => EncoderBody::Stream { body },
|
||||
};
|
||||
|
||||
if can_encode {
|
||||
// Modify response body only if encoder is not None
|
||||
// Modify response body only if encoder is set
|
||||
if let Some(enc) = ContentEncoder::encoder(encoding) {
|
||||
update_head(encoding, head);
|
||||
head.no_chunking(false);
|
||||
return ResponseBody::Body(Encoder {
|
||||
|
||||
return Encoder {
|
||||
body,
|
||||
eof: false,
|
||||
fut: None,
|
||||
encoder: Some(enc),
|
||||
});
|
||||
fut: None,
|
||||
eof: false,
|
||||
};
|
||||
}
|
||||
}
|
||||
ResponseBody::Body(Encoder {
|
||||
|
||||
Encoder {
|
||||
body,
|
||||
eof: false,
|
||||
fut: None,
|
||||
encoder: None,
|
||||
})
|
||||
fut: None,
|
||||
eof: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project(project = EncoderBodyProj)]
|
||||
enum EncoderBody<B> {
|
||||
Bytes(Bytes),
|
||||
Stream(#[pin] B),
|
||||
BoxedStream(Box<dyn MessageBody + Unpin>),
|
||||
pin_project! {
|
||||
#[project = EncoderBodyProj]
|
||||
enum EncoderBody<B> {
|
||||
None { body: body::None },
|
||||
Full { body: Bytes },
|
||||
Stream { #[pin] body: B },
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: MessageBody> MessageBody for EncoderBody<B> {
|
||||
impl<B> MessageBody for EncoderBody<B>
|
||||
where
|
||||
B: MessageBody,
|
||||
{
|
||||
type Error = EncoderError;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
match self {
|
||||
EncoderBody::Bytes(ref b) => b.size(),
|
||||
EncoderBody::Stream(ref b) => b.size(),
|
||||
EncoderBody::BoxedStream(ref b) => b.size(),
|
||||
EncoderBody::None { body } => body.size(),
|
||||
EncoderBody::Full { body } => body.size(),
|
||||
EncoderBody::Stream { body } => body.size(),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
match self.project() {
|
||||
EncoderBodyProj::Bytes(b) => {
|
||||
if b.is_empty() {
|
||||
Poll::Ready(None)
|
||||
} else {
|
||||
Poll::Ready(Some(Ok(std::mem::take(b))))
|
||||
}
|
||||
EncoderBodyProj::None { body } => {
|
||||
Pin::new(body).poll_next(cx).map_err(|err| match err {})
|
||||
}
|
||||
EncoderBodyProj::Stream(b) => b.poll_next(cx),
|
||||
EncoderBodyProj::BoxedStream(ref mut b) => {
|
||||
Pin::new(b.as_mut()).poll_next(cx)
|
||||
EncoderBodyProj::Full { body } => {
|
||||
Pin::new(body).poll_next(cx).map_err(|err| match err {})
|
||||
}
|
||||
EncoderBodyProj::Stream { body } => body
|
||||
.poll_next(cx)
|
||||
.map_err(|err| EncoderError::Body(err.into())),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(self) -> Result<Bytes, Self>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
match self {
|
||||
EncoderBody::None { body } => Ok(body.try_into_bytes().unwrap()),
|
||||
EncoderBody::Full { body } => Ok(body.try_into_bytes().unwrap()),
|
||||
_ => Err(self),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: MessageBody> MessageBody for Encoder<B> {
|
||||
impl<B> MessageBody for Encoder<B>
|
||||
where
|
||||
B: MessageBody,
|
||||
{
|
||||
type Error = EncoderError;
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> BodySize {
|
||||
if self.encoder.is_none() {
|
||||
self.body.size()
|
||||
} else {
|
||||
if self.encoder.is_some() {
|
||||
BodySize::Stream
|
||||
} else {
|
||||
self.body.size()
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Option<Result<Bytes, Error>>> {
|
||||
) -> Poll<Option<Result<Bytes, Self::Error>>> {
|
||||
let mut this = self.project();
|
||||
loop {
|
||||
if *this.eof {
|
||||
@@ -135,32 +175,36 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
|
||||
}
|
||||
|
||||
if let Some(ref mut fut) = this.fut {
|
||||
let mut encoder = match ready!(Pin::new(fut).poll(cx)) {
|
||||
Ok(item) => item,
|
||||
Err(e) => return Poll::Ready(Some(Err(e.into()))),
|
||||
};
|
||||
let mut encoder = ready!(Pin::new(fut).poll(cx))
|
||||
.map_err(|_| EncoderError::Blocking(BlockingError))?
|
||||
.map_err(EncoderError::Io)?;
|
||||
|
||||
let chunk = encoder.take();
|
||||
*this.encoder = Some(encoder);
|
||||
this.fut.take();
|
||||
|
||||
if !chunk.is_empty() {
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
}
|
||||
|
||||
let result = this.body.as_mut().poll_next(cx);
|
||||
let result = ready!(this.body.as_mut().poll_next(cx));
|
||||
|
||||
match result {
|
||||
Poll::Ready(Some(Ok(chunk))) => {
|
||||
Some(Err(err)) => return Poll::Ready(Some(Err(err))),
|
||||
|
||||
Some(Ok(chunk)) => {
|
||||
if let Some(mut encoder) = this.encoder.take() {
|
||||
if chunk.len() < INPLACE {
|
||||
encoder.write(&chunk)?;
|
||||
if chunk.len() < MAX_CHUNK_SIZE_ENCODE_IN_PLACE {
|
||||
encoder.write(&chunk).map_err(EncoderError::Io)?;
|
||||
let chunk = encoder.take();
|
||||
*this.encoder = Some(encoder);
|
||||
|
||||
if !chunk.is_empty() {
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
} else {
|
||||
*this.fut = Some(run(move || {
|
||||
*this.fut = Some(spawn_blocking(move || {
|
||||
encoder.write(&chunk)?;
|
||||
Ok(encoder)
|
||||
}));
|
||||
@@ -169,9 +213,11 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
|
||||
return Poll::Ready(Some(Ok(chunk)));
|
||||
}
|
||||
}
|
||||
Poll::Ready(None) => {
|
||||
|
||||
None => {
|
||||
if let Some(encoder) = this.encoder.take() {
|
||||
let chunk = encoder.finish()?;
|
||||
let chunk = encoder.finish().map_err(EncoderError::Io)?;
|
||||
|
||||
if chunk.is_empty() {
|
||||
return Poll::Ready(None);
|
||||
} else {
|
||||
@@ -182,7 +228,24 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
}
|
||||
val => return val,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn try_into_bytes(mut self) -> Result<Bytes, Self>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
if self.encoder.is_some() {
|
||||
Err(self)
|
||||
} else {
|
||||
match self.body.try_into_bytes() {
|
||||
Ok(body) => Ok(body),
|
||||
Err(body) => {
|
||||
self.body = body;
|
||||
Err(self)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -190,31 +253,55 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
|
||||
|
||||
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
|
||||
head.headers_mut().insert(
|
||||
CONTENT_ENCODING,
|
||||
header::CONTENT_ENCODING,
|
||||
HeaderValue::from_static(encoding.as_str()),
|
||||
);
|
||||
|
||||
head.no_chunking(false);
|
||||
}
|
||||
|
||||
enum ContentEncoder {
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
Deflate(ZlibEncoder<Writer>),
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
Gzip(GzEncoder<Writer>),
|
||||
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
Br(BrotliEncoder<Writer>),
|
||||
|
||||
// We need an explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we
// use `spawn_blocking` in `Encoder::poll_next`, which requires `FnOnce() -> R + Send + 'static`.
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
Zstd(ZstdEncoder<'static, Writer>),
|
||||
}
|
||||
|
||||
impl ContentEncoder {
|
||||
fn encoder(encoding: ContentEncoding) -> Option<Self> {
|
||||
match encoding {
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
|
||||
Writer::new(),
|
||||
flate2::Compression::fast(),
|
||||
))),
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new(
|
||||
Writer::new(),
|
||||
flate2::Compression::fast(),
|
||||
))),
|
||||
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
ContentEncoding::Br => {
|
||||
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
|
||||
}
|
||||
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
ContentEncoding::Zstd => {
|
||||
let encoder = ZstdEncoder::new(Writer::new(), 3).ok()?;
|
||||
Some(ContentEncoder::Zstd(encoder))
|
||||
}
|
||||
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
@@ -222,31 +309,51 @@ impl ContentEncoder {
|
||||
#[inline]
|
||||
pub(crate) fn take(&mut self) -> Bytes {
|
||||
match *self {
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
|
||||
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
ContentEncoder::Zstd(ref mut encoder) => encoder.get_mut().take(),
|
||||
}
|
||||
}
|
||||
|
||||
fn finish(self) -> Result<Bytes, io::Error> {
|
||||
match self {
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
ContentEncoder::Br(encoder) => match encoder.finish() {
|
||||
Ok(writer) => Ok(writer.buf.freeze()),
|
||||
Err(err) => Err(err),
|
||||
},
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoder::Gzip(encoder) => match encoder.finish() {
|
||||
Ok(writer) => Ok(writer.buf.freeze()),
|
||||
Err(err) => Err(err),
|
||||
},
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoder::Deflate(encoder) => match encoder.finish() {
|
||||
Ok(writer) => Ok(writer.buf.freeze()),
|
||||
Err(err) => Err(err),
|
||||
},
|
||||
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
ContentEncoder::Zstd(encoder) => match encoder.finish() {
|
||||
Ok(writer) => Ok(writer.buf.freeze()),
|
||||
Err(err) => Err(err),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
|
||||
match *self {
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
@@ -254,6 +361,8 @@ impl ContentEncoder {
|
||||
Err(err)
|
||||
}
|
||||
},
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
@@ -261,6 +370,8 @@ impl ContentEncoder {
|
||||
Err(err)
|
||||
}
|
||||
},
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
@@ -268,6 +379,44 @@ impl ContentEncoder {
|
||||
Err(err)
|
||||
}
|
||||
},
|
||||
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
ContentEncoder::Zstd(ref mut encoder) => match encoder.write_all(data) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
trace!("Error decoding ztsd encoding: {}", err);
|
||||
Err(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Display)]
|
||||
#[non_exhaustive]
|
||||
pub enum EncoderError {
|
||||
#[display(fmt = "body")]
|
||||
Body(Box<dyn StdError>),
|
||||
|
||||
#[display(fmt = "blocking")]
|
||||
Blocking(BlockingError),
|
||||
|
||||
#[display(fmt = "io")]
|
||||
Io(io::Error),
|
||||
}
|
||||
|
||||
impl StdError for EncoderError {
|
||||
fn source(&self) -> Option<&(dyn StdError + 'static)> {
|
||||
match self {
|
||||
EncoderError::Body(err) => Some(&**err),
|
||||
EncoderError::Blocking(err) => Some(err),
|
||||
EncoderError::Io(err) => Some(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<EncoderError> for crate::Error {
|
||||
fn from(err: EncoderError) -> Self {
|
||||
crate::Error::new_encoder().with_cause(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
//! Content-Encoding support
|
||||
//! Content-Encoding support.
|
||||
|
||||
use std::io;
|
||||
|
||||
use bytes::{Bytes, BytesMut};
|
||||
@@ -9,6 +10,9 @@ mod encoder;
|
||||
pub use self::decoder::Decoder;
|
||||
pub use self::encoder::Encoder;
|
||||
|
||||
/// Special-purpose writer for streaming (de-)compression.
|
||||
///
|
||||
/// Pre-allocates 8KiB of capacity.
|
||||
pub(self) struct Writer {
|
||||
buf: BytesMut,
|
||||
}
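
The encoder and decoder above repeatedly call `get_mut().take()` on this writer to pull out whatever the compressor has produced so far. A minimal sketch of how such a writer can satisfy `io::Write` over a `BytesMut` is shown below; the `take` helper and the exact capacity are assumptions based on the doc comment, not a verbatim copy of the implementation.

    use std::io;

    use bytes::{Bytes, BytesMut};

    struct Writer {
        buf: BytesMut,
    }

    impl Writer {
        fn new() -> Writer {
            // pre-allocate 8KiB, as the doc comment above states
            Writer { buf: BytesMut::with_capacity(8192) }
        }

        /// Split off everything written so far as an immutable `Bytes` chunk.
        fn take(&mut self) -> Bytes {
            self.buf.split().freeze()
        }
    }

    impl io::Write for Writer {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            // the (de)compressor's output is buffered here until `take` is called
            self.buf.extend_from_slice(buf);
            Ok(buf.len())
        }

        fn flush(&mut self) -> io::Result<()> {
            Ok(())
        }
    }

    fn main() -> io::Result<()> {
        use io::Write as _;
        let mut w = Writer::new();
        w.write_all(b"compressed bytes would land here")?;
        assert_eq!(w.take(), Bytes::from_static(b"compressed bytes would land here"));
        Ok(())
    }
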
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,66 +1,127 @@
|
||||
use std::any::{Any, TypeId};
|
||||
use std::fmt;
|
||||
use std::{
|
||||
any::{Any, TypeId},
|
||||
fmt,
|
||||
};
|
||||
|
||||
use fxhash::FxHashMap;
|
||||
use ahash::AHashMap;
|
||||
|
||||
/// A type map for request extensions.
|
||||
///
|
||||
/// All entries into this map must be owned types (or static references).
|
||||
#[derive(Default)]
|
||||
/// A type map of request extensions.
|
||||
pub struct Extensions {
|
||||
/// Use FxHasher with a std HashMap for faster
/// lookups on the small `TypeId` (u64 equivalent) keys.
|
||||
map: FxHashMap<TypeId, Box<dyn Any>>,
|
||||
/// Use AHasher with a std HashMap for faster lookups on the small `TypeId` keys.
|
||||
map: AHashMap<TypeId, Box<dyn Any>>,
|
||||
}
|
||||
|
||||
impl Extensions {
|
||||
/// Create an empty `Extensions`.
|
||||
/// Creates an empty `Extensions`.
|
||||
#[inline]
|
||||
pub fn new() -> Extensions {
|
||||
Extensions {
|
||||
map: FxHashMap::default(),
|
||||
map: AHashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert a type into this `Extensions`.
|
||||
/// Insert an item into the map.
|
||||
///
|
||||
/// If an extension of this type already existed, it will
/// be returned.
|
||||
pub fn insert<T: 'static>(&mut self, val: T) {
|
||||
self.map.insert(TypeId::of::<T>(), Box::new(val));
|
||||
/// If an item of this type was already stored, it will be replaced and returned.
|
||||
///
|
||||
/// ```
|
||||
/// # use actix_http::Extensions;
|
||||
/// let mut map = Extensions::new();
|
||||
/// assert_eq!(map.insert(""), None);
|
||||
/// assert_eq!(map.insert(1u32), None);
|
||||
/// assert_eq!(map.insert(2u32), Some(1u32));
|
||||
/// assert_eq!(*map.get::<u32>().unwrap(), 2u32);
|
||||
/// ```
|
||||
pub fn insert<T: 'static>(&mut self, val: T) -> Option<T> {
|
||||
self.map
|
||||
.insert(TypeId::of::<T>(), Box::new(val))
|
||||
.and_then(downcast_owned)
|
||||
}
|
||||
|
||||
/// Check if container contains entry
|
||||
/// Check if map contains an item of a given type.
|
||||
///
|
||||
/// ```
|
||||
/// # use actix_http::Extensions;
|
||||
/// let mut map = Extensions::new();
|
||||
/// assert!(!map.contains::<u32>());
|
||||
///
|
||||
/// assert_eq!(map.insert(1u32), None);
|
||||
/// assert!(map.contains::<u32>());
|
||||
/// ```
|
||||
pub fn contains<T: 'static>(&self) -> bool {
|
||||
self.map.contains_key(&TypeId::of::<T>())
|
||||
}
|
||||
|
||||
/// Get a reference to a type previously inserted on this `Extensions`.
|
||||
/// Get a reference to an item of a given type.
|
||||
///
|
||||
/// ```
|
||||
/// # use actix_http::Extensions;
|
||||
/// let mut map = Extensions::new();
|
||||
/// map.insert(1u32);
|
||||
/// assert_eq!(map.get::<u32>(), Some(&1u32));
|
||||
/// ```
|
||||
pub fn get<T: 'static>(&self) -> Option<&T> {
|
||||
self.map
|
||||
.get(&TypeId::of::<T>())
|
||||
.and_then(|boxed| boxed.downcast_ref())
|
||||
}
|
||||
|
||||
/// Get a mutable reference to a type previously inserted on this `Extensions`.
|
||||
/// Get a mutable reference to an item of a given type.
|
||||
///
|
||||
/// ```
|
||||
/// # use actix_http::Extensions;
|
||||
/// let mut map = Extensions::new();
|
||||
/// map.insert(1u32);
|
||||
/// assert_eq!(map.get_mut::<u32>(), Some(&mut 1u32));
|
||||
/// ```
|
||||
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
|
||||
self.map
|
||||
.get_mut(&TypeId::of::<T>())
|
||||
.and_then(|boxed| boxed.downcast_mut())
|
||||
}
|
||||
|
||||
/// Remove a type from this `Extensions`.
|
||||
/// Remove an item from the map of a given type.
|
||||
///
|
||||
/// If an extension of this type existed, it will be returned.
|
||||
/// If an item of this type was already stored, it will be returned.
|
||||
///
|
||||
/// ```
|
||||
/// # use actix_http::Extensions;
|
||||
/// let mut map = Extensions::new();
|
||||
///
|
||||
/// map.insert(1u32);
|
||||
/// assert_eq!(map.get::<u32>(), Some(&1u32));
|
||||
///
|
||||
/// assert_eq!(map.remove::<u32>(), Some(1u32));
|
||||
/// assert!(!map.contains::<u32>());
|
||||
/// ```
|
||||
pub fn remove<T: 'static>(&mut self) -> Option<T> {
|
||||
self.map
|
||||
.remove(&TypeId::of::<T>())
|
||||
.and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
|
||||
self.map.remove(&TypeId::of::<T>()).and_then(downcast_owned)
|
||||
}
|
||||
|
||||
/// Clear the `Extensions` of all inserted extensions.
|
||||
///
|
||||
/// ```
|
||||
/// # use actix_http::Extensions;
|
||||
/// let mut map = Extensions::new();
|
||||
///
|
||||
/// map.insert(1u32);
|
||||
/// assert!(map.contains::<u32>());
|
||||
///
|
||||
/// map.clear();
|
||||
/// assert!(!map.contains::<u32>());
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn clear(&mut self) {
|
||||
self.map.clear();
|
||||
}
|
||||
|
||||
/// Extends self with the items from another `Extensions`.
|
||||
pub fn extend(&mut self, other: Extensions) {
|
||||
self.map.extend(other.map);
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Extensions {
|
||||
@@ -69,6 +130,10 @@ impl fmt::Debug for Extensions {
|
||||
}
|
||||
}
|
||||
|
||||
fn downcast_owned<T: 'static>(boxed: Box<dyn Any>) -> Option<T> {
|
||||
boxed.downcast().ok().map(|boxed| *boxed)
|
||||
}
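
As a small aside on why `insert` and `remove` can hand back the owned value: `Box<dyn Any>::downcast` either returns the concrete box or gives the original box back on a type mismatch, so the helper above turns that into an `Option`. A tiny illustrative check (standalone, not part of the patch):

    use std::any::Any;

    fn downcast_owned<T: 'static>(boxed: Box<dyn Any>) -> Option<T> {
        boxed.downcast().ok().map(|boxed| *boxed)
    }

    fn main() {
        let boxed: Box<dyn Any> = Box::new(42u32);
        assert_eq!(downcast_owned::<u32>(boxed), Some(42));

        // a type mismatch yields None instead of panicking
        let boxed: Box<dyn Any> = Box::new("not a u32");
        assert_eq!(downcast_owned::<u32>(boxed), None);
    }
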
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -108,6 +173,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_integers() {
|
||||
static A: u32 = 8;
|
||||
|
||||
let mut map = Extensions::new();
|
||||
|
||||
map.insert::<i8>(8);
|
||||
@@ -120,6 +187,7 @@ mod tests {
|
||||
map.insert::<u32>(32);
|
||||
map.insert::<u64>(64);
|
||||
map.insert::<u128>(128);
|
||||
map.insert::<&'static u32>(&A);
|
||||
assert!(map.get::<i8>().is_some());
|
||||
assert!(map.get::<i16>().is_some());
|
||||
assert!(map.get::<i32>().is_some());
|
||||
@@ -130,6 +198,7 @@ mod tests {
|
||||
assert!(map.get::<u32>().is_some());
|
||||
assert!(map.get::<u64>().is_some());
|
||||
assert!(map.get::<u128>().is_some());
|
||||
assert!(map.get::<&'static u32>().is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -178,4 +247,34 @@ mod tests {
|
||||
assert_eq!(extensions.get::<bool>(), None);
|
||||
assert_eq!(extensions.get(), Some(&MyType(10)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extend() {
|
||||
#[derive(Debug, PartialEq)]
|
||||
struct MyType(i32);
|
||||
|
||||
let mut extensions = Extensions::new();
|
||||
|
||||
extensions.insert(5i32);
|
||||
extensions.insert(MyType(10));
|
||||
|
||||
let mut other = Extensions::new();
|
||||
|
||||
other.insert(15i32);
|
||||
other.insert(20u8);
|
||||
|
||||
extensions.extend(other);
|
||||
|
||||
assert_eq!(extensions.get(), Some(&15i32));
|
||||
assert_eq!(extensions.get_mut(), Some(&mut 15i32));
|
||||
|
||||
assert_eq!(extensions.remove::<i32>(), Some(15i32));
|
||||
assert!(extensions.get::<i32>().is_none());
|
||||
|
||||
assert_eq!(extensions.get::<bool>(), None);
|
||||
assert_eq!(extensions.get(), Some(&MyType(10)));
|
||||
|
||||
assert_eq!(extensions.get(), Some(&20u8));
|
||||
assert_eq!(extensions.get_mut(), Some(&mut 20u8));
|
||||
}
|
||||
}
|
||||
|
||||
actix-http/src/h1/chunked.rs (new file, 426 lines)
@@ -0,0 +1,426 @@
|
||||
use std::{io, task::Poll};
|
||||
|
||||
use bytes::{Buf as _, Bytes, BytesMut};
|
||||
|
||||
macro_rules! byte (
|
||||
($rdr:ident) => ({
|
||||
if $rdr.len() > 0 {
|
||||
let b = $rdr[0];
|
||||
$rdr.advance(1);
|
||||
b
|
||||
} else {
|
||||
return Poll::Pending
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub(super) enum ChunkedState {
|
||||
Size,
|
||||
SizeLws,
|
||||
Extension,
|
||||
SizeLf,
|
||||
Body,
|
||||
BodyCr,
|
||||
BodyLf,
|
||||
EndCr,
|
||||
EndLf,
|
||||
End,
|
||||
}
|
||||
|
||||
impl ChunkedState {
|
||||
pub(super) fn step(
|
||||
&self,
|
||||
body: &mut BytesMut,
|
||||
size: &mut u64,
|
||||
buf: &mut Option<Bytes>,
|
||||
) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
use self::ChunkedState::*;
|
||||
match *self {
|
||||
Size => ChunkedState::read_size(body, size),
|
||||
SizeLws => ChunkedState::read_size_lws(body),
|
||||
Extension => ChunkedState::read_extension(body),
|
||||
SizeLf => ChunkedState::read_size_lf(body, *size),
|
||||
Body => ChunkedState::read_body(body, size, buf),
|
||||
BodyCr => ChunkedState::read_body_cr(body),
|
||||
BodyLf => ChunkedState::read_body_lf(body),
|
||||
EndCr => ChunkedState::read_end_cr(body),
|
||||
EndLf => ChunkedState::read_end_lf(body),
|
||||
End => Poll::Ready(Ok(ChunkedState::End)),
|
||||
}
|
||||
}
|
||||
|
||||
fn read_size(rdr: &mut BytesMut, size: &mut u64) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
let radix = 16;
|
||||
|
||||
let rem = match byte!(rdr) {
|
||||
b @ b'0'..=b'9' => b - b'0',
|
||||
b @ b'a'..=b'f' => b + 10 - b'a',
|
||||
b @ b'A'..=b'F' => b + 10 - b'A',
|
||||
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
|
||||
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
|
||||
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
|
||||
_ => {
|
||||
return Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size line: Invalid Size",
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
match size.checked_mul(radix) {
|
||||
Some(n) => {
|
||||
*size = n as u64;
|
||||
*size += rem as u64;
|
||||
|
||||
Poll::Ready(Ok(ChunkedState::Size))
|
||||
}
|
||||
None => {
|
||||
log::debug!("chunk size would overflow u64");
|
||||
Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size line: Size is too big",
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
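
The `checked_mul` guard above is what the `hrs_chunk_size_overflow` test later in this file exercises: every additional hex digit multiplies the accumulated size by 16, so a 17-digit size line such as `f0000000000000003` cannot fit in a `u64`. A quick standalone check of that arithmetic (illustrative only):

    fn main() {
        // 16 hex digits can still fit: 0x0fff_ffff_ffff_ffff * 16 stays within u64::MAX
        assert_eq!(
            0x0fff_ffff_ffff_ffff_u64.checked_mul(16),
            Some(0xffff_ffff_ffff_fff0)
        );

        // one more significant digit overflows, so the decoder rejects the chunk size
        assert_eq!(0xf000_0000_0000_0000_u64.checked_mul(16), None);
    }
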
|
||||
|
||||
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
// LWS can follow the chunk size, but no more digits can come
|
||||
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
|
||||
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size linear white space",
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
|
||||
// strictly 0x20 (space) should be disallowed but we don't parse quoted strings here
|
||||
0x00..=0x08 | 0x0a..=0x1f | 0x7f => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid character in chunk extension",
|
||||
))),
|
||||
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
|
||||
}
|
||||
}
|
||||
fn read_size_lf(rdr: &mut BytesMut, size: u64) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\n' if size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
|
||||
b'\n' if size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size LF",
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn read_body(
|
||||
rdr: &mut BytesMut,
|
||||
rem: &mut u64,
|
||||
buf: &mut Option<Bytes>,
|
||||
) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
log::trace!("Chunked read, remaining={:?}", rem);
|
||||
|
||||
let len = rdr.len() as u64;
|
||||
if len == 0 {
|
||||
Poll::Ready(Ok(ChunkedState::Body))
|
||||
} else {
|
||||
let slice;
|
||||
if *rem > len {
|
||||
slice = rdr.split().freeze();
|
||||
*rem -= len;
|
||||
} else {
|
||||
slice = rdr.split_to(*rem as usize).freeze();
|
||||
*rem = 0;
|
||||
}
|
||||
*buf = Some(slice);
|
||||
if *rem > 0 {
|
||||
Poll::Ready(Ok(ChunkedState::Body))
|
||||
} else {
|
||||
Poll::Ready(Ok(ChunkedState::BodyCr))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk body CR",
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk body LF",
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk end CR",
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk end LF",
|
||||
))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use actix_codec::Decoder as _;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use http::Method;
|
||||
|
||||
use crate::{
|
||||
error::ParseError,
|
||||
h1::decoder::{MessageDecoder, PayloadItem},
|
||||
HttpMessage as _, Request,
|
||||
};
|
||||
|
||||
macro_rules! parse_ready {
|
||||
($e:expr) => {{
|
||||
match MessageDecoder::<Request>::default().decode($e) {
|
||||
Ok(Some((msg, _))) => msg,
|
||||
Ok(_) => unreachable!("Eof during parsing http request"),
|
||||
Err(err) => unreachable!("Error during parsing http request: {:?}", err),
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! expect_parse_err {
|
||||
($e:expr) => {{
|
||||
match MessageDecoder::<Request>::default().decode($e) {
|
||||
Err(err) => match err {
|
||||
ParseError::Io(_) => unreachable!("Parse error expected"),
|
||||
_ => {}
|
||||
},
|
||||
_ => unreachable!("Error expected"),
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_chunked_payload_chunk_extension() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\
|
||||
\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
assert!(msg.chunked().unwrap());
|
||||
|
||||
buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n")
|
||||
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
|
||||
assert_eq!(chunk, Bytes::from_static(b"data"));
|
||||
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
|
||||
assert_eq!(chunk, Bytes::from_static(b"line"));
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert!(msg.eof());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_request_chunked() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
if let Ok(val) = req.chunked() {
|
||||
assert!(val);
|
||||
} else {
|
||||
unreachable!("Error");
|
||||
}
|
||||
|
||||
// intentional typo in "chunked"
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chnked\r\n\r\n",
|
||||
);
|
||||
expect_parse_err!(&mut buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_chunked_payload() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n",
|
||||
);
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
assert!(req.chunked().unwrap());
|
||||
|
||||
buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n");
|
||||
assert_eq!(
|
||||
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
|
||||
b"data"
|
||||
);
|
||||
assert_eq!(
|
||||
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
|
||||
b"line"
|
||||
);
|
||||
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_chunked_payload_and_next_message() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n",
|
||||
);
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
assert!(req.chunked().unwrap());
|
||||
|
||||
buf.extend(
|
||||
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
|
||||
POST /test2 HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n"
|
||||
.iter(),
|
||||
);
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"data");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"line");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert!(msg.eof());
|
||||
|
||||
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
assert!(req.chunked().unwrap());
|
||||
assert_eq!(*req.method(), Method::POST);
|
||||
assert!(req.chunked().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_chunked_payload_chunks() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
assert!(req.chunked().unwrap());
|
||||
|
||||
buf.extend(b"4\r\n1111\r\n");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"1111");
|
||||
|
||||
buf.extend(b"4\r\ndata\r");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"data");
|
||||
|
||||
buf.extend(b"\n4");
|
||||
assert!(pl.decode(&mut buf).unwrap().is_none());
|
||||
|
||||
buf.extend(b"\r");
|
||||
assert!(pl.decode(&mut buf).unwrap().is_none());
|
||||
buf.extend(b"\n");
|
||||
assert!(pl.decode(&mut buf).unwrap().is_none());
|
||||
|
||||
buf.extend(b"li");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"li");
|
||||
|
||||
//trailers
|
||||
//buf.feed_data("test: test\r\n");
|
||||
//not_ready!(reader.parse(&mut buf, &mut readbuf));
|
||||
|
||||
buf.extend(b"ne\r\n0\r\n");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"ne");
|
||||
assert!(pl.decode(&mut buf).unwrap().is_none());
|
||||
|
||||
buf.extend(b"\r\n");
|
||||
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn chunk_extension_quoted() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
Host: localhost:8080\r\n\
|
||||
Transfer-Encoding: chunked\r\n\
|
||||
\r\n\
|
||||
2;hello=b;one=\"1 2 3\"\r\n\
|
||||
xx",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
|
||||
let chunk = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"xx")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hrs_chunk_extension_invalid() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET / HTTP/1.1\r\n\
|
||||
Host: localhost:8080\r\n\
|
||||
Transfer-Encoding: chunked\r\n\
|
||||
\r\n\
|
||||
2;x\nx\r\n\
|
||||
4c\r\n\
|
||||
0\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
|
||||
let err = pl.decode(&mut buf).unwrap_err();
|
||||
assert!(err
|
||||
.to_string()
|
||||
.contains("Invalid character in chunk extension"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hrs_chunk_size_overflow() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET / HTTP/1.1\r\n\
|
||||
Host: example.com\r\n\
|
||||
Transfer-Encoding: chunked\r\n\
|
||||
\r\n\
|
||||
f0000000000000003\r\n\
|
||||
abc\r\n\
|
||||
0\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
|
||||
let err = pl.decode(&mut buf).unwrap_err();
|
||||
assert!(err
|
||||
.to_string()
|
||||
.contains("Invalid chunk size line: Size is too big"));
|
||||
}
|
||||
}
|
||||
@@ -5,13 +5,15 @@ use bitflags::bitflags;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use http::{Method, Version};
|
||||
|
||||
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
|
||||
use super::{decoder, encoder, reserve_readbuf};
|
||||
use super::{Message, MessageType};
|
||||
use crate::body::BodySize;
|
||||
use crate::config::ServiceConfig;
|
||||
use crate::error::{ParseError, PayloadError};
|
||||
use crate::message::{ConnectionType, RequestHeadType, ResponseHead};
|
||||
use super::{
|
||||
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
|
||||
encoder, reserve_readbuf, Message, MessageType,
|
||||
};
|
||||
use crate::{
|
||||
body::BodySize,
|
||||
error::{ParseError, PayloadError},
|
||||
ConnectionType, RequestHeadType, ResponseHead, ServiceConfig,
|
||||
};
|
||||
|
||||
bitflags! {
|
||||
struct Flags: u8 {
|
||||
@@ -120,7 +122,7 @@ impl Decoder for ClientCodec {
|
||||
debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
|
||||
|
||||
if let Some((req, payload)) = self.inner.decoder.decode(src)? {
|
||||
if let Some(ctype) = req.ctype() {
|
||||
if let Some(ctype) = req.conn_type() {
|
||||
// do not use peer's keep-alive
|
||||
self.inner.ctype = if ctype == ConnectionType::KeepAlive {
|
||||
self.inner.ctype
|
||||
@@ -173,13 +175,12 @@ impl Decoder for ClientPayloadCodec {
|
||||
}
|
||||
}
|
||||
|
||||
impl Encoder for ClientCodec {
|
||||
type Item = Message<(RequestHeadType, BodySize)>;
|
||||
impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
|
||||
type Error = io::Error;
|
||||
|
||||
fn encode(
|
||||
&mut self,
|
||||
item: Self::Item,
|
||||
item: Message<(RequestHeadType, BodySize)>,
|
||||
dst: &mut BytesMut,
|
||||
) -> Result<(), Self::Error> {
|
||||
match item {
|
||||
@@ -224,15 +225,3 @@ impl Encoder for ClientCodec {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Writer<'a>(pub &'a mut BytesMut);
|
||||
|
||||
impl<'a> io::Write for Writer<'a> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.0.extend_from_slice(buf);
|
||||
Ok(buf.len())
|
||||
}
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,15 +5,13 @@ use bitflags::bitflags;
|
||||
use bytes::BytesMut;
|
||||
use http::{Method, Version};
|
||||
|
||||
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
|
||||
use super::{decoder, encoder};
|
||||
use super::{Message, MessageType};
|
||||
use crate::body::BodySize;
|
||||
use crate::config::ServiceConfig;
|
||||
use crate::error::ParseError;
|
||||
use crate::message::ConnectionType;
|
||||
use crate::request::Request;
|
||||
use crate::response::Response;
|
||||
use super::{
|
||||
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
|
||||
encoder, Message, MessageType,
|
||||
};
|
||||
use crate::{
|
||||
body::BodySize, error::ParseError, ConnectionType, Request, Response, ServiceConfig,
|
||||
};
|
||||
|
||||
bitflags! {
|
||||
struct Flags: u8 {
|
||||
@@ -29,7 +27,7 @@ pub struct Codec {
|
||||
decoder: decoder::MessageDecoder<Request>,
|
||||
payload: Option<PayloadDecoder>,
|
||||
version: Version,
|
||||
ctype: ConnectionType,
|
||||
conn_type: ConnectionType,
|
||||
|
||||
// encoder part
|
||||
flags: Flags,
|
||||
@@ -58,37 +56,38 @@ impl Codec {
|
||||
} else {
|
||||
Flags::empty()
|
||||
};
|
||||
|
||||
Codec {
|
||||
config,
|
||||
flags,
|
||||
decoder: decoder::MessageDecoder::default(),
|
||||
payload: None,
|
||||
version: Version::HTTP_11,
|
||||
ctype: ConnectionType::Close,
|
||||
conn_type: ConnectionType::Close,
|
||||
encoder: encoder::MessageEncoder::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if request is upgrade.
|
||||
#[inline]
|
||||
/// Check if request is upgrade
|
||||
pub fn upgrade(&self) -> bool {
|
||||
self.ctype == ConnectionType::Upgrade
|
||||
self.conn_type == ConnectionType::Upgrade
|
||||
}
|
||||
|
||||
/// Check if last response is keep-alive.
|
||||
#[inline]
|
||||
/// Check if last response is keep-alive
|
||||
pub fn keepalive(&self) -> bool {
|
||||
self.ctype == ConnectionType::KeepAlive
|
||||
self.conn_type == ConnectionType::KeepAlive
|
||||
}
|
||||
|
||||
/// Check if keep-alive enabled on server level.
|
||||
#[inline]
|
||||
/// Check if keep-alive enabled on server level
|
||||
pub fn keepalive_enabled(&self) -> bool {
|
||||
self.flags.contains(Flags::KEEPALIVE_ENABLED)
|
||||
}
|
||||
|
||||
/// Check last request's message type.
|
||||
#[inline]
|
||||
/// Check last request's message type
|
||||
pub fn message_type(&self) -> MessageType {
|
||||
if self.flags.contains(Flags::STREAM) {
|
||||
MessageType::Stream
|
||||
@@ -110,8 +109,8 @@ impl Decoder for Codec {
|
||||
type Error = ParseError;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
if self.payload.is_some() {
|
||||
Ok(match self.payload.as_mut().unwrap().decode(src)? {
|
||||
if let Some(ref mut payload) = self.payload {
|
||||
Ok(match payload.decode(src)? {
|
||||
Some(PayloadItem::Chunk(chunk)) => Some(Message::Chunk(Some(chunk))),
|
||||
Some(PayloadItem::Eof) => {
|
||||
self.payload.take();
|
||||
@@ -123,11 +122,11 @@ impl Decoder for Codec {
|
||||
let head = req.head();
|
||||
self.flags.set(Flags::HEAD, head.method == Method::HEAD);
|
||||
self.version = head.version;
|
||||
self.ctype = head.connection_type();
|
||||
if self.ctype == ConnectionType::KeepAlive
|
||||
self.conn_type = head.connection_type();
|
||||
if self.conn_type == ConnectionType::KeepAlive
|
||||
&& !self.flags.contains(Flags::KEEPALIVE_ENABLED)
|
||||
{
|
||||
self.ctype = ConnectionType::Close
|
||||
self.conn_type = ConnectionType::Close
|
||||
}
|
||||
match payload {
|
||||
PayloadType::None => self.payload = None,
|
||||
@@ -144,13 +143,12 @@ impl Decoder for Codec {
|
||||
}
|
||||
}
|
||||
|
||||
impl Encoder for Codec {
|
||||
type Item = Message<(Response<()>, BodySize)>;
|
||||
impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
|
||||
type Error = io::Error;
|
||||
|
||||
fn encode(
|
||||
&mut self,
|
||||
item: Self::Item,
|
||||
item: Message<(Response<()>, BodySize)>,
|
||||
dst: &mut BytesMut,
|
||||
) -> Result<(), Self::Error> {
|
||||
match item {
|
||||
@@ -159,14 +157,14 @@ impl Encoder for Codec {
|
||||
res.head_mut().version = self.version;
|
||||
|
||||
// connection status
|
||||
self.ctype = if let Some(ct) = res.head().ctype() {
|
||||
self.conn_type = if let Some(ct) = res.head().conn_type() {
|
||||
if ct == ConnectionType::KeepAlive {
|
||||
self.ctype
|
||||
self.conn_type
|
||||
} else {
|
||||
ct
|
||||
}
|
||||
} else {
|
||||
self.ctype
|
||||
self.conn_type
|
||||
};
|
||||
|
||||
// encode message
|
||||
@@ -177,10 +175,9 @@ impl Encoder for Codec {
|
||||
self.flags.contains(Flags::STREAM),
|
||||
self.version,
|
||||
length,
|
||||
self.ctype,
|
||||
self.conn_type,
|
||||
&self.config,
|
||||
)?;
|
||||
// self.headers_size = (dst.len() - len) as u32;
|
||||
}
|
||||
Message::Chunk(Some(bytes)) => {
|
||||
self.encoder.encode_chunk(bytes.as_ref(), dst)?;
|
||||
@@ -189,6 +186,7 @@ impl Encoder for Codec {
|
||||
self.encoder.encode_eof(dst)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -199,10 +197,10 @@ mod tests {
|
||||
use http::Method;
|
||||
|
||||
use super::*;
|
||||
use crate::httpmessage::HttpMessage;
|
||||
use crate::HttpMessage as _;
|
||||
|
||||
#[test]
|
||||
fn test_http_request_chunked_payload_and_next_message() {
|
||||
#[actix_rt::test]
|
||||
async fn test_http_request_chunked_payload_and_next_message() {
|
||||
let mut codec = Codec::default();
|
||||
|
||||
let mut buf = BytesMut::from(

@@ -1,20 +1,17 @@
-use std::convert::TryFrom;
-use std::io;
-use std::marker::PhantomData;
-use std::task::Poll;
+use std::{convert::TryFrom, io, marker::PhantomData, mem::MaybeUninit, task::Poll};

use actix_codec::Decoder;
-use bytes::{Buf, Bytes, BytesMut};
-use http::header::{HeaderName, HeaderValue};
-use http::{header, Method, StatusCode, Uri, Version};
+use bytes::{Bytes, BytesMut};
+use http::{
+    header::{self, HeaderName, HeaderValue},
+    Method, StatusCode, Uri, Version,
+};
use log::{debug, error, trace};

-use crate::error::ParseError;
-use crate::header::HeaderMap;
-use crate::message::{ConnectionType, ResponseHead};
-use crate::request::Request;
+use super::chunked::ChunkedState;
+use crate::{error::ParseError, header::HeaderMap, ConnectionType, Request, ResponseHead};

-const MAX_BUFFER_SIZE: usize = 131_072;
+pub(crate) const MAX_BUFFER_SIZE: usize = 131_072;
const MAX_HEADERS: usize = 96;

/// Incoming message decoder
|
||||
@@ -50,7 +47,7 @@ pub(crate) enum PayloadLength {
|
||||
}
|
||||
|
||||
pub(crate) trait MessageType: Sized {
|
||||
fn set_connection_type(&mut self, ctype: Option<ConnectionType>);
|
||||
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>);
|
||||
|
||||
fn set_expect(&mut self);
|
||||
|
||||
@@ -67,24 +64,35 @@ pub(crate) trait MessageType: Sized {
|
||||
let mut has_upgrade_websocket = false;
|
||||
let mut expect = false;
|
||||
let mut chunked = false;
|
||||
let mut seen_te = false;
|
||||
let mut content_length = None;
|
||||
|
||||
{
|
||||
let headers = self.headers_mut();
|
||||
|
||||
for idx in raw_headers.iter() {
|
||||
let name =
|
||||
HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
|
||||
let name = HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
|
||||
|
||||
// SAFETY: httparse checks header value is valid UTF-8
|
||||
// SAFETY: httparse already checks header value is only visible ASCII bytes
|
||||
// from_maybe_shared_unchecked contains debug assertions so they are omitted here
|
||||
let value = unsafe {
|
||||
HeaderValue::from_maybe_shared_unchecked(
|
||||
slice.slice(idx.value.0..idx.value.1),
|
||||
)
|
||||
};
|
||||
|
||||
match name {
|
||||
header::CONTENT_LENGTH => {
|
||||
if let Ok(s) = value.to_str() {
|
||||
header::CONTENT_LENGTH if content_length.is_some() => {
|
||||
debug!("multiple Content-Length");
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
|
||||
header::CONTENT_LENGTH => match value.to_str() {
|
||||
Ok(s) if s.trim().starts_with('+') => {
|
||||
debug!("illegal Content-Length: {:?}", s);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
Ok(s) => {
|
||||
if let Ok(len) = s.parse::<u64>() {
|
||||
if len != 0 {
|
||||
content_length = Some(len);
|
||||
@@ -93,22 +101,38 @@ pub(crate) trait MessageType: Sized {
|
||||
debug!("illegal Content-Length: {:?}", s);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
} else {
|
||||
}
|
||||
Err(_) => {
|
||||
debug!("illegal Content-Length: {:?}", value);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
// transfer-encoding
|
||||
header::TRANSFER_ENCODING if seen_te => {
|
||||
debug!("multiple Transfer-Encoding not allowed");
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
|
||||
header::TRANSFER_ENCODING => {
|
||||
if let Ok(s) = value.to_str().map(|s| s.trim()) {
|
||||
chunked = s.eq_ignore_ascii_case("chunked");
|
||||
seen_te = true;
|
||||
|
||||
if let Ok(s) = value.to_str().map(str::trim) {
|
||||
if s.eq_ignore_ascii_case("chunked") {
|
||||
chunked = true;
|
||||
} else if s.eq_ignore_ascii_case("identity") {
|
||||
// allow silently since multiple TE headers are already checked
|
||||
} else {
|
||||
debug!("illegal Transfer-Encoding: {:?}", s);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
} else {
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
}
|
||||
// connection keep-alive state
|
||||
header::CONNECTION => {
|
||||
ka = if let Ok(conn) = value.to_str().map(|conn| conn.trim()) {
|
||||
ka = if let Ok(conn) = value.to_str().map(str::trim) {
|
||||
if conn.eq_ignore_ascii_case("keep-alive") {
|
||||
Some(ConnectionType::KeepAlive)
|
||||
} else if conn.eq_ignore_ascii_case("close") {
|
||||
@@ -123,7 +147,7 @@ pub(crate) trait MessageType: Sized {
|
||||
};
|
||||
}
|
||||
header::UPGRADE => {
|
||||
if let Ok(val) = value.to_str().map(|val| val.trim()) {
|
||||
if let Ok(val) = value.to_str().map(str::trim) {
|
||||
if val.eq_ignore_ascii_case("websocket") {
|
||||
has_upgrade_websocket = true;
|
||||
}
|
||||
@@ -135,7 +159,7 @@ pub(crate) trait MessageType: Sized {
|
||||
expect = true;
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
_ => {}
|
||||
}
|
||||
|
||||
headers.append(name, value);
|
||||
@@ -146,7 +170,7 @@ pub(crate) trait MessageType: Sized {
|
||||
self.set_expect()
|
||||
}
|
||||
|
||||
// https://tools.ietf.org/html/rfc7230#section-3.3.3
|
||||
// https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3
|
||||
if chunked {
|
||||
// Chunked encoding
|
||||
Ok(PayloadLength::Payload(PayloadType::Payload(
|
||||
@@ -166,8 +190,8 @@ pub(crate) trait MessageType: Sized {
|
||||
}
|
||||
|
||||
impl MessageType for Request {
|
||||
fn set_connection_type(&mut self, ctype: Option<ConnectionType>) {
|
||||
if let Some(ctype) = ctype {
|
||||
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>) {
|
||||
if let Some(ctype) = conn_type {
|
||||
self.head_mut().set_connection_type(ctype);
|
||||
}
|
||||
}
|
||||
@@ -184,10 +208,17 @@ impl MessageType for Request {
|
||||
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
|
||||
|
||||
let (len, method, uri, ver, h_len) = {
|
||||
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
|
||||
// SAFETY:
|
||||
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is
|
||||
// safe because the type we are claiming to have initialized here is a
|
||||
// bunch of `MaybeUninit`s, which do not require initialization.
|
||||
let mut parsed = unsafe {
|
||||
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
|
||||
.assume_init()
|
||||
};
|
||||
|
||||
let mut req = httparse::Request::new(&mut parsed);
|
||||
match req.parse(src)? {
|
||||
let mut req = httparse::Request::new(&mut []);
|
||||
match req.parse_with_uninit_headers(src, &mut parsed)? {
|
||||
httparse::Status::Complete(len) => {
|
||||
let method = Method::from_bytes(req.method.unwrap().as_bytes())
|
||||
.map_err(|_| ParseError::Method)?;
|
||||
@@ -201,7 +232,15 @@ impl MessageType for Request {
|
||||
|
||||
(len, method, uri, version, req.headers.len())
|
||||
}
|
||||
httparse::Status::Partial => return Ok(None),
|
||||
httparse::Status::Partial => {
|
||||
return if src.len() >= MAX_BUFFER_SIZE {
|
||||
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
|
||||
Err(ParseError::TooLarge)
|
||||
} else {
|
||||
// Return None to notify more read are needed for parsing request
|
||||
Ok(None)
|
||||
};
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -214,15 +253,12 @@ impl MessageType for Request {
|
||||
let decoder = match length {
|
||||
PayloadLength::Payload(pl) => pl,
|
||||
PayloadLength::UpgradeWebSocket => {
|
||||
// upgrade(websocket)
|
||||
// upgrade (WebSocket)
|
||||
PayloadType::Stream(PayloadDecoder::eof())
|
||||
}
|
||||
PayloadLength::None => {
|
||||
if method == Method::CONNECT {
|
||||
PayloadType::Stream(PayloadDecoder::eof())
|
||||
} else if src.len() >= MAX_BUFFER_SIZE {
|
||||
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
|
||||
return Err(ParseError::TooLarge);
|
||||
} else {
|
||||
PayloadType::None
|
||||
}
|
||||
@@ -239,8 +275,8 @@ impl MessageType for Request {
|
||||
}
|
||||
|
||||
impl MessageType for ResponseHead {
|
||||
fn set_connection_type(&mut self, ctype: Option<ConnectionType>) {
|
||||
if let Some(ctype) = ctype {
|
||||
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>) {
|
||||
if let Some(ctype) = conn_type {
|
||||
ResponseHead::set_connection_type(self, ctype);
|
||||
}
|
||||
}
|
||||
@@ -271,7 +307,14 @@ impl MessageType for ResponseHead {
|
||||
|
||||
(len, version, status, res.headers.len())
|
||||
}
|
||||
httparse::Status::Partial => return Ok(None),
|
||||
httparse::Status::Partial => {
|
||||
return if src.len() >= MAX_BUFFER_SIZE {
|
||||
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
|
||||
Err(ParseError::TooLarge)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -287,9 +330,6 @@ impl MessageType for ResponseHead {
|
||||
} else if status == StatusCode::SWITCHING_PROTOCOLS {
|
||||
// switching protocol or connect
|
||||
PayloadType::Stream(PayloadDecoder::eof())
|
||||
} else if src.len() >= MAX_BUFFER_SIZE {
|
||||
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
|
||||
return Err(ParseError::TooLarge);
|
||||
} else {
|
||||
// for HTTP/1.0 read to eof and close connection
|
||||
if msg.version == Version::HTTP_10 {
|
||||
@@ -397,20 +437,6 @@ enum Kind {
|
||||
Eof,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
enum ChunkedState {
|
||||
Size,
|
||||
SizeLws,
|
||||
Extension,
|
||||
SizeLf,
|
||||
Body,
|
||||
BodyCr,
|
||||
BodyLf,
|
||||
EndCr,
|
||||
EndLf,
|
||||
End,
|
||||
}
|
||||
|
||||
impl Decoder for PayloadDecoder {
|
||||
type Item = PayloadItem;
|
||||
type Error = io::Error;
|
||||
@@ -440,19 +466,23 @@ impl Decoder for PayloadDecoder {
|
||||
Kind::Chunked(ref mut state, ref mut size) => {
|
||||
loop {
|
||||
let mut buf = None;
|
||||
|
||||
// advances the chunked state
|
||||
*state = match state.step(src, size, &mut buf) {
|
||||
Poll::Pending => return Ok(None),
|
||||
Poll::Ready(Ok(state)) => state,
|
||||
Poll::Ready(Err(e)) => return Err(e),
|
||||
};
|
||||
|
||||
if *state == ChunkedState::End {
|
||||
trace!("End of chunked stream");
|
||||
return Ok(Some(PayloadItem::Eof));
|
||||
}
|
||||
|
||||
if let Some(buf) = buf {
|
||||
return Ok(Some(PayloadItem::Chunk(buf)));
|
||||
}
|
||||
|
||||
if src.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
@@ -469,201 +499,40 @@ impl Decoder for PayloadDecoder {
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! byte (
|
||||
($rdr:ident) => ({
|
||||
if $rdr.len() > 0 {
|
||||
let b = $rdr[0];
|
||||
$rdr.advance(1);
|
||||
b
|
||||
} else {
|
||||
return Poll::Pending
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
impl ChunkedState {
|
||||
fn step(
|
||||
&self,
|
||||
body: &mut BytesMut,
|
||||
size: &mut u64,
|
||||
buf: &mut Option<Bytes>,
|
||||
) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
use self::ChunkedState::*;
|
||||
match *self {
|
||||
Size => ChunkedState::read_size(body, size),
|
||||
SizeLws => ChunkedState::read_size_lws(body),
|
||||
Extension => ChunkedState::read_extension(body),
|
||||
SizeLf => ChunkedState::read_size_lf(body, size),
|
||||
Body => ChunkedState::read_body(body, size, buf),
|
||||
BodyCr => ChunkedState::read_body_cr(body),
|
||||
BodyLf => ChunkedState::read_body_lf(body),
|
||||
EndCr => ChunkedState::read_end_cr(body),
|
||||
EndLf => ChunkedState::read_end_lf(body),
|
||||
End => Poll::Ready(Ok(ChunkedState::End)),
|
||||
}
|
||||
}
|
||||
|
||||
fn read_size(
|
||||
rdr: &mut BytesMut,
|
||||
size: &mut u64,
|
||||
) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
let radix = 16;
|
||||
match byte!(rdr) {
|
||||
b @ b'0'..=b'9' => {
|
||||
*size *= radix;
|
||||
*size += u64::from(b - b'0');
|
||||
}
|
||||
b @ b'a'..=b'f' => {
|
||||
*size *= radix;
|
||||
*size += u64::from(b + 10 - b'a');
|
||||
}
|
||||
b @ b'A'..=b'F' => {
|
||||
*size *= radix;
|
||||
*size += u64::from(b + 10 - b'A');
|
||||
}
|
||||
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
|
||||
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
|
||||
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
|
||||
_ => {
|
||||
return Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size line: Invalid Size",
|
||||
)));
|
||||
}
|
||||
}
|
||||
Poll::Ready(Ok(ChunkedState::Size))
|
||||
}
|
||||
|
||||
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
trace!("read_size_lws");
|
||||
match byte!(rdr) {
|
||||
// LWS can follow the chunk size, but no more digits can come
|
||||
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
|
||||
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size linear white space",
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
|
||||
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
|
||||
}
|
||||
}
|
||||
fn read_size_lf(
|
||||
rdr: &mut BytesMut,
|
||||
size: &mut u64,
|
||||
) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\n' if *size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
|
||||
b'\n' if *size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk size LF",
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn read_body(
|
||||
rdr: &mut BytesMut,
|
||||
rem: &mut u64,
|
||||
buf: &mut Option<Bytes>,
|
||||
) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
trace!("Chunked read, remaining={:?}", rem);
|
||||
|
||||
let len = rdr.len() as u64;
|
||||
if len == 0 {
|
||||
Poll::Ready(Ok(ChunkedState::Body))
|
||||
} else {
|
||||
let slice;
|
||||
if *rem > len {
|
||||
slice = rdr.split().freeze();
|
||||
*rem -= len;
|
||||
} else {
|
||||
slice = rdr.split_to(*rem as usize).freeze();
|
||||
*rem = 0;
|
||||
}
|
||||
*buf = Some(slice);
|
||||
if *rem > 0 {
|
||||
Poll::Ready(Ok(ChunkedState::Body))
|
||||
} else {
|
||||
Poll::Ready(Ok(ChunkedState::BodyCr))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk body CR",
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk body LF",
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk end CR",
|
||||
))),
|
||||
}
|
||||
}
|
||||
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
|
||||
match byte!(rdr) {
|
||||
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
|
||||
_ => Poll::Ready(Err(io::Error::new(
|
||||
io::ErrorKind::InvalidInput,
|
||||
"Invalid chunk end LF",
|
||||
))),
|
||||
}
|
||||
}
|
||||
}
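The state machine deleted above (it now lives behind the new `use super::chunked::ChunkedState;` import) accumulates the chunk size one hexadecimal digit at a time before handing off to the extension, CRLF and body states. A standalone sketch of just that size-accumulation rule; the function name is illustrative and the saturating arithmetic is a defensive addition, not something the removed code does:

// Illustrative only: fold one byte of a chunk-size line into `size`,
// returning false when a non-hex byte ends the size portion.
fn accumulate_chunk_size(size: &mut u64, byte: u8) -> bool {
    let digit = match byte {
        b @ b'0'..=b'9' => u64::from(b - b'0'),
        b @ b'a'..=b'f' => u64::from(b - b'a') + 10,
        b @ b'A'..=b'F' => u64::from(b - b'A') + 10,
        // ';', CR or linear whitespace hands control to the next state
        _ => return false,
    };
    *size = size.saturating_mul(16).saturating_add(digit);
    true
}

fn main() {
    let mut size = 0u64;
    for &b in b"1a" {
        assert!(accumulate_chunk_size(&mut size, b));
    }
    assert_eq!(size, 0x1A); // a "1a\r\n" size line announces a 26-byte chunk
}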
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use http::{Method, Version};
|
||||
|
||||
use super::*;
|
||||
use crate::error::ParseError;
|
||||
use crate::http::header::{HeaderName, SET_COOKIE};
|
||||
use crate::httpmessage::HttpMessage;
|
||||
use crate::{
|
||||
error::ParseError,
|
||||
header::{HeaderName, SET_COOKIE},
|
||||
HttpMessage as _,
|
||||
};
|
||||
|
||||
impl PayloadType {
|
||||
fn unwrap(self) -> PayloadDecoder {
|
||||
pub(crate) fn unwrap(self) -> PayloadDecoder {
|
||||
match self {
|
||||
PayloadType::Payload(pl) => pl,
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn is_unhandled(&self) -> bool {
|
||||
pub(crate) fn is_unhandled(&self) -> bool {
|
||||
matches!(self, PayloadType::Stream(_))
|
||||
}
|
||||
}
|
||||
|
||||
impl PayloadItem {
|
||||
fn chunk(self) -> Bytes {
|
||||
pub(crate) fn chunk(self) -> Bytes {
|
||||
match self {
|
||||
PayloadItem::Chunk(chunk) => chunk,
|
||||
_ => panic!("error"),
|
||||
}
|
||||
}
|
||||
fn eof(&self) -> bool {
|
||||
|
||||
pub(crate) fn eof(&self) -> bool {
|
||||
matches!(*self, PayloadItem::Eof)
|
||||
}
|
||||
}
|
||||
@@ -683,7 +552,7 @@ mod tests {
|
||||
match MessageDecoder::<Request>::default().decode($e) {
|
||||
Err(err) => match err {
|
||||
ParseError::Io(_) => unreachable!("Parse error expected"),
|
||||
_ => (),
|
||||
_ => {}
|
||||
},
|
||||
_ => unreachable!("Error expected"),
|
||||
}
|
||||
@@ -732,8 +601,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_parse_body() {
|
||||
let mut buf =
|
||||
BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
|
||||
let mut buf = BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
@@ -749,8 +617,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_parse_body_crlf() {
|
||||
let mut buf =
|
||||
BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
|
||||
let mut buf = BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
@@ -819,8 +686,8 @@ mod tests {
|
||||
.get_all(SET_COOKIE)
|
||||
.map(|v| v.to_str().unwrap().to_owned())
|
||||
.collect();
|
||||
assert_eq!(val[1], "c1=cookie1");
|
||||
assert_eq!(val[0], "c2=cookie2");
|
||||
assert_eq!(val[0], "c1=cookie1");
|
||||
assert_eq!(val[1], "c2=cookie2");
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -956,34 +823,6 @@ mod tests {
|
||||
assert!(req.upgrade());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_request_chunked() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
if let Ok(val) = req.chunked() {
|
||||
assert!(val);
|
||||
} else {
|
||||
unreachable!("Error");
|
||||
}
|
||||
|
||||
// intentional typo in "chunked"
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chnked\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
if let Ok(val) = req.chunked() {
|
||||
assert!(!val);
|
||||
} else {
|
||||
unreachable!("Error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_headers_content_length_err_1() {
|
||||
let mut buf = BytesMut::from(
|
||||
@@ -1101,128 +940,9 @@ mod tests {
|
||||
expect_parse_err!(&mut buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_chunked_payload() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n",
|
||||
);
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
assert!(req.chunked().unwrap());
|
||||
|
||||
buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n");
|
||||
assert_eq!(
|
||||
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
|
||||
b"data"
|
||||
);
|
||||
assert_eq!(
|
||||
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
|
||||
b"line"
|
||||
);
|
||||
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_chunked_payload_and_next_message() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n",
|
||||
);
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
assert!(req.chunked().unwrap());
|
||||
|
||||
buf.extend(
|
||||
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
|
||||
POST /test2 HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n"
|
||||
.iter(),
|
||||
);
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"data");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"line");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert!(msg.eof());
|
||||
|
||||
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
assert!(req.chunked().unwrap());
|
||||
assert_eq!(*req.method(), Method::POST);
|
||||
assert!(req.chunked().unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_chunked_payload_chunks() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
assert!(req.chunked().unwrap());
|
||||
|
||||
buf.extend(b"4\r\n1111\r\n");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"1111");
|
||||
|
||||
buf.extend(b"4\r\ndata\r");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"data");
|
||||
|
||||
buf.extend(b"\n4");
|
||||
assert!(pl.decode(&mut buf).unwrap().is_none());
|
||||
|
||||
buf.extend(b"\r");
|
||||
assert!(pl.decode(&mut buf).unwrap().is_none());
|
||||
buf.extend(b"\n");
|
||||
assert!(pl.decode(&mut buf).unwrap().is_none());
|
||||
|
||||
buf.extend(b"li");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"li");
|
||||
|
||||
//trailers
|
||||
//buf.feed_data("test: test\r\n");
|
||||
//not_ready!(reader.parse(&mut buf, &mut readbuf));
|
||||
|
||||
buf.extend(b"ne\r\n0\r\n");
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(msg.chunk().as_ref(), b"ne");
|
||||
assert!(pl.decode(&mut buf).unwrap().is_none());
|
||||
|
||||
buf.extend(b"\r\n");
|
||||
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_chunked_payload_chunk_extension() {
|
||||
let mut buf = BytesMut::from(
|
||||
&"GET /test HTTP/1.1\r\n\
|
||||
transfer-encoding: chunked\r\n\r\n"[..],
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
assert!(msg.chunked().unwrap());
|
||||
|
||||
buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n")
|
||||
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
|
||||
assert_eq!(chunk, Bytes::from_static(b"data"));
|
||||
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
|
||||
assert_eq!(chunk, Bytes::from_static(b"line"));
|
||||
let msg = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert!(msg.eof());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_response_http10_read_until_eof() {
|
||||
let mut buf = BytesMut::from(&"HTTP/1.0 200 Ok\r\n\r\ntest data"[..]);
|
||||
let mut buf = BytesMut::from("HTTP/1.0 200 Ok\r\n\r\ntest data");
|
||||
|
||||
let mut reader = MessageDecoder::<ResponseHead>::default();
|
||||
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
@@ -1231,4 +951,84 @@ mod tests {
|
||||
let chunk = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"test data")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hrs_multiple_content_length() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET / HTTP/1.1\r\n\
|
||||
Host: example.com\r\n\
|
||||
Content-Length: 4\r\n\
|
||||
Content-Length: 2\r\n\
|
||||
\r\n\
|
||||
abcd",
|
||||
);
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hrs_content_length_plus() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET / HTTP/1.1\r\n\
|
||||
Host: example.com\r\n\
|
||||
Content-Length: +3\r\n\
|
||||
\r\n\
|
||||
000",
|
||||
);
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hrs_unknown_transfer_encoding() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET / HTTP/1.1\r\n\
|
||||
Host: example.com\r\n\
|
||||
Transfer-Encoding: JUNK\r\n\
|
||||
Transfer-Encoding: chunked\r\n\
|
||||
\r\n\
|
||||
5\r\n\
|
||||
hello\r\n\
|
||||
0",
|
||||
);
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hrs_multiple_transfer_encoding() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET / HTTP/1.1\r\n\
|
||||
Host: example.com\r\n\
|
||||
Content-Length: 51\r\n\
|
||||
Transfer-Encoding: identity\r\n\
|
||||
Transfer-Encoding: chunked\r\n\
|
||||
\r\n\
|
||||
0\r\n\
|
||||
\r\n\
|
||||
GET /forbidden HTTP/1.1\r\n\
|
||||
Host: example.com\r\n\r\n",
|
||||
);
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn transfer_encoding_agrees() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
Host: example.com\r\n\
|
||||
Content-Length: 3\r\n\
|
||||
Transfer-Encoding: identity\r\n\
|
||||
\r\n\
|
||||
0\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
let mut pl = pl.unwrap();
|
||||
|
||||
let chunk = pl.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"0\r\n")));
|
||||
}
|
||||
}
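The `hrs_*` cases above exercise request-smuggling defenses in the header parser: duplicate `Content-Length` headers, a `+`-prefixed length, and unknown or repeated `Transfer-Encoding` values must all fail to parse. A standalone sketch of the Content-Length rule alone; the helper name is illustrative and not part of the crate:

// Illustrative only: fold one Content-Length header value into the running
// state the way the parser above does.
fn fold_content_length(current: Option<u64>, raw: &str) -> Result<Option<u64>, ()> {
    if current.is_some() {
        // a second Content-Length header is rejected outright
        return Err(());
    }

    let s = raw.trim();
    if s.starts_with('+') {
        // "+3" would parse as 3 via `u64::from_str`, so it is rejected explicitly
        return Err(());
    }

    let len = s.parse::<u64>().map_err(|_| ())?;
    Ok(if len != 0 { Some(len) } else { None })
}

fn main() {
    assert_eq!(fold_content_length(None, "4"), Ok(Some(4)));
    assert!(fold_content_length(Some(4), "2").is_err()); // duplicate header
    assert!(fold_content_length(None, "+3").is_err()); // leading plus sign
}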
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,27 +1,29 @@
|
||||
use std::io::Write;
|
||||
use std::marker::PhantomData;
|
||||
use std::ptr::copy_nonoverlapping;
|
||||
use std::slice::from_raw_parts_mut;
|
||||
use std::{cmp, io};
|
||||
use std::{
|
||||
cmp,
|
||||
io::{self, Write as _},
|
||||
marker::PhantomData,
|
||||
ptr::copy_nonoverlapping,
|
||||
slice::from_raw_parts_mut,
|
||||
};
|
||||
|
||||
use bytes::{BufMut, BytesMut};
|
||||
|
||||
use crate::body::BodySize;
|
||||
use crate::config::ServiceConfig;
|
||||
use crate::header::map;
|
||||
use crate::helpers;
|
||||
use crate::http::header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
|
||||
use crate::http::{HeaderMap, StatusCode, Version};
|
||||
use crate::message::{ConnectionType, RequestHeadType};
|
||||
use crate::response::Response;
|
||||
use crate::{
|
||||
body::BodySize,
|
||||
header::{
|
||||
map::Value, HeaderMap, HeaderName, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
|
||||
},
|
||||
helpers, ConnectionType, RequestHeadType, Response, ServiceConfig, StatusCode, Version,
|
||||
};
|
||||
|
||||
const AVERAGE_HEADER_SIZE: usize = 30;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct MessageEncoder<T: MessageType> {
|
||||
#[allow(dead_code)]
|
||||
pub length: BodySize,
|
||||
pub te: TransferEncoding,
|
||||
_t: PhantomData<T>,
|
||||
_phantom: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T: MessageType> Default for MessageEncoder<T> {
|
||||
@@ -29,7 +31,7 @@ impl<T: MessageType> Default for MessageEncoder<T> {
|
||||
MessageEncoder {
|
||||
length: BodySize::None,
|
||||
te: TransferEncoding::empty(),
|
||||
_t: PhantomData,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -54,7 +56,7 @@ pub(crate) trait MessageType: Sized {
|
||||
dst: &mut BytesMut,
|
||||
version: Version,
|
||||
mut length: BodySize,
|
||||
ctype: ConnectionType,
|
||||
conn_type: ConnectionType,
|
||||
config: &ServiceConfig,
|
||||
) -> io::Result<()> {
|
||||
let chunked = self.chunked();
|
||||
@@ -64,19 +66,33 @@ pub(crate) trait MessageType: Sized {
|
||||
// Content length
|
||||
if let Some(status) = self.status() {
|
||||
match status {
|
||||
StatusCode::NO_CONTENT
|
||||
| StatusCode::CONTINUE
|
||||
| StatusCode::PROCESSING => length = BodySize::None,
|
||||
StatusCode::SWITCHING_PROTOCOLS => {
|
||||
StatusCode::CONTINUE
|
||||
| StatusCode::SWITCHING_PROTOCOLS
|
||||
| StatusCode::PROCESSING
|
||||
| StatusCode::NO_CONTENT => {
|
||||
// skip content-length and transfer-encoding headers
|
||||
// see https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.1
|
||||
// and https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.2
|
||||
skip_len = true;
|
||||
length = BodySize::Stream;
|
||||
length = BodySize::None
|
||||
}
|
||||
_ => (),
|
||||
|
||||
StatusCode::NOT_MODIFIED => {
|
||||
// 304 responses should never have a body but should retain a manually set
|
||||
// content-length header
|
||||
// see https://datatracker.ietf.org/doc/html/rfc7232#section-4.1
|
||||
skip_len = false;
|
||||
length = BodySize::None;
|
||||
}
|
||||
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
match length {
|
||||
BodySize::Stream => {
|
||||
if chunked {
|
||||
skip_len = true;
|
||||
if camel_case {
|
||||
dst.put_slice(b"\r\nTransfer-Encoding: chunked\r\n")
|
||||
} else {
|
||||
@@ -87,19 +103,14 @@ pub(crate) trait MessageType: Sized {
|
||||
dst.put_slice(b"\r\n");
|
||||
}
|
||||
}
|
||||
BodySize::Empty => {
|
||||
if camel_case {
|
||||
dst.put_slice(b"\r\nContent-Length: 0\r\n");
|
||||
} else {
|
||||
dst.put_slice(b"\r\ncontent-length: 0\r\n");
|
||||
}
|
||||
}
|
||||
BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"),
|
||||
BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"),
|
||||
BodySize::Sized(len) => helpers::write_content_length(len, dst),
|
||||
BodySize::None => dst.put_slice(b"\r\n"),
|
||||
}
|
||||
|
||||
// Connection
|
||||
match ctype {
|
||||
match conn_type {
|
||||
ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"),
|
||||
ConnectionType::KeepAlive if version < Version::HTTP_11 => {
|
||||
if camel_case {
|
||||
@@ -115,103 +126,87 @@ pub(crate) trait MessageType: Sized {
|
||||
dst.put_slice(b"connection: close\r\n")
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
_ => {}
|
||||
}
|
||||
|
||||
// merging headers from head and extra headers. HeaderMap::new() does not allocate.
|
||||
let empty_headers = HeaderMap::new();
|
||||
let extra_headers = self.extra_headers().unwrap_or(&empty_headers);
|
||||
let headers = self
|
||||
.headers()
|
||||
.inner
|
||||
.iter()
|
||||
.filter(|(name, _)| !extra_headers.contains_key(*name))
|
||||
.chain(extra_headers.inner.iter());
|
||||
|
||||
// write headers
|
||||
let mut pos = 0;
|
||||
|
||||
let mut has_date = false;
|
||||
|
||||
let mut buf = dst.chunk_mut().as_mut_ptr();
|
||||
let mut remaining = dst.capacity() - dst.len();
|
||||
let mut buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
|
||||
for (key, value) in headers {
|
||||
|
||||
// tracks bytes written since last buffer resize
|
||||
// since buf is a raw pointer to a bytes container storage but is written to without the
|
||||
// container's knowledge, this is used to sync the containers cursor after data is written
|
||||
let mut pos = 0;
|
||||
|
||||
self.write_headers(|key, value| {
|
||||
match *key {
|
||||
CONNECTION => continue,
|
||||
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue,
|
||||
DATE => {
|
||||
has_date = true;
|
||||
}
|
||||
_ => (),
|
||||
CONNECTION => return,
|
||||
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => return,
|
||||
DATE => has_date = true,
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let k = key.as_str().as_bytes();
|
||||
match value {
|
||||
map::Value::One(ref val) => {
|
||||
let v = val.as_ref();
|
||||
let v_len = v.len();
|
||||
let k_len = k.len();
|
||||
let len = k_len + v_len + 4;
|
||||
if len > remaining {
|
||||
unsafe {
|
||||
dst.advance_mut(pos);
|
||||
}
|
||||
pos = 0;
|
||||
dst.reserve(len * 2);
|
||||
remaining = dst.capacity() - dst.len();
|
||||
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
|
||||
}
|
||||
// use upper Camel-Case
|
||||
let k_len = k.len();
|
||||
|
||||
// TODO: drain?
|
||||
for val in value.iter() {
|
||||
let v = val.as_ref();
|
||||
let v_len = v.len();
|
||||
|
||||
// key length + value length + colon + space + \r\n
|
||||
let len = k_len + v_len + 4;
|
||||
|
||||
if len > remaining {
|
||||
// SAFETY: all the bytes written up to position "pos" are initialized
|
||||
// the written byte count and pointer advancement are kept in sync
|
||||
unsafe {
|
||||
if camel_case {
|
||||
write_camel_case(k, from_raw_parts_mut(buf, k_len))
|
||||
} else {
|
||||
write_data(k, buf, k_len)
|
||||
}
|
||||
buf = buf.add(k_len);
|
||||
write_data(b": ", buf, 2);
|
||||
buf = buf.add(2);
|
||||
write_data(v, buf, v_len);
|
||||
buf = buf.add(v_len);
|
||||
write_data(b"\r\n", buf, 2);
|
||||
buf = buf.add(2);
|
||||
pos += len;
|
||||
remaining -= len;
|
||||
dst.advance_mut(pos);
|
||||
}
|
||||
|
||||
pos = 0;
|
||||
dst.reserve(len * 2);
|
||||
remaining = dst.capacity() - dst.len();
|
||||
|
||||
// re-assign buf raw pointer since it's possible that the buffer was
|
||||
// reallocated and/or resized
|
||||
buf = dst.chunk_mut().as_mut_ptr();
|
||||
}
|
||||
map::Value::Multi(ref vec) => {
|
||||
for val in vec {
|
||||
let v = val.as_ref();
|
||||
let v_len = v.len();
|
||||
let k_len = k.len();
|
||||
let len = k_len + v_len + 4;
|
||||
if len > remaining {
|
||||
unsafe {
|
||||
dst.advance_mut(pos);
|
||||
}
|
||||
pos = 0;
|
||||
dst.reserve(len * 2);
|
||||
remaining = dst.capacity() - dst.len();
|
||||
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
|
||||
}
|
||||
// use upper Camel-Case
|
||||
unsafe {
|
||||
if camel_case {
|
||||
write_camel_case(k, from_raw_parts_mut(buf, k_len));
|
||||
} else {
|
||||
write_data(k, buf, k_len);
|
||||
}
|
||||
buf = buf.add(k_len);
|
||||
write_data(b": ", buf, 2);
|
||||
buf = buf.add(2);
|
||||
write_data(v, buf, v_len);
|
||||
buf = buf.add(v_len);
|
||||
write_data(b"\r\n", buf, 2);
|
||||
buf = buf.add(2);
|
||||
};
|
||||
pos += len;
|
||||
remaining -= len;
|
||||
|
||||
// SAFETY: on each write, it is enough to ensure that the advancement of
|
||||
// the cursor matches the number of bytes written
|
||||
unsafe {
|
||||
if camel_case {
|
||||
// use Camel-Case headers
|
||||
write_camel_case(k, buf, k_len);
|
||||
} else {
|
||||
write_data(k, buf, k_len);
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf.add(k_len);
|
||||
|
||||
write_data(b": ", buf, 2);
|
||||
buf = buf.add(2);
|
||||
|
||||
write_data(v, buf, v_len);
|
||||
buf = buf.add(v_len);
|
||||
|
||||
write_data(b"\r\n", buf, 2);
|
||||
buf = buf.add(2);
|
||||
};
|
||||
|
||||
pos += len;
|
||||
remaining -= len;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// final cursor synchronization with the bytes container
|
||||
//
|
||||
// SAFETY: all the bytes written up to position "pos" are initialized
|
||||
// the written byte count and pointer advancement are kept in sync
|
||||
unsafe {
|
||||
dst.advance_mut(pos);
|
||||
}
|
||||
@@ -226,6 +221,24 @@ pub(crate) trait MessageType: Sized {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_headers<F>(&mut self, mut f: F)
|
||||
where
|
||||
F: FnMut(&HeaderName, &Value),
|
||||
{
|
||||
match self.extra_headers() {
|
||||
Some(headers) => {
|
||||
// merging headers from head and extra headers.
|
||||
self.headers()
|
||||
.inner
|
||||
.iter()
|
||||
.filter(|(name, _)| !headers.contains_key(*name))
|
||||
.chain(headers.inner.iter())
|
||||
.for_each(|(k, v)| f(k, v))
|
||||
}
|
||||
None => self.headers().inner.iter().for_each(|(k, v)| f(k, v)),
|
||||
}
|
||||
}
|
||||
}
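The raw-pointer path above avoids per-byte bounds checks at the cost of manual cursor bookkeeping (`pos`, `remaining`, `advance_mut`). For reference, a purely safe sketch of the same `key: value\r\n` append using `BufMut`; this is an illustrative alternative, not the code the crate ships:

use bytes::{BufMut, BytesMut};

// Safe reference version of the header append performed above.
fn put_header(dst: &mut BytesMut, key: &str, value: &[u8]) {
    // key length + value length + ": " + "\r\n"
    dst.reserve(key.len() + value.len() + 4);
    dst.put_slice(key.as_bytes());
    dst.put_slice(b": ");
    dst.put_slice(value);
    dst.put_slice(b"\r\n");
}

fn main() {
    let mut dst = BytesMut::new();
    put_header(&mut dst, "content-length", b"0");
    assert_eq!(&dst[..], b"content-length: 0\r\n");
}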
|
||||
|
||||
impl MessageType for Response<()> {
|
||||
@@ -282,7 +295,7 @@ impl MessageType for RequestHeadType {
|
||||
let head = self.as_ref();
|
||||
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE);
|
||||
write!(
|
||||
Writer(dst),
|
||||
helpers::MutWriter(dst),
|
||||
"{} {} {}",
|
||||
head.method,
|
||||
head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
|
||||
@@ -292,11 +305,7 @@ impl MessageType for RequestHeadType {
|
||||
Version::HTTP_11 => "HTTP/1.1",
|
||||
Version::HTTP_2 => "HTTP/2.0",
|
||||
Version::HTTP_3 => "HTTP/3.0",
|
||||
_ =>
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"unsupported version"
|
||||
)),
|
||||
_ => return Err(io::Error::new(io::ErrorKind::Other, "unsupported version")),
|
||||
}
|
||||
)
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||
@@ -322,13 +331,13 @@ impl<T: MessageType> MessageEncoder<T> {
|
||||
stream: bool,
|
||||
version: Version,
|
||||
length: BodySize,
|
||||
ctype: ConnectionType,
|
||||
conn_type: ConnectionType,
|
||||
config: &ServiceConfig,
|
||||
) -> io::Result<()> {
|
||||
// transfer encoding
|
||||
if !head {
|
||||
self.te = match length {
|
||||
BodySize::Empty => TransferEncoding::empty(),
|
||||
BodySize::Sized(0) => TransferEncoding::empty(),
|
||||
BodySize::Sized(len) => TransferEncoding::length(len),
|
||||
BodySize::Stream => {
|
||||
if message.chunked() && !stream {
|
||||
@@ -344,7 +353,7 @@ impl<T: MessageType> MessageEncoder<T> {
|
||||
}
|
||||
|
||||
message.encode_status(dst)?;
|
||||
message.encode_headers(dst, version, length, ctype, config)
|
||||
message.encode_headers(dst, version, length, conn_type, config)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -358,10 +367,12 @@ pub(crate) struct TransferEncoding {
|
||||
enum TransferEncodingKind {
|
||||
/// An Encoder for when Transfer-Encoding includes `chunked`.
|
||||
Chunked(bool),
|
||||
|
||||
/// An Encoder for when Content-Length is set.
|
||||
///
|
||||
/// Enforces that the body is not longer than the Content-Length header.
|
||||
Length(u64),
|
||||
|
||||
/// An Encoder for when Content-Length is not known.
|
||||
///
|
||||
/// Application decides when to stop writing.
|
||||
@@ -415,7 +426,7 @@ impl TransferEncoding {
|
||||
*eof = true;
|
||||
buf.extend_from_slice(b"0\r\n\r\n");
|
||||
} else {
|
||||
writeln!(Writer(buf), "{:X}\r", msg.len())
|
||||
writeln!(helpers::MutWriter(buf), "{:X}\r", msg.len())
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
|
||||
|
||||
buf.reserve(msg.len() + 2);
|
||||
@@ -465,47 +476,44 @@ impl TransferEncoding {
|
||||
}
|
||||
}
|
||||
|
||||
struct Writer<'a>(pub &'a mut BytesMut);
|
||||
|
||||
impl<'a> io::Write for Writer<'a> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.0.extend_from_slice(buf);
|
||||
Ok(buf.len())
|
||||
}
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is
|
||||
/// valid for writes of at least `len` bytes.
|
||||
unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) {
|
||||
debug_assert_eq!(value.len(), len);
|
||||
copy_nonoverlapping(value.as_ptr(), buf, len);
|
||||
}
|
||||
|
||||
fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
|
||||
let mut index = 0;
|
||||
let key = value;
|
||||
let mut key_iter = key.iter();
|
||||
/// # Safety
|
||||
/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is
|
||||
/// valid for writes of at least `len` bytes.
|
||||
unsafe fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
|
||||
// first copy entire (potentially wrong) slice to output
|
||||
write_data(value, buf, len);
|
||||
|
||||
if let Some(c) = key_iter.next() {
|
||||
if *c >= b'a' && *c <= b'z' {
|
||||
buffer[index] = *c ^ b' ';
|
||||
index += 1;
|
||||
}
|
||||
} else {
|
||||
return;
|
||||
// SAFETY: We just initialized the buffer with `value`
|
||||
let buffer = from_raw_parts_mut(buf, len);
|
||||
|
||||
let mut iter = value.iter();
|
||||
|
||||
// first character should be uppercase
|
||||
if let Some(c @ b'a'..=b'z') = iter.next() {
|
||||
buffer[0] = c & 0b1101_1111;
|
||||
}
|
||||
|
||||
while let Some(c) = key_iter.next() {
|
||||
buffer[index] = *c;
|
||||
index += 1;
|
||||
if *c == b'-' {
|
||||
if let Some(c) = key_iter.next() {
|
||||
if *c >= b'a' && *c <= b'z' {
|
||||
buffer[index] = *c ^ b' ';
|
||||
index += 1;
|
||||
}
|
||||
// track 1 ahead of the current position since that's the location being assigned to
|
||||
let mut index = 2;
|
||||
|
||||
// remaining characters after hyphens should also be uppercase
|
||||
while let Some(&c) = iter.next() {
|
||||
if c == b'-' {
|
||||
// advance iter by one and uppercase if needed
|
||||
if let Some(c @ b'a'..=b'z') = iter.next() {
|
||||
buffer[index] = c & 0b1101_1111;
|
||||
}
|
||||
}
|
||||
|
||||
index += 1;
|
||||
}
|
||||
}
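Both versions of `write_camel_case` uppercase the first byte and every byte that follows a hyphen; the removed code XORs with the space byte while the new code clears bit 5 with `& 0b1101_1111`, which is equivalent for lowercase ASCII. A safe, allocation-based sketch of the same transformation; the helper is illustrative and not part of the crate:

// Illustrative only: camel-case an ASCII header name.
fn camel_case(name: &str) -> String {
    let mut out = Vec::with_capacity(name.len());
    let mut upper_next = true;

    for &b in name.as_bytes() {
        if upper_next && b.is_ascii_lowercase() {
            // clearing bit 5 maps b'a'..=b'z' onto b'A'..=b'Z'
            out.push(b & 0b1101_1111);
        } else {
            out.push(b);
        }
        upper_next = b == b'-';
    }

    // header names are ASCII, so this cannot fail
    String::from_utf8(out).unwrap()
}

fn main() {
    assert_eq!(camel_case("content-length"), "Content-Length");
    assert_eq!(camel_case("transfer-encoding"), "Transfer-Encoding");
}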
|
||||
|
||||
@@ -517,8 +525,10 @@ mod tests {
|
||||
use http::header::AUTHORIZATION;
|
||||
|
||||
use super::*;
|
||||
use crate::http::header::{HeaderValue, CONTENT_TYPE};
|
||||
use crate::RequestHead;
|
||||
use crate::{
|
||||
header::{HeaderValue, CONTENT_TYPE},
|
||||
RequestHead,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_chunked_te() {
|
||||
@@ -534,8 +544,8 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_camel_case() {
|
||||
#[actix_rt::test]
|
||||
async fn test_camel_case() {
|
||||
let mut bytes = BytesMut::with_capacity(2048);
|
||||
let mut head = RequestHead::default();
|
||||
head.set_camel_case_headers(true);
|
||||
@@ -548,12 +558,12 @@ mod tests {
|
||||
let _ = head.encode_headers(
|
||||
&mut bytes,
|
||||
Version::HTTP_11,
|
||||
BodySize::Empty,
|
||||
BodySize::Sized(0),
|
||||
ConnectionType::Close,
|
||||
&ServiceConfig::default(),
|
||||
);
|
||||
let data =
|
||||
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
|
||||
assert!(data.contains("Content-Length: 0\r\n"));
|
||||
assert!(data.contains("Connection: close\r\n"));
|
||||
assert!(data.contains("Content-Type: plain/text\r\n"));
|
||||
@@ -566,8 +576,7 @@ mod tests {
|
||||
ConnectionType::KeepAlive,
|
||||
&ServiceConfig::default(),
|
||||
);
|
||||
let data =
|
||||
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
assert!(data.contains("Transfer-Encoding: chunked\r\n"));
|
||||
assert!(data.contains("Content-Type: plain/text\r\n"));
|
||||
assert!(data.contains("Date: date\r\n"));
|
||||
@@ -588,16 +597,15 @@ mod tests {
|
||||
ConnectionType::KeepAlive,
|
||||
&ServiceConfig::default(),
|
||||
);
|
||||
let data =
|
||||
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
assert!(data.contains("transfer-encoding: chunked\r\n"));
|
||||
assert!(data.contains("content-type: xml\r\n"));
|
||||
assert!(data.contains("content-type: plain/text\r\n"));
|
||||
assert!(data.contains("date: date\r\n"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extra_headers() {
|
||||
#[actix_rt::test]
|
||||
async fn test_extra_headers() {
|
||||
let mut bytes = BytesMut::with_capacity(2048);
|
||||
|
||||
let mut head = RequestHead::default();
|
||||
@@ -618,15 +626,35 @@ mod tests {
|
||||
let _ = head.encode_headers(
|
||||
&mut bytes,
|
||||
Version::HTTP_11,
|
||||
BodySize::Empty,
|
||||
BodySize::Sized(0),
|
||||
ConnectionType::Close,
|
||||
&ServiceConfig::default(),
|
||||
);
|
||||
let data =
|
||||
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
assert!(data.contains("content-length: 0\r\n"));
|
||||
assert!(data.contains("connection: close\r\n"));
|
||||
assert!(data.contains("authorization: another authorization\r\n"));
|
||||
assert!(data.contains("date: date\r\n"));
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_no_content_length() {
|
||||
let mut bytes = BytesMut::with_capacity(2048);
|
||||
|
||||
let mut res = Response::with_body(StatusCode::SWITCHING_PROTOCOLS, ());
|
||||
res.headers_mut().insert(DATE, HeaderValue::from_static(""));
|
||||
res.headers_mut()
|
||||
.insert(CONTENT_LENGTH, HeaderValue::from_static("0"));
|
||||
|
||||
let _ = res.encode_headers(
|
||||
&mut bytes,
|
||||
Version::HTTP_11,
|
||||
BodySize::Stream,
|
||||
ConnectionType::Upgrade,
|
||||
&ServiceConfig::default(),
|
||||
);
|
||||
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
|
||||
assert!(!data.contains("content-length: 0\r\n"));
|
||||
assert!(!data.contains("transfer-encoding: chunked\r\n"));
|
||||
}
|
||||
}

@@ -1,38 +1,33 @@
use std::task::{Context, Poll};

use actix_service::{Service, ServiceFactory};
-use futures_util::future::{ok, Ready};
+use actix_utils::future::{ready, Ready};

-use crate::error::Error;
-use crate::request::Request;
+use crate::{Error, Request};

pub struct ExpectHandler;

-impl ServiceFactory for ExpectHandler {
-    type Config = ();
-    type Request = Request;
+impl ServiceFactory<Request> for ExpectHandler {
    type Response = Request;
    type Error = Error;
+    type Config = ();
    type Service = ExpectHandler;
    type InitError = Error;
    type Future = Ready<Result<Self::Service, Self::InitError>>;

-    fn new_service(&self, _: ()) -> Self::Future {
-        ok(ExpectHandler)
+    fn new_service(&self, _: Self::Config) -> Self::Future {
+        ready(Ok(ExpectHandler))
    }
}

-impl Service for ExpectHandler {
-    type Request = Request;
+impl Service<Request> for ExpectHandler {
    type Response = Request;
    type Error = Error;
    type Future = Ready<Result<Self::Response, Self::Error>>;

-    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
-        Poll::Ready(Ok(()))
-    }
+    actix_service::always_ready!();

-    fn call(&mut self, req: Request) -> Self::Future {
-        ok(req)
+    fn call(&self, req: Request) -> Self::Future {
+        ready(Ok(req))
        // TODO: add some way to trigger error
        // Err(error::ErrorExpectationFailed("test"))
    }
}
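The rewrite above reflects the actix-service 2.x trait shape: `Service` and `ServiceFactory` are generic over the request type, `call` takes `&self`, and `always_ready!()` replaces a hand-written `poll_ready`. A toy service written against that shape; the `Echo` type is illustrative only and assumes the actix-service 2.x and actix-utils 3.x APIs used in this diff:

use actix_service::Service;
use actix_utils::future::{ready, Ready};

// Toy service, used only to illustrate the trait shape adopted above.
struct Echo;

impl Service<String> for Echo {
    type Response = String;
    type Error = std::convert::Infallible;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    // expands to a `poll_ready` that always returns `Poll::Ready(Ok(()))`
    actix_service::always_ready!();

    fn call(&self, req: String) -> Self::Future {
        ready(Ok(req))
    }
}

Taking `&self` rather than `&mut self` lets a service be shared and called without exclusive access, which is why `ExpectHandler::call` drops the `mut` in this diff.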
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.